• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2015, VIXL authors
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 //   * Redistributions of source code must retain the above copyright notice,
8 //     this list of conditions and the following disclaimer.
9 //   * Redistributions in binary form must reproduce the above copyright notice,
10 //     this list of conditions and the following disclaimer in the documentation
11 //     and/or other materials provided with the distribution.
12 //   * Neither the name of ARM Limited nor the names of its contributors may be
13 //     used to endorse or promote products derived from this software without
14 //     specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 
27 #ifndef VIXL_AARCH64_ASSEMBLER_AARCH64_H_
28 #define VIXL_AARCH64_ASSEMBLER_AARCH64_H_
29 
30 #include "../assembler-base-vixl.h"
31 #include "../code-generation-scopes-vixl.h"
32 #include "../cpu-features.h"
33 #include "../globals-vixl.h"
34 #include "../invalset-vixl.h"
35 #include "../utils-vixl.h"
36 #include "operands-aarch64.h"
37 
38 namespace vixl {
39 namespace aarch64 {
40 
41 class LabelTestHelper;  // Forward declaration.
42 
43 
// A position in the generated code, used as the target of branches and
// PC-relative address computations. A label is "bound" once it has been given
// a buffer offset, and "linked" while instructions that refer to it are still
// waiting to be resolved.
class Label {
 public:
  Label() : location_(kLocationUnbound) {}
  ~Label() {
    // All links to a label must have been resolved before it is destructed.
    VIXL_ASSERT(!IsLinked());
  }

  // A label is bound once it has been assigned a location in the buffer.
  bool IsBound() const { return location_ >= 0; }
  // A label is linked while at least one instruction still refers to it.
  bool IsLinked() const { return !links_.empty(); }

  // Offset of the bound position from the start of the buffer.
  ptrdiff_t GetLocation() const { return location_; }
  VIXL_DEPRECATED("GetLocation", ptrdiff_t location() const) {
    return GetLocation();
  }

  // Parameters of the InvalSet used to store the offsets of linked
  // instructions (see invalset-vixl.h for their meaning).
  static const int kNPreallocatedLinks = 4;
  static const ptrdiff_t kInvalidLinkKey = PTRDIFF_MAX;
  static const size_t kReclaimFrom = 512;
  static const size_t kReclaimFactor = 2;

  typedef InvalSet<ptrdiff_t,
                   kNPreallocatedLinks,
                   ptrdiff_t,
                   kInvalidLinkKey,
                   kReclaimFrom,
                   kReclaimFactor>
      LinksSetBase;
  typedef InvalSetIterator<LinksSetBase> LabelLinksIteratorBase;

 private:
  class LinksSet : public LinksSetBase {
   public:
    LinksSet() : LinksSetBase() {}
  };

  // Allows iterating over the links of a label. The behaviour is undefined if
  // the list of links is modified in any way while iterating.
  class LabelLinksIterator : public LabelLinksIteratorBase {
   public:
    explicit LabelLinksIterator(Label* label)
        : LabelLinksIteratorBase(&label->links_) {}

    // TODO: Remove these and use the STL-like interface instead.
    using LabelLinksIteratorBase::Advance;
    using LabelLinksIteratorBase::Current;
  };

  // Assign the label a location. Private: only the friend classes below may
  // bind a label, as the offset must be consistent with their buffer.
  void Bind(ptrdiff_t location) {
    // Labels can only be bound once.
    VIXL_ASSERT(!IsBound());
    location_ = location;
  }

  // Record the offset of an instruction that refers to this (unbound) label.
  void AddLink(ptrdiff_t instruction) {
    // If a label is bound, the assembler already has the information it needs
    // to write the instruction, so there is no need to add it to links_.
    VIXL_ASSERT(!IsBound());
    links_.insert(instruction);
  }

  // Remove a single linked instruction offset (e.g. once it is resolved).
  void DeleteLink(ptrdiff_t instruction) { links_.erase(instruction); }

  void ClearAllLinks() { links_.clear(); }

  // TODO: The comment below considers average case complexity for our
  // usual use-cases. The elements of interest are:
  // - Branches to a label are emitted in order: branch instructions to a label
  // are generated at an offset in the code generation buffer greater than any
  // other branch to that same label already generated. As an example, this can
  // be broken when an instruction is patched to become a branch. Note that the
  // code will still work, but the complexity considerations below may locally
  // not apply any more.
  // - Veneers are generated in order: for multiple branches of the same type
  // branching to the same unbound label going out of range, veneers are
  // generated in growing order of the branch instruction offset from the start
  // of the buffer.
  //
  // When creating a veneer for a branch going out of range, the link for this
  // branch needs to be removed from this `links_`. Since all branches are
  // tracked in one underlying InvalSet, the complexity for this deletion is the
  // same as for finding the element, ie. O(n), where n is the number of links
  // in the set.
  // This could be reduced to O(1) by using the same trick as used when tracking
  // branch information for veneers: split the container to use one set per type
  // of branch. With that setup, when a veneer is created and the link needs to
  // be deleted, if the two points above hold, it must be the minimum element of
  // the set for its type of branch, and that minimum element will be accessible
  // in O(1).

  // The offsets of the instructions that have linked to this label.
  LinksSet links_;
  // The label location, or kLocationUnbound while the label is unbound.
  ptrdiff_t location_;

  static const ptrdiff_t kLocationUnbound = -1;

// It is not safe to copy labels, so disable the copy constructor and operator
// by declaring them private (without an implementation).
#if __cplusplus >= 201103L
  Label(const Label&) = delete;
  void operator=(const Label&) = delete;
#else
  Label(const Label&);
  void operator=(const Label&);
#endif

  // The Assembler class is responsible for binding and linking labels, since
  // the stored offsets need to be consistent with the Assembler's buffer.
  friend class Assembler;
  // The MacroAssembler and VeneerPool handle resolution of branches to distant
  // targets.
  friend class MacroAssembler;
  friend class VeneerPool;
};
159 
160 
161 class Assembler;
162 class LiteralPool;
163 
164 // A literal is a 32-bit or 64-bit piece of data stored in the instruction
165 // stream and loaded through a pc relative load. The same literal can be
166 // referred to by multiple instructions but a literal can only reside at one
167 // place in memory. A literal can be used by a load before or after being
168 // placed in memory.
169 //
170 // Internally an offset of 0 is associated with a literal which has been
171 // neither used nor placed. Then two possibilities arise:
172 //  1) the label is placed, the offset (stored as offset + 1) is used to
173 //     resolve any subsequent load using the label.
174 //  2) the label is not placed and offset is the offset of the last load using
175 //     the literal (stored as -offset -1). If multiple loads refer to this
176 //     literal then the last load holds the offset of the preceding load and
177 //     all loads form a chain. Once the offset is placed all the loads in the
178 //     chain are resolved and future loads fall back to possibility 1.
class RawLiteral {
 public:
  // Controls which component is responsible for deleting the literal object.
  enum DeletionPolicy {
    kDeletedOnPlacementByPool,
    kDeletedOnPoolDestruction,
    kManuallyDeleted
  };

  RawLiteral(size_t size,
             LiteralPool* literal_pool,
             DeletionPolicy deletion_policy = kManuallyDeleted);

  // The literal pool only sees and deletes `RawLiteral*` pointers, but they are
  // actually pointing to `Literal<T>` objects.
  virtual ~RawLiteral() {}

  // Size of the literal in bytes: one of kWRegSizeInBytes, kXRegSizeInBytes or
  // kQRegSizeInBytes.
  size_t GetSize() const {
    VIXL_STATIC_ASSERT(kDRegSizeInBytes == kXRegSizeInBytes);
    VIXL_STATIC_ASSERT(kSRegSizeInBytes == kWRegSizeInBytes);
    VIXL_ASSERT((size_ == kXRegSizeInBytes) || (size_ == kWRegSizeInBytes) ||
                (size_ == kQRegSizeInBytes));
    return size_;
  }
  VIXL_DEPRECATED("GetSize", size_t size()) { return GetSize(); }

  // Raw-value accessors. Only the accessor matching size_ is valid to call;
  // the others will fail the size assertion.
  uint64_t GetRawValue128Low64() const {
    VIXL_ASSERT(size_ == kQRegSizeInBytes);
    return low64_;
  }
  VIXL_DEPRECATED("GetRawValue128Low64", uint64_t raw_value128_low64()) {
    return GetRawValue128Low64();
  }

  uint64_t GetRawValue128High64() const {
    VIXL_ASSERT(size_ == kQRegSizeInBytes);
    return high64_;
  }
  VIXL_DEPRECATED("GetRawValue128High64", uint64_t raw_value128_high64()) {
    return GetRawValue128High64();
  }

  uint64_t GetRawValue64() const {
    VIXL_ASSERT(size_ == kXRegSizeInBytes);
    VIXL_ASSERT(high64_ == 0);
    return low64_;
  }
  VIXL_DEPRECATED("GetRawValue64", uint64_t raw_value64()) {
    return GetRawValue64();
  }

  uint32_t GetRawValue32() const {
    VIXL_ASSERT(size_ == kWRegSizeInBytes);
    VIXL_ASSERT(high64_ == 0);
    VIXL_ASSERT(IsUint32(low64_) || IsInt32(low64_));
    return static_cast<uint32_t>(low64_);
  }
  VIXL_DEPRECATED("GetRawValue32", uint32_t raw_value32()) {
    return GetRawValue32();
  }

  // State queries decoding offset_ (see the class comment above):
  //   offset_ == 0  -> neither used nor placed,
  //   offset_ <  0  -> used (encodes the last use as -offset - 1),
  //   offset_ >  0  -> placed (encodes the position as offset + 1).
  bool IsUsed() const { return offset_ < 0; }
  bool IsPlaced() const { return offset_ > 0; }

  LiteralPool* GetLiteralPool() const { return literal_pool_; }

  // Buffer offset at which the literal was placed (decodes offset_ - 1).
  ptrdiff_t GetOffset() const {
    VIXL_ASSERT(IsPlaced());
    return offset_ - 1;
  }
  VIXL_DEPRECATED("GetOffset", ptrdiff_t offset()) { return GetOffset(); }

 protected:
  // Record the placement position; stored as offset + 1 so that zero keeps
  // meaning "neither used nor placed".
  void SetOffset(ptrdiff_t offset) {
    VIXL_ASSERT(offset >= 0);
    VIXL_ASSERT(IsWordAligned(offset));
    VIXL_ASSERT(!IsPlaced());
    offset_ = offset + 1;
  }
  VIXL_DEPRECATED("SetOffset", void set_offset(ptrdiff_t offset)) {
    SetOffset(offset);
  }

  // Offset of the most recent load that used this (still unplaced) literal.
  ptrdiff_t GetLastUse() const {
    VIXL_ASSERT(IsUsed());
    return -offset_ - 1;
  }
  VIXL_DEPRECATED("GetLastUse", ptrdiff_t last_use()) { return GetLastUse(); }

  // Record the offset of the latest load; stored as -offset - 1 (see the
  // class comment above).
  void SetLastUse(ptrdiff_t offset) {
    VIXL_ASSERT(offset >= 0);
    VIXL_ASSERT(IsWordAligned(offset));
    VIXL_ASSERT(!IsPlaced());
    offset_ = -offset - 1;
  }
  VIXL_DEPRECATED("SetLastUse", void set_last_use(ptrdiff_t offset)) {
    SetLastUse(offset);
  }

  size_t size_;
  // Encodes used/placed state; see IsUsed()/IsPlaced() above.
  ptrdiff_t offset_;
  // Raw value. 128-bit literals use both halves; smaller literals keep
  // high64_ == 0 (asserted by the accessors).
  uint64_t low64_;
  uint64_t high64_;

 private:
  LiteralPool* literal_pool_;
  DeletionPolicy deletion_policy_;

  friend class Assembler;
  friend class LiteralPool;
};
289 
290 
291 template <typename T>
292 class Literal : public RawLiteral {
293  public:
294   explicit Literal(T value,
295                    LiteralPool* literal_pool = NULL,
296                    RawLiteral::DeletionPolicy ownership = kManuallyDeleted)
RawLiteral(sizeof (value),literal_pool,ownership)297       : RawLiteral(sizeof(value), literal_pool, ownership) {
298     VIXL_STATIC_ASSERT(sizeof(value) <= kXRegSizeInBytes);
299     UpdateValue(value);
300   }
301 
302   Literal(T high64,
303           T low64,
304           LiteralPool* literal_pool = NULL,
305           RawLiteral::DeletionPolicy ownership = kManuallyDeleted)
RawLiteral(kQRegSizeInBytes,literal_pool,ownership)306       : RawLiteral(kQRegSizeInBytes, literal_pool, ownership) {
307     VIXL_STATIC_ASSERT(sizeof(low64) == (kQRegSizeInBytes / 2));
308     UpdateValue(high64, low64);
309   }
310 
~Literal()311   virtual ~Literal() {}
312 
313   // Update the value of this literal, if necessary by rewriting the value in
314   // the pool.
315   // If the literal has already been placed in a literal pool, the address of
316   // the start of the code buffer must be provided, as the literal only knows it
317   // offset from there. This also allows patching the value after the code has
318   // been moved in memory.
319   void UpdateValue(T new_value, uint8_t* code_buffer = NULL) {
320     VIXL_ASSERT(sizeof(new_value) == size_);
321     memcpy(&low64_, &new_value, sizeof(new_value));
322     if (IsPlaced()) {
323       VIXL_ASSERT(code_buffer != NULL);
324       RewriteValueInCode(code_buffer);
325     }
326   }
327 
328   void UpdateValue(T high64, T low64, uint8_t* code_buffer = NULL) {
329     VIXL_ASSERT(sizeof(low64) == size_ / 2);
330     memcpy(&low64_, &low64, sizeof(low64));
331     memcpy(&high64_, &high64, sizeof(high64));
332     if (IsPlaced()) {
333       VIXL_ASSERT(code_buffer != NULL);
334       RewriteValueInCode(code_buffer);
335     }
336   }
337 
338   void UpdateValue(T new_value, const Assembler* assembler);
339   void UpdateValue(T high64, T low64, const Assembler* assembler);
340 
341  private:
RewriteValueInCode(uint8_t * code_buffer)342   void RewriteValueInCode(uint8_t* code_buffer) {
343     VIXL_ASSERT(IsPlaced());
344     VIXL_STATIC_ASSERT(sizeof(T) <= kXRegSizeInBytes);
345     switch (GetSize()) {
346       case kSRegSizeInBytes:
347         *reinterpret_cast<uint32_t*>(code_buffer + GetOffset()) =
348             GetRawValue32();
349         break;
350       case kDRegSizeInBytes:
351         *reinterpret_cast<uint64_t*>(code_buffer + GetOffset()) =
352             GetRawValue64();
353         break;
354       default:
355         VIXL_ASSERT(GetSize() == kQRegSizeInBytes);
356         uint64_t* base_address =
357             reinterpret_cast<uint64_t*>(code_buffer + GetOffset());
358         *base_address = GetRawValue128Low64();
359         *(base_address + 1) = GetRawValue128High64();
360     }
361   }
362 };
363 
364 
365 // Control whether or not position-independent code should be emitted.
// Control whether or not position-independent code should be emitted.
// Passed to the Assembler constructors below.
enum PositionIndependentCodeOption {
  // All code generated will be position-independent; all branches and
  // references to labels generated with the Label class will use PC-relative
  // addressing.
  PositionIndependentCode,

  // Allow VIXL to generate code that refers to absolute addresses. With this
  // option, it will not be possible to copy the code buffer and run it from a
  // different address; code must be generated in its final location.
  PositionDependentCode,

  // Allow VIXL to assume that the bottom 12 bits of the address will be
  // constant, but that the top 48 bits may change. This allows `adrp` to
  // function in systems which copy code between pages, but otherwise maintain
  // 4KB page alignment.
  PageOffsetDependentCode
};
383 
384 
385 // Control how scaled- and unscaled-offset loads and stores are generated.
// Control how scaled- and unscaled-offset loads and stores are generated.
// "Prefer" options may fall back to another addressing form; "Require"
// options assert that the requested form can encode the operand.
enum LoadStoreScalingOption {
  // Prefer scaled-immediate-offset instructions, but emit unscaled-offset,
  // register-offset, pre-index or post-index instructions if necessary.
  PreferScaledOffset,

  // Prefer unscaled-immediate-offset instructions, but emit scaled-offset,
  // register-offset, pre-index or post-index instructions if necessary.
  PreferUnscaledOffset,

  // Require scaled-immediate-offset instructions.
  RequireScaledOffset,

  // Require unscaled-immediate-offset instructions.
  RequireUnscaledOffset
};
401 
402 
403 // Assembler.
404 class Assembler : public vixl::internal::AssemblerBase {
405  public:
  // Construct an Assembler with no preallocated buffer. All constructors
  // default to the legacy AArch64 baseline feature set and
  // position-independent code generation.
  explicit Assembler(
      PositionIndependentCodeOption pic = PositionIndependentCode)
      : pic_(pic), cpu_features_(CPUFeatures::AArch64LegacyBaseline()) {}
  // Construct an Assembler that allocates a buffer of the given capacity.
  explicit Assembler(
      size_t capacity,
      PositionIndependentCodeOption pic = PositionIndependentCode)
      : AssemblerBase(capacity),
        pic_(pic),
        cpu_features_(CPUFeatures::AArch64LegacyBaseline()) {}
  // Construct an Assembler over an externally-owned buffer of the given
  // capacity.
  Assembler(byte* buffer,
            size_t capacity,
            PositionIndependentCodeOption pic = PositionIndependentCode)
      : AssemblerBase(buffer, capacity),
        pic_(pic),
        cpu_features_(CPUFeatures::AArch64LegacyBaseline()) {}

  // Upon destruction, the code will assert that one of the following is true:
  //  * The Assembler object has not been used.
  //  * Nothing has been emitted since the last Reset() call.
  //  * Nothing has been emitted since the last FinalizeCode() call.
  ~Assembler() {}
427 
  // System functions.

  // Start generating code from the beginning of the buffer, discarding any code
  // and data that has already been emitted into the buffer.
  void Reset();

  // Label.
  // Bind a label to the current PC.
  void bind(Label* label);

  // Bind a label to a specified offset from the start of the buffer.
  void BindToOffset(Label* label, ptrdiff_t offset);

  // Place a literal at the current PC.
  void place(RawLiteral* literal);

  // Deprecated buffer accessors, forwarding to their replacements.
  VIXL_DEPRECATED("GetCursorOffset", ptrdiff_t CursorOffset() const) {
    return GetCursorOffset();
  }

  VIXL_DEPRECATED("GetBuffer().GetCapacity()",
                  ptrdiff_t GetBufferEndOffset() const) {
    return static_cast<ptrdiff_t>(GetBuffer().GetCapacity());
  }
  VIXL_DEPRECATED("GetBuffer().GetCapacity()",
                  ptrdiff_t BufferEndOffset() const) {
    return GetBuffer().GetCapacity();
  }

  // Return the address of a bound label, as a value of type T. T must be
  // pointer-sized or larger (checked by the static assert below).
  template <typename T>
  T GetLabelAddress(const Label* label) const {
    VIXL_ASSERT(label->IsBound());
    VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
    return GetBuffer().GetOffsetAddress<T>(label->GetLocation());
  }

  // Return a pointer to the instruction at the given buffer offset.
  Instruction* GetInstructionAt(ptrdiff_t instruction_offset) {
    return GetBuffer()->GetOffsetAddress<Instruction*>(instruction_offset);
  }
  VIXL_DEPRECATED("GetInstructionAt",
                  Instruction* InstructionAt(ptrdiff_t instruction_offset)) {
    return GetInstructionAt(instruction_offset);
  }

  // Return the buffer offset of an instruction pointer that lies within the
  // buffer (asserted below).
  ptrdiff_t GetInstructionOffset(Instruction* instruction) {
    // Pointer arithmetic below relies on Instruction being byte-sized.
    VIXL_STATIC_ASSERT(sizeof(*instruction) == 1);
    ptrdiff_t offset =
        instruction - GetBuffer()->GetStartAddress<Instruction*>();
    VIXL_ASSERT((0 <= offset) &&
                (offset < static_cast<ptrdiff_t>(GetBuffer()->GetCapacity())));
    return offset;
  }
  VIXL_DEPRECATED("GetInstructionOffset",
                  ptrdiff_t InstructionOffset(Instruction* instruction)) {
    return GetInstructionOffset(instruction);
  }
485 
486   // Instruction set functions.
487 
488   // Branch / Jump instructions.
489   // Branch to register.
490   void br(const Register& xn);
491 
492   // Branch with link to register.
493   void blr(const Register& xn);
494 
495   // Branch to register with return hint.
496   void ret(const Register& xn = lr);
497 
498   // Branch to register, with pointer authentication. Using key A and a modifier
499   // of zero [Armv8.3].
500   void braaz(const Register& xn);
501 
502   // Branch to register, with pointer authentication. Using key B and a modifier
503   // of zero [Armv8.3].
504   void brabz(const Register& xn);
505 
506   // Branch with link to register, with pointer authentication. Using key A and
507   // a modifier of zero [Armv8.3].
508   void blraaz(const Register& xn);
509 
510   // Branch with link to register, with pointer authentication. Using key B and
511   // a modifier of zero [Armv8.3].
512   void blrabz(const Register& xn);
513 
514   // Return from subroutine, with pointer authentication. Using key A [Armv8.3].
515   void retaa();
516 
517   // Return from subroutine, with pointer authentication. Using key B [Armv8.3].
518   void retab();
519 
520   // Branch to register, with pointer authentication. Using key A [Armv8.3].
521   void braa(const Register& xn, const Register& xm);
522 
523   // Branch to register, with pointer authentication. Using key B [Armv8.3].
524   void brab(const Register& xn, const Register& xm);
525 
526   // Branch with link to register, with pointer authentication. Using key A
527   // [Armv8.3].
528   void blraa(const Register& xn, const Register& xm);
529 
530   // Branch with link to register, with pointer authentication. Using key B
531   // [Armv8.3].
532   void blrab(const Register& xn, const Register& xm);
533 
534   // Unconditional branch to label.
535   void b(Label* label);
536 
537   // Conditional branch to label.
538   void b(Label* label, Condition cond);
539 
540   // Unconditional branch to PC offset.
541   void b(int64_t imm26);
542 
543   // Conditional branch to PC offset.
544   void b(int64_t imm19, Condition cond);
545 
546   // Branch with link to label.
547   void bl(Label* label);
548 
549   // Branch with link to PC offset.
550   void bl(int64_t imm26);
551 
552   // Compare and branch to label if zero.
553   void cbz(const Register& rt, Label* label);
554 
555   // Compare and branch to PC offset if zero.
556   void cbz(const Register& rt, int64_t imm19);
557 
558   // Compare and branch to label if not zero.
559   void cbnz(const Register& rt, Label* label);
560 
561   // Compare and branch to PC offset if not zero.
562   void cbnz(const Register& rt, int64_t imm19);
563 
564   // Table lookup from one register.
565   void tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
566 
567   // Table lookup from two registers.
568   void tbl(const VRegister& vd,
569            const VRegister& vn,
570            const VRegister& vn2,
571            const VRegister& vm);
572 
573   // Table lookup from three registers.
574   void tbl(const VRegister& vd,
575            const VRegister& vn,
576            const VRegister& vn2,
577            const VRegister& vn3,
578            const VRegister& vm);
579 
580   // Table lookup from four registers.
581   void tbl(const VRegister& vd,
582            const VRegister& vn,
583            const VRegister& vn2,
584            const VRegister& vn3,
585            const VRegister& vn4,
586            const VRegister& vm);
587 
588   // Table lookup extension from one register.
589   void tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm);
590 
591   // Table lookup extension from two registers.
592   void tbx(const VRegister& vd,
593            const VRegister& vn,
594            const VRegister& vn2,
595            const VRegister& vm);
596 
597   // Table lookup extension from three registers.
598   void tbx(const VRegister& vd,
599            const VRegister& vn,
600            const VRegister& vn2,
601            const VRegister& vn3,
602            const VRegister& vm);
603 
604   // Table lookup extension from four registers.
605   void tbx(const VRegister& vd,
606            const VRegister& vn,
607            const VRegister& vn2,
608            const VRegister& vn3,
609            const VRegister& vn4,
610            const VRegister& vm);
611 
612   // Test bit and branch to label if zero.
613   void tbz(const Register& rt, unsigned bit_pos, Label* label);
614 
615   // Test bit and branch to PC offset if zero.
616   void tbz(const Register& rt, unsigned bit_pos, int64_t imm14);
617 
618   // Test bit and branch to label if not zero.
619   void tbnz(const Register& rt, unsigned bit_pos, Label* label);
620 
621   // Test bit and branch to PC offset if not zero.
622   void tbnz(const Register& rt, unsigned bit_pos, int64_t imm14);
623 
624   // Address calculation instructions.
625   // Calculate a PC-relative address. Unlike for branches the offset in adr is
626   // unscaled (i.e. the result can be unaligned).
627 
628   // Calculate the address of a label.
629   void adr(const Register& xd, Label* label);
630 
631   // Calculate the address of a PC offset.
632   void adr(const Register& xd, int64_t imm21);
633 
634   // Calculate the page address of a label.
635   void adrp(const Register& xd, Label* label);
636 
637   // Calculate the page address of a PC offset.
638   void adrp(const Register& xd, int64_t imm21);
639 
640   // Data Processing instructions.
641   // Add.
642   void add(const Register& rd, const Register& rn, const Operand& operand);
643 
644   // Add and update status flags.
645   void adds(const Register& rd, const Register& rn, const Operand& operand);
646 
647   // Compare negative.
648   void cmn(const Register& rn, const Operand& operand);
649 
650   // Subtract.
651   void sub(const Register& rd, const Register& rn, const Operand& operand);
652 
653   // Subtract and update status flags.
654   void subs(const Register& rd, const Register& rn, const Operand& operand);
655 
656   // Compare.
657   void cmp(const Register& rn, const Operand& operand);
658 
659   // Negate.
660   void neg(const Register& rd, const Operand& operand);
661 
662   // Negate and update status flags.
663   void negs(const Register& rd, const Operand& operand);
664 
665   // Add with carry bit.
666   void adc(const Register& rd, const Register& rn, const Operand& operand);
667 
668   // Add with carry bit and update status flags.
669   void adcs(const Register& rd, const Register& rn, const Operand& operand);
670 
671   // Subtract with carry bit.
672   void sbc(const Register& rd, const Register& rn, const Operand& operand);
673 
674   // Subtract with carry bit and update status flags.
675   void sbcs(const Register& rd, const Register& rn, const Operand& operand);
676 
677   // Negate with carry bit.
678   void ngc(const Register& rd, const Operand& operand);
679 
680   // Negate with carry bit and update status flags.
681   void ngcs(const Register& rd, const Operand& operand);
682 
683   // Logical instructions.
684   // Bitwise and (A & B).
685   void and_(const Register& rd, const Register& rn, const Operand& operand);
686 
687   // Bitwise and (A & B) and update status flags.
688   void ands(const Register& rd, const Register& rn, const Operand& operand);
689 
690   // Bit test and set flags.
691   void tst(const Register& rn, const Operand& operand);
692 
693   // Bit clear (A & ~B).
694   void bic(const Register& rd, const Register& rn, const Operand& operand);
695 
696   // Bit clear (A & ~B) and update status flags.
697   void bics(const Register& rd, const Register& rn, const Operand& operand);
698 
699   // Bitwise or (A | B).
700   void orr(const Register& rd, const Register& rn, const Operand& operand);
701 
702   // Bitwise nor (A | ~B).
703   void orn(const Register& rd, const Register& rn, const Operand& operand);
704 
705   // Bitwise eor/xor (A ^ B).
706   void eor(const Register& rd, const Register& rn, const Operand& operand);
707 
708   // Bitwise enor/xnor (A ^ ~B).
709   void eon(const Register& rd, const Register& rn, const Operand& operand);
710 
711   // Logical shift left by variable.
712   void lslv(const Register& rd, const Register& rn, const Register& rm);
713 
714   // Logical shift right by variable.
715   void lsrv(const Register& rd, const Register& rn, const Register& rm);
716 
717   // Arithmetic shift right by variable.
718   void asrv(const Register& rd, const Register& rn, const Register& rm);
719 
720   // Rotate right by variable.
721   void rorv(const Register& rd, const Register& rn, const Register& rm);
722 
723   // Bitfield instructions.
724   // Bitfield move.
725   void bfm(const Register& rd,
726            const Register& rn,
727            unsigned immr,
728            unsigned imms);
729 
730   // Signed bitfield move.
731   void sbfm(const Register& rd,
732             const Register& rn,
733             unsigned immr,
734             unsigned imms);
735 
736   // Unsigned bitfield move.
737   void ubfm(const Register& rd,
738             const Register& rn,
739             unsigned immr,
740             unsigned imms);
741 
742   // Bfm aliases.
743   // Bitfield insert.
bfi(const Register & rd,const Register & rn,unsigned lsb,unsigned width)744   void bfi(const Register& rd,
745            const Register& rn,
746            unsigned lsb,
747            unsigned width) {
748     VIXL_ASSERT(width >= 1);
749     VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
750     bfm(rd,
751         rn,
752         (rd.GetSizeInBits() - lsb) & (rd.GetSizeInBits() - 1),
753         width - 1);
754   }
755 
756   // Bitfield extract and insert low.
bfxil(const Register & rd,const Register & rn,unsigned lsb,unsigned width)757   void bfxil(const Register& rd,
758              const Register& rn,
759              unsigned lsb,
760              unsigned width) {
761     VIXL_ASSERT(width >= 1);
762     VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
763     bfm(rd, rn, lsb, lsb + width - 1);
764   }
765 
766   // Bitfield clear [Armv8.2].
  // Bitfield clear [Armv8.2]: clear `width` bits starting at `lsb` by
  // inserting bits from the zero register via bfi.
  void bfc(const Register& rd, unsigned lsb, unsigned width) {
    bfi(rd, AppropriateZeroRegFor(rd), lsb, width);
  }
770 
771   // Sbfm aliases.
772   // Arithmetic shift right.
  // Arithmetic shift right: alias of sbfm with immr = shift and
  // imms = reg_size - 1.
  void asr(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(shift < static_cast<unsigned>(rd.GetSizeInBits()));
    sbfm(rd, rn, shift, rd.GetSizeInBits() - 1);
  }
777 
778   // Signed bitfield insert with zero at right.
sbfiz(const Register & rd,const Register & rn,unsigned lsb,unsigned width)779   void sbfiz(const Register& rd,
780              const Register& rn,
781              unsigned lsb,
782              unsigned width) {
783     VIXL_ASSERT(width >= 1);
784     VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
785     sbfm(rd,
786          rn,
787          (rd.GetSizeInBits() - lsb) & (rd.GetSizeInBits() - 1),
788          width - 1);
789   }
790 
791   // Signed bitfield extract.
  // Signed bitfield extract: alias of sbfm with immr = lsb and
  // imms = lsb + width - 1.
  void sbfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    VIXL_ASSERT(width >= 1);
    VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
    sbfm(rd, rn, lsb, lsb + width - 1);
  }
800 
  // Signed extend byte: alias of sbfm extracting bits [7:0].
  void sxtb(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 7); }

  // Signed extend halfword: alias of sbfm extracting bits [15:0].
  void sxth(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 15); }

  // Signed extend word: alias of sbfm extracting bits [31:0].
  void sxtw(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 31); }
809 
810   // Ubfm aliases.
811   // Logical shift left.
lsl(const Register & rd,const Register & rn,unsigned shift)812   void lsl(const Register& rd, const Register& rn, unsigned shift) {
813     unsigned reg_size = rd.GetSizeInBits();
814     VIXL_ASSERT(shift < reg_size);
815     ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
816   }
817 
818   // Logical shift right.
lsr(const Register & rd,const Register & rn,unsigned shift)819   void lsr(const Register& rd, const Register& rn, unsigned shift) {
820     VIXL_ASSERT(shift < static_cast<unsigned>(rd.GetSizeInBits()));
821     ubfm(rd, rn, shift, rd.GetSizeInBits() - 1);
822   }
823 
824   // Unsigned bitfield insert with zero at right.
ubfiz(const Register & rd,const Register & rn,unsigned lsb,unsigned width)825   void ubfiz(const Register& rd,
826              const Register& rn,
827              unsigned lsb,
828              unsigned width) {
829     VIXL_ASSERT(width >= 1);
830     VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
831     ubfm(rd,
832          rn,
833          (rd.GetSizeInBits() - lsb) & (rd.GetSizeInBits() - 1),
834          width - 1);
835   }
836 
837   // Unsigned bitfield extract.
ubfx(const Register & rd,const Register & rn,unsigned lsb,unsigned width)838   void ubfx(const Register& rd,
839             const Register& rn,
840             unsigned lsb,
841             unsigned width) {
842     VIXL_ASSERT(width >= 1);
843     VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
844     ubfm(rd, rn, lsb, lsb + width - 1);
845   }
846 
847   // Unsigned extend byte.
uxtb(const Register & rd,const Register & rn)848   void uxtb(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 7); }
849 
850   // Unsigned extend halfword.
uxth(const Register & rd,const Register & rn)851   void uxth(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 15); }
852 
853   // Unsigned extend word.
uxtw(const Register & rd,const Register & rn)854   void uxtw(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 31); }
855 
856   // Extract.
857   void extr(const Register& rd,
858             const Register& rn,
859             const Register& rm,
860             unsigned lsb);
861 
862   // Conditional select: rd = cond ? rn : rm.
863   void csel(const Register& rd,
864             const Register& rn,
865             const Register& rm,
866             Condition cond);
867 
868   // Conditional select increment: rd = cond ? rn : rm + 1.
869   void csinc(const Register& rd,
870              const Register& rn,
871              const Register& rm,
872              Condition cond);
873 
874   // Conditional select inversion: rd = cond ? rn : ~rm.
875   void csinv(const Register& rd,
876              const Register& rn,
877              const Register& rm,
878              Condition cond);
879 
880   // Conditional select negation: rd = cond ? rn : -rm.
881   void csneg(const Register& rd,
882              const Register& rn,
883              const Register& rm,
884              Condition cond);
885 
886   // Conditional set: rd = cond ? 1 : 0.
887   void cset(const Register& rd, Condition cond);
888 
889   // Conditional set mask: rd = cond ? -1 : 0.
890   void csetm(const Register& rd, Condition cond);
891 
892   // Conditional increment: rd = cond ? rn + 1 : rn.
893   void cinc(const Register& rd, const Register& rn, Condition cond);
894 
895   // Conditional invert: rd = cond ? ~rn : rn.
896   void cinv(const Register& rd, const Register& rn, Condition cond);
897 
898   // Conditional negate: rd = cond ? -rn : rn.
899   void cneg(const Register& rd, const Register& rn, Condition cond);
900 
901   // Rotate right.
ror(const Register & rd,const Register & rs,unsigned shift)902   void ror(const Register& rd, const Register& rs, unsigned shift) {
903     extr(rd, rs, rs, shift);
904   }
905 
906   // Conditional comparison.
907   // Conditional compare negative.
908   void ccmn(const Register& rn,
909             const Operand& operand,
910             StatusFlags nzcv,
911             Condition cond);
912 
913   // Conditional compare.
914   void ccmp(const Register& rn,
915             const Operand& operand,
916             StatusFlags nzcv,
917             Condition cond);
918 
919   // CRC-32 checksum from byte.
920   void crc32b(const Register& wd, const Register& wn, const Register& wm);
921 
922   // CRC-32 checksum from half-word.
923   void crc32h(const Register& wd, const Register& wn, const Register& wm);
924 
925   // CRC-32 checksum from word.
926   void crc32w(const Register& wd, const Register& wn, const Register& wm);
927 
928   // CRC-32 checksum from double word.
929   void crc32x(const Register& wd, const Register& wn, const Register& xm);
930 
931   // CRC-32 C checksum from byte.
932   void crc32cb(const Register& wd, const Register& wn, const Register& wm);
933 
934   // CRC-32 C checksum from half-word.
935   void crc32ch(const Register& wd, const Register& wn, const Register& wm);
936 
937   // CRC-32 C checksum from word.
938   void crc32cw(const Register& wd, const Register& wn, const Register& wm);
939 
940   // CRC-32 C checksum from double word.
941   void crc32cx(const Register& wd, const Register& wn, const Register& xm);
942 
943   // Multiply.
944   void mul(const Register& rd, const Register& rn, const Register& rm);
945 
946   // Negated multiply.
947   void mneg(const Register& rd, const Register& rn, const Register& rm);
948 
949   // Signed long multiply: 32 x 32 -> 64-bit.
950   void smull(const Register& xd, const Register& wn, const Register& wm);
951 
952   // Signed multiply high: 64 x 64 -> 64-bit <127:64>.
953   void smulh(const Register& xd, const Register& xn, const Register& xm);
954 
955   // Multiply and accumulate.
956   void madd(const Register& rd,
957             const Register& rn,
958             const Register& rm,
959             const Register& ra);
960 
961   // Multiply and subtract.
962   void msub(const Register& rd,
963             const Register& rn,
964             const Register& rm,
965             const Register& ra);
966 
967   // Signed long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
968   void smaddl(const Register& xd,
969               const Register& wn,
970               const Register& wm,
971               const Register& xa);
972 
973   // Unsigned long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
974   void umaddl(const Register& xd,
975               const Register& wn,
976               const Register& wm,
977               const Register& xa);
978 
979   // Unsigned long multiply: 32 x 32 -> 64-bit.
umull(const Register & xd,const Register & wn,const Register & wm)980   void umull(const Register& xd, const Register& wn, const Register& wm) {
981     umaddl(xd, wn, wm, xzr);
982   }
983 
984   // Unsigned multiply high: 64 x 64 -> 64-bit <127:64>.
985   void umulh(const Register& xd, const Register& xn, const Register& xm);
986 
987   // Signed long multiply and subtract: 64 - (32 x 32) -> 64-bit.
988   void smsubl(const Register& xd,
989               const Register& wn,
990               const Register& wm,
991               const Register& xa);
992 
993   // Unsigned long multiply and subtract: 64 - (32 x 32) -> 64-bit.
994   void umsubl(const Register& xd,
995               const Register& wn,
996               const Register& wm,
997               const Register& xa);
998 
999   // Signed integer divide.
1000   void sdiv(const Register& rd, const Register& rn, const Register& rm);
1001 
1002   // Unsigned integer divide.
1003   void udiv(const Register& rd, const Register& rn, const Register& rm);
1004 
1005   // Bit reverse.
1006   void rbit(const Register& rd, const Register& rn);
1007 
1008   // Reverse bytes in 16-bit half words.
1009   void rev16(const Register& rd, const Register& rn);
1010 
1011   // Reverse bytes in 32-bit words.
1012   void rev32(const Register& xd, const Register& xn);
1013 
1014   // Reverse bytes in 64-bit general purpose register, an alias for rev
1015   // [Armv8.2].
rev64(const Register & xd,const Register & xn)1016   void rev64(const Register& xd, const Register& xn) {
1017     VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits());
1018     rev(xd, xn);
1019   }
1020 
1021   // Reverse bytes.
1022   void rev(const Register& rd, const Register& rn);
1023 
1024   // Count leading zeroes.
1025   void clz(const Register& rd, const Register& rn);
1026 
1027   // Count leading sign bits.
1028   void cls(const Register& rd, const Register& rn);
1029 
1030   // Pointer Authentication Code for Instruction address, using key A [Armv8.3].
1031   void pacia(const Register& xd, const Register& rn);
1032 
1033   // Pointer Authentication Code for Instruction address, using key A and a
1034   // modifier of zero [Armv8.3].
1035   void paciza(const Register& xd);
1036 
1037   // Pointer Authentication Code for Instruction address, using key A, with
1038   // address in x17 and modifier in x16 [Armv8.3].
1039   void pacia1716();
1040 
1041   // Pointer Authentication Code for Instruction address, using key A, with
1042   // address in LR and modifier in SP [Armv8.3].
1043   void paciasp();
1044 
1045   // Pointer Authentication Code for Instruction address, using key A, with
1046   // address in LR and a modifier of zero [Armv8.3].
1047   void paciaz();
1048 
1049   // Pointer Authentication Code for Instruction address, using key B [Armv8.3].
1050   void pacib(const Register& xd, const Register& xn);
1051 
1052   // Pointer Authentication Code for Instruction address, using key B and a
1053   // modifier of zero [Armv8.3].
1054   void pacizb(const Register& xd);
1055 
1056   // Pointer Authentication Code for Instruction address, using key B, with
1057   // address in x17 and modifier in x16 [Armv8.3].
1058   void pacib1716();
1059 
1060   // Pointer Authentication Code for Instruction address, using key B, with
1061   // address in LR and modifier in SP [Armv8.3].
1062   void pacibsp();
1063 
1064   // Pointer Authentication Code for Instruction address, using key B, with
1065   // address in LR and a modifier of zero [Armv8.3].
1066   void pacibz();
1067 
1068   // Pointer Authentication Code for Data address, using key A [Armv8.3].
1069   void pacda(const Register& xd, const Register& xn);
1070 
1071   // Pointer Authentication Code for Data address, using key A and a modifier of
1072   // zero [Armv8.3].
1073   void pacdza(const Register& xd);
1074 
1075   // Pointer Authentication Code for Data address, using key A, with address in
1076   // x17 and modifier in x16 [Armv8.3].
1077   void pacda1716();
1078 
1079   // Pointer Authentication Code for Data address, using key A, with address in
1080   // LR and modifier in SP [Armv8.3].
1081   void pacdasp();
1082 
1083   // Pointer Authentication Code for Data address, using key A, with address in
1084   // LR and a modifier of zero [Armv8.3].
1085   void pacdaz();
1086 
1087   // Pointer Authentication Code for Data address, using key B [Armv8.3].
1088   void pacdb(const Register& xd, const Register& xn);
1089 
1090   // Pointer Authentication Code for Data address, using key B and a modifier of
1091   // zero [Armv8.3].
1092   void pacdzb(const Register& xd);
1093 
1094   // Pointer Authentication Code for Data address, using key B, with address in
1095   // x17 and modifier in x16 [Armv8.3].
1096   void pacdb1716();
1097 
1098   // Pointer Authentication Code for Data address, using key B, with address in
1099   // LR and modifier in SP [Armv8.3].
1100   void pacdbsp();
1101 
1102   // Pointer Authentication Code for Data address, using key B, with address in
1103   // LR and a modifier of zero [Armv8.3].
1104   void pacdbz();
1105 
1106   // Pointer Authentication Code, using Generic key [Armv8.3].
1107   void pacga(const Register& xd, const Register& xn, const Register& xm);
1108 
1109   // Authenticate Instruction address, using key A [Armv8.3].
1110   void autia(const Register& xd, const Register& xn);
1111 
1112   // Authenticate Instruction address, using key A and a modifier of zero
1113   // [Armv8.3].
1114   void autiza(const Register& xd);
1115 
1116   // Authenticate Instruction address, using key A, with address in x17 and
1117   // modifier in x16 [Armv8.3].
1118   void autia1716();
1119 
1120   // Authenticate Instruction address, using key A, with address in LR and
1121   // modifier in SP [Armv8.3].
1122   void autiasp();
1123 
1124   // Authenticate Instruction address, using key A, with address in LR and a
1125   // modifier of zero [Armv8.3].
1126   void autiaz();
1127 
1128   // Authenticate Instruction address, using key B [Armv8.3].
1129   void autib(const Register& xd, const Register& xn);
1130 
1131   // Authenticate Instruction address, using key B and a modifier of zero
1132   // [Armv8.3].
1133   void autizb(const Register& xd);
1134 
1135   // Authenticate Instruction address, using key B, with address in x17 and
1136   // modifier in x16 [Armv8.3].
1137   void autib1716();
1138 
1139   // Authenticate Instruction address, using key B, with address in LR and
1140   // modifier in SP [Armv8.3].
1141   void autibsp();
1142 
1143   // Authenticate Instruction address, using key B, with address in LR and a
1144   // modifier of zero [Armv8.3].
1145   void autibz();
1146 
1147   // Authenticate Data address, using key A [Armv8.3].
1148   void autda(const Register& xd, const Register& xn);
1149 
1150   // Authenticate Data address, using key A and a modifier of zero [Armv8.3].
1151   void autdza(const Register& xd);
1152 
1153   // Authenticate Data address, using key A, with address in x17 and modifier in
1154   // x16 [Armv8.3].
1155   void autda1716();
1156 
1157   // Authenticate Data address, using key A, with address in LR and modifier in
1158   // SP [Armv8.3].
1159   void autdasp();
1160 
1161   // Authenticate Data address, using key A, with address in LR and a modifier
1162   // of zero [Armv8.3].
1163   void autdaz();
1164 
1165   // Authenticate Data address, using key B [Armv8.3].
1166   void autdb(const Register& xd, const Register& xn);
1167 
1168   // Authenticate Data address, using key B and a modifier of zero [Armv8.3].
1169   void autdzb(const Register& xd);
1170 
1171   // Authenticate Data address, using key B, with address in x17 and modifier in
1172   // x16 [Armv8.3].
1173   void autdb1716();
1174 
1175   // Authenticate Data address, using key B, with address in LR and modifier in
1176   // SP [Armv8.3].
1177   void autdbsp();
1178 
1179   // Authenticate Data address, using key B, with address in LR and a modifier
1180   // of zero [Armv8.3].
1181   void autdbz();
1182 
1183   // Strip Pointer Authentication Code of Data address [Armv8.3].
1184   void xpacd(const Register& xd);
1185 
1186   // Strip Pointer Authentication Code of Instruction address [Armv8.3].
1187   void xpaci(const Register& xd);
1188 
1189   // Strip Pointer Authentication Code of Instruction address in LR [Armv8.3].
1190   void xpaclri();
1191 
1192   // Memory instructions.
1193   // Load integer or FP register.
1194   void ldr(const CPURegister& rt,
1195            const MemOperand& src,
1196            LoadStoreScalingOption option = PreferScaledOffset);
1197 
1198   // Store integer or FP register.
1199   void str(const CPURegister& rt,
1200            const MemOperand& dst,
1201            LoadStoreScalingOption option = PreferScaledOffset);
1202 
1203   // Load word with sign extension.
1204   void ldrsw(const Register& xt,
1205              const MemOperand& src,
1206              LoadStoreScalingOption option = PreferScaledOffset);
1207 
1208   // Load byte.
1209   void ldrb(const Register& rt,
1210             const MemOperand& src,
1211             LoadStoreScalingOption option = PreferScaledOffset);
1212 
1213   // Store byte.
1214   void strb(const Register& rt,
1215             const MemOperand& dst,
1216             LoadStoreScalingOption option = PreferScaledOffset);
1217 
1218   // Load byte with sign extension.
1219   void ldrsb(const Register& rt,
1220              const MemOperand& src,
1221              LoadStoreScalingOption option = PreferScaledOffset);
1222 
1223   // Load half-word.
1224   void ldrh(const Register& rt,
1225             const MemOperand& src,
1226             LoadStoreScalingOption option = PreferScaledOffset);
1227 
1228   // Store half-word.
1229   void strh(const Register& rt,
1230             const MemOperand& dst,
1231             LoadStoreScalingOption option = PreferScaledOffset);
1232 
1233   // Load half-word with sign extension.
1234   void ldrsh(const Register& rt,
1235              const MemOperand& src,
1236              LoadStoreScalingOption option = PreferScaledOffset);
1237 
1238   // Load integer or FP register (with unscaled offset).
1239   void ldur(const CPURegister& rt,
1240             const MemOperand& src,
1241             LoadStoreScalingOption option = PreferUnscaledOffset);
1242 
1243   // Store integer or FP register (with unscaled offset).
1244   void stur(const CPURegister& rt,
1245             const MemOperand& src,
1246             LoadStoreScalingOption option = PreferUnscaledOffset);
1247 
1248   // Load word with sign extension.
1249   void ldursw(const Register& xt,
1250               const MemOperand& src,
1251               LoadStoreScalingOption option = PreferUnscaledOffset);
1252 
1253   // Load byte (with unscaled offset).
1254   void ldurb(const Register& rt,
1255              const MemOperand& src,
1256              LoadStoreScalingOption option = PreferUnscaledOffset);
1257 
1258   // Store byte (with unscaled offset).
1259   void sturb(const Register& rt,
1260              const MemOperand& dst,
1261              LoadStoreScalingOption option = PreferUnscaledOffset);
1262 
1263   // Load byte with sign extension (and unscaled offset).
1264   void ldursb(const Register& rt,
1265               const MemOperand& src,
1266               LoadStoreScalingOption option = PreferUnscaledOffset);
1267 
1268   // Load half-word (with unscaled offset).
1269   void ldurh(const Register& rt,
1270              const MemOperand& src,
1271              LoadStoreScalingOption option = PreferUnscaledOffset);
1272 
1273   // Store half-word (with unscaled offset).
1274   void sturh(const Register& rt,
1275              const MemOperand& dst,
1276              LoadStoreScalingOption option = PreferUnscaledOffset);
1277 
1278   // Load half-word with sign extension (and unscaled offset).
1279   void ldursh(const Register& rt,
1280               const MemOperand& src,
1281               LoadStoreScalingOption option = PreferUnscaledOffset);
1282 
1283   // Load integer or FP register pair.
1284   void ldp(const CPURegister& rt,
1285            const CPURegister& rt2,
1286            const MemOperand& src);
1287 
1288   // Store integer or FP register pair.
1289   void stp(const CPURegister& rt,
1290            const CPURegister& rt2,
1291            const MemOperand& dst);
1292 
1293   // Load word pair with sign extension.
1294   void ldpsw(const Register& xt, const Register& xt2, const MemOperand& src);
1295 
1296   // Load integer or FP register pair, non-temporal.
1297   void ldnp(const CPURegister& rt,
1298             const CPURegister& rt2,
1299             const MemOperand& src);
1300 
1301   // Store integer or FP register pair, non-temporal.
1302   void stnp(const CPURegister& rt,
1303             const CPURegister& rt2,
1304             const MemOperand& dst);
1305 
1306   // Load integer or FP register from literal pool.
1307   void ldr(const CPURegister& rt, RawLiteral* literal);
1308 
1309   // Load word with sign extension from literal pool.
1310   void ldrsw(const Register& xt, RawLiteral* literal);
1311 
1312   // Load integer or FP register from pc + imm19 << 2.
1313   void ldr(const CPURegister& rt, int64_t imm19);
1314 
1315   // Load word with sign extension from pc + imm19 << 2.
1316   void ldrsw(const Register& xt, int64_t imm19);
1317 
1318   // Store exclusive byte.
1319   void stxrb(const Register& rs, const Register& rt, const MemOperand& dst);
1320 
1321   // Store exclusive half-word.
1322   void stxrh(const Register& rs, const Register& rt, const MemOperand& dst);
1323 
1324   // Store exclusive register.
1325   void stxr(const Register& rs, const Register& rt, const MemOperand& dst);
1326 
1327   // Load exclusive byte.
1328   void ldxrb(const Register& rt, const MemOperand& src);
1329 
1330   // Load exclusive half-word.
1331   void ldxrh(const Register& rt, const MemOperand& src);
1332 
1333   // Load exclusive register.
1334   void ldxr(const Register& rt, const MemOperand& src);
1335 
1336   // Store exclusive register pair.
1337   void stxp(const Register& rs,
1338             const Register& rt,
1339             const Register& rt2,
1340             const MemOperand& dst);
1341 
1342   // Load exclusive register pair.
1343   void ldxp(const Register& rt, const Register& rt2, const MemOperand& src);
1344 
1345   // Store-release exclusive byte.
1346   void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst);
1347 
1348   // Store-release exclusive half-word.
1349   void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst);
1350 
1351   // Store-release exclusive register.
1352   void stlxr(const Register& rs, const Register& rt, const MemOperand& dst);
1353 
1354   // Load-acquire exclusive byte.
1355   void ldaxrb(const Register& rt, const MemOperand& src);
1356 
1357   // Load-acquire exclusive half-word.
1358   void ldaxrh(const Register& rt, const MemOperand& src);
1359 
1360   // Load-acquire exclusive register.
1361   void ldaxr(const Register& rt, const MemOperand& src);
1362 
1363   // Store-release exclusive register pair.
1364   void stlxp(const Register& rs,
1365              const Register& rt,
1366              const Register& rt2,
1367              const MemOperand& dst);
1368 
1369   // Load-acquire exclusive register pair.
1370   void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src);
1371 
1372   // Store-release byte.
1373   void stlrb(const Register& rt, const MemOperand& dst);
1374 
1375   // Store-release half-word.
1376   void stlrh(const Register& rt, const MemOperand& dst);
1377 
1378   // Store-release register.
1379   void stlr(const Register& rt, const MemOperand& dst);
1380 
1381   // Load-acquire byte.
1382   void ldarb(const Register& rt, const MemOperand& src);
1383 
1384   // Load-acquire half-word.
1385   void ldarh(const Register& rt, const MemOperand& src);
1386 
1387   // Load-acquire register.
1388   void ldar(const Register& rt, const MemOperand& src);
1389 
1390   // Store LORelease byte [Armv8.1].
1391   void stllrb(const Register& rt, const MemOperand& dst);
1392 
1393   // Store LORelease half-word [Armv8.1].
1394   void stllrh(const Register& rt, const MemOperand& dst);
1395 
1396   // Store LORelease register [Armv8.1].
1397   void stllr(const Register& rt, const MemOperand& dst);
1398 
1399   // Load LORelease byte [Armv8.1].
1400   void ldlarb(const Register& rt, const MemOperand& src);
1401 
1402   // Load LORelease half-word [Armv8.1].
1403   void ldlarh(const Register& rt, const MemOperand& src);
1404 
1405   // Load LORelease register [Armv8.1].
1406   void ldlar(const Register& rt, const MemOperand& src);
1407 
1408   // Compare and Swap word or doubleword in memory [Armv8.1].
1409   void cas(const Register& rs, const Register& rt, const MemOperand& src);
1410 
1411   // Compare and Swap word or doubleword in memory [Armv8.1].
1412   void casa(const Register& rs, const Register& rt, const MemOperand& src);
1413 
1414   // Compare and Swap word or doubleword in memory [Armv8.1].
1415   void casl(const Register& rs, const Register& rt, const MemOperand& src);
1416 
1417   // Compare and Swap word or doubleword in memory [Armv8.1].
1418   void casal(const Register& rs, const Register& rt, const MemOperand& src);
1419 
1420   // Compare and Swap byte in memory [Armv8.1].
1421   void casb(const Register& rs, const Register& rt, const MemOperand& src);
1422 
1423   // Compare and Swap byte in memory [Armv8.1].
1424   void casab(const Register& rs, const Register& rt, const MemOperand& src);
1425 
1426   // Compare and Swap byte in memory [Armv8.1].
1427   void caslb(const Register& rs, const Register& rt, const MemOperand& src);
1428 
1429   // Compare and Swap byte in memory [Armv8.1].
1430   void casalb(const Register& rs, const Register& rt, const MemOperand& src);
1431 
1432   // Compare and Swap halfword in memory [Armv8.1].
1433   void cash(const Register& rs, const Register& rt, const MemOperand& src);
1434 
1435   // Compare and Swap halfword in memory [Armv8.1].
1436   void casah(const Register& rs, const Register& rt, const MemOperand& src);
1437 
1438   // Compare and Swap halfword in memory [Armv8.1].
1439   void caslh(const Register& rs, const Register& rt, const MemOperand& src);
1440 
1441   // Compare and Swap halfword in memory [Armv8.1].
1442   void casalh(const Register& rs, const Register& rt, const MemOperand& src);
1443 
1444   // Compare and Swap Pair of words or doublewords in memory [Armv8.1].
1445   void casp(const Register& rs,
1446             const Register& rs2,
1447             const Register& rt,
1448             const Register& rt2,
1449             const MemOperand& src);
1450 
1451   // Compare and Swap Pair of words or doublewords in memory [Armv8.1].
1452   void caspa(const Register& rs,
1453              const Register& rs2,
1454              const Register& rt,
1455              const Register& rt2,
1456              const MemOperand& src);
1457 
1458   // Compare and Swap Pair of words or doublewords in memory [Armv8.1].
1459   void caspl(const Register& rs,
1460              const Register& rs2,
1461              const Register& rt,
1462              const Register& rt2,
1463              const MemOperand& src);
1464 
1465   // Compare and Swap Pair of words or doublewords in memory [Armv8.1].
1466   void caspal(const Register& rs,
1467               const Register& rs2,
1468               const Register& rt,
1469               const Register& rt2,
1470               const MemOperand& src);
1471 
1472   // Atomic add on byte in memory [Armv8.1]
1473   void ldaddb(const Register& rs, const Register& rt, const MemOperand& src);
1474 
1475   // Atomic add on byte in memory, with Load-acquire semantics [Armv8.1]
1476   void ldaddab(const Register& rs, const Register& rt, const MemOperand& src);
1477 
1478   // Atomic add on byte in memory, with Store-release semantics [Armv8.1]
1479   void ldaddlb(const Register& rs, const Register& rt, const MemOperand& src);
1480 
1481   // Atomic add on byte in memory, with Load-acquire and Store-release semantics
1482   // [Armv8.1]
1483   void ldaddalb(const Register& rs, const Register& rt, const MemOperand& src);
1484 
1485   // Atomic add on halfword in memory [Armv8.1]
1486   void ldaddh(const Register& rs, const Register& rt, const MemOperand& src);
1487 
1488   // Atomic add on halfword in memory, with Load-acquire semantics [Armv8.1]
1489   void ldaddah(const Register& rs, const Register& rt, const MemOperand& src);
1490 
1491   // Atomic add on halfword in memory, with Store-release semantics [Armv8.1]
1492   void ldaddlh(const Register& rs, const Register& rt, const MemOperand& src);
1493 
1494   // Atomic add on halfword in memory, with Load-acquire and Store-release
1495   // semantics [Armv8.1]
1496   void ldaddalh(const Register& rs, const Register& rt, const MemOperand& src);
1497 
1498   // Atomic add on word or doubleword in memory [Armv8.1]
1499   void ldadd(const Register& rs, const Register& rt, const MemOperand& src);
1500 
1501   // Atomic add on word or doubleword in memory, with Load-acquire semantics
1502   // [Armv8.1]
1503   void ldadda(const Register& rs, const Register& rt, const MemOperand& src);
1504 
1505   // Atomic add on word or doubleword in memory, with Store-release semantics
1506   // [Armv8.1]
1507   void ldaddl(const Register& rs, const Register& rt, const MemOperand& src);
1508 
1509   // Atomic add on word or doubleword in memory, with Load-acquire and
1510   // Store-release semantics [Armv8.1]
1511   void ldaddal(const Register& rs, const Register& rt, const MemOperand& src);
1512 
1513   // Atomic bit clear on byte in memory [Armv8.1]
1514   void ldclrb(const Register& rs, const Register& rt, const MemOperand& src);
1515 
1516   // Atomic bit clear on byte in memory, with Load-acquire semantics [Armv8.1]
1517   void ldclrab(const Register& rs, const Register& rt, const MemOperand& src);
1518 
1519   // Atomic bit clear on byte in memory, with Store-release semantics [Armv8.1]
1520   void ldclrlb(const Register& rs, const Register& rt, const MemOperand& src);
1521 
1522   // Atomic bit clear on byte in memory, with Load-acquire and Store-release
1523   // semantics [Armv8.1]
1524   void ldclralb(const Register& rs, const Register& rt, const MemOperand& src);
1525 
1526   // Atomic bit clear on halfword in memory [Armv8.1]
1527   void ldclrh(const Register& rs, const Register& rt, const MemOperand& src);
1528 
1529   // Atomic bit clear on halfword in memory, with Load-acquire semantics
1530   // [Armv8.1]
1531   void ldclrah(const Register& rs, const Register& rt, const MemOperand& src);
1532 
1533   // Atomic bit clear on halfword in memory, with Store-release semantics
1534   // [Armv8.1]
1535   void ldclrlh(const Register& rs, const Register& rt, const MemOperand& src);
1536 
1537   // Atomic bit clear on halfword in memory, with Load-acquire and Store-release
1538   // semantics [Armv8.1]
1539   void ldclralh(const Register& rs, const Register& rt, const MemOperand& src);
1540 
1541   // Atomic bit clear on word or doubleword in memory [Armv8.1]
1542   void ldclr(const Register& rs, const Register& rt, const MemOperand& src);
1543 
1544   // Atomic bit clear on word or doubleword in memory, with Load-acquire
1545   // semantics [Armv8.1]
1546   void ldclra(const Register& rs, const Register& rt, const MemOperand& src);
1547 
1548   // Atomic bit clear on word or doubleword in memory, with Store-release
1549   // semantics [Armv8.1]
1550   void ldclrl(const Register& rs, const Register& rt, const MemOperand& src);
1551 
1552   // Atomic bit clear on word or doubleword in memory, with Load-acquire and
1553   // Store-release semantics [Armv8.1]
1554   void ldclral(const Register& rs, const Register& rt, const MemOperand& src);
1555 
1556   // Atomic exclusive OR on byte in memory [Armv8.1]
1557   void ldeorb(const Register& rs, const Register& rt, const MemOperand& src);
1558 
1559   // Atomic exclusive OR on byte in memory, with Load-acquire semantics
1560   // [Armv8.1]
1561   void ldeorab(const Register& rs, const Register& rt, const MemOperand& src);
1562 
1563   // Atomic exclusive OR on byte in memory, with Store-release semantics
1564   // [Armv8.1]
1565   void ldeorlb(const Register& rs, const Register& rt, const MemOperand& src);
1566 
1567   // Atomic exclusive OR on byte in memory, with Load-acquire and Store-release
1568   // semantics [Armv8.1]
1569   void ldeoralb(const Register& rs, const Register& rt, const MemOperand& src);
1570 
1571   // Atomic exclusive OR on halfword in memory [Armv8.1]
1572   void ldeorh(const Register& rs, const Register& rt, const MemOperand& src);
1573 
1574   // Atomic exclusive OR on halfword in memory, with Load-acquire semantics
1575   // [Armv8.1]
1576   void ldeorah(const Register& rs, const Register& rt, const MemOperand& src);
1577 
1578   // Atomic exclusive OR on halfword in memory, with Store-release semantics
1579   // [Armv8.1]
1580   void ldeorlh(const Register& rs, const Register& rt, const MemOperand& src);
1581 
1582   // Atomic exclusive OR on halfword in memory, with Load-acquire and
1583   // Store-release semantics [Armv8.1]
1584   void ldeoralh(const Register& rs, const Register& rt, const MemOperand& src);
1585 
1586   // Atomic exclusive OR on word or doubleword in memory [Armv8.1]
1587   void ldeor(const Register& rs, const Register& rt, const MemOperand& src);
1588 
1589   // Atomic exclusive OR on word or doubleword in memory, with Load-acquire
1590   // semantics [Armv8.1]
1591   void ldeora(const Register& rs, const Register& rt, const MemOperand& src);
1592 
1593   // Atomic exclusive OR on word or doubleword in memory, with Store-release
1594   // semantics [Armv8.1]
1595   void ldeorl(const Register& rs, const Register& rt, const MemOperand& src);
1596 
1597   // Atomic exclusive OR on word or doubleword in memory, with Load-acquire and
1598   // Store-release semantics [Armv8.1]
1599   void ldeoral(const Register& rs, const Register& rt, const MemOperand& src);
1600 
1601   // Atomic bit set on byte in memory [Armv8.1]
1602   void ldsetb(const Register& rs, const Register& rt, const MemOperand& src);
1603 
1604   // Atomic bit set on byte in memory, with Load-acquire semantics [Armv8.1]
1605   void ldsetab(const Register& rs, const Register& rt, const MemOperand& src);
1606 
1607   // Atomic bit set on byte in memory, with Store-release semantics [Armv8.1]
1608   void ldsetlb(const Register& rs, const Register& rt, const MemOperand& src);
1609 
1610   // Atomic bit set on byte in memory, with Load-acquire and Store-release
1611   // semantics [Armv8.1]
1612   void ldsetalb(const Register& rs, const Register& rt, const MemOperand& src);
1613 
1614   // Atomic bit set on halfword in memory [Armv8.1]
1615   void ldseth(const Register& rs, const Register& rt, const MemOperand& src);
1616 
1617   // Atomic bit set on halfword in memory, with Load-acquire semantics [Armv8.1]
1618   void ldsetah(const Register& rs, const Register& rt, const MemOperand& src);
1619 
1620   // Atomic bit set on halfword in memory, with Store-release semantics
1621   // [Armv8.1]
1622   void ldsetlh(const Register& rs, const Register& rt, const MemOperand& src);
1623 
1624   // Atomic bit set on halfword in memory, with Load-acquire and Store-release
1625   // semantics [Armv8.1]
1626   void ldsetalh(const Register& rs, const Register& rt, const MemOperand& src);
1627 
1628   // Atomic bit set on word or doubleword in memory [Armv8.1]
1629   void ldset(const Register& rs, const Register& rt, const MemOperand& src);
1630 
1631   // Atomic bit set on word or doubleword in memory, with Load-acquire semantics
1632   // [Armv8.1]
1633   void ldseta(const Register& rs, const Register& rt, const MemOperand& src);
1634 
1635   // Atomic bit set on word or doubleword in memory, with Store-release
1636   // semantics [Armv8.1]
1637   void ldsetl(const Register& rs, const Register& rt, const MemOperand& src);
1638 
1639   // Atomic bit set on word or doubleword in memory, with Load-acquire and
1640   // Store-release semantics [Armv8.1]
1641   void ldsetal(const Register& rs, const Register& rt, const MemOperand& src);
1642 
1643   // Atomic signed maximum on byte in memory [Armv8.1]
1644   void ldsmaxb(const Register& rs, const Register& rt, const MemOperand& src);
1645 
1646   // Atomic signed maximum on byte in memory, with Load-acquire semantics
1647   // [Armv8.1]
1648   void ldsmaxab(const Register& rs, const Register& rt, const MemOperand& src);
1649 
1650   // Atomic signed maximum on byte in memory, with Store-release semantics
1651   // [Armv8.1]
1652   void ldsmaxlb(const Register& rs, const Register& rt, const MemOperand& src);
1653 
1654   // Atomic signed maximum on byte in memory, with Load-acquire and
1655   // Store-release semantics [Armv8.1]
1656   void ldsmaxalb(const Register& rs, const Register& rt, const MemOperand& src);
1657 
1658   // Atomic signed maximum on halfword in memory [Armv8.1]
1659   void ldsmaxh(const Register& rs, const Register& rt, const MemOperand& src);
1660 
1661   // Atomic signed maximum on halfword in memory, with Load-acquire semantics
1662   // [Armv8.1]
1663   void ldsmaxah(const Register& rs, const Register& rt, const MemOperand& src);
1664 
1665   // Atomic signed maximum on halfword in memory, with Store-release semantics
1666   // [Armv8.1]
1667   void ldsmaxlh(const Register& rs, const Register& rt, const MemOperand& src);
1668 
1669   // Atomic signed maximum on halfword in memory, with Load-acquire and
1670   // Store-release semantics [Armv8.1]
1671   void ldsmaxalh(const Register& rs, const Register& rt, const MemOperand& src);
1672 
1673   // Atomic signed maximum on word or doubleword in memory [Armv8.1]
1674   void ldsmax(const Register& rs, const Register& rt, const MemOperand& src);
1675 
1676   // Atomic signed maximum on word or doubleword in memory, with Load-acquire
1677   // semantics [Armv8.1]
1678   void ldsmaxa(const Register& rs, const Register& rt, const MemOperand& src);
1679 
1680   // Atomic signed maximum on word or doubleword in memory, with Store-release
1681   // semantics [Armv8.1]
1682   void ldsmaxl(const Register& rs, const Register& rt, const MemOperand& src);
1683 
1684   // Atomic signed maximum on word or doubleword in memory, with Load-acquire
1685   // and Store-release semantics [Armv8.1]
1686   void ldsmaxal(const Register& rs, const Register& rt, const MemOperand& src);
1687 
1688   // Atomic signed minimum on byte in memory [Armv8.1]
1689   void ldsminb(const Register& rs, const Register& rt, const MemOperand& src);
1690 
1691   // Atomic signed minimum on byte in memory, with Load-acquire semantics
1692   // [Armv8.1]
1693   void ldsminab(const Register& rs, const Register& rt, const MemOperand& src);
1694 
1695   // Atomic signed minimum on byte in memory, with Store-release semantics
1696   // [Armv8.1]
1697   void ldsminlb(const Register& rs, const Register& rt, const MemOperand& src);
1698 
1699   // Atomic signed minimum on byte in memory, with Load-acquire and
1700   // Store-release semantics [Armv8.1]
1701   void ldsminalb(const Register& rs, const Register& rt, const MemOperand& src);
1702 
1703   // Atomic signed minimum on halfword in memory [Armv8.1]
1704   void ldsminh(const Register& rs, const Register& rt, const MemOperand& src);
1705 
1706   // Atomic signed minimum on halfword in memory, with Load-acquire semantics
1707   // [Armv8.1]
1708   void ldsminah(const Register& rs, const Register& rt, const MemOperand& src);
1709 
1710   // Atomic signed minimum on halfword in memory, with Store-release semantics
1711   // [Armv8.1]
1712   void ldsminlh(const Register& rs, const Register& rt, const MemOperand& src);
1713 
1714   // Atomic signed minimum on halfword in memory, with Load-acquire and
1715   // Store-release semantics [Armv8.1]
1716   void ldsminalh(const Register& rs, const Register& rt, const MemOperand& src);
1717 
1718   // Atomic signed minimum on word or doubleword in memory [Armv8.1]
1719   void ldsmin(const Register& rs, const Register& rt, const MemOperand& src);
1720 
1721   // Atomic signed minimum on word or doubleword in memory, with Load-acquire
1722   // semantics [Armv8.1]
1723   void ldsmina(const Register& rs, const Register& rt, const MemOperand& src);
1724 
1725   // Atomic signed minimum on word or doubleword in memory, with Store-release
1726   // semantics [Armv8.1]
1727   void ldsminl(const Register& rs, const Register& rt, const MemOperand& src);
1728 
1729   // Atomic signed minimum on word or doubleword in memory, with Load-acquire
1730   // and Store-release semantics [Armv8.1]
1731   void ldsminal(const Register& rs, const Register& rt, const MemOperand& src);
1732 
1733   // Atomic unsigned maximum on byte in memory [Armv8.1]
1734   void ldumaxb(const Register& rs, const Register& rt, const MemOperand& src);
1735 
1736   // Atomic unsigned maximum on byte in memory, with Load-acquire semantics
1737   // [Armv8.1]
1738   void ldumaxab(const Register& rs, const Register& rt, const MemOperand& src);
1739 
1740   // Atomic unsigned maximum on byte in memory, with Store-release semantics
1741   // [Armv8.1]
1742   void ldumaxlb(const Register& rs, const Register& rt, const MemOperand& src);
1743 
1744   // Atomic unsigned maximum on byte in memory, with Load-acquire and
1745   // Store-release semantics [Armv8.1]
1746   void ldumaxalb(const Register& rs, const Register& rt, const MemOperand& src);
1747 
1748   // Atomic unsigned maximum on halfword in memory [Armv8.1]
1749   void ldumaxh(const Register& rs, const Register& rt, const MemOperand& src);
1750 
1751   // Atomic unsigned maximum on halfword in memory, with Load-acquire semantics
1752   // [Armv8.1]
1753   void ldumaxah(const Register& rs, const Register& rt, const MemOperand& src);
1754 
1755   // Atomic unsigned maximum on halfword in memory, with Store-release semantics
1756   // [Armv8.1]
1757   void ldumaxlh(const Register& rs, const Register& rt, const MemOperand& src);
1758 
1759   // Atomic unsigned maximum on halfword in memory, with Load-acquire and
1760   // Store-release semantics [Armv8.1]
1761   void ldumaxalh(const Register& rs, const Register& rt, const MemOperand& src);
1762 
1763   // Atomic unsigned maximum on word or doubleword in memory [Armv8.1]
1764   void ldumax(const Register& rs, const Register& rt, const MemOperand& src);
1765 
1766   // Atomic unsigned maximum on word or doubleword in memory, with Load-acquire
1767   // semantics [Armv8.1]
1768   void ldumaxa(const Register& rs, const Register& rt, const MemOperand& src);
1769 
1770   // Atomic unsigned maximum on word or doubleword in memory, with Store-release
1771   // semantics [Armv8.1]
1772   void ldumaxl(const Register& rs, const Register& rt, const MemOperand& src);
1773 
1774   // Atomic unsigned maximum on word or doubleword in memory, with Load-acquire
1775   // and Store-release semantics [Armv8.1]
1776   void ldumaxal(const Register& rs, const Register& rt, const MemOperand& src);
1777 
1778   // Atomic unsigned minimum on byte in memory [Armv8.1]
1779   void lduminb(const Register& rs, const Register& rt, const MemOperand& src);
1780 
1781   // Atomic unsigned minimum on byte in memory, with Load-acquire semantics
1782   // [Armv8.1]
1783   void lduminab(const Register& rs, const Register& rt, const MemOperand& src);
1784 
1785   // Atomic unsigned minimum on byte in memory, with Store-release semantics
1786   // [Armv8.1]
1787   void lduminlb(const Register& rs, const Register& rt, const MemOperand& src);
1788 
1789   // Atomic unsigned minimum on byte in memory, with Load-acquire and
1790   // Store-release semantics [Armv8.1]
1791   void lduminalb(const Register& rs, const Register& rt, const MemOperand& src);
1792 
1793   // Atomic unsigned minimum on halfword in memory [Armv8.1]
1794   void lduminh(const Register& rs, const Register& rt, const MemOperand& src);
1795 
1796   // Atomic unsigned minimum on halfword in memory, with Load-acquire semantics
1797   // [Armv8.1]
1798   void lduminah(const Register& rs, const Register& rt, const MemOperand& src);
1799 
1800   // Atomic unsigned minimum on halfword in memory, with Store-release semantics
1801   // [Armv8.1]
1802   void lduminlh(const Register& rs, const Register& rt, const MemOperand& src);
1803 
1804   // Atomic unsigned minimum on halfword in memory, with Load-acquire and
1805   // Store-release semantics [Armv8.1]
1806   void lduminalh(const Register& rs, const Register& rt, const MemOperand& src);
1807 
1808   // Atomic unsigned minimum on word or doubleword in memory [Armv8.1]
1809   void ldumin(const Register& rs, const Register& rt, const MemOperand& src);
1810 
1811   // Atomic unsigned minimum on word or doubleword in memory, with Load-acquire
1812   // semantics [Armv8.1]
1813   void ldumina(const Register& rs, const Register& rt, const MemOperand& src);
1814 
1815   // Atomic unsigned minimum on word or doubleword in memory, with Store-release
1816   // semantics [Armv8.1]
1817   void lduminl(const Register& rs, const Register& rt, const MemOperand& src);
1818 
1819   // Atomic unsigned minimum on word or doubleword in memory, with Load-acquire
1820   // and Store-release semantics [Armv8.1]
1821   void lduminal(const Register& rs, const Register& rt, const MemOperand& src);
1822 
1823   // Atomic add on byte in memory, without return. [Armv8.1]
1824   void staddb(const Register& rs, const MemOperand& src);
1825 
1826   // Atomic add on byte in memory, with Store-release semantics and without
1827   // return. [Armv8.1]
1828   void staddlb(const Register& rs, const MemOperand& src);
1829 
1830   // Atomic add on halfword in memory, without return. [Armv8.1]
1831   void staddh(const Register& rs, const MemOperand& src);
1832 
1833   // Atomic add on halfword in memory, with Store-release semantics and without
1834   // return. [Armv8.1]
1835   void staddlh(const Register& rs, const MemOperand& src);
1836 
1837   // Atomic add on word or doubleword in memory, without return. [Armv8.1]
1838   void stadd(const Register& rs, const MemOperand& src);
1839 
1840   // Atomic add on word or doubleword in memory, with Store-release semantics
1841   // and without return. [Armv8.1]
1842   void staddl(const Register& rs, const MemOperand& src);
1843 
1844   // Atomic bit clear on byte in memory, without return. [Armv8.1]
1845   void stclrb(const Register& rs, const MemOperand& src);
1846 
1847   // Atomic bit clear on byte in memory, with Store-release semantics and
1848   // without return. [Armv8.1]
1849   void stclrlb(const Register& rs, const MemOperand& src);
1850 
1851   // Atomic bit clear on halfword in memory, without return. [Armv8.1]
1852   void stclrh(const Register& rs, const MemOperand& src);
1853 
1854   // Atomic bit clear on halfword in memory, with Store-release semantics and
1855   // without return. [Armv8.1]
1856   void stclrlh(const Register& rs, const MemOperand& src);
1857 
1858   // Atomic bit clear on word or doubleword in memory, without return. [Armv8.1]
1859   void stclr(const Register& rs, const MemOperand& src);
1860 
1861   // Atomic bit clear on word or doubleword in memory, with Store-release
1862   // semantics and without return. [Armv8.1]
1863   void stclrl(const Register& rs, const MemOperand& src);
1864 
1865   // Atomic exclusive OR on byte in memory, without return. [Armv8.1]
1866   void steorb(const Register& rs, const MemOperand& src);
1867 
1868   // Atomic exclusive OR on byte in memory, with Store-release semantics and
1869   // without return. [Armv8.1]
1870   void steorlb(const Register& rs, const MemOperand& src);
1871 
1872   // Atomic exclusive OR on halfword in memory, without return. [Armv8.1]
1873   void steorh(const Register& rs, const MemOperand& src);
1874 
1875   // Atomic exclusive OR on halfword in memory, with Store-release semantics
1876   // and without return. [Armv8.1]
1877   void steorlh(const Register& rs, const MemOperand& src);
1878 
1879   // Atomic exclusive OR on word or doubleword in memory, without return.
1880   // [Armv8.1]
1881   void steor(const Register& rs, const MemOperand& src);
1882 
1883   // Atomic exclusive OR on word or doubleword in memory, with Store-release
1884   // semantics and without return. [Armv8.1]
1885   void steorl(const Register& rs, const MemOperand& src);
1886 
1887   // Atomic bit set on byte in memory, without return. [Armv8.1]
1888   void stsetb(const Register& rs, const MemOperand& src);
1889 
1890   // Atomic bit set on byte in memory, with Store-release semantics and without
1891   // return. [Armv8.1]
1892   void stsetlb(const Register& rs, const MemOperand& src);
1893 
1894   // Atomic bit set on halfword in memory, without return. [Armv8.1]
1895   void stseth(const Register& rs, const MemOperand& src);
1896 
1897   // Atomic bit set on halfword in memory, with Store-release semantics and
1898   // without return. [Armv8.1]
1899   void stsetlh(const Register& rs, const MemOperand& src);
1900 
1901   // Atomic bit set on word or doubleword in memory, without return. [Armv8.1]
1902   void stset(const Register& rs, const MemOperand& src);
1903 
1904   // Atomic bit set on word or doubleword in memory, with Store-release
1905   // semantics and without return. [Armv8.1]
1906   void stsetl(const Register& rs, const MemOperand& src);
1907 
1908   // Atomic signed maximum on byte in memory, without return. [Armv8.1]
1909   void stsmaxb(const Register& rs, const MemOperand& src);
1910 
1911   // Atomic signed maximum on byte in memory, with Store-release semantics and
1912   // without return. [Armv8.1]
1913   void stsmaxlb(const Register& rs, const MemOperand& src);
1914 
1915   // Atomic signed maximum on halfword in memory, without return. [Armv8.1]
1916   void stsmaxh(const Register& rs, const MemOperand& src);
1917 
1918   // Atomic signed maximum on halfword in memory, with Store-release semantics
1919   // and without return. [Armv8.1]
1920   void stsmaxlh(const Register& rs, const MemOperand& src);
1921 
1922   // Atomic signed maximum on word or doubleword in memory, without return.
1923   // [Armv8.1]
1924   void stsmax(const Register& rs, const MemOperand& src);
1925 
1926   // Atomic signed maximum on word or doubleword in memory, with Store-release
1927   // semantics and without return. [Armv8.1]
1928   void stsmaxl(const Register& rs, const MemOperand& src);
1929 
1930   // Atomic signed minimum on byte in memory, without return. [Armv8.1]
1931   void stsminb(const Register& rs, const MemOperand& src);
1932 
1933   // Atomic signed minimum on byte in memory, with Store-release semantics and
1934   // without return. [Armv8.1]
1935   void stsminlb(const Register& rs, const MemOperand& src);
1936 
1937   // Atomic signed minimum on halfword in memory, without return. [Armv8.1]
1938   void stsminh(const Register& rs, const MemOperand& src);
1939 
1940   // Atomic signed minimum on halfword in memory, with Store-release semantics
1941   // and without return. [Armv8.1]
1942   void stsminlh(const Register& rs, const MemOperand& src);
1943 
1944   // Atomic signed minimum on word or doubleword in memory, without return.
1945   // [Armv8.1]
1946   void stsmin(const Register& rs, const MemOperand& src);
1947 
1948   // Atomic signed minimum on word or doubleword in memory, with Store-release
  // semantics and without return. [Armv8.1]
1950   void stsminl(const Register& rs, const MemOperand& src);
1951 
1952   // Atomic unsigned maximum on byte in memory, without return. [Armv8.1]
1953   void stumaxb(const Register& rs, const MemOperand& src);
1954 
1955   // Atomic unsigned maximum on byte in memory, with Store-release semantics and
1956   // without return. [Armv8.1]
1957   void stumaxlb(const Register& rs, const MemOperand& src);
1958 
1959   // Atomic unsigned maximum on halfword in memory, without return. [Armv8.1]
1960   void stumaxh(const Register& rs, const MemOperand& src);
1961 
1962   // Atomic unsigned maximum on halfword in memory, with Store-release semantics
1963   // and without return. [Armv8.1]
1964   void stumaxlh(const Register& rs, const MemOperand& src);
1965 
1966   // Atomic unsigned maximum on word or doubleword in memory, without return.
1967   // [Armv8.1]
1968   void stumax(const Register& rs, const MemOperand& src);
1969 
1970   // Atomic unsigned maximum on word or doubleword in memory, with Store-release
1971   // semantics and without return. [Armv8.1]
1972   void stumaxl(const Register& rs, const MemOperand& src);
1973 
1974   // Atomic unsigned minimum on byte in memory, without return. [Armv8.1]
1975   void stuminb(const Register& rs, const MemOperand& src);
1976 
1977   // Atomic unsigned minimum on byte in memory, with Store-release semantics and
1978   // without return. [Armv8.1]
1979   void stuminlb(const Register& rs, const MemOperand& src);
1980 
1981   // Atomic unsigned minimum on halfword in memory, without return. [Armv8.1]
1982   void stuminh(const Register& rs, const MemOperand& src);
1983 
1984   // Atomic unsigned minimum on halfword in memory, with Store-release semantics
1985   // and without return. [Armv8.1]
1986   void stuminlh(const Register& rs, const MemOperand& src);
1987 
1988   // Atomic unsigned minimum on word or doubleword in memory, without return.
1989   // [Armv8.1]
1990   void stumin(const Register& rs, const MemOperand& src);
1991 
1992   // Atomic unsigned minimum on word or doubleword in memory, with Store-release
1993   // semantics and without return. [Armv8.1]
1994   void stuminl(const Register& rs, const MemOperand& src);
1995 
1996   // Swap byte in memory [Armv8.1]
1997   void swpb(const Register& rs, const Register& rt, const MemOperand& src);
1998 
1999   // Swap byte in memory, with Load-acquire semantics [Armv8.1]
2000   void swpab(const Register& rs, const Register& rt, const MemOperand& src);
2001 
2002   // Swap byte in memory, with Store-release semantics [Armv8.1]
2003   void swplb(const Register& rs, const Register& rt, const MemOperand& src);
2004 
2005   // Swap byte in memory, with Load-acquire and Store-release semantics
2006   // [Armv8.1]
2007   void swpalb(const Register& rs, const Register& rt, const MemOperand& src);
2008 
2009   // Swap halfword in memory [Armv8.1]
2010   void swph(const Register& rs, const Register& rt, const MemOperand& src);
2011 
2012   // Swap halfword in memory, with Load-acquire semantics [Armv8.1]
2013   void swpah(const Register& rs, const Register& rt, const MemOperand& src);
2014 
2015   // Swap halfword in memory, with Store-release semantics [Armv8.1]
2016   void swplh(const Register& rs, const Register& rt, const MemOperand& src);
2017 
2018   // Swap halfword in memory, with Load-acquire and Store-release semantics
2019   // [Armv8.1]
2020   void swpalh(const Register& rs, const Register& rt, const MemOperand& src);
2021 
2022   // Swap word or doubleword in memory [Armv8.1]
2023   void swp(const Register& rs, const Register& rt, const MemOperand& src);
2024 
2025   // Swap word or doubleword in memory, with Load-acquire semantics [Armv8.1]
2026   void swpa(const Register& rs, const Register& rt, const MemOperand& src);
2027 
2028   // Swap word or doubleword in memory, with Store-release semantics [Armv8.1]
2029   void swpl(const Register& rs, const Register& rt, const MemOperand& src);
2030 
2031   // Swap word or doubleword in memory, with Load-acquire and Store-release
2032   // semantics [Armv8.1]
2033   void swpal(const Register& rs, const Register& rt, const MemOperand& src);
2034 
2035   // Load-Acquire RCpc Register byte [Armv8.3]
2036   void ldaprb(const Register& rt, const MemOperand& src);
2037 
2038   // Load-Acquire RCpc Register halfword [Armv8.3]
2039   void ldaprh(const Register& rt, const MemOperand& src);
2040 
2041   // Load-Acquire RCpc Register word or doubleword [Armv8.3]
2042   void ldapr(const Register& rt, const MemOperand& src);
2043 
2044   // Prefetch memory.
2045   void prfm(PrefetchOperation op,
2046             const MemOperand& addr,
2047             LoadStoreScalingOption option = PreferScaledOffset);
2048 
2049   // Prefetch memory (with unscaled offset).
2050   void prfum(PrefetchOperation op,
2051              const MemOperand& addr,
2052              LoadStoreScalingOption option = PreferUnscaledOffset);
2053 
2054   // Prefetch memory in the literal pool.
2055   void prfm(PrefetchOperation op, RawLiteral* literal);
2056 
2057   // Prefetch from pc + imm19 << 2.
2058   void prfm(PrefetchOperation op, int64_t imm19);
2059 
2060   // Move instructions. The default shift of -1 indicates that the move
2061   // instruction will calculate an appropriate 16-bit immediate and left shift
2062   // that is equal to the 64-bit immediate argument. If an explicit left shift
2063   // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value.
2064   //
2065   // For movk, an explicit shift can be used to indicate which half word should
2066   // be overwritten, eg. movk(x0, 0, 0) will overwrite the least-significant
2067   // half word with zero, whereas movk(x0, 0, 48) will overwrite the
2068   // most-significant.
2069 
2070   // Move immediate and keep.
  void movk(const Register& rd, uint64_t imm, int shift = -1) {
    // MOVK: insert the 16-bit immediate into one half-word of rd, keeping
    // all other bits (see the shift discussion above).
    MoveWide(rd, imm, shift, MOVK);
  }
2074 
2075   // Move inverted immediate.
  void movn(const Register& rd, uint64_t imm, int shift = -1) {
    // MOVN: write the bitwise inverse of (imm16 << shift) to rd.
    MoveWide(rd, imm, shift, MOVN);
  }
2079 
2080   // Move immediate.
  void movz(const Register& rd, uint64_t imm, int shift = -1) {
    // MOVZ: zero rd, then write imm16 at the selected half-word.
    MoveWide(rd, imm, shift, MOVZ);
  }
2084 
2085   // Misc instructions.
2086   // Monitor debug-mode breakpoint.
2087   void brk(int code);
2088 
2089   // Halting debug-mode breakpoint.
2090   void hlt(int code);
2091 
2092   // Generate exception targeting EL1.
2093   void svc(int code);
2094 
2095   // Move register to register.
2096   void mov(const Register& rd, const Register& rn);
2097 
2098   // Move inverted operand to register.
2099   void mvn(const Register& rd, const Operand& operand);
2100 
2101   // System instructions.
2102   // Move to register from system register.
2103   void mrs(const Register& xt, SystemRegister sysreg);
2104 
2105   // Move from register to system register.
2106   void msr(SystemRegister sysreg, const Register& xt);
2107 
2108   // System instruction.
2109   void sys(int op1, int crn, int crm, int op2, const Register& xt = xzr);
2110 
2111   // System instruction with pre-encoded op (op1:crn:crm:op2).
2112   void sys(int op, const Register& xt = xzr);
2113 
2114   // System data cache operation.
2115   void dc(DataCacheOp op, const Register& rt);
2116 
2117   // System instruction cache operation.
2118   void ic(InstructionCacheOp op, const Register& rt);
2119 
2120   // System hint (named type).
2121   void hint(SystemHint code);
2122 
2123   // System hint (numbered type).
2124   void hint(int imm7);
2125 
2126   // Clear exclusive monitor.
2127   void clrex(int imm4 = 0xf);
2128 
2129   // Data memory barrier.
2130   void dmb(BarrierDomain domain, BarrierType type);
2131 
2132   // Data synchronization barrier.
2133   void dsb(BarrierDomain domain, BarrierType type);
2134 
2135   // Instruction synchronization barrier.
2136   void isb();
2137 
2138   // Error synchronization barrier.
2139   void esb();
2140 
2141   // Conditional speculation dependency barrier.
2142   void csdb();
2143 
2144   // Alias for system instructions.
2145   // No-op.
nop()2146   void nop() { hint(NOP); }
2147 
2148   // FP and NEON instructions.
2149   // Move double precision immediate to FP register.
2150   void fmov(const VRegister& vd, double imm);
2151 
2152   // Move single precision immediate to FP register.
2153   void fmov(const VRegister& vd, float imm);
2154 
2155   // Move half precision immediate to FP register [Armv8.2].
2156   void fmov(const VRegister& vd, Float16 imm);
2157 
2158   // Move FP register to register.
2159   void fmov(const Register& rd, const VRegister& fn);
2160 
2161   // Move register to FP register.
2162   void fmov(const VRegister& vd, const Register& rn);
2163 
2164   // Move FP register to FP register.
2165   void fmov(const VRegister& vd, const VRegister& fn);
2166 
2167   // Move 64-bit register to top half of 128-bit FP register.
2168   void fmov(const VRegister& vd, int index, const Register& rn);
2169 
2170   // Move top half of 128-bit FP register to 64-bit register.
2171   void fmov(const Register& rd, const VRegister& vn, int index);
2172 
2173   // FP add.
2174   void fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2175 
2176   // FP subtract.
2177   void fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2178 
2179   // FP multiply.
2180   void fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2181 
2182   // FP fused multiply-add.
2183   void fmadd(const VRegister& vd,
2184              const VRegister& vn,
2185              const VRegister& vm,
2186              const VRegister& va);
2187 
2188   // FP fused multiply-subtract.
2189   void fmsub(const VRegister& vd,
2190              const VRegister& vn,
2191              const VRegister& vm,
2192              const VRegister& va);
2193 
2194   // FP fused multiply-add and negate.
2195   void fnmadd(const VRegister& vd,
2196               const VRegister& vn,
2197               const VRegister& vm,
2198               const VRegister& va);
2199 
2200   // FP fused multiply-subtract and negate.
2201   void fnmsub(const VRegister& vd,
2202               const VRegister& vn,
2203               const VRegister& vm,
2204               const VRegister& va);
2205 
2206   // FP multiply-negate scalar.
2207   void fnmul(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2208 
2209   // FP reciprocal exponent scalar.
2210   void frecpx(const VRegister& vd, const VRegister& vn);
2211 
2212   // FP divide.
2213   void fdiv(const VRegister& vd, const VRegister& fn, const VRegister& vm);
2214 
2215   // FP maximum.
2216   void fmax(const VRegister& vd, const VRegister& fn, const VRegister& vm);
2217 
2218   // FP minimum.
2219   void fmin(const VRegister& vd, const VRegister& fn, const VRegister& vm);
2220 
2221   // FP maximum number.
2222   void fmaxnm(const VRegister& vd, const VRegister& fn, const VRegister& vm);
2223 
2224   // FP minimum number.
2225   void fminnm(const VRegister& vd, const VRegister& fn, const VRegister& vm);
2226 
2227   // FP absolute.
2228   void fabs(const VRegister& vd, const VRegister& vn);
2229 
2230   // FP negate.
2231   void fneg(const VRegister& vd, const VRegister& vn);
2232 
2233   // FP square root.
2234   void fsqrt(const VRegister& vd, const VRegister& vn);
2235 
2236   // FP round to integer, nearest with ties to away.
2237   void frinta(const VRegister& vd, const VRegister& vn);
2238 
2239   // FP round to integer, implicit rounding.
2240   void frinti(const VRegister& vd, const VRegister& vn);
2241 
2242   // FP round to integer, toward minus infinity.
2243   void frintm(const VRegister& vd, const VRegister& vn);
2244 
2245   // FP round to integer, nearest with ties to even.
2246   void frintn(const VRegister& vd, const VRegister& vn);
2247 
2248   // FP round to integer, toward plus infinity.
2249   void frintp(const VRegister& vd, const VRegister& vn);
2250 
2251   // FP round to integer, exact, implicit rounding.
2252   void frintx(const VRegister& vd, const VRegister& vn);
2253 
2254   // FP round to integer, towards zero.
2255   void frintz(const VRegister& vd, const VRegister& vn);
2256 
  // Shared emitter for the FP compare-with-immediate forms; 'trap'
  // presumably selects the signaling (fcmpe) encoding — see the fcmp/fcmpe
  // wrappers below.
  void FPCompareMacro(const VRegister& vn, double value, FPTrapFlags trap);

  // Shared emitter for the FP register-register compare forms; 'trap'
  // presumably selects the signaling (fcmpe) encoding.
  void FPCompareMacro(const VRegister& vn,
                      const VRegister& vm,
                      FPTrapFlags trap);
2262 
2263   // FP compare registers.
2264   void fcmp(const VRegister& vn, const VRegister& vm);
2265 
2266   // FP compare immediate.
2267   void fcmp(const VRegister& vn, double value);
2268 
  // Shared emitter for the FP conditional compare forms (fccmp/fccmpe):
  // 'nzcv' supplies the flags used when 'cond' does not hold; 'trap'
  // presumably selects the signaling (fccmpe) encoding.
  void FPCCompareMacro(const VRegister& vn,
                       const VRegister& vm,
                       StatusFlags nzcv,
                       Condition cond,
                       FPTrapFlags trap);
2274 
2275   // FP conditional compare.
2276   void fccmp(const VRegister& vn,
2277              const VRegister& vm,
2278              StatusFlags nzcv,
2279              Condition cond);
2280 
2281   // FP signaling compare registers.
2282   void fcmpe(const VRegister& vn, const VRegister& vm);
2283 
2284   // FP signaling compare immediate.
2285   void fcmpe(const VRegister& vn, double value);
2286 
2287   // FP conditional signaling compare.
2288   void fccmpe(const VRegister& vn,
2289               const VRegister& vm,
2290               StatusFlags nzcv,
2291               Condition cond);
2292 
2293   // FP conditional select.
2294   void fcsel(const VRegister& vd,
2295              const VRegister& vn,
2296              const VRegister& vm,
2297              Condition cond);
2298 
  // Common FP Convert functions.
  // Emission helpers, presumably shared by the fcvt* mnemonics declared
  // below; `op` carries the instruction-specific opcode bits — confirm in
  // the implementation.
  void NEONFPConvertToInt(const Register& rd, const VRegister& vn, Instr op);
  void NEONFPConvertToInt(const VRegister& vd, const VRegister& vn, Instr op);
  void NEONFP16ConvertToInt(const VRegister& vd, const VRegister& vn, Instr op);
2303 
2304   // FP convert between precisions.
2305   void fcvt(const VRegister& vd, const VRegister& vn);
2306 
2307   // FP convert to higher precision.
2308   void fcvtl(const VRegister& vd, const VRegister& vn);
2309 
2310   // FP convert to higher precision (second part).
2311   void fcvtl2(const VRegister& vd, const VRegister& vn);
2312 
2313   // FP convert to lower precision.
2314   void fcvtn(const VRegister& vd, const VRegister& vn);
2315 
  // FP convert to lower precision (second part).
  void fcvtn2(const VRegister& vd, const VRegister& vn);
2318 
2319   // FP convert to lower precision, rounding to odd.
2320   void fcvtxn(const VRegister& vd, const VRegister& vn);
2321 
2322   // FP convert to lower precision, rounding to odd (second part).
2323   void fcvtxn2(const VRegister& vd, const VRegister& vn);
2324 
2325   // FP convert to signed integer, nearest with ties to away.
2326   void fcvtas(const Register& rd, const VRegister& vn);
2327 
2328   // FP convert to unsigned integer, nearest with ties to away.
2329   void fcvtau(const Register& rd, const VRegister& vn);
2330 
2331   // FP convert to signed integer, nearest with ties to away.
2332   void fcvtas(const VRegister& vd, const VRegister& vn);
2333 
2334   // FP convert to unsigned integer, nearest with ties to away.
2335   void fcvtau(const VRegister& vd, const VRegister& vn);
2336 
2337   // FP convert to signed integer, round towards -infinity.
2338   void fcvtms(const Register& rd, const VRegister& vn);
2339 
2340   // FP convert to unsigned integer, round towards -infinity.
2341   void fcvtmu(const Register& rd, const VRegister& vn);
2342 
2343   // FP convert to signed integer, round towards -infinity.
2344   void fcvtms(const VRegister& vd, const VRegister& vn);
2345 
2346   // FP convert to unsigned integer, round towards -infinity.
2347   void fcvtmu(const VRegister& vd, const VRegister& vn);
2348 
2349   // FP convert to signed integer, nearest with ties to even.
2350   void fcvtns(const Register& rd, const VRegister& vn);
2351 
2352   // FP JavaScript convert to signed integer, rounding toward zero [Armv8.3].
2353   void fjcvtzs(const Register& rd, const VRegister& vn);
2354 
2355   // FP convert to unsigned integer, nearest with ties to even.
2356   void fcvtnu(const Register& rd, const VRegister& vn);
2357 
2358   // FP convert to signed integer, nearest with ties to even.
2359   void fcvtns(const VRegister& rd, const VRegister& vn);
2360 
2361   // FP convert to unsigned integer, nearest with ties to even.
2362   void fcvtnu(const VRegister& rd, const VRegister& vn);
2363 
2364   // FP convert to signed integer or fixed-point, round towards zero.
2365   void fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0);
2366 
2367   // FP convert to unsigned integer or fixed-point, round towards zero.
2368   void fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0);
2369 
2370   // FP convert to signed integer or fixed-point, round towards zero.
2371   void fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0);
2372 
2373   // FP convert to unsigned integer or fixed-point, round towards zero.
2374   void fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0);
2375 
2376   // FP convert to signed integer, round towards +infinity.
2377   void fcvtps(const Register& rd, const VRegister& vn);
2378 
2379   // FP convert to unsigned integer, round towards +infinity.
2380   void fcvtpu(const Register& rd, const VRegister& vn);
2381 
2382   // FP convert to signed integer, round towards +infinity.
2383   void fcvtps(const VRegister& vd, const VRegister& vn);
2384 
2385   // FP convert to unsigned integer, round towards +infinity.
2386   void fcvtpu(const VRegister& vd, const VRegister& vn);
2387 
2388   // Convert signed integer or fixed point to FP.
2389   void scvtf(const VRegister& fd, const Register& rn, int fbits = 0);
2390 
2391   // Convert unsigned integer or fixed point to FP.
2392   void ucvtf(const VRegister& fd, const Register& rn, int fbits = 0);
2393 
2394   // Convert signed integer or fixed-point to FP.
2395   void scvtf(const VRegister& fd, const VRegister& vn, int fbits = 0);
2396 
2397   // Convert unsigned integer or fixed-point to FP.
2398   void ucvtf(const VRegister& fd, const VRegister& vn, int fbits = 0);
2399 
2400   // Unsigned absolute difference.
2401   void uabd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2402 
2403   // Signed absolute difference.
2404   void sabd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2405 
2406   // Unsigned absolute difference and accumulate.
2407   void uaba(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2408 
2409   // Signed absolute difference and accumulate.
2410   void saba(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2411 
2412   // Add.
2413   void add(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2414 
2415   // Subtract.
2416   void sub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2417 
2418   // Unsigned halving add.
2419   void uhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2420 
2421   // Signed halving add.
2422   void shadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2423 
2424   // Unsigned rounding halving add.
2425   void urhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2426 
2427   // Signed rounding halving add.
2428   void srhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2429 
2430   // Unsigned halving sub.
2431   void uhsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2432 
2433   // Signed halving sub.
2434   void shsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2435 
2436   // Unsigned saturating add.
2437   void uqadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2438 
2439   // Signed saturating add.
2440   void sqadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2441 
2442   // Unsigned saturating subtract.
2443   void uqsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2444 
2445   // Signed saturating subtract.
2446   void sqsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2447 
2448   // Add pairwise.
2449   void addp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2450 
2451   // Add pair of elements scalar.
2452   void addp(const VRegister& vd, const VRegister& vn);
2453 
2454   // Multiply-add to accumulator.
2455   void mla(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2456 
2457   // Multiply-subtract to accumulator.
2458   void mls(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2459 
2460   // Multiply.
2461   void mul(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2462 
2463   // Multiply by scalar element.
2464   void mul(const VRegister& vd,
2465            const VRegister& vn,
2466            const VRegister& vm,
2467            int vm_index);
2468 
2469   // Multiply-add by scalar element.
2470   void mla(const VRegister& vd,
2471            const VRegister& vn,
2472            const VRegister& vm,
2473            int vm_index);
2474 
2475   // Multiply-subtract by scalar element.
2476   void mls(const VRegister& vd,
2477            const VRegister& vn,
2478            const VRegister& vm,
2479            int vm_index);
2480 
2481   // Signed long multiply-add by scalar element.
2482   void smlal(const VRegister& vd,
2483              const VRegister& vn,
2484              const VRegister& vm,
2485              int vm_index);
2486 
2487   // Signed long multiply-add by scalar element (second part).
2488   void smlal2(const VRegister& vd,
2489               const VRegister& vn,
2490               const VRegister& vm,
2491               int vm_index);
2492 
2493   // Unsigned long multiply-add by scalar element.
2494   void umlal(const VRegister& vd,
2495              const VRegister& vn,
2496              const VRegister& vm,
2497              int vm_index);
2498 
2499   // Unsigned long multiply-add by scalar element (second part).
2500   void umlal2(const VRegister& vd,
2501               const VRegister& vn,
2502               const VRegister& vm,
2503               int vm_index);
2504 
2505   // Signed long multiply-sub by scalar element.
2506   void smlsl(const VRegister& vd,
2507              const VRegister& vn,
2508              const VRegister& vm,
2509              int vm_index);
2510 
2511   // Signed long multiply-sub by scalar element (second part).
2512   void smlsl2(const VRegister& vd,
2513               const VRegister& vn,
2514               const VRegister& vm,
2515               int vm_index);
2516 
2517   // Unsigned long multiply-sub by scalar element.
2518   void umlsl(const VRegister& vd,
2519              const VRegister& vn,
2520              const VRegister& vm,
2521              int vm_index);
2522 
2523   // Unsigned long multiply-sub by scalar element (second part).
2524   void umlsl2(const VRegister& vd,
2525               const VRegister& vn,
2526               const VRegister& vm,
2527               int vm_index);
2528 
2529   // Signed long multiply by scalar element.
2530   void smull(const VRegister& vd,
2531              const VRegister& vn,
2532              const VRegister& vm,
2533              int vm_index);
2534 
2535   // Signed long multiply by scalar element (second part).
2536   void smull2(const VRegister& vd,
2537               const VRegister& vn,
2538               const VRegister& vm,
2539               int vm_index);
2540 
2541   // Unsigned long multiply by scalar element.
2542   void umull(const VRegister& vd,
2543              const VRegister& vn,
2544              const VRegister& vm,
2545              int vm_index);
2546 
2547   // Unsigned long multiply by scalar element (second part).
2548   void umull2(const VRegister& vd,
2549               const VRegister& vn,
2550               const VRegister& vm,
2551               int vm_index);
2552 
2553   // Signed saturating double long multiply by element.
2554   void sqdmull(const VRegister& vd,
2555                const VRegister& vn,
2556                const VRegister& vm,
2557                int vm_index);
2558 
2559   // Signed saturating double long multiply by element (second part).
2560   void sqdmull2(const VRegister& vd,
2561                 const VRegister& vn,
2562                 const VRegister& vm,
2563                 int vm_index);
2564 
2565   // Signed saturating doubling long multiply-add by element.
2566   void sqdmlal(const VRegister& vd,
2567                const VRegister& vn,
2568                const VRegister& vm,
2569                int vm_index);
2570 
2571   // Signed saturating doubling long multiply-add by element (second part).
2572   void sqdmlal2(const VRegister& vd,
2573                 const VRegister& vn,
2574                 const VRegister& vm,
2575                 int vm_index);
2576 
2577   // Signed saturating doubling long multiply-sub by element.
2578   void sqdmlsl(const VRegister& vd,
2579                const VRegister& vn,
2580                const VRegister& vm,
2581                int vm_index);
2582 
2583   // Signed saturating doubling long multiply-sub by element (second part).
2584   void sqdmlsl2(const VRegister& vd,
2585                 const VRegister& vn,
2586                 const VRegister& vm,
2587                 int vm_index);
2588 
2589   // Compare equal.
2590   void cmeq(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2591 
2592   // Compare signed greater than or equal.
2593   void cmge(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2594 
2595   // Compare signed greater than.
2596   void cmgt(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2597 
2598   // Compare unsigned higher.
2599   void cmhi(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2600 
2601   // Compare unsigned higher or same.
2602   void cmhs(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2603 
2604   // Compare bitwise test bits nonzero.
2605   void cmtst(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2606 
2607   // Compare bitwise to zero.
2608   void cmeq(const VRegister& vd, const VRegister& vn, int value);
2609 
2610   // Compare signed greater than or equal to zero.
2611   void cmge(const VRegister& vd, const VRegister& vn, int value);
2612 
2613   // Compare signed greater than zero.
2614   void cmgt(const VRegister& vd, const VRegister& vn, int value);
2615 
2616   // Compare signed less than or equal to zero.
2617   void cmle(const VRegister& vd, const VRegister& vn, int value);
2618 
2619   // Compare signed less than zero.
2620   void cmlt(const VRegister& vd, const VRegister& vn, int value);
2621 
2622   // Signed shift left by register.
2623   void sshl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2624 
2625   // Unsigned shift left by register.
2626   void ushl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2627 
2628   // Signed saturating shift left by register.
2629   void sqshl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2630 
2631   // Unsigned saturating shift left by register.
2632   void uqshl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2633 
2634   // Signed rounding shift left by register.
2635   void srshl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2636 
2637   // Unsigned rounding shift left by register.
2638   void urshl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2639 
2640   // Signed saturating rounding shift left by register.
2641   void sqrshl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2642 
2643   // Unsigned saturating rounding shift left by register.
2644   void uqrshl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2645 
2646   // Bitwise and.
2647   void and_(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2648 
2649   // Bitwise or.
2650   void orr(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2651 
2652   // Bitwise or immediate.
2653   void orr(const VRegister& vd, const int imm8, const int left_shift = 0);
2654 
2655   // Move register to register.
2656   void mov(const VRegister& vd, const VRegister& vn);
2657 
2658   // Bitwise orn.
2659   void orn(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2660 
2661   // Bitwise eor.
2662   void eor(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2663 
2664   // Bit clear immediate.
2665   void bic(const VRegister& vd, const int imm8, const int left_shift = 0);
2666 
2667   // Bit clear.
2668   void bic(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2669 
2670   // Bitwise insert if false.
2671   void bif(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2672 
2673   // Bitwise insert if true.
2674   void bit(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2675 
2676   // Bitwise select.
2677   void bsl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2678 
2679   // Polynomial multiply.
2680   void pmul(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2681 
2682   // Vector move immediate.
2683   void movi(const VRegister& vd,
2684             const uint64_t imm,
2685             Shift shift = LSL,
2686             const int shift_amount = 0);
2687 
2688   // Bitwise not.
2689   void mvn(const VRegister& vd, const VRegister& vn);
2690 
2691   // Vector move inverted immediate.
2692   void mvni(const VRegister& vd,
2693             const int imm8,
2694             Shift shift = LSL,
2695             const int shift_amount = 0);
2696 
2697   // Signed saturating accumulate of unsigned value.
2698   void suqadd(const VRegister& vd, const VRegister& vn);
2699 
2700   // Unsigned saturating accumulate of signed value.
2701   void usqadd(const VRegister& vd, const VRegister& vn);
2702 
2703   // Absolute value.
2704   void abs(const VRegister& vd, const VRegister& vn);
2705 
2706   // Signed saturating absolute value.
2707   void sqabs(const VRegister& vd, const VRegister& vn);
2708 
2709   // Negate.
2710   void neg(const VRegister& vd, const VRegister& vn);
2711 
2712   // Signed saturating negate.
2713   void sqneg(const VRegister& vd, const VRegister& vn);
2714 
2715   // Bitwise not.
2716   void not_(const VRegister& vd, const VRegister& vn);
2717 
2718   // Extract narrow.
2719   void xtn(const VRegister& vd, const VRegister& vn);
2720 
2721   // Extract narrow (second part).
2722   void xtn2(const VRegister& vd, const VRegister& vn);
2723 
2724   // Signed saturating extract narrow.
2725   void sqxtn(const VRegister& vd, const VRegister& vn);
2726 
2727   // Signed saturating extract narrow (second part).
2728   void sqxtn2(const VRegister& vd, const VRegister& vn);
2729 
2730   // Unsigned saturating extract narrow.
2731   void uqxtn(const VRegister& vd, const VRegister& vn);
2732 
2733   // Unsigned saturating extract narrow (second part).
2734   void uqxtn2(const VRegister& vd, const VRegister& vn);
2735 
2736   // Signed saturating extract unsigned narrow.
2737   void sqxtun(const VRegister& vd, const VRegister& vn);
2738 
2739   // Signed saturating extract unsigned narrow (second part).
2740   void sqxtun2(const VRegister& vd, const VRegister& vn);
2741 
2742   // Extract vector from pair of vectors.
2743   void ext(const VRegister& vd,
2744            const VRegister& vn,
2745            const VRegister& vm,
2746            int index);
2747 
2748   // Duplicate vector element to vector or scalar.
2749   void dup(const VRegister& vd, const VRegister& vn, int vn_index);
2750 
2751   // Move vector element to scalar.
2752   void mov(const VRegister& vd, const VRegister& vn, int vn_index);
2753 
2754   // Duplicate general-purpose register to vector.
2755   void dup(const VRegister& vd, const Register& rn);
2756 
2757   // Insert vector element from another vector element.
2758   void ins(const VRegister& vd,
2759            int vd_index,
2760            const VRegister& vn,
2761            int vn_index);
2762 
2763   // Move vector element to another vector element.
2764   void mov(const VRegister& vd,
2765            int vd_index,
2766            const VRegister& vn,
2767            int vn_index);
2768 
2769   // Insert vector element from general-purpose register.
2770   void ins(const VRegister& vd, int vd_index, const Register& rn);
2771 
2772   // Move general-purpose register to a vector element.
2773   void mov(const VRegister& vd, int vd_index, const Register& rn);
2774 
2775   // Unsigned move vector element to general-purpose register.
2776   void umov(const Register& rd, const VRegister& vn, int vn_index);
2777 
2778   // Move vector element to general-purpose register.
2779   void mov(const Register& rd, const VRegister& vn, int vn_index);
2780 
2781   // Signed move vector element to general-purpose register.
2782   void smov(const Register& rd, const VRegister& vn, int vn_index);
2783 
2784   // One-element structure load to one register.
2785   void ld1(const VRegister& vt, const MemOperand& src);
2786 
2787   // One-element structure load to two registers.
2788   void ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
2789 
2790   // One-element structure load to three registers.
2791   void ld1(const VRegister& vt,
2792            const VRegister& vt2,
2793            const VRegister& vt3,
2794            const MemOperand& src);
2795 
2796   // One-element structure load to four registers.
2797   void ld1(const VRegister& vt,
2798            const VRegister& vt2,
2799            const VRegister& vt3,
2800            const VRegister& vt4,
2801            const MemOperand& src);
2802 
2803   // One-element single structure load to one lane.
2804   void ld1(const VRegister& vt, int lane, const MemOperand& src);
2805 
2806   // One-element single structure load to all lanes.
2807   void ld1r(const VRegister& vt, const MemOperand& src);
2808 
2809   // Two-element structure load.
2810   void ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
2811 
2812   // Two-element single structure load to one lane.
2813   void ld2(const VRegister& vt,
2814            const VRegister& vt2,
2815            int lane,
2816            const MemOperand& src);
2817 
2818   // Two-element single structure load to all lanes.
2819   void ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
2820 
2821   // Three-element structure load.
2822   void ld3(const VRegister& vt,
2823            const VRegister& vt2,
2824            const VRegister& vt3,
2825            const MemOperand& src);
2826 
2827   // Three-element single structure load to one lane.
2828   void ld3(const VRegister& vt,
2829            const VRegister& vt2,
2830            const VRegister& vt3,
2831            int lane,
2832            const MemOperand& src);
2833 
2834   // Three-element single structure load to all lanes.
2835   void ld3r(const VRegister& vt,
2836             const VRegister& vt2,
2837             const VRegister& vt3,
2838             const MemOperand& src);
2839 
2840   // Four-element structure load.
2841   void ld4(const VRegister& vt,
2842            const VRegister& vt2,
2843            const VRegister& vt3,
2844            const VRegister& vt4,
2845            const MemOperand& src);
2846 
2847   // Four-element single structure load to one lane.
2848   void ld4(const VRegister& vt,
2849            const VRegister& vt2,
2850            const VRegister& vt3,
2851            const VRegister& vt4,
2852            int lane,
2853            const MemOperand& src);
2854 
2855   // Four-element single structure load to all lanes.
2856   void ld4r(const VRegister& vt,
2857             const VRegister& vt2,
2858             const VRegister& vt3,
2859             const VRegister& vt4,
2860             const MemOperand& src);
2861 
2862   // Count leading sign bits.
2863   void cls(const VRegister& vd, const VRegister& vn);
2864 
2865   // Count leading zero bits (vector).
2866   void clz(const VRegister& vd, const VRegister& vn);
2867 
2868   // Population count per byte.
2869   void cnt(const VRegister& vd, const VRegister& vn);
2870 
2871   // Reverse bit order.
2872   void rbit(const VRegister& vd, const VRegister& vn);
2873 
2874   // Reverse elements in 16-bit halfwords.
2875   void rev16(const VRegister& vd, const VRegister& vn);
2876 
2877   // Reverse elements in 32-bit words.
2878   void rev32(const VRegister& vd, const VRegister& vn);
2879 
2880   // Reverse elements in 64-bit doublewords.
2881   void rev64(const VRegister& vd, const VRegister& vn);
2882 
2883   // Unsigned reciprocal square root estimate.
2884   void ursqrte(const VRegister& vd, const VRegister& vn);
2885 
2886   // Unsigned reciprocal estimate.
2887   void urecpe(const VRegister& vd, const VRegister& vn);
2888 
2889   // Signed pairwise long add.
2890   void saddlp(const VRegister& vd, const VRegister& vn);
2891 
2892   // Unsigned pairwise long add.
2893   void uaddlp(const VRegister& vd, const VRegister& vn);
2894 
2895   // Signed pairwise long add and accumulate.
2896   void sadalp(const VRegister& vd, const VRegister& vn);
2897 
2898   // Unsigned pairwise long add and accumulate.
2899   void uadalp(const VRegister& vd, const VRegister& vn);
2900 
2901   // Shift left by immediate.
2902   void shl(const VRegister& vd, const VRegister& vn, int shift);
2903 
2904   // Signed saturating shift left by immediate.
2905   void sqshl(const VRegister& vd, const VRegister& vn, int shift);
2906 
2907   // Signed saturating shift left unsigned by immediate.
2908   void sqshlu(const VRegister& vd, const VRegister& vn, int shift);
2909 
2910   // Unsigned saturating shift left by immediate.
2911   void uqshl(const VRegister& vd, const VRegister& vn, int shift);
2912 
2913   // Signed shift left long by immediate.
2914   void sshll(const VRegister& vd, const VRegister& vn, int shift);
2915 
2916   // Signed shift left long by immediate (second part).
2917   void sshll2(const VRegister& vd, const VRegister& vn, int shift);
2918 
2919   // Signed extend long.
2920   void sxtl(const VRegister& vd, const VRegister& vn);
2921 
2922   // Signed extend long (second part).
2923   void sxtl2(const VRegister& vd, const VRegister& vn);
2924 
2925   // Unsigned shift left long by immediate.
2926   void ushll(const VRegister& vd, const VRegister& vn, int shift);
2927 
2928   // Unsigned shift left long by immediate (second part).
2929   void ushll2(const VRegister& vd, const VRegister& vn, int shift);
2930 
2931   // Shift left long by element size.
2932   void shll(const VRegister& vd, const VRegister& vn, int shift);
2933 
2934   // Shift left long by element size (second part).
2935   void shll2(const VRegister& vd, const VRegister& vn, int shift);
2936 
2937   // Unsigned extend long.
2938   void uxtl(const VRegister& vd, const VRegister& vn);
2939 
2940   // Unsigned extend long (second part).
2941   void uxtl2(const VRegister& vd, const VRegister& vn);
2942 
2943   // Shift left by immediate and insert.
2944   void sli(const VRegister& vd, const VRegister& vn, int shift);
2945 
2946   // Shift right by immediate and insert.
2947   void sri(const VRegister& vd, const VRegister& vn, int shift);
2948 
2949   // Signed maximum.
2950   void smax(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2951 
2952   // Signed pairwise maximum.
2953   void smaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2954 
2955   // Add across vector.
2956   void addv(const VRegister& vd, const VRegister& vn);
2957 
2958   // Signed add long across vector.
2959   void saddlv(const VRegister& vd, const VRegister& vn);
2960 
2961   // Unsigned add long across vector.
2962   void uaddlv(const VRegister& vd, const VRegister& vn);
2963 
2964   // FP maximum number across vector.
2965   void fmaxnmv(const VRegister& vd, const VRegister& vn);
2966 
2967   // FP maximum across vector.
2968   void fmaxv(const VRegister& vd, const VRegister& vn);
2969 
2970   // FP minimum number across vector.
2971   void fminnmv(const VRegister& vd, const VRegister& vn);
2972 
2973   // FP minimum across vector.
2974   void fminv(const VRegister& vd, const VRegister& vn);
2975 
2976   // Signed maximum across vector.
2977   void smaxv(const VRegister& vd, const VRegister& vn);
2978 
2979   // Signed minimum.
2980   void smin(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2981 
2982   // Signed minimum pairwise.
2983   void sminp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
2984 
2985   // Signed minimum across vector.
2986   void sminv(const VRegister& vd, const VRegister& vn);
2987 
2988   // One-element structure store from one register.
2989   void st1(const VRegister& vt, const MemOperand& src);
2990 
2991   // One-element structure store from two registers.
2992   void st1(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
2993 
2994   // One-element structure store from three registers.
2995   void st1(const VRegister& vt,
2996            const VRegister& vt2,
2997            const VRegister& vt3,
2998            const MemOperand& src);
2999 
3000   // One-element structure store from four registers.
3001   void st1(const VRegister& vt,
3002            const VRegister& vt2,
3003            const VRegister& vt3,
3004            const VRegister& vt4,
3005            const MemOperand& src);
3006 
3007   // One-element single structure store from one lane.
3008   void st1(const VRegister& vt, int lane, const MemOperand& src);
3009 
3010   // Two-element structure store from two registers.
3011   void st2(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
3012 
3013   // Two-element single structure store from two lanes.
3014   void st2(const VRegister& vt,
3015            const VRegister& vt2,
3016            int lane,
3017            const MemOperand& src);
3018 
3019   // Three-element structure store from three registers.
3020   void st3(const VRegister& vt,
3021            const VRegister& vt2,
3022            const VRegister& vt3,
3023            const MemOperand& src);
3024 
3025   // Three-element single structure store from three lanes.
3026   void st3(const VRegister& vt,
3027            const VRegister& vt2,
3028            const VRegister& vt3,
3029            int lane,
3030            const MemOperand& src);
3031 
3032   // Four-element structure store from four registers.
3033   void st4(const VRegister& vt,
3034            const VRegister& vt2,
3035            const VRegister& vt3,
3036            const VRegister& vt4,
3037            const MemOperand& src);
3038 
3039   // Four-element single structure store from four lanes.
3040   void st4(const VRegister& vt,
3041            const VRegister& vt2,
3042            const VRegister& vt3,
3043            const VRegister& vt4,
3044            int lane,
3045            const MemOperand& src);
3046 
3047   // Unsigned add long.
3048   void uaddl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3049 
3050   // Unsigned add long (second part).
3051   void uaddl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3052 
3053   // Unsigned add wide.
3054   void uaddw(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3055 
3056   // Unsigned add wide (second part).
3057   void uaddw2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3058 
3059   // Signed add long.
3060   void saddl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3061 
3062   // Signed add long (second part).
3063   void saddl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3064 
3065   // Signed add wide.
3066   void saddw(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3067 
3068   // Signed add wide (second part).
3069   void saddw2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3070 
3071   // Unsigned subtract long.
3072   void usubl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3073 
3074   // Unsigned subtract long (second part).
3075   void usubl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3076 
3077   // Unsigned subtract wide.
3078   void usubw(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3079 
3080   // Unsigned subtract wide (second part).
3081   void usubw2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3082 
3083   // Signed subtract long.
3084   void ssubl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3085 
3086   // Signed subtract long (second part).
3087   void ssubl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3088 
3089   // Signed integer subtract wide.
3090   void ssubw(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3091 
3092   // Signed integer subtract wide (second part).
3093   void ssubw2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3094 
3095   // Unsigned maximum.
3096   void umax(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3097 
3098   // Unsigned pairwise maximum.
3099   void umaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3100 
3101   // Unsigned maximum across vector.
3102   void umaxv(const VRegister& vd, const VRegister& vn);
3103 
3104   // Unsigned minimum.
3105   void umin(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3106 
3107   // Unsigned pairwise minimum.
3108   void uminp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3109 
3110   // Unsigned minimum across vector.
3111   void uminv(const VRegister& vd, const VRegister& vn);
3112 
3113   // Transpose vectors (primary).
3114   void trn1(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3115 
3116   // Transpose vectors (secondary).
3117   void trn2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3118 
3119   // Unzip vectors (primary).
3120   void uzp1(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3121 
3122   // Unzip vectors (secondary).
3123   void uzp2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3124 
3125   // Zip vectors (primary).
3126   void zip1(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3127 
3128   // Zip vectors (secondary).
3129   void zip2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3130 
3131   // Signed shift right by immediate.
3132   void sshr(const VRegister& vd, const VRegister& vn, int shift);
3133 
3134   // Unsigned shift right by immediate.
3135   void ushr(const VRegister& vd, const VRegister& vn, int shift);
3136 
3137   // Signed rounding shift right by immediate.
3138   void srshr(const VRegister& vd, const VRegister& vn, int shift);
3139 
3140   // Unsigned rounding shift right by immediate.
3141   void urshr(const VRegister& vd, const VRegister& vn, int shift);
3142 
3143   // Signed shift right by immediate and accumulate.
3144   void ssra(const VRegister& vd, const VRegister& vn, int shift);
3145 
3146   // Unsigned shift right by immediate and accumulate.
3147   void usra(const VRegister& vd, const VRegister& vn, int shift);
3148 
3149   // Signed rounding shift right by immediate and accumulate.
3150   void srsra(const VRegister& vd, const VRegister& vn, int shift);
3151 
3152   // Unsigned rounding shift right by immediate and accumulate.
3153   void ursra(const VRegister& vd, const VRegister& vn, int shift);
3154 
3155   // Shift right narrow by immediate.
3156   void shrn(const VRegister& vd, const VRegister& vn, int shift);
3157 
3158   // Shift right narrow by immediate (second part).
3159   void shrn2(const VRegister& vd, const VRegister& vn, int shift);
3160 
3161   // Rounding shift right narrow by immediate.
3162   void rshrn(const VRegister& vd, const VRegister& vn, int shift);
3163 
3164   // Rounding shift right narrow by immediate (second part).
3165   void rshrn2(const VRegister& vd, const VRegister& vn, int shift);
3166 
3167   // Unsigned saturating shift right narrow by immediate.
3168   void uqshrn(const VRegister& vd, const VRegister& vn, int shift);
3169 
3170   // Unsigned saturating shift right narrow by immediate (second part).
3171   void uqshrn2(const VRegister& vd, const VRegister& vn, int shift);
3172 
3173   // Unsigned saturating rounding shift right narrow by immediate.
3174   void uqrshrn(const VRegister& vd, const VRegister& vn, int shift);
3175 
3176   // Unsigned saturating rounding shift right narrow by immediate (second part).
3177   void uqrshrn2(const VRegister& vd, const VRegister& vn, int shift);
3178 
3179   // Signed saturating shift right narrow by immediate.
3180   void sqshrn(const VRegister& vd, const VRegister& vn, int shift);
3181 
3182   // Signed saturating shift right narrow by immediate (second part).
3183   void sqshrn2(const VRegister& vd, const VRegister& vn, int shift);
3184 
3185   // Signed saturating rounded shift right narrow by immediate.
3186   void sqrshrn(const VRegister& vd, const VRegister& vn, int shift);
3187 
3188   // Signed saturating rounded shift right narrow by immediate (second part).
3189   void sqrshrn2(const VRegister& vd, const VRegister& vn, int shift);
3190 
3191   // Signed saturating shift right unsigned narrow by immediate.
3192   void sqshrun(const VRegister& vd, const VRegister& vn, int shift);
3193 
3194   // Signed saturating shift right unsigned narrow by immediate (second part).
3195   void sqshrun2(const VRegister& vd, const VRegister& vn, int shift);
3196 
3197   // Signed sat rounded shift right unsigned narrow by immediate.
3198   void sqrshrun(const VRegister& vd, const VRegister& vn, int shift);
3199 
3200   // Signed sat rounded shift right unsigned narrow by immediate (second part).
3201   void sqrshrun2(const VRegister& vd, const VRegister& vn, int shift);
3202 
3203   // FP reciprocal step.
3204   void frecps(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3205 
3206   // FP reciprocal estimate.
3207   void frecpe(const VRegister& vd, const VRegister& vn);
3208 
3209   // FP reciprocal square root estimate.
3210   void frsqrte(const VRegister& vd, const VRegister& vn);
3211 
3212   // FP reciprocal square root step.
3213   void frsqrts(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3214 
3215   // Signed absolute difference and accumulate long.
3216   void sabal(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3217 
3218   // Signed absolute difference and accumulate long (second part).
3219   void sabal2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3220 
3221   // Unsigned absolute difference and accumulate long.
3222   void uabal(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3223 
3224   // Unsigned absolute difference and accumulate long (second part).
3225   void uabal2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3226 
3227   // Signed absolute difference long.
3228   void sabdl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3229 
3230   // Signed absolute difference long (second part).
3231   void sabdl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3232 
3233   // Unsigned absolute difference long.
3234   void uabdl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3235 
3236   // Unsigned absolute difference long (second part).
3237   void uabdl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3238 
3239   // Polynomial multiply long.
3240   void pmull(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3241 
3242   // Polynomial multiply long (second part).
3243   void pmull2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3244 
3245   // Signed long multiply-add.
3246   void smlal(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3247 
3248   // Signed long multiply-add (second part).
3249   void smlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3250 
3251   // Unsigned long multiply-add.
3252   void umlal(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3253 
3254   // Unsigned long multiply-add (second part).
3255   void umlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3256 
3257   // Signed long multiply-sub.
3258   void smlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3259 
3260   // Signed long multiply-sub (second part).
3261   void smlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3262 
3263   // Unsigned long multiply-sub.
3264   void umlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3265 
3266   // Unsigned long multiply-sub (second part).
3267   void umlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3268 
3269   // Signed long multiply.
3270   void smull(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3271 
3272   // Signed long multiply (second part).
3273   void smull2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3274 
3275   // Signed saturating doubling long multiply-add.
3276   void sqdmlal(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3277 
3278   // Signed saturating doubling long multiply-add (second part).
3279   void sqdmlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3280 
3281   // Signed saturating doubling long multiply-subtract.
3282   void sqdmlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3283 
3284   // Signed saturating doubling long multiply-subtract (second part).
3285   void sqdmlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3286 
3287   // Signed saturating doubling long multiply.
3288   void sqdmull(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3289 
3290   // Signed saturating doubling long multiply (second part).
3291   void sqdmull2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3292 
3293   // Signed saturating doubling multiply returning high half.
3294   void sqdmulh(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3295 
3296   // Signed saturating rounding doubling multiply returning high half.
3297   void sqrdmulh(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3298 
3299   // Signed dot product [Armv8.2].
3300   void sdot(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3301 
3302   // Signed saturating rounding doubling multiply accumulate returning high
3303   // half [Armv8.1].
3304   void sqrdmlah(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3305 
3306   // Unsigned dot product [Armv8.2].
3307   void udot(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3308 
3309   // Signed saturating rounding doubling multiply subtract returning high half
3310   // [Armv8.1].
3311   void sqrdmlsh(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3312 
3313   // Signed saturating doubling multiply element returning high half.
3314   void sqdmulh(const VRegister& vd,
3315                const VRegister& vn,
3316                const VRegister& vm,
3317                int vm_index);
3318 
3319   // Signed saturating rounding doubling multiply element returning high half.
3320   void sqrdmulh(const VRegister& vd,
3321                 const VRegister& vn,
3322                 const VRegister& vm,
3323                 int vm_index);
3324 
3325   // Signed dot product by element [Armv8.2].
3326   void sdot(const VRegister& vd,
3327             const VRegister& vn,
3328             const VRegister& vm,
3329             int vm_index);
3330 
3331   // Signed saturating rounding doubling multiply accumulate element returning
3332   // high half [Armv8.1].
3333   void sqrdmlah(const VRegister& vd,
3334                 const VRegister& vn,
3335                 const VRegister& vm,
3336                 int vm_index);
3337 
3338   // Unsigned dot product by element [Armv8.2].
3339   void udot(const VRegister& vd,
3340             const VRegister& vn,
3341             const VRegister& vm,
3342             int vm_index);
3343 
3344   // Signed saturating rounding doubling multiply subtract element returning
3345   // high half [Armv8.1].
3346   void sqrdmlsh(const VRegister& vd,
3347                 const VRegister& vn,
3348                 const VRegister& vm,
3349                 int vm_index);
3350 
3351   // Unsigned long multiply.
3352   void umull(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3353 
3354   // Unsigned long multiply (second part).
3355   void umull2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3356 
3357   // Add narrow returning high half.
3358   void addhn(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3359 
3360   // Add narrow returning high half (second part).
3361   void addhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3362 
3363   // Rounding add narrow returning high half.
3364   void raddhn(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3365 
3366   // Rounding add narrow returning high half (second part).
3367   void raddhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3368 
3369   // Subtract narrow returning high half.
3370   void subhn(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3371 
3372   // Subtract narrow returning high half (second part).
3373   void subhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3374 
3375   // Rounding subtract narrow returning high half.
3376   void rsubhn(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3377 
3378   // Rounding subtract narrow returning high half (second part).
3379   void rsubhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3380 
3381   // FP vector multiply accumulate.
3382   void fmla(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3383 
3384   // FP vector multiply subtract.
3385   void fmls(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3386 
3387   // FP vector multiply extended.
3388   void fmulx(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3389 
3390   // FP absolute greater than or equal.
3391   void facge(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3392 
3393   // FP absolute greater than.
3394   void facgt(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3395 
3396   // FP multiply by element.
3397   void fmul(const VRegister& vd,
3398             const VRegister& vn,
3399             const VRegister& vm,
3400             int vm_index);
3401 
3402   // FP fused multiply-add to accumulator by element.
3403   void fmla(const VRegister& vd,
3404             const VRegister& vn,
3405             const VRegister& vm,
3406             int vm_index);
3407 
3408   // FP fused multiply-sub from accumulator by element.
3409   void fmls(const VRegister& vd,
3410             const VRegister& vn,
3411             const VRegister& vm,
3412             int vm_index);
3413 
3414   // FP multiply extended by element.
3415   void fmulx(const VRegister& vd,
3416              const VRegister& vn,
3417              const VRegister& vm,
3418              int vm_index);
3419 
3420   // FP compare equal.
3421   void fcmeq(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3422 
3423   // FP greater than.
3424   void fcmgt(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3425 
3426   // FP greater than or equal.
3427   void fcmge(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3428 
3429   // FP compare equal to zero.
3430   void fcmeq(const VRegister& vd, const VRegister& vn, double imm);
3431 
3432   // FP greater than zero.
3433   void fcmgt(const VRegister& vd, const VRegister& vn, double imm);
3434 
3435   // FP greater than or equal to zero.
3436   void fcmge(const VRegister& vd, const VRegister& vn, double imm);
3437 
3438   // FP less than or equal to zero.
3439   void fcmle(const VRegister& vd, const VRegister& vn, double imm);
3440 
3441   // FP less than zero.
3442   void fcmlt(const VRegister& vd, const VRegister& vn, double imm);
3443 
3444   // FP absolute difference.
3445   void fabd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3446 
3447   // FP pairwise add vector.
3448   void faddp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3449 
3450   // FP pairwise add scalar.
3451   void faddp(const VRegister& vd, const VRegister& vn);
3452 
3453   // FP pairwise maximum vector.
3454   void fmaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3455 
3456   // FP pairwise maximum scalar.
3457   void fmaxp(const VRegister& vd, const VRegister& vn);
3458 
3459   // FP pairwise minimum vector.
3460   void fminp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3461 
3462   // FP pairwise minimum scalar.
3463   void fminp(const VRegister& vd, const VRegister& vn);
3464 
3465   // FP pairwise maximum number vector.
3466   void fmaxnmp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3467 
3468   // FP pairwise maximum number scalar.
3469   void fmaxnmp(const VRegister& vd, const VRegister& vn);
3470 
3471   // FP pairwise minimum number vector.
3472   void fminnmp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
3473 
3474   // FP pairwise minimum number scalar.
3475   void fminnmp(const VRegister& vd, const VRegister& vn);
3476 
3477   // v8.3 complex numbers - note that these are only partial/helper functions
3478   // and must be used in series in order to perform full CN operations.
3479   // FP complex multiply accumulate (by element) [Armv8.3].
3480   void fcmla(const VRegister& vd,
3481              const VRegister& vn,
3482              const VRegister& vm,
3483              int vm_index,
3484              int rot);
3485 
3486   // FP complex multiply accumulate [Armv8.3].
3487   void fcmla(const VRegister& vd,
3488              const VRegister& vn,
3489              const VRegister& vm,
3490              int rot);
3491 
3492   // FP complex add [Armv8.3].
3493   void fcadd(const VRegister& vd,
3494              const VRegister& vn,
3495              const VRegister& vm,
3496              int rot);
3497 
3498   // Emit generic instructions.
3499   // Emit raw instructions into the instruction stream.
dci(Instr raw_inst)3500   void dci(Instr raw_inst) { Emit(raw_inst); }
3501 
3502   // Emit 32 bits of data into the instruction stream.
dc32(uint32_t data)3503   void dc32(uint32_t data) { dc(data); }
3504 
3505   // Emit 64 bits of data into the instruction stream.
dc64(uint64_t data)3506   void dc64(uint64_t data) { dc(data); }
3507 
3508   // Emit data in the instruction stream.
  template <typename T>
  void dc(T data) {
    // Raw data may only be emitted while the assembler allows direct writes
    // to the instruction stream.
    VIXL_ASSERT(AllowAssembler());
    GetBuffer()->Emit<T>(data);
  }
3514 
3515   // Copy a string into the instruction stream, including the terminating NULL
3516   // character. The instruction pointer is then aligned correctly for
3517   // subsequent instructions.
EmitString(const char * string)3518   void EmitString(const char* string) {
3519     VIXL_ASSERT(string != NULL);
3520     VIXL_ASSERT(AllowAssembler());
3521 
3522     GetBuffer()->EmitString(string);
3523     GetBuffer()->Align();
3524   }
3525 
3526   // Code generation helpers.
3527 
3528   // Register encoding.
Rd(CPURegister rd)3529   static Instr Rd(CPURegister rd) {
3530     VIXL_ASSERT(rd.GetCode() != kSPRegInternalCode);
3531     return rd.GetCode() << Rd_offset;
3532   }
3533 
Rn(CPURegister rn)3534   static Instr Rn(CPURegister rn) {
3535     VIXL_ASSERT(rn.GetCode() != kSPRegInternalCode);
3536     return rn.GetCode() << Rn_offset;
3537   }
3538 
Rm(CPURegister rm)3539   static Instr Rm(CPURegister rm) {
3540     VIXL_ASSERT(rm.GetCode() != kSPRegInternalCode);
3541     return rm.GetCode() << Rm_offset;
3542   }
3543 
RmNot31(CPURegister rm)3544   static Instr RmNot31(CPURegister rm) {
3545     VIXL_ASSERT(rm.GetCode() != kSPRegInternalCode);
3546     VIXL_ASSERT(!rm.IsZero());
3547     return Rm(rm);
3548   }
3549 
Ra(CPURegister ra)3550   static Instr Ra(CPURegister ra) {
3551     VIXL_ASSERT(ra.GetCode() != kSPRegInternalCode);
3552     return ra.GetCode() << Ra_offset;
3553   }
3554 
Rt(CPURegister rt)3555   static Instr Rt(CPURegister rt) {
3556     VIXL_ASSERT(rt.GetCode() != kSPRegInternalCode);
3557     return rt.GetCode() << Rt_offset;
3558   }
3559 
Rt2(CPURegister rt2)3560   static Instr Rt2(CPURegister rt2) {
3561     VIXL_ASSERT(rt2.GetCode() != kSPRegInternalCode);
3562     return rt2.GetCode() << Rt2_offset;
3563   }
3564 
Rs(CPURegister rs)3565   static Instr Rs(CPURegister rs) {
3566     VIXL_ASSERT(rs.GetCode() != kSPRegInternalCode);
3567     return rs.GetCode() << Rs_offset;
3568   }
3569 
3570   // These encoding functions allow the stack pointer to be encoded, and
3571   // disallow the zero register.
RdSP(Register rd)3572   static Instr RdSP(Register rd) {
3573     VIXL_ASSERT(!rd.IsZero());
3574     return (rd.GetCode() & kRegCodeMask) << Rd_offset;
3575   }
3576 
RnSP(Register rn)3577   static Instr RnSP(Register rn) {
3578     VIXL_ASSERT(!rn.IsZero());
3579     return (rn.GetCode() & kRegCodeMask) << Rn_offset;
3580   }
3581 
RmSP(Register rm)3582   static Instr RmSP(Register rm) {
3583     VIXL_ASSERT(!rm.IsZero());
3584     return (rm.GetCode() & kRegCodeMask) << Rm_offset;
3585   }
3586 
3587   // Flags encoding.
Flags(FlagsUpdate S)3588   static Instr Flags(FlagsUpdate S) {
3589     if (S == SetFlags) {
3590       return 1 << FlagsUpdate_offset;
3591     } else if (S == LeaveFlags) {
3592       return 0 << FlagsUpdate_offset;
3593     }
3594     VIXL_UNREACHABLE();
3595     return 0;
3596   }
3597 
Cond(Condition cond)3598   static Instr Cond(Condition cond) { return cond << Condition_offset; }
3599 
3600   // PC-relative address encoding.
  static Instr ImmPCRelAddress(int64_t imm21) {
    VIXL_ASSERT(IsInt21(imm21));
    Instr imm = static_cast<Instr>(TruncateToUint21(imm21));
    // The 21-bit offset is split across two fields: the low bits go into
    // immlo and the remaining high bits into immhi.
    Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
    Instr immlo = imm << ImmPCRelLo_offset;
    return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
  }
3608 
3609   // Branch encoding.
ImmUncondBranch(int64_t imm26)3610   static Instr ImmUncondBranch(int64_t imm26) {
3611     VIXL_ASSERT(IsInt26(imm26));
3612     return TruncateToUint26(imm26) << ImmUncondBranch_offset;
3613   }
3614 
ImmCondBranch(int64_t imm19)3615   static Instr ImmCondBranch(int64_t imm19) {
3616     VIXL_ASSERT(IsInt19(imm19));
3617     return TruncateToUint19(imm19) << ImmCondBranch_offset;
3618   }
3619 
ImmCmpBranch(int64_t imm19)3620   static Instr ImmCmpBranch(int64_t imm19) {
3621     VIXL_ASSERT(IsInt19(imm19));
3622     return TruncateToUint19(imm19) << ImmCmpBranch_offset;
3623   }
3624 
ImmTestBranch(int64_t imm14)3625   static Instr ImmTestBranch(int64_t imm14) {
3626     VIXL_ASSERT(IsInt14(imm14));
3627     return TruncateToUint14(imm14) << ImmTestBranch_offset;
3628   }
3629 
  static Instr ImmTestBranchBit(unsigned bit_pos) {
    VIXL_ASSERT(IsUint6(bit_pos));
    // Subtract five from the shift offset, as we need bit 5 from bit_pos.
    unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
    unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
    // Mask each candidate down to its own field: b5 keeps only bit 5 of
    // bit_pos, b40 keeps bits 4:0.
    b5 &= ImmTestBranchBit5_mask;
    b40 &= ImmTestBranchBit40_mask;
    return b5 | b40;
  }
3639 
3640   // Data Processing encoding.
SF(Register rd)3641   static Instr SF(Register rd) {
3642     return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
3643   }
3644 
  static Instr ImmAddSub(int imm) {
    VIXL_ASSERT(IsImmAddSub(imm));
    if (IsUint12(imm)) {  // No shift required.
      imm <<= ImmAddSub_offset;
    } else {
      // Wider values are encoded as a 12-bit immediate shifted left by 12:
      // store the pre-shifted value and set the shift bit.
      imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
    }
    return imm;
  }
3654 
ImmS(unsigned imms,unsigned reg_size)3655   static Instr ImmS(unsigned imms, unsigned reg_size) {
3656     VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(imms)) ||
3657                 ((reg_size == kWRegSize) && IsUint5(imms)));
3658     USE(reg_size);
3659     return imms << ImmS_offset;
3660   }
3661 
ImmR(unsigned immr,unsigned reg_size)3662   static Instr ImmR(unsigned immr, unsigned reg_size) {
3663     VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(immr)) ||
3664                 ((reg_size == kWRegSize) && IsUint5(immr)));
3665     USE(reg_size);
3666     VIXL_ASSERT(IsUint6(immr));
3667     return immr << ImmR_offset;
3668   }
3669 
  static Instr ImmSetBits(unsigned imms, unsigned reg_size) {
    VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
    VIXL_ASSERT(IsUint6(imms));
    // For W-sized operations, imms + 3 must also fit in six bits.
    VIXL_ASSERT((reg_size == kXRegSize) || IsUint6(imms + 3));
    USE(reg_size);
    return imms << ImmSetBits_offset;
  }
3677 
ImmRotate(unsigned immr,unsigned reg_size)3678   static Instr ImmRotate(unsigned immr, unsigned reg_size) {
3679     VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
3680     VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(immr)) ||
3681                 ((reg_size == kWRegSize) && IsUint5(immr)));
3682     USE(reg_size);
3683     return immr << ImmRotate_offset;
3684   }
3685 
ImmLLiteral(int64_t imm19)3686   static Instr ImmLLiteral(int64_t imm19) {
3687     VIXL_ASSERT(IsInt19(imm19));
3688     return TruncateToUint19(imm19) << ImmLLiteral_offset;
3689   }
3690 
  static Instr BitN(unsigned bitn, unsigned reg_size) {
    VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
    // The N bit may only be set for 64-bit (X register) operations.
    VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0));
    USE(reg_size);
    return bitn << BitN_offset;
  }
3697 
  static Instr ShiftDP(Shift shift) {
    // Only the four standard shift types are valid in data-processing forms.
    VIXL_ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
    return shift << ShiftDP_offset;
  }
3702 
  static Instr ImmDPShift(unsigned amount) {
    // Shift amounts are encoded in a 6-bit field.
    VIXL_ASSERT(IsUint6(amount));
    return amount << ImmDPShift_offset;
  }
3707 
ExtendMode(Extend extend)3708   static Instr ExtendMode(Extend extend) { return extend << ExtendMode_offset; }
3709 
  static Instr ImmExtendShift(unsigned left_shift) {
    // Extended-register operands allow a left shift of at most four bits.
    VIXL_ASSERT(left_shift <= 4);
    return left_shift << ImmExtendShift_offset;
  }
3714 
  static Instr ImmCondCmp(unsigned imm) {
    // Conditional-compare immediates are unsigned 5-bit values.
    VIXL_ASSERT(IsUint5(imm));
    return imm << ImmCondCmp_offset;
  }
3719 
  static Instr Nzcv(StatusFlags nzcv) {
    // Extract the four NZCV bits from the status value and place them in the
    // instruction's nzcv field.
    return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
  }
3723 
3724   // MemOperand offset encoding.
ImmLSUnsigned(int64_t imm12)3725   static Instr ImmLSUnsigned(int64_t imm12) {
3726     VIXL_ASSERT(IsUint12(imm12));
3727     return TruncateToUint12(imm12) << ImmLSUnsigned_offset;
3728   }
3729 
ImmLS(int64_t imm9)3730   static Instr ImmLS(int64_t imm9) {
3731     VIXL_ASSERT(IsInt9(imm9));
3732     return TruncateToUint9(imm9) << ImmLS_offset;
3733   }
3734 
  static Instr ImmLSPair(int64_t imm7, unsigned access_size) {
    // Pair offsets must be a multiple of the access size and are stored
    // scaled (divided by the access size) in a signed 7-bit field.
    VIXL_ASSERT(IsMultiple(imm7, 1 << access_size));
    int64_t scaled_imm7 = imm7 / (1 << access_size);
    VIXL_ASSERT(IsInt7(scaled_imm7));
    return TruncateToUint7(scaled_imm7) << ImmLSPair_offset;
  }
3741 
  static Instr ImmShiftLS(unsigned shift_amount) {
    // The load/store scaling flag is a single bit.
    VIXL_ASSERT(IsUint1(shift_amount));
    return shift_amount << ImmShiftLS_offset;
  }
3746 
  static Instr ImmPrefetchOperation(int imm5) {
    // Prefetch operations are encoded as a 5-bit immediate.
    VIXL_ASSERT(IsUint5(imm5));
    return imm5 << ImmPrefetchOperation_offset;
  }
3751 
  static Instr ImmException(int imm16) {
    // Exception-generating instructions carry a 16-bit immediate.
    VIXL_ASSERT(IsUint16(imm16));
    return imm16 << ImmException_offset;
  }
3756 
  static Instr ImmSystemRegister(int imm16) {
    // System registers are identified by a 16-bit encoding.
    VIXL_ASSERT(IsUint16(imm16));
    return imm16 << ImmSystemRegister_offset;
  }
3761 
  static Instr ImmHint(int imm7) {
    // Hint instructions use a 7-bit immediate.
    VIXL_ASSERT(IsUint7(imm7));
    return imm7 << ImmHint_offset;
  }
3766 
  static Instr CRm(int imm4) {
    // CRm is a 4-bit field.
    VIXL_ASSERT(IsUint4(imm4));
    return imm4 << CRm_offset;
  }
3771 
  static Instr CRn(int imm4) {
    // CRn is a 4-bit field.
    VIXL_ASSERT(IsUint4(imm4));
    return imm4 << CRn_offset;
  }
3776 
  static Instr SysOp(int imm14) {
    // Combined system-operation field (14 bits).
    VIXL_ASSERT(IsUint14(imm14));
    return imm14 << SysOp_offset;
  }
3781 
  static Instr ImmSysOp1(int imm3) {
    // op1 is a 3-bit field.
    VIXL_ASSERT(IsUint3(imm3));
    return imm3 << SysOp1_offset;
  }
3786 
  static Instr ImmSysOp2(int imm3) {
    // op2 is a 3-bit field.
    VIXL_ASSERT(IsUint3(imm3));
    return imm3 << SysOp2_offset;
  }
3791 
  static Instr ImmBarrierDomain(int imm2) {
    // The barrier domain is a 2-bit field.
    VIXL_ASSERT(IsUint2(imm2));
    return imm2 << ImmBarrierDomain_offset;
  }
3796 
  static Instr ImmBarrierType(int imm2) {
    // The barrier type is a 2-bit field.
    VIXL_ASSERT(IsUint2(imm2));
    return imm2 << ImmBarrierType_offset;
  }
3801 
3802   // Move immediates encoding.
ImmMoveWide(uint64_t imm)3803   static Instr ImmMoveWide(uint64_t imm) {
3804     VIXL_ASSERT(IsUint16(imm));
3805     return static_cast<Instr>(imm << ImmMoveWide_offset);
3806   }
3807 
ShiftMoveWide(int64_t shift)3808   static Instr ShiftMoveWide(int64_t shift) {
3809     VIXL_ASSERT(IsUint2(shift));
3810     return static_cast<Instr>(shift << ShiftMoveWide_offset);
3811   }
3812 
3813   // FP Immediates.
3814   static Instr ImmFP16(Float16 imm);
3815   static Instr ImmFP32(float imm);
3816   static Instr ImmFP64(double imm);
3817 
3818   // FP register type.
  static Instr FPType(FPRegister fd) {
    // Map the register size to the instruction's FP type field.
    switch (fd.GetSizeInBits()) {
      case 16:
        return FP16;
      case 32:
        return FP32;
      case 64:
        return FP64;
      default:
        VIXL_UNREACHABLE();
        return 0;
    }
  }
3832 
  static Instr FPScale(unsigned scale) {
    // The fixed-point conversion scale is a 6-bit field.
    VIXL_ASSERT(IsUint6(scale));
    return scale << FPScale_offset;
  }
3837 
3838   // Immediate field checking helpers.
3839   static bool IsImmAddSub(int64_t immediate);
3840   static bool IsImmConditionalCompare(int64_t immediate);
3841   static bool IsImmFP16(Float16 imm);
3842   static bool IsImmFP32(float imm);
3843   static bool IsImmFP64(double imm);
3844   static bool IsImmLogical(uint64_t value,
3845                            unsigned width,
3846                            unsigned* n = NULL,
3847                            unsigned* imm_s = NULL,
3848                            unsigned* imm_r = NULL);
3849   static bool IsImmLSPair(int64_t offset, unsigned access_size);
3850   static bool IsImmLSScaled(int64_t offset, unsigned access_size);
3851   static bool IsImmLSUnscaled(int64_t offset);
3852   static bool IsImmMovn(uint64_t imm, unsigned reg_size);
3853   static bool IsImmMovz(uint64_t imm, unsigned reg_size);
3854 
3855   // Instruction bits for vector format in data processing operations.
  static Instr VFormat(VRegister vd) {
    if (vd.Is64Bits()) {
      // D-sized (64-bit) vector registers.
      switch (vd.GetLanes()) {
        case 2:
          return NEON_2S;
        case 4:
          return NEON_4H;
        case 8:
          return NEON_8B;
        default:
          // Unsupported lane arrangement.
          return 0xffffffff;
      }
    } else {
      // Q-sized (128-bit) vector registers.
      VIXL_ASSERT(vd.Is128Bits());
      switch (vd.GetLanes()) {
        case 2:
          return NEON_2D;
        case 4:
          return NEON_4S;
        case 8:
          return NEON_8H;
        case 16:
          return NEON_16B;
        default:
          // Unsupported lane arrangement.
          return 0xffffffff;
      }
    }
  }
3884 
3885   // Instruction bits for vector format in floating point data processing
3886   // operations.
  static Instr FPFormat(VRegister vd) {
    // The format is selected by the lane count and total register size.
    switch (vd.GetLanes()) {
      case 1:
        // Floating point scalar formats.
        switch (vd.GetSizeInBits()) {
          case 16:
            return FP16;
          case 32:
            return FP32;
          case 64:
            return FP64;
          default:
            VIXL_UNREACHABLE();
        }
        break;
      case 2:
        // Two lane floating point vector formats.
        switch (vd.GetSizeInBits()) {
          case 64:
            return NEON_FP_2S;
          case 128:
            return NEON_FP_2D;
          default:
            VIXL_UNREACHABLE();
        }
        break;
      case 4:
        // Four lane floating point vector formats (4H for half precision).
        switch (vd.GetSizeInBits()) {
          case 64:
            return NEON_FP_4H;
          case 128:
            return NEON_FP_4S;
          default:
            VIXL_UNREACHABLE();
        }
        break;
      case 8:
        // Eight lane floating point vector format.
        VIXL_ASSERT(vd.Is128Bits());
        return NEON_FP_8H;
      default:
        VIXL_UNREACHABLE();
        return 0;
    }
    VIXL_UNREACHABLE();
    return 0;
  }
3935 
3936   // Instruction bits for vector format in load and store operations.
LSVFormat(VRegister vd)3937   static Instr LSVFormat(VRegister vd) {
3938     if (vd.Is64Bits()) {
3939       switch (vd.GetLanes()) {
3940         case 1:
3941           return LS_NEON_1D;
3942         case 2:
3943           return LS_NEON_2S;
3944         case 4:
3945           return LS_NEON_4H;
3946         case 8:
3947           return LS_NEON_8B;
3948         default:
3949           return 0xffffffff;
3950       }
3951     } else {
3952       VIXL_ASSERT(vd.Is128Bits());
3953       switch (vd.GetLanes()) {
3954         case 2:
3955           return LS_NEON_2D;
3956         case 4:
3957           return LS_NEON_4S;
3958         case 8:
3959           return LS_NEON_8H;
3960         case 16:
3961           return LS_NEON_16B;
3962         default:
3963           return 0xffffffff;
3964       }
3965     }
3966   }
3967 
3968   // Instruction bits for scalar format in data processing operations.
SFormat(VRegister vd)3969   static Instr SFormat(VRegister vd) {
3970     VIXL_ASSERT(vd.GetLanes() == 1);
3971     switch (vd.GetSizeInBytes()) {
3972       case 1:
3973         return NEON_B;
3974       case 2:
3975         return NEON_H;
3976       case 4:
3977         return NEON_S;
3978       case 8:
3979         return NEON_D;
3980       default:
3981         return 0xffffffff;
3982     }
3983   }
3984 
ImmNEONHLM(int index,int num_bits)3985   static Instr ImmNEONHLM(int index, int num_bits) {
3986     int h, l, m;
3987     if (num_bits == 3) {
3988       VIXL_ASSERT(IsUint3(index));
3989       h = (index >> 2) & 1;
3990       l = (index >> 1) & 1;
3991       m = (index >> 0) & 1;
3992     } else if (num_bits == 2) {
3993       VIXL_ASSERT(IsUint2(index));
3994       h = (index >> 1) & 1;
3995       l = (index >> 0) & 1;
3996       m = 0;
3997     } else {
3998       VIXL_ASSERT(IsUint1(index) && (num_bits == 1));
3999       h = (index >> 0) & 1;
4000       l = 0;
4001       m = 0;
4002     }
4003     return (h << NEONH_offset) | (l << NEONL_offset) | (m << NEONM_offset);
4004   }
4005 
ImmRotFcadd(int rot)4006   static Instr ImmRotFcadd(int rot) {
4007     VIXL_ASSERT(rot == 90 || rot == 270);
4008     return (((rot == 270) ? 1 : 0) << ImmRotFcadd_offset);
4009   }
4010 
ImmRotFcmlaSca(int rot)4011   static Instr ImmRotFcmlaSca(int rot) {
4012     VIXL_ASSERT(rot == 0 || rot == 90 || rot == 180 || rot == 270);
4013     return (rot / 90) << ImmRotFcmlaSca_offset;
4014   }
4015 
ImmRotFcmlaVec(int rot)4016   static Instr ImmRotFcmlaVec(int rot) {
4017     VIXL_ASSERT(rot == 0 || rot == 90 || rot == 180 || rot == 270);
4018     return (rot / 90) << ImmRotFcmlaVec_offset;
4019   }
4020 
ImmNEONExt(int imm4)4021   static Instr ImmNEONExt(int imm4) {
4022     VIXL_ASSERT(IsUint4(imm4));
4023     return imm4 << ImmNEONExt_offset;
4024   }
4025 
ImmNEON5(Instr format,int index)4026   static Instr ImmNEON5(Instr format, int index) {
4027     VIXL_ASSERT(IsUint4(index));
4028     int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
4029     int imm5 = (index << (s + 1)) | (1 << s);
4030     return imm5 << ImmNEON5_offset;
4031   }
4032 
ImmNEON4(Instr format,int index)4033   static Instr ImmNEON4(Instr format, int index) {
4034     VIXL_ASSERT(IsUint4(index));
4035     int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
4036     int imm4 = index << s;
4037     return imm4 << ImmNEON4_offset;
4038   }
4039 
ImmNEONabcdefgh(int imm8)4040   static Instr ImmNEONabcdefgh(int imm8) {
4041     VIXL_ASSERT(IsUint8(imm8));
4042     Instr instr;
4043     instr = ((imm8 >> 5) & 7) << ImmNEONabc_offset;
4044     instr |= (imm8 & 0x1f) << ImmNEONdefgh_offset;
4045     return instr;
4046   }
4047 
NEONCmode(int cmode)4048   static Instr NEONCmode(int cmode) {
4049     VIXL_ASSERT(IsUint4(cmode));
4050     return cmode << NEONCmode_offset;
4051   }
4052 
NEONModImmOp(int op)4053   static Instr NEONModImmOp(int op) {
4054     VIXL_ASSERT(IsUint1(op));
4055     return op << NEONModImmOp_offset;
4056   }
4057 
4058   // Size of the code generated since label to the current position.
GetSizeOfCodeGeneratedSince(Label * label)4059   size_t GetSizeOfCodeGeneratedSince(Label* label) const {
4060     VIXL_ASSERT(label->IsBound());
4061     return GetBuffer().GetOffsetFrom(label->GetLocation());
4062   }
  // Deprecated alias; use GetSizeOfCodeGeneratedSince instead.
  VIXL_DEPRECATED("GetSizeOfCodeGeneratedSince",
                  size_t SizeOfCodeGeneratedSince(Label* label) const) {
    return GetSizeOfCodeGeneratedSince(label);
  }

  // Deprecated aliases; query the underlying buffer directly instead, as the
  // replacement names suggest.
  VIXL_DEPRECATED("GetBuffer().GetCapacity()",
                  size_t GetBufferCapacity() const) {
    return GetBuffer().GetCapacity();
  }
  VIXL_DEPRECATED("GetBuffer().GetCapacity()", size_t BufferCapacity() const) {
    return GetBuffer().GetCapacity();
  }

  VIXL_DEPRECATED("GetBuffer().GetRemainingBytes()",
                  size_t GetRemainingBufferSpace() const) {
    return GetBuffer().GetRemainingBytes();
  }
  VIXL_DEPRECATED("GetBuffer().GetRemainingBytes()",
                  size_t RemainingBufferSpace() const) {
    return GetBuffer().GetRemainingBytes();
  }
4084 
  // Return the position-independent-code option this assembler was
  // configured with (stored in pic_).
  PositionIndependentCodeOption GetPic() const { return pic_; }
  VIXL_DEPRECATED("GetPic", PositionIndependentCodeOption pic() const) {
    return GetPic();
  }

  // Accessors for the set of CPU features this assembler may rely on; used by
  // the CPUHas() checks.
  CPUFeatures* GetCPUFeatures() { return &cpu_features_; }

  void SetCPUFeatures(const CPUFeatures& cpu_features) {
    cpu_features_ = cpu_features;
  }
4095 
AllowPageOffsetDependentCode()4096   bool AllowPageOffsetDependentCode() const {
4097     return (GetPic() == PageOffsetDependentCode) ||
4098            (GetPic() == PositionDependentCode);
4099   }
4100 
AppropriateZeroRegFor(const CPURegister & reg)4101   static Register AppropriateZeroRegFor(const CPURegister& reg) {
4102     return reg.Is64Bits() ? Register(xzr) : Register(wzr);
4103   }
4104 
4105  protected:
4106   void LoadStore(const CPURegister& rt,
4107                  const MemOperand& addr,
4108                  LoadStoreOp op,
4109                  LoadStoreScalingOption option = PreferScaledOffset);
4110 
4111   void LoadStorePair(const CPURegister& rt,
4112                      const CPURegister& rt2,
4113                      const MemOperand& addr,
4114                      LoadStorePairOp op);
4115   void LoadStoreStruct(const VRegister& vt,
4116                        const MemOperand& addr,
4117                        NEONLoadStoreMultiStructOp op);
4118   void LoadStoreStruct1(const VRegister& vt,
4119                         int reg_count,
4120                         const MemOperand& addr);
4121   void LoadStoreStructSingle(const VRegister& vt,
4122                              uint32_t lane,
4123                              const MemOperand& addr,
4124                              NEONLoadStoreSingleStructOp op);
4125   void LoadStoreStructSingleAllLanes(const VRegister& vt,
4126                                      const MemOperand& addr,
4127                                      NEONLoadStoreSingleStructOp op);
4128   void LoadStoreStructVerify(const VRegister& vt,
4129                              const MemOperand& addr,
4130                              Instr op);
4131 
4132   void Prefetch(PrefetchOperation op,
4133                 const MemOperand& addr,
4134                 LoadStoreScalingOption option = PreferScaledOffset);
4135 
4136   // TODO(all): The third parameter should be passed by reference but gcc 4.8.2
4137   // reports a bogus uninitialised warning then.
4138   void Logical(const Register& rd,
4139                const Register& rn,
4140                const Operand operand,
4141                LogicalOp op);
4142   void LogicalImmediate(const Register& rd,
4143                         const Register& rn,
4144                         unsigned n,
4145                         unsigned imm_s,
4146                         unsigned imm_r,
4147                         LogicalOp op);
4148 
4149   void ConditionalCompare(const Register& rn,
4150                           const Operand& operand,
4151                           StatusFlags nzcv,
4152                           Condition cond,
4153                           ConditionalCompareOp op);
4154 
4155   void AddSubWithCarry(const Register& rd,
4156                        const Register& rn,
4157                        const Operand& operand,
4158                        FlagsUpdate S,
4159                        AddSubWithCarryOp op);
4160 
4161 
4162   // Functions for emulating operands not directly supported by the instruction
4163   // set.
4164   void EmitShift(const Register& rd,
4165                  const Register& rn,
4166                  Shift shift,
4167                  unsigned amount);
4168   void EmitExtendShift(const Register& rd,
4169                        const Register& rn,
4170                        Extend extend,
4171                        unsigned left_shift);
4172 
4173   void AddSub(const Register& rd,
4174               const Register& rn,
4175               const Operand& operand,
4176               FlagsUpdate S,
4177               AddSubOp op);
4178 
4179   void NEONTable(const VRegister& vd,
4180                  const VRegister& vn,
4181                  const VRegister& vm,
4182                  NEONTableOp op);
4183 
4184   // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
4185   // registers. Only simple loads are supported; sign- and zero-extension (such
4186   // as in LDPSW_x or LDRB_w) are not supported.
4187   static LoadStoreOp LoadOpFor(const CPURegister& rt);
4188   static LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
4189                                        const CPURegister& rt2);
4190   static LoadStoreOp StoreOpFor(const CPURegister& rt);
4191   static LoadStorePairOp StorePairOpFor(const CPURegister& rt,
4192                                         const CPURegister& rt2);
4193   static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
4194       const CPURegister& rt, const CPURegister& rt2);
4195   static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
4196       const CPURegister& rt, const CPURegister& rt2);
4197   static LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
4198 
  // Convenience pass-through for CPU feature checks. Unused feature slots
  // default to CPUFeatures::kNone, so one to four features may be queried in
  // a single call.
  bool CPUHas(CPUFeatures::Feature feature0,
              CPUFeatures::Feature feature1 = CPUFeatures::kNone,
              CPUFeatures::Feature feature2 = CPUFeatures::kNone,
              CPUFeatures::Feature feature3 = CPUFeatures::kNone) const {
    return cpu_features_.Has(feature0, feature1, feature2, feature3);
  }
4206 
4207   // Determine whether the target CPU has the specified registers, based on the
4208   // currently-enabled CPU features. Presence of a register does not imply
4209   // support for arbitrary operations on it. For example, CPUs with FP have H
4210   // registers, but most half-precision operations require the FPHalf feature.
4211   //
4212   // These are used to check CPU features in loads and stores that have the same
4213   // entry point for both integer and FP registers.
4214   bool CPUHas(const CPURegister& rt) const;
4215   bool CPUHas(const CPURegister& rt, const CPURegister& rt2) const;
4216 
4217  private:
4218   static uint32_t FP16ToImm8(Float16 imm);
4219   static uint32_t FP32ToImm8(float imm);
4220   static uint32_t FP64ToImm8(double imm);
4221 
4222   // Instruction helpers.
4223   void MoveWide(const Register& rd,
4224                 uint64_t imm,
4225                 int shift,
4226                 MoveWideImmediateOp mov_op);
4227   void DataProcShiftedRegister(const Register& rd,
4228                                const Register& rn,
4229                                const Operand& operand,
4230                                FlagsUpdate S,
4231                                Instr op);
4232   void DataProcExtendedRegister(const Register& rd,
4233                                 const Register& rn,
4234                                 const Operand& operand,
4235                                 FlagsUpdate S,
4236                                 Instr op);
4237   void LoadStorePairNonTemporal(const CPURegister& rt,
4238                                 const CPURegister& rt2,
4239                                 const MemOperand& addr,
4240                                 LoadStorePairNonTemporalOp op);
4241   void LoadLiteral(const CPURegister& rt, uint64_t imm, LoadLiteralOp op);
4242   void ConditionalSelect(const Register& rd,
4243                          const Register& rn,
4244                          const Register& rm,
4245                          Condition cond,
4246                          ConditionalSelectOp op);
4247   void DataProcessing1Source(const Register& rd,
4248                              const Register& rn,
4249                              DataProcessing1SourceOp op);
4250   void DataProcessing3Source(const Register& rd,
4251                              const Register& rn,
4252                              const Register& rm,
4253                              const Register& ra,
4254                              DataProcessing3SourceOp op);
4255   void FPDataProcessing1Source(const VRegister& fd,
4256                                const VRegister& fn,
4257                                FPDataProcessing1SourceOp op);
4258   void FPDataProcessing3Source(const VRegister& fd,
4259                                const VRegister& fn,
4260                                const VRegister& fm,
4261                                const VRegister& fa,
4262                                FPDataProcessing3SourceOp op);
4263   void NEONAcrossLanesL(const VRegister& vd,
4264                         const VRegister& vn,
4265                         NEONAcrossLanesOp op);
4266   void NEONAcrossLanes(const VRegister& vd,
4267                        const VRegister& vn,
4268                        NEONAcrossLanesOp op,
4269                        Instr op_half);
4270   void NEONModifiedImmShiftLsl(const VRegister& vd,
4271                                const int imm8,
4272                                const int left_shift,
4273                                NEONModifiedImmediateOp op);
4274   void NEONModifiedImmShiftMsl(const VRegister& vd,
4275                                const int imm8,
4276                                const int shift_amount,
4277                                NEONModifiedImmediateOp op);
4278   void NEONFP2Same(const VRegister& vd, const VRegister& vn, Instr vop);
4279   void NEON3Same(const VRegister& vd,
4280                  const VRegister& vn,
4281                  const VRegister& vm,
4282                  NEON3SameOp vop);
4283   void NEON3SameFP16(const VRegister& vd,
4284                      const VRegister& vn,
4285                      const VRegister& vm,
4286                      Instr op);
4287   void NEONFP3Same(const VRegister& vd,
4288                    const VRegister& vn,
4289                    const VRegister& vm,
4290                    Instr op);
4291   void NEON3DifferentL(const VRegister& vd,
4292                        const VRegister& vn,
4293                        const VRegister& vm,
4294                        NEON3DifferentOp vop);
4295   void NEON3DifferentW(const VRegister& vd,
4296                        const VRegister& vn,
4297                        const VRegister& vm,
4298                        NEON3DifferentOp vop);
4299   void NEON3DifferentHN(const VRegister& vd,
4300                         const VRegister& vn,
4301                         const VRegister& vm,
4302                         NEON3DifferentOp vop);
4303   void NEONFP2RegMisc(const VRegister& vd,
4304                       const VRegister& vn,
4305                       NEON2RegMiscOp vop,
4306                       double value = 0.0);
4307   void NEONFP2RegMiscFP16(const VRegister& vd,
4308                           const VRegister& vn,
4309                           NEON2RegMiscFP16Op vop,
4310                           double value = 0.0);
4311   void NEON2RegMisc(const VRegister& vd,
4312                     const VRegister& vn,
4313                     NEON2RegMiscOp vop,
4314                     int value = 0);
4315   void NEONFP2RegMisc(const VRegister& vd, const VRegister& vn, Instr op);
4316   void NEONFP2RegMiscFP16(const VRegister& vd, const VRegister& vn, Instr op);
4317   void NEONAddlp(const VRegister& vd, const VRegister& vn, NEON2RegMiscOp op);
4318   void NEONPerm(const VRegister& vd,
4319                 const VRegister& vn,
4320                 const VRegister& vm,
4321                 NEONPermOp op);
4322   void NEONFPByElement(const VRegister& vd,
4323                        const VRegister& vn,
4324                        const VRegister& vm,
4325                        int vm_index,
4326                        NEONByIndexedElementOp op,
4327                        NEONByIndexedElementOp op_half);
4328   void NEONByElement(const VRegister& vd,
4329                      const VRegister& vn,
4330                      const VRegister& vm,
4331                      int vm_index,
4332                      NEONByIndexedElementOp op);
4333   void NEONByElementL(const VRegister& vd,
4334                       const VRegister& vn,
4335                       const VRegister& vm,
4336                       int vm_index,
4337                       NEONByIndexedElementOp op);
4338   void NEONShiftImmediate(const VRegister& vd,
4339                           const VRegister& vn,
4340                           NEONShiftImmediateOp op,
4341                           int immh_immb);
4342   void NEONShiftLeftImmediate(const VRegister& vd,
4343                               const VRegister& vn,
4344                               int shift,
4345                               NEONShiftImmediateOp op);
4346   void NEONShiftRightImmediate(const VRegister& vd,
4347                                const VRegister& vn,
4348                                int shift,
4349                                NEONShiftImmediateOp op);
4350   void NEONShiftImmediateL(const VRegister& vd,
4351                            const VRegister& vn,
4352                            int shift,
4353                            NEONShiftImmediateOp op);
4354   void NEONShiftImmediateN(const VRegister& vd,
4355                            const VRegister& vn,
4356                            int shift,
4357                            NEONShiftImmediateOp op);
4358   void NEONXtn(const VRegister& vd, const VRegister& vn, NEON2RegMiscOp vop);
4359 
4360   Instr LoadStoreStructAddrModeField(const MemOperand& addr);
4361 
4362   // Encode the specified MemOperand for the specified access size and scaling
4363   // preference.
4364   Instr LoadStoreMemOperand(const MemOperand& addr,
4365                             unsigned access_size,
4366                             LoadStoreScalingOption option);
4367 
4368   // Link the current (not-yet-emitted) instruction to the specified label, then
4369   // return an offset to be encoded in the instruction. If the label is not yet
4370   // bound, an offset of 0 is returned.
4371   ptrdiff_t LinkAndGetByteOffsetTo(Label* label);
4372   ptrdiff_t LinkAndGetInstructionOffsetTo(Label* label);
4373   ptrdiff_t LinkAndGetPageOffsetTo(Label* label);
4374 
4375   // A common implementation for the LinkAndGet<Type>OffsetTo helpers.
4376   template <int element_shift>
4377   ptrdiff_t LinkAndGetOffsetTo(Label* label);
4378 
4379   // Literal load offset are in words (32-bit).
4380   ptrdiff_t LinkAndGetWordOffsetTo(RawLiteral* literal);
4381 
  // Emit the instruction in buffer_.
  void Emit(Instr instruction) {
    // Instr must be exactly one instruction wide, since Emit32 writes it as a
    // single 32-bit value.
    VIXL_STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
    // Emission is only legal while the assembler is allowed to assemble; see
    // AllowAssembler().
    VIXL_ASSERT(AllowAssembler());
    GetBuffer()->Emit32(instruction);
  }
4388 
  // Position-independence option, reported by GetPic().
  PositionIndependentCodeOption pic_;

  // CPU features available to generated code; queried via CPUHas() and
  // exposed through GetCPUFeatures()/SetCPUFeatures().
  CPUFeatures cpu_features_;
4392 };
4393 
4394 
4395 template <typename T>
UpdateValue(T new_value,const Assembler * assembler)4396 void Literal<T>::UpdateValue(T new_value, const Assembler* assembler) {
4397   return UpdateValue(new_value,
4398                      assembler->GetBuffer().GetStartAddress<uint8_t*>());
4399 }
4400 
4401 
4402 template <typename T>
UpdateValue(T high64,T low64,const Assembler * assembler)4403 void Literal<T>::UpdateValue(T high64, T low64, const Assembler* assembler) {
4404   return UpdateValue(high64,
4405                      low64,
4406                      assembler->GetBuffer().GetStartAddress<uint8_t*>());
4407 }
4408 
4409 
4410 }  // namespace aarch64
4411 
4412 // Required InvalSet template specialisations.
4413 // TODO: These template specialisations should not live in this file.  Move
4414 // Label out of the aarch64 namespace in order to share its implementation
4415 // later.
4416 #define INVAL_SET_TEMPLATE_PARAMETERS                                \
4417   ptrdiff_t, aarch64::Label::kNPreallocatedLinks, ptrdiff_t,         \
4418       aarch64::Label::kInvalidLinkKey, aarch64::Label::kReclaimFrom, \
4419       aarch64::Label::kReclaimFactor
// Label link elements are stored as raw buffer offsets, so each element is
// its own key.
template <>
inline ptrdiff_t InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::GetKey(
    const ptrdiff_t& element) {
  return element;
}
// Since the element is its own key, setting the key simply overwrites the
// stored offset.
template <>
inline void InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::SetKey(ptrdiff_t* element,
                                                            ptrdiff_t key) {
  *element = key;
}
4430 #undef INVAL_SET_TEMPLATE_PARAMETERS
4431 
4432 }  // namespace vixl
4433 
4434 #endif  // VIXL_AARCH64_ASSEMBLER_AARCH64_H_
4435