// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
#define VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_

#include <algorithm>
#include <limits>

#include "../code-generation-scopes-vixl.h"
#include "../globals-vixl.h"
#include "../macro-assembler-interface.h"

#include "assembler-aarch64.h"
#include "instrument-aarch64.h"
// Required for runtime call support.
// TODO: Break this dependency. We should be able to separate out the necessary
// parts so that we don't need to include the whole simulator header.
#include "simulator-aarch64.h"
// Required in order to generate debugging instructions for the simulator. This
// is needed regardless of whether the simulator is included or not, since
// generating simulator specific instructions is controlled at runtime.
#include "simulator-constants-aarch64.h"


#define LS_MACRO_LIST(V)                                     \
  V(Ldrb, Register&, rt, LDRB_w)                             \
  V(Strb, Register&, rt, STRB_w)                             \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w)                             \
  V(Strh, Register&, rt, STRH_w)                             \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
  V(Ldrsw, Register&, rt, LDRSW_x)


#define LSPAIR_MACRO_LIST(V)                             \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)

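// Each entry in these X-macro lists is expanded by a caller-supplied macro
// `V`. For example, the MacroAssembler below instantiates LS_MACRO_LIST with a
// DECLARE_FUNCTION macro, so the Ldrb entry expands to roughly:
//
//   void Ldrb(const Register& rt, const MemOperand& addr);
//
// and the final field (here LDRB_w) names the LoadStoreOp used by the
// generated definition.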
namespace vixl {
namespace aarch64 {

// Forward declaration
class MacroAssembler;
class UseScratchRegisterScope;

class Pool {
 public:
  explicit Pool(MacroAssembler* masm)
      : checkpoint_(kNoCheckpointRequired), masm_(masm) {
    Reset();
  }

  void Reset() {
    checkpoint_ = kNoCheckpointRequired;
    monitor_ = 0;
  }

  void Block() { monitor_++; }
  void Release();
  bool IsBlocked() const { return monitor_ != 0; }

  static const ptrdiff_t kNoCheckpointRequired = PTRDIFF_MAX;

  void SetNextCheckpoint(ptrdiff_t checkpoint);
  ptrdiff_t GetCheckpoint() const { return checkpoint_; }
  VIXL_DEPRECATED("GetCheckpoint", ptrdiff_t checkpoint() const) {
    return GetCheckpoint();
  }

  enum EmitOption { kBranchRequired, kNoBranchRequired };

 protected:
  // Next buffer offset at which a check is required for this pool.
  ptrdiff_t checkpoint_;
  // Indicates whether the emission of this pool is blocked.
  int monitor_;
  // The MacroAssembler using this pool.
  MacroAssembler* masm_;
};


class LiteralPool : public Pool {
 public:
  explicit LiteralPool(MacroAssembler* masm);
  ~LiteralPool();
  void Reset();

  void AddEntry(RawLiteral* literal);
  bool IsEmpty() const { return entries_.empty(); }
  size_t GetSize() const;
  VIXL_DEPRECATED("GetSize", size_t Size() const) { return GetSize(); }

  size_t GetMaxSize() const;
  VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); }

  size_t GetOtherPoolsMaxSize() const;
  VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) {
    return GetOtherPoolsMaxSize();
  }

  void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired);
  // Check whether we need to emit the literal pool in order to be able to
  // safely emit a branch with a given range.
  void CheckEmitForBranch(size_t range);
  void Emit(EmitOption option = kNoBranchRequired);

  void SetNextRecommendedCheckpoint(ptrdiff_t offset);
  ptrdiff_t GetNextRecommendedCheckpoint();
  VIXL_DEPRECATED("GetNextRecommendedCheckpoint",
                  ptrdiff_t NextRecommendedCheckpoint()) {
    return GetNextRecommendedCheckpoint();
  }

  void UpdateFirstUse(ptrdiff_t use_position);

  void DeleteOnDestruction(RawLiteral* literal) {
    deleted_on_destruction_.push_back(literal);
  }

  // Recommended not exact since the pool can be blocked for short periods.
  static const ptrdiff_t kRecommendedLiteralPoolRange = 128 * KBytes;

 private:
  std::vector<RawLiteral*> entries_;
  size_t size_;
  ptrdiff_t first_use_;
  // The parent class `Pool` provides a `checkpoint_`, which is the buffer
  // offset before which a check *must* occur. This recommended checkpoint
  // indicates when we would like to start emitting the constant pool. The
  // MacroAssembler can, but does not have to, check the buffer when the
  // checkpoint is reached.
  ptrdiff_t recommended_checkpoint_;

  std::vector<RawLiteral*> deleted_on_destruction_;
};


inline size_t LiteralPool::GetSize() const {
  // Account for the pool header.
  return size_ + kInstructionSize;
}


inline size_t LiteralPool::GetMaxSize() const {
  // Account for the potential branch over the pool.
  return GetSize() + kInstructionSize;
}
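
// Worked example for the two accessors above (assuming the 4-byte AArch64
// kInstructionSize): a pool holding two 8-byte literals has size_ == 16, so
// GetSize() returns 16 + 4 = 20 bytes (data plus the pool header) and
// GetMaxSize() returns 20 + 4 = 24 bytes, allowing for a branch over the pool.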


inline ptrdiff_t LiteralPool::GetNextRecommendedCheckpoint() {
  return first_use_ + kRecommendedLiteralPoolRange;
}


class VeneerPool : public Pool {
 public:
  explicit VeneerPool(MacroAssembler* masm) : Pool(masm) {}

  void Reset();

  void Block() { monitor_++; }
  void Release();
  bool IsBlocked() const { return monitor_ != 0; }
  bool IsEmpty() const { return unresolved_branches_.IsEmpty(); }

  class BranchInfo {
   public:
    BranchInfo()
        : first_unreacheable_pc_(0),
          pc_offset_(0),
          label_(NULL),
          branch_type_(UnknownBranchType) {}
    BranchInfo(ptrdiff_t offset, Label* label, ImmBranchType branch_type)
        : pc_offset_(offset), label_(label), branch_type_(branch_type) {
      first_unreacheable_pc_ =
          pc_offset_ + Instruction::GetImmBranchForwardRange(branch_type_);
    }

    static bool IsValidComparison(const BranchInfo& branch_1,
                                  const BranchInfo& branch_2) {
      // BranchInfo objects are always compared against other objects with the
      // same branch type.
      if (branch_1.branch_type_ != branch_2.branch_type_) {
        return false;
      }
      // Since we should never have two branch infos with the same offsets, it
      // might seem that we should check that the offsets are different.
      // However, the operators may also be used to *search* for a branch info
      // in the set.
      bool same_offsets = (branch_1.pc_offset_ == branch_2.pc_offset_);
      return (!same_offsets || ((branch_1.label_ == branch_2.label_) &&
                                (branch_1.first_unreacheable_pc_ ==
                                 branch_2.first_unreacheable_pc_)));
    }

    // We must provide comparison operators to work with InvalSet.
    bool operator==(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ == other.pc_offset_;
    }
    bool operator<(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ < other.pc_offset_;
    }
    bool operator<=(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ <= other.pc_offset_;
    }
    bool operator>(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ > other.pc_offset_;
    }

    // First instruction position that is not reachable by the branch using a
    // positive branch offset.
    ptrdiff_t first_unreacheable_pc_;
    // Offset of the branch in the code generation buffer.
    ptrdiff_t pc_offset_;
    // The label branched to.
    Label* label_;
    ImmBranchType branch_type_;
  };

  bool BranchTypeUsesVeneers(ImmBranchType type) {
    return (type != UnknownBranchType) && (type != UncondBranchType);
  }

  void RegisterUnresolvedBranch(ptrdiff_t branch_pos,
                                Label* label,
                                ImmBranchType branch_type);
  void DeleteUnresolvedBranchInfoForLabel(Label* label);

  bool ShouldEmitVeneer(int64_t first_unreacheable_pc, size_t amount);
  bool ShouldEmitVeneers(size_t amount) {
    return ShouldEmitVeneer(unresolved_branches_.GetFirstLimit(), amount);
  }

  void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired);
  void Emit(EmitOption option, size_t margin);

  // The code size generated for a veneer. Currently one branch instruction.
  // This is for code size checking purposes, and can be extended in the future
  // for example if we decide to add nops between the veneers.
  static const int kVeneerCodeSize = 1 * kInstructionSize;
  // The maximum size of code other than veneers that can be generated when
  // emitting a veneer pool. Currently there can be an additional branch to jump
  // over the pool.
  static const int kPoolNonVeneerCodeSize = 1 * kInstructionSize;
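  // Worked example: with the 4-byte kInstructionSize, a veneer pool emitted
  // while three branches are unresolved needs at most
  // kPoolNonVeneerCodeSize + 3 * kVeneerCodeSize = 4 + 12 = 16 bytes, which is
  // exactly what GetMaxSize() below computes.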

  void UpdateNextCheckPoint() { SetNextCheckpoint(GetNextCheckPoint()); }

  int GetNumberOfPotentialVeneers() const {
    return static_cast<int>(unresolved_branches_.GetSize());
  }
  VIXL_DEPRECATED("GetNumberOfPotentialVeneers",
                  int NumberOfPotentialVeneers() const) {
    return GetNumberOfPotentialVeneers();
  }

  size_t GetMaxSize() const {
    return kPoolNonVeneerCodeSize +
           unresolved_branches_.GetSize() * kVeneerCodeSize;
  }
  VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); }

  size_t GetOtherPoolsMaxSize() const;
  VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) {
    return GetOtherPoolsMaxSize();
  }

  static const int kNPreallocatedInfos = 4;
  static const ptrdiff_t kInvalidOffset = PTRDIFF_MAX;
  static const size_t kReclaimFrom = 128;
  static const size_t kReclaimFactor = 16;

 private:
  typedef InvalSet<BranchInfo,
                   kNPreallocatedInfos,
                   ptrdiff_t,
                   kInvalidOffset,
                   kReclaimFrom,
                   kReclaimFactor>
      BranchInfoTypedSetBase;
  typedef InvalSetIterator<BranchInfoTypedSetBase> BranchInfoTypedSetIterBase;

  class BranchInfoTypedSet : public BranchInfoTypedSetBase {
   public:
    BranchInfoTypedSet() : BranchInfoTypedSetBase() {}

    ptrdiff_t GetFirstLimit() {
      if (empty()) {
        return kInvalidOffset;
      }
      return GetMinElementKey();
    }
    VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) {
      return GetFirstLimit();
    }
  };

  class BranchInfoTypedSetIterator : public BranchInfoTypedSetIterBase {
   public:
    BranchInfoTypedSetIterator() : BranchInfoTypedSetIterBase(NULL) {}
    explicit BranchInfoTypedSetIterator(BranchInfoTypedSet* typed_set)
        : BranchInfoTypedSetIterBase(typed_set) {}

    // TODO: Remove these and use the STL-like interface instead.
    using BranchInfoTypedSetIterBase::Advance;
    using BranchInfoTypedSetIterBase::Current;
  };

  class BranchInfoSet {
   public:
    void insert(BranchInfo branch_info) {
      ImmBranchType type = branch_info.branch_type_;
      VIXL_ASSERT(IsValidBranchType(type));
      typed_set_[BranchIndexFromType(type)].insert(branch_info);
    }

    void erase(BranchInfo branch_info) {
      if (IsValidBranchType(branch_info.branch_type_)) {
        int index =
            BranchInfoSet::BranchIndexFromType(branch_info.branch_type_);
        typed_set_[index].erase(branch_info);
      }
    }

    size_t GetSize() const {
      size_t res = 0;
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        res += typed_set_[i].size();
      }
      return res;
    }
    VIXL_DEPRECATED("GetSize", size_t size() const) { return GetSize(); }

    bool IsEmpty() const {
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        if (!typed_set_[i].empty()) {
          return false;
        }
      }
      return true;
    }
    VIXL_DEPRECATED("IsEmpty", bool empty() const) { return IsEmpty(); }

    ptrdiff_t GetFirstLimit() {
      ptrdiff_t res = kInvalidOffset;
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        res = std::min(res, typed_set_[i].GetFirstLimit());
      }
      return res;
    }
    VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) {
      return GetFirstLimit();
    }

    void Reset() {
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        typed_set_[i].clear();
      }
    }

    static ImmBranchType BranchTypeFromIndex(int index) {
      switch (index) {
        case 0:
          return CondBranchType;
        case 1:
          return CompareBranchType;
        case 2:
          return TestBranchType;
        default:
          VIXL_UNREACHABLE();
          return UnknownBranchType;
      }
    }
    static int BranchIndexFromType(ImmBranchType branch_type) {
      switch (branch_type) {
        case CondBranchType:
          return 0;
        case CompareBranchType:
          return 1;
        case TestBranchType:
          return 2;
        default:
          VIXL_UNREACHABLE();
          return 0;
      }
    }

    bool IsValidBranchType(ImmBranchType branch_type) {
      return (branch_type != UnknownBranchType) &&
             (branch_type != UncondBranchType);
    }

   private:
    static const int kNumberOfTrackedBranchTypes = 3;
    BranchInfoTypedSet typed_set_[kNumberOfTrackedBranchTypes];

    friend class VeneerPool;
    friend class BranchInfoSetIterator;
  };

  class BranchInfoSetIterator {
   public:
    explicit BranchInfoSetIterator(BranchInfoSet* set) : set_(set) {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        new (&sub_iterator_[i])
            BranchInfoTypedSetIterator(&(set_->typed_set_[i]));
      }
    }

    VeneerPool::BranchInfo* Current() {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          return sub_iterator_[i].Current();
        }
      }
      VIXL_UNREACHABLE();
      return NULL;
    }

    void Advance() {
      VIXL_ASSERT(!Done());
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          sub_iterator_[i].Advance();
          return;
        }
      }
      VIXL_UNREACHABLE();
    }

    bool Done() const {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) return false;
      }
      return true;
    }

    void AdvanceToNextType() {
      VIXL_ASSERT(!Done());
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          sub_iterator_[i].Finish();
          return;
        }
      }
      VIXL_UNREACHABLE();
    }

    void DeleteCurrentAndAdvance() {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          sub_iterator_[i].DeleteCurrentAndAdvance();
          return;
        }
      }
    }

   private:
    BranchInfoSet* set_;
    BranchInfoTypedSetIterator
        sub_iterator_[BranchInfoSet::kNumberOfTrackedBranchTypes];
  };

  ptrdiff_t GetNextCheckPoint() {
    if (unresolved_branches_.IsEmpty()) {
      return kNoCheckpointRequired;
    } else {
      return unresolved_branches_.GetFirstLimit();
    }
  }
  VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) {
    return GetNextCheckPoint();
  }

  // Information about unresolved (forward) branches.
  BranchInfoSet unresolved_branches_;
};


// Helper for common emission checks.
// The macro-instruction maps to a single instruction.
class SingleEmissionCheckScope : public EmissionCheckScope {
 public:
  explicit SingleEmissionCheckScope(MacroAssemblerInterface* masm)
      : EmissionCheckScope(masm, kInstructionSize) {}
};


// The macro-instruction is a "typical" macro-instruction. Typical
// macro-instructions only emit a few instructions, "a few" being defined as 8
// here.
class MacroEmissionCheckScope : public EmissionCheckScope {
 public:
  explicit MacroEmissionCheckScope(MacroAssemblerInterface* masm)
      : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {}

 private:
  static const size_t kTypicalMacroInstructionMaxSize = 8 * kInstructionSize;
};
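
// Usage sketch (Foo is a hypothetical macro, following the pattern of the
// pass-through macros later in this file): a scope is constructed before
// emitting, so the buffer is checked for the required space up front.
//
//   void MacroAssembler::Foo(const Register& rd, const Register& rn) {
//     VIXL_ASSERT(allow_macro_instructions_);
//     SingleEmissionCheckScope guard(this);  // Covers a single instruction.
//     foo(rd, rn);
//   }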


enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those; the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always,
  never,
  // cbz and cbnz
  reg_zero,
  reg_not_zero,
  // tbz and tbnz
  reg_bit_clear,
  reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};


enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };

// The macro assembler supports automatically pre-shifting immediates for
// arithmetic and logical instructions, and then applying a post shift in the
// instruction to undo the modification, in order to reduce the code emitted
// for an operation. For example:
//
//  Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
//
// This optimisation can only be partially applied when the stack pointer is an
// operand or destination, so this enumeration is used to control the shift.
enum PreShiftImmMode {
  kNoShift,          // Don't pre-shift.
  kLimitShiftForSP,  // Limit pre-shift for add/sub extend use.
  kAnyShift          // Allow any pre-shift.
};
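
// Worked example for the comment above: 0xfbef << 1 == 0x1f7de, so a single
// movz of 0xfbef followed by the post-shift `lsl #1` in the add reproduces the
// original immediate. Without the pre-shift, materialising 0x1f7de would need
// an extra movk before the add.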


class MacroAssembler : public Assembler, public MacroAssemblerInterface {
 public:
  explicit MacroAssembler(
      PositionIndependentCodeOption pic = PositionIndependentCode);
  MacroAssembler(size_t capacity,
                 PositionIndependentCodeOption pic = PositionIndependentCode);
  MacroAssembler(byte* buffer,
                 size_t capacity,
                 PositionIndependentCodeOption pic = PositionIndependentCode);
  ~MacroAssembler();

  enum FinalizeOption {
    kFallThrough,  // There may be more code to execute after calling Finalize.
    kUnreachable   // Anything generated after calling Finalize is unreachable.
  };

  virtual vixl::internal::AssemblerBase* AsAssemblerBase() VIXL_OVERRIDE {
    return this;
  }

  // TODO(pools): implement these functions.
  virtual void EmitPoolHeader() VIXL_OVERRIDE {}
  virtual void EmitPoolFooter() VIXL_OVERRIDE {}
  virtual void EmitPaddingBytes(int n) VIXL_OVERRIDE { USE(n); }
  virtual void EmitNopBytes(int n) VIXL_OVERRIDE { USE(n); }

  // Start generating code from the beginning of the buffer, discarding any code
  // and data that has already been emitted into the buffer.
  //
  // In order to avoid any accidental transfer of state, Reset ASSERTs that the
  // constant pool is not blocked.
  void Reset();

  // Finalize a code buffer of generated instructions. This function must be
  // called before executing or copying code from the buffer. By default,
  // anything generated after this should not be reachable (the last instruction
  // generated is an unconditional branch). If you need to generate more code,
  // then set `option` to kFallThrough.
  void FinalizeCode(FinalizeOption option = kUnreachable);


  // Constant generation helpers.
  // These functions return the number of instructions required to move the
  // immediate into the destination register. Also, if the masm pointer is
  // non-null, it generates the code to do so.
  // The two features are implemented using one function to avoid duplication of
  // the logic.
  // The functions can be used to evaluate the cost of synthesizing an
  // immediate using 'mov immediate' instructions. A user might prefer loading
  // a constant from the literal pool instead of using multiple 'mov immediate'
  // instructions.
  static int MoveImmediateHelper(MacroAssembler* masm,
                                 const Register& rd,
                                 uint64_t imm);
  static bool OneInstrMoveImmediateHelper(MacroAssembler* masm,
                                          const Register& dst,
                                          int64_t imm);
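
  // Usage sketch (the immediate is illustrative): passing a NULL masm turns
  // MoveImmediateHelper into a pure cost query, so a caller can compare it
  // with a literal-pool load before emitting anything:
  //
  //   int cost = MoveImmediateHelper(NULL, x0, 0x1234567890abcdef);
  //   if (cost <= 2) {
  //     MoveImmediateHelper(this, x0, 0x1234567890abcdef);  // Emit the moves.
  //   } else {
  //     // Prefer loading the constant from the literal pool instead.
  //   }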


  // Logical macros.
  void And(const Register& rd, const Register& rn, const Operand& operand);
  void Ands(const Register& rd, const Register& rn, const Operand& operand);
  void Bic(const Register& rd, const Register& rn, const Operand& operand);
  void Bics(const Register& rd, const Register& rn, const Operand& operand);
  void Orr(const Register& rd, const Register& rn, const Operand& operand);
  void Orn(const Register& rd, const Register& rn, const Operand& operand);
  void Eor(const Register& rd, const Register& rn, const Operand& operand);
  void Eon(const Register& rd, const Register& rn, const Operand& operand);
  void Tst(const Register& rn, const Operand& operand);
  void LogicalMacro(const Register& rd,
                    const Register& rn,
                    const Operand& operand,
                    LogicalOp op);

  // Add and sub macros.
  void Add(const Register& rd,
           const Register& rn,
           const Operand& operand,
           FlagsUpdate S = LeaveFlags);
  void Adds(const Register& rd, const Register& rn, const Operand& operand);
  void Sub(const Register& rd,
           const Register& rn,
           const Operand& operand,
           FlagsUpdate S = LeaveFlags);
  void Subs(const Register& rd, const Register& rn, const Operand& operand);
  void Cmn(const Register& rn, const Operand& operand);
  void Cmp(const Register& rn, const Operand& operand);
  void Neg(const Register& rd, const Operand& operand);
  void Negs(const Register& rd, const Operand& operand);

  void AddSubMacro(const Register& rd,
                   const Register& rn,
                   const Operand& operand,
                   FlagsUpdate S,
                   AddSubOp op);

  // Add/sub with carry macros.
  void Adc(const Register& rd, const Register& rn, const Operand& operand);
  void Adcs(const Register& rd, const Register& rn, const Operand& operand);
  void Sbc(const Register& rd, const Register& rn, const Operand& operand);
  void Sbcs(const Register& rd, const Register& rn, const Operand& operand);
  void Ngc(const Register& rd, const Operand& operand);
  void Ngcs(const Register& rd, const Operand& operand);
  void AddSubWithCarryMacro(const Register& rd,
                            const Register& rn,
                            const Operand& operand,
                            FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Move macros.
  void Mov(const Register& rd, uint64_t imm);
  void Mov(const Register& rd,
           const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mvn(const Register& rd, uint64_t imm) {
    Mov(rd, (rd.GetSizeInBits() == kXRegSize) ? ~imm : (~imm & kWRegMask));
  }
  void Mvn(const Register& rd, const Operand& operand);

  // Try to move an immediate into the destination register in a single
  // instruction. Returns true on success and updates the contents of dst;
  // otherwise returns false.
  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);

  // Move an immediate into register dst, and return an Operand object for
  // use with a subsequent instruction that accepts a shift. The value moved
  // into dst is not necessarily equal to imm; it may have had a shifting
  // operation applied to it that will be subsequently undone by the shift
  // applied in the Operand.
  Operand MoveImmediateForShiftedOp(const Register& dst,
                                    int64_t imm,
                                    PreShiftImmMode mode);
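
  // Illustrative use (a sketch with assumed values, matching the pre-shift
  // example near PreShiftImmMode above):
  //
  //   Operand imm_op = MoveImmediateForShiftedOp(x16, 0x1f7de, kAnyShift);
  //   // x16 now holds 0xfbef, and imm_op carries the LSL #1 that restores the
  //   // original immediate when imm_op is used as a shifted operand.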

  void Move(const GenericOperand& dst, const GenericOperand& src);

  // Synthesises the address represented by a MemOperand into a register.
  void ComputeAddress(const Register& dst, const MemOperand& mem_op);

  // Conditional macros.
  void Ccmp(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);
  void Ccmn(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);
  void ConditionalCompareMacro(const Register& rn,
                               const Operand& operand,
                               StatusFlags nzcv,
                               Condition cond,
                               ConditionalCompareOp op);

  // On return, the boolean values pointed to will indicate whether `left` and
  // `right` should be synthesised in a temporary register.
  static void GetCselSynthesisInformation(const Register& rd,
                                          const Operand& left,
                                          const Operand& right,
                                          bool* should_synthesise_left,
                                          bool* should_synthesise_right) {
    // Note that the helper does not need to look at the condition.
    CselHelper(NULL,
               rd,
               left,
               right,
               eq,
               should_synthesise_left,
               should_synthesise_right);
  }

  void Csel(const Register& rd,
            const Operand& left,
            const Operand& right,
            Condition cond) {
    CselHelper(this, rd, left, right, cond);
  }

// Load/store macros.
#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void LoadStoreMacro(const CPURegister& rt,
                      const MemOperand& addr,
                      LoadStoreOp op);

#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
  void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void LoadStorePairMacro(const CPURegister& rt,
                          const CPURegister& rt2,
                          const MemOperand& addr,
                          LoadStorePairOp op);

  void Prfm(PrefetchOperation op, const MemOperand& addr);

  // Push or pop up to 4 registers of the same width to or from the stack,
  // using the current stack pointer as set by SetStackPointer.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then it
  // must be aligned to 16 bytes on entry and the total size of the specified
  // registers must also be a multiple of 16 bytes.
  //
  // Even if the current stack pointer is not the system stack pointer (sp),
  // Push (and derived methods) will still modify the system stack pointer in
  // order to comply with ABI rules about accessing memory below the system
  // stack pointer.
  //
  // Other than the registers passed into Pop, the stack pointer and (possibly)
  // the system stack pointer, these methods do not modify any other registers.
  void Push(const CPURegister& src0,
            const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg,
            const CPURegister& src3 = NoReg);
  void Pop(const CPURegister& dst0,
           const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg,
           const CPURegister& dst3 = NoReg);
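
  // Usage sketch (assumes StackPointer() is sp, so the total size must stay a
  // multiple of 16 bytes):
  //
  //   __ Push(x0, x1);  // Pushes x0 then x1, same ordering as Push(x0); Push(x1);
  //   ...
  //   __ Pop(x1, x0);   // Pops in the reverse order, restoring both registers.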

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
  // supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);

  void PushSizeRegList(
      RegList registers,
      unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList(CPURegList(type, reg_size, registers));
  }
  void PopSizeRegList(RegList registers,
                      unsigned reg_size,
                      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList(CPURegList(type, reg_size, registers));
  }
  void PushXRegList(RegList regs) { PushSizeRegList(regs, kXRegSize); }
  void PopXRegList(RegList regs) { PopSizeRegList(regs, kXRegSize); }
  void PushWRegList(RegList regs) { PushSizeRegList(regs, kWRegSize); }
  void PopWRegList(RegList regs) { PopSizeRegList(regs, kWRegSize); }
  void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
  }
  void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
  }
  void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
  }
  void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
  }
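
  // Usage sketch (assumes the CPURegList range constructor): save and restore
  // x19-x22 as one list. The register numbers, not the argument order,
  // determine the memory layout, with higher-numbered registers at higher
  // addresses.
  //
  //   CPURegList saved(CPURegister::kRegister, kXRegSize, 19, 22);
  //   __ PushCPURegList(saved);
  //   ...
  //   __ PopCPURegList(saved);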

  // Push the specified register 'count' times.
  void PushMultipleTimes(int count, Register src);

  // Poke 'src' onto the stack. The offset is in bytes.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then sp
  // must be aligned to 16 bytes.
  void Poke(const Register& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then sp
  // must be aligned to 16 bytes.
  void Peek(const Register& dst, const Operand& offset);

  // Alternative forms of Peek and Poke, taking a RegList or CPURegList that
  // specifies the registers that are to be peeked or poked. Higher-numbered
  // registers are associated with higher memory addresses.
  //
  // (Peek|Poke)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
  // supported.
  //
  // Otherwise, (Peek|Poke)(CPU|X|W|D|S)RegList is preferred.
  void PeekCPURegList(CPURegList registers, int64_t offset) {
    LoadCPURegList(registers, MemOperand(StackPointer(), offset));
  }
  void PokeCPURegList(CPURegList registers, int64_t offset) {
    StoreCPURegList(registers, MemOperand(StackPointer(), offset));
  }

  void PeekSizeRegList(
      RegList registers,
      int64_t offset,
      unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PeekCPURegList(CPURegList(type, reg_size, registers), offset);
  }
  void PokeSizeRegList(
      RegList registers,
      int64_t offset,
      unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PokeCPURegList(CPURegList(type, reg_size, registers), offset);
  }
  void PeekXRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kXRegSize);
  }
  void PokeXRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kXRegSize);
  }
  void PeekWRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kWRegSize);
  }
  void PokeWRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kWRegSize);
  }
  void PeekDRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
  }
  void PokeDRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
  }
  void PeekSRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
  }
  void PokeSRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
  }


  // Claim or drop stack space without actually accessing memory.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then it
  // must be aligned to 16 bytes and the size claimed or dropped must be a
  // multiple of 16 bytes.
  void Claim(const Operand& size);
  void Drop(const Operand& size);
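
  // Usage sketch (offsets in bytes; sp assumed to be the stack pointer, so
  // sizes stay 16-byte aligned):
  //
  //   __ Claim(16);    // Reserve space without touching memory.
  //   __ Poke(x0, 0);
  //   __ Poke(x1, 8);
  //   ...
  //   __ Peek(x1, 8);
  //   __ Peek(x0, 0);
  //   __ Drop(16);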

  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // This method must not be called unless StackPointer() is sp, and it is
  // aligned to 16 bytes.
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // This method must not be called unless StackPointer() is sp, and it is
  // aligned to 16 bytes.
  void PopCalleeSavedRegisters();

  void LoadCPURegList(CPURegList registers, const MemOperand& src);
  void StoreCPURegList(CPURegList registers, const MemOperand& dst);

  // Remaining instructions are simple pass-through calls to the assembler.
  void Adr(const Register& rd, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    adr(rd, label);
  }
  void Adrp(const Register& rd, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    adrp(rd, label);
  }
  void Asr(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    asr(rd, rn, shift);
  }
  void Asr(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    asrv(rd, rn, rm);
  }

  // Branch type inversion relies on these relations.
  VIXL_STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
                     (reg_bit_clear == (reg_bit_set ^ 1)) &&
                     (always == (never ^ 1)));

  BranchType InvertBranchType(BranchType type) {
    if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
      return static_cast<BranchType>(
          InvertCondition(static_cast<Condition>(type)));
    } else {
      return static_cast<BranchType>(type ^ 1);
    }
  }

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
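  // For example, InvertBranchType(reg_zero) is reg_not_zero (the XOR case) and
  // InvertBranchType(integer_eq) is integer_ne (the condition case). The
  // BranchType form of B then selects the underlying instruction; a sketch:
  //
  //   __ B(&done, reg_zero, x0);        // Behaves like Cbz(x0, &done).
  //   __ B(&done, reg_bit_set, x0, 3);  // Behaves like Tbnz(x0, 3, &done).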

  void B(Label* label);
  void B(Label* label, Condition cond);
  void B(Condition cond, Label* label) { B(label, cond); }
  void Bfm(const Register& rd,
           const Register& rn,
           unsigned immr,
           unsigned imms) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfm(rd, rn, immr, imms);
  }
  void Bfi(const Register& rd,
           const Register& rn,
           unsigned lsb,
           unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfi(rd, rn, lsb, width);
  }
  void Bfc(const Register& rd, unsigned lsb, unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    bfc(rd, lsb, width);
  }
  void Bfxil(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfxil(rd, rn, lsb, width);
  }
  void Bind(Label* label);
  // Bind a label to a specified offset from the start of the buffer.
  void BindToOffset(Label* label, ptrdiff_t offset);
  void Bl(Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    bl(label);
  }
  void Blr(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    SingleEmissionCheckScope guard(this);
    blr(xn);
  }
  void Br(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    SingleEmissionCheckScope guard(this);
    br(xn);
  }
  void Braaz(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    braaz(xn);
  }
  void Brabz(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    brabz(xn);
  }
  void Blraaz(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    blraaz(xn);
  }
  void Blrabz(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    blrabz(xn);
  }
  void Retaa() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    retaa();
  }
  void Retab() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    retab();
  }
  void Braa(const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    braa(xn, xm);
  }
  void Brab(const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    brab(xn, xm);
  }
  void Blraa(const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    blraa(xn, xm);
  }
  void Blrab(const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    blrab(xn, xm);
  }
  void Brk(int code = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    brk(code);
  }
  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);
  void Cinc(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cinc(rd, rn, cond);
  }
  void Cinv(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cinv(rd, rn, cond);
  }

#define PAUTH_SYSTEM_MODES(V) \
  V(az)                       \
  V(bz)                       \
  V(asp)                      \
  V(bsp)

#define DEFINE_MACRO_ASM_FUNCS(SUFFIX)      \
  void Paci##SUFFIX() {                     \
    VIXL_ASSERT(allow_macro_instructions_); \
    SingleEmissionCheckScope guard(this);   \
    paci##SUFFIX();                         \
  }                                         \
  void Auti##SUFFIX() {                     \
    VIXL_ASSERT(allow_macro_instructions_); \
    SingleEmissionCheckScope guard(this);   \
    auti##SUFFIX();                         \
  }

  PAUTH_SYSTEM_MODES(DEFINE_MACRO_ASM_FUNCS)
#undef DEFINE_MACRO_ASM_FUNCS
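  // Note: the expansion above declares one Paci<mode>/Auti<mode> pair per
  // PAUTH_SYSTEM_MODES entry, i.e. Paciaz(), Autiaz(), Pacibz(), Autibz(),
  // Paciasp(), Autiasp(), Pacibsp() and Autibsp().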

  // The 1716 pac and aut instructions encourage people to use x16 and x17
  // directly, perhaps without realising that this is forbidden. For example:
  //
  //     UseScratchRegisterScope temps(&masm);
  //     Register temp = temps.AcquireX();  // temp will be x16
  //     __ Mov(x17, ptr);
  //     __ Mov(x16, modifier);  // Will overwrite temp!
  //     __ Pacia1716();
  //
  // To work around this issue, you must exclude x16 and x17 from the scratch
  // register list. You may need to replace them with other registers:
  //
  //     UseScratchRegisterScope temps(&masm);
  //     temps.Exclude(x16, x17);
  //     temps.Include(x10, x11);
  //     __ Mov(x17, ptr);
  //     __ Mov(x16, modifier);
  //     __ Pacia1716();
  void Pacia1716() {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16));
    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17));
    SingleEmissionCheckScope guard(this);
    pacia1716();
  }
Pacib1716()1192   void Pacib1716() {
1193     VIXL_ASSERT(allow_macro_instructions_);
1194     VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16));
1195     VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17));
1196     SingleEmissionCheckScope guard(this);
1197     pacib1716();
1198   }
Autia1716()1199   void Autia1716() {
1200     VIXL_ASSERT(allow_macro_instructions_);
1201     VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16));
1202     VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17));
1203     SingleEmissionCheckScope guard(this);
1204     autia1716();
1205   }
Autib1716()1206   void Autib1716() {
1207     VIXL_ASSERT(allow_macro_instructions_);
1208     VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16));
1209     VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17));
1210     SingleEmissionCheckScope guard(this);
1211     autib1716();
1212   }
Xpaclri()1213   void Xpaclri() {
1214     VIXL_ASSERT(allow_macro_instructions_);
1215     SingleEmissionCheckScope guard(this);
1216     xpaclri();
1217   }
Clrex()1218   void Clrex() {
1219     VIXL_ASSERT(allow_macro_instructions_);
1220     SingleEmissionCheckScope guard(this);
1221     clrex();
1222   }
Cls(const Register & rd,const Register & rn)1223   void Cls(const Register& rd, const Register& rn) {
1224     VIXL_ASSERT(allow_macro_instructions_);
1225     VIXL_ASSERT(!rd.IsZero());
1226     VIXL_ASSERT(!rn.IsZero());
1227     SingleEmissionCheckScope guard(this);
1228     cls(rd, rn);
1229   }
Clz(const Register & rd,const Register & rn)1230   void Clz(const Register& rd, const Register& rn) {
1231     VIXL_ASSERT(allow_macro_instructions_);
1232     VIXL_ASSERT(!rd.IsZero());
1233     VIXL_ASSERT(!rn.IsZero());
1234     SingleEmissionCheckScope guard(this);
1235     clz(rd, rn);
1236   }
Cneg(const Register & rd,const Register & rn,Condition cond)1237   void Cneg(const Register& rd, const Register& rn, Condition cond) {
1238     VIXL_ASSERT(allow_macro_instructions_);
1239     VIXL_ASSERT(!rd.IsZero());
1240     VIXL_ASSERT(!rn.IsZero());
1241     SingleEmissionCheckScope guard(this);
1242     cneg(rd, rn, cond);
1243   }
Esb()1244   void Esb() {
1245     VIXL_ASSERT(allow_macro_instructions_);
1246     SingleEmissionCheckScope guard(this);
1247     esb();
1248   }
Csdb()1249   void Csdb() {
1250     VIXL_ASSERT(allow_macro_instructions_);
1251     SingleEmissionCheckScope guard(this);
1252     csdb();
1253   }
Cset(const Register & rd,Condition cond)1254   void Cset(const Register& rd, Condition cond) {
1255     VIXL_ASSERT(allow_macro_instructions_);
1256     VIXL_ASSERT(!rd.IsZero());
1257     SingleEmissionCheckScope guard(this);
1258     cset(rd, cond);
1259   }
Csetm(const Register & rd,Condition cond)1260   void Csetm(const Register& rd, Condition cond) {
1261     VIXL_ASSERT(allow_macro_instructions_);
1262     VIXL_ASSERT(!rd.IsZero());
1263     SingleEmissionCheckScope guard(this);
1264     csetm(rd, cond);
1265   }
Csinc(const Register & rd,const Register & rn,const Register & rm,Condition cond)1266   void Csinc(const Register& rd,
1267              const Register& rn,
1268              const Register& rm,
1269              Condition cond) {
1270     VIXL_ASSERT(allow_macro_instructions_);
1271     VIXL_ASSERT(!rd.IsZero());
1272     VIXL_ASSERT(!rn.IsZero());
1273     VIXL_ASSERT(!rm.IsZero());
1274     VIXL_ASSERT((cond != al) && (cond != nv));
1275     SingleEmissionCheckScope guard(this);
1276     csinc(rd, rn, rm, cond);
1277   }
Csinv(const Register & rd,const Register & rn,const Register & rm,Condition cond)1278   void Csinv(const Register& rd,
1279              const Register& rn,
1280              const Register& rm,
1281              Condition cond) {
1282     VIXL_ASSERT(allow_macro_instructions_);
1283     VIXL_ASSERT(!rd.IsZero());
1284     VIXL_ASSERT(!rn.IsZero());
1285     VIXL_ASSERT(!rm.IsZero());
1286     VIXL_ASSERT((cond != al) && (cond != nv));
1287     SingleEmissionCheckScope guard(this);
1288     csinv(rd, rn, rm, cond);
1289   }
Csneg(const Register & rd,const Register & rn,const Register & rm,Condition cond)1290   void Csneg(const Register& rd,
1291              const Register& rn,
1292              const Register& rm,
1293              Condition cond) {
1294     VIXL_ASSERT(allow_macro_instructions_);
1295     VIXL_ASSERT(!rd.IsZero());
1296     VIXL_ASSERT(!rn.IsZero());
1297     VIXL_ASSERT(!rm.IsZero());
1298     VIXL_ASSERT((cond != al) && (cond != nv));
1299     SingleEmissionCheckScope guard(this);
1300     csneg(rd, rn, rm, cond);
1301   }
Dmb(BarrierDomain domain,BarrierType type)1302   void Dmb(BarrierDomain domain, BarrierType type) {
1303     VIXL_ASSERT(allow_macro_instructions_);
1304     SingleEmissionCheckScope guard(this);
1305     dmb(domain, type);
1306   }
Dsb(BarrierDomain domain,BarrierType type)1307   void Dsb(BarrierDomain domain, BarrierType type) {
1308     VIXL_ASSERT(allow_macro_instructions_);
1309     SingleEmissionCheckScope guard(this);
1310     dsb(domain, type);
1311   }
Extr(const Register & rd,const Register & rn,const Register & rm,unsigned lsb)1312   void Extr(const Register& rd,
1313             const Register& rn,
1314             const Register& rm,
1315             unsigned lsb) {
1316     VIXL_ASSERT(allow_macro_instructions_);
1317     VIXL_ASSERT(!rd.IsZero());
1318     VIXL_ASSERT(!rn.IsZero());
1319     VIXL_ASSERT(!rm.IsZero());
1320     SingleEmissionCheckScope guard(this);
1321     extr(rd, rn, rm, lsb);
1322   }
Fadd(const VRegister & vd,const VRegister & vn,const VRegister & vm)1323   void Fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1324     VIXL_ASSERT(allow_macro_instructions_);
1325     SingleEmissionCheckScope guard(this);
1326     fadd(vd, vn, vm);
1327   }
1328   void Fccmp(const VRegister& vn,
1329              const VRegister& vm,
1330              StatusFlags nzcv,
1331              Condition cond,
1332              FPTrapFlags trap = DisableTrap) {
1333     VIXL_ASSERT(allow_macro_instructions_);
1334     VIXL_ASSERT((cond != al) && (cond != nv));
1335     SingleEmissionCheckScope guard(this);
1336     FPCCompareMacro(vn, vm, nzcv, cond, trap);
1337   }
1338   void Fccmpe(const VRegister& vn,
1339               const VRegister& vm,
1340               StatusFlags nzcv,
1341               Condition cond) {
1342     Fccmp(vn, vm, nzcv, cond, EnableTrap);
1343   }
1344   void Fcmp(const VRegister& vn,
1345             const VRegister& vm,
1346             FPTrapFlags trap = DisableTrap) {
1347     VIXL_ASSERT(allow_macro_instructions_);
1348     SingleEmissionCheckScope guard(this);
1349     FPCompareMacro(vn, vm, trap);
1350   }
1351   void Fcmp(const VRegister& vn, double value, FPTrapFlags trap = DisableTrap);
1352   void Fcmpe(const VRegister& vn, double value);
1353   void Fcmpe(const VRegister& vn, const VRegister& vm) {
1354     Fcmp(vn, vm, EnableTrap);
1355   }
1356   void Fcsel(const VRegister& vd,
1357              const VRegister& vn,
1358              const VRegister& vm,
1359              Condition cond) {
1360     VIXL_ASSERT(allow_macro_instructions_);
1361     VIXL_ASSERT((cond != al) && (cond != nv));
1362     SingleEmissionCheckScope guard(this);
1363     fcsel(vd, vn, vm, cond);
1364   }
1365   void Fcvt(const VRegister& vd, const VRegister& vn) {
1366     VIXL_ASSERT(allow_macro_instructions_);
1367     SingleEmissionCheckScope guard(this);
1368     fcvt(vd, vn);
1369   }
1370   void Fcvtl(const VRegister& vd, const VRegister& vn) {
1371     VIXL_ASSERT(allow_macro_instructions_);
1372     SingleEmissionCheckScope guard(this);
1373     fcvtl(vd, vn);
1374   }
1375   void Fcvtl2(const VRegister& vd, const VRegister& vn) {
1376     VIXL_ASSERT(allow_macro_instructions_);
1377     SingleEmissionCheckScope guard(this);
1378     fcvtl2(vd, vn);
1379   }
1380   void Fcvtn(const VRegister& vd, const VRegister& vn) {
1381     VIXL_ASSERT(allow_macro_instructions_);
1382     SingleEmissionCheckScope guard(this);
1383     fcvtn(vd, vn);
1384   }
1385   void Fcvtn2(const VRegister& vd, const VRegister& vn) {
1386     VIXL_ASSERT(allow_macro_instructions_);
1387     SingleEmissionCheckScope guard(this);
1388     fcvtn2(vd, vn);
1389   }
1390   void Fcvtxn(const VRegister& vd, const VRegister& vn) {
1391     VIXL_ASSERT(allow_macro_instructions_);
1392     SingleEmissionCheckScope guard(this);
1393     fcvtxn(vd, vn);
1394   }
1395   void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
1396     VIXL_ASSERT(allow_macro_instructions_);
1397     SingleEmissionCheckScope guard(this);
1398     fcvtxn2(vd, vn);
1399   }
1400   void Fcvtas(const Register& rd, const VRegister& vn) {
1401     VIXL_ASSERT(allow_macro_instructions_);
1402     VIXL_ASSERT(!rd.IsZero());
1403     SingleEmissionCheckScope guard(this);
1404     fcvtas(rd, vn);
1405   }
1406   void Fcvtau(const Register& rd, const VRegister& vn) {
1407     VIXL_ASSERT(allow_macro_instructions_);
1408     VIXL_ASSERT(!rd.IsZero());
1409     SingleEmissionCheckScope guard(this);
1410     fcvtau(rd, vn);
1411   }
1412   void Fcvtms(const Register& rd, const VRegister& vn) {
1413     VIXL_ASSERT(allow_macro_instructions_);
1414     VIXL_ASSERT(!rd.IsZero());
1415     SingleEmissionCheckScope guard(this);
1416     fcvtms(rd, vn);
1417   }
1418   void Fcvtmu(const Register& rd, const VRegister& vn) {
1419     VIXL_ASSERT(allow_macro_instructions_);
1420     VIXL_ASSERT(!rd.IsZero());
1421     SingleEmissionCheckScope guard(this);
1422     fcvtmu(rd, vn);
1423   }
1424   void Fcvtns(const Register& rd, const VRegister& vn) {
1425     VIXL_ASSERT(allow_macro_instructions_);
1426     VIXL_ASSERT(!rd.IsZero());
1427     SingleEmissionCheckScope guard(this);
1428     fcvtns(rd, vn);
1429   }
1430   void Fcvtnu(const Register& rd, const VRegister& vn) {
1431     VIXL_ASSERT(allow_macro_instructions_);
1432     VIXL_ASSERT(!rd.IsZero());
1433     SingleEmissionCheckScope guard(this);
1434     fcvtnu(rd, vn);
1435   }
1436   void Fcvtps(const Register& rd, const VRegister& vn) {
1437     VIXL_ASSERT(allow_macro_instructions_);
1438     VIXL_ASSERT(!rd.IsZero());
1439     SingleEmissionCheckScope guard(this);
1440     fcvtps(rd, vn);
1441   }
1442   void Fcvtpu(const Register& rd, const VRegister& vn) {
1443     VIXL_ASSERT(allow_macro_instructions_);
1444     VIXL_ASSERT(!rd.IsZero());
1445     SingleEmissionCheckScope guard(this);
1446     fcvtpu(rd, vn);
1447   }
1448   void Fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0) {
1449     VIXL_ASSERT(allow_macro_instructions_);
1450     VIXL_ASSERT(!rd.IsZero());
1451     SingleEmissionCheckScope guard(this);
1452     fcvtzs(rd, vn, fbits);
1453   }
1454   void Fjcvtzs(const Register& rd, const VRegister& vn) {
1455     VIXL_ASSERT(allow_macro_instructions_);
1456     VIXL_ASSERT(!rd.IsZero());
1457     SingleEmissionCheckScope guard(this);
1458     fjcvtzs(rd, vn);
1459   }
1460   void Fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0) {
1461     VIXL_ASSERT(allow_macro_instructions_);
1462     VIXL_ASSERT(!rd.IsZero());
1463     SingleEmissionCheckScope guard(this);
1464     fcvtzu(rd, vn, fbits);
1465   }
1466   void Fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1467     VIXL_ASSERT(allow_macro_instructions_);
1468     SingleEmissionCheckScope guard(this);
1469     fdiv(vd, vn, vm);
1470   }
1471   void Fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1472     VIXL_ASSERT(allow_macro_instructions_);
1473     SingleEmissionCheckScope guard(this);
1474     fmax(vd, vn, vm);
1475   }
1476   void Fmaxnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1477     VIXL_ASSERT(allow_macro_instructions_);
1478     SingleEmissionCheckScope guard(this);
1479     fmaxnm(vd, vn, vm);
1480   }
1481   void Fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1482     VIXL_ASSERT(allow_macro_instructions_);
1483     SingleEmissionCheckScope guard(this);
1484     fmin(vd, vn, vm);
1485   }
1486   void Fminnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1487     VIXL_ASSERT(allow_macro_instructions_);
1488     SingleEmissionCheckScope guard(this);
1489     fminnm(vd, vn, vm);
1490   }
1491   void Fmov(const VRegister& vd, const VRegister& vn) {
1492     VIXL_ASSERT(allow_macro_instructions_);
1493     SingleEmissionCheckScope guard(this);
1494     // Elide the move only when vd and vn are the same D register; otherwise
1495     // emit it. fmov(s0, s0) is not a no-op because it clears the top word of
1496     // d0. Technically, fmov(d0, d0) is not a no-op either because it clears
1497     // the top of q0, but VRegister does not currently support Q registers.
1498     if (!vd.Is(vn) || !vd.Is64Bits()) {
1499       fmov(vd, vn);
1500     }
1501   }
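  // Illustrative sketch (assuming a MacroAssembler instance named `masm`):
  //   masm.Fmov(d0, d0);  // elided: a D-to-D self-move changes nothing visible.
  //   masm.Fmov(s0, s0);  // still emitted, since it zeroes bits [63:32] of d0.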
1502   void Fmov(const VRegister& vd, const Register& rn) {
1503     VIXL_ASSERT(allow_macro_instructions_);
1504     VIXL_ASSERT(!rn.IsZero());
1505     SingleEmissionCheckScope guard(this);
1506     fmov(vd, rn);
1507   }
1508   void Fmov(const VRegister& vd, const XRegister& xn) {
1509     Fmov(vd, Register(xn));
1510   }
1511   void Fmov(const VRegister& vd, const WRegister& wn) {
1512     Fmov(vd, Register(wn));
1513   }
1514   void Fmov(const VRegister& vd, int index, const Register& rn) {
1515     VIXL_ASSERT(allow_macro_instructions_);
1516     SingleEmissionCheckScope guard(this);
1517     fmov(vd, index, rn);
1518   }
1519   void Fmov(const Register& rd, const VRegister& vn, int index) {
1520     VIXL_ASSERT(allow_macro_instructions_);
1521     SingleEmissionCheckScope guard(this);
1522     fmov(rd, vn, index);
1523   }
1524 
1525   // Provide explicit double and float interfaces for FP immediate moves, rather
1526   // than relying on implicit C++ casts. This allows signalling NaNs to be
1527   // preserved when the immediate matches the format of vd. Most systems convert
1528   // signalling NaNs to quiet NaNs when converting between float and double.
1529   void Fmov(VRegister vd, double imm);
1530   void Fmov(VRegister vd, float imm);
1531   void Fmov(VRegister vd, const Float16 imm);
1532   // Provide a template to allow other types to be converted automatically.
1533   template <typename T>
1534   void Fmov(VRegister vd, T imm) {
1535     VIXL_ASSERT(allow_macro_instructions_);
1536     Fmov(vd, static_cast<double>(imm));
1537   }
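  // Illustrative sketch (assuming a MacroAssembler instance named `masm`):
  //   masm.Fmov(s0, 1.0f);  // float overload; the immediate matches an S register.
  //   masm.Fmov(d0, 1.0);   // double overload; the immediate matches a D register.
  //   masm.Fmov(d1, 2);     // other types funnel through the double overload above.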
1538   void Fmov(Register rd, VRegister vn) {
1539     VIXL_ASSERT(allow_macro_instructions_);
1540     VIXL_ASSERT(!rd.IsZero());
1541     SingleEmissionCheckScope guard(this);
1542     fmov(rd, vn);
1543   }
1544   void Fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1545     VIXL_ASSERT(allow_macro_instructions_);
1546     SingleEmissionCheckScope guard(this);
1547     fmul(vd, vn, vm);
1548   }
1549   void Fnmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1550     VIXL_ASSERT(allow_macro_instructions_);
1551     SingleEmissionCheckScope guard(this);
1552     fnmul(vd, vn, vm);
1553   }
1554   void Fmadd(const VRegister& vd,
1555              const VRegister& vn,
1556              const VRegister& vm,
1557              const VRegister& va) {
1558     VIXL_ASSERT(allow_macro_instructions_);
1559     SingleEmissionCheckScope guard(this);
1560     fmadd(vd, vn, vm, va);
1561   }
1562   void Fmsub(const VRegister& vd,
1563              const VRegister& vn,
1564              const VRegister& vm,
1565              const VRegister& va) {
1566     VIXL_ASSERT(allow_macro_instructions_);
1567     SingleEmissionCheckScope guard(this);
1568     fmsub(vd, vn, vm, va);
1569   }
1570   void Fnmadd(const VRegister& vd,
1571               const VRegister& vn,
1572               const VRegister& vm,
1573               const VRegister& va) {
1574     VIXL_ASSERT(allow_macro_instructions_);
1575     SingleEmissionCheckScope guard(this);
1576     fnmadd(vd, vn, vm, va);
1577   }
1578   void Fnmsub(const VRegister& vd,
1579               const VRegister& vn,
1580               const VRegister& vm,
1581               const VRegister& va) {
1582     VIXL_ASSERT(allow_macro_instructions_);
1583     SingleEmissionCheckScope guard(this);
1584     fnmsub(vd, vn, vm, va);
1585   }
1586   void Fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1587     VIXL_ASSERT(allow_macro_instructions_);
1588     SingleEmissionCheckScope guard(this);
1589     fsub(vd, vn, vm);
1590   }
1591   void Hint(SystemHint code) {
1592     VIXL_ASSERT(allow_macro_instructions_);
1593     SingleEmissionCheckScope guard(this);
1594     hint(code);
1595   }
1596   void Hint(int imm7) {
1597     VIXL_ASSERT(allow_macro_instructions_);
1598     SingleEmissionCheckScope guard(this);
1599     hint(imm7);
1600   }
1601   void Hlt(int code) {
1602     VIXL_ASSERT(allow_macro_instructions_);
1603     SingleEmissionCheckScope guard(this);
1604     hlt(code);
1605   }
1606   void Isb() {
1607     VIXL_ASSERT(allow_macro_instructions_);
1608     SingleEmissionCheckScope guard(this);
1609     isb();
1610   }
1611   void Ldar(const Register& rt, const MemOperand& src) {
1612     VIXL_ASSERT(allow_macro_instructions_);
1613     SingleEmissionCheckScope guard(this);
1614     ldar(rt, src);
1615   }
1616   void Ldarb(const Register& rt, const MemOperand& src) {
1617     VIXL_ASSERT(allow_macro_instructions_);
1618     SingleEmissionCheckScope guard(this);
1619     ldarb(rt, src);
1620   }
1621   void Ldarh(const Register& rt, const MemOperand& src) {
1622     VIXL_ASSERT(allow_macro_instructions_);
1623     SingleEmissionCheckScope guard(this);
1624     ldarh(rt, src);
1625   }
1626   void Ldlar(const Register& rt, const MemOperand& src) {
1627     VIXL_ASSERT(allow_macro_instructions_);
1628     SingleEmissionCheckScope guard(this);
1629     ldlar(rt, src);
1630   }
1631   void Ldlarb(const Register& rt, const MemOperand& src) {
1632     VIXL_ASSERT(allow_macro_instructions_);
1633     SingleEmissionCheckScope guard(this);
1634     ldlarb(rt, src);
1635   }
1636   void Ldlarh(const Register& rt, const MemOperand& src) {
1637     VIXL_ASSERT(allow_macro_instructions_);
1638     SingleEmissionCheckScope guard(this);
1639     ldlarh(rt, src);
1640   }
1641   void Ldaxp(const Register& rt, const Register& rt2, const MemOperand& src) {
1642     VIXL_ASSERT(allow_macro_instructions_);
1643     VIXL_ASSERT(!rt.Aliases(rt2));
1644     SingleEmissionCheckScope guard(this);
1645     ldaxp(rt, rt2, src);
1646   }
1647   void Ldaxr(const Register& rt, const MemOperand& src) {
1648     VIXL_ASSERT(allow_macro_instructions_);
1649     SingleEmissionCheckScope guard(this);
1650     ldaxr(rt, src);
1651   }
1652   void Ldaxrb(const Register& rt, const MemOperand& src) {
1653     VIXL_ASSERT(allow_macro_instructions_);
1654     SingleEmissionCheckScope guard(this);
1655     ldaxrb(rt, src);
1656   }
1657   void Ldaxrh(const Register& rt, const MemOperand& src) {
1658     VIXL_ASSERT(allow_macro_instructions_);
1659     SingleEmissionCheckScope guard(this);
1660     ldaxrh(rt, src);
1661   }
1662 
1663 // clang-format off
1664 #define COMPARE_AND_SWAP_SINGLE_MACRO_LIST(V) \
1665   V(cas,    Cas)                              \
1666   V(casa,   Casa)                             \
1667   V(casl,   Casl)                             \
1668   V(casal,  Casal)                            \
1669   V(casb,   Casb)                             \
1670   V(casab,  Casab)                            \
1671   V(caslb,  Caslb)                            \
1672   V(casalb, Casalb)                           \
1673   V(cash,   Cash)                             \
1674   V(casah,  Casah)                            \
1675   V(caslh,  Caslh)                            \
1676   V(casalh, Casalh)
1677 // clang-format on
1678 
1679 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                     \
1680   void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \
1681     VIXL_ASSERT(allow_macro_instructions_);                                  \
1682     SingleEmissionCheckScope guard(this);                                    \
1683     ASM(rs, rt, src);                                                        \
1684   }
1685   COMPARE_AND_SWAP_SINGLE_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
1686 #undef DEFINE_MACRO_ASM_FUNC
1687 
1688 
1689 // clang-format off
1690 #define COMPARE_AND_SWAP_PAIR_MACRO_LIST(V) \
1691   V(casp,   Casp)                           \
1692   V(caspa,  Caspa)                          \
1693   V(caspl,  Caspl)                          \
1694   V(caspal, Caspal)
1695 // clang-format on
1696 
1697 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM)    \
1698   void MASM(const Register& rs,             \
1699             const Register& rs2,            \
1700             const Register& rt,             \
1701             const Register& rt2,            \
1702             const MemOperand& src) {        \
1703     VIXL_ASSERT(allow_macro_instructions_); \
1704     SingleEmissionCheckScope guard(this);   \
1705     ASM(rs, rs2, rt, rt2, src);             \
1706   }
1707   COMPARE_AND_SWAP_PAIR_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
1708 #undef DEFINE_MACRO_ASM_FUNC
1709 
1710 // These macros generate all the variations of the atomic memory operations,
1711 // e.g. ldadd, ldadda, ldaddb, staddl, etc.
1712 
1713 // clang-format off
1714 #define ATOMIC_MEMORY_SIMPLE_MACRO_LIST(V, DEF, MASM_PRE, ASM_PRE) \
1715   V(DEF, MASM_PRE##add,  ASM_PRE##add)                             \
1716   V(DEF, MASM_PRE##clr,  ASM_PRE##clr)                             \
1717   V(DEF, MASM_PRE##eor,  ASM_PRE##eor)                             \
1718   V(DEF, MASM_PRE##set,  ASM_PRE##set)                             \
1719   V(DEF, MASM_PRE##smax, ASM_PRE##smax)                            \
1720   V(DEF, MASM_PRE##smin, ASM_PRE##smin)                            \
1721   V(DEF, MASM_PRE##umax, ASM_PRE##umax)                            \
1722   V(DEF, MASM_PRE##umin, ASM_PRE##umin)
1723 
1724 #define ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM) \
1725   V(MASM,     ASM)                                    \
1726   V(MASM##l,  ASM##l)                                 \
1727   V(MASM##b,  ASM##b)                                 \
1728   V(MASM##lb, ASM##lb)                                \
1729   V(MASM##h,  ASM##h)                                 \
1730   V(MASM##lh, ASM##lh)
1731 
1732 #define ATOMIC_MEMORY_LOAD_MACRO_MODES(V, MASM, ASM) \
1733   ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM)      \
1734   V(MASM##a,   ASM##a)                               \
1735   V(MASM##al,  ASM##al)                              \
1736   V(MASM##ab,  ASM##ab)                              \
1737   V(MASM##alb, ASM##alb)                             \
1738   V(MASM##ah,  ASM##ah)                              \
1739   V(MASM##alh, ASM##alh)
1740 // clang-format on
1741 
1742 #define DEFINE_MACRO_LOAD_ASM_FUNC(MASM, ASM)                                \
1743   void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \
1744     VIXL_ASSERT(allow_macro_instructions_);                                  \
1745     SingleEmissionCheckScope guard(this);                                    \
1746     ASM(rs, rt, src);                                                        \
1747   }
1748 #define DEFINE_MACRO_STORE_ASM_FUNC(MASM, ASM)           \
1749   void MASM(const Register& rs, const MemOperand& src) { \
1750     VIXL_ASSERT(allow_macro_instructions_);              \
1751     SingleEmissionCheckScope guard(this);                \
1752     ASM(rs, src);                                        \
1753   }
1754 
1755   ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_LOAD_MACRO_MODES,
1756                                   DEFINE_MACRO_LOAD_ASM_FUNC,
1757                                   Ld,
1758                                   ld)
1759   ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_STORE_MACRO_MODES,
1760                                   DEFINE_MACRO_STORE_ASM_FUNC,
1761                                   St,
1762                                   st)
1763 
1764 #define DEFINE_MACRO_SWP_ASM_FUNC(MASM, ASM)                                 \
1765   void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \
1766     VIXL_ASSERT(allow_macro_instructions_);                                  \
1767     SingleEmissionCheckScope guard(this);                                    \
1768     ASM(rs, rt, src);                                                        \
1769   }
1770 
1771   ATOMIC_MEMORY_LOAD_MACRO_MODES(DEFINE_MACRO_SWP_ASM_FUNC, Swp, swp)
1772 
1773 #undef DEFINE_MACRO_LOAD_ASM_FUNC
1774 #undef DEFINE_MACRO_STORE_ASM_FUNC
1775 #undef DEFINE_MACRO_SWP_ASM_FUNC
1776 
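// Illustrative sketch of one expansion (assuming the lists above are applied
// unchanged): for the "add" entry, the load modes produce member functions such
// as
//   void Ldadd(const Register& rs, const Register& rt, const MemOperand& src);
//   void Ldaddal(const Register& rs, const Register& rt, const MemOperand& src);
// and the store modes produce functions such as
//   void Staddl(const Register& rs, const MemOperand& src);
// each of which checks allow_macro_instructions_, opens a
// SingleEmissionCheckScope, and forwards to the matching lower-case assembler
// instruction (ldadd, ldaddal, staddl, ...).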
1777   void Ldaprb(const Register& rt, const MemOperand& src) {
1778     VIXL_ASSERT(allow_macro_instructions_);
1779     SingleEmissionCheckScope guard(this);
1780     ldaprb(rt, src);
1781   }
1782 
1783   void Ldaprh(const Register& rt, const MemOperand& src) {
1784     VIXL_ASSERT(allow_macro_instructions_);
1785     SingleEmissionCheckScope guard(this);
1786     ldaprh(rt, src);
1787   }
1788 
1789   void Ldapr(const Register& rt, const MemOperand& src) {
1790     VIXL_ASSERT(allow_macro_instructions_);
1791     SingleEmissionCheckScope guard(this);
1792     ldapr(rt, src);
1793   }
1794 
1795   void Ldnp(const CPURegister& rt,
1796             const CPURegister& rt2,
1797             const MemOperand& src) {
1798     VIXL_ASSERT(allow_macro_instructions_);
1799     SingleEmissionCheckScope guard(this);
1800     ldnp(rt, rt2, src);
1801   }
1802   // Provide both double and float interfaces for FP immediate loads, rather
1803   // than relying on implicit C++ casts. This allows signalling NaNs to be
1804   // preserved when the immediate matches the format of vt. Most systems convert
1805   // signalling NaNs to quiet NaNs when converting between float and double.
1806   void Ldr(const VRegister& vt, double imm) {
1807     VIXL_ASSERT(allow_macro_instructions_);
1808     SingleEmissionCheckScope guard(this);
1809     RawLiteral* literal;
1810     if (vt.IsD()) {
1811       literal = new Literal<double>(imm,
1812                                     &literal_pool_,
1813                                     RawLiteral::kDeletedOnPlacementByPool);
1814     } else {
1815       literal = new Literal<float>(static_cast<float>(imm),
1816                                    &literal_pool_,
1817                                    RawLiteral::kDeletedOnPlacementByPool);
1818     }
1819     ldr(vt, literal);
1820   }
1821   void Ldr(const VRegister& vt, float imm) {
1822     VIXL_ASSERT(allow_macro_instructions_);
1823     SingleEmissionCheckScope guard(this);
1824     RawLiteral* literal;
1825     if (vt.IsS()) {
1826       literal = new Literal<float>(imm,
1827                                    &literal_pool_,
1828                                    RawLiteral::kDeletedOnPlacementByPool);
1829     } else {
1830       literal = new Literal<double>(static_cast<double>(imm),
1831                                     &literal_pool_,
1832                                     RawLiteral::kDeletedOnPlacementByPool);
1833     }
1834     ldr(vt, literal);
1835   }
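  // Illustrative sketch (assuming a MacroAssembler instance named `masm`):
  //   masm.Ldr(d0, 1.5);   // places a double literal in the pool and loads d0.
  //   masm.Ldr(s1, 1.5f);  // places a float literal in the pool and loads s1.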
1836   void Ldr(const VRegister& vt, uint64_t high64, uint64_t low64) {
1837     VIXL_ASSERT(allow_macro_instructions_);
1838     VIXL_ASSERT(vt.IsQ());
1839     SingleEmissionCheckScope guard(this);
1840     ldr(vt,
1841         new Literal<uint64_t>(high64,
1842                               low64,
1843                               &literal_pool_,
1844                               RawLiteral::kDeletedOnPlacementByPool));
1845   }
1846   void Ldr(const Register& rt, uint64_t imm) {
1847     VIXL_ASSERT(allow_macro_instructions_);
1848     VIXL_ASSERT(!rt.IsZero());
1849     SingleEmissionCheckScope guard(this);
1850     RawLiteral* literal;
1851     if (rt.Is64Bits()) {
1852       literal = new Literal<uint64_t>(imm,
1853                                       &literal_pool_,
1854                                       RawLiteral::kDeletedOnPlacementByPool);
1855     } else {
1856       VIXL_ASSERT(rt.Is32Bits());
1857       VIXL_ASSERT(IsUint32(imm) || IsInt32(imm));
1858       literal = new Literal<uint32_t>(static_cast<uint32_t>(imm),
1859                                       &literal_pool_,
1860                                       RawLiteral::kDeletedOnPlacementByPool);
1861     }
1862     ldr(rt, literal);
1863   }
1864   void Ldrsw(const Register& rt, uint32_t imm) {
1865     VIXL_ASSERT(allow_macro_instructions_);
1866     VIXL_ASSERT(!rt.IsZero());
1867     SingleEmissionCheckScope guard(this);
1868     ldrsw(rt,
1869           new Literal<uint32_t>(imm,
1870                                 &literal_pool_,
1871                                 RawLiteral::kDeletedOnPlacementByPool));
1872   }
1873   void Ldr(const CPURegister& rt, RawLiteral* literal) {
1874     VIXL_ASSERT(allow_macro_instructions_);
1875     SingleEmissionCheckScope guard(this);
1876     ldr(rt, literal);
1877   }
1878   void Ldrsw(const Register& rt, RawLiteral* literal) {
1879     VIXL_ASSERT(allow_macro_instructions_);
1880     SingleEmissionCheckScope guard(this);
1881     ldrsw(rt, literal);
1882   }
1883   void Ldxp(const Register& rt, const Register& rt2, const MemOperand& src) {
1884     VIXL_ASSERT(allow_macro_instructions_);
1885     VIXL_ASSERT(!rt.Aliases(rt2));
1886     SingleEmissionCheckScope guard(this);
1887     ldxp(rt, rt2, src);
1888   }
1889   void Ldxr(const Register& rt, const MemOperand& src) {
1890     VIXL_ASSERT(allow_macro_instructions_);
1891     SingleEmissionCheckScope guard(this);
1892     ldxr(rt, src);
1893   }
1894   void Ldxrb(const Register& rt, const MemOperand& src) {
1895     VIXL_ASSERT(allow_macro_instructions_);
1896     SingleEmissionCheckScope guard(this);
1897     ldxrb(rt, src);
1898   }
1899   void Ldxrh(const Register& rt, const MemOperand& src) {
1900     VIXL_ASSERT(allow_macro_instructions_);
1901     SingleEmissionCheckScope guard(this);
1902     ldxrh(rt, src);
1903   }
1904   void Lsl(const Register& rd, const Register& rn, unsigned shift) {
1905     VIXL_ASSERT(allow_macro_instructions_);
1906     VIXL_ASSERT(!rd.IsZero());
1907     VIXL_ASSERT(!rn.IsZero());
1908     SingleEmissionCheckScope guard(this);
1909     lsl(rd, rn, shift);
1910   }
1911   void Lsl(const Register& rd, const Register& rn, const Register& rm) {
1912     VIXL_ASSERT(allow_macro_instructions_);
1913     VIXL_ASSERT(!rd.IsZero());
1914     VIXL_ASSERT(!rn.IsZero());
1915     VIXL_ASSERT(!rm.IsZero());
1916     SingleEmissionCheckScope guard(this);
1917     lslv(rd, rn, rm);
1918   }
1919   void Lsr(const Register& rd, const Register& rn, unsigned shift) {
1920     VIXL_ASSERT(allow_macro_instructions_);
1921     VIXL_ASSERT(!rd.IsZero());
1922     VIXL_ASSERT(!rn.IsZero());
1923     SingleEmissionCheckScope guard(this);
1924     lsr(rd, rn, shift);
1925   }
1926   void Lsr(const Register& rd, const Register& rn, const Register& rm) {
1927     VIXL_ASSERT(allow_macro_instructions_);
1928     VIXL_ASSERT(!rd.IsZero());
1929     VIXL_ASSERT(!rn.IsZero());
1930     VIXL_ASSERT(!rm.IsZero());
1931     SingleEmissionCheckScope guard(this);
1932     lsrv(rd, rn, rm);
1933   }
1934   void Madd(const Register& rd,
1935             const Register& rn,
1936             const Register& rm,
1937             const Register& ra) {
1938     VIXL_ASSERT(allow_macro_instructions_);
1939     VIXL_ASSERT(!rd.IsZero());
1940     VIXL_ASSERT(!rn.IsZero());
1941     VIXL_ASSERT(!rm.IsZero());
1942     VIXL_ASSERT(!ra.IsZero());
1943     SingleEmissionCheckScope guard(this);
1944     madd(rd, rn, rm, ra);
1945   }
1946   void Mneg(const Register& rd, const Register& rn, const Register& rm) {
1947     VIXL_ASSERT(allow_macro_instructions_);
1948     VIXL_ASSERT(!rd.IsZero());
1949     VIXL_ASSERT(!rn.IsZero());
1950     VIXL_ASSERT(!rm.IsZero());
1951     SingleEmissionCheckScope guard(this);
1952     mneg(rd, rn, rm);
1953   }
1954   void Mov(const Register& rd,
1955            const Register& rn,
1956            DiscardMoveMode discard_mode = kDontDiscardForSameWReg) {
1957     VIXL_ASSERT(allow_macro_instructions_);
1958     // Emit a register move only if the registers are distinct, or if they are
1959     // not X registers.
1960     //
1961     // Note that mov(w0, w0) is not a no-op because it clears the top word of
1962     // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
1963     // registers is not required to clear the top word of the X register. In
1964     // this case, the instruction is discarded.
1965     //
1966     // If the sp is an operand, add #0 is emitted; otherwise, orr #0 is used.
1967     if (!rd.Is(rn) ||
1968         (rd.Is32Bits() && (discard_mode == kDontDiscardForSameWReg))) {
1969       SingleEmissionCheckScope guard(this);
1970       mov(rd, rn);
1971     }
1972   }
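  // Illustrative sketch (assuming a MacroAssembler instance named `masm`):
  //   masm.Mov(x0, x0);                       // elided: X-to-X self-move.
  //   masm.Mov(w0, w0);                       // emitted, to clear x0[63:32].
  //   masm.Mov(w0, w0, kDiscardForSameWReg);  // explicitly discarded instead.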
1973   void Movk(const Register& rd, uint64_t imm, int shift = -1) {
1974     VIXL_ASSERT(allow_macro_instructions_);
1975     VIXL_ASSERT(!rd.IsZero());
1976     SingleEmissionCheckScope guard(this);
1977     movk(rd, imm, shift);
1978   }
1979   void Mrs(const Register& rt, SystemRegister sysreg) {
1980     VIXL_ASSERT(allow_macro_instructions_);
1981     VIXL_ASSERT(!rt.IsZero());
1982     SingleEmissionCheckScope guard(this);
1983     mrs(rt, sysreg);
1984   }
1985   void Msr(SystemRegister sysreg, const Register& rt) {
1986     VIXL_ASSERT(allow_macro_instructions_);
1987     VIXL_ASSERT(!rt.IsZero());
1988     SingleEmissionCheckScope guard(this);
1989     msr(sysreg, rt);
1990   }
1991   void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) {
1992     VIXL_ASSERT(allow_macro_instructions_);
1993     SingleEmissionCheckScope guard(this);
1994     sys(op1, crn, crm, op2, rt);
1995   }
1996   void Dc(DataCacheOp op, const Register& rt) {
1997     VIXL_ASSERT(allow_macro_instructions_);
1998     SingleEmissionCheckScope guard(this);
1999     dc(op, rt);
2000   }
2001   void Ic(InstructionCacheOp op, const Register& rt) {
2002     VIXL_ASSERT(allow_macro_instructions_);
2003     SingleEmissionCheckScope guard(this);
2004     ic(op, rt);
2005   }
2006   void Msub(const Register& rd,
2007             const Register& rn,
2008             const Register& rm,
2009             const Register& ra) {
2010     VIXL_ASSERT(allow_macro_instructions_);
2011     VIXL_ASSERT(!rd.IsZero());
2012     VIXL_ASSERT(!rn.IsZero());
2013     VIXL_ASSERT(!rm.IsZero());
2014     VIXL_ASSERT(!ra.IsZero());
2015     SingleEmissionCheckScope guard(this);
2016     msub(rd, rn, rm, ra);
2017   }
2018   void Mul(const Register& rd, const Register& rn, const Register& rm) {
2019     VIXL_ASSERT(allow_macro_instructions_);
2020     VIXL_ASSERT(!rd.IsZero());
2021     VIXL_ASSERT(!rn.IsZero());
2022     VIXL_ASSERT(!rm.IsZero());
2023     SingleEmissionCheckScope guard(this);
2024     mul(rd, rn, rm);
2025   }
2026   void Nop() {
2027     VIXL_ASSERT(allow_macro_instructions_);
2028     SingleEmissionCheckScope guard(this);
2029     nop();
2030   }
2031   void Rbit(const Register& rd, const Register& rn) {
2032     VIXL_ASSERT(allow_macro_instructions_);
2033     VIXL_ASSERT(!rd.IsZero());
2034     VIXL_ASSERT(!rn.IsZero());
2035     SingleEmissionCheckScope guard(this);
2036     rbit(rd, rn);
2037   }
2038   void Ret(const Register& xn = lr) {
2039     VIXL_ASSERT(allow_macro_instructions_);
2040     VIXL_ASSERT(!xn.IsZero());
2041     SingleEmissionCheckScope guard(this);
2042     ret(xn);
2043   }
2044   void Rev(const Register& rd, const Register& rn) {
2045     VIXL_ASSERT(allow_macro_instructions_);
2046     VIXL_ASSERT(!rd.IsZero());
2047     VIXL_ASSERT(!rn.IsZero());
2048     SingleEmissionCheckScope guard(this);
2049     rev(rd, rn);
2050   }
2051   void Rev16(const Register& rd, const Register& rn) {
2052     VIXL_ASSERT(allow_macro_instructions_);
2053     VIXL_ASSERT(!rd.IsZero());
2054     VIXL_ASSERT(!rn.IsZero());
2055     SingleEmissionCheckScope guard(this);
2056     rev16(rd, rn);
2057   }
2058   void Rev32(const Register& rd, const Register& rn) {
2059     VIXL_ASSERT(allow_macro_instructions_);
2060     VIXL_ASSERT(!rd.IsZero());
2061     VIXL_ASSERT(!rn.IsZero());
2062     SingleEmissionCheckScope guard(this);
2063     rev32(rd, rn);
2064   }
2065   void Rev64(const Register& rd, const Register& rn) {
2066     VIXL_ASSERT(allow_macro_instructions_);
2067     VIXL_ASSERT(!rd.IsZero());
2068     VIXL_ASSERT(!rn.IsZero());
2069     SingleEmissionCheckScope guard(this);
2070     rev64(rd, rn);
2071   }
2072 
2073 #define PAUTH_MASM_VARIATIONS(V) \
2074   V(Paci, paci)                  \
2075   V(Pacd, pacd)                  \
2076   V(Auti, auti)                  \
2077   V(Autd, autd)
2078 
2079 #define DEFINE_MACRO_ASM_FUNCS(MASM_PRE, ASM_PRE)            \
2080   void MASM_PRE##a(const Register& xd, const Register& xn) { \
2081     VIXL_ASSERT(allow_macro_instructions_);                  \
2082     SingleEmissionCheckScope guard(this);                    \
2083     ASM_PRE##a(xd, xn);                                      \
2084   }                                                          \
2085   void MASM_PRE##za(const Register& xd) {                    \
2086     VIXL_ASSERT(allow_macro_instructions_);                  \
2087     SingleEmissionCheckScope guard(this);                    \
2088     ASM_PRE##za(xd);                                         \
2089   }                                                          \
2090   void MASM_PRE##b(const Register& xd, const Register& xn) { \
2091     VIXL_ASSERT(allow_macro_instructions_);                  \
2092     SingleEmissionCheckScope guard(this);                    \
2093     ASM_PRE##b(xd, xn);                                      \
2094   }                                                          \
2095   void MASM_PRE##zb(const Register& xd) {                    \
2096     VIXL_ASSERT(allow_macro_instructions_);                  \
2097     SingleEmissionCheckScope guard(this);                    \
2098     ASM_PRE##zb(xd);                                         \
2099   }
2100 
2101   PAUTH_MASM_VARIATIONS(DEFINE_MACRO_ASM_FUNCS)
2102 #undef DEFINE_MACRO_ASM_FUNCS
2103 
2104   void Pacga(const Register& xd, const Register& xn, const Register& xm) {
2105     VIXL_ASSERT(allow_macro_instructions_);
2106     SingleEmissionCheckScope guard(this);
2107     pacga(xd, xn, xm);
2108   }
2109 
2110   void Xpaci(const Register& xd) {
2111     VIXL_ASSERT(allow_macro_instructions_);
2112     SingleEmissionCheckScope guard(this);
2113     xpaci(xd);
2114   }
2115 
2116   void Xpacd(const Register& xd) {
2117     VIXL_ASSERT(allow_macro_instructions_);
2118     SingleEmissionCheckScope guard(this);
2119     xpacd(xd);
2120   }
2121   void Ror(const Register& rd, const Register& rs, unsigned shift) {
2122     VIXL_ASSERT(allow_macro_instructions_);
2123     VIXL_ASSERT(!rd.IsZero());
2124     VIXL_ASSERT(!rs.IsZero());
2125     SingleEmissionCheckScope guard(this);
2126     ror(rd, rs, shift);
2127   }
2128   void Ror(const Register& rd, const Register& rn, const Register& rm) {
2129     VIXL_ASSERT(allow_macro_instructions_);
2130     VIXL_ASSERT(!rd.IsZero());
2131     VIXL_ASSERT(!rn.IsZero());
2132     VIXL_ASSERT(!rm.IsZero());
2133     SingleEmissionCheckScope guard(this);
2134     rorv(rd, rn, rm);
2135   }
2136   void Sbfiz(const Register& rd,
2137              const Register& rn,
2138              unsigned lsb,
2139              unsigned width) {
2140     VIXL_ASSERT(allow_macro_instructions_);
2141     VIXL_ASSERT(!rd.IsZero());
2142     VIXL_ASSERT(!rn.IsZero());
2143     SingleEmissionCheckScope guard(this);
2144     sbfiz(rd, rn, lsb, width);
2145   }
2146   void Sbfm(const Register& rd,
2147             const Register& rn,
2148             unsigned immr,
2149             unsigned imms) {
2150     VIXL_ASSERT(allow_macro_instructions_);
2151     VIXL_ASSERT(!rd.IsZero());
2152     VIXL_ASSERT(!rn.IsZero());
2153     SingleEmissionCheckScope guard(this);
2154     sbfm(rd, rn, immr, imms);
2155   }
2156   void Sbfx(const Register& rd,
2157             const Register& rn,
2158             unsigned lsb,
2159             unsigned width) {
2160     VIXL_ASSERT(allow_macro_instructions_);
2161     VIXL_ASSERT(!rd.IsZero());
2162     VIXL_ASSERT(!rn.IsZero());
2163     SingleEmissionCheckScope guard(this);
2164     sbfx(rd, rn, lsb, width);
2165   }
2166   void Scvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
2167     VIXL_ASSERT(allow_macro_instructions_);
2168     VIXL_ASSERT(!rn.IsZero());
2169     SingleEmissionCheckScope guard(this);
2170     scvtf(vd, rn, fbits);
2171   }
2172   void Sdiv(const Register& rd, const Register& rn, const Register& rm) {
2173     VIXL_ASSERT(allow_macro_instructions_);
2174     VIXL_ASSERT(!rd.IsZero());
2175     VIXL_ASSERT(!rn.IsZero());
2176     VIXL_ASSERT(!rm.IsZero());
2177     SingleEmissionCheckScope guard(this);
2178     sdiv(rd, rn, rm);
2179   }
2180   void Smaddl(const Register& rd,
2181               const Register& rn,
2182               const Register& rm,
2183               const Register& ra) {
2184     VIXL_ASSERT(allow_macro_instructions_);
2185     VIXL_ASSERT(!rd.IsZero());
2186     VIXL_ASSERT(!rn.IsZero());
2187     VIXL_ASSERT(!rm.IsZero());
2188     VIXL_ASSERT(!ra.IsZero());
2189     SingleEmissionCheckScope guard(this);
2190     smaddl(rd, rn, rm, ra);
2191   }
2192   void Smsubl(const Register& rd,
2193               const Register& rn,
2194               const Register& rm,
2195               const Register& ra) {
2196     VIXL_ASSERT(allow_macro_instructions_);
2197     VIXL_ASSERT(!rd.IsZero());
2198     VIXL_ASSERT(!rn.IsZero());
2199     VIXL_ASSERT(!rm.IsZero());
2200     VIXL_ASSERT(!ra.IsZero());
2201     SingleEmissionCheckScope guard(this);
2202     smsubl(rd, rn, rm, ra);
2203   }
2204   void Smull(const Register& rd, const Register& rn, const Register& rm) {
2205     VIXL_ASSERT(allow_macro_instructions_);
2206     VIXL_ASSERT(!rd.IsZero());
2207     VIXL_ASSERT(!rn.IsZero());
2208     VIXL_ASSERT(!rm.IsZero());
2209     SingleEmissionCheckScope guard(this);
2210     smull(rd, rn, rm);
2211   }
2212   void Smulh(const Register& xd, const Register& xn, const Register& xm) {
2213     VIXL_ASSERT(allow_macro_instructions_);
2214     VIXL_ASSERT(!xd.IsZero());
2215     VIXL_ASSERT(!xn.IsZero());
2216     VIXL_ASSERT(!xm.IsZero());
2217     SingleEmissionCheckScope guard(this);
2218     smulh(xd, xn, xm);
2219   }
2220   void Stlr(const Register& rt, const MemOperand& dst) {
2221     VIXL_ASSERT(allow_macro_instructions_);
2222     SingleEmissionCheckScope guard(this);
2223     stlr(rt, dst);
2224   }
2225   void Stlrb(const Register& rt, const MemOperand& dst) {
2226     VIXL_ASSERT(allow_macro_instructions_);
2227     SingleEmissionCheckScope guard(this);
2228     stlrb(rt, dst);
2229   }
2230   void Stlrh(const Register& rt, const MemOperand& dst) {
2231     VIXL_ASSERT(allow_macro_instructions_);
2232     SingleEmissionCheckScope guard(this);
2233     stlrh(rt, dst);
2234   }
2235   void Stllr(const Register& rt, const MemOperand& dst) {
2236     VIXL_ASSERT(allow_macro_instructions_);
2237     SingleEmissionCheckScope guard(this);
2238     stllr(rt, dst);
2239   }
2240   void Stllrb(const Register& rt, const MemOperand& dst) {
2241     VIXL_ASSERT(allow_macro_instructions_);
2242     SingleEmissionCheckScope guard(this);
2243     stllrb(rt, dst);
2244   }
2245   void Stllrh(const Register& rt, const MemOperand& dst) {
2246     VIXL_ASSERT(allow_macro_instructions_);
2247     SingleEmissionCheckScope guard(this);
2248     stllrh(rt, dst);
2249   }
2250   void Stlxp(const Register& rs,
2251              const Register& rt,
2252              const Register& rt2,
2253              const MemOperand& dst) {
2254     VIXL_ASSERT(allow_macro_instructions_);
2255     VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
2256     VIXL_ASSERT(!rs.Aliases(rt));
2257     VIXL_ASSERT(!rs.Aliases(rt2));
2258     SingleEmissionCheckScope guard(this);
2259     stlxp(rs, rt, rt2, dst);
2260   }
2261   void Stlxr(const Register& rs, const Register& rt, const MemOperand& dst) {
2262     VIXL_ASSERT(allow_macro_instructions_);
2263     VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
2264     VIXL_ASSERT(!rs.Aliases(rt));
2265     SingleEmissionCheckScope guard(this);
2266     stlxr(rs, rt, dst);
2267   }
2268   void Stlxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
2269     VIXL_ASSERT(allow_macro_instructions_);
2270     VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
2271     VIXL_ASSERT(!rs.Aliases(rt));
2272     SingleEmissionCheckScope guard(this);
2273     stlxrb(rs, rt, dst);
2274   }
2275   void Stlxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
2276     VIXL_ASSERT(allow_macro_instructions_);
2277     VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
2278     VIXL_ASSERT(!rs.Aliases(rt));
2279     SingleEmissionCheckScope guard(this);
2280     stlxrh(rs, rt, dst);
2281   }
2282   void Stnp(const CPURegister& rt,
2283             const CPURegister& rt2,
2284             const MemOperand& dst) {
2285     VIXL_ASSERT(allow_macro_instructions_);
2286     SingleEmissionCheckScope guard(this);
2287     stnp(rt, rt2, dst);
2288   }
2289   void Stxp(const Register& rs,
2290             const Register& rt,
2291             const Register& rt2,
2292             const MemOperand& dst) {
2293     VIXL_ASSERT(allow_macro_instructions_);
2294     VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
2295     VIXL_ASSERT(!rs.Aliases(rt));
2296     VIXL_ASSERT(!rs.Aliases(rt2));
2297     SingleEmissionCheckScope guard(this);
2298     stxp(rs, rt, rt2, dst);
2299   }
2300   void Stxr(const Register& rs, const Register& rt, const MemOperand& dst) {
2301     VIXL_ASSERT(allow_macro_instructions_);
2302     VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
2303     VIXL_ASSERT(!rs.Aliases(rt));
2304     SingleEmissionCheckScope guard(this);
2305     stxr(rs, rt, dst);
2306   }
2307   void Stxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
2308     VIXL_ASSERT(allow_macro_instructions_);
2309     VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
2310     VIXL_ASSERT(!rs.Aliases(rt));
2311     SingleEmissionCheckScope guard(this);
2312     stxrb(rs, rt, dst);
2313   }
2314   void Stxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
2315     VIXL_ASSERT(allow_macro_instructions_);
2316     VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
2317     VIXL_ASSERT(!rs.Aliases(rt));
2318     SingleEmissionCheckScope guard(this);
2319     stxrh(rs, rt, dst);
2320   }
2321   void Svc(int code) {
2322     VIXL_ASSERT(allow_macro_instructions_);
2323     SingleEmissionCheckScope guard(this);
2324     svc(code);
2325   }
2326   void Sxtb(const Register& rd, const Register& rn) {
2327     VIXL_ASSERT(allow_macro_instructions_);
2328     VIXL_ASSERT(!rd.IsZero());
2329     VIXL_ASSERT(!rn.IsZero());
2330     SingleEmissionCheckScope guard(this);
2331     sxtb(rd, rn);
2332   }
2333   void Sxth(const Register& rd, const Register& rn) {
2334     VIXL_ASSERT(allow_macro_instructions_);
2335     VIXL_ASSERT(!rd.IsZero());
2336     VIXL_ASSERT(!rn.IsZero());
2337     SingleEmissionCheckScope guard(this);
2338     sxth(rd, rn);
2339   }
2340   void Sxtw(const Register& rd, const Register& rn) {
2341     VIXL_ASSERT(allow_macro_instructions_);
2342     VIXL_ASSERT(!rd.IsZero());
2343     VIXL_ASSERT(!rn.IsZero());
2344     SingleEmissionCheckScope guard(this);
2345     sxtw(rd, rn);
2346   }
2347   void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
2348     VIXL_ASSERT(allow_macro_instructions_);
2349     SingleEmissionCheckScope guard(this);
2350     tbl(vd, vn, vm);
2351   }
2352   void Tbl(const VRegister& vd,
2353            const VRegister& vn,
2354            const VRegister& vn2,
2355            const VRegister& vm) {
2356     VIXL_ASSERT(allow_macro_instructions_);
2357     SingleEmissionCheckScope guard(this);
2358     tbl(vd, vn, vn2, vm);
2359   }
2360   void Tbl(const VRegister& vd,
2361            const VRegister& vn,
2362            const VRegister& vn2,
2363            const VRegister& vn3,
2364            const VRegister& vm) {
2365     VIXL_ASSERT(allow_macro_instructions_);
2366     SingleEmissionCheckScope guard(this);
2367     tbl(vd, vn, vn2, vn3, vm);
2368   }
2369   void Tbl(const VRegister& vd,
2370            const VRegister& vn,
2371            const VRegister& vn2,
2372            const VRegister& vn3,
2373            const VRegister& vn4,
2374            const VRegister& vm) {
2375     VIXL_ASSERT(allow_macro_instructions_);
2376     SingleEmissionCheckScope guard(this);
2377     tbl(vd, vn, vn2, vn3, vn4, vm);
2378   }
2379   void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
2380     VIXL_ASSERT(allow_macro_instructions_);
2381     SingleEmissionCheckScope guard(this);
2382     tbx(vd, vn, vm);
2383   }
2384   void Tbx(const VRegister& vd,
2385            const VRegister& vn,
2386            const VRegister& vn2,
2387            const VRegister& vm) {
2388     VIXL_ASSERT(allow_macro_instructions_);
2389     SingleEmissionCheckScope guard(this);
2390     tbx(vd, vn, vn2, vm);
2391   }
2392   void Tbx(const VRegister& vd,
2393            const VRegister& vn,
2394            const VRegister& vn2,
2395            const VRegister& vn3,
2396            const VRegister& vm) {
2397     VIXL_ASSERT(allow_macro_instructions_);
2398     SingleEmissionCheckScope guard(this);
2399     tbx(vd, vn, vn2, vn3, vm);
2400   }
2401   void Tbx(const VRegister& vd,
2402            const VRegister& vn,
2403            const VRegister& vn2,
2404            const VRegister& vn3,
2405            const VRegister& vn4,
2406            const VRegister& vm) {
2407     VIXL_ASSERT(allow_macro_instructions_);
2408     SingleEmissionCheckScope guard(this);
2409     tbx(vd, vn, vn2, vn3, vn4, vm);
2410   }
2411   void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
2412   void Tbz(const Register& rt, unsigned bit_pos, Label* label);
2413   void Ubfiz(const Register& rd,
2414              const Register& rn,
2415              unsigned lsb,
2416              unsigned width) {
2417     VIXL_ASSERT(allow_macro_instructions_);
2418     VIXL_ASSERT(!rd.IsZero());
2419     VIXL_ASSERT(!rn.IsZero());
2420     SingleEmissionCheckScope guard(this);
2421     ubfiz(rd, rn, lsb, width);
2422   }
2423   void Ubfm(const Register& rd,
2424             const Register& rn,
2425             unsigned immr,
2426             unsigned imms) {
2427     VIXL_ASSERT(allow_macro_instructions_);
2428     VIXL_ASSERT(!rd.IsZero());
2429     VIXL_ASSERT(!rn.IsZero());
2430     SingleEmissionCheckScope guard(this);
2431     ubfm(rd, rn, immr, imms);
2432   }
2433   void Ubfx(const Register& rd,
2434             const Register& rn,
2435             unsigned lsb,
2436             unsigned width) {
2437     VIXL_ASSERT(allow_macro_instructions_);
2438     VIXL_ASSERT(!rd.IsZero());
2439     VIXL_ASSERT(!rn.IsZero());
2440     SingleEmissionCheckScope guard(this);
2441     ubfx(rd, rn, lsb, width);
2442   }
2443   void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
2444     VIXL_ASSERT(allow_macro_instructions_);
2445     VIXL_ASSERT(!rn.IsZero());
2446     SingleEmissionCheckScope guard(this);
2447     ucvtf(vd, rn, fbits);
2448   }
2449   void Udiv(const Register& rd, const Register& rn, const Register& rm) {
2450     VIXL_ASSERT(allow_macro_instructions_);
2451     VIXL_ASSERT(!rd.IsZero());
2452     VIXL_ASSERT(!rn.IsZero());
2453     VIXL_ASSERT(!rm.IsZero());
2454     SingleEmissionCheckScope guard(this);
2455     udiv(rd, rn, rm);
2456   }
2457   void Umaddl(const Register& rd,
2458               const Register& rn,
2459               const Register& rm,
2460               const Register& ra) {
2461     VIXL_ASSERT(allow_macro_instructions_);
2462     VIXL_ASSERT(!rd.IsZero());
2463     VIXL_ASSERT(!rn.IsZero());
2464     VIXL_ASSERT(!rm.IsZero());
2465     VIXL_ASSERT(!ra.IsZero());
2466     SingleEmissionCheckScope guard(this);
2467     umaddl(rd, rn, rm, ra);
2468   }
2469   void Umull(const Register& rd, const Register& rn, const Register& rm) {
2470     VIXL_ASSERT(allow_macro_instructions_);
2471     VIXL_ASSERT(!rd.IsZero());
2472     VIXL_ASSERT(!rn.IsZero());
2473     VIXL_ASSERT(!rm.IsZero());
2474     SingleEmissionCheckScope guard(this);
2475     umull(rd, rn, rm);
2476   }
2477   void Umulh(const Register& xd, const Register& xn, const Register& xm) {
2478     VIXL_ASSERT(allow_macro_instructions_);
2479     VIXL_ASSERT(!xd.IsZero());
2480     VIXL_ASSERT(!xn.IsZero());
2481     VIXL_ASSERT(!xm.IsZero());
2482     SingleEmissionCheckScope guard(this);
2483     umulh(xd, xn, xm);
2484   }
2485   void Umsubl(const Register& rd,
2486               const Register& rn,
2487               const Register& rm,
2488               const Register& ra) {
2489     VIXL_ASSERT(allow_macro_instructions_);
2490     VIXL_ASSERT(!rd.IsZero());
2491     VIXL_ASSERT(!rn.IsZero());
2492     VIXL_ASSERT(!rm.IsZero());
2493     VIXL_ASSERT(!ra.IsZero());
2494     SingleEmissionCheckScope guard(this);
2495     umsubl(rd, rn, rm, ra);
2496   }
2497   void Unreachable() {
2498     VIXL_ASSERT(allow_macro_instructions_);
2499     SingleEmissionCheckScope guard(this);
2500     if (generate_simulator_code_) {
2501       hlt(kUnreachableOpcode);
2502     } else {
2503       // Branch to 0 to generate a segfault.
2504       // lr - kInstructionSize is the address of the offending instruction.
2505       blr(xzr);
2506     }
2507   }
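  // Illustrative note on the behaviour above: under simulated execution this
  // emits `hlt #kUnreachableOpcode`; on real hardware it emits `blr xzr`, a
  // branch to address 0, so execution faults immediately and lr records the
  // address just past the offending call site.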
2508   void Uxtb(const Register& rd, const Register& rn) {
2509     VIXL_ASSERT(allow_macro_instructions_);
2510     VIXL_ASSERT(!rd.IsZero());
2511     VIXL_ASSERT(!rn.IsZero());
2512     SingleEmissionCheckScope guard(this);
2513     uxtb(rd, rn);
2514   }
2515   void Uxth(const Register& rd, const Register& rn) {
2516     VIXL_ASSERT(allow_macro_instructions_);
2517     VIXL_ASSERT(!rd.IsZero());
2518     VIXL_ASSERT(!rn.IsZero());
2519     SingleEmissionCheckScope guard(this);
2520     uxth(rd, rn);
2521   }
2522   void Uxtw(const Register& rd, const Register& rn) {
2523     VIXL_ASSERT(allow_macro_instructions_);
2524     VIXL_ASSERT(!rd.IsZero());
2525     VIXL_ASSERT(!rn.IsZero());
2526     SingleEmissionCheckScope guard(this);
2527     uxtw(rd, rn);
2528   }
2529 
2530 // NEON 3 vector register instructions.
2531 #define NEON_3VREG_MACRO_LIST(V) \
2532   V(add, Add)                    \
2533   V(addhn, Addhn)                \
2534   V(addhn2, Addhn2)              \
2535   V(addp, Addp)                  \
2536   V(and_, And)                   \
2537   V(bic, Bic)                    \
2538   V(bif, Bif)                    \
2539   V(bit, Bit)                    \
2540   V(bsl, Bsl)                    \
2541   V(cmeq, Cmeq)                  \
2542   V(cmge, Cmge)                  \
2543   V(cmgt, Cmgt)                  \
2544   V(cmhi, Cmhi)                  \
2545   V(cmhs, Cmhs)                  \
2546   V(cmtst, Cmtst)                \
2547   V(eor, Eor)                    \
2548   V(fabd, Fabd)                  \
2549   V(facge, Facge)                \
2550   V(facgt, Facgt)                \
2551   V(faddp, Faddp)                \
2552   V(fcmeq, Fcmeq)                \
2553   V(fcmge, Fcmge)                \
2554   V(fcmgt, Fcmgt)                \
2555   V(fmaxnmp, Fmaxnmp)            \
2556   V(fmaxp, Fmaxp)                \
2557   V(fminnmp, Fminnmp)            \
2558   V(fminp, Fminp)                \
2559   V(fmla, Fmla)                  \
2560   V(fmls, Fmls)                  \
2561   V(fmulx, Fmulx)                \
2562   V(frecps, Frecps)              \
2563   V(frsqrts, Frsqrts)            \
2564   V(mla, Mla)                    \
2565   V(mls, Mls)                    \
2566   V(mul, Mul)                    \
2567   V(orn, Orn)                    \
2568   V(orr, Orr)                    \
2569   V(pmul, Pmul)                  \
2570   V(pmull, Pmull)                \
2571   V(pmull2, Pmull2)              \
2572   V(raddhn, Raddhn)              \
2573   V(raddhn2, Raddhn2)            \
2574   V(rsubhn, Rsubhn)              \
2575   V(rsubhn2, Rsubhn2)            \
2576   V(saba, Saba)                  \
2577   V(sabal, Sabal)                \
2578   V(sabal2, Sabal2)              \
2579   V(sabd, Sabd)                  \
2580   V(sabdl, Sabdl)                \
2581   V(sabdl2, Sabdl2)              \
2582   V(saddl, Saddl)                \
2583   V(saddl2, Saddl2)              \
2584   V(saddw, Saddw)                \
2585   V(saddw2, Saddw2)              \
2586   V(shadd, Shadd)                \
2587   V(shsub, Shsub)                \
2588   V(smax, Smax)                  \
2589   V(smaxp, Smaxp)                \
2590   V(smin, Smin)                  \
2591   V(sminp, Sminp)                \
2592   V(smlal, Smlal)                \
2593   V(smlal2, Smlal2)              \
2594   V(smlsl, Smlsl)                \
2595   V(smlsl2, Smlsl2)              \
2596   V(smull, Smull)                \
2597   V(smull2, Smull2)              \
2598   V(sqadd, Sqadd)                \
2599   V(sqdmlal, Sqdmlal)            \
2600   V(sqdmlal2, Sqdmlal2)          \
2601   V(sqdmlsl, Sqdmlsl)            \
2602   V(sqdmlsl2, Sqdmlsl2)          \
2603   V(sqdmulh, Sqdmulh)            \
2604   V(sqdmull, Sqdmull)            \
2605   V(sqdmull2, Sqdmull2)          \
2606   V(sqrdmulh, Sqrdmulh)          \
2607   V(sdot, Sdot)                  \
2608   V(sqrdmlah, Sqrdmlah)          \
2609   V(udot, Udot)                  \
2610   V(sqrdmlsh, Sqrdmlsh)          \
2611   V(sqrshl, Sqrshl)              \
2612   V(sqshl, Sqshl)                \
2613   V(sqsub, Sqsub)                \
2614   V(srhadd, Srhadd)              \
2615   V(srshl, Srshl)                \
2616   V(sshl, Sshl)                  \
2617   V(ssubl, Ssubl)                \
2618   V(ssubl2, Ssubl2)              \
2619   V(ssubw, Ssubw)                \
2620   V(ssubw2, Ssubw2)              \
2621   V(sub, Sub)                    \
2622   V(subhn, Subhn)                \
2623   V(subhn2, Subhn2)              \
2624   V(trn1, Trn1)                  \
2625   V(trn2, Trn2)                  \
2626   V(uaba, Uaba)                  \
2627   V(uabal, Uabal)                \
2628   V(uabal2, Uabal2)              \
2629   V(uabd, Uabd)                  \
2630   V(uabdl, Uabdl)                \
2631   V(uabdl2, Uabdl2)              \
2632   V(uaddl, Uaddl)                \
2633   V(uaddl2, Uaddl2)              \
2634   V(uaddw, Uaddw)                \
2635   V(uaddw2, Uaddw2)              \
2636   V(uhadd, Uhadd)                \
2637   V(uhsub, Uhsub)                \
2638   V(umax, Umax)                  \
2639   V(umaxp, Umaxp)                \
2640   V(umin, Umin)                  \
2641   V(uminp, Uminp)                \
2642   V(umlal, Umlal)                \
2643   V(umlal2, Umlal2)              \
2644   V(umlsl, Umlsl)                \
2645   V(umlsl2, Umlsl2)              \
2646   V(umull, Umull)                \
2647   V(umull2, Umull2)              \
2648   V(uqadd, Uqadd)                \
2649   V(uqrshl, Uqrshl)              \
2650   V(uqshl, Uqshl)                \
2651   V(uqsub, Uqsub)                \
2652   V(urhadd, Urhadd)              \
2653   V(urshl, Urshl)                \
2654   V(ushl, Ushl)                  \
2655   V(usubl, Usubl)                \
2656   V(usubl2, Usubl2)              \
2657   V(usubw, Usubw)                \
2658   V(usubw2, Usubw2)              \
2659   V(uzp1, Uzp1)                  \
2660   V(uzp2, Uzp2)                  \
2661   V(zip1, Zip1)                  \
2662   V(zip2, Zip2)
2663 
2664 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                     \
2665   void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
2666     VIXL_ASSERT(allow_macro_instructions_);                                  \
2667     SingleEmissionCheckScope guard(this);                                    \
2668     ASM(vd, vn, vm);                                                         \
2669   }
2670   NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2671 #undef DEFINE_MACRO_ASM_FUNC
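// Each V(asm, Masm) entry in the list above expands, via DEFINE_MACRO_ASM_FUNC,
// into a three-operand macro wrapper. As a rough sketch, V(add, Add) generates:
//   void Add(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
//     VIXL_ASSERT(allow_macro_instructions_);
//     SingleEmissionCheckScope guard(this);
//     add(vd, vn, vm);
//   }
// The same pattern is used for the two-register, immediate, by-element and
// shift lists below.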
2672 
2673 // NEON 2 vector register instructions.
2674 #define NEON_2VREG_MACRO_LIST(V) \
2675   V(abs, Abs)                    \
2676   V(addp, Addp)                  \
2677   V(addv, Addv)                  \
2678   V(cls, Cls)                    \
2679   V(clz, Clz)                    \
2680   V(cnt, Cnt)                    \
2681   V(fabs, Fabs)                  \
2682   V(faddp, Faddp)                \
2683   V(fcvtas, Fcvtas)              \
2684   V(fcvtau, Fcvtau)              \
2685   V(fcvtms, Fcvtms)              \
2686   V(fcvtmu, Fcvtmu)              \
2687   V(fcvtns, Fcvtns)              \
2688   V(fcvtnu, Fcvtnu)              \
2689   V(fcvtps, Fcvtps)              \
2690   V(fcvtpu, Fcvtpu)              \
2691   V(fmaxnmp, Fmaxnmp)            \
2692   V(fmaxnmv, Fmaxnmv)            \
2693   V(fmaxp, Fmaxp)                \
2694   V(fmaxv, Fmaxv)                \
2695   V(fminnmp, Fminnmp)            \
2696   V(fminnmv, Fminnmv)            \
2697   V(fminp, Fminp)                \
2698   V(fminv, Fminv)                \
2699   V(fneg, Fneg)                  \
2700   V(frecpe, Frecpe)              \
2701   V(frecpx, Frecpx)              \
2702   V(frinta, Frinta)              \
2703   V(frinti, Frinti)              \
2704   V(frintm, Frintm)              \
2705   V(frintn, Frintn)              \
2706   V(frintp, Frintp)              \
2707   V(frintx, Frintx)              \
2708   V(frintz, Frintz)              \
2709   V(frsqrte, Frsqrte)            \
2710   V(fsqrt, Fsqrt)                \
2711   V(mov, Mov)                    \
2712   V(mvn, Mvn)                    \
2713   V(neg, Neg)                    \
2714   V(not_, Not)                   \
2715   V(rbit, Rbit)                  \
2716   V(rev16, Rev16)                \
2717   V(rev32, Rev32)                \
2718   V(rev64, Rev64)                \
2719   V(sadalp, Sadalp)              \
2720   V(saddlp, Saddlp)              \
2721   V(saddlv, Saddlv)              \
2722   V(smaxv, Smaxv)                \
2723   V(sminv, Sminv)                \
2724   V(sqabs, Sqabs)                \
2725   V(sqneg, Sqneg)                \
2726   V(sqxtn, Sqxtn)                \
2727   V(sqxtn2, Sqxtn2)              \
2728   V(sqxtun, Sqxtun)              \
2729   V(sqxtun2, Sqxtun2)            \
2730   V(suqadd, Suqadd)              \
2731   V(sxtl, Sxtl)                  \
2732   V(sxtl2, Sxtl2)                \
2733   V(uadalp, Uadalp)              \
2734   V(uaddlp, Uaddlp)              \
2735   V(uaddlv, Uaddlv)              \
2736   V(umaxv, Umaxv)                \
2737   V(uminv, Uminv)                \
2738   V(uqxtn, Uqxtn)                \
2739   V(uqxtn2, Uqxtn2)              \
2740   V(urecpe, Urecpe)              \
2741   V(ursqrte, Ursqrte)            \
2742   V(usqadd, Usqadd)              \
2743   V(uxtl, Uxtl)                  \
2744   V(uxtl2, Uxtl2)                \
2745   V(xtn, Xtn)                    \
2746   V(xtn2, Xtn2)
2747 
2748 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                \
2749   void MASM(const VRegister& vd, const VRegister& vn) { \
2750     VIXL_ASSERT(allow_macro_instructions_);             \
2751     SingleEmissionCheckScope guard(this);               \
2752     ASM(vd, vn);                                        \
2753   }
2754   NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2755 #undef DEFINE_MACRO_ASM_FUNC
2756 
2757 // NEON 2 vector register with immediate instructions.
2758 #define NEON_2VREG_FPIMM_MACRO_LIST(V) \
2759   V(fcmeq, Fcmeq)                      \
2760   V(fcmge, Fcmge)                      \
2761   V(fcmgt, Fcmgt)                      \
2762   V(fcmle, Fcmle)                      \
2763   V(fcmlt, Fcmlt)
2764 
2765 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                            \
2766   void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
2767     VIXL_ASSERT(allow_macro_instructions_);                         \
2768     SingleEmissionCheckScope guard(this);                           \
2769     ASM(vd, vn, imm);                                               \
2770   }
2771   NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2772 #undef DEFINE_MACRO_ASM_FUNC
2773 
2774 // NEON by element instructions.
2775 #define NEON_BYELEMENT_MACRO_LIST(V) \
2776   V(fmul, Fmul)                      \
2777   V(fmla, Fmla)                      \
2778   V(fmls, Fmls)                      \
2779   V(fmulx, Fmulx)                    \
2780   V(mul, Mul)                        \
2781   V(mla, Mla)                        \
2782   V(mls, Mls)                        \
2783   V(sqdmulh, Sqdmulh)                \
2784   V(sqrdmulh, Sqrdmulh)              \
2785   V(sdot, Sdot)                      \
2786   V(sqrdmlah, Sqrdmlah)              \
2787   V(udot, Udot)                      \
2788   V(sqrdmlsh, Sqrdmlsh)              \
2789   V(sqdmull, Sqdmull)                \
2790   V(sqdmull2, Sqdmull2)              \
2791   V(sqdmlal, Sqdmlal)                \
2792   V(sqdmlal2, Sqdmlal2)              \
2793   V(sqdmlsl, Sqdmlsl)                \
2794   V(sqdmlsl2, Sqdmlsl2)              \
2795   V(smull, Smull)                    \
2796   V(smull2, Smull2)                  \
2797   V(smlal, Smlal)                    \
2798   V(smlal2, Smlal2)                  \
2799   V(smlsl, Smlsl)                    \
2800   V(smlsl2, Smlsl2)                  \
2801   V(umull, Umull)                    \
2802   V(umull2, Umull2)                  \
2803   V(umlal, Umlal)                    \
2804   V(umlal2, Umlal2)                  \
2805   V(umlsl, Umlsl)                    \
2806   V(umlsl2, Umlsl2)
2807 
2808 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM)    \
2809   void MASM(const VRegister& vd,            \
2810             const VRegister& vn,            \
2811             const VRegister& vm,            \
2812             int vm_index) {                 \
2813     VIXL_ASSERT(allow_macro_instructions_); \
2814     SingleEmissionCheckScope guard(this);   \
2815     ASM(vd, vn, vm, vm_index);              \
2816   }
2817   NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2818 #undef DEFINE_MACRO_ASM_FUNC
2819 
2820 #define NEON_2VREG_SHIFT_MACRO_LIST(V) \
2821   V(rshrn, Rshrn)                      \
2822   V(rshrn2, Rshrn2)                    \
2823   V(shl, Shl)                          \
2824   V(shll, Shll)                        \
2825   V(shll2, Shll2)                      \
2826   V(shrn, Shrn)                        \
2827   V(shrn2, Shrn2)                      \
2828   V(sli, Sli)                          \
2829   V(sqrshrn, Sqrshrn)                  \
2830   V(sqrshrn2, Sqrshrn2)                \
2831   V(sqrshrun, Sqrshrun)                \
2832   V(sqrshrun2, Sqrshrun2)              \
2833   V(sqshl, Sqshl)                      \
2834   V(sqshlu, Sqshlu)                    \
2835   V(sqshrn, Sqshrn)                    \
2836   V(sqshrn2, Sqshrn2)                  \
2837   V(sqshrun, Sqshrun)                  \
2838   V(sqshrun2, Sqshrun2)                \
2839   V(sri, Sri)                          \
2840   V(srshr, Srshr)                      \
2841   V(srsra, Srsra)                      \
2842   V(sshll, Sshll)                      \
2843   V(sshll2, Sshll2)                    \
2844   V(sshr, Sshr)                        \
2845   V(ssra, Ssra)                        \
2846   V(uqrshrn, Uqrshrn)                  \
2847   V(uqrshrn2, Uqrshrn2)                \
2848   V(uqshl, Uqshl)                      \
2849   V(uqshrn, Uqshrn)                    \
2850   V(uqshrn2, Uqshrn2)                  \
2851   V(urshr, Urshr)                      \
2852   V(ursra, Ursra)                      \
2853   V(ushll, Ushll)                      \
2854   V(ushll2, Ushll2)                    \
2855   V(ushr, Ushr)                        \
2856   V(usra, Usra)
2857 
2858 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                           \
2859   void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
2860     VIXL_ASSERT(allow_macro_instructions_);                        \
2861     SingleEmissionCheckScope guard(this);                          \
2862     ASM(vd, vn, shift);                                            \
2863   }
2864   NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2865 #undef DEFINE_MACRO_ASM_FUNC
2866 
2867   void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
2868     VIXL_ASSERT(allow_macro_instructions_);
2869     SingleEmissionCheckScope guard(this);
2870     bic(vd, imm8, left_shift);
2871   }
2872   void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
2873     VIXL_ASSERT(allow_macro_instructions_);
2874     SingleEmissionCheckScope guard(this);
2875     cmeq(vd, vn, imm);
2876   }
2877   void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
2878     VIXL_ASSERT(allow_macro_instructions_);
2879     SingleEmissionCheckScope guard(this);
2880     cmge(vd, vn, imm);
2881   }
2882   void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
2883     VIXL_ASSERT(allow_macro_instructions_);
2884     SingleEmissionCheckScope guard(this);
2885     cmgt(vd, vn, imm);
2886   }
2887   void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
2888     VIXL_ASSERT(allow_macro_instructions_);
2889     SingleEmissionCheckScope guard(this);
2890     cmle(vd, vn, imm);
2891   }
2892   void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
2893     VIXL_ASSERT(allow_macro_instructions_);
2894     SingleEmissionCheckScope guard(this);
2895     cmlt(vd, vn, imm);
2896   }
2897   void Dup(const VRegister& vd, const VRegister& vn, int index) {
2898     VIXL_ASSERT(allow_macro_instructions_);
2899     SingleEmissionCheckScope guard(this);
2900     dup(vd, vn, index);
2901   }
2902   void Dup(const VRegister& vd, const Register& rn) {
2903     VIXL_ASSERT(allow_macro_instructions_);
2904     SingleEmissionCheckScope guard(this);
2905     dup(vd, rn);
2906   }
2907   void Ext(const VRegister& vd,
2908            const VRegister& vn,
2909            const VRegister& vm,
2910            int index) {
2911     VIXL_ASSERT(allow_macro_instructions_);
2912     SingleEmissionCheckScope guard(this);
2913     ext(vd, vn, vm, index);
2914   }
2915   void Fcadd(const VRegister& vd,
2916              const VRegister& vn,
2917              const VRegister& vm,
2918              int rot) {
2919     VIXL_ASSERT(allow_macro_instructions_);
2920     SingleEmissionCheckScope guard(this);
2921     fcadd(vd, vn, vm, rot);
2922   }
2923   void Fcmla(const VRegister& vd,
2924              const VRegister& vn,
2925              const VRegister& vm,
2926              int vm_index,
2927              int rot) {
2928     VIXL_ASSERT(allow_macro_instructions_);
2929     SingleEmissionCheckScope guard(this);
2930     fcmla(vd, vn, vm, vm_index, rot);
2931   }
2932   void Fcmla(const VRegister& vd,
2933              const VRegister& vn,
2934              const VRegister& vm,
2935              int rot) {
2936     VIXL_ASSERT(allow_macro_instructions_);
2937     SingleEmissionCheckScope guard(this);
2938     fcmla(vd, vn, vm, rot);
2939   }
2940   void Ins(const VRegister& vd,
2941            int vd_index,
2942            const VRegister& vn,
2943            int vn_index) {
2944     VIXL_ASSERT(allow_macro_instructions_);
2945     SingleEmissionCheckScope guard(this);
2946     ins(vd, vd_index, vn, vn_index);
2947   }
2948   void Ins(const VRegister& vd, int vd_index, const Register& rn) {
2949     VIXL_ASSERT(allow_macro_instructions_);
2950     SingleEmissionCheckScope guard(this);
2951     ins(vd, vd_index, rn);
2952   }
2953   void Ld1(const VRegister& vt, const MemOperand& src) {
2954     VIXL_ASSERT(allow_macro_instructions_);
2955     SingleEmissionCheckScope guard(this);
2956     ld1(vt, src);
2957   }
2958   void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
2959     VIXL_ASSERT(allow_macro_instructions_);
2960     SingleEmissionCheckScope guard(this);
2961     ld1(vt, vt2, src);
2962   }
2963   void Ld1(const VRegister& vt,
2964            const VRegister& vt2,
2965            const VRegister& vt3,
2966            const MemOperand& src) {
2967     VIXL_ASSERT(allow_macro_instructions_);
2968     SingleEmissionCheckScope guard(this);
2969     ld1(vt, vt2, vt3, src);
2970   }
2971   void Ld1(const VRegister& vt,
2972            const VRegister& vt2,
2973            const VRegister& vt3,
2974            const VRegister& vt4,
2975            const MemOperand& src) {
2976     VIXL_ASSERT(allow_macro_instructions_);
2977     SingleEmissionCheckScope guard(this);
2978     ld1(vt, vt2, vt3, vt4, src);
2979   }
2980   void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
2981     VIXL_ASSERT(allow_macro_instructions_);
2982     SingleEmissionCheckScope guard(this);
2983     ld1(vt, lane, src);
2984   }
2985   void Ld1r(const VRegister& vt, const MemOperand& src) {
2986     VIXL_ASSERT(allow_macro_instructions_);
2987     SingleEmissionCheckScope guard(this);
2988     ld1r(vt, src);
2989   }
2990   void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
2991     VIXL_ASSERT(allow_macro_instructions_);
2992     SingleEmissionCheckScope guard(this);
2993     ld2(vt, vt2, src);
2994   }
2995   void Ld2(const VRegister& vt,
2996            const VRegister& vt2,
2997            int lane,
2998            const MemOperand& src) {
2999     VIXL_ASSERT(allow_macro_instructions_);
3000     SingleEmissionCheckScope guard(this);
3001     ld2(vt, vt2, lane, src);
3002   }
3003   void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
3004     VIXL_ASSERT(allow_macro_instructions_);
3005     SingleEmissionCheckScope guard(this);
3006     ld2r(vt, vt2, src);
3007   }
3008   void Ld3(const VRegister& vt,
3009            const VRegister& vt2,
3010            const VRegister& vt3,
3011            const MemOperand& src) {
3012     VIXL_ASSERT(allow_macro_instructions_);
3013     SingleEmissionCheckScope guard(this);
3014     ld3(vt, vt2, vt3, src);
3015   }
3016   void Ld3(const VRegister& vt,
3017            const VRegister& vt2,
3018            const VRegister& vt3,
3019            int lane,
3020            const MemOperand& src) {
3021     VIXL_ASSERT(allow_macro_instructions_);
3022     SingleEmissionCheckScope guard(this);
3023     ld3(vt, vt2, vt3, lane, src);
3024   }
3025   void Ld3r(const VRegister& vt,
3026             const VRegister& vt2,
3027             const VRegister& vt3,
3028             const MemOperand& src) {
3029     VIXL_ASSERT(allow_macro_instructions_);
3030     SingleEmissionCheckScope guard(this);
3031     ld3r(vt, vt2, vt3, src);
3032   }
3033   void Ld4(const VRegister& vt,
3034            const VRegister& vt2,
3035            const VRegister& vt3,
3036            const VRegister& vt4,
3037            const MemOperand& src) {
3038     VIXL_ASSERT(allow_macro_instructions_);
3039     SingleEmissionCheckScope guard(this);
3040     ld4(vt, vt2, vt3, vt4, src);
3041   }
3042   void Ld4(const VRegister& vt,
3043            const VRegister& vt2,
3044            const VRegister& vt3,
3045            const VRegister& vt4,
3046            int lane,
3047            const MemOperand& src) {
3048     VIXL_ASSERT(allow_macro_instructions_);
3049     SingleEmissionCheckScope guard(this);
3050     ld4(vt, vt2, vt3, vt4, lane, src);
3051   }
3052   void Ld4r(const VRegister& vt,
3053             const VRegister& vt2,
3054             const VRegister& vt3,
3055             const VRegister& vt4,
3056             const MemOperand& src) {
3057     VIXL_ASSERT(allow_macro_instructions_);
3058     SingleEmissionCheckScope guard(this);
3059     ld4r(vt, vt2, vt3, vt4, src);
3060   }
3061   void Mov(const VRegister& vd,
3062            int vd_index,
3063            const VRegister& vn,
3064            int vn_index) {
3065     VIXL_ASSERT(allow_macro_instructions_);
3066     SingleEmissionCheckScope guard(this);
3067     mov(vd, vd_index, vn, vn_index);
3068   }
3069   void Mov(const VRegister& vd, const VRegister& vn, int index) {
3070     VIXL_ASSERT(allow_macro_instructions_);
3071     SingleEmissionCheckScope guard(this);
3072     mov(vd, vn, index);
3073   }
3074   void Mov(const VRegister& vd, int vd_index, const Register& rn) {
3075     VIXL_ASSERT(allow_macro_instructions_);
3076     SingleEmissionCheckScope guard(this);
3077     mov(vd, vd_index, rn);
3078   }
3079   void Mov(const Register& rd, const VRegister& vn, int vn_index) {
3080     VIXL_ASSERT(allow_macro_instructions_);
3081     SingleEmissionCheckScope guard(this);
3082     mov(rd, vn, vn_index);
3083   }
3084   void Movi(const VRegister& vd,
3085             uint64_t imm,
3086             Shift shift = LSL,
3087             int shift_amount = 0);
3088   void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
3089   void Mvni(const VRegister& vd,
3090             const int imm8,
3091             Shift shift = LSL,
3092             const int shift_amount = 0) {
3093     VIXL_ASSERT(allow_macro_instructions_);
3094     SingleEmissionCheckScope guard(this);
3095     mvni(vd, imm8, shift, shift_amount);
3096   }
3097   void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
3098     VIXL_ASSERT(allow_macro_instructions_);
3099     SingleEmissionCheckScope guard(this);
3100     orr(vd, imm8, left_shift);
3101   }
3102   void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
3103     VIXL_ASSERT(allow_macro_instructions_);
3104     SingleEmissionCheckScope guard(this);
3105     scvtf(vd, vn, fbits);
3106   }
3107   void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
3108     VIXL_ASSERT(allow_macro_instructions_);
3109     SingleEmissionCheckScope guard(this);
3110     ucvtf(vd, vn, fbits);
3111   }
3112   void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
3113     VIXL_ASSERT(allow_macro_instructions_);
3114     SingleEmissionCheckScope guard(this);
3115     fcvtzs(vd, vn, fbits);
3116   }
3117   void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
3118     VIXL_ASSERT(allow_macro_instructions_);
3119     SingleEmissionCheckScope guard(this);
3120     fcvtzu(vd, vn, fbits);
3121   }
3122   void St1(const VRegister& vt, const MemOperand& dst) {
3123     VIXL_ASSERT(allow_macro_instructions_);
3124     SingleEmissionCheckScope guard(this);
3125     st1(vt, dst);
3126   }
3127   void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
3128     VIXL_ASSERT(allow_macro_instructions_);
3129     SingleEmissionCheckScope guard(this);
3130     st1(vt, vt2, dst);
3131   }
3132   void St1(const VRegister& vt,
3133            const VRegister& vt2,
3134            const VRegister& vt3,
3135            const MemOperand& dst) {
3136     VIXL_ASSERT(allow_macro_instructions_);
3137     SingleEmissionCheckScope guard(this);
3138     st1(vt, vt2, vt3, dst);
3139   }
3140   void St1(const VRegister& vt,
3141            const VRegister& vt2,
3142            const VRegister& vt3,
3143            const VRegister& vt4,
3144            const MemOperand& dst) {
3145     VIXL_ASSERT(allow_macro_instructions_);
3146     SingleEmissionCheckScope guard(this);
3147     st1(vt, vt2, vt3, vt4, dst);
3148   }
3149   void St1(const VRegister& vt, int lane, const MemOperand& dst) {
3150     VIXL_ASSERT(allow_macro_instructions_);
3151     SingleEmissionCheckScope guard(this);
3152     st1(vt, lane, dst);
3153   }
3154   void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
3155     VIXL_ASSERT(allow_macro_instructions_);
3156     SingleEmissionCheckScope guard(this);
3157     st2(vt, vt2, dst);
3158   }
3159   void St3(const VRegister& vt,
3160            const VRegister& vt2,
3161            const VRegister& vt3,
3162            const MemOperand& dst) {
3163     VIXL_ASSERT(allow_macro_instructions_);
3164     SingleEmissionCheckScope guard(this);
3165     st3(vt, vt2, vt3, dst);
3166   }
3167   void St4(const VRegister& vt,
3168            const VRegister& vt2,
3169            const VRegister& vt3,
3170            const VRegister& vt4,
3171            const MemOperand& dst) {
3172     VIXL_ASSERT(allow_macro_instructions_);
3173     SingleEmissionCheckScope guard(this);
3174     st4(vt, vt2, vt3, vt4, dst);
3175   }
3176   void St2(const VRegister& vt,
3177            const VRegister& vt2,
3178            int lane,
3179            const MemOperand& dst) {
3180     VIXL_ASSERT(allow_macro_instructions_);
3181     SingleEmissionCheckScope guard(this);
3182     st2(vt, vt2, lane, dst);
3183   }
3184   void St3(const VRegister& vt,
3185            const VRegister& vt2,
3186            const VRegister& vt3,
3187            int lane,
3188            const MemOperand& dst) {
3189     VIXL_ASSERT(allow_macro_instructions_);
3190     SingleEmissionCheckScope guard(this);
3191     st3(vt, vt2, vt3, lane, dst);
3192   }
3193   void St4(const VRegister& vt,
3194            const VRegister& vt2,
3195            const VRegister& vt3,
3196            const VRegister& vt4,
3197            int lane,
3198            const MemOperand& dst) {
3199     VIXL_ASSERT(allow_macro_instructions_);
3200     SingleEmissionCheckScope guard(this);
3201     st4(vt, vt2, vt3, vt4, lane, dst);
3202   }
3203   void Smov(const Register& rd, const VRegister& vn, int vn_index) {
3204     VIXL_ASSERT(allow_macro_instructions_);
3205     SingleEmissionCheckScope guard(this);
3206     smov(rd, vn, vn_index);
3207   }
3208   void Umov(const Register& rd, const VRegister& vn, int vn_index) {
3209     VIXL_ASSERT(allow_macro_instructions_);
3210     SingleEmissionCheckScope guard(this);
3211     umov(rd, vn, vn_index);
3212   }
3213   void Crc32b(const Register& rd, const Register& rn, const Register& rm) {
3214     VIXL_ASSERT(allow_macro_instructions_);
3215     SingleEmissionCheckScope guard(this);
3216     crc32b(rd, rn, rm);
3217   }
3218   void Crc32h(const Register& rd, const Register& rn, const Register& rm) {
3219     VIXL_ASSERT(allow_macro_instructions_);
3220     SingleEmissionCheckScope guard(this);
3221     crc32h(rd, rn, rm);
3222   }
3223   void Crc32w(const Register& rd, const Register& rn, const Register& rm) {
3224     VIXL_ASSERT(allow_macro_instructions_);
3225     SingleEmissionCheckScope guard(this);
3226     crc32w(rd, rn, rm);
3227   }
3228   void Crc32x(const Register& rd, const Register& rn, const Register& rm) {
3229     VIXL_ASSERT(allow_macro_instructions_);
3230     SingleEmissionCheckScope guard(this);
3231     crc32x(rd, rn, rm);
3232   }
3233   void Crc32cb(const Register& rd, const Register& rn, const Register& rm) {
3234     VIXL_ASSERT(allow_macro_instructions_);
3235     SingleEmissionCheckScope guard(this);
3236     crc32cb(rd, rn, rm);
3237   }
3238   void Crc32ch(const Register& rd, const Register& rn, const Register& rm) {
3239     VIXL_ASSERT(allow_macro_instructions_);
3240     SingleEmissionCheckScope guard(this);
3241     crc32ch(rd, rn, rm);
3242   }
3243   void Crc32cw(const Register& rd, const Register& rn, const Register& rm) {
3244     VIXL_ASSERT(allow_macro_instructions_);
3245     SingleEmissionCheckScope guard(this);
3246     crc32cw(rd, rn, rm);
3247   }
3248   void Crc32cx(const Register& rd, const Register& rn, const Register& rm) {
3249     VIXL_ASSERT(allow_macro_instructions_);
3250     SingleEmissionCheckScope guard(this);
3251     crc32cx(rd, rn, rm);
3252   }
3253 
3254   template <typename T>
3255   Literal<T>* CreateLiteralDestroyedWithPool(T value) {
3256     return new Literal<T>(value,
3257                           &literal_pool_,
3258                           RawLiteral::kDeletedOnPoolDestruction);
3259   }
3260 
3261   template <typename T>
3262   Literal<T>* CreateLiteralDestroyedWithPool(T high64, T low64) {
3263     return new Literal<T>(high64,
3264                           low64,
3265                           &literal_pool_,
3266                           RawLiteral::kDeletedOnPoolDestruction);
3267   }
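  // Example (sketch): literals created this way are owned by the literal pool
  // and can be loaded with the literal-loading Ldr overloads declared earlier
  // in this class, e.g.:
  //   Literal<uint64_t>* lit =
  //       masm.CreateLiteralDestroyedWithPool<uint64_t>(UINT64_C(0x1234));
  //   __ Ldr(x0, lit);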
3268 
3269   // Push the system stack pointer (sp) down to allow the same to be done to
3270   // the current stack pointer (according to StackPointer()). This must be
3271   // called _before_ accessing the memory.
3272   //
3273   // This is necessary when pushing or otherwise adding things to the stack, to
3274   // satisfy the AAPCS64 constraint that the memory below the system stack
3275   // pointer is not accessed.
3276   //
3277   // This method asserts that StackPointer() is not sp, since the call does
3278   // not make sense in that context.
3279   //
3280   // TODO: This method can only accept values of 'space' that can be encoded in
3281   // one instruction. Refer to the implementation for details.
3282   void BumpSystemStackPointer(const Operand& space);
3283 
3284   virtual bool AllowMacroInstructions() const VIXL_OVERRIDE {
3285     return allow_macro_instructions_;
3286   }
3287 
3288   virtual bool ArePoolsBlocked() const VIXL_OVERRIDE {
3289     return IsLiteralPoolBlocked() && IsVeneerPoolBlocked();
3290   }
3291 
3292   void SetGenerateSimulatorCode(bool value) {
3293     generate_simulator_code_ = value;
3294   }
3295 
3296   bool GenerateSimulatorCode() const { return generate_simulator_code_; }
3297 
3298   size_t GetLiteralPoolSize() const { return literal_pool_.GetSize(); }
3299   VIXL_DEPRECATED("GetLiteralPoolSize", size_t LiteralPoolSize() const) {
3300     return GetLiteralPoolSize();
3301   }
3302 
3303   size_t GetLiteralPoolMaxSize() const { return literal_pool_.GetMaxSize(); }
3304   VIXL_DEPRECATED("GetLiteralPoolMaxSize", size_t LiteralPoolMaxSize() const) {
3305     return GetLiteralPoolMaxSize();
3306   }
3307 
3308   size_t GetVeneerPoolMaxSize() const { return veneer_pool_.GetMaxSize(); }
3309   VIXL_DEPRECATED("GetVeneerPoolMaxSize", size_t VeneerPoolMaxSize() const) {
3310     return GetVeneerPoolMaxSize();
3311   }
3312 
3313   // The number of unresolved branches that may require a veneer.
3314   int GetNumberOfPotentialVeneers() const {
3315     return veneer_pool_.GetNumberOfPotentialVeneers();
3316   }
3317   VIXL_DEPRECATED("GetNumberOfPotentialVeneers",
3318                   int NumberOfPotentialVeneers() const) {
3319     return GetNumberOfPotentialVeneers();
3320   }
3321 
3322   ptrdiff_t GetNextCheckPoint() const {
3323     ptrdiff_t next_checkpoint_for_pools =
3324         std::min(literal_pool_.GetCheckpoint(), veneer_pool_.GetCheckpoint());
3325     return std::min(next_checkpoint_for_pools,
3326                     static_cast<ptrdiff_t>(GetBuffer().GetCapacity()));
3327   }
3328   VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) {
3329     return GetNextCheckPoint();
3330   }
3331 
3332   void EmitLiteralPool(LiteralPool::EmitOption option) {
3333     if (!literal_pool_.IsEmpty()) literal_pool_.Emit(option);
3334 
3335     checkpoint_ = GetNextCheckPoint();
3336     recommended_checkpoint_ = literal_pool_.GetNextRecommendedCheckpoint();
3337   }
3338 
3339   void CheckEmitFor(size_t amount);
3340   void EnsureEmitFor(size_t amount) {
3341     ptrdiff_t offset = amount;
3342     ptrdiff_t max_pools_size =
3343         literal_pool_.GetMaxSize() + veneer_pool_.GetMaxSize();
3344     ptrdiff_t cursor = GetCursorOffset();
3345     if ((cursor >= recommended_checkpoint_) ||
3346         ((cursor + offset + max_pools_size) >= checkpoint_)) {
3347       CheckEmitFor(amount);
3348     }
3349   }
3350 
3351   void CheckEmitPoolsFor(size_t amount);
3352   virtual void EnsureEmitPoolsFor(size_t amount) VIXL_OVERRIDE {
3353     ptrdiff_t offset = amount;
3354     ptrdiff_t max_pools_size =
3355         literal_pool_.GetMaxSize() + veneer_pool_.GetMaxSize();
3356     ptrdiff_t cursor = GetCursorOffset();
3357     if ((cursor >= recommended_checkpoint_) ||
3358         ((cursor + offset + max_pools_size) >= checkpoint_)) {
3359       CheckEmitPoolsFor(amount);
3360     }
3361   }
3362 
3363   // Set the current stack pointer, but don't generate any code.
3364   void SetStackPointer(const Register& stack_pointer) {
3365     VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(stack_pointer));
3366     sp_ = stack_pointer;
3367   }
3368 
3369   // Return the current stack pointer, as set by SetStackPointer.
3370   const Register& StackPointer() const { return sp_; }
3371 
3372   CPURegList* GetScratchRegisterList() { return &tmp_list_; }
3373   VIXL_DEPRECATED("GetScratchRegisterList", CPURegList* TmpList()) {
3374     return GetScratchRegisterList();
3375   }
3376 
3377   CPURegList* GetScratchFPRegisterList() { return &fptmp_list_; }
3378   VIXL_DEPRECATED("GetScratchFPRegisterList", CPURegList* FPTmpList()) {
3379     return GetScratchFPRegisterList();
3380   }
3381 
3382   // Get or set the current (most-deeply-nested) UseScratchRegisterScope.
3383   void SetCurrentScratchRegisterScope(UseScratchRegisterScope* scope) {
3384     current_scratch_scope_ = scope;
3385   }
3386   UseScratchRegisterScope* GetCurrentScratchRegisterScope() {
3387     return current_scratch_scope_;
3388   }
3389 
3390   // Like printf, but print at run-time from generated code.
3391   //
3392   // The caller must ensure that arguments for floating-point placeholders
3393   // (such as %e, %f or %g) are VRegisters in format 1S or 1D, and that
3394   // arguments for integer placeholders are Registers.
3395   //
3396   // At the moment it is only possible to print the value of sp if it is the
3397   // current stack pointer. Otherwise, the MacroAssembler will automatically
3398   // update sp on every push (using BumpSystemStackPointer), so determining its
3399   // value is difficult.
3400   //
3401   // Format placeholders that refer to more than one argument, or to a specific
3402   // argument, are not supported. This includes formats like "%1$d" or "%.*d".
3403   //
3404   // This function automatically preserves caller-saved registers so that
3405   // calling code can use Printf at any point without having to worry about
3406   // corruption. The preservation mechanism generates a lot of code. If this is
3407   // a problem, preserve the important registers manually and then call
3408   // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
3409   // implicitly preserved.
3410   void Printf(const char* format,
3411               CPURegister arg0 = NoCPUReg,
3412               CPURegister arg1 = NoCPUReg,
3413               CPURegister arg2 = NoCPUReg,
3414               CPURegister arg3 = NoCPUReg);
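  // Example usage (sketch):
  //   __ Printf("x0 = 0x%" PRIx64 ", d0 = %g\n", x0, d0);
  // Here x0 is an integer argument and d0 a 1D floating-point argument, as
  // described above.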
3415 
3416   // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
3417   //
3418   // The return code from the system printf call will be returned in x0.
3419   void PrintfNoPreserve(const char* format,
3420                         const CPURegister& arg0 = NoCPUReg,
3421                         const CPURegister& arg1 = NoCPUReg,
3422                         const CPURegister& arg2 = NoCPUReg,
3423                         const CPURegister& arg3 = NoCPUReg);
3424 
3425   // Trace control when running the debug simulator.
3426   //
3427   // For example:
3428   //
3429   // __ Trace(LOG_REGS, TRACE_ENABLE);
3430   // Will add register logging to the trace if it is not already enabled.
3431   //
3432   // __ Trace(LOG_DISASM, TRACE_DISABLE);
3433   // Will stop logging disassembly. It has no effect if the disassembly wasn't
3434   // already being logged.
3435   void Trace(TraceParameters parameters, TraceCommand command);
3436 
3437   // Log the requested data independently of what is being traced.
3438   //
3439   // For example:
3440   //
3441   // __ Log(LOG_FLAGS)
3442   // Will output the flags.
3443   void Log(TraceParameters parameters);
3444 
3445   // Enable or disable instrumentation when an Instrument visitor is attached to
3446   // the simulator.
3447   void EnableInstrumentation();
3448   void DisableInstrumentation();
3449 
3450   // Add a marker to the instrumentation data produced by an Instrument visitor.
3451   // The name is a two character string that will be attached to the marker in
3452   // the output data.
3453   void AnnotateInstrumentation(const char* marker_name);
3454 
3455   // Enable or disable CPU features dynamically. This mechanism allows users to
3456   // strictly check the use of CPU features in different regions of code.
3457   void SetSimulatorCPUFeatures(const CPUFeatures& features);
3458   void EnableSimulatorCPUFeatures(const CPUFeatures& features);
3459   void DisableSimulatorCPUFeatures(const CPUFeatures& features);
3460   void SaveSimulatorCPUFeatures();
3461   void RestoreSimulatorCPUFeatures();
3462 
3463   LiteralPool* GetLiteralPool() { return &literal_pool_; }
3464 
3465 // Support for simulated runtime calls.
3466 
3467 // `CallRuntime` requires variadic templating, that is only available from
3468 // C++11.
3469 #if __cplusplus >= 201103L
3470 #define VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
3471 #endif  // #if __cplusplus >= 201103L
3472 
3473 #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
3474   template <typename R, typename... P>
3475   void CallRuntimeHelper(R (*function)(P...), RuntimeCallType call_type);
3476 
3477   template <typename R, typename... P>
3478   void CallRuntime(R (*function)(P...)) {
3479     CallRuntimeHelper(function, kCallRuntime);
3480   }
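  // Example (sketch), assuming a hypothetical C function
  // `int64_t Add64(int64_t a, int64_t b)` with its arguments set up in the
  // usual AAPCS64 argument registers:
  //   __ Mov(x0, 1);
  //   __ Mov(x1, 2);
  //   __ CallRuntime(Add64);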
3481 
3482   template <typename R, typename... P>
3483   void TailCallRuntime(R (*function)(P...)) {
3484     CallRuntimeHelper(function, kTailCallRuntime);
3485   }
3486 #endif  // #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
3487 
3488  protected:
3489   void BlockLiteralPool() { literal_pool_.Block(); }
3490   void ReleaseLiteralPool() { literal_pool_.Release(); }
3491   bool IsLiteralPoolBlocked() const { return literal_pool_.IsBlocked(); }
3492   void BlockVeneerPool() { veneer_pool_.Block(); }
3493   void ReleaseVeneerPool() { veneer_pool_.Release(); }
3494   bool IsVeneerPoolBlocked() const { return veneer_pool_.IsBlocked(); }
3495 
3496   virtual void BlockPools() VIXL_OVERRIDE {
3497     BlockLiteralPool();
3498     BlockVeneerPool();
3499   }
3500 
3501   virtual void ReleasePools() VIXL_OVERRIDE {
3502     ReleaseLiteralPool();
3503     ReleaseVeneerPool();
3504   }
3505 
3506   // The scopes below need to be able to block and release a particular pool.
3507   // TODO: Consider removing those scopes or move them to
3508   // code-generation-scopes-vixl.h.
3509   friend class BlockPoolsScope;
3510   friend class BlockLiteralPoolScope;
3511   friend class BlockVeneerPoolScope;
3512 
3513   virtual void SetAllowMacroInstructions(bool value) VIXL_OVERRIDE {
3514     allow_macro_instructions_ = value;
3515   }
3516 
3517   // Helper used to query information about code generation and to generate
3518   // code for `csel`.
3519   // Here and for the related helpers below:
3520   // - Code is generated when `masm` is not `NULL`.
3521   // - On return and when set, `should_synthesise_left` and
3522   //   `should_synthesise_right` will indicate whether `left` and `right`
3523   //   should be synthesized in a temporary register.
3524   static void CselHelper(MacroAssembler* masm,
3525                          const Register& rd,
3526                          Operand left,
3527                          Operand right,
3528                          Condition cond,
3529                          bool* should_synthesise_left = NULL,
3530                          bool* should_synthesise_right = NULL);
3531 
3532   // The helper returns `true` if it can handle the specified arguments.
3533   // Also see comments for `CselHelper()`.
3534   static bool CselSubHelperTwoImmediates(MacroAssembler* masm,
3535                                          const Register& rd,
3536                                          int64_t left,
3537                                          int64_t right,
3538                                          Condition cond,
3539                                          bool* should_synthesise_left,
3540                                          bool* should_synthesise_right);
3541 
3542   // See comments for `CselHelper()`.
3543   static bool CselSubHelperTwoOrderedImmediates(MacroAssembler* masm,
3544                                                 const Register& rd,
3545                                                 int64_t left,
3546                                                 int64_t right,
3547                                                 Condition cond);
3548 
3549   // See comments for `CselHelper()`.
3550   static void CselSubHelperRightSmallImmediate(MacroAssembler* masm,
3551                                                UseScratchRegisterScope* temps,
3552                                                const Register& rd,
3553                                                const Operand& left,
3554                                                const Operand& right,
3555                                                Condition cond,
3556                                                bool* should_synthesise_left);
3557 
3558  private:
3559   // The actual Push and Pop implementations. These don't generate any code
3560   // other than that required for the push or pop. This allows
3561   // (Push|Pop)CPURegList to bundle together setup code for a large block of
3562   // registers.
3563   //
3564   // Note that size is per register, and is specified in bytes.
3565   void PushHelper(int count,
3566                   int size,
3567                   const CPURegister& src0,
3568                   const CPURegister& src1,
3569                   const CPURegister& src2,
3570                   const CPURegister& src3);
3571   void PopHelper(int count,
3572                  int size,
3573                  const CPURegister& dst0,
3574                  const CPURegister& dst1,
3575                  const CPURegister& dst2,
3576                  const CPURegister& dst3);
3577 
3578   void Movi16bitHelper(const VRegister& vd, uint64_t imm);
3579   void Movi32bitHelper(const VRegister& vd, uint64_t imm);
3580   void Movi64bitHelper(const VRegister& vd, uint64_t imm);
3581 
3582   // Perform necessary maintenance operations before a push or pop.
3583   //
3584   // Note that size is per register, and is specified in bytes.
3585   void PrepareForPush(int count, int size);
3586   void PrepareForPop(int count, int size);
3587 
3588   // The actual implementation of load and store operations for CPURegList.
3589   enum LoadStoreCPURegListAction { kLoad, kStore };
3590   void LoadStoreCPURegListHelper(LoadStoreCPURegListAction operation,
3591                                  CPURegList registers,
3592                                  const MemOperand& mem);
3593   // Returns a MemOperand suitable for loading or storing a CPURegList at `mem`.
3594   // This helper may allocate registers from `scratch_scope` and generate code
3595   // to compute an intermediate address. The resulting MemOperand is only valid
3596   // as long as `scratch_scope` remains valid.
3597   MemOperand BaseMemOperandForLoadStoreCPURegList(
3598       const CPURegList& registers,
3599       const MemOperand& mem,
3600       UseScratchRegisterScope* scratch_scope);
3601 
3602   bool LabelIsOutOfRange(Label* label, ImmBranchType branch_type) {
3603     return !Instruction::IsValidImmPCOffset(branch_type,
3604                                             label->GetLocation() -
3605                                                 GetCursorOffset());
3606   }
3607 
3608   void ConfigureSimulatorCPUFeaturesHelper(const CPUFeatures& features,
3609                                            DebugHltOpcode action);
3610 
3611   // Tell whether any of the macro instructions can be used. When false, the
3612   // MacroAssembler will assert if a method which can emit a variable number
3613   // of instructions is called.
3614   bool allow_macro_instructions_;
3615 
3616   // Indicates whether we should generate simulator or native code.
3617   bool generate_simulator_code_;
3618 
3619   // The register to use as a stack pointer for stack operations.
3620   Register sp_;
3621 
3622   // Scratch registers available for use by the MacroAssembler.
3623   CPURegList tmp_list_;
3624   CPURegList fptmp_list_;
3625 
3626   UseScratchRegisterScope* current_scratch_scope_;
3627 
3628   LiteralPool literal_pool_;
3629   VeneerPool veneer_pool_;
3630 
3631   ptrdiff_t checkpoint_;
3632   ptrdiff_t recommended_checkpoint_;
3633 
3634   friend class Pool;
3635   friend class LiteralPool;
3636 };
3637 
3638 
3639 inline size_t VeneerPool::GetOtherPoolsMaxSize() const {
3640   return masm_->GetLiteralPoolMaxSize();
3641 }
3642 
3643 
3644 inline size_t LiteralPool::GetOtherPoolsMaxSize() const {
3645   return masm_->GetVeneerPoolMaxSize();
3646 }
3647 
3648 
3649 inline void LiteralPool::SetNextRecommendedCheckpoint(ptrdiff_t offset) {
3650   masm_->recommended_checkpoint_ =
3651       std::min(masm_->recommended_checkpoint_, offset);
3652   recommended_checkpoint_ = offset;
3653 }
3654 
3655 class InstructionAccurateScope : public ExactAssemblyScope {
3656  public:
3657   VIXL_DEPRECATED("ExactAssemblyScope",
3658                   InstructionAccurateScope(MacroAssembler* masm,
3659                                            int64_t count,
3660                                            SizePolicy size_policy = kExactSize))
3661       : ExactAssemblyScope(masm, count * kInstructionSize, size_policy) {}
3662 };
3663 
3664 class BlockLiteralPoolScope {
3665  public:
3666   explicit BlockLiteralPoolScope(MacroAssembler* masm) : masm_(masm) {
3667     masm_->BlockLiteralPool();
3668   }
3669 
3670   ~BlockLiteralPoolScope() { masm_->ReleaseLiteralPool(); }
3671 
3672  private:
3673   MacroAssembler* masm_;
3674 };
3675 
3676 
3677 class BlockVeneerPoolScope {
3678  public:
3679   explicit BlockVeneerPoolScope(MacroAssembler* masm) : masm_(masm) {
3680     masm_->BlockVeneerPool();
3681   }
3682 
3683   ~BlockVeneerPoolScope() { masm_->ReleaseVeneerPool(); }
3684 
3685  private:
3686   MacroAssembler* masm_;
3687 };
3688 
3689 
3690 class BlockPoolsScope {
3691  public:
3692   explicit BlockPoolsScope(MacroAssembler* masm) : masm_(masm) {
3693     masm_->BlockPools();
3694   }
3695 
3696   ~BlockPoolsScope() { masm_->ReleasePools(); }
3697 
3698  private:
3699   MacroAssembler* masm_;
3700 };
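// Example usage of the pool-blocking scopes above (sketch):
//   {
//     BlockPoolsScope scope(&masm);
//     // Code emitted here will not be interleaved with literal or veneer
//     // pool emission.
//   }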
3701 
3702 
3703 // This scope utility allows scratch registers to be managed safely. The
3704 // MacroAssembler's GetScratchRegisterList() (and GetScratchFPRegisterList()) is
3705 // used as a pool of scratch registers. These registers can be allocated on
3706 // demand, and will be returned at the end of the scope.
3707 //
3708 // When the scope ends, the MacroAssembler's lists will be restored to their
3709 // original state, even if the lists were modified by some other means.
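//
// Example usage (sketch):
//   {
//     UseScratchRegisterScope temps(&masm);
//     Register scratch = temps.AcquireX();
//     // Use `scratch` here; it is returned to the scratch list when the
//     // scope ends.
//   }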
3710 class UseScratchRegisterScope {
3711  public:
3712   // This constructor implicitly calls `Open` to initialise the scope (`masm`
3713   // must not be `NULL`), so it is ready to use immediately after it has been
3714   // constructed.
3715   explicit UseScratchRegisterScope(MacroAssembler* masm)
3716       : masm_(NULL), parent_(NULL), old_available_(0), old_availablefp_(0) {
3717     Open(masm);
3718   }
3719   // This constructor does not implicitly initialise the scope. Instead, the
3720   // user is required to explicitly call the `Open` function before using the
3721   // scope.
3722   UseScratchRegisterScope()
3723       : masm_(NULL), parent_(NULL), old_available_(0), old_availablefp_(0) {}
3724 
3725   // This function performs the actual initialisation work.
3726   void Open(MacroAssembler* masm);
3727 
3728   // The destructor always implicitly calls the `Close` function.
3729   ~UseScratchRegisterScope() { Close(); }
3730 
3731   // This function performs the cleaning-up work. It must succeed even if the
3732   // scope has not been opened. It is safe to call multiple times.
3733   void Close();
3734 
3735 
3736   bool IsAvailable(const CPURegister& reg) const;
3737 
3738 
3739   // Take a register from the appropriate temps list. It will be returned
3740   // automatically when the scope ends.
3741   Register AcquireW() {
3742     return AcquireNextAvailable(masm_->GetScratchRegisterList()).W();
3743   }
3744   Register AcquireX() {
3745     return AcquireNextAvailable(masm_->GetScratchRegisterList()).X();
3746   }
3747   VRegister AcquireH() {
3748     return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).H();
3749   }
3750   VRegister AcquireS() {
3751     return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).S();
3752   }
3753   VRegister AcquireD() {
3754     return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).D();
3755   }
3756 
3757 
3758   Register AcquireRegisterOfSize(int size_in_bits);
3759   Register AcquireSameSizeAs(const Register& reg) {
3760     return AcquireRegisterOfSize(reg.GetSizeInBits());
3761   }
3762   VRegister AcquireVRegisterOfSize(int size_in_bits);
3763   VRegister AcquireSameSizeAs(const VRegister& reg) {
3764     return AcquireVRegisterOfSize(reg.GetSizeInBits());
3765   }
3766   CPURegister AcquireCPURegisterOfSize(int size_in_bits) {
3767     return masm_->GetScratchRegisterList()->IsEmpty()
3768                ? CPURegister(AcquireVRegisterOfSize(size_in_bits))
3769                : CPURegister(AcquireRegisterOfSize(size_in_bits));
3770   }
3771 
3772 
3773   // Explicitly release an acquired (or excluded) register, putting it back in
3774   // the appropriate temps list.
3775   void Release(const CPURegister& reg);
3776 
3777 
3778   // Make the specified registers available as scratch registers for the
3779   // duration of this scope.
3780   void Include(const CPURegList& list);
3781   void Include(const Register& reg1,
3782                const Register& reg2 = NoReg,
3783                const Register& reg3 = NoReg,
3784                const Register& reg4 = NoReg);
3785   void Include(const VRegister& reg1,
3786                const VRegister& reg2 = NoVReg,
3787                const VRegister& reg3 = NoVReg,
3788                const VRegister& reg4 = NoVReg);
3789 
3790 
3791   // Make sure that the specified registers are not available in this scope.
3792   // This can be used to prevent helper functions from using sensitive
3793   // registers, for example.
3794   void Exclude(const CPURegList& list);
3795   void Exclude(const Register& reg1,
3796                const Register& reg2 = NoReg,
3797                const Register& reg3 = NoReg,
3798                const Register& reg4 = NoReg);
3799   void Exclude(const VRegister& reg1,
3800                const VRegister& reg2 = NoVReg,
3801                const VRegister& reg3 = NoVReg,
3802                const VRegister& reg4 = NoVReg);
3803   void Exclude(const CPURegister& reg1,
3804                const CPURegister& reg2 = NoCPUReg,
3805                const CPURegister& reg3 = NoCPUReg,
3806                const CPURegister& reg4 = NoCPUReg);
3807 
3808 
3809   // Prevent any scratch registers from being used in this scope.
3810   void ExcludeAll();
3811 
3812  private:
3813   static CPURegister AcquireNextAvailable(CPURegList* available);
3814 
3815   static void ReleaseByCode(CPURegList* available, int code);
3816 
3817   static void ReleaseByRegList(CPURegList* available, RegList regs);
3818 
3819   static void IncludeByRegList(CPURegList* available, RegList exclude);
3820 
3821   static void ExcludeByRegList(CPURegList* available, RegList exclude);
3822 
3823   // The MacroAssembler maintains a list of available scratch registers, and
3824   // also keeps track of the most recently-opened scope so that on destruction
3825   // we can check that scopes do not outlive their parents.
3826   MacroAssembler* masm_;
3827   UseScratchRegisterScope* parent_;
3828 
3829   // The state of the available lists at the start of this scope.
3830   RegList old_available_;    // kRegister
3831   RegList old_availablefp_;  // kVRegister
3832 
3833   // Disallow copy constructor and operator=.
3834   VIXL_DEBUG_NO_RETURN UseScratchRegisterScope(const UseScratchRegisterScope&) {
3835     VIXL_UNREACHABLE();
3836   }
3837   VIXL_DEBUG_NO_RETURN void operator=(const UseScratchRegisterScope&) {
3838     VIXL_UNREACHABLE();
3839   }
3840 };
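
// A minimal usage sketch (illustrative only): `GenerateCopyWord`, `src` and
// `dst` are hypothetical, and `masm` is assumed to point to a live
// MacroAssembler. The X register acquired below is taken from the scratch
// list and is returned automatically when `temps` goes out of scope.
//
//   void GenerateCopyWord(MacroAssembler* masm,
//                         const MemOperand& src,
//                         const MemOperand& dst) {
//     UseScratchRegisterScope temps(masm);
//     temps.Exclude(x16);               // Keep x16 out of the scratch pool.
//     Register scratch = temps.AcquireX();
//     masm->Ldr(scratch, src);
//     masm->Str(scratch, dst);
//   }  // `scratch` is released here by ~UseScratchRegisterScope().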
3841 
3842 
3843 // Like CPUFeaturesScope, but this scope also generates simulation
3844 // pseudo-instructions to control a Simulator's CPUFeatures dynamically.
3845 //
3846 // One major difference from CPUFeaturesScope is that this scope cannot offer
3847 // a writable "CPUFeatures* GetCPUFeatures()", because every write to the
3848 // features needs a corresponding macro instruction.
3849 class SimulationCPUFeaturesScope {
3850  public:
3851   explicit SimulationCPUFeaturesScope(
3852       MacroAssembler* masm,
3853       CPUFeatures::Feature feature0 = CPUFeatures::kNone,
3854       CPUFeatures::Feature feature1 = CPUFeatures::kNone,
3855       CPUFeatures::Feature feature2 = CPUFeatures::kNone,
3856       CPUFeatures::Feature feature3 = CPUFeatures::kNone)
3857       : masm_(masm),
3858         cpu_features_scope_(masm, feature0, feature1, feature2, feature3) {
3859     masm_->SaveSimulatorCPUFeatures();
3860     masm_->EnableSimulatorCPUFeatures(
3861         CPUFeatures(feature0, feature1, feature2, feature3));
3862   }
3863 
3864   SimulationCPUFeaturesScope(MacroAssembler* masm, const CPUFeatures& other)
3865       : masm_(masm), cpu_features_scope_(masm, other) {
3866     masm_->SaveSimulatorCPUFeatures();
3867     masm_->EnableSimulatorCPUFeatures(other);
3868   }
3869 
3870   ~SimulationCPUFeaturesScope() { masm_->RestoreSimulatorCPUFeatures(); }
3871 
3872   const CPUFeatures* GetCPUFeatures() const {
3873     return cpu_features_scope_.GetCPUFeatures();
3874   }
3875 
3876   void SetCPUFeatures(const CPUFeatures& cpu_features) {
3877     cpu_features_scope_.SetCPUFeatures(cpu_features);
3878     masm_->SetSimulatorCPUFeatures(cpu_features);
3879   }
3880 
3881  private:
3882   MacroAssembler* masm_;
3883   CPUFeaturesScope cpu_features_scope_;
3884 };
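
// A minimal usage sketch (illustrative only; `masm` is assumed to point to a
// live MacroAssembler that is generating simulator code). On entry the scope
// enables the listed features for both the assembler and the simulated CPU;
// on exit the destructor restores the simulator's previous features.
//
//   {
//     SimulationCPUFeaturesScope fp_scope(masm, CPUFeatures::kFP,
//                                         CPUFeatures::kNEON);
//     masm->Fadd(v0.V4S(), v1.V4S(), v2.V4S());
//   }  // The saved simulator CPU features are restored here.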
3885 
3886 
3887 // Variadic templates are only available from C++11.
3888 #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
3889 
3890 // `R` stands for 'return type', and `P` for 'parameter types'.
3891 template <typename R, typename... P>
3892 void MacroAssembler::CallRuntimeHelper(R (*function)(P...),
3893                                        RuntimeCallType call_type) {
3894   if (generate_simulator_code_) {
3895 #ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
3896     uintptr_t runtime_call_wrapper_address = reinterpret_cast<uintptr_t>(
3897         &(Simulator::RuntimeCallStructHelper<R, P...>::Wrapper));
3898     uintptr_t function_address = reinterpret_cast<uintptr_t>(function);
3899 
3900     EmissionCheckScope guard(this,
3901                              kRuntimeCallLength,
3902                              CodeBufferCheckScope::kExactSize);
3903     Label start;
3904     bind(&start);
3905     {
3906       ExactAssemblyScope scope(this, kInstructionSize);
3907       hlt(kRuntimeCallOpcode);
3908     }
3909     VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
3910                 kRuntimeCallWrapperOffset);
3911     dc(runtime_call_wrapper_address);
3912     VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
3913                 kRuntimeCallFunctionOffset);
3914     dc(function_address);
3915     VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) == kRuntimeCallTypeOffset);
3916     dc32(call_type);
3917     VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) == kRuntimeCallLength);
3918 #else
3919     VIXL_UNREACHABLE();
3920 #endif  // #ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
3921   } else {
3922     UseScratchRegisterScope temps(this);
3923     Register temp = temps.AcquireX();
3924     Mov(temp, reinterpret_cast<uint64_t>(function));
3925     if (call_type == kTailCallRuntime) {
3926       Br(temp);
3927     } else {
3928       VIXL_ASSERT(call_type == kCallRuntime);
3929       Blr(temp);
3930     }
3931   }
3932 }
3933 
3934 #endif  // #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
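
// A minimal usage sketch for the runtime-call support above (illustrative
// only): `AddInts` is a hypothetical host function, and `CallRuntime` is
// assumed to be the public wrapper declared earlier in this header, which
// forwards to CallRuntimeHelper with kCallRuntime. Under the simulator the
// call is encoded as an `hlt` pseudo-instruction followed by the wrapper and
// function addresses; natively it becomes a Blr through a scratch register.
//
//   int64_t AddInts(int64_t a, int64_t b) { return a + b; }
//   ...
//   masm->CallRuntime(AddInts);  // Emits the simulator pseudo-call or a Blr.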
3935 
3936 }  // namespace aarch64
3937 
3938 // Required InvalSet template specialisations.
3939 // TODO: These template specialisations should not live in this file.  Move
3940 // VeneerPool out of the aarch64 namespace in order to share its implementation
3941 // later.
3942 template <>
3943 inline ptrdiff_t InvalSet<aarch64::VeneerPool::BranchInfo,
3944                           aarch64::VeneerPool::kNPreallocatedInfos,
3945                           ptrdiff_t,
3946                           aarch64::VeneerPool::kInvalidOffset,
3947                           aarch64::VeneerPool::kReclaimFrom,
3948                           aarch64::VeneerPool::kReclaimFactor>::
3949     GetKey(const aarch64::VeneerPool::BranchInfo& branch_info) {
3950   return branch_info.first_unreacheable_pc_;
3951 }
3952 template <>
3953 inline void InvalSet<aarch64::VeneerPool::BranchInfo,
3954                      aarch64::VeneerPool::kNPreallocatedInfos,
3955                      ptrdiff_t,
3956                      aarch64::VeneerPool::kInvalidOffset,
3957                      aarch64::VeneerPool::kReclaimFrom,
3958                      aarch64::VeneerPool::kReclaimFactor>::
3959     SetKey(aarch64::VeneerPool::BranchInfo* branch_info, ptrdiff_t key) {
3960   branch_info->first_unreacheable_pc_ = key;
3961 }
3962 
3963 }  // namespace vixl
3964 
3965 #endif  // VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
3966