1 // Copyright 2015, VIXL authors
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 //   * Redistributions of source code must retain the above copyright notice,
8 //     this list of conditions and the following disclaimer.
9 //   * Redistributions in binary form must reproduce the above copyright notice,
10 //     this list of conditions and the following disclaimer in the documentation
11 //     and/or other materials provided with the distribution.
12 //   * Neither the name of ARM Limited nor the names of its contributors may be
13 //     used to endorse or promote products derived from this software without
14 //     specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 
27 #ifndef VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
28 #define VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
29 
30 #include <algorithm>
31 #include <limits>
32 
33 #include "../code-generation-scopes-vixl.h"
34 #include "../globals-vixl.h"
35 #include "../macro-assembler-interface.h"
36 
37 #include "assembler-aarch64.h"
38 #include "debugger-aarch64.h"
39 #include "instrument-aarch64.h"
40 // Required in order to generate debugging instructions for the simulator. This
41 // is needed regardless of whether the simulator is included or not, since
42 // generating simulator-specific instructions is controlled at runtime.
43 #include "simulator-constants-aarch64.h"
44 
45 
46 #define LS_MACRO_LIST(V)                                     \
47   V(Ldrb, Register&, rt, LDRB_w)                             \
48   V(Strb, Register&, rt, STRB_w)                             \
49   V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
50   V(Ldrh, Register&, rt, LDRH_w)                             \
51   V(Strh, Register&, rt, STRH_w)                             \
52   V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
53   V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
54   V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
55   V(Ldrsw, Register&, rt, LDRSW_x)
56 
57 
58 #define LSPAIR_MACRO_LIST(V)                             \
59   V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
60   V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
61   V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
62 
63 namespace vixl {
64 namespace aarch64 {
65 
66 // Forward declarations
67 class MacroAssembler;
68 class UseScratchRegisterScope;
69 
70 class Pool {
71  public:
72   explicit Pool(MacroAssembler* masm)
73       : checkpoint_(kNoCheckpointRequired), masm_(masm) {
74     Reset();
75   }
76 
77   void Reset() {
78     checkpoint_ = kNoCheckpointRequired;
79     monitor_ = 0;
80   }
81 
82   void Block() { monitor_++; }
83   void Release();
84   bool IsBlocked() const { return monitor_ != 0; }
85 
86   static const ptrdiff_t kNoCheckpointRequired = PTRDIFF_MAX;
87 
88   void SetNextCheckpoint(ptrdiff_t checkpoint);
89   ptrdiff_t GetCheckpoint() const { return checkpoint_; }
90   VIXL_DEPRECATED("GetCheckpoint", ptrdiff_t checkpoint() const) {
91     return GetCheckpoint();
92   }
93 
94   enum EmitOption { kBranchRequired, kNoBranchRequired };
95 
96  protected:
97   // Next buffer offset at which a check is required for this pool.
98   ptrdiff_t checkpoint_;
99   // Indicates whether the emission of this pool is blocked.
100   int monitor_;
101   // The MacroAssembler using this pool.
102   MacroAssembler* masm_;
103 };
104 
105 
106 class LiteralPool : public Pool {
107  public:
108   explicit LiteralPool(MacroAssembler* masm);
109   ~LiteralPool();
110   void Reset();
111 
112   void AddEntry(RawLiteral* literal);
113   bool IsEmpty() const { return entries_.empty(); }
114   size_t GetSize() const;
115   VIXL_DEPRECATED("GetSize", size_t Size() const) { return GetSize(); }
116 
117   size_t GetMaxSize() const;
118   VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); }
119 
120   size_t GetOtherPoolsMaxSize() const;
121   VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) {
122     return GetOtherPoolsMaxSize();
123   }
124 
125   void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired);
126   void Emit(EmitOption option = kNoBranchRequired);
127 
128   void SetNextRecommendedCheckpoint(ptrdiff_t offset);
129   ptrdiff_t GetNextRecommendedCheckpoint();
130   VIXL_DEPRECATED("GetNextRecommendedCheckpoint",
131                   ptrdiff_t NextRecommendedCheckpoint()) {
132     return GetNextRecommendedCheckpoint();
133   }
134 
135   void UpdateFirstUse(ptrdiff_t use_position);
136 
137   void DeleteOnDestruction(RawLiteral* literal) {
138     deleted_on_destruction_.push_back(literal);
139   }
140 
141   // Recommended, not exact, since the pool can be blocked for short periods.
142   static const ptrdiff_t kRecommendedLiteralPoolRange = 128 * KBytes;
143 
144  private:
145   std::vector<RawLiteral*> entries_;
146   size_t size_;
147   ptrdiff_t first_use_;
148   // The parent class `Pool` provides a `checkpoint_`, which is the buffer
149   // offset before which a check *must* occur. This recommended checkpoint
150   // indicates when we would like to start emitting the constant pool. The
151   // MacroAssembler can, but does not have to, check the buffer when the
152   // checkpoint is reached.
153   ptrdiff_t recommended_checkpoint_;
154 
155   std::vector<RawLiteral*> deleted_on_destruction_;
156 };
157 
158 
159 inline size_t LiteralPool::GetSize() const {
160   // Account for the pool header.
161   return size_ + kInstructionSize;
162 }
163 
164 
165 inline size_t LiteralPool::GetMaxSize() const {
166   // Account for the potential branch over the pool.
167   return GetSize() + kInstructionSize;
168 }
169 
170 
171 inline ptrdiff_t LiteralPool::GetNextRecommendedCheckpoint() {
172   return first_use_ + kRecommendedLiteralPoolRange;
173 }
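// Worked example (illustrative only): if the earliest pending literal was
// first referenced at buffer offset 0x1000, then
//   GetNextRecommendedCheckpoint() == 0x1000 + kRecommendedLiteralPoolRange
//                                  == 0x1000 + 128 * KBytes,
// i.e. the MacroAssembler is encouraged, but not forced, to emit the pool
// before the buffer grows past that offset.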
174 
175 
176 class VeneerPool : public Pool {
177  public:
178   explicit VeneerPool(MacroAssembler* masm) : Pool(masm) {}
179 
180   void Reset();
181 
182   void Block() { monitor_++; }
183   void Release();
184   bool IsBlocked() const { return monitor_ != 0; }
185   bool IsEmpty() const { return unresolved_branches_.IsEmpty(); }
186 
187   class BranchInfo {
188    public:
189     BranchInfo()
190         : max_reachable_pc_(0),
191           pc_offset_(0),
192           label_(NULL),
193           branch_type_(UnknownBranchType) {}
194     BranchInfo(ptrdiff_t offset, Label* label, ImmBranchType branch_type)
195         : pc_offset_(offset), label_(label), branch_type_(branch_type) {
196       max_reachable_pc_ =
197           pc_offset_ + Instruction::GetImmBranchForwardRange(branch_type_);
198     }
199 
200     static bool IsValidComparison(const BranchInfo& branch_1,
201                                   const BranchInfo& branch_2) {
202       // BranchInfo objects are always compared against other objects with
203       // the same branch type.
204       if (branch_1.branch_type_ != branch_2.branch_type_) {
205         return false;
206       }
207       // Since we should never have two branch infos with the same offset, at
208       // first glance it looks like we should check that the offsets differ.
209       // However, the operators may also be used to *search* for a branch info
210       // in the set.
211       bool same_offsets = (branch_1.pc_offset_ == branch_2.pc_offset_);
212       return (!same_offsets ||
213               ((branch_1.label_ == branch_2.label_) &&
214                (branch_1.max_reachable_pc_ == branch_2.max_reachable_pc_)));
215     }
216 
217     // We must provide comparison operators to work with InvalSet.
218     bool operator==(const BranchInfo& other) const {
219       VIXL_ASSERT(IsValidComparison(*this, other));
220       return pc_offset_ == other.pc_offset_;
221     }
222     bool operator<(const BranchInfo& other) const {
223       VIXL_ASSERT(IsValidComparison(*this, other));
224       return pc_offset_ < other.pc_offset_;
225     }
226     bool operator<=(const BranchInfo& other) const {
227       VIXL_ASSERT(IsValidComparison(*this, other));
228       return pc_offset_ <= other.pc_offset_;
229     }
230     bool operator>(const BranchInfo& other) const {
231       VIXL_ASSERT(IsValidComparison(*this, other));
232       return pc_offset_ > other.pc_offset_;
233     }
234 
235     // Maximum position reachable by the branch using a positive branch offset.
236     ptrdiff_t max_reachable_pc_;
237     // Offset of the branch in the code generation buffer.
238     ptrdiff_t pc_offset_;
239     // The label branched to.
240     Label* label_;
241     ImmBranchType branch_type_;
242   };
243 
244   bool BranchTypeUsesVeneers(ImmBranchType type) {
245     return (type != UnknownBranchType) && (type != UncondBranchType);
246   }
247 
248   void RegisterUnresolvedBranch(ptrdiff_t branch_pos,
249                                 Label* label,
250                                 ImmBranchType branch_type);
251   void DeleteUnresolvedBranchInfoForLabel(Label* label);
252 
253   bool ShouldEmitVeneer(int64_t max_reachable_pc, size_t amount);
254   bool ShouldEmitVeneers(size_t amount) {
255     return ShouldEmitVeneer(unresolved_branches_.GetFirstLimit(), amount);
256   }
257 
258   void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired);
259   void Emit(EmitOption option, size_t margin);
260 
261   // The code size generated for a veneer. Currently one branch instruction.
262   // This is for code size checking purposes, and can be extended in the
263   // future, for example if we decide to add NOPs between the veneers.
264   static const int kVeneerCodeSize = 1 * kInstructionSize;
265   // The maximum size of code other than veneers that can be generated when
266   // emitting a veneer pool. Currently there can be an additional branch to jump
267   // over the pool.
268   static const int kPoolNonVeneerCodeSize = 1 * kInstructionSize;
269 
270   void UpdateNextCheckPoint() { SetNextCheckpoint(GetNextCheckPoint()); }
271 
272   int GetNumberOfPotentialVeneers() const {
273     return static_cast<int>(unresolved_branches_.GetSize());
274   }
275   VIXL_DEPRECATED("GetNumberOfPotentialVeneers",
276                   int NumberOfPotentialVeneers() const) {
277     return GetNumberOfPotentialVeneers();
278   }
279 
280   size_t GetMaxSize() const {
281     return kPoolNonVeneerCodeSize +
282            unresolved_branches_.GetSize() * kVeneerCodeSize;
283   }
284   VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); }
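  // Worked example (illustrative only): with three unresolved branches
  // registered, GetMaxSize() == kPoolNonVeneerCodeSize + 3 * kVeneerCodeSize
  // == 4 + 3 * 4 == 16 bytes: one branch over the pool plus one veneer branch
  // per unresolved branch.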
285 
286   size_t GetOtherPoolsMaxSize() const;
287   VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) {
288     return GetOtherPoolsMaxSize();
289   }
290 
291   static const int kNPreallocatedInfos = 4;
292   static const ptrdiff_t kInvalidOffset = PTRDIFF_MAX;
293   static const size_t kReclaimFrom = 128;
294   static const size_t kReclaimFactor = 16;
295 
296  private:
297   typedef InvalSet<BranchInfo,
298                    kNPreallocatedInfos,
299                    ptrdiff_t,
300                    kInvalidOffset,
301                    kReclaimFrom,
302                    kReclaimFactor> BranchInfoTypedSetBase;
303   typedef InvalSetIterator<BranchInfoTypedSetBase> BranchInfoTypedSetIterBase;
304 
305   class BranchInfoTypedSet : public BranchInfoTypedSetBase {
306    public:
307     BranchInfoTypedSet() : BranchInfoTypedSetBase() {}
308 
309     ptrdiff_t GetFirstLimit() {
310       if (empty()) {
311         return kInvalidOffset;
312       }
313       return GetMinElementKey();
314     }
315     VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) {
316       return GetFirstLimit();
317     }
318   };
319 
320   class BranchInfoTypedSetIterator : public BranchInfoTypedSetIterBase {
321    public:
322     BranchInfoTypedSetIterator() : BranchInfoTypedSetIterBase(NULL) {}
323     explicit BranchInfoTypedSetIterator(BranchInfoTypedSet* typed_set)
324         : BranchInfoTypedSetIterBase(typed_set) {}
325 
326     // TODO: Remove these and use the STL-like interface instead.
327     using BranchInfoTypedSetIterBase::Advance;
328     using BranchInfoTypedSetIterBase::Current;
329   };
330 
331   class BranchInfoSet {
332    public:
333     void insert(BranchInfo branch_info) {
334       ImmBranchType type = branch_info.branch_type_;
335       VIXL_ASSERT(IsValidBranchType(type));
336       typed_set_[BranchIndexFromType(type)].insert(branch_info);
337     }
338 
339     void erase(BranchInfo branch_info) {
340       if (IsValidBranchType(branch_info.branch_type_)) {
341         int index =
342             BranchInfoSet::BranchIndexFromType(branch_info.branch_type_);
343         typed_set_[index].erase(branch_info);
344       }
345     }
346 
347     size_t GetSize() const {
348       size_t res = 0;
349       for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
350         res += typed_set_[i].size();
351       }
352       return res;
353     }
354     VIXL_DEPRECATED("GetSize", size_t size() const) { return GetSize(); }
355 
356     bool IsEmpty() const {
357       for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
358         if (!typed_set_[i].empty()) {
359           return false;
360         }
361       }
362       return true;
363     }
364     VIXL_DEPRECATED("IsEmpty", bool empty() const) { return IsEmpty(); }
365 
366     ptrdiff_t GetFirstLimit() {
367       ptrdiff_t res = kInvalidOffset;
368       for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
369         res = std::min(res, typed_set_[i].GetFirstLimit());
370       }
371       return res;
372     }
373     VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) {
374       return GetFirstLimit();
375     }
376 
377     void Reset() {
378       for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
379         typed_set_[i].clear();
380       }
381     }
382 
383     static ImmBranchType BranchTypeFromIndex(int index) {
384       switch (index) {
385         case 0:
386           return CondBranchType;
387         case 1:
388           return CompareBranchType;
389         case 2:
390           return TestBranchType;
391         default:
392           VIXL_UNREACHABLE();
393           return UnknownBranchType;
394       }
395     }
396     static int BranchIndexFromType(ImmBranchType branch_type) {
397       switch (branch_type) {
398         case CondBranchType:
399           return 0;
400         case CompareBranchType:
401           return 1;
402         case TestBranchType:
403           return 2;
404         default:
405           VIXL_UNREACHABLE();
406           return 0;
407       }
408     }
409 
410     bool IsValidBranchType(ImmBranchType branch_type) {
411       return (branch_type != UnknownBranchType) &&
412              (branch_type != UncondBranchType);
413     }
414 
415    private:
416     static const int kNumberOfTrackedBranchTypes = 3;
417     BranchInfoTypedSet typed_set_[kNumberOfTrackedBranchTypes];
418 
419     friend class VeneerPool;
420     friend class BranchInfoSetIterator;
421   };
422 
423   class BranchInfoSetIterator {
424    public:
425     explicit BranchInfoSetIterator(BranchInfoSet* set) : set_(set) {
426       for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
427         new (&sub_iterator_[i])
428             BranchInfoTypedSetIterator(&(set_->typed_set_[i]));
429       }
430     }
431 
432     VeneerPool::BranchInfo* Current() {
433       for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
434         if (!sub_iterator_[i].Done()) {
435           return sub_iterator_[i].Current();
436         }
437       }
438       VIXL_UNREACHABLE();
439       return NULL;
440     }
441 
442     void Advance() {
443       VIXL_ASSERT(!Done());
444       for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
445         if (!sub_iterator_[i].Done()) {
446           sub_iterator_[i].Advance();
447           return;
448         }
449       }
450       VIXL_UNREACHABLE();
451     }
452 
453     bool Done() const {
454       for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
455         if (!sub_iterator_[i].Done()) return false;
456       }
457       return true;
458     }
459 
460     void AdvanceToNextType() {
461       VIXL_ASSERT(!Done());
462       for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
463         if (!sub_iterator_[i].Done()) {
464           sub_iterator_[i].Finish();
465           return;
466         }
467       }
468       VIXL_UNREACHABLE();
469     }
470 
471     void DeleteCurrentAndAdvance() {
472       for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
473         if (!sub_iterator_[i].Done()) {
474           sub_iterator_[i].DeleteCurrentAndAdvance();
475           return;
476         }
477       }
478     }
479 
480    private:
481     BranchInfoSet* set_;
482     BranchInfoTypedSetIterator
483         sub_iterator_[BranchInfoSet::kNumberOfTrackedBranchTypes];
484   };
485 
486   ptrdiff_t GetNextCheckPoint() {
487     if (unresolved_branches_.IsEmpty()) {
488       return kNoCheckpointRequired;
489     } else {
490       return unresolved_branches_.GetFirstLimit();
491     }
492   }
493   VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) {
494     return GetNextCheckPoint();
495   }
496 
497   // Information about unresolved (forward) branches.
498   BranchInfoSet unresolved_branches_;
499 };
500 
501 
502 // Helper for common Emission checks.
503 // The macro-instruction maps to a single instruction.
504 class SingleEmissionCheckScope : public EmissionCheckScope {
505  public:
506   explicit SingleEmissionCheckScope(MacroAssemblerInterface* masm)
507       : EmissionCheckScope(masm, kInstructionSize) {}
508 };
509 
510 
511 // The macro instruction is a "typical" macro-instruction. Typical macro-
512 // instructions only emit a few instructions, "a few" being defined as 8 here.
513 class MacroEmissionCheckScope : public EmissionCheckScope {
514  public:
515   explicit MacroEmissionCheckScope(MacroAssemblerInterface* masm)
516       : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {}
517 
518  private:
519   static const size_t kTypicalMacroInstructionMaxSize = 8 * kInstructionSize;
520 };
521 
522 
523 enum BranchType {
524   // Copies of architectural conditions.
525   // The associated conditions can be used in place of these branch types; the
526   // code will take care of reinterpreting them with the correct type.
527   integer_eq = eq,
528   integer_ne = ne,
529   integer_hs = hs,
530   integer_lo = lo,
531   integer_mi = mi,
532   integer_pl = pl,
533   integer_vs = vs,
534   integer_vc = vc,
535   integer_hi = hi,
536   integer_ls = ls,
537   integer_ge = ge,
538   integer_lt = lt,
539   integer_gt = gt,
540   integer_le = le,
541   integer_al = al,
542   integer_nv = nv,
543 
544   // These two are *different* from the architectural codes al and nv.
545   // 'always' is used to generate unconditional branches.
546   // 'never' is used to not generate a branch (generally as the inverse
547   // branch type of 'always').
548   always,
549   never,
550   // cbz and cbnz
551   reg_zero,
552   reg_not_zero,
553   // tbz and tbnz
554   reg_bit_clear,
555   reg_bit_set,
556 
557   // Aliases.
558   kBranchTypeFirstCondition = eq,
559   kBranchTypeLastCondition = nv,
560   kBranchTypeFirstUsingReg = reg_zero,
561   kBranchTypeFirstUsingBit = reg_bit_clear
562 };
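// A usage sketch (not part of the original header; `masm` and `label` are
// assumed): the generic MacroAssembler::B(Label*, BranchType, Register, int)
// overload declared below maps these branch types onto concrete instructions,
// roughly as follows:
//
//   masm.B(&label, always);              // b label
//   masm.B(&label, integer_eq);          // b.eq label
//   masm.B(&label, reg_not_zero, x0);    // cbnz x0, label
//   masm.B(&label, reg_bit_set, x0, 3);  // tbnz x0, #3, label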
563 
564 
565 enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
566 
567 
568 class MacroAssembler : public Assembler, public MacroAssemblerInterface {
569  public:
570   explicit MacroAssembler(
571       PositionIndependentCodeOption pic = PositionIndependentCode);
572   MacroAssembler(size_t capacity,
573                  PositionIndependentCodeOption pic = PositionIndependentCode);
574   MacroAssembler(byte* buffer,
575                  size_t capacity,
576                  PositionIndependentCodeOption pic = PositionIndependentCode);
577   ~MacroAssembler();
578 
579   virtual vixl::internal::AssemblerBase* AsAssemblerBase() VIXL_OVERRIDE {
580     return this;
581   }
582 
583   // Start generating code from the beginning of the buffer, discarding any code
584   // and data that has already been emitted into the buffer.
585   //
586   // In order to avoid any accidental transfer of state, Reset ASSERTs that the
587   // constant pool is not blocked.
588   void Reset();
589 
590   // Finalize a code buffer of generated instructions. This function must be
591   // called before executing or copying code from the buffer.
592   void FinalizeCode();
593 
594 
595   // Constant generation helpers.
596   // These functions return the number of instructions required to move the
597   // immediate into the destination register. Also, if the masm pointer is
598   // non-null, it generates the code to do so.
599   // The two features are implemented using one function to avoid duplication of
600   // the logic.
601   // The function can be used to evaluate the cost of synthesizing an
602   // instruction using 'mov immediate' instructions. A user might prefer loading
603   // a constant using the literal pool instead of using multiple 'mov immediate'
604   // instructions.
605   static int MoveImmediateHelper(MacroAssembler* masm,
606                                  const Register& rd,
607                                  uint64_t imm);
608   static bool OneInstrMoveImmediateHelper(MacroAssembler* masm,
609                                           const Register& dst,
610                                           int64_t imm);
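  // For example (illustrative only): passing a NULL masm pointer only counts
  // the instructions, without emitting anything, so the cost of a move can be
  // compared against a literal-pool load before any code is generated:
  //
  //   int cost = MoveImmediateHelper(NULL, x0, 0x1234567890abcdef);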
611 
612 
613   // Logical macros.
614   void And(const Register& rd, const Register& rn, const Operand& operand);
615   void Ands(const Register& rd, const Register& rn, const Operand& operand);
616   void Bic(const Register& rd, const Register& rn, const Operand& operand);
617   void Bics(const Register& rd, const Register& rn, const Operand& operand);
618   void Orr(const Register& rd, const Register& rn, const Operand& operand);
619   void Orn(const Register& rd, const Register& rn, const Operand& operand);
620   void Eor(const Register& rd, const Register& rn, const Operand& operand);
621   void Eon(const Register& rd, const Register& rn, const Operand& operand);
622   void Tst(const Register& rn, const Operand& operand);
623   void LogicalMacro(const Register& rd,
624                     const Register& rn,
625                     const Operand& operand,
626                     LogicalOp op);
627 
628   // Add and sub macros.
629   void Add(const Register& rd,
630            const Register& rn,
631            const Operand& operand,
632            FlagsUpdate S = LeaveFlags);
633   void Adds(const Register& rd, const Register& rn, const Operand& operand);
634   void Sub(const Register& rd,
635            const Register& rn,
636            const Operand& operand,
637            FlagsUpdate S = LeaveFlags);
638   void Subs(const Register& rd, const Register& rn, const Operand& operand);
639   void Cmn(const Register& rn, const Operand& operand);
640   void Cmp(const Register& rn, const Operand& operand);
641   void Neg(const Register& rd, const Operand& operand);
642   void Negs(const Register& rd, const Operand& operand);
643 
644   void AddSubMacro(const Register& rd,
645                    const Register& rn,
646                    const Operand& operand,
647                    FlagsUpdate S,
648                    AddSubOp op);
649 
650   // Add/sub with carry macros.
651   void Adc(const Register& rd, const Register& rn, const Operand& operand);
652   void Adcs(const Register& rd, const Register& rn, const Operand& operand);
653   void Sbc(const Register& rd, const Register& rn, const Operand& operand);
654   void Sbcs(const Register& rd, const Register& rn, const Operand& operand);
655   void Ngc(const Register& rd, const Operand& operand);
656   void Ngcs(const Register& rd, const Operand& operand);
657   void AddSubWithCarryMacro(const Register& rd,
658                             const Register& rn,
659                             const Operand& operand,
660                             FlagsUpdate S,
661                             AddSubWithCarryOp op);
662 
663   // Move macros.
664   void Mov(const Register& rd, uint64_t imm);
665   void Mov(const Register& rd,
666            const Operand& operand,
667            DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
668   void Mvn(const Register& rd, uint64_t imm) {
669     Mov(rd, (rd.GetSizeInBits() == kXRegSize) ? ~imm : (~imm & kWRegMask));
670   }
671   void Mvn(const Register& rd, const Operand& operand);
672 
673   // Try to move an immediate into the destination register in a single
674   // instruction. Returns true and updates the contents of dst on success;
675   // returns false otherwise.
676   bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
677 
678   // Move an immediate into register dst, and return an Operand object for
679   // use with a subsequent instruction that accepts a shift. The value moved
680   // into dst is not necessarily equal to imm; it may have had a shifting
681   // operation applied to it that will be subsequently undone by the shift
682   // applied in the Operand.
683   Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
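  // A usage sketch (illustrative; `temp`, `rd` and `rn` are assumed
  // registers): the returned Operand can be fed straight into an instruction
  // that accepts a shifted register operand, and its shift undoes whatever
  // shift was applied to the value placed in `temp`:
  //
  //   Operand imm_op = MoveImmediateForShiftedOp(temp, imm);
  //   Add(rd, rn, imm_op);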
684 
685   void Move(const GenericOperand& dst, const GenericOperand& src);
686 
687   // Synthesises the address represented by a MemOperand into a register.
688   void ComputeAddress(const Register& dst, const MemOperand& mem_op);
689 
690   // Conditional macros.
691   void Ccmp(const Register& rn,
692             const Operand& operand,
693             StatusFlags nzcv,
694             Condition cond);
695   void Ccmn(const Register& rn,
696             const Operand& operand,
697             StatusFlags nzcv,
698             Condition cond);
699   void ConditionalCompareMacro(const Register& rn,
700                                const Operand& operand,
701                                StatusFlags nzcv,
702                                Condition cond,
703                                ConditionalCompareOp op);
704 
705   // On return, the boolean values pointed to will indicate whether `left` and
706   // `right` should be synthesised in a temporary register.
707   static void GetCselSynthesisInformation(const Register& rd,
708                                           const Operand& left,
709                                           const Operand& right,
710                                           bool* should_synthesise_left,
711                                           bool* should_synthesise_right) {
712     // Note that the helper does not need to look at the condition.
713     CselHelper(NULL,
714                rd,
715                left,
716                right,
717                eq,
718                should_synthesise_left,
719                should_synthesise_right);
720   }
721 
722   void Csel(const Register& rd,
723             const Operand& left,
724             const Operand& right,
725             Condition cond) {
726     CselHelper(this, rd, left, right, cond);
727   }
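  // For example (illustrative only): a caller can ask up front whether
  // temporaries will be needed before committing to a Csel:
  //
  //   bool synth_left, synth_right;
  //   GetCselSynthesisInformation(rd, left, right, &synth_left, &synth_right);
  //   // If either flag is set, the corresponding operand will be synthesised
  //   // in a scratch register when Csel() is called.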
728 
729 // Load/store macros.
730 #define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
731   void FN(const REGTYPE REG, const MemOperand& addr);
732   LS_MACRO_LIST(DECLARE_FUNCTION)
733 #undef DECLARE_FUNCTION
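  // For illustration, the expansion above of the LS_MACRO_LIST entry
  // V(Ldrb, Register&, rt, LDRB_w) declares:
  //
  //   void Ldrb(const Register& rt, const MemOperand& addr);
  //
  // and similarly for the other entries in the list.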
734 
735   void LoadStoreMacro(const CPURegister& rt,
736                       const MemOperand& addr,
737                       LoadStoreOp op);
738 
739 #define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
740   void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
741   LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
742 #undef DECLARE_FUNCTION
743 
744   void LoadStorePairMacro(const CPURegister& rt,
745                           const CPURegister& rt2,
746                           const MemOperand& addr,
747                           LoadStorePairOp op);
748 
749   void Prfm(PrefetchOperation op, const MemOperand& addr);
750 
751   // Push or pop up to 4 registers of the same width to or from the stack,
752   // using the current stack pointer as set by SetStackPointer.
753   //
754   // If an argument register is 'NoReg', all further arguments are also assumed
755   // to be 'NoReg', and are thus not pushed or popped.
756   //
757   // Arguments are ordered such that "Push(a, b);" is functionally equivalent
758   // to "Push(a); Push(b);".
759   //
760   // It is valid to push the same register more than once, and there is no
761   // restriction on the order in which registers are specified.
762   //
763   // It is not valid to pop into the same register more than once in one
764   // operation, not even into the zero register.
765   //
766   // If the current stack pointer (as set by SetStackPointer) is sp, then it
767   // must be aligned to 16 bytes on entry and the total size of the specified
768   // registers must also be a multiple of 16 bytes.
769   //
770   // Even if the current stack pointer is not the system stack pointer (sp),
771   // Push (and derived methods) will still modify the system stack pointer in
772   // order to comply with ABI rules about accessing memory below the system
773   // stack pointer.
774   //
775   // Other than the registers passed into Pop, the stack pointer and (possibly)
776   // the system stack pointer, these methods do not modify any other registers.
777   void Push(const CPURegister& src0,
778             const CPURegister& src1 = NoReg,
779             const CPURegister& src2 = NoReg,
780             const CPURegister& src3 = NoReg);
781   void Pop(const CPURegister& dst0,
782            const CPURegister& dst1 = NoReg,
783            const CPURegister& dst2 = NoReg,
784            const CPURegister& dst3 = NoReg);
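  // A usage sketch (illustrative; `masm` is an assumed MacroAssembler): since
  // "Push(a, b)" is equivalent to "Push(a); Push(b);", popping in mirrored
  // order restores the original values:
  //
  //   masm.Push(x0, x1);  // x0 ends up at the higher address.
  //   masm.Pop(x1, x0);   // Restores both registers.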
785 
786   // Alternative forms of Push and Pop, taking a RegList or CPURegList that
787   // specifies the registers that are to be pushed or popped. Higher-numbered
788   // registers are associated with higher memory addresses (as in the A32 push
789   // and pop instructions).
790   //
791   // (Push|Pop)SizeRegList allow you to specify the register size as a
792   // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
793   // supported.
794   //
795   // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
796   void PushCPURegList(CPURegList registers);
797   void PopCPURegList(CPURegList registers);
798 
799   void PushSizeRegList(
800       RegList registers,
801       unsigned reg_size,
802       CPURegister::RegisterType type = CPURegister::kRegister) {
803     PushCPURegList(CPURegList(type, reg_size, registers));
804   }
805   void PopSizeRegList(RegList registers,
806                       unsigned reg_size,
807                       CPURegister::RegisterType type = CPURegister::kRegister) {
808     PopCPURegList(CPURegList(type, reg_size, registers));
809   }
810   void PushXRegList(RegList regs) { PushSizeRegList(regs, kXRegSize); }
811   void PopXRegList(RegList regs) { PopSizeRegList(regs, kXRegSize); }
812   void PushWRegList(RegList regs) { PushSizeRegList(regs, kWRegSize); }
813   void PopWRegList(RegList regs) { PopSizeRegList(regs, kWRegSize); }
814   void PushDRegList(RegList regs) {
815     PushSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
816   }
817   void PopDRegList(RegList regs) {
818     PopSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
819   }
820   void PushSRegList(RegList regs) {
821     PushSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
822   }
823   void PopSRegList(RegList regs) {
824     PopSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
825   }
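  // For example (illustrative only, assuming the CPURegister::GetBit() helper
  // to build the RegList mask):
  //
  //   RegList saved = x19.GetBit() | x20.GetBit();
  //   masm.PushXRegList(saved);
  //   // ...
  //   masm.PopXRegList(saved);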
826 
827   // Push the specified register 'count' times.
828   void PushMultipleTimes(int count, Register src);
829 
830   // Poke 'src' onto the stack. The offset is in bytes.
831   //
832   // If the current stack pointer (as set by SetStackPointer) is sp, then sp
833   // must be aligned to 16 bytes.
834   void Poke(const Register& src, const Operand& offset);
835 
836   // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
837   //
838   // If the current stack pointer (as set by SetStackPointer) is sp, then sp
839   // must be aligned to 16 bytes.
840   void Peek(const Register& dst, const Operand& offset);
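  // A usage sketch (illustrative; `masm` is an assumed MacroAssembler): spill
  // a register and reload it, with byte offsets relative to the current stack
  // pointer:
  //
  //   masm.Claim(16);    // Keeps sp 16-byte aligned.
  //   masm.Poke(x0, 0);  // Store x0 at [StackPointer(), #0].
  //   masm.Peek(x1, 0);  // Load the value back, into x1.
  //   masm.Drop(16);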
841 
842   // Alternative forms of Peek and Poke, taking a RegList or CPURegList that
843   // specifies the registers that are to be pushed or popped. Higher-numbered
844   // registers are associated with higher memory addresses.
845   //
846   // (Peek|Poke)SizeRegList allow you to specify the register size as a
847   // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
848   // supported.
849   //
850   // Otherwise, (Peek|Poke)(CPU|X|W|D|S)RegList is preferred.
851   void PeekCPURegList(CPURegList registers, int64_t offset) {
852     LoadCPURegList(registers, MemOperand(StackPointer(), offset));
853   }
854   void PokeCPURegList(CPURegList registers, int64_t offset) {
855     StoreCPURegList(registers, MemOperand(StackPointer(), offset));
856   }
857 
858   void PeekSizeRegList(
859       RegList registers,
860       int64_t offset,
861       unsigned reg_size,
862       CPURegister::RegisterType type = CPURegister::kRegister) {
863     PeekCPURegList(CPURegList(type, reg_size, registers), offset);
864   }
865   void PokeSizeRegList(
866       RegList registers,
867       int64_t offset,
868       unsigned reg_size,
869       CPURegister::RegisterType type = CPURegister::kRegister) {
870     PokeCPURegList(CPURegList(type, reg_size, registers), offset);
871   }
872   void PeekXRegList(RegList regs, int64_t offset) {
873     PeekSizeRegList(regs, offset, kXRegSize);
874   }
875   void PokeXRegList(RegList regs, int64_t offset) {
876     PokeSizeRegList(regs, offset, kXRegSize);
877   }
878   void PeekWRegList(RegList regs, int64_t offset) {
879     PeekSizeRegList(regs, offset, kWRegSize);
880   }
881   void PokeWRegList(RegList regs, int64_t offset) {
882     PokeSizeRegList(regs, offset, kWRegSize);
883   }
884   void PeekDRegList(RegList regs, int64_t offset) {
885     PeekSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
886   }
887   void PokeDRegList(RegList regs, int64_t offset) {
888     PokeSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
889   }
890   void PeekSRegList(RegList regs, int64_t offset) {
891     PeekSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
892   }
893   void PokeSRegList(RegList regs, int64_t offset) {
894     PokeSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
895   }
896 
897 
898   // Claim or drop stack space without actually accessing memory.
899   //
900   // If the current stack pointer (as set by SetStackPointer) is sp, then it
901   // must be aligned to 16 bytes and the size claimed or dropped must be a
902   // multiple of 16 bytes.
903   void Claim(const Operand& size);
904   void Drop(const Operand& size);
905 
906   // Preserve the callee-saved registers (as defined by AAPCS64).
907   //
908   // Higher-numbered registers are pushed before lower-numbered registers, and
909   // thus get higher addresses.
910   // Floating-point registers are pushed before general-purpose registers, and
911   // thus get higher addresses.
912   //
913   // This method must not be called unless StackPointer() is sp, and it is
914   // aligned to 16 bytes.
915   void PushCalleeSavedRegisters();
916 
917   // Restore the callee-saved registers (as defined by AAPCS64).
918   //
919   // Higher-numbered registers are popped after lower-numbered registers, and
920   // thus come from higher addresses.
921   // Floating-point registers are popped after general-purpose registers, and
922   // thus come from higher addresses.
923   //
924   // This method must not be called unless StackPointer() is sp, and it is
925   // aligned to 16 bytes.
926   void PopCalleeSavedRegisters();
927 
928   void LoadCPURegList(CPURegList registers, const MemOperand& src);
929   void StoreCPURegList(CPURegList registers, const MemOperand& dst);
930 
931   // Remaining instructions are simple pass-through calls to the assembler.
932   void Adr(const Register& rd, Label* label) {
933     VIXL_ASSERT(allow_macro_instructions_);
934     VIXL_ASSERT(!rd.IsZero());
935     SingleEmissionCheckScope guard(this);
936     adr(rd, label);
937   }
938   void Adrp(const Register& rd, Label* label) {
939     VIXL_ASSERT(allow_macro_instructions_);
940     VIXL_ASSERT(!rd.IsZero());
941     SingleEmissionCheckScope guard(this);
942     adrp(rd, label);
943   }
944   void Asr(const Register& rd, const Register& rn, unsigned shift) {
945     VIXL_ASSERT(allow_macro_instructions_);
946     VIXL_ASSERT(!rd.IsZero());
947     VIXL_ASSERT(!rn.IsZero());
948     SingleEmissionCheckScope guard(this);
949     asr(rd, rn, shift);
950   }
951   void Asr(const Register& rd, const Register& rn, const Register& rm) {
952     VIXL_ASSERT(allow_macro_instructions_);
953     VIXL_ASSERT(!rd.IsZero());
954     VIXL_ASSERT(!rn.IsZero());
955     VIXL_ASSERT(!rm.IsZero());
956     SingleEmissionCheckScope guard(this);
957     asrv(rd, rn, rm);
958   }
959 
960   // Branch type inversion relies on these relations.
961   VIXL_STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
962                      (reg_bit_clear == (reg_bit_set ^ 1)) &&
963                      (always == (never ^ 1)));
964 
965   BranchType InvertBranchType(BranchType type) {
966     if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
967       return static_cast<BranchType>(
968           InvertCondition(static_cast<Condition>(type)));
969     } else {
970       return static_cast<BranchType>(type ^ 1);
971     }
972   }
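  // For example (following the static assertion above):
  //   InvertBranchType(reg_zero)    == reg_not_zero   (cbz  <-> cbnz)
  //   InvertBranchType(reg_bit_set) == reg_bit_clear  (tbnz <-> tbz)
  //   InvertBranchType(integer_eq)  == integer_ne     (via InvertCondition)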
973 
974   void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
975 
976   void B(Label* label);
977   void B(Label* label, Condition cond);
978   void B(Condition cond, Label* label) { B(label, cond); }
979   void Bfm(const Register& rd,
980            const Register& rn,
981            unsigned immr,
982            unsigned imms) {
983     VIXL_ASSERT(allow_macro_instructions_);
984     VIXL_ASSERT(!rd.IsZero());
985     VIXL_ASSERT(!rn.IsZero());
986     SingleEmissionCheckScope guard(this);
987     bfm(rd, rn, immr, imms);
988   }
989   void Bfi(const Register& rd,
990            const Register& rn,
991            unsigned lsb,
992            unsigned width) {
993     VIXL_ASSERT(allow_macro_instructions_);
994     VIXL_ASSERT(!rd.IsZero());
995     VIXL_ASSERT(!rn.IsZero());
996     SingleEmissionCheckScope guard(this);
997     bfi(rd, rn, lsb, width);
998   }
999   void Bfxil(const Register& rd,
1000              const Register& rn,
1001              unsigned lsb,
1002              unsigned width) {
1003     VIXL_ASSERT(allow_macro_instructions_);
1004     VIXL_ASSERT(!rd.IsZero());
1005     VIXL_ASSERT(!rn.IsZero());
1006     SingleEmissionCheckScope guard(this);
1007     bfxil(rd, rn, lsb, width);
1008   }
1009   void Bind(Label* label);
1010   // Bind a label to a specified offset from the start of the buffer.
1011   void BindToOffset(Label* label, ptrdiff_t offset);
1012   void Bl(Label* label) {
1013     VIXL_ASSERT(allow_macro_instructions_);
1014     SingleEmissionCheckScope guard(this);
1015     bl(label);
1016   }
1017   void Blr(const Register& xn) {
1018     VIXL_ASSERT(allow_macro_instructions_);
1019     VIXL_ASSERT(!xn.IsZero());
1020     SingleEmissionCheckScope guard(this);
1021     blr(xn);
1022   }
1023   void Br(const Register& xn) {
1024     VIXL_ASSERT(allow_macro_instructions_);
1025     VIXL_ASSERT(!xn.IsZero());
1026     SingleEmissionCheckScope guard(this);
1027     br(xn);
1028   }
1029   void Brk(int code = 0) {
1030     VIXL_ASSERT(allow_macro_instructions_);
1031     SingleEmissionCheckScope guard(this);
1032     brk(code);
1033   }
1034   void Cbnz(const Register& rt, Label* label);
1035   void Cbz(const Register& rt, Label* label);
1036   void Cinc(const Register& rd, const Register& rn, Condition cond) {
1037     VIXL_ASSERT(allow_macro_instructions_);
1038     VIXL_ASSERT(!rd.IsZero());
1039     VIXL_ASSERT(!rn.IsZero());
1040     SingleEmissionCheckScope guard(this);
1041     cinc(rd, rn, cond);
1042   }
1043   void Cinv(const Register& rd, const Register& rn, Condition cond) {
1044     VIXL_ASSERT(allow_macro_instructions_);
1045     VIXL_ASSERT(!rd.IsZero());
1046     VIXL_ASSERT(!rn.IsZero());
1047     SingleEmissionCheckScope guard(this);
1048     cinv(rd, rn, cond);
1049   }
1050   void Clrex() {
1051     VIXL_ASSERT(allow_macro_instructions_);
1052     SingleEmissionCheckScope guard(this);
1053     clrex();
1054   }
1055   void Cls(const Register& rd, const Register& rn) {
1056     VIXL_ASSERT(allow_macro_instructions_);
1057     VIXL_ASSERT(!rd.IsZero());
1058     VIXL_ASSERT(!rn.IsZero());
1059     SingleEmissionCheckScope guard(this);
1060     cls(rd, rn);
1061   }
1062   void Clz(const Register& rd, const Register& rn) {
1063     VIXL_ASSERT(allow_macro_instructions_);
1064     VIXL_ASSERT(!rd.IsZero());
1065     VIXL_ASSERT(!rn.IsZero());
1066     SingleEmissionCheckScope guard(this);
1067     clz(rd, rn);
1068   }
1069   void Cneg(const Register& rd, const Register& rn, Condition cond) {
1070     VIXL_ASSERT(allow_macro_instructions_);
1071     VIXL_ASSERT(!rd.IsZero());
1072     VIXL_ASSERT(!rn.IsZero());
1073     SingleEmissionCheckScope guard(this);
1074     cneg(rd, rn, cond);
1075   }
1076   void Cset(const Register& rd, Condition cond) {
1077     VIXL_ASSERT(allow_macro_instructions_);
1078     VIXL_ASSERT(!rd.IsZero());
1079     SingleEmissionCheckScope guard(this);
1080     cset(rd, cond);
1081   }
1082   void Csetm(const Register& rd, Condition cond) {
1083     VIXL_ASSERT(allow_macro_instructions_);
1084     VIXL_ASSERT(!rd.IsZero());
1085     SingleEmissionCheckScope guard(this);
1086     csetm(rd, cond);
1087   }
1088   void Csinc(const Register& rd,
1089              const Register& rn,
1090              const Register& rm,
1091              Condition cond) {
1092     VIXL_ASSERT(allow_macro_instructions_);
1093     VIXL_ASSERT(!rd.IsZero());
1094     VIXL_ASSERT(!rn.IsZero());
1095     VIXL_ASSERT(!rm.IsZero());
1096     VIXL_ASSERT((cond != al) && (cond != nv));
1097     SingleEmissionCheckScope guard(this);
1098     csinc(rd, rn, rm, cond);
1099   }
1100   void Csinv(const Register& rd,
1101              const Register& rn,
1102              const Register& rm,
1103              Condition cond) {
1104     VIXL_ASSERT(allow_macro_instructions_);
1105     VIXL_ASSERT(!rd.IsZero());
1106     VIXL_ASSERT(!rn.IsZero());
1107     VIXL_ASSERT(!rm.IsZero());
1108     VIXL_ASSERT((cond != al) && (cond != nv));
1109     SingleEmissionCheckScope guard(this);
1110     csinv(rd, rn, rm, cond);
1111   }
1112   void Csneg(const Register& rd,
1113              const Register& rn,
1114              const Register& rm,
1115              Condition cond) {
1116     VIXL_ASSERT(allow_macro_instructions_);
1117     VIXL_ASSERT(!rd.IsZero());
1118     VIXL_ASSERT(!rn.IsZero());
1119     VIXL_ASSERT(!rm.IsZero());
1120     VIXL_ASSERT((cond != al) && (cond != nv));
1121     SingleEmissionCheckScope guard(this);
1122     csneg(rd, rn, rm, cond);
1123   }
1124   void Dmb(BarrierDomain domain, BarrierType type) {
1125     VIXL_ASSERT(allow_macro_instructions_);
1126     SingleEmissionCheckScope guard(this);
1127     dmb(domain, type);
1128   }
1129   void Dsb(BarrierDomain domain, BarrierType type) {
1130     VIXL_ASSERT(allow_macro_instructions_);
1131     SingleEmissionCheckScope guard(this);
1132     dsb(domain, type);
1133   }
1134   void Extr(const Register& rd,
1135             const Register& rn,
1136             const Register& rm,
1137             unsigned lsb) {
1138     VIXL_ASSERT(allow_macro_instructions_);
1139     VIXL_ASSERT(!rd.IsZero());
1140     VIXL_ASSERT(!rn.IsZero());
1141     VIXL_ASSERT(!rm.IsZero());
1142     SingleEmissionCheckScope guard(this);
1143     extr(rd, rn, rm, lsb);
1144   }
1145   void Fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1146     VIXL_ASSERT(allow_macro_instructions_);
1147     SingleEmissionCheckScope guard(this);
1148     fadd(vd, vn, vm);
1149   }
1150   void Fccmp(const VRegister& vn,
1151              const VRegister& vm,
1152              StatusFlags nzcv,
1153              Condition cond,
1154              FPTrapFlags trap = DisableTrap) {
1155     VIXL_ASSERT(allow_macro_instructions_);
1156     VIXL_ASSERT((cond != al) && (cond != nv));
1157     SingleEmissionCheckScope guard(this);
1158     FPCCompareMacro(vn, vm, nzcv, cond, trap);
1159   }
1160   void Fccmpe(const VRegister& vn,
1161               const VRegister& vm,
1162               StatusFlags nzcv,
1163               Condition cond) {
1164     Fccmp(vn, vm, nzcv, cond, EnableTrap);
1165   }
1166   void Fcmp(const VRegister& vn,
1167             const VRegister& vm,
1168             FPTrapFlags trap = DisableTrap) {
1169     VIXL_ASSERT(allow_macro_instructions_);
1170     SingleEmissionCheckScope guard(this);
1171     FPCompareMacro(vn, vm, trap);
1172   }
1173   void Fcmp(const VRegister& vn, double value, FPTrapFlags trap = DisableTrap);
1174   void Fcmpe(const VRegister& vn, double value);
1175   void Fcmpe(const VRegister& vn, const VRegister& vm) {
1176     Fcmp(vn, vm, EnableTrap);
1177   }
1178   void Fcsel(const VRegister& vd,
1179              const VRegister& vn,
1180              const VRegister& vm,
1181              Condition cond) {
1182     VIXL_ASSERT(allow_macro_instructions_);
1183     VIXL_ASSERT((cond != al) && (cond != nv));
1184     SingleEmissionCheckScope guard(this);
1185     fcsel(vd, vn, vm, cond);
1186   }
1187   void Fcvt(const VRegister& vd, const VRegister& vn) {
1188     VIXL_ASSERT(allow_macro_instructions_);
1189     SingleEmissionCheckScope guard(this);
1190     fcvt(vd, vn);
1191   }
1192   void Fcvtl(const VRegister& vd, const VRegister& vn) {
1193     VIXL_ASSERT(allow_macro_instructions_);
1194     SingleEmissionCheckScope guard(this);
1195     fcvtl(vd, vn);
1196   }
1197   void Fcvtl2(const VRegister& vd, const VRegister& vn) {
1198     VIXL_ASSERT(allow_macro_instructions_);
1199     SingleEmissionCheckScope guard(this);
1200     fcvtl2(vd, vn);
1201   }
1202   void Fcvtn(const VRegister& vd, const VRegister& vn) {
1203     VIXL_ASSERT(allow_macro_instructions_);
1204     SingleEmissionCheckScope guard(this);
1205     fcvtn(vd, vn);
1206   }
1207   void Fcvtn2(const VRegister& vd, const VRegister& vn) {
1208     VIXL_ASSERT(allow_macro_instructions_);
1209     SingleEmissionCheckScope guard(this);
1210     fcvtn2(vd, vn);
1211   }
1212   void Fcvtxn(const VRegister& vd, const VRegister& vn) {
1213     VIXL_ASSERT(allow_macro_instructions_);
1214     SingleEmissionCheckScope guard(this);
1215     fcvtxn(vd, vn);
1216   }
1217   void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
1218     VIXL_ASSERT(allow_macro_instructions_);
1219     SingleEmissionCheckScope guard(this);
1220     fcvtxn2(vd, vn);
1221   }
1222   void Fcvtas(const Register& rd, const VRegister& vn) {
1223     VIXL_ASSERT(allow_macro_instructions_);
1224     VIXL_ASSERT(!rd.IsZero());
1225     SingleEmissionCheckScope guard(this);
1226     fcvtas(rd, vn);
1227   }
1228   void Fcvtau(const Register& rd, const VRegister& vn) {
1229     VIXL_ASSERT(allow_macro_instructions_);
1230     VIXL_ASSERT(!rd.IsZero());
1231     SingleEmissionCheckScope guard(this);
1232     fcvtau(rd, vn);
1233   }
1234   void Fcvtms(const Register& rd, const VRegister& vn) {
1235     VIXL_ASSERT(allow_macro_instructions_);
1236     VIXL_ASSERT(!rd.IsZero());
1237     SingleEmissionCheckScope guard(this);
1238     fcvtms(rd, vn);
1239   }
1240   void Fcvtmu(const Register& rd, const VRegister& vn) {
1241     VIXL_ASSERT(allow_macro_instructions_);
1242     VIXL_ASSERT(!rd.IsZero());
1243     SingleEmissionCheckScope guard(this);
1244     fcvtmu(rd, vn);
1245   }
1246   void Fcvtns(const Register& rd, const VRegister& vn) {
1247     VIXL_ASSERT(allow_macro_instructions_);
1248     VIXL_ASSERT(!rd.IsZero());
1249     SingleEmissionCheckScope guard(this);
1250     fcvtns(rd, vn);
1251   }
1252   void Fcvtnu(const Register& rd, const VRegister& vn) {
1253     VIXL_ASSERT(allow_macro_instructions_);
1254     VIXL_ASSERT(!rd.IsZero());
1255     SingleEmissionCheckScope guard(this);
1256     fcvtnu(rd, vn);
1257   }
1258   void Fcvtps(const Register& rd, const VRegister& vn) {
1259     VIXL_ASSERT(allow_macro_instructions_);
1260     VIXL_ASSERT(!rd.IsZero());
1261     SingleEmissionCheckScope guard(this);
1262     fcvtps(rd, vn);
1263   }
1264   void Fcvtpu(const Register& rd, const VRegister& vn) {
1265     VIXL_ASSERT(allow_macro_instructions_);
1266     VIXL_ASSERT(!rd.IsZero());
1267     SingleEmissionCheckScope guard(this);
1268     fcvtpu(rd, vn);
1269   }
1270   void Fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0) {
1271     VIXL_ASSERT(allow_macro_instructions_);
1272     VIXL_ASSERT(!rd.IsZero());
1273     SingleEmissionCheckScope guard(this);
1274     fcvtzs(rd, vn, fbits);
1275   }
1276   void Fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0) {
1277     VIXL_ASSERT(allow_macro_instructions_);
1278     VIXL_ASSERT(!rd.IsZero());
1279     SingleEmissionCheckScope guard(this);
1280     fcvtzu(rd, vn, fbits);
1281   }
1282   void Fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1283     VIXL_ASSERT(allow_macro_instructions_);
1284     SingleEmissionCheckScope guard(this);
1285     fdiv(vd, vn, vm);
1286   }
1287   void Fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1288     VIXL_ASSERT(allow_macro_instructions_);
1289     SingleEmissionCheckScope guard(this);
1290     fmax(vd, vn, vm);
1291   }
1292   void Fmaxnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1293     VIXL_ASSERT(allow_macro_instructions_);
1294     SingleEmissionCheckScope guard(this);
1295     fmaxnm(vd, vn, vm);
1296   }
1297   void Fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1298     VIXL_ASSERT(allow_macro_instructions_);
1299     SingleEmissionCheckScope guard(this);
1300     fmin(vd, vn, vm);
1301   }
1302   void Fminnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1303     VIXL_ASSERT(allow_macro_instructions_);
1304     SingleEmissionCheckScope guard(this);
1305     fminnm(vd, vn, vm);
1306   }
1307   void Fmov(const VRegister& vd, const VRegister& vn) {
1308     VIXL_ASSERT(allow_macro_instructions_);
1309     SingleEmissionCheckScope guard(this);
1310     // Only emit an instruction if vd and vn are different, and they are both D
1311     // registers. fmov(s0, s0) is not a no-op because it clears the top word of
1312     // d0. Technically, fmov(d0, d0) is not a no-op either because it clears
1313     // the top of q0, but VRegister does not currently support Q registers.
1314     if (!vd.Is(vn) || !vd.Is64Bits()) {
1315       fmov(vd, vn);
1316     }
1317   }
1318   void Fmov(const VRegister& vd, const Register& rn) {
1319     VIXL_ASSERT(allow_macro_instructions_);
1320     VIXL_ASSERT(!rn.IsZero());
1321     SingleEmissionCheckScope guard(this);
1322     fmov(vd, rn);
1323   }
Fmov(const VRegister & vd,const XRegister & xn)1324   void Fmov(const VRegister& vd, const XRegister& xn) {
1325     Fmov(vd, Register(xn));
1326   }
Fmov(const VRegister & vd,const WRegister & wn)1327   void Fmov(const VRegister& vd, const WRegister& wn) {
1328     Fmov(vd, Register(wn));
1329   }
Fmov(const VRegister & vd,int index,const Register & rn)1330   void Fmov(const VRegister& vd, int index, const Register& rn) {
1331     VIXL_ASSERT(allow_macro_instructions_);
1332     SingleEmissionCheckScope guard(this);
1333     fmov(vd, index, rn);
1334   }
Fmov(const Register & rd,const VRegister & vn,int index)1335   void Fmov(const Register& rd, const VRegister& vn, int index) {
1336     VIXL_ASSERT(allow_macro_instructions_);
1337     SingleEmissionCheckScope guard(this);
1338     fmov(rd, vn, index);
1339   }
1340 
1341   // Provide explicit double and float interfaces for FP immediate moves, rather
1342   // than relying on implicit C++ casts. This allows signalling NaNs to be
1343   // preserved when the immediate matches the format of vd. Most systems convert
1344   // signalling NaNs to quiet NaNs when converting between float and double.
1345   void Fmov(VRegister vd, double imm);
1346   void Fmov(VRegister vd, float imm);
1347   // Provide a template to allow other types to be converted automatically.
1348   template <typename T>
Fmov(VRegister vd,T imm)1349   void Fmov(VRegister vd, T imm) {
1350     VIXL_ASSERT(allow_macro_instructions_);
1351     Fmov(vd, static_cast<double>(imm));
1352   }
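  // A brief sketch of why the explicit overloads above matter (illustrative
  // only; `masm` is an assumed MacroAssembler instance):
  //   masm.Fmov(s0, 1.5f);  // Float overload: no float<->double round trip, so a
  //                         // signalling NaN immediate in S format stays signalling.
  //   masm.Fmov(d0, 1.5);   // Double overload, used directly for D-format moves.
  //   masm.Fmov(d1, 2);     // Template overload: the int is cast to double first.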
  void Fmov(Register rd, VRegister vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fmov(rd, vn);
  }
  void Fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmul(vd, vn, vm);
  }
  void Fnmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmul(vd, vn, vm);
  }
  void Fmadd(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmadd(vd, vn, vm, va);
  }
  void Fmsub(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmsub(vd, vn, vm, va);
  }
  void Fnmadd(const VRegister& vd,
              const VRegister& vn,
              const VRegister& vm,
              const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmadd(vd, vn, vm, va);
  }
  void Fnmsub(const VRegister& vd,
              const VRegister& vn,
              const VRegister& vm,
              const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmsub(vd, vn, vm, va);
  }
  void Fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fsub(vd, vn, vm);
  }
  void Hint(SystemHint code) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    hint(code);
  }
  void Hlt(int code) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    hlt(code);
  }
  void Isb() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    isb();
  }
  void Ldar(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldar(rt, src);
  }
  void Ldarb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldarb(rt, src);
  }
  void Ldarh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldarh(rt, src);
  }
  void Ldaxp(const Register& rt, const Register& rt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    ldaxp(rt, rt2, src);
  }
  void Ldaxr(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxr(rt, src);
  }
  void Ldaxrb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxrb(rt, src);
  }
  void Ldaxrh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxrh(rt, src);
  }
  void Ldnp(const CPURegister& rt,
            const CPURegister& rt2,
            const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldnp(rt, rt2, src);
  }
  // Provide both double and float interfaces for FP immediate loads, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of vt. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  void Ldr(const VRegister& vt, double imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    RawLiteral* literal;
    if (vt.IsD()) {
      literal = new Literal<double>(imm,
                                    &literal_pool_,
                                    RawLiteral::kDeletedOnPlacementByPool);
    } else {
      literal = new Literal<float>(static_cast<float>(imm),
                                   &literal_pool_,
                                   RawLiteral::kDeletedOnPlacementByPool);
    }
    ldr(vt, literal);
  }
  void Ldr(const VRegister& vt, float imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    RawLiteral* literal;
    if (vt.IsS()) {
      literal = new Literal<float>(imm,
                                   &literal_pool_,
                                   RawLiteral::kDeletedOnPlacementByPool);
    } else {
      literal = new Literal<double>(static_cast<double>(imm),
                                    &literal_pool_,
                                    RawLiteral::kDeletedOnPlacementByPool);
    }
    ldr(vt, literal);
  }
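  // A minimal sketch of the literal-pool loads above (illustrative only;
  // `masm` is an assumed MacroAssembler instance):
  //   masm.Ldr(d0, 3.25);  // Adds a double literal to the pool and loads it.
  //   masm.Ldr(s1, 2.5f);  // Adds a float literal to the pool and loads it.
  // Each Literal is owned by the pool and freed once it has been placed
  // (RawLiteral::kDeletedOnPlacementByPool).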
  void Ldr(const VRegister& vt, uint64_t high64, uint64_t low64) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(vt.IsQ());
    SingleEmissionCheckScope guard(this);
    ldr(vt,
        new Literal<uint64_t>(high64,
                              low64,
                              &literal_pool_,
                              RawLiteral::kDeletedOnPlacementByPool));
  }
  void Ldr(const Register& rt, uint64_t imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    RawLiteral* literal;
    if (rt.Is64Bits()) {
      literal = new Literal<uint64_t>(imm,
                                      &literal_pool_,
                                      RawLiteral::kDeletedOnPlacementByPool);
    } else {
      VIXL_ASSERT(rt.Is32Bits());
      VIXL_ASSERT(IsUint32(imm) || IsInt32(imm));
      literal = new Literal<uint32_t>(static_cast<uint32_t>(imm),
                                      &literal_pool_,
                                      RawLiteral::kDeletedOnPlacementByPool);
    }
    ldr(rt, literal);
  }
  void Ldrsw(const Register& rt, uint32_t imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    ldrsw(rt,
          new Literal<uint32_t>(imm,
                                &literal_pool_,
                                RawLiteral::kDeletedOnPlacementByPool));
  }
  void Ldr(const CPURegister& rt, RawLiteral* literal) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldr(rt, literal);
  }
  void Ldrsw(const Register& rt, RawLiteral* literal) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldrsw(rt, literal);
  }
  void Ldxp(const Register& rt, const Register& rt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    ldxp(rt, rt2, src);
  }
  void Ldxr(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxr(rt, src);
  }
  void Ldxrb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxrb(rt, src);
  }
  void Ldxrh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxrh(rt, src);
  }
  void Lsl(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    lsl(rd, rn, shift);
  }
  void Lsl(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    lslv(rd, rn, rm);
  }
  void Lsr(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    lsr(rd, rn, shift);
  }
  void Lsr(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    lsrv(rd, rn, rm);
  }
  void Madd(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    madd(rd, rn, rm, ra);
  }
  void Mneg(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    mneg(rd, rn, rm);
  }
  void Mov(const Register& rd,
           const Register& rn,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg) {
    VIXL_ASSERT(allow_macro_instructions_);
    // Emit a register move only if the registers are distinct, or if they are
    // not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
    // registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If the sp is an operand, add #0 is emitted, otherwise, orr #0.
    if (!rd.Is(rn) ||
        (rd.Is32Bits() && (discard_mode == kDontDiscardForSameWReg))) {
      SingleEmissionCheckScope guard(this);
      mov(rd, rn);
    }
  }
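  // A minimal sketch of the discard behaviour above (illustrative only;
  // `masm` is an assumed MacroAssembler instance):
  //   masm.Mov(x0, x0);                       // Same X register: nothing is emitted.
  //   masm.Mov(w0, w0);                       // Emits a move to clear the top word of x0.
  //   masm.Mov(w0, w0, kDiscardForSameWReg);  // Caller allows the move to be dropped.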
  void Movk(const Register& rd, uint64_t imm, int shift = -1) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    movk(rd, imm, shift);
  }
  void Mrs(const Register& rt, SystemRegister sysreg) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    mrs(rt, sysreg);
  }
  void Msr(SystemRegister sysreg, const Register& rt) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    msr(sysreg, rt);
  }
  void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    sys(op1, crn, crm, op2, rt);
  }
  void Dc(DataCacheOp op, const Register& rt) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dc(op, rt);
  }
  void Ic(InstructionCacheOp op, const Register& rt) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ic(op, rt);
  }
  void Msub(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    msub(rd, rn, rm, ra);
  }
  void Mul(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    mul(rd, rn, rm);
  }
  void Nop() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    nop();
  }
  void Rbit(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rbit(rd, rn);
  }
  void Ret(const Register& xn = lr) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    SingleEmissionCheckScope guard(this);
    ret(xn);
  }
  void Rev(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rev(rd, rn);
  }
  void Rev16(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rev16(rd, rn);
  }
  void Rev32(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rev32(rd, rn);
  }
  void Ror(const Register& rd, const Register& rs, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rs.IsZero());
    SingleEmissionCheckScope guard(this);
    ror(rd, rs, shift);
  }
  void Ror(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    rorv(rd, rn, rm);
  }
  void Sbfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sbfiz(rd, rn, lsb, width);
  }
  void Sbfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sbfm(rd, rn, immr, imms);
  }
  void Sbfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sbfx(rd, rn, lsb, width);
  }
  void Scvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    scvtf(vd, rn, fbits);
  }
  void Sdiv(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    sdiv(rd, rn, rm);
  }
  void Smaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    smaddl(rd, rn, rm, ra);
  }
  void Smsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    smsubl(rd, rn, rm, ra);
  }
  void Smull(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    smull(rd, rn, rm);
  }
  void Smulh(const Register& xd, const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xd.IsZero());
    VIXL_ASSERT(!xn.IsZero());
    VIXL_ASSERT(!xm.IsZero());
    SingleEmissionCheckScope guard(this);
    smulh(xd, xn, xm);
  }
  void Stlr(const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    stlr(rt, dst);
  }
  void Stlrb(const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    stlrb(rt, dst);
  }
  void Stlrh(const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    stlrh(rt, dst);
  }
  void Stlxp(const Register& rs,
             const Register& rt,
             const Register& rt2,
             const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    VIXL_ASSERT(!rs.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    stlxp(rs, rt, rt2, dst);
  }
  void Stlxr(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stlxr(rs, rt, dst);
  }
  void Stlxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stlxrb(rs, rt, dst);
  }
  void Stlxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stlxrh(rs, rt, dst);
  }
  void Stnp(const CPURegister& rt,
            const CPURegister& rt2,
            const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    stnp(rt, rt2, dst);
  }
  void Stxp(const Register& rs,
            const Register& rt,
            const Register& rt2,
            const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    VIXL_ASSERT(!rs.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    stxp(rs, rt, rt2, dst);
  }
  void Stxr(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stxr(rs, rt, dst);
  }
  void Stxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stxrb(rs, rt, dst);
  }
  void Stxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stxrh(rs, rt, dst);
  }
  void Svc(int code) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    svc(code);
  }
  void Sxtb(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sxtb(rd, rn);
  }
  void Sxth(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sxth(rd, rn);
  }
  void Sxtw(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sxtw(rd, rn);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vm);
  }
  void Tbl(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vn2, vm);
  }
  void Tbl(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vn2, vn3, vm);
  }
  void Tbl(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vn4,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vn2, vn3, vn4, vm);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vm);
  }
  void Tbx(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vn2, vm);
  }
  void Tbx(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vn2, vn3, vm);
  }
  void Tbx(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vn4,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vn2, vn3, vn4, vm);
  }
  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);
  void Ubfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ubfiz(rd, rn, lsb, width);
  }
  void Ubfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ubfm(rd, rn, immr, imms);
  }
  void Ubfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ubfx(rd, rn, lsb, width);
  }
  void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ucvtf(vd, rn, fbits);
  }
  void Udiv(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    udiv(rd, rn, rm);
  }
  void Umaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    umaddl(rd, rn, rm, ra);
  }
  void Umull(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    umull(rd, rn, rm);
  }
  void Umulh(const Register& xd, const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xd.IsZero());
    VIXL_ASSERT(!xn.IsZero());
    VIXL_ASSERT(!xm.IsZero());
    SingleEmissionCheckScope guard(this);
    umulh(xd, xn, xm);
  }
  void Umsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    umsubl(rd, rn, rm, ra);
  }
  void Unreachable() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    if (generate_simulator_code_) {
      hlt(kUnreachableOpcode);
    } else {
      // Branch to 0 to generate a segfault.
      // lr - kInstructionSize is the address of the offending instruction.
      blr(xzr);
    }
  }
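  // A minimal usage sketch (illustrative only; `masm` is an assumed
  // MacroAssembler instance): mark a path that must never execute, for example
  // after an exhaustive branch table:
  //   masm.Unreachable();  // hlt under the simulator, otherwise blr xzr (branch to 0).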
  void Uxtb(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    uxtb(rd, rn);
  }
  void Uxth(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    uxth(rd, rn);
  }
  void Uxtw(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    uxtw(rd, rn);
  }

// NEON 3 vector register instructions.
#define NEON_3VREG_MACRO_LIST(V) \
  V(add, Add)                    \
  V(addhn, Addhn)                \
  V(addhn2, Addhn2)              \
  V(addp, Addp)                  \
  V(and_, And)                   \
  V(bic, Bic)                    \
  V(bif, Bif)                    \
  V(bit, Bit)                    \
  V(bsl, Bsl)                    \
  V(cmeq, Cmeq)                  \
  V(cmge, Cmge)                  \
  V(cmgt, Cmgt)                  \
  V(cmhi, Cmhi)                  \
  V(cmhs, Cmhs)                  \
  V(cmtst, Cmtst)                \
  V(eor, Eor)                    \
  V(fabd, Fabd)                  \
  V(facge, Facge)                \
  V(facgt, Facgt)                \
  V(faddp, Faddp)                \
  V(fcmeq, Fcmeq)                \
  V(fcmge, Fcmge)                \
  V(fcmgt, Fcmgt)                \
  V(fmaxnmp, Fmaxnmp)            \
  V(fmaxp, Fmaxp)                \
  V(fminnmp, Fminnmp)            \
  V(fminp, Fminp)                \
  V(fmla, Fmla)                  \
  V(fmls, Fmls)                  \
  V(fmulx, Fmulx)                \
  V(frecps, Frecps)              \
  V(frsqrts, Frsqrts)            \
  V(mla, Mla)                    \
  V(mls, Mls)                    \
  V(mul, Mul)                    \
  V(orn, Orn)                    \
  V(orr, Orr)                    \
  V(pmul, Pmul)                  \
  V(pmull, Pmull)                \
  V(pmull2, Pmull2)              \
  V(raddhn, Raddhn)              \
  V(raddhn2, Raddhn2)            \
  V(rsubhn, Rsubhn)              \
  V(rsubhn2, Rsubhn2)            \
  V(saba, Saba)                  \
  V(sabal, Sabal)                \
  V(sabal2, Sabal2)              \
  V(sabd, Sabd)                  \
  V(sabdl, Sabdl)                \
  V(sabdl2, Sabdl2)              \
  V(saddl, Saddl)                \
  V(saddl2, Saddl2)              \
  V(saddw, Saddw)                \
  V(saddw2, Saddw2)              \
  V(shadd, Shadd)                \
  V(shsub, Shsub)                \
  V(smax, Smax)                  \
  V(smaxp, Smaxp)                \
  V(smin, Smin)                  \
  V(sminp, Sminp)                \
  V(smlal, Smlal)                \
  V(smlal2, Smlal2)              \
  V(smlsl, Smlsl)                \
  V(smlsl2, Smlsl2)              \
  V(smull, Smull)                \
  V(smull2, Smull2)              \
  V(sqadd, Sqadd)                \
  V(sqdmlal, Sqdmlal)            \
  V(sqdmlal2, Sqdmlal2)          \
  V(sqdmlsl, Sqdmlsl)            \
  V(sqdmlsl2, Sqdmlsl2)          \
  V(sqdmulh, Sqdmulh)            \
  V(sqdmull, Sqdmull)            \
  V(sqdmull2, Sqdmull2)          \
  V(sqrdmulh, Sqrdmulh)          \
  V(sqrshl, Sqrshl)              \
  V(sqshl, Sqshl)                \
  V(sqsub, Sqsub)                \
  V(srhadd, Srhadd)              \
  V(srshl, Srshl)                \
  V(sshl, Sshl)                  \
  V(ssubl, Ssubl)                \
  V(ssubl2, Ssubl2)              \
  V(ssubw, Ssubw)                \
  V(ssubw2, Ssubw2)              \
  V(sub, Sub)                    \
  V(subhn, Subhn)                \
  V(subhn2, Subhn2)              \
  V(trn1, Trn1)                  \
  V(trn2, Trn2)                  \
  V(uaba, Uaba)                  \
  V(uabal, Uabal)                \
  V(uabal2, Uabal2)              \
  V(uabd, Uabd)                  \
  V(uabdl, Uabdl)                \
  V(uabdl2, Uabdl2)              \
  V(uaddl, Uaddl)                \
  V(uaddl2, Uaddl2)              \
  V(uaddw, Uaddw)                \
  V(uaddw2, Uaddw2)              \
  V(uhadd, Uhadd)                \
  V(uhsub, Uhsub)                \
  V(umax, Umax)                  \
  V(umaxp, Umaxp)                \
  V(umin, Umin)                  \
  V(uminp, Uminp)                \
  V(umlal, Umlal)                \
  V(umlal2, Umlal2)              \
  V(umlsl, Umlsl)                \
  V(umlsl2, Umlsl2)              \
  V(umull, Umull)                \
  V(umull2, Umull2)              \
  V(uqadd, Uqadd)                \
  V(uqrshl, Uqrshl)              \
  V(uqshl, Uqshl)                \
  V(uqsub, Uqsub)                \
  V(urhadd, Urhadd)              \
  V(urshl, Urshl)                \
  V(ushl, Ushl)                  \
  V(usubl, Usubl)                \
  V(usubl2, Usubl2)              \
  V(usubw, Usubw)                \
  V(usubw2, Usubw2)              \
  V(uzp1, Uzp1)                  \
  V(uzp2, Uzp2)                  \
  V(zip1, Zip1)                  \
  V(zip2, Zip2)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                     \
  void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
    VIXL_ASSERT(allow_macro_instructions_);                                  \
    SingleEmissionCheckScope guard(this);                                    \
    ASM(vd, vn, vm);                                                         \
  }
  NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
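  // For reference (illustrative only), each NEON_3VREG_MACRO_LIST entry expands
  // to a thin wrapper; for example, V(add, Add) generates:
  //   void Add(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
  //     VIXL_ASSERT(allow_macro_instructions_);
  //     SingleEmissionCheckScope guard(this);
  //     add(vd, vn, vm);
  //   }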
2259 
2260 // NEON 2 vector register instructions.
2261 #define NEON_2VREG_MACRO_LIST(V) \
2262   V(abs, Abs)                    \
2263   V(addp, Addp)                  \
2264   V(addv, Addv)                  \
2265   V(cls, Cls)                    \
2266   V(clz, Clz)                    \
2267   V(cnt, Cnt)                    \
2268   V(fabs, Fabs)                  \
2269   V(faddp, Faddp)                \
2270   V(fcvtas, Fcvtas)              \
2271   V(fcvtau, Fcvtau)              \
2272   V(fcvtms, Fcvtms)              \
2273   V(fcvtmu, Fcvtmu)              \
2274   V(fcvtns, Fcvtns)              \
2275   V(fcvtnu, Fcvtnu)              \
2276   V(fcvtps, Fcvtps)              \
2277   V(fcvtpu, Fcvtpu)              \
2278   V(fmaxnmp, Fmaxnmp)            \
2279   V(fmaxnmv, Fmaxnmv)            \
2280   V(fmaxp, Fmaxp)                \
2281   V(fmaxv, Fmaxv)                \
2282   V(fminnmp, Fminnmp)            \
2283   V(fminnmv, Fminnmv)            \
2284   V(fminp, Fminp)                \
2285   V(fminv, Fminv)                \
2286   V(fneg, Fneg)                  \
2287   V(frecpe, Frecpe)              \
2288   V(frecpx, Frecpx)              \
2289   V(frinta, Frinta)              \
2290   V(frinti, Frinti)              \
2291   V(frintm, Frintm)              \
2292   V(frintn, Frintn)              \
2293   V(frintp, Frintp)              \
2294   V(frintx, Frintx)              \
2295   V(frintz, Frintz)              \
2296   V(frsqrte, Frsqrte)            \
2297   V(fsqrt, Fsqrt)                \
2298   V(mov, Mov)                    \
2299   V(mvn, Mvn)                    \
2300   V(neg, Neg)                    \
2301   V(not_, Not)                   \
2302   V(rbit, Rbit)                  \
2303   V(rev16, Rev16)                \
2304   V(rev32, Rev32)                \
2305   V(rev64, Rev64)                \
2306   V(sadalp, Sadalp)              \
2307   V(saddlp, Saddlp)              \
2308   V(saddlv, Saddlv)              \
2309   V(smaxv, Smaxv)                \
2310   V(sminv, Sminv)                \
2311   V(sqabs, Sqabs)                \
2312   V(sqneg, Sqneg)                \
2313   V(sqxtn, Sqxtn)                \
2314   V(sqxtn2, Sqxtn2)              \
2315   V(sqxtun, Sqxtun)              \
2316   V(sqxtun2, Sqxtun2)            \
2317   V(suqadd, Suqadd)              \
2318   V(sxtl, Sxtl)                  \
2319   V(sxtl2, Sxtl2)                \
2320   V(uadalp, Uadalp)              \
2321   V(uaddlp, Uaddlp)              \
2322   V(uaddlv, Uaddlv)              \
2323   V(umaxv, Umaxv)                \
2324   V(uminv, Uminv)                \
2325   V(uqxtn, Uqxtn)                \
2326   V(uqxtn2, Uqxtn2)              \
2327   V(urecpe, Urecpe)              \
2328   V(ursqrte, Ursqrte)            \
2329   V(usqadd, Usqadd)              \
2330   V(uxtl, Uxtl)                  \
2331   V(uxtl2, Uxtl2)                \
2332   V(xtn, Xtn)                    \
2333   V(xtn2, Xtn2)
2334 
2335 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                \
2336   void MASM(const VRegister& vd, const VRegister& vn) { \
2337     VIXL_ASSERT(allow_macro_instructions_);             \
2338     SingleEmissionCheckScope guard(this);               \
2339     ASM(vd, vn);                                        \
2340   }
NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)2341   NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2342 #undef DEFINE_MACRO_ASM_FUNC
2343 
2344 // NEON 2 vector register with immediate instructions.
2345 #define NEON_2VREG_FPIMM_MACRO_LIST(V) \
2346   V(fcmeq, Fcmeq)                      \
2347   V(fcmge, Fcmge)                      \
2348   V(fcmgt, Fcmgt)                      \
2349   V(fcmle, Fcmle)                      \
2350   V(fcmlt, Fcmlt)
2351 
2352 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                            \
2353   void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
2354     VIXL_ASSERT(allow_macro_instructions_);                         \
2355     SingleEmissionCheckScope guard(this);                           \
2356     ASM(vd, vn, imm);                                               \
2357   }
2358   NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2359 #undef DEFINE_MACRO_ASM_FUNC
2360 
2361 // NEON by element instructions.
2362 #define NEON_BYELEMENT_MACRO_LIST(V) \
2363   V(fmul, Fmul)                      \
2364   V(fmla, Fmla)                      \
2365   V(fmls, Fmls)                      \
2366   V(fmulx, Fmulx)                    \
2367   V(mul, Mul)                        \
2368   V(mla, Mla)                        \
2369   V(mls, Mls)                        \
2370   V(sqdmulh, Sqdmulh)                \
2371   V(sqrdmulh, Sqrdmulh)              \
2372   V(sqdmull, Sqdmull)                \
2373   V(sqdmull2, Sqdmull2)              \
2374   V(sqdmlal, Sqdmlal)                \
2375   V(sqdmlal2, Sqdmlal2)              \
2376   V(sqdmlsl, Sqdmlsl)                \
2377   V(sqdmlsl2, Sqdmlsl2)              \
2378   V(smull, Smull)                    \
2379   V(smull2, Smull2)                  \
2380   V(smlal, Smlal)                    \
2381   V(smlal2, Smlal2)                  \
2382   V(smlsl, Smlsl)                    \
2383   V(smlsl2, Smlsl2)                  \
2384   V(umull, Umull)                    \
2385   V(umull2, Umull2)                  \
2386   V(umlal, Umlal)                    \
2387   V(umlal2, Umlal2)                  \
2388   V(umlsl, Umlsl)                    \
2389   V(umlsl2, Umlsl2)
2390 
2391 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM)    \
2392   void MASM(const VRegister& vd,            \
2393             const VRegister& vn,            \
2394             const VRegister& vm,            \
2395             int vm_index) {                 \
2396     VIXL_ASSERT(allow_macro_instructions_); \
2397     SingleEmissionCheckScope guard(this);   \
2398     ASM(vd, vn, vm, vm_index);              \
2399   }
2400   NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2401 #undef DEFINE_MACRO_ASM_FUNC
2402 
2403 #define NEON_2VREG_SHIFT_MACRO_LIST(V) \
2404   V(rshrn, Rshrn)                      \
2405   V(rshrn2, Rshrn2)                    \
2406   V(shl, Shl)                          \
2407   V(shll, Shll)                        \
2408   V(shll2, Shll2)                      \
2409   V(shrn, Shrn)                        \
2410   V(shrn2, Shrn2)                      \
2411   V(sli, Sli)                          \
2412   V(sqrshrn, Sqrshrn)                  \
2413   V(sqrshrn2, Sqrshrn2)                \
2414   V(sqrshrun, Sqrshrun)                \
2415   V(sqrshrun2, Sqrshrun2)              \
2416   V(sqshl, Sqshl)                      \
2417   V(sqshlu, Sqshlu)                    \
2418   V(sqshrn, Sqshrn)                    \
2419   V(sqshrn2, Sqshrn2)                  \
2420   V(sqshrun, Sqshrun)                  \
2421   V(sqshrun2, Sqshrun2)                \
2422   V(sri, Sri)                          \
2423   V(srshr, Srshr)                      \
2424   V(srsra, Srsra)                      \
2425   V(sshll, Sshll)                      \
2426   V(sshll2, Sshll2)                    \
2427   V(sshr, Sshr)                        \
2428   V(ssra, Ssra)                        \
2429   V(uqrshrn, Uqrshrn)                  \
2430   V(uqrshrn2, Uqrshrn2)                \
2431   V(uqshl, Uqshl)                      \
2432   V(uqshrn, Uqshrn)                    \
2433   V(uqshrn2, Uqshrn2)                  \
2434   V(urshr, Urshr)                      \
2435   V(ursra, Ursra)                      \
2436   V(ushll, Ushll)                      \
2437   V(ushll2, Ushll2)                    \
2438   V(ushr, Ushr)                        \
2439   V(usra, Usra)
2440 
2441 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                           \
2442   void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
2443     VIXL_ASSERT(allow_macro_instructions_);                        \
2444     SingleEmissionCheckScope guard(this);                          \
2445     ASM(vd, vn, shift);                                            \
2446   }
2447   NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2448 #undef DEFINE_MACRO_ASM_FUNC
2449 
2450   void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
2451     VIXL_ASSERT(allow_macro_instructions_);
2452     SingleEmissionCheckScope guard(this);
2453     bic(vd, imm8, left_shift);
2454   }
Cmeq(const VRegister & vd,const VRegister & vn,int imm)2455   void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
2456     VIXL_ASSERT(allow_macro_instructions_);
2457     SingleEmissionCheckScope guard(this);
2458     cmeq(vd, vn, imm);
2459   }
Cmge(const VRegister & vd,const VRegister & vn,int imm)2460   void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
2461     VIXL_ASSERT(allow_macro_instructions_);
2462     SingleEmissionCheckScope guard(this);
2463     cmge(vd, vn, imm);
2464   }
Cmgt(const VRegister & vd,const VRegister & vn,int imm)2465   void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
2466     VIXL_ASSERT(allow_macro_instructions_);
2467     SingleEmissionCheckScope guard(this);
2468     cmgt(vd, vn, imm);
2469   }
Cmle(const VRegister & vd,const VRegister & vn,int imm)2470   void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
2471     VIXL_ASSERT(allow_macro_instructions_);
2472     SingleEmissionCheckScope guard(this);
2473     cmle(vd, vn, imm);
2474   }
Cmlt(const VRegister & vd,const VRegister & vn,int imm)2475   void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
2476     VIXL_ASSERT(allow_macro_instructions_);
2477     SingleEmissionCheckScope guard(this);
2478     cmlt(vd, vn, imm);
2479   }
Dup(const VRegister & vd,const VRegister & vn,int index)2480   void Dup(const VRegister& vd, const VRegister& vn, int index) {
2481     VIXL_ASSERT(allow_macro_instructions_);
2482     SingleEmissionCheckScope guard(this);
2483     dup(vd, vn, index);
2484   }
Dup(const VRegister & vd,const Register & rn)2485   void Dup(const VRegister& vd, const Register& rn) {
2486     VIXL_ASSERT(allow_macro_instructions_);
2487     SingleEmissionCheckScope guard(this);
2488     dup(vd, rn);
2489   }
  void Ext(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vm,
           int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ext(vd, vn, vm, index);
  }
  void Ins(const VRegister& vd,
           int vd_index,
           const VRegister& vn,
           int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ins(vd, vd_index, vn, vn_index);
  }
  void Ins(const VRegister& vd, int vd_index, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ins(vd, vd_index, rn);
  }
  void Ld1(const VRegister& vt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, vt2, src);
  }
  void Ld1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, vt2, vt3, src);
  }
  void Ld1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, vt2, vt3, vt4, src);
  }
  void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, lane, src);
  }
  void Ld1r(const VRegister& vt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1r(vt, src);
  }
  void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld2(vt, vt2, src);
  }
  void Ld2(const VRegister& vt,
           const VRegister& vt2,
           int lane,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld2(vt, vt2, lane, src);
  }
  void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld2r(vt, vt2, src);
  }
  void Ld3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld3(vt, vt2, vt3, src);
  }
  void Ld3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           int lane,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld3(vt, vt2, vt3, lane, src);
  }
  void Ld3r(const VRegister& vt,
            const VRegister& vt2,
            const VRegister& vt3,
            const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld3r(vt, vt2, vt3, src);
  }
  void Ld4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld4(vt, vt2, vt3, vt4, src);
  }
  void Ld4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           int lane,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld4(vt, vt2, vt3, vt4, lane, src);
  }
  void Ld4r(const VRegister& vt,
            const VRegister& vt2,
            const VRegister& vt3,
            const VRegister& vt4,
            const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld4r(vt, vt2, vt3, vt4, src);
  }
  void Mov(const VRegister& vd,
           int vd_index,
           const VRegister& vn,
           int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(vd, vd_index, vn, vn_index);
  }
  void Mov(const VRegister& vd, const VRegister& vn, int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(vd, vn, index);
  }
  void Mov(const VRegister& vd, int vd_index, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(vd, vd_index, rn);
  }
  void Mov(const Register& rd, const VRegister& vn, int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(rd, vn, vn_index);
  }
  void Movi(const VRegister& vd,
            uint64_t imm,
            Shift shift = LSL,
            int shift_amount = 0);
  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
  void Mvni(const VRegister& vd,
            const int imm8,
            Shift shift = LSL,
            const int shift_amount = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mvni(vd, imm8, shift, shift_amount);
  }
  void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    orr(vd, imm8, left_shift);
  }
  void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    scvtf(vd, vn, fbits);
  }
  void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ucvtf(vd, vn, fbits);
  }
  void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtzs(vd, vn, fbits);
  }
  void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtzu(vd, vn, fbits);
  }
  void St1(const VRegister& vt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, dst);
  }
  void St1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, vt3, dst);
  }
  void St1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, vt3, vt4, dst);
  }
  void St1(const VRegister& vt, int lane, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, lane, dst);
  }
  void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st2(vt, vt2, dst);
  }
  void St3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st3(vt, vt2, vt3, dst);
  }
  void St4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st4(vt, vt2, vt3, vt4, dst);
  }
  void St2(const VRegister& vt,
           const VRegister& vt2,
           int lane,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st2(vt, vt2, lane, dst);
  }
  void St3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           int lane,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st3(vt, vt2, vt3, lane, dst);
  }
  void St4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           int lane,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st4(vt, vt2, vt3, vt4, lane, dst);
  }
  void Smov(const Register& rd, const VRegister& vn, int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    smov(rd, vn, vn_index);
  }
  void Umov(const Register& rd, const VRegister& vn, int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    umov(rd, vn, vn_index);
  }
  void Crc32b(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32b(rd, rn, rm);
  }
  void Crc32h(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32h(rd, rn, rm);
  }
  void Crc32w(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32w(rd, rn, rm);
  }
  void Crc32x(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32x(rd, rn, rm);
  }
  void Crc32cb(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32cb(rd, rn, rm);
  }
  void Crc32ch(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32ch(rd, rn, rm);
  }
  void Crc32cw(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32cw(rd, rn, rm);
  }
  void Crc32cx(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32cx(rd, rn, rm);
  }

  template <typename T>
  Literal<T>* CreateLiteralDestroyedWithPool(T value) {
    return new Literal<T>(value,
                          &literal_pool_,
                          RawLiteral::kDeletedOnPoolDestruction);
  }

  template <typename T>
  Literal<T>* CreateLiteralDestroyedWithPool(T high64, T low64) {
    return new Literal<T>(high64,
                          low64,
                          &literal_pool_,
                          RawLiteral::kDeletedOnPoolDestruction);
  }

  // Push the system stack pointer (sp) down to allow the same to be done to
  // the current stack pointer (according to StackPointer()). This must be
  // called _before_ accessing the memory.
  //
  // This is necessary when pushing or otherwise adding things to the stack, to
  // satisfy the AAPCS64 constraint that the memory below the system stack
  // pointer is not accessed.
  //
  // This method asserts that StackPointer() is not sp, since the call does
  // not make sense in that context.
  //
  // TODO: This method can only accept values of 'space' that can be encoded in
  // one instruction. Refer to the implementation for details.
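  //
  // A minimal usage sketch (illustrative only; the claimed size and the `__`
  // shorthand for the macro assembler are assumptions):
  //
  //   __ BumpSystemStackPointer(0x100);
  //   // It is now safe to access the 0x100 bytes below StackPointer().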
  void BumpSystemStackPointer(const Operand& space);

  virtual bool AllowMacroInstructions() const VIXL_OVERRIDE {
    return allow_macro_instructions_;
  }

  virtual bool ArePoolsBlocked() const VIXL_OVERRIDE {
    return IsLiteralPoolBlocked() && IsVeneerPoolBlocked();
  }

  void SetGenerateSimulatorCode(bool value) {
    generate_simulator_code_ = value;
  }

  bool GenerateSimulatorCode() const { return generate_simulator_code_; }

  size_t GetLiteralPoolSize() const { return literal_pool_.GetSize(); }
  VIXL_DEPRECATED("GetLiteralPoolSize", size_t LiteralPoolSize() const) {
    return GetLiteralPoolSize();
  }

  size_t GetLiteralPoolMaxSize() const { return literal_pool_.GetMaxSize(); }
  VIXL_DEPRECATED("GetLiteralPoolMaxSize", size_t LiteralPoolMaxSize() const) {
    return GetLiteralPoolMaxSize();
  }

  size_t GetVeneerPoolMaxSize() const { return veneer_pool_.GetMaxSize(); }
  VIXL_DEPRECATED("GetVeneerPoolMaxSize", size_t VeneerPoolMaxSize() const) {
    return GetVeneerPoolMaxSize();
  }

  // The number of unresolved branches that may require a veneer.
  int GetNumberOfPotentialVeneers() const {
    return veneer_pool_.GetNumberOfPotentialVeneers();
  }
  VIXL_DEPRECATED("GetNumberOfPotentialVeneers",
                  int NumberOfPotentialVeneers() const) {
    return GetNumberOfPotentialVeneers();
  }

  ptrdiff_t GetNextCheckPoint() const {
    ptrdiff_t next_checkpoint_for_pools =
        std::min(literal_pool_.GetCheckpoint(), veneer_pool_.GetCheckpoint());
    return std::min(next_checkpoint_for_pools,
                    static_cast<ptrdiff_t>(GetBuffer().GetCapacity()));
  }
  VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) {
    return GetNextCheckPoint();
  }

  void EmitLiteralPool(LiteralPool::EmitOption option) {
    if (!literal_pool_.IsEmpty()) literal_pool_.Emit(option);

    checkpoint_ = GetNextCheckPoint();
    recommended_checkpoint_ = literal_pool_.GetNextRecommendedCheckpoint();
  }

  void CheckEmitFor(size_t amount);
  void EnsureEmitFor(size_t amount) {
    ptrdiff_t offset = amount;
    ptrdiff_t max_pools_size =
        literal_pool_.GetMaxSize() + veneer_pool_.GetMaxSize();
    ptrdiff_t cursor = GetCursorOffset();
    if ((cursor >= recommended_checkpoint_) ||
        ((cursor + offset + max_pools_size) >= checkpoint_)) {
      CheckEmitFor(amount);
    }
  }

  void CheckEmitPoolsFor(size_t amount);
  virtual void EnsureEmitPoolsFor(size_t amount) VIXL_OVERRIDE {
    ptrdiff_t offset = amount;
    ptrdiff_t max_pools_size =
        literal_pool_.GetMaxSize() + veneer_pool_.GetMaxSize();
    ptrdiff_t cursor = GetCursorOffset();
    if ((cursor >= recommended_checkpoint_) ||
        ((cursor + offset + max_pools_size) >= checkpoint_)) {
      CheckEmitPoolsFor(amount);
    }
  }

  // Set the current stack pointer, but don't generate any code.
  void SetStackPointer(const Register& stack_pointer) {
    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(stack_pointer));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  const Register& StackPointer() const { return sp_; }

  CPURegList* GetScratchRegisterList() { return &tmp_list_; }
  VIXL_DEPRECATED("GetScratchRegisterList", CPURegList* TmpList()) {
    return GetScratchRegisterList();
  }

  CPURegList* GetScratchFPRegisterList() { return &fptmp_list_; }
  VIXL_DEPRECATED("GetScratchFPRegisterList", CPURegList* FPTmpList()) {
    return GetScratchFPRegisterList();
  }

  // Get or set the current (most-deeply-nested) UseScratchRegisterScope.
  void SetCurrentScratchRegisterScope(UseScratchRegisterScope* scope) {
    current_scratch_scope_ = scope;
  }
  UseScratchRegisterScope* GetCurrentScratchRegisterScope() {
    return current_scratch_scope_;
  }

  // Like printf, but print at run-time from generated code.
  //
  // The caller must ensure that arguments for floating-point placeholders
  // (such as %e, %f or %g) are VRegisters in format 1S or 1D, and that
  // arguments for integer placeholders are Registers.
  //
  // At the moment it is only possible to print the value of sp if it is the
  // current stack pointer. Otherwise, the MacroAssembler will automatically
  // update sp on every push (using BumpSystemStackPointer), so determining its
  // value is difficult.
  //
  // Format placeholders that refer to more than one argument, or to a specific
  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
  //
  // This function automatically preserves caller-saved registers so that
  // calling code can use Printf at any point without having to worry about
  // corruption. The preservation mechanism generates a lot of code. If this is
  // a problem, preserve the important registers manually and then call
  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
  // implicitly preserved.
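  //
  // A minimal usage sketch (illustrative only; `__` is assumed to be the usual
  // shorthand for the macro assembler, and the values are arbitrary):
  //
  //   __ Mov(w0, 42);
  //   __ Fmov(d0, 3.14);
  //   __ Printf("w0 = %d, d0 = %g\n", w0, d0);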
  void Printf(const char* format,
              CPURegister arg0 = NoCPUReg,
              CPURegister arg1 = NoCPUReg,
              CPURegister arg2 = NoCPUReg,
              CPURegister arg3 = NoCPUReg);

  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
  //
  // The return code from the system printf call will be returned in x0.
  void PrintfNoPreserve(const char* format,
                        const CPURegister& arg0 = NoCPUReg,
                        const CPURegister& arg1 = NoCPUReg,
                        const CPURegister& arg2 = NoCPUReg,
                        const CPURegister& arg3 = NoCPUReg);

  // Trace control when running the debug simulator.
  //
  // For example:
  //
  // __ Trace(LOG_REGS, TRACE_ENABLE);
  // Will add registers to the trace if they are not already being traced.
  //
  // __ Trace(LOG_DISASM, TRACE_DISABLE);
  // Will stop logging disassembly. It has no effect if the disassembly wasn't
  // already being logged.
  void Trace(TraceParameters parameters, TraceCommand command);

  // Log the requested data independently of what is being traced.
  //
  // For example:
  //
  // __ Log(LOG_FLAGS)
  // Will output the flags.
  void Log(TraceParameters parameters);

  // Enable or disable instrumentation when an Instrument visitor is attached to
  // the simulator.
  void EnableInstrumentation();
  void DisableInstrumentation();

  // Add a marker to the instrumentation data produced by an Instrument visitor.
  // The name is a two-character string that will be attached to the marker in
  // the output data.
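  //
  // For example (an illustrative sketch; the marker name is arbitrary):
  //
  //   __ AnnotateInstrumentation("01");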
  void AnnotateInstrumentation(const char* marker_name);

  LiteralPool* GetLiteralPool() { return &literal_pool_; }

// Support for simulated runtime calls.

// `CallRuntime` requires variadic templates, which are only available from
// C++11 onwards.
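//
// A usage sketch (illustrative only; `Square` is a hypothetical helper and
// `__` the usual shorthand for the macro assembler). The call follows the
// normal AAPCS64 conventions, so the argument is taken from x0 and the result
// is returned in x0:
//
//   int64_t Square(int64_t x) { return x * x; }
//   ...
//   __ Mov(x0, 9);
//   __ CallRuntime(Square);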
#if __cplusplus >= 201103L
#define VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
#endif  // #if __cplusplus >= 201103L

#ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
  template <typename R, typename... P>
  void CallRuntime(R (*function)(P...));
#endif  // #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT

 protected:
  void BlockLiteralPool() { literal_pool_.Block(); }
  void ReleaseLiteralPool() { literal_pool_.Release(); }
  bool IsLiteralPoolBlocked() const { return literal_pool_.IsBlocked(); }
  void BlockVeneerPool() { veneer_pool_.Block(); }
  void ReleaseVeneerPool() { veneer_pool_.Release(); }
  bool IsVeneerPoolBlocked() const { return veneer_pool_.IsBlocked(); }

  virtual void BlockPools() VIXL_OVERRIDE {
    BlockLiteralPool();
    BlockVeneerPool();
  }

  virtual void ReleasePools() VIXL_OVERRIDE {
    ReleaseLiteralPool();
    ReleaseVeneerPool();
  }

  // The scopes below need to be able to block and release a particular pool.
  // TODO: Consider removing these scopes or moving them to
  // code-generation-scopes-vixl.h.
  friend class BlockPoolsScope;
  friend class BlockLiteralPoolScope;
  friend class BlockVeneerPoolScope;

  virtual void SetAllowMacroInstructions(bool value) VIXL_OVERRIDE {
    allow_macro_instructions_ = value;
  }

  // Helper used to query information about code generation and to generate
  // code for `csel`.
  // Here and for the related helpers below:
  // - Code is generated when `masm` is not `NULL`.
  // - On return and when set, `should_synthesise_left` and
  //   `should_synthesise_right` will indicate whether `left` and `right`
  //   should be synthesized in a temporary register.
  static void CselHelper(MacroAssembler* masm,
                         const Register& rd,
                         Operand left,
                         Operand right,
                         Condition cond,
                         bool* should_synthesise_left = NULL,
                         bool* should_synthesise_right = NULL);

  // The helper returns `true` if it can handle the specified arguments.
  // Also see comments for `CselHelper()`.
  static bool CselSubHelperTwoImmediates(MacroAssembler* masm,
                                         const Register& rd,
                                         int64_t left,
                                         int64_t right,
                                         Condition cond,
                                         bool* should_synthesise_left,
                                         bool* should_synthesise_right);

  // See comments for `CselHelper()`.
  static bool CselSubHelperTwoOrderedImmediates(MacroAssembler* masm,
                                                const Register& rd,
                                                int64_t left,
                                                int64_t right,
                                                Condition cond);

  // See comments for `CselHelper()`.
  static void CselSubHelperRightSmallImmediate(MacroAssembler* masm,
                                               UseScratchRegisterScope* temps,
                                               const Register& rd,
                                               const Operand& left,
                                               const Operand& right,
                                               Condition cond,
                                               bool* should_synthesise_left);

 private:
  // The actual Push and Pop implementations. These don't generate any code
  // other than that required for the push or pop. This allows
  // (Push|Pop)CPURegList to bundle together setup code for a large block of
  // registers.
  //
  // Note that size is per register, and is specified in bytes.
  void PushHelper(int count,
                  int size,
                  const CPURegister& src0,
                  const CPURegister& src1,
                  const CPURegister& src2,
                  const CPURegister& src3);
  void PopHelper(int count,
                 int size,
                 const CPURegister& dst0,
                 const CPURegister& dst1,
                 const CPURegister& dst2,
                 const CPURegister& dst3);

  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
  void Movi64bitHelper(const VRegister& vd, uint64_t imm);

  // Perform necessary maintenance operations before a push or pop.
  //
  // Note that size is per register, and is specified in bytes.
  void PrepareForPush(int count, int size);
  void PrepareForPop(int count, int size);

  // The actual implementation of load and store operations for CPURegList.
  enum LoadStoreCPURegListAction { kLoad, kStore };
  void LoadStoreCPURegListHelper(LoadStoreCPURegListAction operation,
                                 CPURegList registers,
                                 const MemOperand& mem);
  // Returns a MemOperand suitable for loading or storing a CPURegList at
  // `mem`. This helper may allocate registers from `scratch_scope` and
  // generate code to compute an intermediate address. The resulting MemOperand
  // is only valid as long as `scratch_scope` remains valid.
  MemOperand BaseMemOperandForLoadStoreCPURegList(
      const CPURegList& registers,
      const MemOperand& mem,
      UseScratchRegisterScope* scratch_scope);

  bool LabelIsOutOfRange(Label* label, ImmBranchType branch_type) {
    return !Instruction::IsValidImmPCOffset(branch_type,
                                            label->GetLocation() -
                                                GetCursorOffset());
  }

  // Tell whether any of the macro instructions can be used. When false, the
  // MacroAssembler will assert if a method that can emit a variable number
  // of instructions is called.
  bool allow_macro_instructions_;

  // Indicates whether we should generate simulator or native code.
  bool generate_simulator_code_;

  // The register to use as a stack pointer for stack operations.
  Register sp_;

  // Scratch registers available for use by the MacroAssembler.
  CPURegList tmp_list_;
  CPURegList fptmp_list_;

  UseScratchRegisterScope* current_scratch_scope_;

  LiteralPool literal_pool_;
  VeneerPool veneer_pool_;

  ptrdiff_t checkpoint_;
  ptrdiff_t recommended_checkpoint_;

  friend class Pool;
  friend class LiteralPool;
};


inline size_t VeneerPool::GetOtherPoolsMaxSize() const {
  return masm_->GetLiteralPoolMaxSize();
}


inline size_t LiteralPool::GetOtherPoolsMaxSize() const {
  return masm_->GetVeneerPoolMaxSize();
}


inline void LiteralPool::SetNextRecommendedCheckpoint(ptrdiff_t offset) {
  masm_->recommended_checkpoint_ =
      std::min(masm_->recommended_checkpoint_, offset);
  recommended_checkpoint_ = offset;
}

class InstructionAccurateScope : public ExactAssemblyScope {
 public:
  VIXL_DEPRECATED("ExactAssemblyScope",
                  InstructionAccurateScope(MacroAssembler* masm,
                                           int64_t count,
                                           SizePolicy size_policy = kExactSize))
      : ExactAssemblyScope(masm, count * kInstructionSize, size_policy) {}
};

class BlockLiteralPoolScope {
 public:
  explicit BlockLiteralPoolScope(MacroAssembler* masm) : masm_(masm) {
    masm_->BlockLiteralPool();
  }

  ~BlockLiteralPoolScope() { masm_->ReleaseLiteralPool(); }

 private:
  MacroAssembler* masm_;
};


class BlockVeneerPoolScope {
 public:
  explicit BlockVeneerPoolScope(MacroAssembler* masm) : masm_(masm) {
    masm_->BlockVeneerPool();
  }

  ~BlockVeneerPoolScope() { masm_->ReleaseVeneerPool(); }

 private:
  MacroAssembler* masm_;
};


class BlockPoolsScope {
 public:
  explicit BlockPoolsScope(MacroAssembler* masm) : masm_(masm) {
    masm_->BlockPools();
  }

  ~BlockPoolsScope() { masm_->ReleasePools(); }

 private:
  MacroAssembler* masm_;
};


// This scope utility allows scratch registers to be managed safely. The
// MacroAssembler's GetScratchRegisterList() (and GetScratchFPRegisterList()) is
// used as a pool of scratch registers. These registers can be allocated on
// demand, and will be returned at the end of the scope.
//
// When the scope ends, the MacroAssembler's lists will be restored to their
// original state, even if the lists were modified by some other means.
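//
// A typical usage sketch (illustrative only; `masm` is assumed to be a
// MacroAssembler and `__` the usual shorthand for it):
//
//   {
//     UseScratchRegisterScope temps(&masm);
//     Register scratch = temps.AcquireX();
//     __ Add(scratch, x0, x1);
//   }  // `scratch` is returned to the scratch pool here.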
class UseScratchRegisterScope {
 public:
  // This constructor implicitly calls `Open` to initialise the scope (`masm`
  // must not be `NULL`), so it is ready to use immediately after it has been
  // constructed.
  explicit UseScratchRegisterScope(MacroAssembler* masm)
      : masm_(NULL), parent_(NULL), old_available_(0), old_availablefp_(0) {
    Open(masm);
  }
  // This constructor does not implicitly initialise the scope. Instead, the
  // user is required to explicitly call the `Open` function before using the
  // scope.
  UseScratchRegisterScope()
      : masm_(NULL), parent_(NULL), old_available_(0), old_availablefp_(0) {}

  // This function performs the actual initialisation work.
  void Open(MacroAssembler* masm);

  // The destructor always implicitly calls the `Close` function.
  ~UseScratchRegisterScope() { Close(); }

  // This function performs the cleaning-up work. It must succeed even if the
  // scope has not been opened. It is safe to call multiple times.
  void Close();


  bool IsAvailable(const CPURegister& reg) const;


  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends.
  Register AcquireW() {
    return AcquireNextAvailable(masm_->GetScratchRegisterList()).W();
  }
  Register AcquireX() {
    return AcquireNextAvailable(masm_->GetScratchRegisterList()).X();
  }
  VRegister AcquireS() {
    return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).S();
  }
  VRegister AcquireD() {
    return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).D();
  }


  Register AcquireRegisterOfSize(int size_in_bits);
  Register AcquireSameSizeAs(const Register& reg) {
    return AcquireRegisterOfSize(reg.GetSizeInBits());
  }
  VRegister AcquireVRegisterOfSize(int size_in_bits);
  VRegister AcquireSameSizeAs(const VRegister& reg) {
    return AcquireVRegisterOfSize(reg.GetSizeInBits());
  }
  CPURegister AcquireCPURegisterOfSize(int size_in_bits) {
    return masm_->GetScratchRegisterList()->IsEmpty()
               ? CPURegister(AcquireVRegisterOfSize(size_in_bits))
               : CPURegister(AcquireRegisterOfSize(size_in_bits));
  }


  // Explicitly release an acquired (or excluded) register, putting it back in
  // the appropriate temps list.
  void Release(const CPURegister& reg);


  // Make the specified registers available as scratch registers for the
  // duration of this scope.
  void Include(const CPURegList& list);
  void Include(const Register& reg1,
               const Register& reg2 = NoReg,
               const Register& reg3 = NoReg,
               const Register& reg4 = NoReg);
  void Include(const VRegister& reg1,
               const VRegister& reg2 = NoVReg,
               const VRegister& reg3 = NoVReg,
               const VRegister& reg4 = NoVReg);


  // Make sure that the specified registers are not available in this scope.
  // This can be used to prevent helper functions from using sensitive
  // registers, for example.
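  //
  // For example (an illustrative sketch; the registers named here are
  // arbitrary, as long as they may appear in the scratch lists):
  //
  //   UseScratchRegisterScope temps(&masm);
  //   temps.Exclude(x16, x17);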
  void Exclude(const CPURegList& list);
  void Exclude(const Register& reg1,
               const Register& reg2 = NoReg,
               const Register& reg3 = NoReg,
               const Register& reg4 = NoReg);
  void Exclude(const VRegister& reg1,
               const VRegister& reg2 = NoVReg,
               const VRegister& reg3 = NoVReg,
               const VRegister& reg4 = NoVReg);
  void Exclude(const CPURegister& reg1,
               const CPURegister& reg2 = NoCPUReg,
               const CPURegister& reg3 = NoCPUReg,
               const CPURegister& reg4 = NoCPUReg);


  // Prevent any scratch registers from being used in this scope.
  void ExcludeAll();

 private:
  static CPURegister AcquireNextAvailable(CPURegList* available);

  static void ReleaseByCode(CPURegList* available, int code);

  static void ReleaseByRegList(CPURegList* available, RegList regs);

  static void IncludeByRegList(CPURegList* available, RegList exclude);

  static void ExcludeByRegList(CPURegList* available, RegList exclude);

  // The MacroAssembler maintains a list of available scratch registers, and
  // also keeps track of the most recently-opened scope so that on destruction
  // we can check that scopes do not outlive their parents.
  MacroAssembler* masm_;
  UseScratchRegisterScope* parent_;

  // The state of the available lists at the start of this scope.
  RegList old_available_;    // kRegister
  RegList old_availablefp_;  // kVRegister

  // Disallow copy constructor and operator=.
  VIXL_DEBUG_NO_RETURN UseScratchRegisterScope(const UseScratchRegisterScope&) {
    VIXL_UNREACHABLE();
  }
  VIXL_DEBUG_NO_RETURN void operator=(const UseScratchRegisterScope&) {
    VIXL_UNREACHABLE();
  }
};

// Variadic templates are only available from C++11.
#ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT

// `R` stands for 'return type', and `P` for 'parameter types'.
template <typename R, typename... P>
void MacroAssembler::CallRuntime(R (*function)(P...)) {
  if (generate_simulator_code_) {
#ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
    uintptr_t runtime_call_wrapper_address = reinterpret_cast<uintptr_t>(
        &(Simulator::RuntimeCallStructHelper<R, P...>::Wrapper));
    uintptr_t function_address = reinterpret_cast<uintptr_t>(function);

    EmissionCheckScope guard(this,
                             kInstructionSize + 2 * kRuntimeCallAddressSize,
                             CodeBufferCheckScope::kExactSize);
    Label start;
    bind(&start);
    {
      ExactAssemblyScope scope(this, kInstructionSize);
      hlt(kRuntimeCallOpcode);
    }
    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
                kRuntimeCallWrapperOffset);
    dc(runtime_call_wrapper_address);
    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
                kRuntimeCallFunctionOffset);
    dc(function_address);
    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
                kRuntimeCallFunctionOffset + kRuntimeCallAddressSize);
#else
    VIXL_UNREACHABLE();
#endif  // #ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
  } else {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Mov(temp, reinterpret_cast<uint64_t>(function));
    Blr(temp);
  }
}

#endif  // #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT

}  // namespace aarch64

// Required InvalSet template specialisations.
// TODO: These template specialisations should not live in this file.  Move
// VeneerPool out of the aarch64 namespace in order to share its implementation
// later.
template <>
inline ptrdiff_t InvalSet<aarch64::VeneerPool::BranchInfo,
                          aarch64::VeneerPool::kNPreallocatedInfos,
                          ptrdiff_t,
                          aarch64::VeneerPool::kInvalidOffset,
                          aarch64::VeneerPool::kReclaimFrom,
                          aarch64::VeneerPool::kReclaimFactor>::
    GetKey(const aarch64::VeneerPool::BranchInfo& branch_info) {
  return branch_info.max_reachable_pc_;
}
template <>
inline void InvalSet<aarch64::VeneerPool::BranchInfo,
                     aarch64::VeneerPool::kNPreallocatedInfos,
                     ptrdiff_t,
                     aarch64::VeneerPool::kInvalidOffset,
                     aarch64::VeneerPool::kReclaimFrom,
                     aarch64::VeneerPool::kReclaimFactor>::
    SetKey(aarch64::VeneerPool::BranchInfo* branch_info, ptrdiff_t key) {
  branch_info->max_reachable_pc_ = key;
}

}  // namespace vixl

#endif  // VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_