// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_
#define VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_

#include "../globals-vixl.h"
#include "../utils-vixl.h"

#include "constants-aarch64.h"

namespace vixl {
namespace aarch64 {
// ISA constants. --------------------------------------------------------------

typedef uint32_t Instr;
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
const unsigned kLiteralEntrySize = 4;
const unsigned kLiteralEntrySizeLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MBytes;

// This is the nominal page size (as used by the adrp instruction); the actual
// size of the memory pages allocated by the kernel is likely to differ.
const unsigned kPageSize = 4 * KBytes;
const unsigned kPageSizeLog2 = 12;
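
// Illustrative sketch (not part of the VIXL API): adrp targets are expressed
// in whole pages, so the page base of any address can be derived by clearing
// the low kPageSizeLog2 bits. PageBaseOf is a hypothetical helper.
//
//   uint64_t PageBaseOf(uint64_t address) {
//     return address & ~static_cast<uint64_t>(kPageSize - 1);
//   }
//   // PageBaseOf(0x12345678) == 0x12345000 for the nominal 4KB page.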

const unsigned kBRegSize = 8;
const unsigned kBRegSizeLog2 = 3;
const unsigned kBRegSizeInBytes = kBRegSize / 8;
const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3;
const unsigned kHRegSize = 16;
const unsigned kHRegSizeLog2 = 4;
const unsigned kHRegSizeInBytes = kHRegSize / 8;
const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3;
const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize / 8;
const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
const unsigned kXRegSize = 64;
const unsigned kXRegSizeLog2 = 6;
const unsigned kXRegSizeInBytes = kXRegSize / 8;
const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
const unsigned kSRegSize = 32;
const unsigned kSRegSizeLog2 = 5;
const unsigned kSRegSizeInBytes = kSRegSize / 8;
const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
const unsigned kDRegSize = 64;
const unsigned kDRegSizeLog2 = 6;
const unsigned kDRegSizeInBytes = kDRegSize / 8;
const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
const unsigned kQRegSize = 128;
const unsigned kQRegSizeLog2 = 7;
const unsigned kQRegSizeInBytes = kQRegSize / 8;
const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3;
const uint64_t kWRegMask = UINT64_C(0xffffffff);
const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kHRegMask = UINT64_C(0xffff);
const uint64_t kSRegMask = UINT64_C(0xffffffff);
const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kHSignMask = UINT64_C(0x8000);
const uint64_t kSSignMask = UINT64_C(0x80000000);
const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
const uint64_t kWSignMask = UINT64_C(0x80000000);
const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
const uint64_t kByteMask = UINT64_C(0xff);
const uint64_t kHalfWordMask = UINT64_C(0xffff);
const uint64_t kWordMask = UINT64_C(0xffffffff);
const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
const uint64_t kHMaxUInt = UINT64_C(0xffff);
// Define k*MinInt with "-k*MaxInt - 1", because the hexadecimal representation
// (e.g. "INT32_C(0x80000000)") has implementation-defined behaviour.
const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
const int64_t kXMinInt = -kXMaxInt - 1;
const int32_t kWMaxInt = INT32_C(0x7fffffff);
const int32_t kWMinInt = -kWMaxInt - 1;
const int16_t kHMaxInt = INT16_C(0x7fff);
const int16_t kHMinInt = -kHMaxInt - 1;
const unsigned kFpRegCode = 29;
const unsigned kLinkRegCode = 30;
const unsigned kSpRegCode = 31;
const unsigned kZeroRegCode = 31;
const unsigned kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f;

const unsigned kAtomicAccessGranule = 16;

const unsigned kAddressTagOffset = 56;
const unsigned kAddressTagWidth = 8;
const uint64_t kAddressTagMask = ((UINT64_C(1) << kAddressTagWidth) - 1)
                                 << kAddressTagOffset;
VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));
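
// Illustrative sketch (not part of the VIXL API): under top-byte tagging, the
// tag occupies bits [63:56] of a pointer, so the constants above let it be
// extracted and stripped as follows. Both helpers are hypothetical.
//
//   uint64_t GetAddressTag(uint64_t address) {
//     return (address & kAddressTagMask) >> kAddressTagOffset;
//   }
//   uint64_t StripAddressTag(uint64_t address) {
//     return address & ~kAddressTagMask;
//   }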

const uint64_t kTTBRMask = UINT64_C(1) << 55;

// We can't define a static kZRegSize because the size depends on the
// implementation. However, it is sometimes useful to know the minimum and
// maximum possible sizes.
const unsigned kZRegMinSize = 128;
const unsigned kZRegMinSizeLog2 = 7;
const unsigned kZRegMinSizeInBytes = kZRegMinSize / 8;
const unsigned kZRegMinSizeInBytesLog2 = kZRegMinSizeLog2 - 3;
const unsigned kZRegMaxSize = 2048;
const unsigned kZRegMaxSizeLog2 = 11;
const unsigned kZRegMaxSizeInBytes = kZRegMaxSize / 8;
const unsigned kZRegMaxSizeInBytesLog2 = kZRegMaxSizeLog2 - 3;

// The P register size depends on the Z register size.
const unsigned kZRegBitsPerPRegBit = kBitsPerByte;
const unsigned kZRegBitsPerPRegBitLog2 = 3;
const unsigned kPRegMinSize = kZRegMinSize / kZRegBitsPerPRegBit;
const unsigned kPRegMinSizeLog2 = kZRegMinSizeLog2 - 3;
const unsigned kPRegMinSizeInBytes = kPRegMinSize / 8;
const unsigned kPRegMinSizeInBytesLog2 = kPRegMinSizeLog2 - 3;
const unsigned kPRegMaxSize = kZRegMaxSize / kZRegBitsPerPRegBit;
const unsigned kPRegMaxSizeLog2 = kZRegMaxSizeLog2 - 3;
const unsigned kPRegMaxSizeInBytes = kPRegMaxSize / 8;
const unsigned kPRegMaxSizeInBytesLog2 = kPRegMaxSizeLog2 - 3;
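
// Illustrative example (not part of the VIXL API): each predicate bit governs
// one byte of a Z register, so for a hypothetical 512-bit SVE implementation
// the P registers are 512 / kZRegBitsPerPRegBit = 64 bits wide, within the
// [kPRegMinSize, kPRegMaxSize] = [16, 256] bounds implied above.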

// These floating-point constants were moved to the outer vixl namespace; the
// using-declarations below keep explicit vixl::aarch64:: references to them
// working.
using vixl::kDoubleMantissaBits;
using vixl::kDoubleExponentBits;
using vixl::kFloatMantissaBits;
using vixl::kFloatExponentBits;
using vixl::kFloat16MantissaBits;
using vixl::kFloat16ExponentBits;

using vixl::kFP16PositiveInfinity;
using vixl::kFP16NegativeInfinity;
using vixl::kFP32PositiveInfinity;
using vixl::kFP32NegativeInfinity;
using vixl::kFP64PositiveInfinity;
using vixl::kFP64NegativeInfinity;

using vixl::kFP16DefaultNaN;
using vixl::kFP32DefaultNaN;
using vixl::kFP64DefaultNaN;

unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);

enum ImmBranchType {
  UnknownBranchType = 0,
  CondBranchType = 1,
  UncondBranchType = 2,
  CompareBranchType = 3,
  TestBranchType = 4
};

enum AddrMode { Offset, PreIndex, PostIndex };

enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };

enum VectorFormat {
  kFormatUndefined = 0xffffffff,
  kFormat8B = NEON_8B,
  kFormat16B = NEON_16B,
  kFormat4H = NEON_4H,
  kFormat8H = NEON_8H,
  kFormat2S = NEON_2S,
  kFormat4S = NEON_4S,
  kFormat1D = NEON_1D,
  kFormat2D = NEON_2D,

  // Scalar formats. We add the scalar bit to distinguish between scalar and
  // vector enumerations; the bit is always set in the encoding of scalar ops
  // and always clear for vector ops. Although kFormatD and kFormat1D appear
  // to be the same, their meaning is subtly different. The first is a scalar
  // operation, the second a vector operation that only affects one lane.
  kFormatB = NEON_B | NEONScalar,
  kFormatH = NEON_H | NEONScalar,
  kFormatS = NEON_S | NEONScalar,
  kFormatD = NEON_D | NEONScalar,

  // An artificial value, used to distinguish SVE formats from the NEON
  // format categories.
  kFormatSVE = 0x0000fffd,
  // Artificial values. Q and O lane sizes aren't encoded in the usual size
  // field.
  kFormatSVEQ = 0x00080000,
  kFormatSVEO = 0x00040000,

  // Vector element widths of an SVE register. The lane count is unknown
  // because the vector length is implementation dependent.
  kFormatVnB = SVE_B | kFormatSVE,
  kFormatVnH = SVE_H | kFormatSVE,
  kFormatVnS = SVE_S | kFormatSVE,
  kFormatVnD = SVE_D | kFormatSVE,
  kFormatVnQ = kFormatSVEQ | kFormatSVE,
  kFormatVnO = kFormatSVEO | kFormatSVE,

  // An artificial value, used by simulator trace tests and a few oddball
  // instructions (such as FMLAL).
  kFormat2H = 0xfffffffe
};

// Instructions. ---------------------------------------------------------------

class Instruction {
 public:
  Instr GetInstructionBits() const {
    return *(reinterpret_cast<const Instr*>(this));
  }
  VIXL_DEPRECATED("GetInstructionBits", Instr InstructionBits() const) {
    return GetInstructionBits();
  }

  void SetInstructionBits(Instr new_instr) {
    *(reinterpret_cast<Instr*>(this)) = new_instr;
  }

  int ExtractBit(int pos) const { return (GetInstructionBits() >> pos) & 1; }
  VIXL_DEPRECATED("ExtractBit", int Bit(int pos) const) {
    return ExtractBit(pos);
  }

  uint32_t ExtractBits(int msb, int lsb) const {
    return ExtractUnsignedBitfield32(msb, lsb, GetInstructionBits());
  }
  VIXL_DEPRECATED("ExtractBits", uint32_t Bits(int msb, int lsb) const) {
    return ExtractBits(msb, lsb);
  }

  // Compress bit extraction operation from Hacker's Delight.
  // https://github.com/hcs0/Hackers-Delight/blob/master/compress.c.txt
  uint32_t Compress(uint32_t mask) const {
    uint32_t mk, mp, mv, t;
    uint32_t x = GetInstructionBits() & mask;  // Clear irrelevant bits.
    mk = ~mask << 1;                           // We will count 0's to right.
    for (int i = 0; i < 5; i++) {
      mp = mk ^ (mk << 1);  // Parallel suffix.
      mp = mp ^ (mp << 2);
      mp = mp ^ (mp << 4);
      mp = mp ^ (mp << 8);
      mp = mp ^ (mp << 16);
      mv = mp & mask;                         // Bits to move.
      mask = (mask ^ mv) | (mv >> (1 << i));  // Compress mask.
      t = x & mv;
      x = (x ^ t) | (t >> (1 << i));  // Compress x.
      mk = mk & ~mp;
    }
    return x;
  }
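
  // Illustrative example (not part of the VIXL API): Compress gathers the
  // instruction bits selected by `mask` into a contiguous field at the low
  // end of the result, preserving their relative order. For instruction bits
  // 0b10110110 and mask 0b01100110, the selected bits (positions 6, 5, 2 and
  // 1) hold 0, 1, 1 and 1, so the result is 0b0111. The templated
  // ExtractBits<M>() below uses this to read fields with non-contiguous
  // masks.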

  template <uint32_t M>
  uint32_t ExtractBits() const {
    return Compress(M);
  }

  uint32_t ExtractBitsAbsent() const {
    VIXL_UNREACHABLE();
    return 0;
  }

  template <uint32_t M, uint32_t V>
  uint32_t IsMaskedValue() const {
    return (Mask(M) == V) ? 1 : 0;
  }

  uint32_t IsMaskedValueAbsent() const {
    VIXL_UNREACHABLE();
    return 0;
  }

  int32_t ExtractSignedBits(int msb, int lsb) const {
    int32_t bits = *(reinterpret_cast<const int32_t*>(this));
    return ExtractSignedBitfield32(msb, lsb, bits);
  }
  VIXL_DEPRECATED("ExtractSignedBits",
                  int32_t SignedBits(int msb, int lsb) const) {
    return ExtractSignedBits(msb, lsb);
  }

  Instr Mask(uint32_t mask) const {
    VIXL_ASSERT(mask != 0);
    return GetInstructionBits() & mask;
  }

#define DEFINE_GETTER(Name, HighBit, LowBit, Func)                  \
  int32_t Get##Name() const { return this->Func(HighBit, LowBit); } \
  VIXL_DEPRECATED("Get" #Name, int32_t Name() const) { return Get##Name(); }
  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER

  template <int msb, int lsb>
  int32_t GetRx() const {
    // We don't have any register fields wider than five bits, so the result
    // will always fit into an int32_t.
    VIXL_ASSERT((msb - lsb + 1) <= 5);
    return this->ExtractBits(msb, lsb);
  }

  VectorFormat GetSVEVectorFormat(int field_lsb = 22) const {
    VIXL_ASSERT((field_lsb >= 0) && (field_lsb <= 30));
    uint32_t instr = ExtractUnsignedBitfield32(field_lsb + 1,
                                               field_lsb,
                                               GetInstructionBits())
                     << 22;
    switch (instr & SVESizeFieldMask) {
      case SVE_B:
        return kFormatVnB;
      case SVE_H:
        return kFormatVnH;
      case SVE_S:
        return kFormatVnS;
      case SVE_D:
        return kFormatVnD;
    }
    VIXL_UNREACHABLE();
    return kFormatUndefined;
  }

  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
  // formed from ImmPCRelLo and ImmPCRelHi.
  int GetImmPCRel() const {
    uint32_t hi = static_cast<uint32_t>(GetImmPCRelHi());
    uint32_t lo = GetImmPCRelLo();
    uint32_t offset = (hi << ImmPCRelLo_width) | lo;
    int width = ImmPCRelLo_width + ImmPCRelHi_width;
    return ExtractSignedBitfield32(width - 1, 0, offset);
  }
  VIXL_DEPRECATED("GetImmPCRel", int ImmPCRel() const) { return GetImmPCRel(); }
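
  // Illustrative sketch (not part of the VIXL API): for an ADR instruction
  // the immediate is a byte offset, so a target address could be computed as
  // below; AdrTargetOf is hypothetical. GetImmPCOffsetTarget(), declared
  // later, is the supported helper, and also handles ADRP's page scaling.
  //
  //   const uint8_t* AdrTargetOf(const Instruction* adr) {
  //     // Assumes `adr` is ADR (not ADRP, whose immediate is page-scaled).
  //     return reinterpret_cast<const uint8_t*>(adr) + adr->GetImmPCRel();
  //   }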

  // ImmLSPAC is a compound field (not present in INSTRUCTION_FIELDS_LIST),
  // formed from ImmLSPACLo and ImmLSPACHi.
  int GetImmLSPAC() const {
    uint32_t hi = static_cast<uint32_t>(GetImmLSPACHi());
    uint32_t lo = GetImmLSPACLo();
    uint32_t offset = (hi << ImmLSPACLo_width) | lo;
    int width = ImmLSPACLo_width + ImmLSPACHi_width;
    return ExtractSignedBitfield32(width - 1, 0, offset) << 3;
  }

  uint64_t GetImmLogical() const;
  VIXL_DEPRECATED("GetImmLogical", uint64_t ImmLogical() const) {
    return GetImmLogical();
  }
  uint64_t GetSVEImmLogical() const;
  int GetSVEBitwiseImmLaneSizeInBytesLog2() const;
  uint64_t DecodeImmBitMask(int32_t n,
                            int32_t imm_s,
                            int32_t imm_r,
                            int32_t size) const;

  std::pair<int, int> GetSVEPermuteIndexAndLaneSizeLog2() const;

  std::pair<int, int> GetSVEMulZmAndIndex() const;
  std::pair<int, int> GetSVEMulLongZmAndIndex() const;

  std::pair<int, int> GetSVEImmShiftAndLaneSizeLog2(bool is_predicated) const;

  int GetSVEExtractImmediate() const;

  int GetSVEMsizeFromDtype(bool is_signed, int dtype_h_lsb = 23) const;

  int GetSVEEsizeFromDtype(bool is_signed, int dtype_l_lsb = 21) const;


  unsigned GetImmNEONabcdefgh() const;
  VIXL_DEPRECATED("GetImmNEONabcdefgh", unsigned ImmNEONabcdefgh() const) {
    return GetImmNEONabcdefgh();
  }

  Float16 GetImmFP16() const;

  float GetImmFP32() const;
  VIXL_DEPRECATED("GetImmFP32", float ImmFP32() const) { return GetImmFP32(); }

  double GetImmFP64() const;
  VIXL_DEPRECATED("GetImmFP64", double ImmFP64() const) { return GetImmFP64(); }

  Float16 GetImmNEONFP16() const;

  float GetImmNEONFP32() const;
  VIXL_DEPRECATED("GetImmNEONFP32", float ImmNEONFP32() const) {
    return GetImmNEONFP32();
  }

  double GetImmNEONFP64() const;
  VIXL_DEPRECATED("GetImmNEONFP64", double ImmNEONFP64() const) {
    return GetImmNEONFP64();
  }

  Float16 GetSVEImmFP16() const { return Imm8ToFloat16(ExtractBits(12, 5)); }

  float GetSVEImmFP32() const { return Imm8ToFP32(ExtractBits(12, 5)); }

  double GetSVEImmFP64() const { return Imm8ToFP64(ExtractBits(12, 5)); }

  static Float16 Imm8ToFloat16(uint32_t imm8);
  static float Imm8ToFP32(uint32_t imm8);
  static double Imm8ToFP64(uint32_t imm8);

  unsigned GetSizeLS() const {
    return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
  }
  VIXL_DEPRECATED("GetSizeLS", unsigned SizeLS() const) { return GetSizeLS(); }

  unsigned GetSizeLSPair() const {
    return CalcLSPairDataSize(
        static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
  }
  VIXL_DEPRECATED("GetSizeLSPair", unsigned SizeLSPair() const) {
    return GetSizeLSPair();
  }

  int GetNEONLSIndex(int access_size_shift) const {
    int64_t q = GetNEONQ();
    int64_t s = GetNEONS();
    int64_t size = GetNEONLSSize();
    int64_t index = (q << 3) | (s << 2) | size;
    return static_cast<int>(index >> access_size_shift);
  }
  VIXL_DEPRECATED("GetNEONLSIndex",
                  int NEONLSIndex(int access_size_shift) const) {
    return GetNEONLSIndex(access_size_shift);
  }

  // Helpers.
  bool IsCondBranchImm() const {
    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
  }

  bool IsUncondBranchImm() const {
    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
  }

  bool IsCompareBranch() const {
    return Mask(CompareBranchFMask) == CompareBranchFixed;
  }

  bool IsTestBranch() const { return Mask(TestBranchFMask) == TestBranchFixed; }

  bool IsImmBranch() const { return GetBranchType() != UnknownBranchType; }

  bool IsPCRelAddressing() const {
    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
  }

  bool IsLogicalImmediate() const {
    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
  }

  bool IsAddSubImmediate() const {
    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
  }

  bool IsAddSubExtended() const {
    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
  }

  bool IsLoadOrStore() const {
    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
  }

  // True if `this` is valid immediately after the provided movprfx instruction.
  bool CanTakeSVEMovprfx(uint32_t form_hash, Instruction const* movprfx) const;
  bool CanTakeSVEMovprfx(const char* form, Instruction const* movprfx) const;

  bool IsLoad() const;
  bool IsStore() const;

  bool IsLoadLiteral() const {
    // This includes PRFM_lit.
    return Mask(LoadLiteralFMask) == LoadLiteralFixed;
  }

  bool IsMovn() const {
    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
           (Mask(MoveWideImmediateMask) == MOVN_w);
  }

  bool IsException() const { return Mask(ExceptionFMask) == ExceptionFixed; }

  bool IsPAuth() const { return Mask(SystemPAuthFMask) == SystemPAuthFixed; }

  bool IsBti() const {
    if (Mask(SystemHintFMask) == SystemHintFixed) {
      int imm_hint = GetImmHint();
      switch (imm_hint) {
        case BTI:
        case BTI_c:
        case BTI_j:
        case BTI_jc:
          return true;
      }
    }
    return false;
  }

  static int GetImmBranchRangeBitwidth(ImmBranchType branch_type);
  VIXL_DEPRECATED(
      "GetImmBranchRangeBitwidth",
      static int ImmBranchRangeBitwidth(ImmBranchType branch_type)) {
    return GetImmBranchRangeBitwidth(branch_type);
  }

  static int32_t GetImmBranchForwardRange(ImmBranchType branch_type);
  VIXL_DEPRECATED(
      "GetImmBranchForwardRange",
      static int32_t ImmBranchForwardRange(ImmBranchType branch_type)) {
    return GetImmBranchForwardRange(branch_type);
  }

  static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset);

  // Indicate whether Rd can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rd field.
  Reg31Mode GetRdMode() const {
    // The following instructions use sp or wsp as Rd:
    //  Add/sub (immediate) when not setting the flags.
    //  Add/sub (extended) when not setting the flags.
    //  Logical (immediate) when not setting the flags.
    // Otherwise, r31 is the zero register.
    if (IsAddSubImmediate() || IsAddSubExtended()) {
      if (Mask(AddSubSetFlagsBit)) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    if (IsLogicalImmediate()) {
      // Of the logical (immediate) instructions, only ANDS (and its aliases)
      // can set the flags. The others can all write into sp.
      // Note that some logical operations are not available to
      // immediate-operand instructions, so we have to combine two masks here.
      if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    return Reg31IsZeroRegister;
  }
  VIXL_DEPRECATED("GetRdMode", Reg31Mode RdMode() const) { return GetRdMode(); }

  // Indicate whether Rn can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rn field.
  Reg31Mode GetRnMode() const {
    // The following instructions use sp or wsp as Rn:
    //  All loads and stores.
    //  Add/sub (immediate).
    //  Add/sub (extended).
    // Otherwise, r31 is the zero register.
    if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
      return Reg31IsStackPointer;
    }
    return Reg31IsZeroRegister;
  }
  VIXL_DEPRECATED("GetRnMode", Reg31Mode RnMode() const) { return GetRnMode(); }

  ImmBranchType GetBranchType() const {
    if (IsCondBranchImm()) {
      return CondBranchType;
    } else if (IsUncondBranchImm()) {
      return UncondBranchType;
    } else if (IsCompareBranch()) {
      return CompareBranchType;
    } else if (IsTestBranch()) {
      return TestBranchType;
    } else {
      return UnknownBranchType;
    }
  }
  VIXL_DEPRECATED("GetBranchType", ImmBranchType BranchType() const) {
    return GetBranchType();
  }

  // Find the target of this instruction. 'this' may be a branch or a
  // PC-relative addressing instruction.
  const Instruction* GetImmPCOffsetTarget() const;
  VIXL_DEPRECATED("GetImmPCOffsetTarget",
                  const Instruction* ImmPCOffsetTarget() const) {
    return GetImmPCOffsetTarget();
  }

  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
  // a PC-relative addressing instruction.
  void SetImmPCOffsetTarget(const Instruction* target);
  // Patch a literal load instruction to load from 'source'.
  void SetImmLLiteral(const Instruction* source);

  // The range of a load literal instruction, expressed as 'instr +- range'.
  // The range is actually the 'positive' range; the load can target
  // [instr - range - kInstructionSize, instr + range].
  static const int kLoadLiteralImmBitwidth = 19;
  static const int kLoadLiteralRange =
      (1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize;
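
  // Worked example: (1 << 19) / 2 - kInstructionSize = 262144 - 4 = 262140,
  // so per the comment above the targetable span is
  // [instr - 262144, instr + 262140].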

  // Calculate the address of a literal referred to by a load-literal
  // instruction, and return it as the specified type.
  //
  // The literal itself is safely mutable only if the backing buffer is safely
  // mutable.
  template <typename T>
  T GetLiteralAddress() const {
    uint64_t base_raw = reinterpret_cast<uint64_t>(this);
    int64_t offset = GetImmLLiteral() * static_cast<int>(kLiteralEntrySize);
    uint64_t address_raw = base_raw + offset;

    // Cast the address using a C-style cast. A reinterpret_cast would be
    // appropriate, but it can't cast one integral type to another.
    T address = (T)(address_raw);

    // Assert that the address can be represented by the specified type.
    VIXL_ASSERT((uint64_t)(address) == address_raw);

    return address;
  }
  template <typename T>
  VIXL_DEPRECATED("GetLiteralAddress", T LiteralAddress() const) {
    return GetLiteralAddress<T>();
  }
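
  // Illustrative sketch (not part of the VIXL API): reading a literal through
  // the typed address, assuming `instr` is a load-literal instruction.
  //
  //   if (instr->IsLoadLiteral()) {
  //     const uint32_t* lit = instr->GetLiteralAddress<const uint32_t*>();
  //     uint32_t value = *lit;  // GetLiteral32() below does this via memcpy.
  //   }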

  uint32_t GetLiteral32() const {
    uint32_t literal;
    memcpy(&literal, GetLiteralAddress<const void*>(), sizeof(literal));
    return literal;
  }
  VIXL_DEPRECATED("GetLiteral32", uint32_t Literal32() const) {
    return GetLiteral32();
  }

  uint64_t GetLiteral64() const {
    uint64_t literal;
    memcpy(&literal, GetLiteralAddress<const void*>(), sizeof(literal));
    return literal;
  }
  VIXL_DEPRECATED("GetLiteral64", uint64_t Literal64() const) {
    return GetLiteral64();
  }

  float GetLiteralFP32() const { return RawbitsToFloat(GetLiteral32()); }
  VIXL_DEPRECATED("GetLiteralFP32", float LiteralFP32() const) {
    return GetLiteralFP32();
  }

  double GetLiteralFP64() const { return RawbitsToDouble(GetLiteral64()); }
  VIXL_DEPRECATED("GetLiteralFP64", double LiteralFP64() const) {
    return GetLiteralFP64();
  }

  Instruction* GetNextInstruction() { return this + kInstructionSize; }
  const Instruction* GetNextInstruction() const {
    return this + kInstructionSize;
  }
  VIXL_DEPRECATED("GetNextInstruction",
                  const Instruction* NextInstruction() const) {
    return GetNextInstruction();
  }

  const Instruction* GetInstructionAtOffset(int64_t offset) const {
    VIXL_ASSERT(IsWordAligned(this + offset));
    return this + offset;
  }
  VIXL_DEPRECATED("GetInstructionAtOffset",
                  const Instruction* InstructionAtOffset(int64_t offset)
                      const) {
    return GetInstructionAtOffset(offset);
  }

  template <typename T>
  static Instruction* Cast(T src) {
    return reinterpret_cast<Instruction*>(src);
  }

  template <typename T>
  static const Instruction* CastConst(T src) {
    return reinterpret_cast<const Instruction*>(src);
  }

 private:
  int GetImmBranch() const;

  void SetPCRelImmTarget(const Instruction* target);
  void SetBranchImmTarget(const Instruction* target);
};
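
// Illustrative sketch (not part of the VIXL API): walking a code buffer with
// the Instruction helpers above. `buffer` and `size_in_bytes` are assumed to
// describe AArch64 code owned by the caller, with the size a multiple of
// kInstructionSize.
//
//   int CountImmBranches(const void* buffer, int64_t size_in_bytes) {
//     const Instruction* instr = Instruction::CastConst(buffer);
//     const Instruction* end = instr->GetInstructionAtOffset(size_in_bytes);
//     int count = 0;
//     while (instr < end) {
//       if (instr->IsImmBranch()) count++;
//       instr = instr->GetNextInstruction();
//     }
//     return count;
//   }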


// Functions for handling NEON and SVE vector format information.

const int kMaxLanesPerVector = 16;

VectorFormat VectorFormatHalfWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatHalfLanes(VectorFormat vform);
VectorFormat ScalarFormatFromLaneSize(int lane_size_in_bits);
VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatFillQ(VectorFormat vform);
VectorFormat ScalarFormatFromFormat(VectorFormat vform);
VectorFormat SVEFormatFromLaneSizeInBits(int lane_size_in_bits);
VectorFormat SVEFormatFromLaneSizeInBytes(int lane_size_in_bytes);
VectorFormat SVEFormatFromLaneSizeInBytesLog2(int lane_size_in_bytes_log_2);
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
bool IsSVEFormat(VectorFormat vform);
// TODO: Make the return types of these functions consistent.
unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
int LaneSizeInBytesFromFormat(VectorFormat vform);
int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
int LaneCountFromFormat(VectorFormat vform);
int MaxLaneCountFromFormat(VectorFormat vform);
bool IsVectorFormat(VectorFormat vform);
int64_t MaxIntFromFormat(VectorFormat vform);
int64_t MinIntFromFormat(VectorFormat vform);
uint64_t MaxUintFromFormat(VectorFormat vform);


// clang-format off
enum NEONFormat {
  NF_UNDEF = 0,
  NF_8B    = 1,
  NF_16B   = 2,
  NF_4H    = 3,
  NF_8H    = 4,
  NF_2S    = 5,
  NF_4S    = 6,
  NF_1D    = 7,
  NF_2D    = 8,
  NF_B     = 9,
  NF_H     = 10,
  NF_S     = 11,
  NF_D     = 12
};
// clang-format on

static const unsigned kNEONFormatMaxBits = 6;

struct NEONFormatMap {
  // The bit positions in the instruction to consider.
  uint8_t bits[kNEONFormatMaxBits];

  // Mapping from concatenated bits to format.
  NEONFormat map[1 << kNEONFormatMaxBits];
};

class NEONFormatDecoder {
 public:
  enum SubstitutionMode { kPlaceholder, kFormat };

  // Construct a format decoder with increasingly specific format maps for each
  // substitution. If no format map is specified, the default is the integer
  // format map.
  explicit NEONFormatDecoder(const Instruction* instr) {
    instrbits_ = instr->GetInstructionBits();
    SetFormatMaps(IntegerFormatMap());
  }
  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format) {
    instrbits_ = instr->GetInstructionBits();
    SetFormatMaps(format);
  }
  NEONFormatDecoder(const Instruction* instr,
                    const NEONFormatMap* format0,
                    const NEONFormatMap* format1) {
    instrbits_ = instr->GetInstructionBits();
    SetFormatMaps(format0, format1);
  }
  NEONFormatDecoder(const Instruction* instr,
                    const NEONFormatMap* format0,
                    const NEONFormatMap* format1,
                    const NEONFormatMap* format2) {
    instrbits_ = instr->GetInstructionBits();
    SetFormatMaps(format0, format1, format2);
  }

  // Set the format mapping for all or individual substitutions.
  void SetFormatMaps(const NEONFormatMap* format0,
                     const NEONFormatMap* format1 = NULL,
                     const NEONFormatMap* format2 = NULL) {
    VIXL_ASSERT(format0 != NULL);
    formats_[0] = format0;
    formats_[1] = (format1 == NULL) ? formats_[0] : format1;
    formats_[2] = (format2 == NULL) ? formats_[1] : format2;
  }
  void SetFormatMap(unsigned index, const NEONFormatMap* format) {
    VIXL_ASSERT(index < ArrayLength(formats_));
    VIXL_ASSERT(format != NULL);
    formats_[index] = format;
  }

  // Substitute %s in the input string with the placeholder string for each
  // register, i.e. "'B", "'H", etc.
  const char* SubstitutePlaceholders(const char* string) {
    return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
  }

  // Substitute %s in the input string with a new string based on the
  // substitution mode.
  const char* Substitute(const char* string,
                         SubstitutionMode mode0 = kFormat,
                         SubstitutionMode mode1 = kFormat,
                         SubstitutionMode mode2 = kFormat) {
    const char* subst0 = GetSubstitute(0, mode0);
    const char* subst1 = GetSubstitute(1, mode1);
    const char* subst2 = GetSubstitute(2, mode2);

    if ((subst0 == NULL) || (subst1 == NULL) || (subst2 == NULL)) {
      return NULL;
    }

    snprintf(form_buffer_,
             sizeof(form_buffer_),
             string,
             subst0,
             subst1,
             subst2);
    return form_buffer_;
  }
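
  // Illustrative sketch (not part of the VIXL API): a disassembler-style use
  // of Substitute, assuming `instr` decodes a NEON ADD with Q = 1 and
  // size = 0b10 (a .4s arrangement).
  //
  //   NEONFormatDecoder nfd(instr);  // Defaults to the integer format map.
  //   // Each %s is replaced with the decoded format, here "4s":
  //   const char* text = nfd.Substitute("add v0.%s, v1.%s, v2.%s");
  //   // text == "add v0.4s, v1.4s, v2.4s"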

  // Append a "2" to a mnemonic string based on the state of the Q bit.
  const char* Mnemonic(const char* mnemonic) {
    if ((mnemonic != NULL) && (instrbits_ & NEON_Q) != 0) {
      snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
      return mne_buffer_;
    }
    return mnemonic;
  }

  VectorFormat GetVectorFormat(int format_index = 0) {
    return GetVectorFormat(formats_[format_index]);
  }

  VectorFormat GetVectorFormat(const NEONFormatMap* format_map) {
    static const VectorFormat vform[] = {kFormatUndefined,
                                         kFormat8B,
                                         kFormat16B,
                                         kFormat4H,
                                         kFormat8H,
                                         kFormat2S,
                                         kFormat4S,
                                         kFormat1D,
                                         kFormat2D,
                                         kFormatB,
                                         kFormatH,
                                         kFormatS,
                                         kFormatD};
    VIXL_ASSERT(GetNEONFormat(format_map) < ArrayLength(vform));
    return vform[GetNEONFormat(format_map)];
  }

  // Built-in mappings for common cases.

  // The integer format map uses three bits (Q, size<1:0>) to encode the
  // "standard" set of NEON integer vector formats.
  static const NEONFormatMap* IntegerFormatMap() {
    static const NEONFormatMap map =
        {{23, 22, 30},
         {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
    return &map;
  }

  // The long integer format map uses two bits (size<1:0>) to encode the
  // long set of NEON integer vector formats. These are used in narrow, wide
  // and long operations.
  static const NEONFormatMap* LongIntegerFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
    return &map;
  }

  // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
  // formats: NF_2S, NF_4S, NF_2D.
  static const NEONFormatMap* FPFormatMap() {
    static const NEONFormatMap map = {{22, 30},
                                      {NF_2S, NF_4S, NF_UNDEF, NF_2D}};
    return &map;
  }

  // The FP16 format map uses one bit (Q) to encode the NEON vector format:
  // NF_4H, NF_8H.
  static const NEONFormatMap* FP16FormatMap() {
    static const NEONFormatMap map = {{30}, {NF_4H, NF_8H}};
    return &map;
  }

  // The load/store format map uses three bits (Q, 11, 10) to encode the
  // set of NEON vector formats.
  static const NEONFormatMap* LoadStoreFormatMap() {
    static const NEONFormatMap map =
        {{11, 10, 30},
         {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
    return &map;
  }

  // The logical format map uses one bit (Q) to encode the NEON vector format:
  // NF_8B, NF_16B.
  static const NEONFormatMap* LogicalFormatMap() {
    static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
    return &map;
  }

  // The triangular format map uses between two and five bits to encode the NEON
  // vector format:
  // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
  // x1000->2S, x1001->4S,  10001->2D, all others undefined.
  static const NEONFormatMap* TriangularFormatMap() {
    static const NEONFormatMap map =
        {{19, 18, 17, 16, 30},
         {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
          NF_2S,    NF_4S,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
          NF_UNDEF, NF_2D,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
          NF_2S,    NF_4S,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
    return &map;
  }

  // The shift immediate map uses between two and five bits to encode the NEON
  // vector format:
  // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
  // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
  static const NEONFormatMap* ShiftImmFormatMap() {
    static const NEONFormatMap map = {{22, 21, 20, 19, 30},
                                      {NF_UNDEF, NF_UNDEF, NF_8B,    NF_16B,
                                       NF_4H,    NF_8H,    NF_4H,    NF_8H,
                                       NF_2S,    NF_4S,    NF_2S,    NF_4S,
                                       NF_2S,    NF_4S,    NF_2S,    NF_4S,
                                       NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D,
                                       NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D,
                                       NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D,
                                       NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D}};
    return &map;
  }

  // The shift long/narrow immediate map uses between two and four bits to
  // encode the NEON vector format:
  // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
  static const NEONFormatMap* ShiftLongNarrowImmFormatMap() {
    static const NEONFormatMap map =
        {{22, 21, 20, 19},
         {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}};
    return &map;
  }

  // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
  // formats: NF_B, NF_H, NF_S, NF_D.
  static const NEONFormatMap* ScalarFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
    return &map;
  }

  // The long scalar format map uses two bits (size<1:0>) to encode the longer
  // NEON scalar formats: NF_H, NF_S, NF_D.
  static const NEONFormatMap* LongScalarFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
    return &map;
  }

  // The FP scalar format map assumes one bit (size<0>) is used to encode the
  // NEON FP scalar formats: NF_S, NF_D.
  static const NEONFormatMap* FPScalarFormatMap() {
    static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
    return &map;
  }

  // The FP scalar pairwise format map assumes two bits (U, size<0>) are used to
  // encode the NEON FP scalar formats: NF_H, NF_S, NF_D.
  static const NEONFormatMap* FPScalarPairwiseFormatMap() {
    static const NEONFormatMap map = {{29, 22}, {NF_H, NF_UNDEF, NF_S, NF_D}};
    return &map;
  }

  // The triangular scalar format map uses between one and four bits to encode
  // the NEON FP scalar formats:
  // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
  static const NEONFormatMap* TriangularScalarFormatMap() {
    static const NEONFormatMap map = {{19, 18, 17, 16},
                                      {NF_UNDEF,
                                       NF_B,
                                       NF_H,
                                       NF_B,
                                       NF_S,
                                       NF_B,
                                       NF_H,
                                       NF_B,
                                       NF_D,
                                       NF_B,
                                       NF_H,
                                       NF_B,
                                       NF_S,
                                       NF_B,
                                       NF_H,
                                       NF_B}};
    return &map;
  }

 private:
  // Get a pointer to a string that represents the format or placeholder for
  // the specified substitution index, based on the format map and instruction.
  const char* GetSubstitute(int index, SubstitutionMode mode) {
    if (mode == kFormat) {
      return NEONFormatAsString(GetNEONFormat(formats_[index]));
    }
    VIXL_ASSERT(mode == kPlaceholder);
    return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
  }

  // Get the NEONFormat enumerated value for bits obtained from the
  // instruction based on the specified format mapping.
  NEONFormat GetNEONFormat(const NEONFormatMap* format_map) {
    return format_map->map[PickBits(format_map->bits)];
  }

  // Convert a NEONFormat into a string.
  static const char* NEONFormatAsString(NEONFormat format) {
    // clang-format off
    static const char* formats[] = {
      NULL,
      "8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
      "b", "h", "s", "d"
    };
    // clang-format on
    VIXL_ASSERT(format < ArrayLength(formats));
    return formats[format];
  }

  // Convert a NEONFormat into a register placeholder string.
  static const char* NEONFormatAsPlaceholder(NEONFormat format) {
    VIXL_ASSERT((format == NF_B) || (format == NF_H) || (format == NF_S) ||
                (format == NF_D) || (format == NF_UNDEF));
    // clang-format off
    static const char* formats[] = {
      NULL,
      NULL, NULL, NULL, NULL,
      NULL, NULL, NULL, NULL,
      "'B", "'H", "'S", "'D"
    };
    // clang-format on
    return formats[format];
  }

  // Select bits from instrbits_ defined by the bits array, concatenate them,
  // and return the value.
  uint8_t PickBits(const uint8_t bits[]) {
    uint8_t result = 0;
    for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
      if (bits[b] == 0) break;
      result <<= 1;
      result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
    }
    return result;
  }
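
  // Illustrative example (not part of the VIXL API): for IntegerFormatMap()
  // above, bits is {23, 22, 30}, so PickBits concatenates instruction bits
  // 23, 22 and 30 (most significant first) into a three-bit index. With
  // bit 23 = 1, bit 22 = 0 and bit 30 = 1, the index is 0b101 = 5, which the
  // integer map resolves to NF_4S.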

  Instr instrbits_;
  const NEONFormatMap* formats_[3];
  char form_buffer_[64];
  char mne_buffer_[16];
};
}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_