// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_
#define VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_

#include "../globals-vixl.h"
#include "../utils-vixl.h"

#include "constants-aarch64.h"

namespace vixl {
namespace aarch64 {
// ISA constants. --------------------------------------------------------------

typedef uint32_t Instr;
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
const unsigned kLiteralEntrySize = 4;
const unsigned kLiteralEntrySizeLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MBytes;

// This is the nominal page size (as used by the adrp instruction); the actual
// size of the memory pages allocated by the kernel is likely to differ.
const unsigned kPageSize = 4 * KBytes;
const unsigned kPageSizeLog2 = 12;
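
// For example (an illustrative sketch, not part of the original header), the
// nominal page base that adrp works with can be derived from these constants:
//
//   uint64_t page_base = address & ~static_cast<uint64_t>(kPageSize - 1);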

const unsigned kBRegSize = 8;
const unsigned kBRegSizeLog2 = 3;
const unsigned kBRegSizeInBytes = kBRegSize / 8;
const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3;
const unsigned kHRegSize = 16;
const unsigned kHRegSizeLog2 = 4;
const unsigned kHRegSizeInBytes = kHRegSize / 8;
const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3;
const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize / 8;
const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
const unsigned kXRegSize = 64;
const unsigned kXRegSizeLog2 = 6;
const unsigned kXRegSizeInBytes = kXRegSize / 8;
const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
const unsigned kSRegSize = 32;
const unsigned kSRegSizeLog2 = 5;
const unsigned kSRegSizeInBytes = kSRegSize / 8;
const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
const unsigned kDRegSize = 64;
const unsigned kDRegSizeLog2 = 6;
const unsigned kDRegSizeInBytes = kDRegSize / 8;
const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
const unsigned kQRegSize = 128;
const unsigned kQRegSizeLog2 = 7;
const unsigned kQRegSizeInBytes = kQRegSize / 8;
const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3;
const uint64_t kWRegMask = UINT64_C(0xffffffff);
const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kHRegMask = UINT64_C(0xffff);
const uint64_t kSRegMask = UINT64_C(0xffffffff);
const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSSignMask = UINT64_C(0x80000000);
const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
const uint64_t kWSignMask = UINT64_C(0x80000000);
const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
const uint64_t kByteMask = UINT64_C(0xff);
const uint64_t kHalfWordMask = UINT64_C(0xffff);
const uint64_t kWordMask = UINT64_C(0xffffffff);
const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
const uint64_t kHMaxUInt = UINT64_C(0xffff);
// Define k*MinInt with "-k*MaxInt - 1", because the hexadecimal representation
// (e.g. "INT32_C(0x80000000)") has implementation-defined behaviour.
const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
const int64_t kXMinInt = -kXMaxInt - 1;
const int32_t kWMaxInt = INT32_C(0x7fffffff);
const int32_t kWMinInt = -kWMaxInt - 1;
const int16_t kHMaxInt = INT16_C(0x7fff);
const int16_t kHMinInt = -kHMaxInt - 1;
const unsigned kFpRegCode = 29;
const unsigned kLinkRegCode = 30;
const unsigned kSpRegCode = 31;
const unsigned kZeroRegCode = 31;
const unsigned kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f;

const unsigned kAddressTagOffset = 56;
const unsigned kAddressTagWidth = 8;
const uint64_t kAddressTagMask = ((UINT64_C(1) << kAddressTagWidth) - 1)
                                 << kAddressTagOffset;
VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));
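
// For example (illustrative only), the top-byte tag of a tagged pointer can
// be recovered with the constants above:
//
//   uint64_t tag = (address & kAddressTagMask) >> kAddressTagOffset;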

const uint64_t kTTBRMask = UINT64_C(1) << 55;

// These floating-point constants were moved into the outer vixl namespace;
// re-export them here so that explicit vixl::aarch64:: references remain valid.
using vixl::kDoubleMantissaBits;
using vixl::kDoubleExponentBits;
using vixl::kFloatMantissaBits;
using vixl::kFloatExponentBits;
using vixl::kFloat16MantissaBits;
using vixl::kFloat16ExponentBits;

using vixl::kFP16PositiveInfinity;
using vixl::kFP16NegativeInfinity;
using vixl::kFP32PositiveInfinity;
using vixl::kFP32NegativeInfinity;
using vixl::kFP64PositiveInfinity;
using vixl::kFP64NegativeInfinity;

using vixl::kFP16DefaultNaN;
using vixl::kFP32DefaultNaN;
using vixl::kFP64DefaultNaN;

unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);

enum ImmBranchType {
  UnknownBranchType = 0,
  CondBranchType = 1,
  UncondBranchType = 2,
  CompareBranchType = 3,
  TestBranchType = 4
};

enum AddrMode { Offset, PreIndex, PostIndex };

enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };

// Instructions. ---------------------------------------------------------------

class Instruction {
 public:
  Instr GetInstructionBits() const {
    return *(reinterpret_cast<const Instr*>(this));
  }
  VIXL_DEPRECATED("GetInstructionBits", Instr InstructionBits() const) {
    return GetInstructionBits();
  }

  void SetInstructionBits(Instr new_instr) {
    *(reinterpret_cast<Instr*>(this)) = new_instr;
  }

  int ExtractBit(int pos) const { return (GetInstructionBits() >> pos) & 1; }
  VIXL_DEPRECATED("ExtractBit", int Bit(int pos) const) {
    return ExtractBit(pos);
  }

  uint32_t ExtractBits(int msb, int lsb) const {
    return ExtractUnsignedBitfield32(msb, lsb, GetInstructionBits());
  }
  VIXL_DEPRECATED("ExtractBits", uint32_t Bits(int msb, int lsb) const) {
    return ExtractBits(msb, lsb);
  }

  int32_t ExtractSignedBits(int msb, int lsb) const {
    int32_t bits = *(reinterpret_cast<const int32_t*>(this));
    return ExtractSignedBitfield32(msb, lsb, bits);
  }
  VIXL_DEPRECATED("ExtractSignedBits",
                  int32_t SignedBits(int msb, int lsb) const) {
    return ExtractSignedBits(msb, lsb);
  }

  Instr Mask(uint32_t mask) const {
    VIXL_ASSERT(mask != 0);
    return GetInstructionBits() & mask;
  }

#define DEFINE_GETTER(Name, HighBit, LowBit, Func)                  \
  int32_t Get##Name() const { return this->Func(HighBit, LowBit); } \
  VIXL_DEPRECATED("Get" #Name, int32_t Name() const) { return Get##Name(); }
  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER

  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
  // formed from ImmPCRelLo and ImmPCRelHi.
  int GetImmPCRel() const {
    uint32_t hi = static_cast<uint32_t>(GetImmPCRelHi());
    uint32_t lo = GetImmPCRelLo();
    uint32_t offset = (hi << ImmPCRelLo_width) | lo;
    int width = ImmPCRelLo_width + ImmPCRelHi_width;
    return ExtractSignedBitfield32(width - 1, 0, offset);
  }
  VIXL_DEPRECATED("GetImmPCRel", int ImmPCRel() const) { return GetImmPCRel(); }
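
  // For example (illustrative): adr encodes a 21-bit signed offset split into
  // immhi (19 bits) and immlo (2 bits). An offset of -1 is stored as
  // immhi = 0x7ffff and immlo = 0x3; concatenating and sign-extending the
  // 21-bit value 0x1fffff recovers -1.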

  uint64_t GetImmLogical() const;
  VIXL_DEPRECATED("GetImmLogical", uint64_t ImmLogical() const) {
    return GetImmLogical();
  }

  unsigned GetImmNEONabcdefgh() const;
  VIXL_DEPRECATED("GetImmNEONabcdefgh", unsigned ImmNEONabcdefgh() const) {
    return GetImmNEONabcdefgh();
  }

  Float16 GetImmFP16() const;

  float GetImmFP32() const;
  VIXL_DEPRECATED("GetImmFP32", float ImmFP32() const) { return GetImmFP32(); }

  double GetImmFP64() const;
  VIXL_DEPRECATED("GetImmFP64", double ImmFP64() const) { return GetImmFP64(); }

  Float16 GetImmNEONFP16() const;

  float GetImmNEONFP32() const;
  VIXL_DEPRECATED("GetImmNEONFP32", float ImmNEONFP32() const) {
    return GetImmNEONFP32();
  }

  double GetImmNEONFP64() const;
  VIXL_DEPRECATED("GetImmNEONFP64", double ImmNEONFP64() const) {
    return GetImmNEONFP64();
  }

  unsigned GetSizeLS() const {
    return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
  }
  VIXL_DEPRECATED("GetSizeLS", unsigned SizeLS() const) { return GetSizeLS(); }

  unsigned GetSizeLSPair() const {
    return CalcLSPairDataSize(
        static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
  }
  VIXL_DEPRECATED("GetSizeLSPair", unsigned SizeLSPair() const) {
    return GetSizeLSPair();
  }

  int GetNEONLSIndex(int access_size_shift) const {
    int64_t q = GetNEONQ();
    int64_t s = GetNEONS();
    int64_t size = GetNEONLSSize();
    int64_t index = (q << 3) | (s << 2) | size;
    return static_cast<int>(index >> access_size_shift);
  }
  VIXL_DEPRECATED("GetNEONLSIndex",
                  int NEONLSIndex(int access_size_shift) const) {
    return GetNEONLSIndex(access_size_shift);
  }
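
  // For example (illustrative): for a single-structure load to a 32-bit lane
  // (access_size_shift == 2), Q = 1, S = 0 and size = 0 give
  // ((1 << 3) | (0 << 2) | 0) >> 2 == 2, selecting lane 2.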

  // Helpers.
  bool IsCondBranchImm() const {
    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
  }

  bool IsUncondBranchImm() const {
    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
  }

  bool IsCompareBranch() const {
    return Mask(CompareBranchFMask) == CompareBranchFixed;
  }

  bool IsTestBranch() const { return Mask(TestBranchFMask) == TestBranchFixed; }

  bool IsImmBranch() const { return GetBranchType() != UnknownBranchType; }

  bool IsPCRelAddressing() const {
    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
  }

  bool IsLogicalImmediate() const {
    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
  }

  bool IsAddSubImmediate() const {
    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
  }

  bool IsAddSubExtended() const {
    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
  }

  bool IsLoadOrStore() const {
    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
  }

  bool IsLoad() const;
  bool IsStore() const;

  bool IsLoadLiteral() const {
    // This includes PRFM_lit.
    return Mask(LoadLiteralFMask) == LoadLiteralFixed;
  }

  bool IsMovn() const {
    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
           (Mask(MoveWideImmediateMask) == MOVN_w);
  }

  static int GetImmBranchRangeBitwidth(ImmBranchType branch_type);
  VIXL_DEPRECATED(
      "GetImmBranchRangeBitwidth",
      static int ImmBranchRangeBitwidth(ImmBranchType branch_type)) {
    return GetImmBranchRangeBitwidth(branch_type);
  }

  static int32_t GetImmBranchForwardRange(ImmBranchType branch_type);
  VIXL_DEPRECATED(
      "GetImmBranchForwardRange",
      static int32_t ImmBranchForwardRange(ImmBranchType branch_type)) {
    return GetImmBranchForwardRange(branch_type);
  }

  static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset);

  // Indicate whether Rd can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rd field.
  Reg31Mode GetRdMode() const {
    // The following instructions use sp or wsp as Rd:
    //  Add/sub (immediate) when not setting the flags.
    //  Add/sub (extended) when not setting the flags.
    //  Logical (immediate) when not setting the flags.
    // Otherwise, r31 is the zero register.
    if (IsAddSubImmediate() || IsAddSubExtended()) {
      if (Mask(AddSubSetFlagsBit)) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    if (IsLogicalImmediate()) {
      // Of the logical (immediate) instructions, only ANDS (and its aliases)
      // can set the flags. The others can all write into sp.
      // Note that some logical operations are not available to
      // immediate-operand instructions, so we have to combine two masks here.
      if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    return Reg31IsZeroRegister;
  }
  VIXL_DEPRECATED("GetRdMode", Reg31Mode RdMode() const) { return GetRdMode(); }
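
  // For example (illustrative): "add sp, sp, #16" does not set the flags, so
  // Rd is the stack pointer; "adds xzr, sp, #16" sets the flags, so Rd = 31
  // names the zero register.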

  // Indicate whether Rn can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rn field.
  Reg31Mode GetRnMode() const {
    // The following instructions use sp or wsp as Rn:
    //  All loads and stores.
    //  Add/sub (immediate).
    //  Add/sub (extended).
    // Otherwise, r31 is the zero register.
    if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
      return Reg31IsStackPointer;
    }
    return Reg31IsZeroRegister;
  }
  VIXL_DEPRECATED("GetRnMode", Reg31Mode RnMode() const) { return GetRnMode(); }

  ImmBranchType GetBranchType() const {
    if (IsCondBranchImm()) {
      return CondBranchType;
    } else if (IsUncondBranchImm()) {
      return UncondBranchType;
    } else if (IsCompareBranch()) {
      return CompareBranchType;
    } else if (IsTestBranch()) {
      return TestBranchType;
    } else {
      return UnknownBranchType;
    }
  }
  VIXL_DEPRECATED("GetBranchType", ImmBranchType BranchType() const) {
    return GetBranchType();
  }

  // Find the target of this instruction. 'this' may be a branch or a
  // PC-relative addressing instruction.
  const Instruction* GetImmPCOffsetTarget() const;
  VIXL_DEPRECATED("GetImmPCOffsetTarget",
                  const Instruction* ImmPCOffsetTarget() const) {
    return GetImmPCOffsetTarget();
  }

  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
  // a PC-relative addressing instruction.
  void SetImmPCOffsetTarget(const Instruction* target);
  // Patch a literal load instruction to load from 'source'.
  void SetImmLLiteral(const Instruction* source);

  // The range of a load literal instruction, expressed as 'instr +- range'.
  // The range is actually the 'positive' range; the load instruction can
  // target [instr - range - kInstructionSize, instr + range].
  static const int kLoadLiteralImmBitwidth = 19;
  static const int kLoadLiteralRange =
      (1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize;
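
  // For example: with kLoadLiteralImmBitwidth = 19 and kInstructionSize = 4,
  // kLoadLiteralRange works out to (1 << 19) / 2 - 4 = 262140.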

  // Calculate the address of a literal referred to by a load-literal
  // instruction, and return it as the specified type.
  //
  // The literal itself is safely mutable only if the backing buffer is safely
  // mutable.
  template <typename T>
  T GetLiteralAddress() const {
    uint64_t base_raw = reinterpret_cast<uint64_t>(this);
    int64_t offset = GetImmLLiteral() * static_cast<int>(kLiteralEntrySize);
    uint64_t address_raw = base_raw + offset;

    // Cast the address using a C-style cast. A reinterpret_cast would be
    // appropriate, but it can't cast one integral type to another.
    T address = (T)(address_raw);

    // Assert that the address can be represented by the specified type.
    VIXL_ASSERT((uint64_t)(address) == address_raw);

    return address;
  }
  template <typename T>
  VIXL_DEPRECATED("GetLiteralAddress", T LiteralAddress() const) {
    return GetLiteralAddress<T>();
  }

  uint32_t GetLiteral32() const {
    uint32_t literal;
    memcpy(&literal, GetLiteralAddress<const void*>(), sizeof(literal));
    return literal;
  }
  VIXL_DEPRECATED("GetLiteral32", uint32_t Literal32() const) {
    return GetLiteral32();
  }

  uint64_t GetLiteral64() const {
    uint64_t literal;
    memcpy(&literal, GetLiteralAddress<const void*>(), sizeof(literal));
    return literal;
  }
  VIXL_DEPRECATED("GetLiteral64", uint64_t Literal64() const) {
    return GetLiteral64();
  }

  float GetLiteralFP32() const { return RawbitsToFloat(GetLiteral32()); }
  VIXL_DEPRECATED("GetLiteralFP32", float LiteralFP32() const) {
    return GetLiteralFP32();
  }

  double GetLiteralFP64() const { return RawbitsToDouble(GetLiteral64()); }
  VIXL_DEPRECATED("GetLiteralFP64", double LiteralFP64() const) {
    return GetLiteralFP64();
  }

  Instruction* GetNextInstruction() { return this + kInstructionSize; }
  const Instruction* GetNextInstruction() const {
    return this + kInstructionSize;
  }
  VIXL_DEPRECATED("GetNextInstruction",
                  const Instruction* NextInstruction() const) {
    return GetNextInstruction();
  }

  const Instruction* GetInstructionAtOffset(int64_t offset) const {
    VIXL_ASSERT(IsWordAligned(this + offset));
    return this + offset;
  }
  VIXL_DEPRECATED("GetInstructionAtOffset",
                  const Instruction* InstructionAtOffset(int64_t offset)
                      const) {
    return GetInstructionAtOffset(offset);
  }

  template <typename T>
  static Instruction* Cast(T src) {
    return reinterpret_cast<Instruction*>(src);
  }

  template <typename T>
  static const Instruction* CastConst(T src) {
    return reinterpret_cast<const Instruction*>(src);
  }

 private:
  int GetImmBranch() const;

  static Float16 Imm8ToFloat16(uint32_t imm8);
  static float Imm8ToFP32(uint32_t imm8);
  static double Imm8ToFP64(uint32_t imm8);

  void SetPCRelImmTarget(const Instruction* target);
  void SetBranchImmTarget(const Instruction* target);
};


// Functions for handling NEON vector format information.
enum VectorFormat {
  kFormatUndefined = 0xffffffff,
  kFormat8B = NEON_8B,
  kFormat16B = NEON_16B,
  kFormat4H = NEON_4H,
  kFormat8H = NEON_8H,
  kFormat2S = NEON_2S,
  kFormat4S = NEON_4S,
  kFormat1D = NEON_1D,
  kFormat2D = NEON_2D,

  // Scalar formats. We add the scalar bit to distinguish between scalar and
  // vector enumerations; the bit is always set in the encoding of scalar ops
  // and always clear for vector ops. Although kFormatD and kFormat1D appear
  // to be the same, their meaning is subtly different. The first is a scalar
  // operation, the second a vector operation that only affects one lane.
  kFormatB = NEON_B | NEONScalar,
  kFormatH = NEON_H | NEONScalar,
  kFormatS = NEON_S | NEONScalar,
  kFormatD = NEON_D | NEONScalar,

  // A value invented solely for FP16 scalar pairwise simulator trace tests.
  kFormat2H = 0xfffffffe
};

const int kMaxLanesPerVector = 16;

VectorFormat VectorFormatHalfWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatHalfLanes(VectorFormat vform);
VectorFormat ScalarFormatFromLaneSize(int lanesize);
VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatFillQ(VectorFormat vform);
VectorFormat ScalarFormatFromFormat(VectorFormat vform);
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
// TODO: Make the return types of these functions consistent.
unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
int LaneSizeInBytesFromFormat(VectorFormat vform);
int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
int LaneCountFromFormat(VectorFormat vform);
int MaxLaneCountFromFormat(VectorFormat vform);
bool IsVectorFormat(VectorFormat vform);
int64_t MaxIntFromFormat(VectorFormat vform);
int64_t MinIntFromFormat(VectorFormat vform);
uint64_t MaxUintFromFormat(VectorFormat vform);


// clang-format off
enum NEONFormat {
  NF_UNDEF = 0,
  NF_8B    = 1,
  NF_16B   = 2,
  NF_4H    = 3,
  NF_8H    = 4,
  NF_2S    = 5,
  NF_4S    = 6,
  NF_1D    = 7,
  NF_2D    = 8,
  NF_B     = 9,
  NF_H     = 10,
  NF_S     = 11,
  NF_D     = 12
};
// clang-format on

static const unsigned kNEONFormatMaxBits = 6;

struct NEONFormatMap {
  // The bit positions in the instruction to consider.
  uint8_t bits[kNEONFormatMaxBits];

  // Mapping from concatenated bits to format.
  NEONFormat map[1 << kNEONFormatMaxBits];
};
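
// For example (illustrative): with bits = {23, 22, 30}, the value of bit 23
// becomes the most significant bit of the index into 'map', followed by bits
// 22 and 30; unused entries of 'bits' are left as zero and ignored (see
// NEONFormatDecoder::PickBits below). So an instruction with bit 23 set and
// bits 22 and 30 clear selects map[0b100].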

class NEONFormatDecoder {
 public:
  enum SubstitutionMode { kPlaceholder, kFormat };

  // Construct a format decoder with increasingly specific format maps for each
  // substitution. If no format map is specified, the default is the integer
  // format map.
  explicit NEONFormatDecoder(const Instruction* instr) {
    instrbits_ = instr->GetInstructionBits();
    SetFormatMaps(IntegerFormatMap());
  }
  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format) {
    instrbits_ = instr->GetInstructionBits();
    SetFormatMaps(format);
  }
  NEONFormatDecoder(const Instruction* instr,
                    const NEONFormatMap* format0,
                    const NEONFormatMap* format1) {
    instrbits_ = instr->GetInstructionBits();
    SetFormatMaps(format0, format1);
  }
  NEONFormatDecoder(const Instruction* instr,
                    const NEONFormatMap* format0,
                    const NEONFormatMap* format1,
                    const NEONFormatMap* format2) {
    instrbits_ = instr->GetInstructionBits();
    SetFormatMaps(format0, format1, format2);
  }

  // Set the format mapping for all or individual substitutions.
  void SetFormatMaps(const NEONFormatMap* format0,
                     const NEONFormatMap* format1 = NULL,
                     const NEONFormatMap* format2 = NULL) {
    VIXL_ASSERT(format0 != NULL);
    formats_[0] = format0;
    formats_[1] = (format1 == NULL) ? formats_[0] : format1;
    formats_[2] = (format2 == NULL) ? formats_[1] : format2;
  }
  void SetFormatMap(unsigned index, const NEONFormatMap* format) {
    // 'index' must be a valid index into formats_, which has three entries.
    VIXL_ASSERT(index < ArrayLength(formats_));
    VIXL_ASSERT(format != NULL);
    formats_[index] = format;
  }

  // Substitute %s in the input string with the placeholder string for each
  // register, i.e. "'B", "'H", etc.
  const char* SubstitutePlaceholders(const char* string) {
    return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
  }

  // Substitute %s in the input string with a new string based on the
  // substitution mode.
  const char* Substitute(const char* string,
                         SubstitutionMode mode0 = kFormat,
                         SubstitutionMode mode1 = kFormat,
                         SubstitutionMode mode2 = kFormat) {
    snprintf(form_buffer_,
             sizeof(form_buffer_),
             string,
             GetSubstitute(0, mode0),
             GetSubstitute(1, mode1),
             GetSubstitute(2, mode2));
    return form_buffer_;
  }
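
  // For example (an illustrative sketch; the format string is hypothetical):
  // with the default integer format map and an instruction whose Q and size
  // bits select the 4S format,
  //
  //   decoder.Substitute("add v0.%s, v1.%s, v2.%s")
  //
  // returns "add v0.4s, v1.4s, v2.4s".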

  // Append a "2" to a mnemonic string based on the state of the Q bit.
  const char* Mnemonic(const char* mnemonic) {
    if ((instrbits_ & NEON_Q) != 0) {
      snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
      return mne_buffer_;
    }
    return mnemonic;
  }

  VectorFormat GetVectorFormat(int format_index = 0) {
    return GetVectorFormat(formats_[format_index]);
  }

  VectorFormat GetVectorFormat(const NEONFormatMap* format_map) {
    static const VectorFormat vform[] = {kFormatUndefined,
                                         kFormat8B,
                                         kFormat16B,
                                         kFormat4H,
                                         kFormat8H,
                                         kFormat2S,
                                         kFormat4S,
                                         kFormat1D,
                                         kFormat2D,
                                         kFormatB,
                                         kFormatH,
                                         kFormatS,
                                         kFormatD};
    VIXL_ASSERT(GetNEONFormat(format_map) < ArrayLength(vform));
    return vform[GetNEONFormat(format_map)];
  }

  // Built in mappings for common cases.

  // The integer format map uses three bits (Q, size<1:0>) to encode the
  // "standard" set of NEON integer vector formats.
  static const NEONFormatMap* IntegerFormatMap() {
    static const NEONFormatMap map =
        {{23, 22, 30},
         {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
    return &map;
  }
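
  // For example: size<1:0> = 0b01 (bits 23 and 22) with Q = 1 (bit 30)
  // concatenates to index 0b011, which maps to NF_8H.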

  // The long integer format map uses two bits (size<1:0>) to encode the
  // long set of NEON integer vector formats. These are used in narrow, wide
  // and long operations.
  static const NEONFormatMap* LongIntegerFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
    return &map;
  }

  // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
  // formats: NF_2S, NF_4S, NF_2D.
  static const NEONFormatMap* FPFormatMap() {
    // The FP format map assumes two bits (Q, size<0>) are used to encode the
    // NEON FP vector formats: NF_2S, NF_4S, NF_2D.
    static const NEONFormatMap map = {{22, 30},
                                      {NF_2S, NF_4S, NF_UNDEF, NF_2D}};
    return &map;
  }

  // The FP16 format map uses one bit (Q) to encode the NEON vector format:
  // NF_4H, NF_8H.
  static const NEONFormatMap* FP16FormatMap() {
    static const NEONFormatMap map = {{30}, {NF_4H, NF_8H}};
    return &map;
  }

  // The load/store format map uses three bits (Q, 11, 10) to encode the
  // set of NEON vector formats.
  static const NEONFormatMap* LoadStoreFormatMap() {
    static const NEONFormatMap map =
        {{11, 10, 30},
         {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
    return &map;
  }

  // The logical format map uses one bit (Q) to encode the NEON vector format:
  // NF_8B, NF_16B.
  static const NEONFormatMap* LogicalFormatMap() {
    static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
    return &map;
  }

  // The triangular format map uses between two and five bits to encode the NEON
  // vector format:
  // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
  // x1000->2S, x1001->4S,  10001->2D, all others undefined.
  static const NEONFormatMap* TriangularFormatMap() {
    static const NEONFormatMap map =
        {{19, 18, 17, 16, 30},
         {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
          NF_2S,    NF_4S,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
          NF_UNDEF, NF_2D,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
          NF_2S,    NF_4S,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
    return &map;
  }

  // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
  // formats: NF_B, NF_H, NF_S, NF_D.
  static const NEONFormatMap* ScalarFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
    return &map;
  }

  // The long scalar format map uses two bits (size<1:0>) to encode the longer
  // NEON scalar formats: NF_H, NF_S, NF_D.
  static const NEONFormatMap* LongScalarFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
    return &map;
  }

  // The FP scalar format map assumes one bit (size<0>) is used to encode the
  // NEON FP scalar formats: NF_S, NF_D.
  static const NEONFormatMap* FPScalarFormatMap() {
    static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
    return &map;
  }

  // The FP scalar pairwise format map assumes two bits (U, size<0>) are used to
  // encode the NEON FP scalar formats: NF_H, NF_S, NF_D.
  static const NEONFormatMap* FPScalarPairwiseFormatMap() {
    static const NEONFormatMap map = {{29, 22}, {NF_H, NF_UNDEF, NF_S, NF_D}};
    return &map;
  }

  // The triangular scalar format map uses between one and four bits to encode
  // the NEON FP scalar formats:
  // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
  static const NEONFormatMap* TriangularScalarFormatMap() {
    static const NEONFormatMap map = {{19, 18, 17, 16},
                                      {NF_UNDEF,
                                       NF_B,
                                       NF_H,
                                       NF_B,
                                       NF_S,
                                       NF_B,
                                       NF_H,
                                       NF_B,
                                       NF_D,
                                       NF_B,
                                       NF_H,
                                       NF_B,
                                       NF_S,
                                       NF_B,
                                       NF_H,
                                       NF_B}};
    return &map;
  }

 private:
  // Get a pointer to a string that represents the format or placeholder for
  // the specified substitution index, based on the format map and instruction.
  const char* GetSubstitute(int index, SubstitutionMode mode) {
    if (mode == kFormat) {
      return NEONFormatAsString(GetNEONFormat(formats_[index]));
    }
    VIXL_ASSERT(mode == kPlaceholder);
    return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
  }

  // Get the NEONFormat enumerated value for bits obtained from the
  // instruction based on the specified format mapping.
  NEONFormat GetNEONFormat(const NEONFormatMap* format_map) {
    return format_map->map[PickBits(format_map->bits)];
  }

  // Convert a NEONFormat into a string.
  static const char* NEONFormatAsString(NEONFormat format) {
    // clang-format off
    static const char* formats[] = {
      "undefined",
      "8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
      "b", "h", "s", "d"
    };
    // clang-format on
    VIXL_ASSERT(format < ArrayLength(formats));
    return formats[format];
  }

  // Convert a NEONFormat into a register placeholder string.
  static const char* NEONFormatAsPlaceholder(NEONFormat format) {
    VIXL_ASSERT((format == NF_B) || (format == NF_H) || (format == NF_S) ||
                (format == NF_D) || (format == NF_UNDEF));
    // clang-format off
    static const char* formats[] = {
      "undefined",
      "undefined", "undefined", "undefined", "undefined",
      "undefined", "undefined", "undefined", "undefined",
      "'B", "'H", "'S", "'D"
    };
    // clang-format on
    return formats[format];
  }

  // Select bits from instrbits_ defined by the bits array, concatenate them,
  // and return the value.
  uint8_t PickBits(const uint8_t bits[]) {
    uint8_t result = 0;
    for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
      if (bits[b] == 0) break;
      result <<= 1;
      result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
    }
    return result;
  }
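
  // For example: with bits = {23, 22, 30}, an instruction where bit 23 is set
  // and bits 22 and 30 are clear yields PickBits == 0b100.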

  Instr instrbits_;
  const NEONFormatMap* formats_[3];
  char form_buffer_[64];
  char mne_buffer_[16];
};
}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_