// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_
#define V8_ARM64_INSTRUCTIONS_ARM64_H_

#include "src/arm64/constants-arm64.h"
#include "src/arm64/utils-arm64.h"
#include "src/assembler.h"
#include "src/globals.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

// ISA constants. --------------------------------------------------------------

typedef uint32_t Instr;

extern const float16 kFP16PositiveInfinity;
extern const float16 kFP16NegativeInfinity;
extern const float kFP32PositiveInfinity;
extern const float kFP32NegativeInfinity;
extern const double kFP64PositiveInfinity;
extern const double kFP64NegativeInfinity;

// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
extern const double kFP64SignallingNaN;
extern const float kFP32SignallingNaN;

// A similar value, but as a quiet NaN.
extern const double kFP64QuietNaN;
extern const float kFP32QuietNaN;

// The default NaN values (for FPCR.DN=1).
extern const double kFP64DefaultNaN;
extern const float kFP32DefaultNaN;
extern const float16 kFP16DefaultNaN;

unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);

enum ImmBranchType {
  UnknownBranchType = 0,
  CondBranchType    = 1,
  UncondBranchType  = 2,
  CompareBranchType = 3,
  TestBranchType    = 4
};

enum AddrMode {
  Offset,
  PreIndex,
  PostIndex
};

enum FPRounding {
  // The first four values are encodable directly by FPCR<RMode>.
  FPTieEven = 0x0,
  FPPositiveInfinity = 0x1,
  FPNegativeInfinity = 0x2,
  FPZero = 0x3,

  // The final rounding modes are only available when explicitly specified by
  // the instruction (such as with fcvta). They cannot be set in FPCR.
  FPTieAway,
  FPRoundOdd
};

enum Reg31Mode {
  Reg31IsStackPointer,
  Reg31IsZeroRegister
};

// Instructions. ---------------------------------------------------------------

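// The Instruction class below is a thin, data-member-free view over the 32-bit
// encoding at a code address; it is only ever accessed through a pointer into
// the code stream. A rough usage sketch (illustrative only, based on the
// declarations in this class):
//   const Instruction* instr = Instruction::Cast(pc);
//   if (instr->IsCondBranchImm()) int64_t imm = instr->ImmBranch();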
class Instruction {
 public:
  V8_INLINE Instr InstructionBits() const {
    return *reinterpret_cast<const Instr*>(this);
  }

  V8_INLINE void SetInstructionBits(Instr new_instr) {
    *reinterpret_cast<Instr*>(this) = new_instr;
  }

  int Bit(int pos) const {
    return (InstructionBits() >> pos) & 1;
  }

  uint32_t Bits(int msb, int lsb) const {
    return unsigned_bitextract_32(msb, lsb, InstructionBits());
  }

  int32_t SignedBits(int msb, int lsb) const {
    int32_t bits = *(reinterpret_cast<const int32_t*>(this));
    return signed_bitextract_32(msb, lsb, bits);
  }

  Instr Mask(uint32_t mask) const {
    return InstructionBits() & mask;
  }

  V8_INLINE const Instruction* following(int count = 1) const {
    return InstructionAtOffset(count * static_cast<int>(kInstrSize));
  }

  V8_INLINE Instruction* following(int count = 1) {
    return InstructionAtOffset(count * static_cast<int>(kInstrSize));
  }

  V8_INLINE const Instruction* preceding(int count = 1) const {
    return following(-count);
  }

  V8_INLINE Instruction* preceding(int count = 1) {
    return following(-count);
  }

#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
  int32_t Name() const { return Func(HighBit, LowBit); }
  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
  #undef DEFINE_GETTER

  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
  // formed from ImmPCRelLo and ImmPCRelHi.
  int ImmPCRel() const {
    DCHECK(IsPCRelAddressing());
    int offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
    int width = ImmPCRelLo_width + ImmPCRelHi_width;
    return signed_bitextract_32(width - 1, 0, offset);
  }

  uint64_t ImmLogical();
  unsigned ImmNEONabcdefgh() const;
  float ImmFP32();
  double ImmFP64();
  float ImmNEONFP32() const;
  double ImmNEONFP64() const;

  unsigned SizeLS() const {
    return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
  }

  unsigned SizeLSPair() const {
    return CalcLSPairDataSize(
        static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
  }

  int NEONLSIndex(int access_size_shift) const {
    int q = NEONQ();
    int s = NEONS();
    int size = NEONLSSize();
    int index = (q << 3) | (s << 2) | size;
    return index >> access_size_shift;
  }

  // Helpers.
  bool IsCondBranchImm() const {
    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
  }

  bool IsUncondBranchImm() const {
    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
  }

  bool IsCompareBranch() const {
    return Mask(CompareBranchFMask) == CompareBranchFixed;
  }

  bool IsTestBranch() const {
    return Mask(TestBranchFMask) == TestBranchFixed;
  }

  bool IsImmBranch() const {
    return BranchType() != UnknownBranchType;
  }

  static float Imm8ToFP32(uint32_t imm8) {
    //   Imm8: abcdefgh (8 bits)
    // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
    // where B is b ^ 1
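    // For example, imm8 = 0x70 (a=0, b=1, cdefgh=110000) expands to
    // 0x3F800000, which is 1.0f.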
    uint32_t bits = imm8;
    uint32_t bit7 = (bits >> 7) & 0x1;
    uint32_t bit6 = (bits >> 6) & 0x1;
    uint32_t bit5_to_0 = bits & 0x3f;
    uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

    return bit_cast<float>(result);
  }

  static double Imm8ToFP64(uint32_t imm8) {
    //   Imm8: abcdefgh (8 bits)
    // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
    //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
    // where B is b ^ 1
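    // For example, imm8 = 0x70 expands to 0x3FF0000000000000, which is 1.0.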
    uint32_t bits = imm8;
    uint64_t bit7 = (bits >> 7) & 0x1;
    uint64_t bit6 = (bits >> 6) & 0x1;
    uint64_t bit5_to_0 = bits & 0x3f;
    uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

    return bit_cast<double>(result);
  }

  bool IsLdrLiteral() const {
    return Mask(LoadLiteralFMask) == LoadLiteralFixed;
  }

  bool IsLdrLiteralX() const {
    return Mask(LoadLiteralMask) == LDR_x_lit;
  }

  bool IsPCRelAddressing() const {
    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
  }

  bool IsAdr() const {
    return Mask(PCRelAddressingMask) == ADR;
  }

  bool IsBrk() const { return Mask(ExceptionMask) == BRK; }

  bool IsUnresolvedInternalReference() const {
    // Unresolved internal references are encoded as two consecutive brk
    // instructions.
    return IsBrk() && following()->IsBrk();
  }

  bool IsLogicalImmediate() const {
    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
  }

  bool IsAddSubImmediate() const {
    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
  }

  bool IsAddSubShifted() const {
    return Mask(AddSubShiftedFMask) == AddSubShiftedFixed;
  }

  bool IsAddSubExtended() const {
    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
  }

  // Match any loads or stores, including pairs.
  bool IsLoadOrStore() const {
    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
  }

  // Match any loads, including pairs.
  bool IsLoad() const;
  // Match any stores, including pairs.
  bool IsStore() const;

  // Indicate whether Rd can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rd field.
  Reg31Mode RdMode() const {
    // The following instructions use sp or wsp as Rd:
    //  Add/sub (immediate) when not setting the flags.
    //  Add/sub (extended) when not setting the flags.
    //  Logical (immediate) when not setting the flags.
    // Otherwise, r31 is the zero register.
    if (IsAddSubImmediate() || IsAddSubExtended()) {
      if (Mask(AddSubSetFlagsBit)) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    if (IsLogicalImmediate()) {
      // Of the logical (immediate) instructions, only ANDS (and its aliases)
      // can set the flags. The others can all write into sp.
      // Note that some logical operations are not available to
      // immediate-operand instructions, so we have to combine two masks here.
      if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    return Reg31IsZeroRegister;
  }

  // Indicate whether Rn can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rn field.
  Reg31Mode RnMode() const {
    // The following instructions use sp or wsp as Rn:
    //  All loads and stores.
    //  Add/sub (immediate).
    //  Add/sub (extended).
    // Otherwise, r31 is the zero register.
    if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
      return Reg31IsStackPointer;
    }
    return Reg31IsZeroRegister;
  }

  ImmBranchType BranchType() const {
    if (IsCondBranchImm()) {
      return CondBranchType;
    } else if (IsUncondBranchImm()) {
      return UncondBranchType;
    } else if (IsCompareBranch()) {
      return CompareBranchType;
    } else if (IsTestBranch()) {
      return TestBranchType;
    } else {
      return UnknownBranchType;
    }
  }

  static int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
    switch (branch_type) {
      case UncondBranchType:
        return ImmUncondBranch_width;
      case CondBranchType:
        return ImmCondBranch_width;
      case CompareBranchType:
        return ImmCmpBranch_width;
      case TestBranchType:
        return ImmTestBranch_width;
      default:
        UNREACHABLE();
    }
  }

  // The range of the branch instruction, expressed as 'instr +- range'.
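  // For example, assuming ImmCondBranch_width is 19 and kInstrSize is 4, a
  // conditional branch can reach (1 << 21) / 2 - 4 = 1048572 bytes in either
  // direction, i.e. roughly +-1MB.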
  static int32_t ImmBranchRange(ImmBranchType branch_type) {
    return (1 << (ImmBranchRangeBitwidth(branch_type) + kInstrSizeLog2)) / 2 -
           kInstrSize;
  }

  int ImmBranch() const {
    switch (BranchType()) {
      case CondBranchType: return ImmCondBranch();
      case UncondBranchType: return ImmUncondBranch();
      case CompareBranchType: return ImmCmpBranch();
      case TestBranchType: return ImmTestBranch();
      default: UNREACHABLE();
    }
    return 0;
  }

  int ImmUnresolvedInternalReference() const {
    DCHECK(IsUnresolvedInternalReference());
    // Unresolved references are encoded as two consecutive brk instructions.
    // The associated immediate is made of the two 16-bit payloads.
    int32_t high16 = ImmException();
    int32_t low16 = following()->ImmException();
    return (high16 << 16) | low16;
  }

  bool IsUnconditionalBranch() const {
    return Mask(UnconditionalBranchMask) == B;
  }

  bool IsBranchAndLink() const { return Mask(UnconditionalBranchMask) == BL; }

  bool IsBranchAndLinkToRegister() const {
    return Mask(UnconditionalBranchToRegisterMask) == BLR;
  }

  bool IsMovz() const {
    return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
           (Mask(MoveWideImmediateMask) == MOVZ_w);
  }

  bool IsMovk() const {
    return (Mask(MoveWideImmediateMask) == MOVK_x) ||
           (Mask(MoveWideImmediateMask) == MOVK_w);
  }

  bool IsMovn() const {
    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
           (Mask(MoveWideImmediateMask) == MOVN_w);
  }

  bool IsNop(int n) {
    // A marking nop is an instruction
    //   mov r<n>,  r<n>
    // which is encoded as
    //   orr r<n>, xzr, r<n>
    return (Mask(LogicalShiftedMask) == ORR_x) &&
           (Rd() == Rm()) &&
           (Rd() == n);
  }

  // Find the PC offset encoded in this instruction. 'this' may be a branch or
  // a PC-relative addressing instruction.
  // The offset returned is unscaled.
  int64_t ImmPCOffset();

  // Find the target of this instruction. 'this' may be a branch or a
  // PC-relative addressing instruction.
  Instruction* ImmPCOffsetTarget();

  static bool IsValidImmPCOffset(ImmBranchType branch_type, ptrdiff_t offset);
  bool IsTargetInImmPCOffsetRange(Instruction* target);
  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
  // a PC-relative addressing instruction.
  void SetImmPCOffsetTarget(const AssemblerOptions& options,
                            Instruction* target);
  void SetUnresolvedInternalReferenceImmTarget(const AssemblerOptions& options,
                                               Instruction* target);
  // Patch a literal load instruction to load from 'source'.
  void SetImmLLiteral(Instruction* source);

  uintptr_t LiteralAddress() {
    int offset = ImmLLiteral() << kLoadLiteralScaleLog2;
    return reinterpret_cast<uintptr_t>(this) + offset;
  }

  enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };

  V8_INLINE const Instruction* InstructionAtOffset(
      int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) const {
    // The FUZZ_disasm test relies on no check being done.
    DCHECK(check == NO_CHECK || IsAligned(offset, kInstrSize));
    return this + offset;
  }

  V8_INLINE Instruction* InstructionAtOffset(
      int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) {
    // The FUZZ_disasm test relies on no check being done.
    DCHECK(check == NO_CHECK || IsAligned(offset, kInstrSize));
    return this + offset;
  }

  template<typename T> V8_INLINE static Instruction* Cast(T src) {
    return reinterpret_cast<Instruction*>(src);
  }

  V8_INLINE ptrdiff_t DistanceTo(Instruction* target) {
    return reinterpret_cast<Address>(target) - reinterpret_cast<Address>(this);
  }


  static const int ImmPCRelRangeBitwidth = 21;
  static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
  void SetPCRelImmTarget(const AssemblerOptions& options, Instruction* target);
  void SetBranchImmTarget(Instruction* target);
};

// Functions for handling NEON vector format information.
enum VectorFormat {
  kFormatUndefined = 0xffffffff,
  kFormat8B = NEON_8B,
  kFormat16B = NEON_16B,
  kFormat4H = NEON_4H,
  kFormat8H = NEON_8H,
  kFormat2S = NEON_2S,
  kFormat4S = NEON_4S,
  kFormat1D = NEON_1D,
  kFormat2D = NEON_2D,

  // Scalar formats. We add the scalar bit to distinguish between scalar and
  // vector enumerations; the bit is always set in the encoding of scalar ops
  // and always clear for vector ops. Although kFormatD and kFormat1D appear
  // to be the same, their meaning is subtly different. The first is a scalar
  // operation, the second a vector operation that only affects one lane.
  kFormatB = NEON_B | NEONScalar,
  kFormatH = NEON_H | NEONScalar,
  kFormatS = NEON_S | NEONScalar,
  kFormatD = NEON_D | NEONScalar
};

VectorFormat VectorFormatHalfWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatHalfLanes(VectorFormat vform);
VectorFormat ScalarFormatFromLaneSize(int lanesize);
VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatFillQ(VectorFormat vform);
VectorFormat ScalarFormatFromFormat(VectorFormat vform);
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
int LaneSizeInBytesFromFormat(VectorFormat vform);
unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
int LaneCountFromFormat(VectorFormat vform);
int MaxLaneCountFromFormat(VectorFormat vform);
bool IsVectorFormat(VectorFormat vform);
int64_t MaxIntFromFormat(VectorFormat vform);
int64_t MinIntFromFormat(VectorFormat vform);
uint64_t MaxUintFromFormat(VectorFormat vform);
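
// An illustrative sketch of the intended semantics (not a normative
// specification): VectorFormatHalfWidth(kFormat4S) is expected to yield
// kFormat4H (same lane count, half the lane size), while
// VectorFormatHalfLanes(kFormat4S) is expected to yield kFormat2S (same lane
// size, half the lane count).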

// Where Instruction looks at instructions generated by the Assembler,
// InstructionSequence looks at instruction sequences generated by the
// MacroAssembler.
class InstructionSequence : public Instruction {
 public:
  static InstructionSequence* At(Address address) {
    return reinterpret_cast<InstructionSequence*>(address);
  }

  // Sequences generated by MacroAssembler::InlineData().
  bool IsInlineData() const;
  uint64_t InlineData() const;
};


// Simulator/Debugger debug instructions ---------------------------------------
// Each debug marker is represented by a HLT instruction. The immediate comment
// field in the instruction is used to identify the type of debug marker. Each
// marker encodes arguments in a different way, as described below.

// Indicate to the Debugger that the instruction is a redirected call.
const Instr kImmExceptionIsRedirectedCall = 0xca11;

// Represent unreachable code. This is used as a guard in parts of the code that
// should not be reachable, such as in data encoded inline in the instructions.
const Instr kImmExceptionIsUnreachable = 0xdebf;

// A pseudo 'printf' instruction. The arguments will be passed to the platform
// printf method.
const Instr kImmExceptionIsPrintf = 0xdeb1;
// Most parameters are stored in ARM64 registers as if the printf
// pseudo-instruction was a call to the real printf method:
//      x0: The format string.
//   x1-x7: Optional arguments.
//   d0-d7: Optional arguments.
//
// Also, the argument layout is described inline in the instructions:
//  - arg_count: The number of arguments.
//  - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
//
// Floating-point and integer arguments are passed in separate sets of registers
// in AAPCS64 (even for varargs functions), so it is not possible to determine
// the type of each argument without some information about the values that were
// passed in. This information could be retrieved from the printf format string,
// but the format string is not trivial to parse so we encode the relevant
// information with the HLT instruction.
const unsigned kPrintfArgCountOffset = 1 * kInstrSize;
const unsigned kPrintfArgPatternListOffset = 2 * kInstrSize;
const unsigned kPrintfLength = 3 * kInstrSize;
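// A printf marker therefore occupies kPrintfLength bytes in the instruction
// stream: the HLT itself, followed by one word holding arg_count and one word
// holding the packed arg_pattern list. A sketch of the layout implied by the
// offsets above (the exact emission code lives elsewhere):
//   hlt #kImmExceptionIsPrintf
//   .word <arg_count>
//   .word <arg_pattern_list>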

const unsigned kPrintfMaxArgCount = 4;

// The argument pattern is a set of two-bit-fields, each with one of the
// following values:
enum PrintfArgPattern {
  kPrintfArgW = 1,
  kPrintfArgX = 2,
  // There is no kPrintfArgS because floats are always converted to doubles in C
  // varargs calls.
  kPrintfArgD = 3
};
static const unsigned kPrintfArgPatternBits = 2;

// A pseudo 'debug' instruction.
const Instr kImmExceptionIsDebug = 0xdeb0;
// Parameters are inlined in the code after a debug pseudo-instruction:
// - Debug code.
// - Debug parameters.
// - Debug message string. This is a null-terminated ASCII string, padded to
//   kInstrSize so that subsequent instructions are correctly aligned.
// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
//   string data.
const unsigned kDebugCodeOffset = 1 * kInstrSize;
const unsigned kDebugParamsOffset = 2 * kInstrSize;
const unsigned kDebugMessageOffset = 3 * kInstrSize;

// Debug parameters.
// Used without a TRACE_ option, the Debugger will print the arguments only
// once. Otherwise, TRACE_ENABLE and TRACE_DISABLE will enable or disable
// tracing of the specified LOG_ parameters before every instruction.
//
// TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any
// others that were not specified.
//
// For example:
//
// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_VREGS);
// will print the registers and fp registers only once.
//
// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
// starts disassembling the code.
//
// __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
// adds the general purpose registers to the trace.
//
// __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
// stops tracing the registers.
const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
enum DebugParameters {
  NO_PARAM = 0,
  BREAK = 1 << 0,
  LOG_DISASM = 1 << 1,    // Use only with TRACE. Disassemble the code.
  LOG_REGS = 1 << 2,      // Log general purpose registers.
  LOG_VREGS = 1 << 3,     // Log NEON and floating-point registers.
  LOG_SYS_REGS = 1 << 4,  // Log the status flags.
  LOG_WRITE = 1 << 5,     // Log any memory write.

  LOG_NONE = 0,
  LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYS_REGS,
  LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,

  // Trace control.
  TRACE_ENABLE = 1 << 6,
  TRACE_DISABLE = 2 << 6,
  TRACE_OVERRIDE = 3 << 6
};

enum NEONFormat {
  NF_UNDEF = 0,
  NF_8B = 1,
  NF_16B = 2,
  NF_4H = 3,
  NF_8H = 4,
  NF_2S = 5,
  NF_4S = 6,
  NF_1D = 7,
  NF_2D = 8,
  NF_B = 9,
  NF_H = 10,
  NF_S = 11,
  NF_D = 12
};

static const unsigned kNEONFormatMaxBits = 6;

struct NEONFormatMap {
  // The bit positions in the instruction to consider.
  uint8_t bits[kNEONFormatMaxBits];

  // Mapping from concatenated bits to format.
  NEONFormat map[1 << kNEONFormatMaxBits];
};
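
// For example, the LogicalFormatMap defined below, {{30}, {NF_8B, NF_16B}},
// considers only bit 30 (Q) of the instruction and maps Q == 0 to NF_8B and
// Q == 1 to NF_16B. Judging from the built-in maps below, the first entry in
// 'bits' supplies the most significant bit of the index into 'map'.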

class NEONFormatDecoder {
 public:
  enum SubstitutionMode { kPlaceholder, kFormat };

  // Construct a format decoder with increasingly specific format maps for each
  // substitution. If no format map is specified, the default is the integer
  // format map.
  explicit NEONFormatDecoder(const Instruction* instr);
  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format);
  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
                    const NEONFormatMap* format1);
  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
                    const NEONFormatMap* format1, const NEONFormatMap* format2);

  // Set the format mapping for all or individual substitutions.
  void SetFormatMaps(const NEONFormatMap* format0,
                     const NEONFormatMap* format1 = nullptr,
                     const NEONFormatMap* format2 = nullptr);
  void SetFormatMap(unsigned index, const NEONFormatMap* format);

  // Substitute %s in the input string with the placeholder string for each
  // register, i.e. "'B", "'H", etc.
  const char* SubstitutePlaceholders(const char* string);

  // Substitute %s in the input string with a new string based on the
  // substitution mode.
  const char* Substitute(const char* string, SubstitutionMode mode0 = kFormat,
                         SubstitutionMode mode1 = kFormat,
                         SubstitutionMode mode2 = kFormat);

  // Append a "2" to a mnemonic string based on the state of the Q bit.
  const char* Mnemonic(const char* mnemonic);

  VectorFormat GetVectorFormat(int format_index = 0);
  VectorFormat GetVectorFormat(const NEONFormatMap* format_map);

  // Built in mappings for common cases.

  // The integer format map uses three bits (Q, size<1:0>) to encode the
  // "standard" set of NEON integer vector formats.
  static const NEONFormatMap* IntegerFormatMap() {
    static const NEONFormatMap map = {
        {23, 22, 30},
        {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
    return &map;
  }

  // The long integer format map uses two bits (size<1:0>) to encode the
  // long set of NEON integer vector formats. These are used in narrow, wide
  // and long operations.
  static const NEONFormatMap* LongIntegerFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
    return &map;
  }

  // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
  // formats: NF_2S, NF_4S, NF_2D.
  static const NEONFormatMap* FPFormatMap() {
    // The FP format map assumes two bits (Q, size<0>) are used to encode the
    // NEON FP vector formats: NF_2S, NF_4S, NF_2D.
    static const NEONFormatMap map = {{22, 30},
                                      {NF_2S, NF_4S, NF_UNDEF, NF_2D}};
    return &map;
  }

  // The load/store format map uses three bits (Q, 11, 10) to encode the
  // set of NEON vector formats.
  static const NEONFormatMap* LoadStoreFormatMap() {
    static const NEONFormatMap map = {
        {11, 10, 30},
        {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
    return &map;
  }

  // The logical format map uses one bit (Q) to encode the NEON vector format:
  // NF_8B, NF_16B.
  static const NEONFormatMap* LogicalFormatMap() {
    static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
    return &map;
  }

  // The triangular format map uses between two and five bits to encode the NEON
  // vector format:
  // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
  // x1000->2S, x1001->4S,  10001->2D, all others undefined.
  static const NEONFormatMap* TriangularFormatMap() {
    static const NEONFormatMap map = {
        {19, 18, 17, 16, 30},
        {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
         NF_2S,    NF_4S,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
         NF_UNDEF, NF_2D,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
         NF_2S,    NF_4S,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
    return &map;
  }

  // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
  // formats: NF_B, NF_H, NF_S, NF_D.
  static const NEONFormatMap* ScalarFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
    return &map;
  }

  // The long scalar format map uses two bits (size<1:0>) to encode the longer
  // NEON scalar formats: NF_H, NF_S, NF_D.
  static const NEONFormatMap* LongScalarFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
    return &map;
  }

  // The FP scalar format map assumes one bit (size<0>) is used to encode the
  // NEON FP scalar formats: NF_S, NF_D.
  static const NEONFormatMap* FPScalarFormatMap() {
    static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
    return &map;
  }

  // The triangular scalar format map uses between one and four bits to encode
  // the NEON FP scalar formats:
  // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
  static const NEONFormatMap* TriangularScalarFormatMap() {
    static const NEONFormatMap map = {
        {19, 18, 17, 16},
        {NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B, NF_D, NF_B, NF_H,
         NF_B, NF_S, NF_B, NF_H, NF_B}};
    return &map;
  }

 private:
  // Get a pointer to a string that represents the format or placeholder for
  // the specified substitution index, based on the format map and instruction.
  const char* GetSubstitute(int index, SubstitutionMode mode);

  // Get the NEONFormat enumerated value for bits obtained from the
  // instruction based on the specified format mapping.
  NEONFormat GetNEONFormat(const NEONFormatMap* format_map);

  // Convert a NEONFormat into a string.
  static const char* NEONFormatAsString(NEONFormat format);

  // Convert a NEONFormat into a register placeholder string.
  static const char* NEONFormatAsPlaceholder(NEONFormat format);

  // Select bits from instrbits_ defined by the bits array, concatenate them,
  // and return the value.
  uint8_t PickBits(const uint8_t bits[]);

  Instr instrbits_;
  const NEONFormatMap* formats_[3];
  char form_buffer_[64];
  char mne_buffer_[16];
};
}  // namespace internal
}  // namespace v8


#endif  // V8_ARM64_INSTRUCTIONS_ARM64_H_