// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_OPERANDS_AARCH64_H_
#define VIXL_AARCH64_OPERANDS_AARCH64_H_

#include <sstream>
#include <string>

#include "instructions-aarch64.h"
#include "registers-aarch64.h"

namespace vixl {
namespace aarch64 {

// Lists of registers.
class CPURegList {
 public:
  explicit CPURegList(CPURegister reg1,
                      CPURegister reg2 = NoCPUReg,
                      CPURegister reg3 = NoCPUReg,
                      CPURegister reg4 = NoCPUReg)
      : list_(reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit()),
        size_(reg1.GetSizeInBits()),
        type_(reg1.GetType()) {
    VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
    VIXL_ASSERT(IsValid());
  }

  constexpr CPURegList(CPURegister::RegisterType type,
                       unsigned size,
                       RegList list)
      : list_(list), size_(size), type_(type) {
#ifndef PANDA_BUILD
    VIXL_ASSERT(IsValid());
#endif
  }

  constexpr CPURegList(CPURegister::RegisterType type,
                       unsigned size,
                       unsigned first_reg,
                       unsigned last_reg)
      : list_((UINT64_C(1) << (last_reg + 1)) - 1), size_(size), type_(type) {
    VIXL_ASSERT(
        ((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
        ((type == CPURegister::kVRegister) &&
         (last_reg < kNumberOfVRegisters)));
    VIXL_ASSERT(last_reg >= first_reg);
    list_ &= ~((UINT64_C(1) << first_reg) - 1);
#ifndef PANDA_BUILD
    VIXL_ASSERT(IsValid());
#endif
  }

  // Construct an empty CPURegList with the specified size and type. If `size`
  // is CPURegister::kUnknownSize and the register type requires a size, a valid
  // but unspecified default will be picked.
  static CPURegList Empty(CPURegister::RegisterType type,
                          unsigned size = CPURegister::kUnknownSize) {
    return CPURegList(type, GetDefaultSizeFor(type, size), 0);
  }

  // Construct a CPURegList with all possible registers with the specified size
  // and type. If `size` is CPURegister::kUnknownSize and the register type
  // requires a size, a valid but unspecified default will be picked.
  static CPURegList All(CPURegister::RegisterType type,
                        unsigned size = CPURegister::kUnknownSize) {
    unsigned number_of_registers = (CPURegister::GetMaxCodeFor(type) + 1);
    RegList list = (static_cast<RegList>(1) << number_of_registers) - 1;
    if (type == CPURegister::kRegister) {
      // GetMaxCodeFor(kRegister) ignores SP, so explicitly include it.
      list |= (static_cast<RegList>(1) << kSPRegInternalCode);
    }
    return CPURegList(type, GetDefaultSizeFor(type, size), list);
  }
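
  // For example, register sets can be built like this (illustrative sketch
  // only; the registers shown are arbitrary):
  //
  //     CPURegList pool = CPURegList::Empty(CPURegister::kRegister);
  //     pool.Combine(x19);  // `pool` now contains only x19.
  //     CPURegList all_v = CPURegList::All(CPURegister::kVRegister);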

  CPURegister::RegisterType GetType() const {
    VIXL_ASSERT(IsValid());
    return type_;
  }
  VIXL_DEPRECATED("GetType", CPURegister::RegisterType type() const) {
    return GetType();
  }

  CPURegister::RegisterBank GetBank() const {
    return CPURegister::GetBankFor(GetType());
  }

  // Combine another CPURegList into this one. Registers that already exist in
  // this list are left unchanged. The type and size of the registers in the
  // 'other' list must match those in this list.
  void Combine(const CPURegList& other) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
    list_ |= other.GetList();
  }

  // Remove every register in the other CPURegList from this one. Registers that
  // do not exist in this list are ignored. The type and size of the registers
  // in the 'other' list must match those in this list.
  void Remove(const CPURegList& other) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
    list_ &= ~other.GetList();
  }
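
  // For example, these set operations compose naturally (illustrative sketch;
  // `scratch` is a hypothetical working set):
  //
  //     CPURegList scratch = CPURegList::GetCallerSaved();
  //     scratch.Remove(CPURegList(x0, x1));  // Keep x0 and x1 out of the set.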

  // Variants of Combine and Remove which take a single register.
  void Combine(const CPURegister& other) {
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetSizeInBits() == size_);
    Combine(other.GetCode());
  }

  void Remove(const CPURegister& other) {
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetSizeInBits() == size_);
    Remove(other.GetCode());
  }

  // Variants of Combine and Remove which take a single register by its code;
  // the type and size of the register are inferred from this list.
  void Combine(int code) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
    list_ |= (UINT64_C(1) << code);
  }

  void Remove(int code) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
    list_ &= ~(UINT64_C(1) << code);
  }

  static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
    VIXL_ASSERT(list_1.type_ == list_2.type_);
    VIXL_ASSERT(list_1.size_ == list_2.size_);
    return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
  }
  static CPURegList Union(const CPURegList& list_1,
                          const CPURegList& list_2,
                          const CPURegList& list_3);
  static CPURegList Union(const CPURegList& list_1,
                          const CPURegList& list_2,
                          const CPURegList& list_3,
                          const CPURegList& list_4);

  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2) {
    VIXL_ASSERT(list_1.type_ == list_2.type_);
    VIXL_ASSERT(list_1.size_ == list_2.size_);
    return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
  }
  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2,
                                 const CPURegList& list_3);
  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2,
                                 const CPURegList& list_3,
                                 const CPURegList& list_4);

  bool Overlaps(const CPURegList& other) const {
    return (type_ == other.type_) && ((list_ & other.list_) != 0);
  }

  constexpr RegList GetList() const {
#ifndef PANDA_BUILD
    VIXL_ASSERT(IsValid());
#endif
    return list_;
  }
  VIXL_DEPRECATED("GetList", RegList list() const) { return GetList(); }

  void SetList(RegList new_list) {
    VIXL_ASSERT(IsValid());
    list_ = new_list;
  }
  VIXL_DEPRECATED("SetList", void set_list(RegList new_list)) {
    return SetList(new_list);
  }

  // Remove all callee-saved registers from the list. This can be useful when
  // preparing registers for an AAPCS64 function call, for example.
  void RemoveCalleeSaved();
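
  // For instance, a register pool might be pruned before a call (illustrative
  // sketch; `pool` is a hypothetical list of X registers):
  //
  //     CPURegList pool = CPURegList::All(CPURegister::kRegister, kXRegSize);
  //     pool.RemoveCalleeSaved();  // Drop x19-x28 from the pool.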

  // Find the register in this list that appears in `mask` with the lowest or
  // highest code, remove it from the list and return it as a CPURegister. If
  // the list is empty, leave it unchanged and return NoCPUReg.
  CPURegister PopLowestIndex(RegList mask = ~static_cast<RegList>(0));
  CPURegister PopHighestIndex(RegList mask = ~static_cast<RegList>(0));
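
  // These allow a list to be drained in code order, for example (illustrative
  // sketch; `list` is any CPURegList):
  //
  //     while (!list.IsEmpty()) {
  //       CPURegister reg = list.PopLowestIndex();
  //       // ... use `reg` ...
  //     }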

  // AAPCS64 callee-saved registers.
  static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
  static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);

  // AAPCS64 caller-saved registers. Note that this includes lr.
  // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
  // 64 bits being caller-saved.
  static CPURegList GetCallerSaved(unsigned size = kXRegSize);
  static CPURegList GetCallerSavedV(unsigned size = kDRegSize);

  bool IsEmpty() const {
    VIXL_ASSERT(IsValid());
    return list_ == 0;
  }

  bool IncludesAliasOf(const CPURegister& other) const {
    VIXL_ASSERT(IsValid());
    return (GetBank() == other.GetBank()) && IncludesAliasOf(other.GetCode());
  }

  bool IncludesAliasOf(int code) const {
    VIXL_ASSERT(IsValid());
    return (((static_cast<RegList>(1) << code) & list_) != 0);
  }

  int GetCount() const {
    VIXL_ASSERT(IsValid());
    return CountSetBits(list_);
  }
  VIXL_DEPRECATED("GetCount", int Count() const) { return GetCount(); }

  int GetRegisterSizeInBits() const {
    VIXL_ASSERT(IsValid());
    return size_;
  }
  VIXL_DEPRECATED("GetRegisterSizeInBits", int RegisterSizeInBits() const) {
    return GetRegisterSizeInBits();
  }

  int GetRegisterSizeInBytes() const {
    int size_in_bits = GetRegisterSizeInBits();
    VIXL_ASSERT((size_in_bits % 8) == 0);
    return size_in_bits / 8;
  }
  VIXL_DEPRECATED("GetRegisterSizeInBytes", int RegisterSizeInBytes() const) {
    return GetRegisterSizeInBytes();
  }

  unsigned GetTotalSizeInBytes() const {
    VIXL_ASSERT(IsValid());
    return GetRegisterSizeInBytes() * GetCount();
  }
  VIXL_DEPRECATED("GetTotalSizeInBytes", unsigned TotalSizeInBytes() const) {
    return GetTotalSizeInBytes();
  }

 private:
  // If `size` is CPURegister::kUnknownSize and the type requires a known size,
  // then return an arbitrary-but-valid size.
  //
  // Otherwise, the size is checked for validity and returned unchanged.
  static unsigned GetDefaultSizeFor(CPURegister::RegisterType type,
                                    unsigned size) {
    if (size == CPURegister::kUnknownSize) {
      if (type == CPURegister::kRegister) size = kXRegSize;
      if (type == CPURegister::kVRegister) size = kQRegSize;
      // All other types require kUnknownSize.
    }
    VIXL_ASSERT(CPURegister(0, size, type).IsValid());
    return size;
  }

  RegList list_;
  int size_;
  CPURegister::RegisterType type_;

  bool IsValid() const;
};


// AAPCS64 callee-saved registers.
constexpr CPURegList kCalleeSaved =
    CPURegList(CPURegister::kRegister, kXRegSize, 19, 28);
constexpr CPURegList kCalleeSavedV =
    CPURegList(CPURegister::kVRegister, kDRegSize, 8, 15);

// AAPCS64 caller-saved registers. Note that this includes lr.
constexpr CPURegList kCallerSaved =
    CPURegList(CPURegister::kRegister, kXRegSize, 0, 18);
constexpr CPURegList kCallerSavedV =
    CPURegList(CPURegister::kVRegister, kDRegSize, 0xffff00ff);

class IntegerOperand;

// Operand.
class Operand {
 public:
  // #<immediate>
  // where <immediate> is int64_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  Operand(int64_t immediate);  // NOLINT(runtime/explicit)

  Operand(IntegerOperand immediate);  // NOLINT(runtime/explicit)

  // rm, {<shift> #<shift_amount>}
  // where <shift> is one of {LSL, LSR, ASR, ROR}.
  //       <shift_amount> is uint6_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  Operand(Register reg,
          Shift shift = LSL,
          unsigned shift_amount = 0);  // NOLINT(runtime/explicit)

  // rm, {<extend> {#<shift_amount>}}
  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
  //       <shift_amount> is uint2_t.
  explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
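
  // These map directly onto assembly operand syntax, for example (illustrative
  // sketch; the registers and constants are arbitrary):
  //
  //     Operand(42);          // #42
  //     Operand(x1);          // x1
  //     Operand(x1, LSL, 4);  // x1, LSL #4
  //     Operand(w2, SXTW);    // w2, SXTW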

  bool IsImmediate() const;
  bool IsPlainRegister() const;
  bool IsShiftedRegister() const;
  bool IsExtendedRegister() const;
  bool IsZero() const;

  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
  // which helps in the encoding of instructions that use the stack pointer.
  Operand ToExtendedRegister() const;

  int64_t GetImmediate() const {
    VIXL_ASSERT(IsImmediate());
    return immediate_;
  }
  VIXL_DEPRECATED("GetImmediate", int64_t immediate() const) {
    return GetImmediate();
  }

  int64_t GetEquivalentImmediate() const {
    return IsZero() ? 0 : GetImmediate();
  }

  Register GetRegister() const {
    VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
    return reg_;
  }
  VIXL_DEPRECATED("GetRegister", Register reg() const) { return GetRegister(); }
  Register GetBaseRegister() const { return GetRegister(); }

  Shift GetShift() const {
    VIXL_ASSERT(IsShiftedRegister());
    return shift_;
  }
  VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); }

  Extend GetExtend() const {
    VIXL_ASSERT(IsExtendedRegister());
    return extend_;
  }
  VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); }

  unsigned GetShiftAmount() const {
    VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
    return shift_amount_;
  }
  VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) {
    return GetShiftAmount();
  }

 private:
  int64_t immediate_;
  Register reg_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};


// MemOperand represents the addressing mode of a load or store instruction.
// In assembly syntax, MemOperands are normally denoted by one or more elements
// inside or around square brackets.
class MemOperand {
 public:
  // Creates an invalid `MemOperand`.
  MemOperand();
  explicit MemOperand(Register base,
                      int64_t offset = 0,
                      AddrMode addrmode = Offset);
  MemOperand(Register base,
             Register regoffset,
             Shift shift = LSL,
             unsigned shift_amount = 0);
  MemOperand(Register base,
             Register regoffset,
             Extend extend,
             unsigned shift_amount = 0);
  MemOperand(Register base, const Operand& offset, AddrMode addrmode = Offset);
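
  // The constructors cover the usual AArch64 addressing modes, for example
  // (illustrative sketch; registers and offsets are arbitrary):
  //
  //     MemOperand(x0);                 // [x0]
  //     MemOperand(x0, 8);              // [x0, #8]
  //     MemOperand(x0, x1, LSL, 3);     // [x0, x1, LSL #3]
  //     MemOperand(x0, w1, SXTW, 2);    // [x0, w1, SXTW #2]
  //     MemOperand(x0, 16, PreIndex);   // [x0, #16]!
  //     MemOperand(x0, 16, PostIndex);  // [x0], #16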

  const Register& GetBaseRegister() const { return base_; }

  // If the MemOperand has a register offset, return it. (This also applies to
  // pre- and post-index modes.) Otherwise, return NoReg.
  const Register& GetRegisterOffset() const { return regoffset_; }

  // If the MemOperand has an immediate offset, return it. (This also applies to
  // pre- and post-index modes.) Otherwise, return 0.
  int64_t GetOffset() const { return offset_; }

  AddrMode GetAddrMode() const { return addrmode_; }
  Shift GetShift() const { return shift_; }
  Extend GetExtend() const { return extend_; }

  unsigned GetShiftAmount() const {
    // Extend modes can also encode a shift for some instructions.
    VIXL_ASSERT((GetShift() != NO_SHIFT) || (GetExtend() != NO_EXTEND));
    return shift_amount_;
  }

  // True for MemOperands which represent something like [x0].
  // Currently, this will also return true for [x0, #0], because MemOperand has
  // no way to distinguish the two.
  bool IsPlainRegister() const;

  // True for MemOperands which represent something like [x0], or for compound
  // MemOperands which are functionally equivalent, such as [x0, #0], [x0, xzr]
  // or [x0, wzr, UXTW #3].
  bool IsEquivalentToPlainRegister() const;

  // True for immediate-offset (but not indexed) MemOperands.
  bool IsImmediateOffset() const;
  // True for register-offset (but not indexed) MemOperands.
  bool IsRegisterOffset() const;
  // True for immediate or register pre-indexed MemOperands.
  bool IsPreIndex() const;
  // True for immediate or register post-indexed MemOperands.
  bool IsPostIndex() const;
  // True for immediate pre-indexed MemOperands, [reg, #imm]!
  bool IsImmediatePreIndex() const;
  // True for immediate post-indexed MemOperands, [reg], #imm
  bool IsImmediatePostIndex() const;

  void AddOffset(int64_t offset);

  bool IsValid() const {
    return base_.IsValid() &&
           ((addrmode_ == Offset) || (addrmode_ == PreIndex) ||
            (addrmode_ == PostIndex)) &&
           ((shift_ == NO_SHIFT) || (extend_ == NO_EXTEND)) &&
           ((offset_ == 0) || !regoffset_.IsValid());
  }

  bool Equals(const MemOperand& other) const {
    return base_.Is(other.base_) && regoffset_.Is(other.regoffset_) &&
           (offset_ == other.offset_) && (addrmode_ == other.addrmode_) &&
           (shift_ == other.shift_) && (extend_ == other.extend_) &&
           (shift_amount_ == other.shift_amount_);
  }

 private:
  Register base_;
  Register regoffset_;
  int64_t offset_;
  AddrMode addrmode_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};

// SVE supports memory operands which don't make sense to the core ISA, such as
// scatter-gather forms, in which either the base or offset registers are
// vectors. This class exists to avoid complicating core-ISA code with
// SVE-specific behaviour.
//
// Note that SVE does not support any pre- or post-index modes.
class SVEMemOperand {
 public:
  // "vector-plus-immediate", like [z0.s, #21]
  explicit SVEMemOperand(ZRegister base, uint64_t offset = 0)
      : base_(base),
        regoffset_(NoReg),
        offset_(RawbitsToInt64(offset)),
        mod_(NO_SVE_OFFSET_MODIFIER),
        shift_amount_(0) {
    VIXL_ASSERT(IsVectorPlusImmediate());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-immediate", like [x0], [x0, #42] or [x0, #42, MUL_VL]
  // The only supported modifiers are NO_SVE_OFFSET_MODIFIER or SVE_MUL_VL.
  //
  // Note that VIXL cannot currently distinguish between `SVEMemOperand(x0)` and
  // `SVEMemOperand(x0, 0)`. This is only significant in scalar-plus-scalar
  // instructions where xm defaults to xzr. However, users should not rely on
  // `SVEMemOperand(x0, 0)` being accepted in such cases.
  explicit SVEMemOperand(Register base,
                         uint64_t offset = 0,
                         SVEOffsetModifier mod = NO_SVE_OFFSET_MODIFIER)
      : base_(base),
        regoffset_(NoReg),
        offset_(RawbitsToInt64(offset)),
        mod_(mod),
        shift_amount_(0) {
    VIXL_ASSERT(IsScalarPlusImmediate());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-scalar", like [x0, x1]
  // "scalar-plus-vector", like [x0, z1.d]
  SVEMemOperand(Register base, CPURegister offset)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(NO_SVE_OFFSET_MODIFIER),
        shift_amount_(0) {
    VIXL_ASSERT(IsScalarPlusScalar() || IsScalarPlusVector());
    if (offset.IsZero()) VIXL_ASSERT(IsEquivalentToScalar());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-vector", like [x0, z1.d, UXTW]
  // The type of `mod` can be any `SVEOffsetModifier` (other than LSL), or a
  // corresponding `Extend` value.
  template <typename M>
  SVEMemOperand(Register base, ZRegister offset, M mod)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(0) {
    VIXL_ASSERT(mod_ != SVE_LSL);  // LSL requires an explicit shift amount.
    VIXL_ASSERT(IsScalarPlusVector());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-scalar", like [x0, x1, LSL #1]
  // "scalar-plus-vector", like [x0, z1.d, LSL #2]
  // The type of `mod` can be any `SVEOffsetModifier`, or a corresponding
  // `Shift` or `Extend` value.
  template <typename M>
  SVEMemOperand(Register base,
                CPURegister offset,
                M mod,
                unsigned shift_amount)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(shift_amount) {
    VIXL_ASSERT(IsValid());
  }

  // "vector-plus-scalar", like [z0.d, x0]
  SVEMemOperand(ZRegister base, Register offset)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(NO_SVE_OFFSET_MODIFIER),
        shift_amount_(0) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(IsVectorPlusScalar());
  }

  // "vector-plus-vector", like [z0.d, z1.d, UXTW]
  template <typename M = SVEOffsetModifier>
  SVEMemOperand(ZRegister base,
                ZRegister offset,
                M mod = NO_SVE_OFFSET_MODIFIER,
                unsigned shift_amount = 0)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(shift_amount) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(IsVectorPlusVector());
  }
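
  // Putting these together, typical SVE addressing forms look like this
  // (illustrative sketch; the registers are arbitrary):
  //
  //     SVEMemOperand(x0);                  // [x0]
  //     SVEMemOperand(x0, 42);              // [x0, #42]
  //     SVEMemOperand(x0, 3, SVE_MUL_VL);   // [x0, #3, MUL VL]
  //     SVEMemOperand(x0, x1, LSL, 2);      // [x0, x1, LSL #2]
  //     SVEMemOperand(x0, z1.VnD(), SXTW);  // [x0, z1.d, SXTW]
  //     SVEMemOperand(z0.VnS(), 21);        // [z0.s, #21]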

  // True for SVEMemOperands which represent something like [x0].
  // This will also return true for [x0, #0], because there is no way
  // to distinguish the two.
  bool IsPlainScalar() const {
    return IsScalarPlusImmediate() && (offset_ == 0);
  }

  // True for SVEMemOperands which represent something like [x0], or for
  // compound SVEMemOperands which are functionally equivalent, such as
  // [x0, #0], [x0, xzr] or [x0, wzr, UXTW #3].
  bool IsEquivalentToScalar() const;

  // True for SVEMemOperands like [x0], [x0, #0], false for [x0, xzr] and
  // similar.
  bool IsPlainRegister() const;

  bool IsScalarPlusImmediate() const {
    return base_.IsX() && regoffset_.IsNone() &&
           ((mod_ == NO_SVE_OFFSET_MODIFIER) || IsMulVl());
  }

  bool IsScalarPlusScalar() const {
    // SVE offers no extend modes for scalar-plus-scalar, so both registers must
    // be X registers.
    return base_.IsX() && regoffset_.IsX() &&
           ((mod_ == NO_SVE_OFFSET_MODIFIER) || (mod_ == SVE_LSL));
  }

  bool IsScalarPlusVector() const {
    // The modifier can be LSL or an extend mode (UXTW or SXTW) here. Unlike
    // in the core ISA, these extend modes do not imply an S-sized lane, so the
    // modifier is independent from the lane size. The architecture describes
    // [US]XTW with a D-sized lane as an "unpacked" offset.
    return base_.IsX() && regoffset_.IsZRegister() &&
           (regoffset_.IsLaneSizeS() || regoffset_.IsLaneSizeD()) && !IsMulVl();
  }

  bool IsVectorPlusImmediate() const {
    return base_.IsZRegister() &&
           (base_.IsLaneSizeS() || base_.IsLaneSizeD()) &&
           regoffset_.IsNone() && (mod_ == NO_SVE_OFFSET_MODIFIER);
  }

  bool IsVectorPlusScalar() const {
    return base_.IsZRegister() && regoffset_.IsX() &&
           (base_.IsLaneSizeS() || base_.IsLaneSizeD());
  }

  bool IsVectorPlusVector() const {
    return base_.IsZRegister() && regoffset_.IsZRegister() && (offset_ == 0) &&
           AreSameFormat(base_, regoffset_) &&
           (base_.IsLaneSizeS() || base_.IsLaneSizeD());
  }

  bool IsContiguous() const { return !IsScatterGather(); }
  bool IsScatterGather() const {
    return base_.IsZRegister() || regoffset_.IsZRegister();
  }

  // TODO: If necessary, add helpers like `HasScalarBase()`.

  Register GetScalarBase() const {
    VIXL_ASSERT(base_.IsX());
    return Register(base_);
  }

  ZRegister GetVectorBase() const {
    VIXL_ASSERT(base_.IsZRegister());
    VIXL_ASSERT(base_.HasLaneSize());
    return ZRegister(base_);
  }

  Register GetScalarOffset() const {
    VIXL_ASSERT(regoffset_.IsRegister());
    return Register(regoffset_);
  }

  ZRegister GetVectorOffset() const {
    VIXL_ASSERT(regoffset_.IsZRegister());
    VIXL_ASSERT(regoffset_.HasLaneSize());
    return ZRegister(regoffset_);
  }

  int64_t GetImmediateOffset() const {
    VIXL_ASSERT(regoffset_.IsNone());
    return offset_;
  }

  SVEOffsetModifier GetOffsetModifier() const { return mod_; }
  unsigned GetShiftAmount() const { return shift_amount_; }

  bool IsEquivalentToLSL(unsigned amount) const {
    if (shift_amount_ != amount) return false;
    if (amount == 0) {
      // No-shift is equivalent to "LSL #0".
      return ((mod_ == SVE_LSL) || (mod_ == NO_SVE_OFFSET_MODIFIER));
    }
    return mod_ == SVE_LSL;
  }

  bool IsMulVl() const { return mod_ == SVE_MUL_VL; }

  bool IsValid() const;

 private:
  // Allow standard `Shift` and `Extend` arguments to be used.
  SVEOffsetModifier GetSVEOffsetModifierFor(Shift shift) {
    if (shift == LSL) return SVE_LSL;
    if (shift == NO_SHIFT) return NO_SVE_OFFSET_MODIFIER;
    // SVE does not accept any other shift.
    VIXL_UNIMPLEMENTED();
    return NO_SVE_OFFSET_MODIFIER;
  }

  SVEOffsetModifier GetSVEOffsetModifierFor(Extend extend = NO_EXTEND) {
    if (extend == UXTW) return SVE_UXTW;
    if (extend == SXTW) return SVE_SXTW;
    if (extend == NO_EXTEND) return NO_SVE_OFFSET_MODIFIER;
    // SVE does not accept any other extend mode.
    VIXL_UNIMPLEMENTED();
    return NO_SVE_OFFSET_MODIFIER;
  }

  SVEOffsetModifier GetSVEOffsetModifierFor(SVEOffsetModifier mod) {
    return mod;
  }

  CPURegister base_;
  CPURegister regoffset_;
  int64_t offset_;
  SVEOffsetModifier mod_;
  unsigned shift_amount_;
};

// Represent a signed or unsigned integer operand.
//
// This is designed to make instructions which naturally accept a _signed_
// immediate easier to implement and use, when we also want users to be able to
// specify raw-bits values (such as with hexadecimal constants). The advantage
// of this class over a simple uint64_t (with implicit C++ sign-extension) is
// that this class can strictly check the range of allowed values. With a simple
// uint64_t, it is impossible to distinguish -1 from UINT64_MAX.
//
// For example, these instructions are equivalent:
//
//     __ Insr(z0.VnB(), -1);
//     __ Insr(z0.VnB(), 0xff);
//
// ... as are these:
//
//     __ Insr(z0.VnD(), -1);
//     __ Insr(z0.VnD(), 0xffffffffffffffff);
//
// ... but this is invalid:
//
//     __ Insr(z0.VnB(), 0xffffffffffffffff);  // Too big for B-sized lanes.
class IntegerOperand {
 public:
#define VIXL_INT_TYPES(V) \
  V(char) V(short) V(int) V(long) V(long long)  // NOLINT(runtime/int)
#define VIXL_DECL_INT_OVERLOADS(T)                                        \
  /* These are allowed to be implicit constructors because this is a */   \
  /* wrapper class that doesn't normally perform any type conversion. */  \
  IntegerOperand(signed T immediate) /* NOLINT(runtime/explicit) */       \
      : raw_bits_(immediate),        /* Allow implicit sign-extension. */ \
        is_negative_(immediate < 0) {}                                    \
  IntegerOperand(unsigned T immediate) /* NOLINT(runtime/explicit) */     \
      : raw_bits_(immediate), is_negative_(false) {}
  VIXL_INT_TYPES(VIXL_DECL_INT_OVERLOADS)
#undef VIXL_DECL_INT_OVERLOADS
#undef VIXL_INT_TYPES

  // TODO: `Operand` can currently only hold an int64_t, so some large, unsigned
  // values will be misrepresented here.
  explicit IntegerOperand(const Operand& operand)
      : raw_bits_(operand.GetEquivalentImmediate()),
        is_negative_(operand.GetEquivalentImmediate() < 0) {}

  bool IsIntN(unsigned n) const {
    return is_negative_ ? vixl::IsIntN(n, RawbitsToInt64(raw_bits_))
                        : vixl::IsIntN(n, raw_bits_);
  }
  bool IsUintN(unsigned n) const {
    return !is_negative_ && vixl::IsUintN(n, raw_bits_);
  }
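
  // For example (illustrative; the values are arbitrary):
  //
  //     IntegerOperand(-1).IsIntN(8);     // true: -1 fits in int8_t.
  //     IntegerOperand(-1).IsUintN(8);    // false: negative values never do.
  //     IntegerOperand(0xff).IsUintN(8);  // true: 255 fits in uint8_t.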

  bool IsUint8() const { return IsUintN(8); }
  bool IsUint16() const { return IsUintN(16); }
  bool IsUint32() const { return IsUintN(32); }
  bool IsUint64() const { return IsUintN(64); }

  bool IsInt8() const { return IsIntN(8); }
  bool IsInt16() const { return IsIntN(16); }
  bool IsInt32() const { return IsIntN(32); }
  bool IsInt64() const { return IsIntN(64); }

  bool FitsInBits(unsigned n) const {
    return is_negative_ ? IsIntN(n) : IsUintN(n);
  }
  bool FitsInLane(const CPURegister& zd) const {
    return FitsInBits(zd.GetLaneSizeInBits());
  }
  bool FitsInSignedLane(const CPURegister& zd) const {
    return IsIntN(zd.GetLaneSizeInBits());
  }
  bool FitsInUnsignedLane(const CPURegister& zd) const {
    return IsUintN(zd.GetLaneSizeInBits());
  }

  // Cast a value in the range [INT<n>_MIN, UINT<n>_MAX] to an unsigned integer
  // in the range [0, UINT<n>_MAX] (using two's complement mapping).
  uint64_t AsUintN(unsigned n) const {
    VIXL_ASSERT(FitsInBits(n));
    return raw_bits_ & GetUintMask(n);
  }
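
  // For example, under the two's complement mapping (illustrative only):
  //
  //     IntegerOperand(-1).AsUintN(8);   // 0xff
  //     IntegerOperand(-2).AsUintN(16);  // 0xfffe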

  uint8_t AsUint8() const { return static_cast<uint8_t>(AsUintN(8)); }
  uint16_t AsUint16() const { return static_cast<uint16_t>(AsUintN(16)); }
  uint32_t AsUint32() const { return static_cast<uint32_t>(AsUintN(32)); }
  uint64_t AsUint64() const { return AsUintN(64); }

  // Cast a value in the range [INT<n>_MIN, UINT<n>_MAX] to a signed integer in
  // the range [INT<n>_MIN, INT<n>_MAX] (using two's complement mapping).
  int64_t AsIntN(unsigned n) const {
    VIXL_ASSERT(FitsInBits(n));
    return ExtractSignedBitfield64(n - 1, 0, raw_bits_);
  }

  int8_t AsInt8() const { return static_cast<int8_t>(AsIntN(8)); }
  int16_t AsInt16() const { return static_cast<int16_t>(AsIntN(16)); }
  int32_t AsInt32() const { return static_cast<int32_t>(AsIntN(32)); }
  int64_t AsInt64() const { return AsIntN(64); }

  // Several instructions encode a signed int<N>_t, which is then (optionally)
  // left-shifted and sign-extended to a Z register lane with a size which may
  // be larger than N. This helper tries to find an int<N>_t such that the
  // IntegerOperand's arithmetic value is reproduced in each lane.
  //
  // This is the mechanism that allows `Insr(z0.VnB(), 0xff)` to be treated as
  // `Insr(z0.VnB(), -1)`.
  template <unsigned N, unsigned kShift, typename T>
  bool TryEncodeAsShiftedIntNForLane(const CPURegister& zd, T* imm) const {
    VIXL_STATIC_ASSERT(std::numeric_limits<T>::digits > N);
    VIXL_ASSERT(FitsInLane(zd));
    if ((raw_bits_ & GetUintMask(kShift)) != 0) return false;

    // Reverse the specified left-shift.
    IntegerOperand unshifted(*this);
    unshifted.ArithmeticShiftRight(kShift);

    if (unshifted.IsIntN(N)) {
      // This is trivial, since sign-extension produces the same arithmetic
      // value irrespective of the destination size.
      *imm = static_cast<T>(unshifted.AsIntN(N));
      return true;
    }

    // Otherwise, we might be able to use the sign-extension to produce the
    // desired bit pattern. We can only do this for values in the range
    // [INT<N>_MAX + 1, UINT<N>_MAX], where the highest set bit is the sign bit.
    //
    // The lane size has to be adjusted to compensate for `kShift`, since the
    // high bits will be dropped when the encoded value is left-shifted.
    if (unshifted.IsUintN(zd.GetLaneSizeInBits() - kShift)) {
      int64_t encoded = unshifted.AsIntN(zd.GetLaneSizeInBits() - kShift);
      if (vixl::IsIntN(N, encoded)) {
        *imm = static_cast<T>(encoded);
        return true;
      }
    }
    return false;
  }

  // As above, but `kShift` is written to the `*shift` parameter on success, so
  // that it is easy to chain calls like this:
  //
  //     if (imm.TryEncodeAsShiftedIntNForLane<8, 0>(zd, &imm8, &shift) ||
  //         imm.TryEncodeAsShiftedIntNForLane<8, 8>(zd, &imm8, &shift)) {
  //       insn(zd, imm8, shift)
  //     }
  template <unsigned N, unsigned kShift, typename T, typename S>
  bool TryEncodeAsShiftedIntNForLane(const CPURegister& zd,
                                     T* imm,
                                     S* shift) const {
    if (TryEncodeAsShiftedIntNForLane<N, kShift>(zd, imm)) {
      *shift = kShift;
      return true;
    }
    return false;
  }

  // As above, but assume that `kShift` is 0.
  template <unsigned N, typename T>
  bool TryEncodeAsIntNForLane(const CPURegister& zd, T* imm) const {
    return TryEncodeAsShiftedIntNForLane<N, 0>(zd, imm);
  }

  // As above, but for unsigned fields. This is usually a simple operation, but
  // is provided for symmetry.
  template <unsigned N, unsigned kShift, typename T>
  bool TryEncodeAsShiftedUintNForLane(const CPURegister& zd, T* imm) const {
    VIXL_STATIC_ASSERT(std::numeric_limits<T>::digits > N);
    VIXL_ASSERT(FitsInLane(zd));

    // TODO: Should we convert -1 to 0xff here?
    if (is_negative_) return false;
    USE(zd);

    if ((raw_bits_ & GetUintMask(kShift)) != 0) return false;

    if (vixl::IsUintN(N, raw_bits_ >> kShift)) {
      *imm = static_cast<T>(raw_bits_ >> kShift);
      return true;
    }
    return false;
  }

  template <unsigned N, unsigned kShift, typename T, typename S>
  bool TryEncodeAsShiftedUintNForLane(const CPURegister& zd,
                                      T* imm,
                                      S* shift) const {
    if (TryEncodeAsShiftedUintNForLane<N, kShift>(zd, imm)) {
      *shift = kShift;
      return true;
    }
    return false;
  }

  bool IsZero() const { return raw_bits_ == 0; }
  bool IsNegative() const { return is_negative_; }
  bool IsPositiveOrZero() const { return !is_negative_; }

  uint64_t GetMagnitude() const {
    return is_negative_ ? -raw_bits_ : raw_bits_;
  }

 private:
  // Shift the arithmetic value right, with sign extension if is_negative_.
  void ArithmeticShiftRight(int shift) {
    VIXL_ASSERT((shift >= 0) && (shift < 64));
    if (shift == 0) return;
    if (is_negative_) {
      raw_bits_ = ExtractSignedBitfield64(63, shift, raw_bits_);
    } else {
      raw_bits_ >>= shift;
    }
  }

  uint64_t raw_bits_;
  bool is_negative_;
};

// This is an abstraction that can represent a register or memory location. The
// `MacroAssembler` provides helpers to move data between generic operands.
class GenericOperand {
 public:
  GenericOperand() { VIXL_ASSERT(!IsValid()); }
  GenericOperand(const CPURegister& reg);  // NOLINT(runtime/explicit)
  GenericOperand(const MemOperand& mem_op,
                 size_t mem_op_size = 0);  // NOLINT(runtime/explicit)
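
  // A GenericOperand wraps either a register or a memory location, for example
  // (illustrative sketch; the stack slot and its size are arbitrary):
  //
  //     GenericOperand in_reg(x0);
  //     GenericOperand in_mem(MemOperand(sp, 8), kXRegSizeInBytes);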

  bool IsValid() const { return cpu_register_.IsValid() != mem_op_.IsValid(); }

  bool Equals(const GenericOperand& other) const;

  bool IsCPURegister() const {
    VIXL_ASSERT(IsValid());
    return cpu_register_.IsValid();
  }

  bool IsRegister() const {
    return IsCPURegister() && cpu_register_.IsRegister();
  }

  bool IsVRegister() const {
    return IsCPURegister() && cpu_register_.IsVRegister();
  }

  bool IsSameCPURegisterType(const GenericOperand& other) {
    return IsCPURegister() && other.IsCPURegister() &&
           GetCPURegister().IsSameType(other.GetCPURegister());
  }

  bool IsMemOperand() const {
    VIXL_ASSERT(IsValid());
    return mem_op_.IsValid();
  }

  CPURegister GetCPURegister() const {
    VIXL_ASSERT(IsCPURegister());
    return cpu_register_;
  }

  MemOperand GetMemOperand() const {
    VIXL_ASSERT(IsMemOperand());
    return mem_op_;
  }

  size_t GetMemOperandSizeInBytes() const {
    VIXL_ASSERT(IsMemOperand());
    return mem_op_size_;
  }

  size_t GetSizeInBytes() const {
    return IsCPURegister() ? cpu_register_.GetSizeInBytes()
                           : GetMemOperandSizeInBytes();
  }

  size_t GetSizeInBits() const { return GetSizeInBytes() * kBitsPerByte; }

 private:
  CPURegister cpu_register_;
  MemOperand mem_op_;
  // The size of the memory region pointed to, in bytes.
  // We only support sizes up to X/D register sizes.
  size_t mem_op_size_;
};
}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_OPERANDS_AARCH64_H_