// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_OPERANDS_AARCH64_H_
#define VIXL_AARCH64_OPERANDS_AARCH64_H_

#include <sstream>
#include <string>

#include "instructions-aarch64.h"
#include "registers-aarch64.h"

namespace vixl {
namespace aarch64 {

// Lists of registers.
class CPURegList {
 public:
  explicit CPURegList(CPURegister reg1,
                      CPURegister reg2 = NoCPUReg,
                      CPURegister reg3 = NoCPUReg,
                      CPURegister reg4 = NoCPUReg)
      : list_(reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit()),
        size_(reg1.GetSizeInBits()),
        type_(reg1.GetType()) {
    VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
    VIXL_ASSERT(IsValid());
  }

  CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
      : list_(list), size_(size), type_(type) {
    VIXL_ASSERT(IsValid());
  }

  CPURegList(CPURegister::RegisterType type,
             unsigned size,
             unsigned first_reg,
             unsigned last_reg)
      : size_(size), type_(type) {
    VIXL_ASSERT(
        ((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
        ((type == CPURegister::kVRegister) &&
         (last_reg < kNumberOfVRegisters)));
    VIXL_ASSERT(last_reg >= first_reg);
    list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
    list_ &= ~((UINT64_C(1) << first_reg) - 1);
    VIXL_ASSERT(IsValid());
  }

  // Construct an empty CPURegList with the specified size and type. If `size`
  // is CPURegister::kUnknownSize and the register type requires a size, a valid
  // but unspecified default will be picked.
  static CPURegList Empty(CPURegister::RegisterType type,
                          unsigned size = CPURegister::kUnknownSize) {
    return CPURegList(type, GetDefaultSizeFor(type, size), 0);
  }

  // Construct a CPURegList with all possible registers with the specified size
  // and type. If `size` is CPURegister::kUnknownSize and the register type
  // requires a size, a valid but unspecified default will be picked.
  static CPURegList All(CPURegister::RegisterType type,
                        unsigned size = CPURegister::kUnknownSize) {
    unsigned number_of_registers = (CPURegister::GetMaxCodeFor(type) + 1);
    RegList list = (static_cast<RegList>(1) << number_of_registers) - 1;
    if (type == CPURegister::kRegister) {
      // GetMaxCodeFor(kRegister) ignores SP, so explicitly include it.
      list |= (static_cast<RegList>(1) << kSPRegInternalCode);
    }
    return CPURegList(type, GetDefaultSizeFor(type, size), list);
  }
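
  // The examples below are illustrative only (not part of the VIXL API); they
  // sketch typical ways to build register lists, assuming the usual register
  // aliases (x19, d8, and so on) declared in registers-aarch64.h:
  //
  //     CPURegList gprs(x19, x20, x21, x22);  // An explicit set of X registers.
  //     CPURegList fprs(CPURegister::kVRegister, kDRegSize, 8, 15);  // d8-d15.
  //     CPURegList all_v = CPURegList::All(CPURegister::kVRegister);  // q0-q31.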

  CPURegister::RegisterType GetType() const {
    VIXL_ASSERT(IsValid());
    return type_;
  }
  VIXL_DEPRECATED("GetType", CPURegister::RegisterType type() const) {
    return GetType();
  }

  CPURegister::RegisterBank GetBank() const {
    return CPURegister::GetBankFor(GetType());
  }

  // Combine another CPURegList into this one. Registers that already exist in
  // this list are left unchanged. The type and size of the registers in the
  // 'other' list must match those in this list.
  void Combine(const CPURegList& other) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
    list_ |= other.GetList();
  }

  // Remove every register in the other CPURegList from this one. Registers that
  // do not exist in this list are ignored. The type and size of the registers
  // in the 'other' list must match those in this list.
  void Remove(const CPURegList& other) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
    list_ &= ~other.GetList();
  }

  // Variants of Combine and Remove which take a single register.
  void Combine(const CPURegister& other) {
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetSizeInBits() == size_);
    Combine(other.GetCode());
  }

  void Remove(const CPURegister& other) {
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetSizeInBits() == size_);
    Remove(other.GetCode());
  }
  // Variants of Combine and Remove which take a single register by its code;
  // the type and size of the register are inferred from this list.
  void Combine(int code) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
    list_ |= (UINT64_C(1) << code);
  }

  void Remove(int code) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
    list_ &= ~(UINT64_C(1) << code);
  }

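  // As an informal illustration (not part of this header), list membership
  // composes like plain set operations:
  //
  //     CPURegList list(x0, x1);
  //     list.Combine(CPURegList(x2, x3));  // list is now {x0, x1, x2, x3}.
  //     list.Remove(x1);                   // list is now {x0, x2, x3}.
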
  static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
    VIXL_ASSERT(list_1.type_ == list_2.type_);
    VIXL_ASSERT(list_1.size_ == list_2.size_);
    return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
  }
  static CPURegList Union(const CPURegList& list_1,
                          const CPURegList& list_2,
                          const CPURegList& list_3);
  static CPURegList Union(const CPURegList& list_1,
                          const CPURegList& list_2,
                          const CPURegList& list_3,
                          const CPURegList& list_4);

  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2) {
    VIXL_ASSERT(list_1.type_ == list_2.type_);
    VIXL_ASSERT(list_1.size_ == list_2.size_);
    return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
  }
  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2,
                                 const CPURegList& list_3);
  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2,
                                 const CPURegList& list_3,
                                 const CPURegList& list_4);

  bool Overlaps(const CPURegList& other) const {
    return (type_ == other.type_) && ((list_ & other.list_) != 0);
  }

  RegList GetList() const {
    VIXL_ASSERT(IsValid());
    return list_;
  }
  VIXL_DEPRECATED("GetList", RegList list() const) { return GetList(); }

  void SetList(RegList new_list) {
    VIXL_ASSERT(IsValid());
    list_ = new_list;
  }
  VIXL_DEPRECATED("SetList", void set_list(RegList new_list)) {
    return SetList(new_list);
  }

  // Remove all callee-saved registers from the list. This can be useful when
  // preparing registers for an AAPCS64 function call, for example.
  void RemoveCalleeSaved();

  // Find the register in this list that appears in `mask` with the lowest or
  // highest code, remove it from the list and return it as a CPURegister. If
  // the list is empty, leave it unchanged and return NoCPUReg.
  CPURegister PopLowestIndex(RegList mask = ~static_cast<RegList>(0));
  CPURegister PopHighestIndex(RegList mask = ~static_cast<RegList>(0));
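
  // A rough usage sketch (illustrative only): a CPURegList can serve as a pool
  // of scratch registers, handing out the lowest-numbered register first.
  //
  //     CPURegList pool(x10, x11, x12);
  //     CPURegister scratch = pool.PopLowestIndex();  // x10; pool is {x11, x12}.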

  // AAPCS64 callee-saved registers.
  static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
  static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);

  // AAPCS64 caller-saved registers. Note that this includes lr.
  // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
  // 64-bits being caller-saved.
  static CPURegList GetCallerSaved(unsigned size = kXRegSize);
  static CPURegList GetCallerSavedV(unsigned size = kDRegSize);

  bool IsEmpty() const {
    VIXL_ASSERT(IsValid());
    return list_ == 0;
  }

  bool IncludesAliasOf(const CPURegister& other) const {
    VIXL_ASSERT(IsValid());
    return (GetBank() == other.GetBank()) && IncludesAliasOf(other.GetCode());
  }

  bool IncludesAliasOf(int code) const {
    VIXL_ASSERT(IsValid());
    return (((static_cast<RegList>(1) << code) & list_) != 0);
  }

  int GetCount() const {
    VIXL_ASSERT(IsValid());
    return CountSetBits(list_);
  }
  VIXL_DEPRECATED("GetCount", int Count() const) { return GetCount(); }

  int GetRegisterSizeInBits() const {
    VIXL_ASSERT(IsValid());
    return size_;
  }
  VIXL_DEPRECATED("GetRegisterSizeInBits", int RegisterSizeInBits() const) {
    return GetRegisterSizeInBits();
  }

  int GetRegisterSizeInBytes() const {
    int size_in_bits = GetRegisterSizeInBits();
    VIXL_ASSERT((size_in_bits % 8) == 0);
    return size_in_bits / 8;
  }
  VIXL_DEPRECATED("GetRegisterSizeInBytes", int RegisterSizeInBytes() const) {
    return GetRegisterSizeInBytes();
  }

  unsigned GetTotalSizeInBytes() const {
    VIXL_ASSERT(IsValid());
    return GetRegisterSizeInBytes() * GetCount();
  }
  VIXL_DEPRECATED("GetTotalSizeInBytes", unsigned TotalSizeInBytes() const) {
    return GetTotalSizeInBytes();
  }

 private:
  // If `size` is CPURegister::kUnknownSize and the type requires a known size,
  // then return an arbitrary-but-valid size.
  //
  // Otherwise, the size is checked for validity and returned unchanged.
  static unsigned GetDefaultSizeFor(CPURegister::RegisterType type,
                                    unsigned size) {
    if (size == CPURegister::kUnknownSize) {
      if (type == CPURegister::kRegister) size = kXRegSize;
      if (type == CPURegister::kVRegister) size = kQRegSize;
      // All other types require kUnknownSize.
    }
    VIXL_ASSERT(CPURegister(0, size, type).IsValid());
    return size;
  }

  RegList list_;
  int size_;
  CPURegister::RegisterType type_;

  bool IsValid() const;
};


// AAPCS64 callee-saved registers.
extern const CPURegList kCalleeSaved;
extern const CPURegList kCalleeSavedV;


// AAPCS64 caller-saved registers. Note that this includes lr.
extern const CPURegList kCallerSaved;
extern const CPURegList kCallerSavedV;

class IntegerOperand;

// Operand.
class Operand {
 public:
  // #<immediate>
  // where <immediate> is int64_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  Operand(int64_t immediate);  // NOLINT(runtime/explicit)

  Operand(IntegerOperand immediate);  // NOLINT(runtime/explicit)

  // rm, {<shift> #<shift_amount>}
  // where <shift> is one of {LSL, LSR, ASR, ROR}.
  //       <shift_amount> is uint6_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  Operand(Register reg,
          Shift shift = LSL,
          unsigned shift_amount = 0);  // NOLINT(runtime/explicit)

  // rm, {<extend> {#<shift_amount>}}
  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
  //       <shift_amount> is uint2_t.
  explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);

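  // To illustrate the forms above (a sketch, not an exhaustive list), an
  // Operand can wrap an immediate, a plain register, or a shifted or extended
  // register:
  //
  //     Operand(42);           // #42
  //     Operand(x1);           // x1
  //     Operand(x1, LSL, 4);   // x1, LSL #4
  //     Operand(w2, UXTW, 3);  // w2, UXTW #3
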
  bool IsImmediate() const;
  bool IsPlainRegister() const;
  bool IsShiftedRegister() const;
  bool IsExtendedRegister() const;
  bool IsZero() const;

  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
  // which helps in the encoding of instructions that use the stack pointer.
  Operand ToExtendedRegister() const;

  int64_t GetImmediate() const {
    VIXL_ASSERT(IsImmediate());
    return immediate_;
  }
  VIXL_DEPRECATED("GetImmediate", int64_t immediate() const) {
    return GetImmediate();
  }

  int64_t GetEquivalentImmediate() const {
    return IsZero() ? 0 : GetImmediate();
  }

  Register GetRegister() const {
    VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
    return reg_;
  }
  VIXL_DEPRECATED("GetRegister", Register reg() const) { return GetRegister(); }
  Register GetBaseRegister() const { return GetRegister(); }

  Shift GetShift() const {
    VIXL_ASSERT(IsShiftedRegister());
    return shift_;
  }
  VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); }

  Extend GetExtend() const {
    VIXL_ASSERT(IsExtendedRegister());
    return extend_;
  }
  VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); }

  unsigned GetShiftAmount() const {
    VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
    return shift_amount_;
  }
  VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) {
    return GetShiftAmount();
  }

 private:
  int64_t immediate_;
  Register reg_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};


// MemOperand represents the addressing mode of a load or store instruction.
// In assembly syntax, MemOperands are normally denoted by one or more elements
// inside or around square brackets.
class MemOperand {
 public:
  // Creates an invalid `MemOperand`.
  MemOperand();
  explicit MemOperand(Register base,
                      int64_t offset = 0,
                      AddrMode addrmode = Offset);
  MemOperand(Register base,
             Register regoffset,
             Shift shift = LSL,
             unsigned shift_amount = 0);
  MemOperand(Register base,
             Register regoffset,
             Extend extend,
             unsigned shift_amount = 0);
  MemOperand(Register base, const Operand& offset, AddrMode addrmode = Offset);

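  // For orientation (an informal sketch, not a complete list), these
  // constructors map onto the usual AArch64 addressing modes:
  //
  //     MemOperand(x0);                 // [x0]
  //     MemOperand(x0, 8);              // [x0, #8]
  //     MemOperand(x0, 16, PreIndex);   // [x0, #16]!
  //     MemOperand(x0, 16, PostIndex);  // [x0], #16
  //     MemOperand(x0, x1, LSL, 3);     // [x0, x1, LSL #3]
  //     MemOperand(x0, w1, SXTW, 2);    // [x0, w1, SXTW #2]
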
  const Register& GetBaseRegister() const { return base_; }

  // If the MemOperand has a register offset, return it. (This also applies to
  // pre- and post-index modes.) Otherwise, return NoReg.
  const Register& GetRegisterOffset() const { return regoffset_; }

  // If the MemOperand has an immediate offset, return it. (This also applies to
  // pre- and post-index modes.) Otherwise, return 0.
  int64_t GetOffset() const { return offset_; }

  AddrMode GetAddrMode() const { return addrmode_; }
  Shift GetShift() const { return shift_; }
  Extend GetExtend() const { return extend_; }

  unsigned GetShiftAmount() const {
    // Extend modes can also encode a shift for some instructions.
    VIXL_ASSERT((GetShift() != NO_SHIFT) || (GetExtend() != NO_EXTEND));
    return shift_amount_;
  }

  // True for MemOperands which represent something like [x0].
  // Currently, this will also return true for [x0, #0], because MemOperand has
  // no way to distinguish the two.
  bool IsPlainRegister() const;

  // True for MemOperands which represent something like [x0], or for compound
  // MemOperands which are functionally equivalent, such as [x0, #0], [x0, xzr]
  // or [x0, wzr, UXTW #3].
  bool IsEquivalentToPlainRegister() const;

  // True for immediate-offset (but not indexed) MemOperands.
  bool IsImmediateOffset() const;
  // True for register-offset (but not indexed) MemOperands.
  bool IsRegisterOffset() const;
  // True for immediate or register pre-indexed MemOperands.
  bool IsPreIndex() const;
  // True for immediate or register post-indexed MemOperands.
  bool IsPostIndex() const;
  // True for immediate pre-indexed MemOperands, [reg, #imm]!
  bool IsImmediatePreIndex() const;
  // True for immediate post-indexed MemOperands, [reg], #imm
  bool IsImmediatePostIndex() const;

  void AddOffset(int64_t offset);

  bool IsValid() const {
    return base_.IsValid() &&
           ((addrmode_ == Offset) || (addrmode_ == PreIndex) ||
            (addrmode_ == PostIndex)) &&
           ((shift_ == NO_SHIFT) || (extend_ == NO_EXTEND)) &&
           ((offset_ == 0) || !regoffset_.IsValid());
  }

  bool Equals(const MemOperand& other) const {
    return base_.Is(other.base_) && regoffset_.Is(other.regoffset_) &&
           (offset_ == other.offset_) && (addrmode_ == other.addrmode_) &&
           (shift_ == other.shift_) && (extend_ == other.extend_) &&
           (shift_amount_ == other.shift_amount_);
  }

 private:
  Register base_;
  Register regoffset_;
  int64_t offset_;
  AddrMode addrmode_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};

// SVE supports memory operands which don't make sense to the core ISA, such as
// scatter-gather forms, in which either the base or offset registers are
// vectors. This class exists to avoid complicating core-ISA code with
// SVE-specific behaviour.
//
// Note that SVE does not support any pre- or post-index modes.
class SVEMemOperand {
 public:
  // "vector-plus-immediate", like [z0.s, #21]
  explicit SVEMemOperand(ZRegister base, uint64_t offset = 0)
      : base_(base),
        regoffset_(NoReg),
        offset_(RawbitsToInt64(offset)),
        mod_(NO_SVE_OFFSET_MODIFIER),
        shift_amount_(0) {
    VIXL_ASSERT(IsVectorPlusImmediate());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-immediate", like [x0], [x0, #42] or [x0, #42, MUL_VL]
  // The only supported modifiers are NO_SVE_OFFSET_MODIFIER or SVE_MUL_VL.
  //
  // Note that VIXL cannot currently distinguish between `SVEMemOperand(x0)` and
  // `SVEMemOperand(x0, 0)`. This is only significant in scalar-plus-scalar
  // instructions where xm defaults to xzr. However, users should not rely on
  // `SVEMemOperand(x0, 0)` being accepted in such cases.
  explicit SVEMemOperand(Register base,
                         uint64_t offset = 0,
                         SVEOffsetModifier mod = NO_SVE_OFFSET_MODIFIER)
      : base_(base),
        regoffset_(NoReg),
        offset_(RawbitsToInt64(offset)),
        mod_(mod),
        shift_amount_(0) {
    VIXL_ASSERT(IsScalarPlusImmediate());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-scalar", like [x0, x1]
  // "scalar-plus-vector", like [x0, z1.d]
  SVEMemOperand(Register base, CPURegister offset)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(NO_SVE_OFFSET_MODIFIER),
        shift_amount_(0) {
    VIXL_ASSERT(IsScalarPlusScalar() || IsScalarPlusVector());
    if (offset.IsZero()) VIXL_ASSERT(IsEquivalentToScalar());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-vector", like [x0, z1.d, UXTW]
  // The type of `mod` can be any `SVEOffsetModifier` (other than LSL), or a
  // corresponding `Extend` value.
  template <typename M>
  SVEMemOperand(Register base, ZRegister offset, M mod)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(0) {
    VIXL_ASSERT(mod_ != SVE_LSL);  // LSL requires an explicit shift amount.
    VIXL_ASSERT(IsScalarPlusVector());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-scalar", like [x0, x1, LSL #1]
  // "scalar-plus-vector", like [x0, z1.d, LSL #2]
  // The type of `mod` can be any `SVEOffsetModifier`, or a corresponding
  // `Shift` or `Extend` value.
  template <typename M>
  SVEMemOperand(Register base, CPURegister offset, M mod, unsigned shift_amount)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(shift_amount) {
    VIXL_ASSERT(IsValid());
  }

  // "vector-plus-scalar", like [z0.d, x0]
  SVEMemOperand(ZRegister base, Register offset)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(NO_SVE_OFFSET_MODIFIER),
        shift_amount_(0) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(IsVectorPlusScalar());
  }

  // "vector-plus-vector", like [z0.d, z1.d, UXTW]
  template <typename M = SVEOffsetModifier>
  SVEMemOperand(ZRegister base,
                ZRegister offset,
                M mod = NO_SVE_OFFSET_MODIFIER,
                unsigned shift_amount = 0)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(shift_amount) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(IsVectorPlusVector());
  }
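
  // Putting the forms above together (an informal sketch, assuming Z registers
  // with an explicit lane size, such as z1.VnD()):
  //
  //     SVEMemOperand(x0);                  // [x0]
  //     SVEMemOperand(x0, 42, SVE_MUL_VL);  // [x0, #42, MUL VL]
  //     SVEMemOperand(x0, x1, LSL, 3);      // [x0, x1, LSL #3]
  //     SVEMemOperand(x0, z1.VnD(), SXTW);  // [x0, z1.d, SXTW]
  //     SVEMemOperand(z2.VnS(), 4);         // [z2.s, #4]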

  // True for SVEMemOperands which represent something like [x0].
  // This will also return true for [x0, #0], because there is no way
  // to distinguish the two.
  bool IsPlainScalar() const {
    return IsScalarPlusImmediate() && (offset_ == 0);
  }

  // True for SVEMemOperands which represent something like [x0], or for
  // compound SVEMemOperands which are functionally equivalent, such as
  // [x0, #0], [x0, xzr] or [x0, wzr, UXTW #3].
  bool IsEquivalentToScalar() const;

  // True for SVEMemOperands like [x0], [x0, #0], false for [x0, xzr] and
  // similar.
  bool IsPlainRegister() const;

  bool IsScalarPlusImmediate() const {
    return base_.IsX() && regoffset_.IsNone() &&
           ((mod_ == NO_SVE_OFFSET_MODIFIER) || IsMulVl());
  }

  bool IsScalarPlusScalar() const {
    // SVE offers no extend modes for scalar-plus-scalar, so both registers must
    // be X registers.
    return base_.IsX() && regoffset_.IsX() &&
           ((mod_ == NO_SVE_OFFSET_MODIFIER) || (mod_ == SVE_LSL));
  }

  bool IsScalarPlusVector() const {
    // The modifier can be LSL or an extend mode (UXTW or SXTW) here. Unlike
    // in the core ISA, these extend modes do not imply an S-sized lane, so the
    // modifier is independent from the lane size. The architecture describes
    // [US]XTW with a D-sized lane as an "unpacked" offset.
    return base_.IsX() && regoffset_.IsZRegister() &&
           (regoffset_.IsLaneSizeS() || regoffset_.IsLaneSizeD()) && !IsMulVl();
  }

  bool IsVectorPlusImmediate() const {
    return base_.IsZRegister() &&
           (base_.IsLaneSizeS() || base_.IsLaneSizeD()) &&
           regoffset_.IsNone() && (mod_ == NO_SVE_OFFSET_MODIFIER);
  }

  bool IsVectorPlusScalar() const {
    return base_.IsZRegister() && regoffset_.IsX() &&
           (base_.IsLaneSizeS() || base_.IsLaneSizeD());
  }

  bool IsVectorPlusVector() const {
    return base_.IsZRegister() && regoffset_.IsZRegister() && (offset_ == 0) &&
           AreSameFormat(base_, regoffset_) &&
           (base_.IsLaneSizeS() || base_.IsLaneSizeD());
  }

  bool IsContiguous() const { return !IsScatterGather(); }
  bool IsScatterGather() const {
    return base_.IsZRegister() || regoffset_.IsZRegister();
  }

  // TODO: If necessary, add helpers like `HasScalarBase()`.

  Register GetScalarBase() const {
    VIXL_ASSERT(base_.IsX());
    return Register(base_);
  }

  ZRegister GetVectorBase() const {
    VIXL_ASSERT(base_.IsZRegister());
    VIXL_ASSERT(base_.HasLaneSize());
    return ZRegister(base_);
  }

  Register GetScalarOffset() const {
    VIXL_ASSERT(regoffset_.IsRegister());
    return Register(regoffset_);
  }

  ZRegister GetVectorOffset() const {
    VIXL_ASSERT(regoffset_.IsZRegister());
    VIXL_ASSERT(regoffset_.HasLaneSize());
    return ZRegister(regoffset_);
  }

  int64_t GetImmediateOffset() const {
    VIXL_ASSERT(regoffset_.IsNone());
    return offset_;
  }

  SVEOffsetModifier GetOffsetModifier() const { return mod_; }
  unsigned GetShiftAmount() const { return shift_amount_; }

  bool IsEquivalentToLSL(unsigned amount) const {
    if (shift_amount_ != amount) return false;
    if (amount == 0) {
      // No-shift is equivalent to "LSL #0".
      return ((mod_ == SVE_LSL) || (mod_ == NO_SVE_OFFSET_MODIFIER));
    }
    return mod_ == SVE_LSL;
  }

  bool IsMulVl() const { return mod_ == SVE_MUL_VL; }

  bool IsValid() const;

 private:
  // Allow standard `Shift` and `Extend` arguments to be used.
  SVEOffsetModifier GetSVEOffsetModifierFor(Shift shift) {
    if (shift == LSL) return SVE_LSL;
    if (shift == NO_SHIFT) return NO_SVE_OFFSET_MODIFIER;
    // SVE does not accept any other shift.
    VIXL_UNIMPLEMENTED();
    return NO_SVE_OFFSET_MODIFIER;
  }

  SVEOffsetModifier GetSVEOffsetModifierFor(Extend extend = NO_EXTEND) {
    if (extend == UXTW) return SVE_UXTW;
    if (extend == SXTW) return SVE_SXTW;
    if (extend == NO_EXTEND) return NO_SVE_OFFSET_MODIFIER;
    // SVE does not accept any other extend mode.
    VIXL_UNIMPLEMENTED();
    return NO_SVE_OFFSET_MODIFIER;
  }

  SVEOffsetModifier GetSVEOffsetModifierFor(SVEOffsetModifier mod) {
    return mod;
  }

  CPURegister base_;
  CPURegister regoffset_;
  int64_t offset_;
  SVEOffsetModifier mod_;
  unsigned shift_amount_;
};

// Represent a signed or unsigned integer operand.
//
// This is designed to make instructions which naturally accept a _signed_
// immediate easier to implement and use, when we also want users to be able to
// specify raw-bits values (such as with hexadecimal constants). The advantage
// of this class over a simple uint64_t (with implicit C++ sign-extension) is
// that this class can strictly check the range of allowed values. With a simple
// uint64_t, it is impossible to distinguish -1 from UINT64_MAX.
//
// For example, these instructions are equivalent:
//
//     __ Insr(z0.VnB(), -1);
//     __ Insr(z0.VnB(), 0xff);
//
// ... as are these:
//
//     __ Insr(z0.VnD(), -1);
//     __ Insr(z0.VnD(), 0xffffffffffffffff);
//
// ... but this is invalid:
//
//     __ Insr(z0.VnB(), 0xffffffffffffffff);  // Too big for B-sized lanes.
class IntegerOperand {
 public:
#define VIXL_INT_TYPES(V) \
  V(char) V(short) V(int) V(long) V(long long)  // NOLINT(runtime/int)
#define VIXL_DECL_INT_OVERLOADS(T)                                        \
  /* These are allowed to be implicit constructors because this is a */   \
  /* wrapper class that doesn't normally perform any type conversion. */  \
  IntegerOperand(signed T immediate) /* NOLINT(runtime/explicit) */       \
      : raw_bits_(immediate),        /* Allow implicit sign-extension. */ \
        is_negative_(immediate < 0) {}                                    \
  IntegerOperand(unsigned T immediate) /* NOLINT(runtime/explicit) */     \
      : raw_bits_(immediate), is_negative_(false) {}
  VIXL_INT_TYPES(VIXL_DECL_INT_OVERLOADS)
#undef VIXL_DECL_INT_OVERLOADS
#undef VIXL_INT_TYPES

  // TODO: `Operand` can currently only hold an int64_t, so some large, unsigned
  // values will be misrepresented here.
  explicit IntegerOperand(const Operand& operand)
      : raw_bits_(operand.GetEquivalentImmediate()),
        is_negative_(operand.GetEquivalentImmediate() < 0) {}

  bool IsIntN(unsigned n) const {
    return is_negative_ ? vixl::IsIntN(n, RawbitsToInt64(raw_bits_))
                        : vixl::IsIntN(n, raw_bits_);
  }
  bool IsUintN(unsigned n) const {
    return !is_negative_ && vixl::IsUintN(n, raw_bits_);
  }

  bool IsUint8() const { return IsUintN(8); }
  bool IsUint16() const { return IsUintN(16); }
  bool IsUint32() const { return IsUintN(32); }
  bool IsUint64() const { return IsUintN(64); }

  bool IsInt8() const { return IsIntN(8); }
  bool IsInt16() const { return IsIntN(16); }
  bool IsInt32() const { return IsIntN(32); }
  bool IsInt64() const { return IsIntN(64); }

  bool FitsInBits(unsigned n) const {
    return is_negative_ ? IsIntN(n) : IsUintN(n);
  }
  bool FitsInLane(const CPURegister& zd) const {
    return FitsInBits(zd.GetLaneSizeInBits());
  }
  bool FitsInSignedLane(const CPURegister& zd) const {
    return IsIntN(zd.GetLaneSizeInBits());
  }
  bool FitsInUnsignedLane(const CPURegister& zd) const {
    return IsUintN(zd.GetLaneSizeInBits());
  }

  // Cast a value in the range [INT<n>_MIN, UINT<n>_MAX] to an unsigned integer
  // in the range [0, UINT<n>_MAX] (using two's complement mapping).
  uint64_t AsUintN(unsigned n) const {
    VIXL_ASSERT(FitsInBits(n));
    return raw_bits_ & GetUintMask(n);
  }

  uint8_t AsUint8() const { return static_cast<uint8_t>(AsUintN(8)); }
  uint16_t AsUint16() const { return static_cast<uint16_t>(AsUintN(16)); }
  uint32_t AsUint32() const { return static_cast<uint32_t>(AsUintN(32)); }
  uint64_t AsUint64() const { return AsUintN(64); }

  // Cast a value in the range [INT<n>_MIN, UINT<n>_MAX] to a signed integer in
  // the range [INT<n>_MIN, INT<n>_MAX] (using two's complement mapping).
  int64_t AsIntN(unsigned n) const {
    VIXL_ASSERT(FitsInBits(n));
    return ExtractSignedBitfield64(n - 1, 0, raw_bits_);
  }

  int8_t AsInt8() const { return static_cast<int8_t>(AsIntN(8)); }
  int16_t AsInt16() const { return static_cast<int16_t>(AsIntN(16)); }
  int32_t AsInt32() const { return static_cast<int32_t>(AsIntN(32)); }
  int64_t AsInt64() const { return AsIntN(64); }
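
  // A small worked example (illustrative only): the same IntegerOperand can be
  // queried as a signed or an unsigned quantity, as long as the value fits.
  //
  //     IntegerOperand imm(-1);
  //     imm.IsInt8();   // -> true
  //     imm.IsUint8();  // -> false (the arithmetic value is negative)
  //     imm.AsUint8();  // -> 0xff (two's complement mapping)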

  // Several instructions encode a signed int<N>_t, which is then (optionally)
  // left-shifted and sign-extended to a Z register lane with a size which may
  // be larger than N. This helper tries to find an int<N>_t such that the
  // IntegerOperand's arithmetic value is reproduced in each lane.
  //
  // This is the mechanism that allows `Insr(z0.VnB(), 0xff)` to be treated as
  // `Insr(z0.VnB(), -1)`.
  template <unsigned N, unsigned kShift, typename T>
  bool TryEncodeAsShiftedIntNForLane(const CPURegister& zd, T* imm) const {
    VIXL_STATIC_ASSERT(std::numeric_limits<T>::digits > N);
    VIXL_ASSERT(FitsInLane(zd));
    if ((raw_bits_ & GetUintMask(kShift)) != 0) return false;

    // Reverse the specified left-shift.
    IntegerOperand unshifted(*this);
    unshifted.ArithmeticShiftRight(kShift);

    if (unshifted.IsIntN(N)) {
      // This is trivial, since sign-extension produces the same arithmetic
      // value irrespective of the destination size.
      *imm = static_cast<T>(unshifted.AsIntN(N));
      return true;
    }

    // Otherwise, we might be able to use the sign-extension to produce the
    // desired bit pattern. We can only do this for values in the range
    // [INT<N>_MAX + 1, UINT<N>_MAX], where the highest set bit is the sign bit.
    //
    // The lane size has to be adjusted to compensate for `kShift`, since the
    // high bits will be dropped when the encoded value is left-shifted.
    if (unshifted.IsUintN(zd.GetLaneSizeInBits() - kShift)) {
      int64_t encoded = unshifted.AsIntN(zd.GetLaneSizeInBits() - kShift);
      if (vixl::IsIntN(N, encoded)) {
        *imm = static_cast<T>(encoded);
        return true;
      }
    }
    return false;
  }

  // As above, but `kShift` is written to the `*shift` parameter on success, so
  // that it is easy to chain calls like this:
  //
  //     if (imm.TryEncodeAsShiftedIntNForLane<8, 0>(zd, &imm8, &shift) ||
  //         imm.TryEncodeAsShiftedIntNForLane<8, 8>(zd, &imm8, &shift)) {
  //       insn(zd, imm8, shift)
  //     }
  template <unsigned N, unsigned kShift, typename T, typename S>
  bool TryEncodeAsShiftedIntNForLane(const CPURegister& zd,
                                     T* imm,
                                     S* shift) const {
    if (TryEncodeAsShiftedIntNForLane<N, kShift>(zd, imm)) {
      *shift = kShift;
      return true;
    }
    return false;
  }

  // As above, but assume that `kShift` is 0.
  template <unsigned N, typename T>
  bool TryEncodeAsIntNForLane(const CPURegister& zd, T* imm) const {
    return TryEncodeAsShiftedIntNForLane<N, 0>(zd, imm);
  }

  // As above, but for unsigned fields. This is usually a simple operation, but
  // is provided for symmetry.
  template <unsigned N, unsigned kShift, typename T>
  bool TryEncodeAsShiftedUintNForLane(const CPURegister& zd, T* imm) const {
    VIXL_STATIC_ASSERT(std::numeric_limits<T>::digits > N);
    VIXL_ASSERT(FitsInLane(zd));

    // TODO: Should we convert -1 to 0xff here?
    if (is_negative_) return false;
    USE(zd);

    if ((raw_bits_ & GetUintMask(kShift)) != 0) return false;

    if (vixl::IsUintN(N, raw_bits_ >> kShift)) {
      *imm = static_cast<T>(raw_bits_ >> kShift);
      return true;
    }
    return false;
  }

  template <unsigned N, unsigned kShift, typename T, typename S>
  bool TryEncodeAsShiftedUintNForLane(const CPURegister& zd,
                                      T* imm,
                                      S* shift) const {
    if (TryEncodeAsShiftedUintNForLane<N, kShift>(zd, imm)) {
      *shift = kShift;
      return true;
    }
    return false;
  }

  bool IsZero() const { return raw_bits_ == 0; }
  bool IsNegative() const { return is_negative_; }
  bool IsPositiveOrZero() const { return !is_negative_; }

  uint64_t GetMagnitude() const {
    return is_negative_ ? -raw_bits_ : raw_bits_;
  }

 private:
  // Shift the arithmetic value right, with sign extension if is_negative_.
  void ArithmeticShiftRight(int shift) {
    VIXL_ASSERT((shift >= 0) && (shift < 64));
    if (shift == 0) return;
    if (is_negative_) {
      raw_bits_ = ExtractSignedBitfield64(63, shift, raw_bits_);
    } else {
      raw_bits_ >>= shift;
    }
  }

  uint64_t raw_bits_;
  bool is_negative_;
};

// This is an abstraction that can represent a register or memory location. The
// `MacroAssembler` provides helpers to move data between generic operands.
class GenericOperand {
 public:
  GenericOperand() { VIXL_ASSERT(!IsValid()); }
  GenericOperand(const CPURegister& reg);  // NOLINT(runtime/explicit)
  GenericOperand(const MemOperand& mem_op,
                 size_t mem_op_size = 0);  // NOLINT(runtime/explicit)

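  // An informal sketch of the two flavours (assuming the usual register
  // aliases and the kXRegSizeInBytes constant from the VIXL headers):
  //
  //     GenericOperand in_reg(x0);                             // A register location.
  //     GenericOperand on_stack(MemOperand(sp, 16), kXRegSizeInBytes);  // Memory.
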
  bool IsValid() const { return cpu_register_.IsValid() != mem_op_.IsValid(); }

  bool Equals(const GenericOperand& other) const;

  bool IsCPURegister() const {
    VIXL_ASSERT(IsValid());
    return cpu_register_.IsValid();
  }

  bool IsRegister() const {
    return IsCPURegister() && cpu_register_.IsRegister();
  }

  bool IsVRegister() const {
    return IsCPURegister() && cpu_register_.IsVRegister();
  }

  bool IsSameCPURegisterType(const GenericOperand& other) {
    return IsCPURegister() && other.IsCPURegister() &&
           GetCPURegister().IsSameType(other.GetCPURegister());
  }

  bool IsMemOperand() const {
    VIXL_ASSERT(IsValid());
    return mem_op_.IsValid();
  }

  CPURegister GetCPURegister() const {
    VIXL_ASSERT(IsCPURegister());
    return cpu_register_;
  }

  MemOperand GetMemOperand() const {
    VIXL_ASSERT(IsMemOperand());
    return mem_op_;
  }

  size_t GetMemOperandSizeInBytes() const {
    VIXL_ASSERT(IsMemOperand());
    return mem_op_size_;
  }

  size_t GetSizeInBytes() const {
    return IsCPURegister() ? cpu_register_.GetSizeInBytes()
                           : GetMemOperandSizeInBytes();
  }

  size_t GetSizeInBits() const { return GetSizeInBytes() * kBitsPerByte; }

 private:
  CPURegister cpu_register_;
  MemOperand mem_op_;
  // The size of the memory region pointed to, in bytes.
  // We only support sizes up to X/D register sizes.
  size_t mem_op_size_;
};
}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_OPERANDS_AARCH64_H_