// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_OPERANDS_AARCH64_H_
#define VIXL_AARCH64_OPERANDS_AARCH64_H_

#include <sstream>
#include <string>

#include "instructions-aarch64.h"
#include "registers-aarch64.h"

namespace vixl {
namespace aarch64 {

// Lists of registers.
class CPURegList {
 public:
  explicit CPURegList(CPURegister reg1,
                      CPURegister reg2 = NoCPUReg,
                      CPURegister reg3 = NoCPUReg,
                      CPURegister reg4 = NoCPUReg)
      : list_(reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit()),
        size_(reg1.GetSizeInBits()),
        type_(reg1.GetType()) {
    VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
    VIXL_ASSERT(IsValid());
  }

  CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
      : list_(list), size_(size), type_(type) {
    VIXL_ASSERT(IsValid());
  }

  CPURegList(CPURegister::RegisterType type,
             unsigned size,
             unsigned first_reg,
             unsigned last_reg)
      : size_(size), type_(type) {
    VIXL_ASSERT(
        ((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
        ((type == CPURegister::kVRegister) &&
         (last_reg < kNumberOfVRegisters)));
    VIXL_ASSERT(last_reg >= first_reg);
    list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
    list_ &= ~((UINT64_C(1) << first_reg) - 1);
    VIXL_ASSERT(IsValid());
  }
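
  // Illustrative constructions (the register choices below are arbitrary
  // examples, not part of the API):
  //
  //   CPURegList saved(x19, x20, x21, x22);  // Explicit registers.
  //   CPURegList x19_to_x28(CPURegister::kRegister, kXRegSize, 19, 28);
  //   CPURegList d0_to_d7(CPURegister::kVRegister, kDRegSize, 0, 7);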

  // Construct an empty CPURegList with the specified size and type. If `size`
  // is CPURegister::kUnknownSize and the register type requires a size, a
  // valid but unspecified default will be picked.
  static CPURegList Empty(CPURegister::RegisterType type,
                          unsigned size = CPURegister::kUnknownSize) {
    return CPURegList(type, GetDefaultSizeFor(type, size), 0);
  }

  // Construct a CPURegList with all possible registers with the specified size
  // and type. If `size` is CPURegister::kUnknownSize and the register type
  // requires a size, a valid but unspecified default will be picked.
  static CPURegList All(CPURegister::RegisterType type,
                        unsigned size = CPURegister::kUnknownSize) {
    unsigned number_of_registers = (CPURegister::GetMaxCodeFor(type) + 1);
    RegList list = (static_cast<RegList>(1) << number_of_registers) - 1;
    if (type == CPURegister::kRegister) {
      // GetMaxCodeFor(kRegister) ignores SP, so explicitly include it.
      list |= (static_cast<RegList>(1) << kSPRegInternalCode);
    }
    return CPURegList(type, GetDefaultSizeFor(type, size), list);
  }

  CPURegister::RegisterType GetType() const {
    VIXL_ASSERT(IsValid());
    return type_;
  }
  VIXL_DEPRECATED("GetType", CPURegister::RegisterType type() const) {
    return GetType();
  }

  CPURegister::RegisterBank GetBank() const {
    return CPURegister::GetBankFor(GetType());
  }

  // Combine another CPURegList into this one. Registers that already exist in
  // this list are left unchanged. The type and size of the registers in the
  // 'other' list must match those in this list.
  void Combine(const CPURegList& other) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
    list_ |= other.GetList();
  }

  // Remove every register in the other CPURegList from this one. Registers
  // that do not exist in this list are ignored. The type and size of the
  // registers in the 'other' list must match those in this list.
  void Remove(const CPURegList& other) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
    list_ &= ~other.GetList();
  }

  // Variants of Combine and Remove which take a single register.
  void Combine(const CPURegister& other) {
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetSizeInBits() == size_);
    Combine(other.GetCode());
  }

  void Remove(const CPURegister& other) {
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetSizeInBits() == size_);
    Remove(other.GetCode());
  }
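
  // For illustration, a scratch list built by set operations (the register
  // choices are arbitrary examples):
  //
  //   CPURegList scratch(x8, x9);
  //   scratch.Combine(x10);                // {x8, x9, x10}
  //   scratch.Remove(CPURegList(x8, x9));  // {x10}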

  // Variants of Combine and Remove which take a single register by its code;
  // the type and size of the register is inferred from this list.
  void Combine(int code) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
    list_ |= (UINT64_C(1) << code);
  }

  void Remove(int code) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
    list_ &= ~(UINT64_C(1) << code);
  }

  static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
    VIXL_ASSERT(list_1.type_ == list_2.type_);
    VIXL_ASSERT(list_1.size_ == list_2.size_);
    return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
  }
  static CPURegList Union(const CPURegList& list_1,
                          const CPURegList& list_2,
                          const CPURegList& list_3);
  static CPURegList Union(const CPURegList& list_1,
                          const CPURegList& list_2,
                          const CPURegList& list_3,
                          const CPURegList& list_4);

  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2) {
    VIXL_ASSERT(list_1.type_ == list_2.type_);
    VIXL_ASSERT(list_1.size_ == list_2.size_);
    return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
  }
  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2,
                                 const CPURegList& list_3);
  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2,
                                 const CPURegList& list_3,
                                 const CPURegList& list_4);

  bool Overlaps(const CPURegList& other) const {
    return (type_ == other.type_) && ((list_ & other.list_) != 0);
  }

  RegList GetList() const {
    VIXL_ASSERT(IsValid());
    return list_;
  }
  VIXL_DEPRECATED("GetList", RegList list() const) { return GetList(); }

  void SetList(RegList new_list) {
    VIXL_ASSERT(IsValid());
    list_ = new_list;
  }
  VIXL_DEPRECATED("SetList", void set_list(RegList new_list)) {
    return SetList(new_list);
  }

  // Remove all callee-saved registers from the list. This can be useful when
  // preparing registers for an AAPCS64 function call, for example.
  void RemoveCalleeSaved();

  // Find the register in this list that appears in `mask` with the lowest or
  // highest code, remove it from the list and return it as a CPURegister. If
  // the list is empty, leave it unchanged and return NoCPUReg.
  CPURegister PopLowestIndex(RegList mask = ~static_cast<RegList>(0));
  CPURegister PopHighestIndex(RegList mask = ~static_cast<RegList>(0));

  // AAPCS64 callee-saved registers.
  static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
  static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);
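
  // A sketch of how a list might be consumed, purely for illustration:
  //
  //   CPURegList to_spill = CPURegList::GetCalleeSaved();
  //   while (!to_spill.IsEmpty()) {
  //     CPURegister reg = to_spill.PopLowestIndex();
  //     // ... spill `reg` ...
  //   }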

  // AAPCS64 caller-saved registers. Note that this includes lr.
  // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
  // 64 bits being caller-saved.
  static CPURegList GetCallerSaved(unsigned size = kXRegSize);
  static CPURegList GetCallerSavedV(unsigned size = kDRegSize);

  bool IsEmpty() const {
    VIXL_ASSERT(IsValid());
    return list_ == 0;
  }

  bool IncludesAliasOf(const CPURegister& other) const {
    VIXL_ASSERT(IsValid());
    return (GetBank() == other.GetBank()) && IncludesAliasOf(other.GetCode());
  }

  bool IncludesAliasOf(int code) const {
    VIXL_ASSERT(IsValid());
    return (((static_cast<RegList>(1) << code) & list_) != 0);
  }

  int GetCount() const {
    VIXL_ASSERT(IsValid());
    return CountSetBits(list_);
  }
  VIXL_DEPRECATED("GetCount", int Count() const) { return GetCount(); }

  int GetRegisterSizeInBits() const {
    VIXL_ASSERT(IsValid());
    return size_;
  }
  VIXL_DEPRECATED("GetRegisterSizeInBits", int RegisterSizeInBits() const) {
    return GetRegisterSizeInBits();
  }

  int GetRegisterSizeInBytes() const {
    int size_in_bits = GetRegisterSizeInBits();
    VIXL_ASSERT((size_in_bits % 8) == 0);
    return size_in_bits / 8;
  }
  VIXL_DEPRECATED("GetRegisterSizeInBytes", int RegisterSizeInBytes() const) {
    return GetRegisterSizeInBytes();
  }

  unsigned GetTotalSizeInBytes() const {
    VIXL_ASSERT(IsValid());
    return GetRegisterSizeInBytes() * GetCount();
  }
  VIXL_DEPRECATED("GetTotalSizeInBytes", unsigned TotalSizeInBytes() const) {
    return GetTotalSizeInBytes();
  }

 private:
  // If `size` is CPURegister::kUnknownSize and the type requires a known size,
  // then return an arbitrary-but-valid size.
  //
  // Otherwise, the size is checked for validity and returned unchanged.
  static unsigned GetDefaultSizeFor(CPURegister::RegisterType type,
                                    unsigned size) {
    if (size == CPURegister::kUnknownSize) {
      if (type == CPURegister::kRegister) size = kXRegSize;
      if (type == CPURegister::kVRegister) size = kQRegSize;
      // All other types require kUnknownSize.
    }
    VIXL_ASSERT(CPURegister(0, size, type).IsValid());
    return size;
  }

  RegList list_;
  int size_;
  CPURegister::RegisterType type_;

  bool IsValid() const;
};


// AAPCS64 callee-saved registers.
extern const CPURegList kCalleeSaved;
extern const CPURegList kCalleeSavedV;


// AAPCS64 caller-saved registers. Note that this includes lr.
extern const CPURegList kCallerSaved;
extern const CPURegList kCallerSavedV;

class IntegerOperand;

// Operand.
class Operand {
 public:
  // #<immediate>
  // where <immediate> is int64_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  Operand(int64_t immediate);  // NOLINT(runtime/explicit)

  Operand(IntegerOperand immediate);  // NOLINT(runtime/explicit)

  // rm, {<shift> #<shift_amount>}
  // where <shift> is one of {LSL, LSR, ASR, ROR}.
  // <shift_amount> is uint6_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
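  //
  // Illustrative uses (the registers and amounts are arbitrary examples):
  //   Operand(x1);           // x1, with no shift.
  //   Operand(x1, LSL, 4);   // x1, LSL #4
  //   Operand(w2, ASR, 2);   // w2, ASR #2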
  Operand(Register reg,
          Shift shift = LSL,
          unsigned shift_amount = 0);  // NOLINT(runtime/explicit)

  // rm, {<extend> {#<shift_amount>}}
  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
  // <shift_amount> is uint2_t.
  explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);

  bool IsImmediate() const;
  bool IsPlainRegister() const;
  bool IsShiftedRegister() const;
  bool IsExtendedRegister() const;
  bool IsZero() const;

  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
  // which helps in the encoding of instructions that use the stack pointer.
  Operand ToExtendedRegister() const;

  int64_t GetImmediate() const {
    VIXL_ASSERT(IsImmediate());
    return immediate_;
  }
  VIXL_DEPRECATED("GetImmediate", int64_t immediate() const) {
    return GetImmediate();
  }

  int64_t GetEquivalentImmediate() const {
    return IsZero() ? 0 : GetImmediate();
  }

  Register GetRegister() const {
    VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
    return reg_;
  }
  VIXL_DEPRECATED("GetRegister", Register reg() const) { return GetRegister(); }
  Register GetBaseRegister() const { return GetRegister(); }

  Shift GetShift() const {
    VIXL_ASSERT(IsShiftedRegister());
    return shift_;
  }
  VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); }

  Extend GetExtend() const {
    VIXL_ASSERT(IsExtendedRegister());
    return extend_;
  }
  VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); }

  unsigned GetShiftAmount() const {
    VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
    return shift_amount_;
  }
  VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) {
    return GetShiftAmount();
  }

 private:
  int64_t immediate_;
  Register reg_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};


// MemOperand represents the addressing mode of a load or store instruction.
// In assembly syntax, MemOperands are normally denoted by one or more elements
// inside or around square brackets.
class MemOperand {
 public:
  // Creates an invalid `MemOperand`.
  MemOperand();
  explicit MemOperand(Register base,
                      int64_t offset = 0,
                      AddrMode addrmode = Offset);
  MemOperand(Register base,
             Register regoffset,
             Shift shift = LSL,
             unsigned shift_amount = 0);
  MemOperand(Register base,
             Register regoffset,
             Extend extend,
             unsigned shift_amount = 0);
  MemOperand(Register base, const Operand& offset, AddrMode addrmode = Offset);
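
  // Illustrative addressing modes (the registers and offsets are arbitrary
  // examples):
  //
  //   MemOperand(x0);                  // [x0]
  //   MemOperand(x1, 16);              // [x1, #16]
  //   MemOperand(x2, x3, LSL, 3);      // [x2, x3, LSL #3]
  //   MemOperand(x4, w5, SXTW, 2);     // [x4, w5, SXTW #2]
  //   MemOperand(x6, 16, PreIndex);    // [x6, #16]!
  //   MemOperand(x6, 16, PostIndex);   // [x6], #16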

  const Register& GetBaseRegister() const { return base_; }

  // If the MemOperand has a register offset, return it. (This also applies to
  // pre- and post-index modes.) Otherwise, return NoReg.
  const Register& GetRegisterOffset() const { return regoffset_; }

  // If the MemOperand has an immediate offset, return it. (This also applies
  // to pre- and post-index modes.) Otherwise, return 0.
  int64_t GetOffset() const { return offset_; }

  AddrMode GetAddrMode() const { return addrmode_; }
  Shift GetShift() const { return shift_; }
  Extend GetExtend() const { return extend_; }

  unsigned GetShiftAmount() const {
    // Extend modes can also encode a shift for some instructions.
    VIXL_ASSERT((GetShift() != NO_SHIFT) || (GetExtend() != NO_EXTEND));
    return shift_amount_;
  }

  // True for MemOperands which represent something like [x0].
  // Currently, this will also return true for [x0, #0], because MemOperand has
  // no way to distinguish the two.
  bool IsPlainRegister() const;

  // True for MemOperands which represent something like [x0], or for compound
  // MemOperands which are functionally equivalent, such as [x0, #0], [x0, xzr]
  // or [x0, wzr, UXTW #3].
  bool IsEquivalentToPlainRegister() const;

  // True for immediate-offset (but not indexed) MemOperands.
  bool IsImmediateOffset() const;
  // True for register-offset (but not indexed) MemOperands.
  bool IsRegisterOffset() const;

  bool IsPreIndex() const;
  bool IsPostIndex() const;

  void AddOffset(int64_t offset);

  bool IsValid() const {
    return base_.IsValid() &&
           ((addrmode_ == Offset) || (addrmode_ == PreIndex) ||
            (addrmode_ == PostIndex)) &&
           ((shift_ == NO_SHIFT) || (extend_ == NO_EXTEND)) &&
           ((offset_ == 0) || !regoffset_.IsValid());
  }

  bool Equals(const MemOperand& other) const {
    return base_.Is(other.base_) && regoffset_.Is(other.regoffset_) &&
           (offset_ == other.offset_) && (addrmode_ == other.addrmode_) &&
           (shift_ == other.shift_) && (extend_ == other.extend_) &&
           (shift_amount_ == other.shift_amount_);
  }

 private:
  Register base_;
  Register regoffset_;
  int64_t offset_;
  AddrMode addrmode_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};

// SVE supports memory operands which don't make sense to the core ISA, such as
// scatter-gather forms, in which either the base or offset registers are
// vectors. This class exists to avoid complicating core-ISA code with
// SVE-specific behaviour.
//
// Note that SVE does not support any pre- or post-index modes.
class SVEMemOperand {
 public:
  // "vector-plus-immediate", like [z0.s, #21]
  explicit SVEMemOperand(ZRegister base, uint64_t offset = 0)
      : base_(base),
        regoffset_(NoReg),
        offset_(RawbitsToInt64(offset)),
        mod_(NO_SVE_OFFSET_MODIFIER),
        shift_amount_(0) {
    VIXL_ASSERT(IsVectorPlusImmediate());
    VIXL_ASSERT(IsValid());
  }
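
  // Illustrative forms accepted by the constructors in this class (the
  // register choices are arbitrary examples):
  //
  //   SVEMemOperand(z0.VnS(), 21);            // [z0.s, #21]
  //   SVEMemOperand(x0, 42, SVE_MUL_VL);      // [x0, #42, MUL VL]
  //   SVEMemOperand(x0, x1);                  // [x0, x1]
  //   SVEMemOperand(x0, z1.VnD(), SVE_UXTW);  // [x0, z1.d, UXTW]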

  // "scalar-plus-immediate", like [x0], [x0, #42] or [x0, #42, MUL_VL]
  // The only supported modifiers are NO_SVE_OFFSET_MODIFIER or SVE_MUL_VL.
  //
  // Note that VIXL cannot currently distinguish between `SVEMemOperand(x0)`
  // and `SVEMemOperand(x0, 0)`. This is only significant in scalar-plus-scalar
  // instructions where xm defaults to xzr. However, users should not rely on
  // `SVEMemOperand(x0, 0)` being accepted in such cases.
  explicit SVEMemOperand(Register base,
                         uint64_t offset = 0,
                         SVEOffsetModifier mod = NO_SVE_OFFSET_MODIFIER)
      : base_(base),
        regoffset_(NoReg),
        offset_(RawbitsToInt64(offset)),
        mod_(mod),
        shift_amount_(0) {
    VIXL_ASSERT(IsScalarPlusImmediate());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-scalar", like [x0, x1]
  // "scalar-plus-vector", like [x0, z1.d]
  SVEMemOperand(Register base, CPURegister offset)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(NO_SVE_OFFSET_MODIFIER),
        shift_amount_(0) {
    VIXL_ASSERT(IsScalarPlusScalar() || IsScalarPlusVector());
    if (offset.IsZero()) VIXL_ASSERT(IsEquivalentToScalar());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-vector", like [x0, z1.d, UXTW]
  // The type of `mod` can be any `SVEOffsetModifier` (other than LSL), or a
  // corresponding `Extend` value.
  template <typename M>
  SVEMemOperand(Register base, ZRegister offset, M mod)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(0) {
    VIXL_ASSERT(mod_ != SVE_LSL);  // LSL requires an explicit shift amount.
    VIXL_ASSERT(IsScalarPlusVector());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-scalar", like [x0, x1, LSL #1]
  // "scalar-plus-vector", like [x0, z1.d, LSL #2]
  // The type of `mod` can be any `SVEOffsetModifier`, or a corresponding
  // `Shift` or `Extend` value.
  template <typename M>
  SVEMemOperand(Register base,
                CPURegister offset,
                M mod,
                unsigned shift_amount)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(shift_amount) {
    VIXL_ASSERT(IsValid());
  }

  // "vector-plus-vector", like [z0.d, z1.d, UXTW]
  template <typename M = SVEOffsetModifier>
  SVEMemOperand(ZRegister base,
                ZRegister offset,
                M mod = NO_SVE_OFFSET_MODIFIER,
                unsigned shift_amount = 0)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(shift_amount) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(IsVectorPlusVector());
  }

  // True for SVEMemOperands which represent something like [x0].
  // This will also return true for [x0, #0], because there is no way
  // to distinguish the two.
  bool IsPlainScalar() const {
    return IsScalarPlusImmediate() && (offset_ == 0);
  }

  // True for SVEMemOperands which represent something like [x0], or for
  // compound SVEMemOperands which are functionally equivalent, such as
  // [x0, #0], [x0, xzr] or [x0, wzr, UXTW #3].
  bool IsEquivalentToScalar() const;

  // True for SVEMemOperands like [x0], [x0, #0], false for [x0, xzr] and
  // similar.
  bool IsPlainRegister() const;

  bool IsScalarPlusImmediate() const {
    return base_.IsX() && regoffset_.IsNone() &&
           ((mod_ == NO_SVE_OFFSET_MODIFIER) || IsMulVl());
  }

  bool IsScalarPlusScalar() const {
    // SVE offers no extend modes for scalar-plus-scalar, so both registers
    // must be X registers.
    return base_.IsX() && regoffset_.IsX() &&
           ((mod_ == NO_SVE_OFFSET_MODIFIER) || (mod_ == SVE_LSL));
  }

  bool IsScalarPlusVector() const {
    // The modifier can be LSL or an extend mode (UXTW or SXTW) here. Unlike
    // in the core ISA, these extend modes do not imply an S-sized lane, so the
    // modifier is independent from the lane size. The architecture describes
    // [US]XTW with a D-sized lane as an "unpacked" offset.
    return base_.IsX() && regoffset_.IsZRegister() &&
           (regoffset_.IsLaneSizeS() || regoffset_.IsLaneSizeD()) && !IsMulVl();
  }

  bool IsVectorPlusImmediate() const {
    return base_.IsZRegister() &&
           (base_.IsLaneSizeS() || base_.IsLaneSizeD()) &&
           regoffset_.IsNone() && (mod_ == NO_SVE_OFFSET_MODIFIER);
  }

  bool IsVectorPlusVector() const {
    return base_.IsZRegister() && regoffset_.IsZRegister() && (offset_ == 0) &&
           AreSameFormat(base_, regoffset_) &&
           (base_.IsLaneSizeS() || base_.IsLaneSizeD());
  }

  bool IsContiguous() const { return !IsScatterGather(); }
  bool IsScatterGather() const {
    return base_.IsZRegister() || regoffset_.IsZRegister();
  }

  // TODO: If necessary, add helpers like `HasScalarBase()`.

  Register GetScalarBase() const {
    VIXL_ASSERT(base_.IsX());
    return Register(base_);
  }

  ZRegister GetVectorBase() const {
    VIXL_ASSERT(base_.IsZRegister());
    VIXL_ASSERT(base_.HasLaneSize());
    return ZRegister(base_);
  }

  Register GetScalarOffset() const {
    VIXL_ASSERT(regoffset_.IsRegister());
    return Register(regoffset_);
  }

  ZRegister GetVectorOffset() const {
    VIXL_ASSERT(regoffset_.IsZRegister());
    VIXL_ASSERT(regoffset_.HasLaneSize());
    return ZRegister(regoffset_);
  }

  int64_t GetImmediateOffset() const {
    VIXL_ASSERT(regoffset_.IsNone());
    return offset_;
  }

  SVEOffsetModifier GetOffsetModifier() const { return mod_; }
  unsigned GetShiftAmount() const { return shift_amount_; }

  bool IsEquivalentToLSL(unsigned amount) const {
    if (shift_amount_ != amount) return false;
    if (amount == 0) {
      // No-shift is equivalent to "LSL #0".
      return ((mod_ == SVE_LSL) || (mod_ == NO_SVE_OFFSET_MODIFIER));
    }
    return mod_ == SVE_LSL;
  }

  bool IsMulVl() const { return mod_ == SVE_MUL_VL; }

  bool IsValid() const;

 private:
  // Allow standard `Shift` and `Extend` arguments to be used.
  SVEOffsetModifier GetSVEOffsetModifierFor(Shift shift) {
    if (shift == LSL) return SVE_LSL;
    if (shift == NO_SHIFT) return NO_SVE_OFFSET_MODIFIER;
    // SVE does not accept any other shift.
    VIXL_UNIMPLEMENTED();
    return NO_SVE_OFFSET_MODIFIER;
  }

  SVEOffsetModifier GetSVEOffsetModifierFor(Extend extend = NO_EXTEND) {
    if (extend == UXTW) return SVE_UXTW;
    if (extend == SXTW) return SVE_SXTW;
    if (extend == NO_EXTEND) return NO_SVE_OFFSET_MODIFIER;
    // SVE does not accept any other extend mode.
    VIXL_UNIMPLEMENTED();
    return NO_SVE_OFFSET_MODIFIER;
  }

  SVEOffsetModifier GetSVEOffsetModifierFor(SVEOffsetModifier mod) {
    return mod;
  }

  CPURegister base_;
  CPURegister regoffset_;
  int64_t offset_;
  SVEOffsetModifier mod_;
  unsigned shift_amount_;
};

// Represent a signed or unsigned integer operand.
//
// This is designed to make instructions which naturally accept a _signed_
// immediate easier to implement and use, when we also want users to be able
// to specify raw-bits values (such as with hexadecimal constants). The
// advantage of this class over a simple uint64_t (with implicit C++
// sign-extension) is that this class can strictly check the range of allowed
// values. With a simple uint64_t, it is impossible to distinguish -1 from
// UINT64_MAX.
//
// For example, these instructions are equivalent:
//
//   __ Insr(z0.VnB(), -1);
//   __ Insr(z0.VnB(), 0xff);
//
// ... as are these:
//
//   __ Insr(z0.VnD(), -1);
//   __ Insr(z0.VnD(), 0xffffffffffffffff);
//
// ... but this is invalid:
//
//   __ Insr(z0.VnB(), 0xffffffffffffffff);  // Too big for B-sized lanes.
class IntegerOperand {
 public:
#define VIXL_INT_TYPES(V) \
  V(char) V(short) V(int) V(long) V(long long)  // NOLINT(runtime/int)
#define VIXL_DECL_INT_OVERLOADS(T)                                        \
  /* These are allowed to be implicit constructors because this is a */  \
  /* wrapper class that doesn't normally perform any type conversion. */ \
  IntegerOperand(signed T immediate) /* NOLINT(runtime/explicit) */      \
      : raw_bits_(immediate), /* Allow implicit sign-extension. */       \
        is_negative_(immediate < 0) {}                                   \
  IntegerOperand(unsigned T immediate) /* NOLINT(runtime/explicit) */    \
      : raw_bits_(immediate), is_negative_(false) {}
  VIXL_INT_TYPES(VIXL_DECL_INT_OVERLOADS)
#undef VIXL_DECL_INT_OVERLOADS
#undef VIXL_INT_TYPES

  // TODO: `Operand` can currently only hold an int64_t, so some large,
  // unsigned values will be misrepresented here.
  explicit IntegerOperand(const Operand& operand)
      : raw_bits_(operand.GetEquivalentImmediate()),
        is_negative_(operand.GetEquivalentImmediate() < 0) {}

  bool IsIntN(unsigned n) const {
    return is_negative_ ? vixl::IsIntN(n, RawbitsToInt64(raw_bits_))
                        : vixl::IsIntN(n, raw_bits_);
  }
  bool IsUintN(unsigned n) const {
    return !is_negative_ && vixl::IsUintN(n, raw_bits_);
  }

  bool IsUint8() const { return IsUintN(8); }
  bool IsUint16() const { return IsUintN(16); }
  bool IsUint32() const { return IsUintN(32); }
  bool IsUint64() const { return IsUintN(64); }

  bool IsInt8() const { return IsIntN(8); }
  bool IsInt16() const { return IsIntN(16); }
  bool IsInt32() const { return IsIntN(32); }
  bool IsInt64() const { return IsIntN(64); }
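
  // For illustration, the checks above distinguish raw-bits values from
  // negative values (the results shown follow from the definitions):
  //
  //   IntegerOperand(-1).IsIntN(8);     // true
  //   IntegerOperand(-1).IsUintN(8);    // false (negative values never fit).
  //   IntegerOperand(0xff).IsIntN(8);   // false (0xff is 255 here, not -1).
  //   IntegerOperand(0xff).IsUintN(8);  // true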

  bool FitsInBits(unsigned n) const {
    return is_negative_ ? IsIntN(n) : IsUintN(n);
  }
  bool FitsInLane(const CPURegister& zd) const {
    return FitsInBits(zd.GetLaneSizeInBits());
  }
  bool FitsInSignedLane(const CPURegister& zd) const {
    return IsIntN(zd.GetLaneSizeInBits());
  }
  bool FitsInUnsignedLane(const CPURegister& zd) const {
    return IsUintN(zd.GetLaneSizeInBits());
  }

  // Cast a value in the range [INT<n>_MIN, UINT<n>_MAX] to an unsigned integer
  // in the range [0, UINT<n>_MAX] (using two's complement mapping).
  uint64_t AsUintN(unsigned n) const {
    VIXL_ASSERT(FitsInBits(n));
    return raw_bits_ & GetUintMask(n);
  }

  uint8_t AsUint8() const { return static_cast<uint8_t>(AsUintN(8)); }
  uint16_t AsUint16() const { return static_cast<uint16_t>(AsUintN(16)); }
  uint32_t AsUint32() const { return static_cast<uint32_t>(AsUintN(32)); }
  uint64_t AsUint64() const { return AsUintN(64); }

  // Cast a value in the range [INT<n>_MIN, UINT<n>_MAX] to a signed integer in
  // the range [INT<n>_MIN, INT<n>_MAX] (using two's complement mapping).
  int64_t AsIntN(unsigned n) const {
    VIXL_ASSERT(FitsInBits(n));
    return ExtractSignedBitfield64(n - 1, 0, raw_bits_);
  }

  int8_t AsInt8() const { return static_cast<int8_t>(AsIntN(8)); }
  int16_t AsInt16() const { return static_cast<int16_t>(AsIntN(16)); }
  int32_t AsInt32() const { return static_cast<int32_t>(AsIntN(32)); }
  int64_t AsInt64() const { return AsIntN(64); }

  // Several instructions encode a signed int<N>_t, which is then (optionally)
  // left-shifted and sign-extended to a Z register lane with a size which may
  // be larger than N. This helper tries to find an int<N>_t such that the
  // IntegerOperand's arithmetic value is reproduced in each lane.
  //
  // This is the mechanism that allows `Insr(z0.VnB(), 0xff)` to be treated as
  // `Insr(z0.VnB(), -1)`.
  template <unsigned N, unsigned kShift, typename T>
  bool TryEncodeAsShiftedIntNForLane(const CPURegister& zd, T* imm) const {
    VIXL_STATIC_ASSERT(std::numeric_limits<T>::digits > N);
    VIXL_ASSERT(FitsInLane(zd));
    if ((raw_bits_ & GetUintMask(kShift)) != 0) return false;

    // Reverse the specified left-shift.
    IntegerOperand unshifted(*this);
    unshifted.ArithmeticShiftRight(kShift);

    if (unshifted.IsIntN(N)) {
      // This is trivial, since sign-extension produces the same arithmetic
      // value irrespective of the destination size.
      *imm = static_cast<T>(unshifted.AsIntN(N));
      return true;
    }

    // Otherwise, we might be able to use the sign-extension to produce the
    // desired bit pattern. We can only do this for values in the range
    // [INT<N>_MAX + 1, UINT<N>_MAX], where the highest set bit is the sign
    // bit.
    //
    // The lane size has to be adjusted to compensate for `kShift`, since the
    // high bits will be dropped when the encoded value is left-shifted.
    if (unshifted.IsUintN(zd.GetLaneSizeInBits() - kShift)) {
      int64_t encoded = unshifted.AsIntN(zd.GetLaneSizeInBits() - kShift);
      if (vixl::IsIntN(N, encoded)) {
        *imm = static_cast<T>(encoded);
        return true;
      }
    }
    return false;
  }

  // As above, but `kShift` is written to the `*shift` parameter on success, so
  // that it is easy to chain calls like this:
  //
  //   if (imm.TryEncodeAsShiftedIntNForLane<8, 0>(zd, &imm8, &shift) ||
  //       imm.TryEncodeAsShiftedIntNForLane<8, 8>(zd, &imm8, &shift)) {
  //     insn(zd, imm8, shift)
  //   }
  template <unsigned N, unsigned kShift, typename T, typename S>
  bool TryEncodeAsShiftedIntNForLane(const CPURegister& zd,
                                     T* imm,
                                     S* shift) const {
    if (TryEncodeAsShiftedIntNForLane<N, kShift>(zd, imm)) {
      *shift = kShift;
      return true;
    }
    return false;
  }

  // As above, but assume that `kShift` is 0.
  template <unsigned N, typename T>
  bool TryEncodeAsIntNForLane(const CPURegister& zd, T* imm) const {
    return TryEncodeAsShiftedIntNForLane<N, 0>(zd, imm);
  }

  // As above, but for unsigned fields. This is usually a simple operation, but
  // is provided for symmetry.
  template <unsigned N, unsigned kShift, typename T>
  bool TryEncodeAsShiftedUintNForLane(const CPURegister& zd, T* imm) const {
    VIXL_STATIC_ASSERT(std::numeric_limits<T>::digits > N);
    VIXL_ASSERT(FitsInLane(zd));

    // TODO: Should we convert -1 to 0xff here?
    if (is_negative_) return false;
    USE(zd);

    if ((raw_bits_ & GetUintMask(kShift)) != 0) return false;

    if (vixl::IsUintN(N, raw_bits_ >> kShift)) {
      *imm = static_cast<T>(raw_bits_ >> kShift);
      return true;
    }
    return false;
  }

  template <unsigned N, unsigned kShift, typename T, typename S>
  bool TryEncodeAsShiftedUintNForLane(const CPURegister& zd,
                                      T* imm,
                                      S* shift) const {
    if (TryEncodeAsShiftedUintNForLane<N, kShift>(zd, imm)) {
      *shift = kShift;
      return true;
    }
    return false;
  }

  bool IsZero() const { return raw_bits_ == 0; }
  bool IsNegative() const { return is_negative_; }
  bool IsPositiveOrZero() const { return !is_negative_; }

  uint64_t GetMagnitude() const {
    return is_negative_ ? -raw_bits_ : raw_bits_;
  }

 private:
  // Shift the arithmetic value right, with sign extension if is_negative_.
  void ArithmeticShiftRight(int shift) {
    VIXL_ASSERT((shift >= 0) && (shift < 64));
    if (shift == 0) return;
    if (is_negative_) {
      raw_bits_ = ExtractSignedBitfield64(63, shift, raw_bits_);
    } else {
      raw_bits_ >>= shift;
    }
  }

  uint64_t raw_bits_;
  bool is_negative_;
};

// This is an abstraction that can represent a register or memory location. The
// `MacroAssembler` provides helpers to move data between generic operands.
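//
// Illustrative constructions (the operands are arbitrary examples; the memory
// size is assumed to be given in bytes, and `kXRegSizeInBytes` is assumed to
// be available from the register definitions):
//
//   GenericOperand reg_op(x0);
//   GenericOperand mem_op(MemOperand(sp, 8), kXRegSizeInBytes);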
class GenericOperand {
 public:
  GenericOperand() { VIXL_ASSERT(!IsValid()); }
  GenericOperand(const CPURegister& reg);  // NOLINT(runtime/explicit)
  GenericOperand(const MemOperand& mem_op,
                 size_t mem_op_size = 0);  // NOLINT(runtime/explicit)

  bool IsValid() const { return cpu_register_.IsValid() != mem_op_.IsValid(); }

  bool Equals(const GenericOperand& other) const;

  bool IsCPURegister() const {
    VIXL_ASSERT(IsValid());
    return cpu_register_.IsValid();
  }

  bool IsRegister() const {
    return IsCPURegister() && cpu_register_.IsRegister();
  }

  bool IsVRegister() const {
    return IsCPURegister() && cpu_register_.IsVRegister();
  }

  bool IsSameCPURegisterType(const GenericOperand& other) {
    return IsCPURegister() && other.IsCPURegister() &&
           GetCPURegister().IsSameType(other.GetCPURegister());
  }

  bool IsMemOperand() const {
    VIXL_ASSERT(IsValid());
    return mem_op_.IsValid();
  }

  CPURegister GetCPURegister() const {
    VIXL_ASSERT(IsCPURegister());
    return cpu_register_;
  }

  MemOperand GetMemOperand() const {
    VIXL_ASSERT(IsMemOperand());
    return mem_op_;
  }

  size_t GetMemOperandSizeInBytes() const {
    VIXL_ASSERT(IsMemOperand());
    return mem_op_size_;
  }

  size_t GetSizeInBytes() const {
    return IsCPURegister() ? cpu_register_.GetSizeInBytes()
                           : GetMemOperandSizeInBytes();
  }

  size_t GetSizeInBits() const { return GetSizeInBytes() * kBitsPerByte; }

 private:
  CPURegister cpu_register_;
  MemOperand mem_op_;
  // The size of the memory region pointed to, in bytes.
  // We only support sizes up to X/D register sizes.
  size_t mem_op_size_;
};

}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_OPERANDS_AARCH64_H_