// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "operands-aarch64.h"

namespace vixl {
namespace aarch64 {

// CPURegList utilities.
CPURegister CPURegList::PopLowestIndex() {
  VIXL_ASSERT(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountTrailingZeros(list_);
  // Use a 64-bit constant: `list_` is 64 bits wide, so a plain `1 << index`
  // would overflow for the upper register indices.
  VIXL_ASSERT((UINT64_C(1) << index) & list_);
  Remove(index);
  return CPURegister(index, size_, type_);
}


CPURegister CPURegList::PopHighestIndex() {
  VIXL_ASSERT(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountLeadingZeros(list_);
  index = kRegListSizeInBits - 1 - index;
  VIXL_ASSERT((UINT64_C(1) << index) & list_);
  Remove(index);
  return CPURegister(index, size_, type_);
}
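

// Usage sketch (illustrative, not part of the library): a CPURegList can be
// drained from either end, which is how callers iterate over a set of
// registers in a defined order. The register names below are the usual ones
// from the VIXL headers.
//
//   CPURegList tmp_list(x10, x11, x12);
//   CPURegister lo = tmp_list.PopLowestIndex();   // x10
//   CPURegister hi = tmp_list.PopHighestIndex();  // x12
//   // tmp_list now holds only x11; popping from an empty list returns
//   // NoCPUReg rather than asserting.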


bool CPURegList::IsValid() const {
  if ((type_ == CPURegister::kRegister) || (type_ == CPURegister::kVRegister)) {
    bool is_valid = true;
    // Try to create a CPURegister for each element in the list.
    for (int i = 0; i < kRegListSizeInBits; i++) {
      if (((list_ >> i) & 1) != 0) {
        is_valid &= CPURegister(i, size_, type_).IsValid();
      }
    }
    return is_valid;
  } else if (type_ == CPURegister::kNoRegister) {
    // We can't use IsEmpty here because that asserts IsValid().
    return list_ == 0;
  } else {
    return false;
  }
}


void CPURegList::RemoveCalleeSaved() {
  if (GetType() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(GetRegisterSizeInBits()));
  } else if (GetType() == CPURegister::kVRegister) {
    Remove(GetCalleeSavedV(GetRegisterSizeInBits()));
  } else {
    VIXL_ASSERT(GetType() == CPURegister::kNoRegister);
    VIXL_ASSERT(IsEmpty());
    // The list must already be empty, so do nothing.
  }
}


CPURegList CPURegList::Union(const CPURegList& list_1,
                             const CPURegList& list_2,
                             const CPURegList& list_3) {
  return Union(list_1, Union(list_2, list_3));
}


CPURegList CPURegList::Union(const CPURegList& list_1,
                             const CPURegList& list_2,
                             const CPURegList& list_3,
                             const CPURegList& list_4) {
  return Union(Union(list_1, list_2), Union(list_3, list_4));
}


CPURegList CPURegList::Intersection(const CPURegList& list_1,
                                    const CPURegList& list_2,
                                    const CPURegList& list_3) {
  return Intersection(list_1, Intersection(list_2, list_3));
}


CPURegList CPURegList::Intersection(const CPURegList& list_1,
                                    const CPURegList& list_2,
                                    const CPURegList& list_3,
                                    const CPURegList& list_4) {
  return Intersection(Intersection(list_1, list_2),
                      Intersection(list_3, list_4));
}


CPURegList CPURegList::GetCalleeSaved(unsigned size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}


CPURegList CPURegList::GetCalleeSavedV(unsigned size) {
  return CPURegList(CPURegister::kVRegister, size, 8, 15);
}


CPURegList CPURegList::GetCallerSaved(unsigned size) {
  // Registers x0-x18 and lr (x30) are caller-saved.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
  // Do not use lr directly to avoid initialisation order fiasco bugs for users.
  list.Combine(Register(30, kXRegSize));
  return list;
}


CPURegList CPURegList::GetCallerSavedV(unsigned size) {
  // Registers d0-d7 and d16-d31 are caller-saved.
  CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
  list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
  return list;
}


const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV();
const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV();
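

// Usage sketch (illustrative, not part of the library): these lists are
// typically fed to the MacroAssembler's bulk push/pop helpers, for example
// to preserve the callee-saved registers around generated code. Assumes a
// MacroAssembler `masm` is in scope.
//
//   masm.PushCPURegList(kCalleeSaved);
//   // ... code that clobbers callee-saved registers ...
//   masm.PopCPURegList(kCalleeSaved);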


// Registers.
#define WREG(n) w##n,
const Register Register::wregisters[] = {AARCH64_REGISTER_CODE_LIST(WREG)};
#undef WREG

#define XREG(n) x##n,
const Register Register::xregisters[] = {AARCH64_REGISTER_CODE_LIST(XREG)};
#undef XREG

#define BREG(n) b##n,
const VRegister VRegister::bregisters[] = {AARCH64_REGISTER_CODE_LIST(BREG)};
#undef BREG

#define HREG(n) h##n,
const VRegister VRegister::hregisters[] = {AARCH64_REGISTER_CODE_LIST(HREG)};
#undef HREG

#define SREG(n) s##n,
const VRegister VRegister::sregisters[] = {AARCH64_REGISTER_CODE_LIST(SREG)};
#undef SREG

#define DREG(n) d##n,
const VRegister VRegister::dregisters[] = {AARCH64_REGISTER_CODE_LIST(DREG)};
#undef DREG

#define QREG(n) q##n,
const VRegister VRegister::qregisters[] = {AARCH64_REGISTER_CODE_LIST(QREG)};
#undef QREG

#define VREG(n) v##n,
const VRegister VRegister::vregisters[] = {AARCH64_REGISTER_CODE_LIST(VREG)};
#undef VREG


const Register& Register::GetWRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) {
    return wsp;
  } else {
    VIXL_ASSERT(code < kNumberOfRegisters);
    return wregisters[code];
  }
}


const Register& Register::GetXRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) {
    return sp;
  } else {
    VIXL_ASSERT(code < kNumberOfRegisters);
    return xregisters[code];
  }
}


const VRegister& VRegister::GetBRegFromCode(unsigned code) {
  VIXL_ASSERT(code < kNumberOfVRegisters);
  return bregisters[code];
}


const VRegister& VRegister::GetHRegFromCode(unsigned code) {
  VIXL_ASSERT(code < kNumberOfVRegisters);
  return hregisters[code];
}


const VRegister& VRegister::GetSRegFromCode(unsigned code) {
  VIXL_ASSERT(code < kNumberOfVRegisters);
  return sregisters[code];
}


const VRegister& VRegister::GetDRegFromCode(unsigned code) {
  VIXL_ASSERT(code < kNumberOfVRegisters);
  return dregisters[code];
}


const VRegister& VRegister::GetQRegFromCode(unsigned code) {
  VIXL_ASSERT(code < kNumberOfVRegisters);
  return qregisters[code];
}


const VRegister& VRegister::GetVRegFromCode(unsigned code) {
  VIXL_ASSERT(code < kNumberOfVRegisters);
  return vregisters[code];
}


const Register& CPURegister::W() const {
  VIXL_ASSERT(IsValidRegister());
  return Register::GetWRegFromCode(code_);
}


const Register& CPURegister::X() const {
  VIXL_ASSERT(IsValidRegister());
  return Register::GetXRegFromCode(code_);
}


const VRegister& CPURegister::B() const {
  VIXL_ASSERT(IsValidVRegister());
  return VRegister::GetBRegFromCode(code_);
}


const VRegister& CPURegister::H() const {
  VIXL_ASSERT(IsValidVRegister());
  return VRegister::GetHRegFromCode(code_);
}


const VRegister& CPURegister::S() const {
  VIXL_ASSERT(IsValidVRegister());
  return VRegister::GetSRegFromCode(code_);
}


const VRegister& CPURegister::D() const {
  VIXL_ASSERT(IsValidVRegister());
  return VRegister::GetDRegFromCode(code_);
}


const VRegister& CPURegister::Q() const {
  VIXL_ASSERT(IsValidVRegister());
  return VRegister::GetQRegFromCode(code_);
}


const VRegister& CPURegister::V() const {
  VIXL_ASSERT(IsValidVRegister());
  return VRegister::GetVRegFromCode(code_);
}


// Operand.
Operand::Operand(int64_t immediate)
    : immediate_(immediate),
      reg_(NoReg),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}


Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(shift != MSL);
  VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
  VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
  VIXL_ASSERT(!reg.IsSP());
}


Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(reg.IsValid());
  VIXL_ASSERT(shift_amount <= 4);
  VIXL_ASSERT(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}


bool Operand::IsImmediate() const { return reg_.Is(NoReg); }


bool Operand::IsPlainRegister() const {
  return reg_.IsValid() &&
         (((shift_ == NO_SHIFT) && (extend_ == NO_EXTEND)) ||
          // No-op shifts.
          ((shift_ != NO_SHIFT) && (shift_amount_ == 0)) ||
          // No-op extend operations.
          // We can't include [US]XTW here without knowing more about the
          // context; they are only no-ops for 32-bit operations.
          //
          // For example, this operand could be replaced with w1:
          //   __ Add(w0, w0, Operand(w1, UXTW));
          // However, no plain register can replace it in this context:
          //   __ Add(x0, x0, Operand(w1, UXTW));
          (((extend_ == UXTX) || (extend_ == SXTX)) && (shift_amount_ == 0)));
}


bool Operand::IsShiftedRegister() const {
  return reg_.IsValid() && (shift_ != NO_SHIFT);
}


bool Operand::IsExtendedRegister() const {
  return reg_.IsValid() && (extend_ != NO_EXTEND);
}


bool Operand::IsZero() const {
  if (IsImmediate()) {
    return GetImmediate() == 0;
  } else {
    return GetRegister().IsZero();
  }
}


Operand Operand::ToExtendedRegister() const {
  VIXL_ASSERT(IsShiftedRegister());
  VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
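

// Illustrative example (not part of the library): for a 64-bit register,
// `Operand(x1, LSL, 2).ToExtendedRegister()` yields `Operand(x1, UXTX, 2)`,
// which describes the same computation but in the extended-register form
// required by instructions (such as add/sub involving sp) that do not accept
// a shifted register.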


// MemOperand
MemOperand::MemOperand()
    : base_(NoReg),
      regoffset_(NoReg),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}


MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      offset_(offset),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Extend extend,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  VIXL_ASSERT(!regoffset.IsSP());
  VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Shift shift,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
  VIXL_ASSERT(shift == LSL);
}


MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.GetImmediate();
  } else if (offset.IsShiftedRegister()) {
    VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));

    regoffset_ = offset.GetRegister();
    shift_ = offset.GetShift();
    shift_amount_ = offset.GetShiftAmount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
    VIXL_ASSERT(shift_ == LSL);
  } else {
    VIXL_ASSERT(offset.IsExtendedRegister());
    VIXL_ASSERT(addrmode == Offset);

    regoffset_ = offset.GetRegister();
    extend_ = offset.GetExtend();
    shift_amount_ = offset.GetShiftAmount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    VIXL_ASSERT(!regoffset_.IsSP());
    VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
  }
}
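

// Illustrative example (not part of the library): the Operand-based
// constructor dispatches on the operand's form, so the following two
// MemOperands describe the same address, `x0 + (x1 << 3)`:
//
//   MemOperand mem_a(x0, x1, LSL, 3);
//   MemOperand mem_b(x0, Operand(x1, LSL, 3));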


bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}


bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}


bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }


bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }


void MemOperand::AddOffset(int64_t offset) {
  VIXL_ASSERT(IsImmediateOffset());
  offset_ += offset;
}


GenericOperand::GenericOperand(const CPURegister& reg)
    : cpu_register_(reg), mem_op_size_(0) {
  if (reg.IsQ()) {
    VIXL_ASSERT(reg.GetSizeInBits() > static_cast<int>(kXRegSize));
    // Support for Q registers is not implemented yet.
    VIXL_UNIMPLEMENTED();
  }
}


GenericOperand::GenericOperand(const MemOperand& mem_op, size_t mem_op_size)
    : cpu_register_(NoReg), mem_op_(mem_op), mem_op_size_(mem_op_size) {
  if (mem_op_size_ > kXRegSizeInBytes) {
    // We only support generic operands up to the size of X registers.
    VIXL_UNIMPLEMENTED();
  }
}

bool GenericOperand::Equals(const GenericOperand& other) const {
  if (!IsValid() || !other.IsValid()) {
    // Two invalid generic operands are considered equal.
    return !IsValid() && !other.IsValid();
  }
  if (IsCPURegister() && other.IsCPURegister()) {
    return GetCPURegister().Is(other.GetCPURegister());
  } else if (IsMemOperand() && other.IsMemOperand()) {
    return GetMemOperand().Equals(other.GetMemOperand()) &&
           (GetMemOperandSizeInBytes() == other.GetMemOperandSizeInBytes());
  }
  return false;
}
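
// Illustrative example (not part of the library): GenericOperand lets code
// refer to "a value in a register" and "a value in memory" uniformly.
//
//   GenericOperand in_reg(x0);
//   GenericOperand in_mem(MemOperand(sp, 8), kXRegSizeInBytes);
//   VIXL_ASSERT(!in_reg.Equals(in_mem));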
}  // namespace aarch64
}  // namespace vixl