// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "operands-aarch64.h"

namespace vixl {
namespace aarch64 {

// CPURegList utilities.
CPURegister CPURegList::PopLowestIndex(RegList mask) {
  RegList list = list_ & mask;
  if (list == 0) return NoCPUReg;
  int index = CountTrailingZeros(list);
  // Use a RegList-width constant so the shift is well-defined for all indices.
  VIXL_ASSERT(((static_cast<RegList>(1) << index) & list) != 0);
  Remove(index);
  return CPURegister(index, size_, type_);
}


CPURegister CPURegList::PopHighestIndex(RegList mask) {
  RegList list = list_ & mask;
  if (list == 0) return NoCPUReg;
  int index = CountLeadingZeros(list);
  index = kRegListSizeInBits - 1 - index;
  // Use a RegList-width constant so the shift is well-defined for all indices.
  VIXL_ASSERT(((static_cast<RegList>(1) << index) & list) != 0);
  Remove(index);
  return CPURegister(index, size_, type_);
}
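
// Example (illustrative sketch; assumes the default all-set mask declared
// for these methods in the header):
//
//   CPURegList list(x2, x5, x9);
//   CPURegister lo = list.PopLowestIndex();   // x2; list becomes {x5, x9}.
//   CPURegister hi = list.PopHighestIndex();  // x9; list becomes {x5}.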


bool CPURegList::IsValid() const {
  if (type_ == CPURegister::kNoRegister) {
    // We can't use IsEmpty here because that asserts IsValid().
    return list_ == 0;
  } else {
    bool is_valid = true;
    // Try to create a CPURegister for each element in the list.
    for (int i = 0; i < kRegListSizeInBits; i++) {
      if (((list_ >> i) & 1) != 0) {
        is_valid &= CPURegister(i, size_, type_).IsValid();
      }
    }
    return is_valid;
  }
}


void CPURegList::RemoveCalleeSaved() {
  if (GetType() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(GetRegisterSizeInBits()));
  } else if (GetType() == CPURegister::kVRegister) {
    Remove(GetCalleeSavedV(GetRegisterSizeInBits()));
  } else {
    VIXL_ASSERT(GetType() == CPURegister::kNoRegister);
    VIXL_ASSERT(IsEmpty());
    // The list must already be empty, so do nothing.
  }
}
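
// Example (illustrative): dropping registers that survive a call anyway.
//
//   CPURegList live(x0, x19, x30);
//   live.RemoveCalleeSaved();  // Removes x19 (callee-saved); x0 and lr (x30)
//                              // remain and the caller must preserve them.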


CPURegList CPURegList::Union(const CPURegList& list_1,
                             const CPURegList& list_2,
                             const CPURegList& list_3) {
  return Union(list_1, Union(list_2, list_3));
}


CPURegList CPURegList::Union(const CPURegList& list_1,
                             const CPURegList& list_2,
                             const CPURegList& list_3,
                             const CPURegList& list_4) {
  return Union(Union(list_1, list_2), Union(list_3, list_4));
}


CPURegList CPURegList::Intersection(const CPURegList& list_1,
                                    const CPURegList& list_2,
                                    const CPURegList& list_3) {
  return Intersection(list_1, Intersection(list_2, list_3));
}


CPURegList CPURegList::Intersection(const CPURegList& list_1,
                                    const CPURegList& list_2,
                                    const CPURegList& list_3,
                                    const CPURegList& list_4) {
  return Intersection(Intersection(list_1, list_2),
                      Intersection(list_3, list_4));
}


CPURegList CPURegList::GetCalleeSaved(unsigned size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}


CPURegList CPURegList::GetCalleeSavedV(unsigned size) {
  return CPURegList(CPURegister::kVRegister, size, 8, 15);
}


CPURegList CPURegList::GetCallerSaved(unsigned size) {
  // Registers x0-x18 and lr (x30) are caller-saved.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
  // Do not use lr directly to avoid initialisation order fiasco bugs for users.
  list.Combine(Register(30, kXRegSize));
  return list;
}


CPURegList CPURegList::GetCallerSavedV(unsigned size) {
  // Registers d0-d7 and d16-d31 are caller-saved.
  CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
  list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
  return list;
}


const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV();
const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV();
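
// Example (illustrative): the AAPCS64 lists above compose with the utilities
// defined earlier, e.g. every general-purpose register with an AAPCS64 role:
//
//   CPURegList all = CPURegList::Union(kCalleeSaved, kCallerSaved);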

// Operand.
Operand::Operand(int64_t immediate)
    : immediate_(immediate),
      reg_(NoReg),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}

Operand::Operand(IntegerOperand immediate)
    : immediate_(immediate.AsIntN(64)),
      reg_(NoReg),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}

Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(shift != MSL);
  VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
  VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
  VIXL_ASSERT(!reg.IsSP());
}


Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(reg.IsValid());
  VIXL_ASSERT(shift_amount <= 4);
  VIXL_ASSERT(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}


bool Operand::IsImmediate() const { return reg_.Is(NoReg); }


bool Operand::IsPlainRegister() const {
  return reg_.IsValid() &&
         (((shift_ == NO_SHIFT) && (extend_ == NO_EXTEND)) ||
          // No-op shifts.
          ((shift_ != NO_SHIFT) && (shift_amount_ == 0)) ||
          // No-op extend operations.
          // We can't include [US]XTW here without knowing more about the
          // context; they are only no-ops for 32-bit operations.
          //
          // For example, this operand could be replaced with w1:
          //   __ Add(w0, w0, Operand(w1, UXTW));
          // However, no plain register can replace it in this context:
          //   __ Add(x0, x0, Operand(w1, UXTW));
          (((extend_ == UXTX) || (extend_ == SXTX)) && (shift_amount_ == 0)));
}


bool Operand::IsShiftedRegister() const {
  return reg_.IsValid() && (shift_ != NO_SHIFT);
}


bool Operand::IsExtendedRegister() const {
  return reg_.IsValid() && (extend_ != NO_EXTEND);
}


bool Operand::IsZero() const {
  if (IsImmediate()) {
    return GetImmediate() == 0;
  } else {
    return GetRegister().IsZero();
  }
}


Operand Operand::ToExtendedRegister() const {
  VIXL_ASSERT(IsShiftedRegister());
  VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
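
// Example (illustrative): rewriting an LSL-shifted register as an extended
// register, for instructions that only accept extend modes.
//
//   Operand op(x1, LSL, 2);
//   Operand ext = op.ToExtendedRegister();  // Equivalent to (x1, UXTX, 2).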


// MemOperand
MemOperand::MemOperand()
    : base_(NoReg),
      regoffset_(NoReg),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND) {}


MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      offset_(offset),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Extend extend,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  VIXL_ASSERT(!regoffset.IsSP());
  VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Shift shift,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
  VIXL_ASSERT(shift == LSL);
}


MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.GetImmediate();
  } else if (offset.IsShiftedRegister()) {
    VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));

    regoffset_ = offset.GetRegister();
    shift_ = offset.GetShift();
    shift_amount_ = offset.GetShiftAmount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
    VIXL_ASSERT(shift_ == LSL);
  } else {
    VIXL_ASSERT(offset.IsExtendedRegister());
    VIXL_ASSERT(addrmode == Offset);

    regoffset_ = offset.GetRegister();
    extend_ = offset.GetExtend();
    shift_amount_ = offset.GetShiftAmount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    VIXL_ASSERT(!regoffset_.IsSP());
    VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
  }
}
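
// Example (illustrative): building a register-offset address from an Operand.
//
//   MemOperand addr(x0, Operand(x1, LSL, 3));  // Same as [x0, x1, LSL #3].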


bool MemOperand::IsPlainRegister() const {
  return IsImmediateOffset() && (GetOffset() == 0);
}


bool MemOperand::IsEquivalentToPlainRegister() const {
  if (regoffset_.Is(NoReg)) {
    // Immediate offset, pre-index or post-index.
    return GetOffset() == 0;
  } else if (GetRegisterOffset().IsZero()) {
    // Zero register offset, pre-index or post-index.
    // We can ignore shift and extend options because they all result in zero.
    return true;
  }
  return false;
}
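
// Examples (illustrative; assumes the immediate- and register-offset
// constructors declared in the header):
//
//   MemOperand(x0).IsEquivalentToPlainRegister();       // true
//   MemOperand(x0, 8).IsEquivalentToPlainRegister();    // false
//   MemOperand(x0, xzr).IsEquivalentToPlainRegister();  // true: the zero
//                                                       // offset is inert.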


bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}


bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}


bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }


bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }


void MemOperand::AddOffset(int64_t offset) {
  VIXL_ASSERT(IsImmediateOffset());
  offset_ += offset;
}


bool SVEMemOperand::IsValid() const {
#ifdef VIXL_DEBUG
  {
    // It should not be possible for an SVEMemOperand to match multiple types.
    int count = 0;
    if (IsScalarPlusImmediate()) count++;
    if (IsScalarPlusScalar()) count++;
    if (IsScalarPlusVector()) count++;
    if (IsVectorPlusImmediate()) count++;
    if (IsVectorPlusVector()) count++;
    VIXL_ASSERT(count <= 1);
  }
#endif

  // We can't have a register _and_ an immediate offset.
  if ((offset_ != 0) && (!regoffset_.IsNone())) return false;

  if (shift_amount_ != 0) {
    // Only shift and extend modifiers can take a shift amount.
    switch (mod_) {
      case NO_SVE_OFFSET_MODIFIER:
      case SVE_MUL_VL:
        return false;
      case SVE_LSL:
      case SVE_UXTW:
      case SVE_SXTW:
        // Fall through.
        break;
    }
  }

  return IsScalarPlusImmediate() || IsScalarPlusScalar() ||
         IsScalarPlusVector() || IsVectorPlusImmediate() ||
         IsVectorPlusVector();
}


bool SVEMemOperand::IsEquivalentToScalar() const {
  if (IsScalarPlusImmediate()) {
    return GetImmediateOffset() == 0;
  }
  if (IsScalarPlusScalar()) {
    // We can ignore the shift because it will still result in zero.
    return GetScalarOffset().IsZero();
  }
  // Forms involving vectors are never equivalent to a single scalar.
  return false;
}
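
// Examples (illustrative; assumes the scalar-plus-immediate and
// scalar-plus-scalar constructors declared in the header):
//
//   SVEMemOperand(x0).IsEquivalentToScalar();       // true
//   SVEMemOperand(x0, xzr).IsEquivalentToScalar();  // true: xzr is zero.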

bool SVEMemOperand::IsPlainRegister() const {
  if (IsScalarPlusImmediate()) {
    return GetImmediateOffset() == 0;
  }
  return false;
}

GenericOperand::GenericOperand(const CPURegister& reg)
    : cpu_register_(reg), mem_op_size_(0) {
  if (reg.IsQ()) {
    VIXL_ASSERT(reg.GetSizeInBits() > static_cast<int>(kXRegSize));
    // Support for Q registers is not implemented yet.
    VIXL_UNIMPLEMENTED();
  }
}


GenericOperand::GenericOperand(const MemOperand& mem_op, size_t mem_op_size)
    : cpu_register_(NoReg), mem_op_(mem_op), mem_op_size_(mem_op_size) {
  if (mem_op_size_ > kXRegSizeInBytes) {
    // We only support generic operands up to the size of X registers.
    VIXL_UNIMPLEMENTED();
  }
}

bool GenericOperand::Equals(const GenericOperand& other) const {
  if (!IsValid() || !other.IsValid()) {
    // Two invalid generic operands are considered equal.
    return !IsValid() && !other.IsValid();
  }
  if (IsCPURegister() && other.IsCPURegister()) {
    return GetCPURegister().Is(other.GetCPURegister());
  } else if (IsMemOperand() && other.IsMemOperand()) {
    return GetMemOperand().Equals(other.GetMemOperand()) &&
           (GetMemOperandSizeInBytes() == other.GetMemOperandSizeInBytes());
  }
  return false;
}
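
// Examples (illustrative):
//
//   GenericOperand(w0).Equals(GenericOperand(w0));  // true
//   GenericOperand(w0).Equals(GenericOperand(x0));  // false: sizes differ.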
}  // namespace aarch64
}  // namespace vixl