// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#include "operands-aarch64.h"

namespace vixl {
namespace aarch64 {

// CPURegList utilities.
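// Remove the lowest-numbered register that appears in both the list and
// `mask`, and return it. Returns NoCPUReg if there is no such register.
// A short usage sketch (illustrative only):
//   CPURegList list(x2, x5, x9);
//   CPURegister r = list.PopLowestIndex(~static_cast<RegList>(0));
//   // r is x2; the list now holds {x5, x9}.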
CPURegister CPURegList::PopLowestIndex(RegList mask) {
  RegList list = list_ & mask;
  if (list == 0) return NoCPUReg;
  int index = CountTrailingZeros(list);
  // Use a RegList-width constant so the shift is well-defined for any valid
  // bit index.
  VIXL_ASSERT(((static_cast<RegList>(1) << index) & list) != 0);
  Remove(index);
  return CPURegister(index, size_, type_);
}


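// Remove the highest-numbered register that appears in both the list and
// `mask`, and return it. Returns NoCPUReg if there is no such register.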
CPURegister CPURegList::PopHighestIndex(RegList mask) {
  RegList list = list_ & mask;
  if (list == 0) return NoCPUReg;
  // Convert the leading-zero count into a bit index from the
  // least-significant end.
  int index = CountLeadingZeros(list);
  index = kRegListSizeInBits - 1 - index;
  VIXL_ASSERT(((static_cast<RegList>(1) << index) & list) != 0);
  Remove(index);
  return CPURegister(index, size_, type_);
}


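// A CPURegList is valid if it is empty with no type, or if every register it
// encodes can be constructed with the list's size and type.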
bool CPURegList::IsValid() const {
  if (type_ == CPURegister::kNoRegister) {
    // We can't use IsEmpty here because that asserts IsValid().
    return list_ == 0;
  } else {
    bool is_valid = true;
    // Try to create a CPURegister for each element in the list.
    for (int i = 0; i < kRegListSizeInBits; i++) {
      if (((list_ >> i) & 1) != 0) {
        is_valid &= CPURegister(i, size_, type_).IsValid();
      }
    }
    return is_valid;
  }
}


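// Remove the AAPCS64 callee-saved registers from this list: x19-x29 for
// general-purpose lists, and v8-v15 (whose low 64 bits are callee-saved) for
// vector lists.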
void CPURegList::RemoveCalleeSaved() {
  if (GetType() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(GetRegisterSizeInBits()));
  } else if (GetType() == CPURegister::kVRegister) {
    Remove(GetCalleeSavedV(GetRegisterSizeInBits()));
  } else {
    VIXL_ASSERT(GetType() == CPURegister::kNoRegister);
    VIXL_ASSERT(IsEmpty());
    // The list must already be empty, so do nothing.
  }
}


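// The three- and four-list variants of Union and Intersection below simply
// fold the two-list forms.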
CPURegList CPURegList::Union(const CPURegList& list_1,
                             const CPURegList& list_2,
                             const CPURegList& list_3) {
  return Union(list_1, Union(list_2, list_3));
}


CPURegList CPURegList::Union(const CPURegList& list_1,
                             const CPURegList& list_2,
                             const CPURegList& list_3,
                             const CPURegList& list_4) {
  return Union(Union(list_1, list_2), Union(list_3, list_4));
}


CPURegList CPURegList::Intersection(const CPURegList& list_1,
                                    const CPURegList& list_2,
                                    const CPURegList& list_3) {
  return Intersection(list_1, Intersection(list_2, list_3));
}


CPURegList CPURegList::Intersection(const CPURegList& list_1,
                                    const CPURegList& list_2,
                                    const CPURegList& list_3,
                                    const CPURegList& list_4) {
  return Intersection(Intersection(list_1, list_2),
                      Intersection(list_3, list_4));
}


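// Return the standard AAPCS64 register lists. The `size` argument lets
// callers state the register size they expect; it is only checked against
// the stored lists in debug builds.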
CPURegList CPURegList::GetCalleeSaved([[maybe_unused]] unsigned size) {
  VIXL_ASSERT(size ==
              static_cast<unsigned>(kCalleeSaved.GetRegisterSizeInBits()));
  return kCalleeSaved;
}


CPURegList CPURegList::GetCalleeSavedV([[maybe_unused]] unsigned size) {
  VIXL_ASSERT(size ==
              static_cast<unsigned>(kCalleeSavedV.GetRegisterSizeInBits()));
  return kCalleeSavedV;
}


CPURegList CPURegList::GetCallerSaved([[maybe_unused]] unsigned size) {
  VIXL_ASSERT(size ==
              static_cast<unsigned>(kCallerSaved.GetRegisterSizeInBits()));
  return kCallerSaved;
}


CPURegList CPURegList::GetCallerSavedV([[maybe_unused]] unsigned size) {
  VIXL_ASSERT(size ==
              static_cast<unsigned>(kCallerSavedV.GetRegisterSizeInBits()));
  return kCallerSavedV;
}

// Operand.
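// An Operand takes one of three forms: an immediate, a shifted register or
// an extended register. For example (illustrative):
//   Operand(42);          // Immediate.
//   Operand(x1, LSL, 4);  // Shifted register.
//   Operand(w1, UXTW);    // Extended register.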
Operand::Operand(int64_t immediate)
    : immediate_(immediate),
      reg_(NoReg),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}

Operand::Operand(IntegerOperand immediate)
    : immediate_(immediate.AsIntN(64)),
      reg_(NoReg),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}

Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(shift != MSL);
  VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
  VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
  VIXL_ASSERT(!reg.IsSP());
}


Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(reg.IsValid());
  VIXL_ASSERT(shift_amount <= 4);
  VIXL_ASSERT(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}


bool Operand::IsImmediate() const { return reg_.Is(NoReg); }


bool Operand::IsPlainRegister() const {
  return reg_.IsValid() &&
         (((shift_ == NO_SHIFT) && (extend_ == NO_EXTEND)) ||
          // No-op shifts.
          ((shift_ != NO_SHIFT) && (shift_amount_ == 0)) ||
          // No-op extend operations.
          // We can't include [US]XTW here without knowing more about the
          // context; they are only no-ops for 32-bit operations.
          //
          // For example, this operand could be replaced with w1:
          //   __ Add(w0, w0, Operand(w1, UXTW));
          // However, no plain register can replace it in this context:
          //   __ Add(x0, x0, Operand(w1, UXTW));
          (((extend_ == UXTX) || (extend_ == SXTX)) && (shift_amount_ == 0)));
}


bool Operand::IsShiftedRegister() const {
  return reg_.IsValid() && (shift_ != NO_SHIFT);
}


bool Operand::IsExtendedRegister() const {
  return reg_.IsValid() && (extend_ != NO_EXTEND);
}


bool Operand::IsZero() const {
  if (IsImmediate()) {
    return GetImmediate() == 0;
  } else {
    return GetRegister().IsZero();
  }
}


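// Convert an LSL-shifted register operand into the equivalent
// extended-register form (UXTX for 64-bit registers, UXTW for 32-bit ones).
// Extended-register operands encode at most a four-bit left shift, hence the
// assertion below.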
Operand Operand::ToExtendedRegister() const {
  VIXL_ASSERT(IsShiftedRegister());
  VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}


// MemOperand
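// A MemOperand describes an AArch64 addressing mode: a 64-bit base register
// plus either an immediate offset (for example, [x0, #8]), a register offset
// with an optional shift or extend (for example, [x0, x1, LSL #3]), or a
// pre- or post-indexed immediate (for example, [x0, #8]! and [x0], #8).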
MemOperand::MemOperand()
    : base_(NoReg),
      regoffset_(NoReg),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}


MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      offset_(offset),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Extend extend,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  VIXL_ASSERT(!regoffset.IsSP());
  VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Shift shift,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
  VIXL_ASSERT(shift == LSL);
}


MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.GetImmediate();
  } else if (offset.IsShiftedRegister()) {
    VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));

    regoffset_ = offset.GetRegister();
    shift_ = offset.GetShift();
    shift_amount_ = offset.GetShiftAmount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
    VIXL_ASSERT(shift_ == LSL);
  } else {
    VIXL_ASSERT(offset.IsExtendedRegister());
    VIXL_ASSERT(addrmode == Offset);

    regoffset_ = offset.GetRegister();
    extend_ = offset.GetExtend();
    shift_amount_ = offset.GetShiftAmount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    VIXL_ASSERT(!regoffset_.IsSP());
    VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    VIXL_ASSERT(regoffset_.Is64Bits() || (extend_ != SXTX));
  }
}


bool MemOperand::IsPlainRegister() const {
  return IsImmediateOffset() && (GetOffset() == 0);
}


bool MemOperand::IsEquivalentToPlainRegister() const {
  if (regoffset_.Is(NoReg)) {
    // Immediate offset, pre-index or post-index.
    return GetOffset() == 0;
  } else if (GetRegisterOffset().IsZero()) {
    // Zero register offset, pre-index or post-index.
    // We can ignore shift and extend options because they all result in zero.
    return true;
  }
  return false;
}


bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}


bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}

bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }
bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }

bool MemOperand::IsImmediatePreIndex() const {
  return IsPreIndex() && regoffset_.Is(NoReg);
}

bool MemOperand::IsImmediatePostIndex() const {
  return IsPostIndex() && regoffset_.Is(NoReg);
}

void MemOperand::AddOffset(int64_t offset) {
  VIXL_ASSERT(IsImmediateOffset());
  offset_ += offset;
}


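// SVEMemOperand.
// An SVE memory operand takes one of six forms: scalar-plus-immediate,
// scalar-plus-scalar, scalar-plus-vector, vector-plus-immediate,
// vector-plus-scalar or vector-plus-vector, with an optional modifier
// (LSL, UXTW, SXTW or MUL VL).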
bool SVEMemOperand::IsValid() const {
#ifdef VIXL_DEBUG
  {
    // It should not be possible for an SVEMemOperand to match multiple types.
    int count = 0;
    if (IsScalarPlusImmediate()) count++;
    if (IsScalarPlusScalar()) count++;
    if (IsScalarPlusVector()) count++;
    if (IsVectorPlusImmediate()) count++;
    if (IsVectorPlusScalar()) count++;
    if (IsVectorPlusVector()) count++;
    VIXL_ASSERT(count <= 1);
  }
#endif

  // We can't have a register _and_ an immediate offset.
  if ((offset_ != 0) && !regoffset_.IsNone()) return false;

  if (shift_amount_ != 0) {
    // Only shift and extend modifiers can take a shift amount.
    switch (mod_) {
      case NO_SVE_OFFSET_MODIFIER:
      case SVE_MUL_VL:
        return false;
      case SVE_LSL:
      case SVE_UXTW:
      case SVE_SXTW:
        // These modifiers accept a shift amount.
        break;
    }
  }

  return IsScalarPlusImmediate() || IsScalarPlusScalar() ||
         IsScalarPlusVector() || IsVectorPlusImmediate() ||
         IsVectorPlusScalar() || IsVectorPlusVector();
}


bool SVEMemOperand::IsEquivalentToScalar() const {
  if (IsScalarPlusImmediate()) {
    return GetImmediateOffset() == 0;
  }
  if (IsScalarPlusScalar()) {
    // We can ignore the shift because it will still result in zero.
    return GetScalarOffset().IsZero();
  }
  // Forms involving vectors are never equivalent to a single scalar.
  return false;
}

bool SVEMemOperand::IsPlainRegister() const {
  if (IsScalarPlusImmediate()) {
    return GetImmediateOffset() == 0;
  }
  return false;
}

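// GenericOperand.
// A GenericOperand holds either a CPURegister or a MemOperand (with an
// explicit access size in bytes), allowing code to treat register and memory
// locations uniformly.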
GenericOperand::GenericOperand(const CPURegister& reg)
    : cpu_register_(reg), mem_op_size_(0) {
  if (reg.IsQ()) {
    VIXL_ASSERT(reg.GetSizeInBits() > static_cast<int>(kXRegSize));
    // Support for Q registers is not implemented yet.
    VIXL_UNIMPLEMENTED();
  }
}


GenericOperand::GenericOperand(const MemOperand& mem_op, size_t mem_op_size)
    : cpu_register_(NoReg), mem_op_(mem_op), mem_op_size_(mem_op_size) {
  if (mem_op_size_ > kXRegSizeInBytes) {
    // We only support generic operands up to the size of X registers.
    VIXL_UNIMPLEMENTED();
  }
}

bool GenericOperand::Equals(const GenericOperand& other) const {
  if (!IsValid() || !other.IsValid()) {
    // Two invalid generic operands are considered equal.
    return !IsValid() && !other.IsValid();
  }
  if (IsCPURegister() && other.IsCPURegister()) {
    return GetCPURegister().Is(other.GetCPURegister());
  } else if (IsMemOperand() && other.IsMemOperand()) {
    return GetMemOperand().Equals(other.GetMemOperand()) &&
           (GetMemOperandSizeInBytes() == other.GetMemOperandSizeInBytes());
  }
  return false;
}
}  // namespace aarch64
}  // namespace vixl