//===- AArch64AddressingModes.h - AArch64 Addressing Modes ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 addressing mode implementation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <type_traits>

namespace llvm {

/// AArch64_AM - AArch64 Addressing Mode Stuff
namespace AArch64_AM {

//===----------------------------------------------------------------------===//
// Shifts
//

enum ShiftExtendType {
  InvalidShiftExtend = -1,
  LSL = 0,
  LSR,
  ASR,
  ROR,
  MSL,

  UXTB,
  UXTH,
  UXTW,
  UXTX,

  SXTB,
  SXTH,
  SXTW,
  SXTX,
};

/// getShiftExtendName - Get the string encoding for the shift type.
static inline const char *getShiftExtendName(AArch64_AM::ShiftExtendType ST) {
  switch (ST) {
  default: llvm_unreachable("unhandled shift type!");
  case AArch64_AM::LSL: return "lsl";
  case AArch64_AM::LSR: return "lsr";
  case AArch64_AM::ASR: return "asr";
  case AArch64_AM::ROR: return "ror";
  case AArch64_AM::MSL: return "msl";
  case AArch64_AM::UXTB: return "uxtb";
  case AArch64_AM::UXTH: return "uxth";
  case AArch64_AM::UXTW: return "uxtw";
  case AArch64_AM::UXTX: return "uxtx";
  case AArch64_AM::SXTB: return "sxtb";
  case AArch64_AM::SXTH: return "sxth";
  case AArch64_AM::SXTW: return "sxtw";
  case AArch64_AM::SXTX: return "sxtx";
  }
  return nullptr;
}

/// getShiftType - Extract the shift type.
static inline AArch64_AM::ShiftExtendType getShiftType(unsigned Imm) {
  switch ((Imm >> 6) & 0x7) {
  default: return AArch64_AM::InvalidShiftExtend;
  case 0: return AArch64_AM::LSL;
  case 1: return AArch64_AM::LSR;
  case 2: return AArch64_AM::ASR;
  case 3: return AArch64_AM::ROR;
  case 4: return AArch64_AM::MSL;
  }
}

/// getShiftValue - Extract the shift value.
static inline unsigned getShiftValue(unsigned Imm) {
  return Imm & 0x3f;
}

/// getShifterImm - Encode the shift type and amount:
///   imm:     6-bit shift amount
///   shifter: 000 ==> lsl
///            001 ==> lsr
///            010 ==> asr
///            011 ==> ror
///            100 ==> msl
///   {8-6}  = shifter
///   {5-0}  = imm
static inline unsigned getShifterImm(AArch64_AM::ShiftExtendType ST,
                                     unsigned Imm) {
  assert((Imm & 0x3f) == Imm && "Illegal shifted immediate value!");
  unsigned STEnc = 0;
  switch (ST) {
  default:  llvm_unreachable("Invalid shift requested");
  case AArch64_AM::LSL: STEnc = 0; break;
  case AArch64_AM::LSR: STEnc = 1; break;
  case AArch64_AM::ASR: STEnc = 2; break;
  case AArch64_AM::ROR: STEnc = 3; break;
  case AArch64_AM::MSL: STEnc = 4; break;
  }
  return (STEnc << 6) | (Imm & 0x3f);
}
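
// Worked example (illustrative): an "asr #3" shifter operand round-trips
// through the helpers above as
//   unsigned Enc = getShifterImm(AArch64_AM::ASR, 3); // (2 << 6) | 3 == 0x83
//   getShiftType(Enc)  == AArch64_AM::ASR
//   getShiftValue(Enc) == 3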

//===----------------------------------------------------------------------===//
// Extends
//

/// getArithShiftValue - Get the arithmetic shift value.
static inline unsigned getArithShiftValue(unsigned Imm) {
  return Imm & 0x7;
}

/// getExtendType - Extract the extend type for operands of arithmetic ops.
static inline AArch64_AM::ShiftExtendType getExtendType(unsigned Imm) {
  assert((Imm & 0x7) == Imm && "invalid immediate!");
  switch (Imm) {
  default: llvm_unreachable("Compiler bug!");
  case 0: return AArch64_AM::UXTB;
  case 1: return AArch64_AM::UXTH;
  case 2: return AArch64_AM::UXTW;
  case 3: return AArch64_AM::UXTX;
  case 4: return AArch64_AM::SXTB;
  case 5: return AArch64_AM::SXTH;
  case 6: return AArch64_AM::SXTW;
  case 7: return AArch64_AM::SXTX;
  }
}

static inline AArch64_AM::ShiftExtendType getArithExtendType(unsigned Imm) {
  return getExtendType((Imm >> 3) & 0x7);
}

/// Mapping from extend bits to required operation:
///   shifter: 000 ==> uxtb
///            001 ==> uxth
///            010 ==> uxtw
///            011 ==> uxtx
///            100 ==> sxtb
///            101 ==> sxth
///            110 ==> sxtw
///            111 ==> sxtx
inline unsigned getExtendEncoding(AArch64_AM::ShiftExtendType ET) {
  switch (ET) {
  default: llvm_unreachable("Invalid extend type requested");
  case AArch64_AM::UXTB: return 0;
  case AArch64_AM::UXTH: return 1;
  case AArch64_AM::UXTW: return 2;
  case AArch64_AM::UXTX: return 3;
  case AArch64_AM::SXTB: return 4;
  case AArch64_AM::SXTH: return 5;
  case AArch64_AM::SXTW: return 6;
  case AArch64_AM::SXTX: return 7;
  }
}

/// getArithExtendImm - Encode the extend type and shift amount for an
///                     arithmetic instruction:
///   imm:     3-bit extend amount
///   {5-3}  = shifter
///   {2-0}  = imm3
static inline unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET,
                                         unsigned Imm) {
  assert((Imm & 0x7) == Imm && "Illegal shifted immediate value!");
  return (getExtendEncoding(ET) << 3) | (Imm & 0x7);
}
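
// For example (illustrative): "uxtw #2" on an arithmetic operand encodes as
//   unsigned Enc = getArithExtendImm(AArch64_AM::UXTW, 2); // (2 << 3) | 2 == 0x12
//   getArithExtendType(Enc) == AArch64_AM::UXTW
//   getArithShiftValue(Enc) == 2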

/// getMemDoShift - Extract the "do shift" flag value for load/store
/// instructions.
static inline bool getMemDoShift(unsigned Imm) {
  return (Imm & 0x1) != 0;
}

/// getMemExtendType - Extract the extend type for the offset operand of
/// loads/stores.
static inline AArch64_AM::ShiftExtendType getMemExtendType(unsigned Imm) {
  return getExtendType((Imm >> 1) & 0x7);
}

/// getMemExtendImm - Encode the extend type and amount for a load/store inst:
///   doshift: should the offset be scaled by the access size
///   shifter: 000 ==> uxtb
///            001 ==> uxth
///            010 ==> uxtw
///            011 ==> uxtx
///            100 ==> sxtb
///            101 ==> sxth
///            110 ==> sxtw
///            111 ==> sxtx
///   {3-1}  = shifter
///   {0}    = doshift
static inline unsigned getMemExtendImm(AArch64_AM::ShiftExtendType ET,
                                       bool DoShift) {
  return (getExtendEncoding(ET) << 1) | unsigned(DoShift);
}
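
// For example (illustrative): an "sxtw" offset that is also scaled by the
// access size encodes as
//   unsigned Enc = getMemExtendImm(AArch64_AM::SXTW, /*DoShift=*/true); // (6 << 1) | 1 == 0xd
//   getMemExtendType(Enc) == AArch64_AM::SXTW
//   getMemDoShift(Enc)    == true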

static inline uint64_t ror(uint64_t elt, unsigned size) {
  return ((elt & 1) << (size-1)) | (elt >> 1);
}

/// processLogicalImmediate - Determine if an immediate value can be encoded
/// as the immediate operand of a logical instruction for the given register
/// size. If so, return true with "Encoding" set to the encoded value in
/// the form N:immr:imms.
static inline bool processLogicalImmediate(uint64_t Imm, unsigned RegSize,
                                           uint64_t &Encoding) {
  if (Imm == 0ULL || Imm == ~0ULL ||
      (RegSize != 64 &&
        (Imm >> RegSize != 0 || Imm == (~0ULL >> (64 - RegSize)))))
    return false;

  // First, determine the element size.
  unsigned Size = RegSize;

  do {
    Size /= 2;
    uint64_t Mask = (1ULL << Size) - 1;

    if ((Imm & Mask) != ((Imm >> Size) & Mask)) {
      Size *= 2;
      break;
    }
  } while (Size > 2);

  // Second, determine the rotation to make the element be: 0^m 1^n.
  uint32_t CTO, I;
  uint64_t Mask = ((uint64_t)-1LL) >> (64 - Size);
  Imm &= Mask;

  if (isShiftedMask_64(Imm)) {
    I = countTrailingZeros(Imm);
    assert(I < 64 && "undefined behavior");
    CTO = countTrailingOnes(Imm >> I);
  } else {
    Imm |= ~Mask;
    if (!isShiftedMask_64(~Imm))
      return false;

    unsigned CLO = countLeadingOnes(Imm);
    I = 64 - CLO;
    CTO = CLO + countTrailingOnes(Imm) - (64 - Size);
  }

  // Encode in Immr the number of RORs it would take to get *from* 0^m 1^n
  // to our target value, where I is the number of RORs to go the opposite
  // direction.
  assert(Size > I && "I should be smaller than element size");
  unsigned Immr = (Size - I) & (Size - 1);

  // If size has a 1 in the n'th bit, create a value that has zeroes in
  // bits [0, n] and ones above that.
  uint64_t NImms = ~(Size-1) << 1;

  // Or the CTO value into the low bits, which must be below the Nth bit
  // mentioned above.
  NImms |= (CTO-1);

  // Extract the seventh bit and toggle it to create the N field.
  unsigned N = ((NImms >> 6) & 1) ^ 1;

  Encoding = (N << 12) | (Immr << 6) | (NImms & 0x3f);
  return true;
}

/// isLogicalImmediate - Return true if the immediate is valid for a logical
/// immediate instruction of the given register size. Return false otherwise.
static inline bool isLogicalImmediate(uint64_t imm, unsigned regSize) {
  uint64_t encoding;
  return processLogicalImmediate(imm, regSize, encoding);
}

/// encodeLogicalImmediate - Return the encoded immediate value for a logical
/// immediate instruction of the given register size.
static inline uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize) {
  uint64_t encoding = 0;
  bool res = processLogicalImmediate(imm, regSize, encoding);
  assert(res && "invalid logical immediate");
  (void)res;
  return encoding;
}

/// decodeLogicalImmediate - Decode a logical immediate value in the form
/// "N:immr:imms" (where the immr and imms fields are each 6 bits) into the
/// integer value it represents with regSize bits.
static inline uint64_t decodeLogicalImmediate(uint64_t val, unsigned regSize) {
  // Extract the N, imms, and immr fields.
  unsigned N = (val >> 12) & 1;
  unsigned immr = (val >> 6) & 0x3f;
  unsigned imms = val & 0x3f;

  assert((regSize == 64 || N == 0) && "undefined logical immediate encoding");
  int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f));
  assert(len >= 0 && "undefined logical immediate encoding");
  unsigned size = (1 << len);
  unsigned R = immr & (size - 1);
  unsigned S = imms & (size - 1);
  assert(S != size - 1 && "undefined logical immediate encoding");
  uint64_t pattern = (1ULL << (S + 1)) - 1;
  for (unsigned i = 0; i < R; ++i)
    pattern = ror(pattern, size);

  // Replicate the pattern to fill the regSize.
  while (size != regSize) {
    pattern |= (pattern << size);
    size *= 2;
  }
  return pattern;
}
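
// Worked example (illustrative): 0x00ff00ff00ff00ff repeats a 16-bit element
// containing eight ones, so it is a valid 64-bit logical immediate with
// N = 0, immr = 0, imms = 0b100111:
//   encodeLogicalImmediate(0x00ff00ff00ff00ffULL, 64) == 0x027
//   decodeLogicalImmediate(0x027, 64)                 == 0x00ff00ff00ff00ffULL
// All-zeros and all-ones values are not encodable, so isLogicalImmediate(0, 64)
// and isLogicalImmediate(~0ULL, 64) both return false.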

/// isValidDecodeLogicalImmediate - Check to see if the logical immediate value
/// in the form "N:immr:imms" (where the immr and imms fields are each 6 bits)
/// is a valid encoding for an integer value with regSize bits.
static inline bool isValidDecodeLogicalImmediate(uint64_t val,
                                                 unsigned regSize) {
  // Extract the N and imms fields needed for checking.
  unsigned N = (val >> 12) & 1;
  unsigned imms = val & 0x3f;

  if (regSize == 32 && N != 0) // undefined logical immediate encoding
    return false;
  int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f));
  if (len < 0) // undefined logical immediate encoding
    return false;
  unsigned size = (1 << len);
  unsigned S = imms & (size - 1);
  if (S == size - 1) // undefined logical immediate encoding
    return false;

  return true;
}

//===--------------------------------------------------------------------===//
// Floating-point Immediates
//
static inline float getFPImmFloat(unsigned Imm) {
  // We expect an 8-bit binary encoding of a floating-point number here.
  union {
    uint32_t I;
    float F;
  } FPUnion;

  uint8_t Sign = (Imm >> 7) & 0x1;
  uint8_t Exp = (Imm >> 4) & 0x7;
  uint8_t Mantissa = Imm & 0xf;

  //   8-bit FP IEEE Float Encoding
  //   abcd efgh aBbbbbbc defgh000 00000000 00000000
  //
  // where B = NOT(b);

  FPUnion.I = 0;
  FPUnion.I |= Sign << 31;
  FPUnion.I |= ((Exp & 0x4) != 0 ? 0 : 1) << 30;
  FPUnion.I |= ((Exp & 0x4) != 0 ? 0x1f : 0) << 25;
  FPUnion.I |= (Exp & 0x3) << 23;
  FPUnion.I |= Mantissa << 19;
  return FPUnion.F;
}

/// getFP16Imm - Return an 8-bit floating-point version of the 16-bit
/// floating-point value. If the value cannot be represented as an 8-bit
/// floating-point value, then return -1.
static inline int getFP16Imm(const APInt &Imm) {
  uint32_t Sign = Imm.lshr(15).getZExtValue() & 1;
  int32_t Exp = (Imm.lshr(10).getSExtValue() & 0x1f) - 15;  // -14 to 15
  int32_t Mantissa = Imm.getZExtValue() & 0x3ff;  // 10 bits

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0x3f)
    return -1;
  Mantissa >>= 6;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

static inline int getFP16Imm(const APFloat &FPImm) {
  return getFP16Imm(FPImm.bitcastToAPInt());
}

/// getFP32Imm - Return an 8-bit floating-point version of the 32-bit
/// floating-point value. If the value cannot be represented as an 8-bit
/// floating-point value, then return -1.
static inline int getFP32Imm(const APInt &Imm) {
  uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
  int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127;  // -126 to 127
  int64_t Mantissa = Imm.getZExtValue() & 0x7fffff;  // 23 bits

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0x7ffff)
    return -1;
  Mantissa >>= 19;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

static inline int getFP32Imm(const APFloat &FPImm) {
  return getFP32Imm(FPImm.bitcastToAPInt());
}
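
// For example (illustrative): 1.0f has sign 0, unbiased exponent 0 and a zero
// mantissa, so it is representable and encodes as 0x70; decoding goes the
// other way:
//   getFP32Imm(APFloat(1.0f)) == 0x70
//   getFPImmFloat(0x70)       == 1.0f
// A value such as 1.1f needs more than 4 mantissa bits, so getFP32Imm
// returns -1 for it.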

/// getFP64Imm - Return an 8-bit floating-point version of the 64-bit
/// floating-point value. If the value cannot be represented as an 8-bit
/// floating-point value, then return -1.
static inline int getFP64Imm(const APInt &Imm) {
  uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
  int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023;  // -1022 to 1023
  uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffULL;

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0xffffffffffffULL)
    return -1;
  Mantissa >>= 48;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

static inline int getFP64Imm(const APFloat &FPImm) {
  return getFP64Imm(FPImm.bitcastToAPInt());
}

//===--------------------------------------------------------------------===//
// AdvSIMD Modified Immediates
//===--------------------------------------------------------------------===//

// 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh
static inline bool isAdvSIMDModImmType1(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xffffff00ffffff00ULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType1(uint64_t Imm) {
  return (Imm & 0xffULL);
}

static inline uint64_t decodeAdvSIMDModImmType1(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 32) | EncVal;
}
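
// For example (illustrative): the constant with abcdefgh == 0xab,
// i.e. 0x000000ab000000ab, matches type 1 and round-trips as
//   isAdvSIMDModImmType1(0x000000ab000000abULL)     == true
//   encodeAdvSIMDModImmType1(0x000000ab000000abULL) == 0xab
//   decodeAdvSIMDModImmType1(0xab)                  == 0x000000ab000000abULL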

// 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00
static inline bool isAdvSIMDModImmType2(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xffff00ffffff00ffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType2(uint64_t Imm) {
  return (Imm & 0xff00ULL) >> 8;
}

static inline uint64_t decodeAdvSIMDModImmType2(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 40) | (EncVal << 8);
}

// 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00
static inline bool isAdvSIMDModImmType3(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xff00ffffff00ffffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType3(uint64_t Imm) {
  return (Imm & 0xff0000ULL) >> 16;
}

static inline uint64_t decodeAdvSIMDModImmType3(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 48) | (EncVal << 16);
}

// abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00
static inline bool isAdvSIMDModImmType4(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0x00ffffff00ffffffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType4(uint64_t Imm) {
  return (Imm & 0xff000000ULL) >> 24;
}

static inline uint64_t decodeAdvSIMDModImmType4(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 56) | (EncVal << 24);
}

// 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh
static inline bool isAdvSIMDModImmType5(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         (((Imm & 0x00ff0000ULL) >> 16) == (Imm & 0x000000ffULL)) &&
         ((Imm & 0xff00ff00ff00ff00ULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType5(uint64_t Imm) {
  return (Imm & 0xffULL);
}

static inline uint64_t decodeAdvSIMDModImmType5(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 48) | (EncVal << 32) | (EncVal << 16) | EncVal;
}

// abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00
static inline bool isAdvSIMDModImmType6(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         (((Imm & 0xff000000ULL) >> 16) == (Imm & 0x0000ff00ULL)) &&
         ((Imm & 0x00ff00ff00ff00ffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType6(uint64_t Imm) {
  return (Imm & 0xff00ULL) >> 8;
}

static inline uint64_t decodeAdvSIMDModImmType6(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 56) | (EncVal << 40) | (EncVal << 24) | (EncVal << 8);
}

// 0x00 0x00 abcdefgh 0xFF 0x00 0x00 abcdefgh 0xFF
static inline bool isAdvSIMDModImmType7(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xffff00ffffff00ffULL) == 0x000000ff000000ffULL);
}

static inline uint8_t encodeAdvSIMDModImmType7(uint64_t Imm) {
  return (Imm & 0xff00ULL) >> 8;
}

static inline uint64_t decodeAdvSIMDModImmType7(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 40) | (EncVal << 8) | 0x000000ff000000ffULL;
}

// 0x00 abcdefgh 0xFF 0xFF 0x00 abcdefgh 0xFF 0xFF
static inline bool isAdvSIMDModImmType8(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xff00ffffff00ffffULL) == 0x0000ffff0000ffffULL);
}

static inline uint64_t decodeAdvSIMDModImmType8(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 48) | (EncVal << 16) | 0x0000ffff0000ffffULL;
}

static inline uint8_t encodeAdvSIMDModImmType8(uint64_t Imm) {
  return (Imm & 0x00ff0000ULL) >> 16;
}

// abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh
static inline bool isAdvSIMDModImmType9(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm >> 48) == (Imm & 0x0000ffffULL)) &&
         ((Imm >> 56) == (Imm & 0x000000ffULL));
}

static inline uint8_t encodeAdvSIMDModImmType9(uint64_t Imm) {
  return (Imm & 0xffULL);
}

static inline uint64_t decodeAdvSIMDModImmType9(uint8_t Imm) {
  uint64_t EncVal = Imm;
  EncVal |= (EncVal << 8);
  EncVal |= (EncVal << 16);
  EncVal |= (EncVal << 32);
  return EncVal;
}
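
// For example (illustrative): splatting the byte 0x3c gives
//   decodeAdvSIMDModImmType9(0x3c) == 0x3c3c3c3c3c3c3c3cULL
// and isAdvSIMDModImmType9 accepts exactly such byte-splatted constants.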

// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
// cmode: 1110, op: 1
static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
  uint64_t ByteA = Imm & 0xff00000000000000ULL;
  uint64_t ByteB = Imm & 0x00ff000000000000ULL;
  uint64_t ByteC = Imm & 0x0000ff0000000000ULL;
  uint64_t ByteD = Imm & 0x000000ff00000000ULL;
  uint64_t ByteE = Imm & 0x00000000ff000000ULL;
  uint64_t ByteF = Imm & 0x0000000000ff0000ULL;
  uint64_t ByteG = Imm & 0x000000000000ff00ULL;
  uint64_t ByteH = Imm & 0x00000000000000ffULL;

  return (ByteA == 0ULL || ByteA == 0xff00000000000000ULL) &&
         (ByteB == 0ULL || ByteB == 0x00ff000000000000ULL) &&
         (ByteC == 0ULL || ByteC == 0x0000ff0000000000ULL) &&
         (ByteD == 0ULL || ByteD == 0x000000ff00000000ULL) &&
         (ByteE == 0ULL || ByteE == 0x00000000ff000000ULL) &&
         (ByteF == 0ULL || ByteF == 0x0000000000ff0000ULL) &&
         (ByteG == 0ULL || ByteG == 0x000000000000ff00ULL) &&
         (ByteH == 0ULL || ByteH == 0x00000000000000ffULL);
}

static inline uint8_t encodeAdvSIMDModImmType10(uint64_t Imm) {
  uint8_t BitA = (Imm & 0xff00000000000000ULL) != 0;
  uint8_t BitB = (Imm & 0x00ff000000000000ULL) != 0;
  uint8_t BitC = (Imm & 0x0000ff0000000000ULL) != 0;
  uint8_t BitD = (Imm & 0x000000ff00000000ULL) != 0;
  uint8_t BitE = (Imm & 0x00000000ff000000ULL) != 0;
  uint8_t BitF = (Imm & 0x0000000000ff0000ULL) != 0;
  uint8_t BitG = (Imm & 0x000000000000ff00ULL) != 0;
  uint8_t BitH = (Imm & 0x00000000000000ffULL) != 0;

  uint8_t EncVal = BitA;
  EncVal <<= 1;
  EncVal |= BitB;
  EncVal <<= 1;
  EncVal |= BitC;
  EncVal <<= 1;
  EncVal |= BitD;
  EncVal <<= 1;
  EncVal |= BitE;
  EncVal <<= 1;
  EncVal |= BitF;
  EncVal <<= 1;
  EncVal |= BitG;
  EncVal <<= 1;
  EncVal |= BitH;
  return EncVal;
}

static inline uint64_t decodeAdvSIMDModImmType10(uint8_t Imm) {
  uint64_t EncVal = 0;
  if (Imm & 0x80) EncVal |= 0xff00000000000000ULL;
  if (Imm & 0x40) EncVal |= 0x00ff000000000000ULL;
  if (Imm & 0x20) EncVal |= 0x0000ff0000000000ULL;
  if (Imm & 0x10) EncVal |= 0x000000ff00000000ULL;
  if (Imm & 0x08) EncVal |= 0x00000000ff000000ULL;
  if (Imm & 0x04) EncVal |= 0x0000000000ff0000ULL;
  if (Imm & 0x02) EncVal |= 0x000000000000ff00ULL;
  if (Imm & 0x01) EncVal |= 0x00000000000000ffULL;
  return EncVal;
}
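
// For example (illustrative): each byte of the constant must be 0x00 or 0xff,
// and the encoding packs one bit per byte (MSB first), so
//   encodeAdvSIMDModImmType10(0xff00ff0000ff00ffULL) == 0xa5
//   decodeAdvSIMDModImmType10(0xa5)                  == 0xff00ff0000ff00ffULL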

// aBbbbbbc defgh000 0x00 0x00 aBbbbbbc defgh000 0x00 0x00
static inline bool isAdvSIMDModImmType11(uint64_t Imm) {
  uint64_t BString = (Imm & 0x7E000000ULL) >> 25;
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         (BString == 0x1f || BString == 0x20) &&
         ((Imm & 0x0007ffff0007ffffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType11(uint64_t Imm) {
  uint8_t BitA = (Imm & 0x80000000ULL) != 0;
  uint8_t BitB = (Imm & 0x20000000ULL) != 0;
  uint8_t BitC = (Imm & 0x01000000ULL) != 0;
  uint8_t BitD = (Imm & 0x00800000ULL) != 0;
  uint8_t BitE = (Imm & 0x00400000ULL) != 0;
  uint8_t BitF = (Imm & 0x00200000ULL) != 0;
  uint8_t BitG = (Imm & 0x00100000ULL) != 0;
  uint8_t BitH = (Imm & 0x00080000ULL) != 0;

  uint8_t EncVal = BitA;
  EncVal <<= 1;
  EncVal |= BitB;
  EncVal <<= 1;
  EncVal |= BitC;
  EncVal <<= 1;
  EncVal |= BitD;
  EncVal <<= 1;
  EncVal |= BitE;
  EncVal <<= 1;
  EncVal |= BitF;
  EncVal <<= 1;
  EncVal |= BitG;
  EncVal <<= 1;
  EncVal |= BitH;
  return EncVal;
}

static inline uint64_t decodeAdvSIMDModImmType11(uint8_t Imm) {
  uint64_t EncVal = 0;
  if (Imm & 0x80) EncVal |= 0x80000000ULL;
  if (Imm & 0x40) EncVal |= 0x3e000000ULL;
  else            EncVal |= 0x40000000ULL;
  if (Imm & 0x20) EncVal |= 0x01000000ULL;
  if (Imm & 0x10) EncVal |= 0x00800000ULL;
  if (Imm & 0x08) EncVal |= 0x00400000ULL;
  if (Imm & 0x04) EncVal |= 0x00200000ULL;
  if (Imm & 0x02) EncVal |= 0x00100000ULL;
  if (Imm & 0x01) EncVal |= 0x00080000ULL;
  return (EncVal << 32) | EncVal;
}
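
// For example (illustrative): this is the single-precision FMOV-style pattern
// replicated into both 32-bit lanes. 2.0f is 0x40000000, so
//   isAdvSIMDModImmType11(0x4000000040000000ULL)     == true
//   encodeAdvSIMDModImmType11(0x4000000040000000ULL) == 0x00
//   decodeAdvSIMDModImmType11(0x00)                  == 0x4000000040000000ULL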

// aBbbbbbb bbcdefgh 0x00 0x00 0x00 0x00 0x00 0x00
static inline bool isAdvSIMDModImmType12(uint64_t Imm) {
  uint64_t BString = (Imm & 0x7fc0000000000000ULL) >> 54;
  return ((BString == 0xff || BString == 0x100) &&
         ((Imm & 0x0000ffffffffffffULL) == 0));
}

static inline uint8_t encodeAdvSIMDModImmType12(uint64_t Imm) {
  uint8_t BitA = (Imm & 0x8000000000000000ULL) != 0;
  uint8_t BitB = (Imm & 0x0040000000000000ULL) != 0;
  uint8_t BitC = (Imm & 0x0020000000000000ULL) != 0;
  uint8_t BitD = (Imm & 0x0010000000000000ULL) != 0;
  uint8_t BitE = (Imm & 0x0008000000000000ULL) != 0;
  uint8_t BitF = (Imm & 0x0004000000000000ULL) != 0;
  uint8_t BitG = (Imm & 0x0002000000000000ULL) != 0;
  uint8_t BitH = (Imm & 0x0001000000000000ULL) != 0;

  uint8_t EncVal = BitA;
  EncVal <<= 1;
  EncVal |= BitB;
  EncVal <<= 1;
  EncVal |= BitC;
  EncVal <<= 1;
  EncVal |= BitD;
  EncVal <<= 1;
  EncVal |= BitE;
  EncVal <<= 1;
  EncVal |= BitF;
  EncVal <<= 1;
  EncVal |= BitG;
  EncVal <<= 1;
  EncVal |= BitH;
  return EncVal;
}

static inline uint64_t decodeAdvSIMDModImmType12(uint8_t Imm) {
  uint64_t EncVal = 0;
  if (Imm & 0x80) EncVal |= 0x8000000000000000ULL;
  if (Imm & 0x40) EncVal |= 0x3fc0000000000000ULL;
  else            EncVal |= 0x4000000000000000ULL;
  if (Imm & 0x20) EncVal |= 0x0020000000000000ULL;
  if (Imm & 0x10) EncVal |= 0x0010000000000000ULL;
  if (Imm & 0x08) EncVal |= 0x0008000000000000ULL;
  if (Imm & 0x04) EncVal |= 0x0004000000000000ULL;
  if (Imm & 0x02) EncVal |= 0x0002000000000000ULL;
  if (Imm & 0x01) EncVal |= 0x0001000000000000ULL;
  return (EncVal << 32) | EncVal;
}

/// Returns true if Imm is the concatenation of a repeating pattern of type T.
template <typename T>
static inline bool isSVEMaskOfIdenticalElements(int64_t Imm) {
  union {
    int64_t Whole;
    T Parts[sizeof(int64_t)/sizeof(T)];
  } Vec { Imm };

  return all_of(Vec.Parts, [Vec](T Elem) { return Elem == Vec.Parts[0]; });
}

/// Returns true if Imm is valid for CPY/DUP.
template <typename T>
static inline bool isSVECpyImm(int64_t Imm) {
  bool IsImm8 = int8_t(Imm) == Imm;
  bool IsImm16 = int16_t(Imm & ~0xff) == Imm;

  if (std::is_same<int8_t, typename std::make_signed<T>::type>::value)
    return IsImm8 || uint8_t(Imm) == Imm;

  if (std::is_same<int16_t, typename std::make_signed<T>::type>::value)
    return IsImm8 || IsImm16 || uint16_t(Imm & ~0xff) == Imm;

  return IsImm8 || IsImm16;
}
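
// For example (illustrative): the CPY/DUP immediate is an 8-bit value,
// optionally shifted left by 8 for elements of 16 bits or wider, so
//   isSVECpyImm<int16_t>(0x1200) == true   // 0x12 << 8
//   isSVECpyImm<int16_t>(0x1234) == false  // needs more than 8 significant bits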

/// Returns true if Imm is valid for ADD/SUB.
template <typename T>
static inline bool isSVEAddSubImm(int64_t Imm) {
  bool IsInt8t =
      std::is_same<int8_t, typename std::make_signed<T>::type>::value;
  return uint8_t(Imm) == Imm || (!IsInt8t && uint16_t(Imm & ~0xff) == Imm);
}

/// Return true if Imm is valid for DUPM and has no single CPY/DUP equivalent.
static inline bool isSVEMoveMaskPreferredLogicalImmediate(int64_t Imm) {
  union {
    int64_t D;
    int32_t S[2];
    int16_t H[4];
    int8_t B[8];
  } Vec = { Imm };

  if (isSVECpyImm<int64_t>(Vec.D))
    return false;

  if (isSVEMaskOfIdenticalElements<int32_t>(Imm) &&
      isSVECpyImm<int32_t>(Vec.S[0]))
    return false;

  if (isSVEMaskOfIdenticalElements<int16_t>(Imm) &&
      isSVECpyImm<int16_t>(Vec.H[0]))
    return false;

  if (isSVEMaskOfIdenticalElements<int8_t>(Imm) &&
      isSVECpyImm<int8_t>(Vec.B[0]))
    return false;

  return isLogicalImmediate(Vec.D, 64);
}
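
// For example (illustrative): 0x0ff00ff00ff00ff0 is a valid 64-bit logical
// immediate (a rotated run of ones repeated per 16-bit element) but no element
// size yields a CPY/DUP-able splat value, so
//   isSVEMoveMaskPreferredLogicalImmediate(0x0ff00ff00ff00ff0LL) == true
// whereas a byte splat such as 0x3333333333333333 is better served by DUP and
// returns false.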

inline static bool isAnyMOVZMovAlias(uint64_t Value, int RegWidth) {
  for (int Shift = 0; Shift <= RegWidth - 16; Shift += 16)
    if ((Value & ~(0xffffULL << Shift)) == 0)
      return true;

  return false;
}

inline static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth) {
  if (RegWidth == 32)
    Value &= 0xffffffffULL;

  // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
  if (Value == 0 && Shift != 0)
    return false;

  return (Value & ~(0xffffULL << Shift)) == 0;
}

inline static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth) {
  // MOVZ takes precedence over MOVN.
  if (isAnyMOVZMovAlias(Value, RegWidth))
    return false;

  Value = ~Value;
  if (RegWidth == 32)
    Value &= 0xffffffffULL;

  return isMOVZMovAlias(Value, Shift, RegWidth);
}
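
// For example (illustrative): with a 32-bit register width,
//   isMOVZMovAlias(0x12340000, 16, 32) == true   // "movz w0, #0x1234, lsl #16"
//   isMOVNMovAlias(0xffff1234, 0, 32)  == true   // "movn w0, #0xedcb"
// since 0xffff1234 is the bitwise NOT of 0x0000edcb.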

inline static bool isAnyMOVWMovAlias(uint64_t Value, int RegWidth) {
  if (isAnyMOVZMovAlias(Value, RegWidth))
    return true;

  // It's not a MOVZ, but it might be a MOVN.
  Value = ~Value;
  if (RegWidth == 32)
    Value &= 0xffffffffULL;

  return isAnyMOVZMovAlias(Value, RegWidth);
}

} // end namespace AArch64_AM

} // end namespace llvm

#endif