//===-- ARMAddressingModes.h - ARM Addressing Modes -------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the ARM addressing mode implementation stuff.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_ARM_ARMADDRESSINGMODES_H
#define LLVM_TARGET_ARM_ARMADDRESSINGMODES_H

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>

namespace llvm {

/// ARM_AM - ARM Addressing Mode Stuff
namespace ARM_AM {
  enum ShiftOpc {
    no_shift = 0,
    asr,
    lsl,
    lsr,
    ror,
    rrx
  };

  enum AddrOpc {
    sub = 0,
    add
  };

  static inline const char *getAddrOpcStr(AddrOpc Op) {
    return Op == sub ? "-" : "";
  }

  static inline const char *getShiftOpcStr(ShiftOpc Op) {
    switch (Op) {
    default: llvm_unreachable("Unknown shift opc!");
    case ARM_AM::asr: return "asr";
    case ARM_AM::lsl: return "lsl";
    case ARM_AM::lsr: return "lsr";
    case ARM_AM::ror: return "ror";
    case ARM_AM::rrx: return "rrx";
    }
  }

  static inline unsigned getShiftOpcEncoding(ShiftOpc Op) {
    switch (Op) {
    default: llvm_unreachable("Unknown shift opc!");
    case ARM_AM::asr: return 2;
    case ARM_AM::lsl: return 0;
    case ARM_AM::lsr: return 1;
    case ARM_AM::ror: return 3;
    }
  }

  enum AMSubMode {
    bad_am_submode = 0,
    ia,
    ib,
    da,
    db
  };

  static inline const char *getAMSubModeStr(AMSubMode Mode) {
    switch (Mode) {
    default: llvm_unreachable("Unknown addressing sub-mode!");
    case ARM_AM::ia: return "ia";
    case ARM_AM::ib: return "ib";
    case ARM_AM::da: return "da";
    case ARM_AM::db: return "db";
    }
  }

  /// rotr32 - Rotate a 32-bit unsigned value right by a specified # bits.
  ///
  static inline unsigned rotr32(unsigned Val, unsigned Amt) {
    assert(Amt < 32 && "Invalid rotate amount");
    return (Val >> Amt) | (Val << ((32-Amt)&31));
  }

  /// rotl32 - Rotate a 32-bit unsigned value left by a specified # bits.
  ///
  static inline unsigned rotl32(unsigned Val, unsigned Amt) {
    assert(Amt < 32 && "Invalid rotate amount");
    return (Val << Amt) | (Val >> ((32-Amt)&31));
  }

  //===--------------------------------------------------------------------===//
  // Addressing Mode #1: shift_operand with registers
  //===--------------------------------------------------------------------===//
  //
  // This 'addressing mode' is used for arithmetic instructions.  It can
  // represent things like:
  //   reg
  //   reg [asr|lsl|lsr|ror|rrx] reg
  //   reg [asr|lsl|lsr|ror|rrx] imm
  //
  // This is stored as three operands [rega, regb, opc].  The first is the base
  // reg, the second is the shift amount (or reg0 if not present or imm).  The
  // third operand encodes the shift opcode and the imm if a reg isn't present.
  //
  static inline unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm) {
    return ShOp | (Imm << 3);
  }
  static inline unsigned getSORegOffset(unsigned Op) {
    return Op >> 3;
  }
  static inline ShiftOpc getSORegShOp(unsigned Op) {
    return (ShiftOpc)(Op & 7);
  }
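
  // For example, a shifter operand of "lsl #4" packs as
  //   getSORegOpc(lsl, 4) == (2 | (4 << 3)) == 34,
  // and decodes back with
  //   getSORegShOp(34) == lsl, getSORegOffset(34) == 4.
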
  /// getSOImmValImm - Given an encoded imm field for the reg/imm form, return
  /// the 8-bit imm value.
  static inline unsigned getSOImmValImm(unsigned Imm) {
    return Imm & 0xFF;
  }
  /// getSOImmValRot - Given an encoded imm field for the reg/imm form, return
  /// the rotate amount.
  static inline unsigned getSOImmValRot(unsigned Imm) {
    return (Imm >> 8) * 2;
  }

  /// getSOImmValRotate - Try to handle Imm with an immediate shifter operand,
  /// computing the rotate amount to use.  If this immediate value cannot be
  /// handled with a single shifter-op, determine a good rotate amount that
  /// will take a maximal chunk of bits out of the immediate.
  static inline unsigned getSOImmValRotate(unsigned Imm) {
    // 8-bit (or less) immediates are trivially shifter_operands with a rotate
    // of zero.
    if ((Imm & ~255U) == 0) return 0;

    // Use CTZ to compute the rotate amount.
    unsigned TZ = countTrailingZeros(Imm);

    // Rotate amount must be even.  Something like 0x200 must be rotated 8
    // bits, not 9.
    unsigned RotAmt = TZ & ~1;

    // If we can handle this spread, return it.
    if ((rotr32(Imm, RotAmt) & ~255U) == 0)
      return (32-RotAmt)&31;  // HW rotates right, not left.

    // For values like 0xF000000F, we should ignore the low 6 bits, then
    // retry the hunt.
    if (Imm & 63U) {
      unsigned TZ2 = countTrailingZeros(Imm & ~63U);
      unsigned RotAmt2 = TZ2 & ~1;
      if ((rotr32(Imm, RotAmt2) & ~255U) == 0)
        return (32-RotAmt2)&31;  // HW rotates right, not left.
    }

    // Otherwise, we have no way to cover this span of bits with a single
    // shifter_op immediate.  Return a chunk of bits that will be useful to
    // handle.
    return (32-RotAmt)&31;  // HW rotates right, not left.
  }

  /// getSOImmVal - Given a 32-bit immediate, if it is something that can fit
  /// into a shifter_operand immediate operand, return the 12-bit encoding for
  /// it.  If not, return -1.
  static inline int getSOImmVal(unsigned Arg) {
    // 8-bit (or less) immediates are trivially shifter_operands with a rotate
    // of zero.
    if ((Arg & ~255U) == 0) return Arg;

    unsigned RotAmt = getSOImmValRotate(Arg);

    // If this cannot be handled with a single shifter_op, bail out.
    if (rotr32(~255U, RotAmt) & Arg)
      return -1;

    // Encode this correctly.
    return rotl32(Arg, RotAmt) | ((RotAmt>>1) << 8);
  }
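
  // Worked example: 0xFF0 is 0xFF rotated right by 28 bits, so
  //   getSOImmValRotate(0xFF0) == 28 and
  //   getSOImmVal(0xFF0) == 0xEFF (imm8 == 0xFF, rotate field == 28/2 == 14).
  // Decoding with getSOImmValImm(0xEFF) and getSOImmValRot(0xEFF) recovers
  // 0xFF and 28, and rotr32(0xFF, 28) == 0xFF0 again.
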
  /// isSOImmTwoPartVal - Return true if the specified value can be obtained by
  /// or'ing together two SOImmVal's.
  static inline bool isSOImmTwoPartVal(unsigned V) {
    // If this can be handled with a single shifter_op, bail out.
    V = rotr32(~255U, getSOImmValRotate(V)) & V;
    if (V == 0)
      return false;

    // If this can be handled with two shifter_op's, accept.
    V = rotr32(~255U, getSOImmValRotate(V)) & V;
    return V == 0;
  }

  /// getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal,
  /// return the first chunk of it.
  static inline unsigned getSOImmTwoPartFirst(unsigned V) {
    return rotr32(255U, getSOImmValRotate(V)) & V;
  }

  /// getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal,
  /// return the second chunk of it.
  static inline unsigned getSOImmTwoPartSecond(unsigned V) {
    // Mask out the first hunk.
    V = rotr32(~255U, getSOImmValRotate(V)) & V;

    // Take what's left.
    assert(V == (rotr32(255U, getSOImmValRotate(V)) & V));
    return V;
  }
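
  // Worked example: 0x00FF00FF is not a single shifter_op immediate, but it
  // is a two-part value:
  //   isSOImmTwoPartVal(0x00FF00FF) == true,
  //   getSOImmTwoPartFirst(0x00FF00FF) == 0x000000FF,
  //   getSOImmTwoPartSecond(0x00FF00FF) == 0x00FF0000,
  // and or'ing the two chunks back together reproduces the original value.
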
  /// getThumbImmValShift - Try to handle Imm with an 8-bit immediate followed
  /// by a left shift.  Returns the shift amount to use.
  static inline unsigned getThumbImmValShift(unsigned Imm) {
    // 8-bit (or less) immediates are trivially immediate operands with a shift
    // of zero.
    if ((Imm & ~255U) == 0) return 0;

    // Use CTZ to compute the shift amount.
    return countTrailingZeros(Imm);
  }

  /// isThumbImmShiftedVal - Return true if the specified value can be obtained
  /// by left shifting an 8-bit immediate.
  static inline bool isThumbImmShiftedVal(unsigned V) {
    // If this can be handled as an 8-bit immediate shifted left, everything
    // above the shifted 8-bit window must be clear.
    V = (~255U << getThumbImmValShift(V)) & V;
    return V == 0;
  }

  /// getThumbImm16ValShift - Try to handle Imm with a 16-bit immediate
  /// followed by a left shift.  Returns the shift amount to use.
  static inline unsigned getThumbImm16ValShift(unsigned Imm) {
    // 16-bit (or less) immediates are trivially immediate operands with a
    // shift of zero.
    if ((Imm & ~65535U) == 0) return 0;

    // Use CTZ to compute the shift amount.
    return countTrailingZeros(Imm);
  }

  /// isThumbImm16ShiftedVal - Return true if the specified value can be
  /// obtained by left shifting a 16-bit immediate.
  static inline bool isThumbImm16ShiftedVal(unsigned V) {
    // If this can be handled as a 16-bit immediate shifted left, everything
    // above the shifted 16-bit window must be clear.
    V = (~65535U << getThumbImm16ValShift(V)) & V;
    return V == 0;
  }

  /// getThumbImmNonShiftedVal - If V is a value that satisfies
  /// isThumbImmShiftedVal, return the non-shifted value.
  static inline unsigned getThumbImmNonShiftedVal(unsigned V) {
    return V >> getThumbImmValShift(V);
  }


  /// getT2SOImmValSplat - Return the 12-bit encoded representation
  /// if the specified value can be obtained by splatting the low 8 bits
  /// into every other byte or every byte of a 32-bit value. i.e.,
  ///     00000000 00000000 00000000 abcdefgh    control = 0
  ///     00000000 abcdefgh 00000000 abcdefgh    control = 1
  ///     abcdefgh 00000000 abcdefgh 00000000    control = 2
  ///     abcdefgh abcdefgh abcdefgh abcdefgh    control = 3
  /// Return -1 if none of the above apply.
  /// See ARM Reference Manual A6.3.2.
  static inline int getT2SOImmValSplatVal(unsigned V) {
    unsigned u, Vs, Imm;
    // control = 0
    if ((V & 0xffffff00) == 0)
      return V;

    // If the value is zeroes in the first byte, just shift those off.
    Vs = ((V & 0xff) == 0) ? V >> 8 : V;
    // Any passing value only has 8 bits of payload, splatted across the word.
    Imm = Vs & 0xff;
    // Likewise, any passing values have the payload splatted into the 3rd
    // byte.
    u = Imm | (Imm << 16);

    // control = 1 or 2
    if (Vs == u)
      return (((Vs == V) ? 1 : 2) << 8) | Imm;

    // control = 3
    if (Vs == (u | (u << 8)))
      return (3 << 8) | Imm;

    return -1;
  }

  /// getT2SOImmValRotateVal - Return the 12-bit encoded representation if the
  /// specified value is a rotated 8-bit value.  Return -1 if no rotation
  /// encoding is possible.
  /// See ARM Reference Manual A6.3.2.
  static inline int getT2SOImmValRotateVal(unsigned V) {
    unsigned RotAmt = countLeadingZeros(V);
    if (RotAmt >= 24)
      return -1;

    // If 'Arg' can be handled with a single shifter_op return the value.
    if ((rotr32(0xff000000U, RotAmt) & V) == V)
      return (rotr32(V, 24 - RotAmt) & 0x7f) | ((RotAmt + 8) << 7);

    return -1;
  }

  /// getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit
  /// into a Thumb-2 shifter_operand immediate operand, return the 12-bit
  /// encoding for it.  If not, return -1.
  /// See ARM Reference Manual A6.3.2.
  static inline int getT2SOImmVal(unsigned Arg) {
    // If 'Arg' is an 8-bit splat, then get the encoded value.
    int Splat = getT2SOImmValSplatVal(Arg);
    if (Splat != -1)
      return Splat;

    // If 'Arg' can be handled with a single shifter_op return the value.
    int Rot = getT2SOImmValRotateVal(Arg);
    if (Rot != -1)
      return Rot;

    return -1;
  }

  static inline unsigned getT2SOImmValRotate(unsigned V) {
    if ((V & ~255U) == 0) return 0;
    // Use CTZ to compute the rotate amount.
    unsigned RotAmt = countTrailingZeros(V);
    return (32 - RotAmt) & 31;
  }

  static inline bool isT2SOImmTwoPartVal(unsigned Imm) {
    unsigned V = Imm;
    // Passing values can be any combination of splat values and shifter
    // values. If this can be handled with a single shifter or splat, bail
    // out. Those should be handled directly, not with a two-part val.
    if (getT2SOImmValSplatVal(V) != -1)
      return false;
    V = rotr32 (~255U, getT2SOImmValRotate(V)) & V;
    if (V == 0)
      return false;

    // If this can be handled as an immediate, accept.
    if (getT2SOImmVal(V) != -1) return true;

    // Likewise, try masking out a splat value first.
    V = Imm;
    if (getT2SOImmValSplatVal(V & 0xff00ff00U) != -1)
      V &= ~0xff00ff00U;
    else if (getT2SOImmValSplatVal(V & 0x00ff00ffU) != -1)
      V &= ~0x00ff00ffU;
    // If what's left can be handled as an immediate, accept.
    if (getT2SOImmVal(V) != -1) return true;

    // Otherwise, do not accept.
    return false;
  }

  static inline unsigned getT2SOImmTwoPartFirst(unsigned Imm) {
    assert (isT2SOImmTwoPartVal(Imm) &&
            "Immediate cannot be encoded as two part immediate!");
    // Try a shifter operand as one part.
    unsigned V = rotr32 (~255, getT2SOImmValRotate(Imm)) & Imm;
    // If the rest is encodable as an immediate, then return it.
    if (getT2SOImmVal(V) != -1) return V;

    // Try masking out a splat value first.
    if (getT2SOImmValSplatVal(Imm & 0xff00ff00U) != -1)
      return Imm & 0xff00ff00U;

    // The other splat is all that's left as an option.
    assert (getT2SOImmValSplatVal(Imm & 0x00ff00ffU) != -1);
    return Imm & 0x00ff00ffU;
  }

  static inline unsigned getT2SOImmTwoPartSecond(unsigned Imm) {
    // Mask out the first hunk.
    Imm ^= getT2SOImmTwoPartFirst(Imm);
    // Return what's left.
    assert (getT2SOImmVal(Imm) != -1 &&
            "Unable to encode second part of T2 two part SO immediate");
    return Imm;
  }
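
  // Worked examples for the Thumb-2 splat forms:
  //   getT2SOImmValSplatVal(0x000000AB) == 0x0AB   (control = 0)
  //   getT2SOImmValSplatVal(0x00AB00AB) == 0x1AB   (control = 1)
  //   getT2SOImmValSplatVal(0xAB00AB00) == 0x2AB   (control = 2)
  //   getT2SOImmValSplatVal(0xABABABAB) == 0x3AB   (control = 3)
  //   getT2SOImmValSplatVal(0x00AB00CD) == -1      (not a splat)
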
  //===--------------------------------------------------------------------===//
  // Addressing Mode #2
  //===--------------------------------------------------------------------===//
  //
  // This is used for most simple load/store instructions.
  //
  // addrmode2 := reg +/- reg shop imm
  // addrmode2 := reg +/- imm12
  //
  // The first operand is always a Reg.  The second operand is a reg if in
  // reg/reg form, otherwise it's reg#0.  The third field encodes the operation
  // in bit 12, the immediate in bits 0-11, and the shift op in bits 13-15.
  // Bits 16-17 of the same field encode the index mode.
  //
  // If this addressing mode is a frame index (before prolog/epilog insertion
  // and code rewriting), this operand will have the form:  FI#, reg0, <offs>
  // with no shift amount for the frame offset.
  //
  static inline unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO,
                                   unsigned IdxMode = 0) {
    assert(Imm12 < (1 << 12) && "Imm too large!");
    bool isSub = Opc == sub;
    return Imm12 | ((int)isSub << 12) | (SO << 13) | (IdxMode << 16);
  }
  static inline unsigned getAM2Offset(unsigned AM2Opc) {
    return AM2Opc & ((1 << 12)-1);
  }
  static inline AddrOpc getAM2Op(unsigned AM2Opc) {
    return ((AM2Opc >> 12) & 1) ? sub : add;
  }
  static inline ShiftOpc getAM2ShiftOpc(unsigned AM2Opc) {
    return (ShiftOpc)((AM2Opc >> 13) & 7);
  }
  static inline unsigned getAM2IdxMode(unsigned AM2Opc) {
    return (AM2Opc >> 16);
  }
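
  // For example, an offset of "-4" with no shift and no index mode packs as
  //   getAM2Opc(sub, 4, no_shift) == 0x1004,
  // and getAM2Op(0x1004) == sub, getAM2Offset(0x1004) == 4,
  // getAM2ShiftOpc(0x1004) == no_shift, getAM2IdxMode(0x1004) == 0.
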
  //===--------------------------------------------------------------------===//
  // Addressing Mode #3
  //===--------------------------------------------------------------------===//
  //
  // This is used for sign-extending loads, and load/store-pair instructions.
  //
  // addrmode3 := reg +/- reg
  // addrmode3 := reg +/- imm8
  //
  // The first operand is always a Reg.  The second operand is a reg if in
  // reg/reg form, otherwise it's reg#0.  The third field encodes the operation
  // in bit 8 and the immediate in bits 0-7.  Bits 9-10 of the same field
  // encode the index mode.

  /// getAM3Opc - This function encodes the addrmode3 opc field.
  static inline unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset,
                                   unsigned IdxMode = 0) {
    bool isSub = Opc == sub;
    return ((int)isSub << 8) | Offset | (IdxMode << 9);
  }
  static inline unsigned char getAM3Offset(unsigned AM3Opc) {
    return AM3Opc & 0xFF;
  }
  static inline AddrOpc getAM3Op(unsigned AM3Opc) {
    return ((AM3Opc >> 8) & 1) ? sub : add;
  }
  static inline unsigned getAM3IdxMode(unsigned AM3Opc) {
    return (AM3Opc >> 9);
  }

  //===--------------------------------------------------------------------===//
  // Addressing Mode #4
  //===--------------------------------------------------------------------===//
  //
  // This is used for load / store multiple instructions.
  //
  // addrmode4 := reg, <mode>
  //
  // The four modes are:
  //    IA - Increment after
  //    IB - Increment before
  //    DA - Decrement after
  //    DB - Decrement before
  // For VFP instructions, only the IA and DB modes are valid.

  static inline AMSubMode getAM4SubMode(unsigned Mode) {
    return (AMSubMode)(Mode & 0x7);
  }

  static inline unsigned getAM4ModeImm(AMSubMode SubMode) {
    return (int)SubMode;
  }

  //===--------------------------------------------------------------------===//
  // Addressing Mode #5
  //===--------------------------------------------------------------------===//
  //
  // This is used for coprocessor instructions, such as FP load/stores.
  //
  // addrmode5 := reg +/- imm8*4
  //
  // The first operand is always a Reg.  The second operand encodes the
  // operation in bit 8 and the immediate in bits 0-7.

  /// getAM5Opc - This function encodes the addrmode5 opc field.
  static inline unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset) {
    bool isSub = Opc == sub;
    return ((int)isSub << 8) | Offset;
  }
  static inline unsigned char getAM5Offset(unsigned AM5Opc) {
    return AM5Opc & 0xFF;
  }
  static inline AddrOpc getAM5Op(unsigned AM5Opc) {
    return ((AM5Opc >> 8) & 1) ? sub : add;
  }

  //===--------------------------------------------------------------------===//
  // Addressing Mode #6
  //===--------------------------------------------------------------------===//
  //
  // This is used for NEON load / store instructions.
  //
  // addrmode6 := reg with optional alignment
  //
  // This is stored in two operands [regaddr, align].  The first is the
  // address register.  The second operand is the value of the alignment
  // specifier in bytes or zero if no explicit alignment.
  // Valid alignments depend on the specific instruction.

  //===--------------------------------------------------------------------===//
  // NEON Modified Immediates
  //===--------------------------------------------------------------------===//
  //
  // Several NEON instructions (e.g., VMOV) take a "modified immediate"
  // vector operand, where a small immediate encoded in the instruction
  // specifies a full NEON vector value.  These modified immediates are
  // represented here as encoded integers.  The low 8 bits hold the immediate
  // value; bit 12 holds the "Op" field of the instruction, and bits 11-8 hold
  // the "Cmode" field of the instruction.  The interfaces below treat the
  // Op and Cmode values as a single 5-bit value.

  static inline unsigned createNEONModImm(unsigned OpCmode, unsigned Val) {
    return (OpCmode << 8) | Val;
  }
  static inline unsigned getNEONModImmOpCmode(unsigned ModImm) {
    return (ModImm >> 8) & 0x1f;
  }
  static inline unsigned getNEONModImmVal(unsigned ModImm) {
    return ModImm & 0xff;
  }
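
  // For example, Op:Cmode 0b01110 (0xe) with Imm8 0x1F packs as
  //   createNEONModImm(0xe, 0x1f) == 0xE1F,
  // which getNEONModImmOpCmode/getNEONModImmVal split back into 0xe and 0x1f,
  // and which decodeNEONModImm (below) decodes to an 8-bit element value of
  // 0x1F (splatted into every byte of the vector).
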
  /// decodeNEONModImm - Decode a NEON modified immediate value into the
  /// element value and the element size in bits.  (If the element size is
  /// smaller than the vector, it is splatted into all the elements.)
  static inline uint64_t decodeNEONModImm(unsigned ModImm, unsigned &EltBits) {
    unsigned OpCmode = getNEONModImmOpCmode(ModImm);
    unsigned Imm8 = getNEONModImmVal(ModImm);
    uint64_t Val = 0;

    if (OpCmode == 0xe) {
      // 8-bit vector elements
      Val = Imm8;
      EltBits = 8;
    } else if ((OpCmode & 0xc) == 0x8) {
      // 16-bit vector elements
      unsigned ByteNum = (OpCmode & 0x6) >> 1;
      Val = Imm8 << (8 * ByteNum);
      EltBits = 16;
    } else if ((OpCmode & 0x8) == 0) {
      // 32-bit vector elements, zero with one byte set
      unsigned ByteNum = (OpCmode & 0x6) >> 1;
      Val = Imm8 << (8 * ByteNum);
      EltBits = 32;
    } else if ((OpCmode & 0xe) == 0xc) {
      // 32-bit vector elements, one byte with low bits set
      unsigned ByteNum = 1 + (OpCmode & 0x1);
      Val = (Imm8 << (8 * ByteNum)) | (0xffff >> (8 * (2 - ByteNum)));
      EltBits = 32;
    } else if (OpCmode == 0x1e) {
      // 64-bit vector elements
      for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) {
        if ((ModImm >> ByteNum) & 1)
          Val |= (uint64_t)0xff << (8 * ByteNum);
      }
      EltBits = 64;
    } else {
      llvm_unreachable("Unsupported NEON immediate");
    }
    return Val;
  }

  AMSubMode getLoadStoreMultipleSubMode(int Opcode);

  //===--------------------------------------------------------------------===//
  // Floating-point Immediates
  //
  static inline float getFPImmFloat(unsigned Imm) {
    // We expect an 8-bit binary encoding of a floating-point number here.
    union {
      uint32_t I;
      float F;
    } FPUnion;

    uint8_t Sign = (Imm >> 7) & 0x1;
    uint8_t Exp = (Imm >> 4) & 0x7;
    uint8_t Mantissa = Imm & 0xf;

    //   8-bit FP    IEEE Float Encoding
    //   abcd efgh   aBbbbbbc defgh000 00000000 00000000
    //
    // where B = NOT(b);

    FPUnion.I = 0;
    FPUnion.I |= Sign << 31;
    FPUnion.I |= ((Exp & 0x4) != 0 ? 0 : 1) << 30;
    FPUnion.I |= ((Exp & 0x4) != 0 ? 0x1f : 0) << 25;
    FPUnion.I |= (Exp & 0x3) << 23;
    FPUnion.I |= Mantissa << 19;
    return FPUnion.F;
  }

  /// getFP32Imm - Return an 8-bit floating-point version of the 32-bit
  /// floating-point value.  If the value cannot be represented as an 8-bit
  /// floating-point value, then return -1.
  static inline int getFP32Imm(const APInt &Imm) {
    uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
    int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127;  // -126 to 127
    int64_t Mantissa = Imm.getZExtValue() & 0x7fffff;  // 23 bits

    // We can handle 4 bits of mantissa.
    // mantissa = (16+UInt(e:f:g:h))/16.
    if (Mantissa & 0x7ffff)
      return -1;
    Mantissa >>= 19;
    if ((Mantissa & 0xf) != Mantissa)
      return -1;

    // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
    if (Exp < -3 || Exp > 4)
      return -1;
    Exp = ((Exp+3) & 0x7) ^ 4;

    return ((int)Sign << 7) | (Exp << 4) | Mantissa;
  }
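
  // For example, 1.0f (IEEE bits 0x3F800000) has a zero mantissa fraction and
  // an unbiased exponent of 0, so it is representable:
  //   getFP32Imm(APInt(32, 0x3F800000)) == 0x70,
  // and getFPImmFloat(0x70) == 1.0f.  A value such as 0.1f is not exactly
  // representable in this 8-bit format, so getFP32Imm returns -1 for it.
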
  static inline int getFP32Imm(const APFloat &FPImm) {
    return getFP32Imm(FPImm.bitcastToAPInt());
  }

  /// getFP64Imm - Return an 8-bit floating-point version of the 64-bit
  /// floating-point value.  If the value cannot be represented as an 8-bit
  /// floating-point value, then return -1.
  static inline int getFP64Imm(const APInt &Imm) {
    uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
    int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023;  // -1022 to 1023
    uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffULL;

    // We can handle 4 bits of mantissa.
    // mantissa = (16+UInt(e:f:g:h))/16.
    if (Mantissa & 0xffffffffffffULL)
      return -1;
    Mantissa >>= 48;
    if ((Mantissa & 0xf) != Mantissa)
      return -1;

    // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
    if (Exp < -3 || Exp > 4)
      return -1;
    Exp = ((Exp+3) & 0x7) ^ 4;

    return ((int)Sign << 7) | (Exp << 4) | Mantissa;
  }

  static inline int getFP64Imm(const APFloat &FPImm) {
    return getFP64Imm(FPImm.bitcastToAPInt());
  }

} // end namespace ARM_AM
} // end namespace llvm

#endif