1 //===- subzero/src/IceAssemblerARM32.cpp - Assembler for ARM32 --*- C++ -*-===//
2 //
3 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
4 // for details. All rights reserved. Use of this source code is governed by a
5 // BSD-style license that can be found in the LICENSE file.
6 //
7 // Modified by the Subzero authors.
8 //
9 //===----------------------------------------------------------------------===//
10 //
11 // The Subzero Code Generator
12 //
13 // This file is distributed under the University of Illinois Open Source
14 // License. See LICENSE.TXT for details.
15 //
16 //===----------------------------------------------------------------------===//
17 ///
18 /// \file
19 /// \brief Implements the Assembler class for ARM32.
20 ///
21 //===----------------------------------------------------------------------===//
22
23 #include "IceAssemblerARM32.h"
24 #include "IceCfgNode.h"
25 #include "IceUtils.h"
26
27 namespace {
28
29 using namespace Ice;
30 using namespace Ice::ARM32;
31
32 using WordType = uint32_t;
33 static constexpr IValueT kWordSize = sizeof(WordType);
34
35 // The following define individual bits.
36 static constexpr IValueT B0 = 1;
37 static constexpr IValueT B1 = 1 << 1;
38 static constexpr IValueT B2 = 1 << 2;
39 static constexpr IValueT B3 = 1 << 3;
40 static constexpr IValueT B4 = 1 << 4;
41 static constexpr IValueT B5 = 1 << 5;
42 static constexpr IValueT B6 = 1 << 6;
43 static constexpr IValueT B7 = 1 << 7;
44 static constexpr IValueT B8 = 1 << 8;
45 static constexpr IValueT B9 = 1 << 9;
46 static constexpr IValueT B10 = 1 << 10;
47 static constexpr IValueT B11 = 1 << 11;
48 static constexpr IValueT B12 = 1 << 12;
49 static constexpr IValueT B13 = 1 << 13;
50 static constexpr IValueT B14 = 1 << 14;
51 static constexpr IValueT B15 = 1 << 15;
52 static constexpr IValueT B16 = 1 << 16;
53 static constexpr IValueT B17 = 1 << 17;
54 static constexpr IValueT B18 = 1 << 18;
55 static constexpr IValueT B19 = 1 << 19;
56 static constexpr IValueT B20 = 1 << 20;
57 static constexpr IValueT B21 = 1 << 21;
58 static constexpr IValueT B22 = 1 << 22;
59 static constexpr IValueT B23 = 1 << 23;
60 static constexpr IValueT B24 = 1 << 24;
61 static constexpr IValueT B25 = 1 << 25;
62 static constexpr IValueT B26 = 1 << 26;
63 static constexpr IValueT B27 = 1 << 27;
64
65 // Constants used for the decoding or encoding of the individual fields of
66 // instructions. Based on ARM section A5.1.
67 static constexpr IValueT L = 1 << 20; // load (or store)
68 static constexpr IValueT W = 1 << 21; // writeback base register
69 // (or leave unchanged)
70 static constexpr IValueT B = 1 << 22; // unsigned byte (or word)
71 static constexpr IValueT U = 1 << 23; // positive (or negative)
72 // offset/index
73 static constexpr IValueT P = 1 << 24; // offset/pre-indexed
74 // addressing (or
75 // post-indexed addressing)
76
77 static constexpr IValueT kConditionShift = 28;
78 static constexpr IValueT kLinkShift = 24;
79 static constexpr IValueT kOpcodeShift = 21;
80 static constexpr IValueT kRdShift = 12;
81 static constexpr IValueT kRmShift = 0;
82 static constexpr IValueT kRnShift = 16;
83 static constexpr IValueT kRsShift = 8;
84 static constexpr IValueT kSShift = 20;
85 static constexpr IValueT kTypeShift = 25;
86
87 // Immediate instruction fields encoding.
88 static constexpr IValueT kImmed8Bits = 8;
89 static constexpr IValueT kImmed8Shift = 0;
90 static constexpr IValueT kRotateBits = 4;
91 static constexpr IValueT kRotateShift = 8;
92
93 // Shift instruction register fields encodings.
94 static constexpr IValueT kShiftImmShift = 7;
95 static constexpr IValueT kShiftImmBits = 5;
96 static constexpr IValueT kShiftShift = 5;
97 static constexpr IValueT kImmed12Bits = 12;
98 static constexpr IValueT kImm12Shift = 0;
99
100 // Rotation instructions (uxtb etc.).
101 static constexpr IValueT kRotationShift = 10;
102
103 // MemEx instructions.
104 static constexpr IValueT kMemExOpcodeShift = 20;
105
106 // Div instruction register field encodings.
107 static constexpr IValueT kDivRdShift = 16;
108 static constexpr IValueT kDivRmShift = 8;
109 static constexpr IValueT kDivRnShift = 0;
110
111 // Type of instruction encoding (bits 25-27). See ARM section A5.1
112 static constexpr IValueT kInstTypeDataRegister = 0; // i.e. 000
113 static constexpr IValueT kInstTypeDataRegShift = 0; // i.e. 000
114 static constexpr IValueT kInstTypeDataImmediate = 1; // i.e. 001
115 static constexpr IValueT kInstTypeMemImmediate = 2; // i.e. 010
116 static constexpr IValueT kInstTypeRegisterShift = 3; // i.e. 011
117
118 // Limit on number of registers in a vpush/vpop.
119 static constexpr SizeT VpushVpopMaxConsecRegs = 16;
120
121 // Offset modifier to current PC for next instruction. The offset is off by 8
122 // due to the way the ARM CPUs read PC.
123 static constexpr IOffsetT kPCReadOffset = 8;
124
125 // Mask to pull out PC offset from branch (b) instruction.
126 static constexpr int kBranchOffsetBits = 24;
127 static constexpr IOffsetT kBranchOffsetMask = 0x00ffffff;
128
IValueT encodeBool(bool B) { return B ? 1 : 0; }

IValueT encodeRotation(ARM32::AssemblerARM32::RotationValue Value) {
  return static_cast<IValueT>(Value);
}

IValueT encodeGPRRegister(RegARM32::GPRRegister Rn) {
  return static_cast<IValueT>(Rn);
}

RegARM32::GPRRegister decodeGPRRegister(IValueT R) {
  return static_cast<RegARM32::GPRRegister>(R);
}

IValueT encodeCondition(CondARM32::Cond Cond) {
  return static_cast<IValueT>(Cond);
}

IValueT encodeShift(OperandARM32::ShiftKind Shift) {
148 // Follows encoding in ARM section A8.4.1 "Constant shifts".
149 switch (Shift) {
150 case OperandARM32::kNoShift:
151 case OperandARM32::LSL:
152 return 0; // 0b00
153 case OperandARM32::LSR:
154 return 1; // 0b01
155 case OperandARM32::ASR:
156 return 2; // 0b10
157 case OperandARM32::ROR:
158 case OperandARM32::RRX:
159 return 3; // 0b11
160 }
161 llvm::report_fatal_error("Unknown Shift value");
162 return 0;
163 }
164
// Returns the Bits-wide field of Value starting at bit Shift.
IValueT mask(IValueT Value, IValueT Shift, IValueT Bits) {
  return (Value >> Shift) & ((1 << Bits) - 1);
}

// Returns true if the bit(s) in Bit are set in Value.
bool isBitSet(IValueT Bit, IValueT Value) { return (Value & Bit) == Bit; }

// Returns the GPR register at given Shift in Value.
RegARM32::GPRRegister getGPRReg(IValueT Shift, IValueT Value) {
  return decodeGPRRegister((Value >> Shift) & 0xF);
}

IValueT getEncodedGPRegNum(const Variable *Var) {
179 assert(Var->hasReg());
180 const auto Reg = Var->getRegNum();
181 return llvm::isa<Variable64On32>(Var) ? RegARM32::getI64PairFirstGPRNum(Reg)
182 : RegARM32::getEncodedGPR(Reg);
183 }
184
IValueT getEncodedSRegNum(const Variable *Var) {
  assert(Var->hasReg());
  return RegARM32::getEncodedSReg(Var->getRegNum());
}

IValueT getEncodedDRegNum(const Variable *Var) {
  return RegARM32::getEncodedDReg(Var->getRegNum());
}

IValueT getEncodedQRegNum(const Variable *Var) {
  return RegARM32::getEncodedQReg(Var->getRegNum());
}
197
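// A Q register aliases the narrower registers: Qn overlaps D(2n) and D(2n+1),
// and (for Q0-Q7) S(4n)..S(4n+3), so the mappings below simply shift the
// encoded Q register number.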
IValueT mapQRegToDReg(IValueT EncodedQReg) {
  IValueT DReg = EncodedQReg << 1;
  assert(DReg < RegARM32::getNumDRegs());
  return DReg;
}

IValueT mapQRegToSReg(IValueT EncodedQReg) {
  IValueT SReg = EncodedQReg << 2;
  assert(SReg < RegARM32::getNumSRegs());
  return SReg;
}
209
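// VFP/NEON encodings split a 5-bit register number between a 4-bit field and a
// single extra bit: an S register number has the form XXXXY (the low bit goes
// into the D/M/N bit), while a D register number has the form YXXXX (the high
// bit does). The helpers below extract the two pieces.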
IValueT getYInRegXXXXY(IValueT RegXXXXY) { return RegXXXXY & 0x1; }

IValueT getXXXXInRegXXXXY(IValueT RegXXXXY) { return RegXXXXY >> 1; }

IValueT getYInRegYXXXX(IValueT RegYXXXX) { return RegYXXXX >> 4; }

IValueT getXXXXInRegYXXXX(IValueT RegYXXXX) { return RegYXXXX & 0x0f; }
217
218 // Figures out Op/Cmode values for given Value. Returns true if able to encode.
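// Note: only the simplest form of the "expand immediate" encoding is handled
// below: an unshifted 8-bit value replicated into each element. For example,
// splatting 7 into every i32 lane uses Op=0, Cmode=0b0000, Imm8=7.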
bool encodeAdvSIMDExpandImm(IValueT Value, Type ElmtTy, IValueT &Op,
220 IValueT &Cmode, IValueT &Imm8) {
221 // TODO(kschimpf): Handle other shifted 8-bit values.
222 constexpr IValueT Imm8Mask = 0xFF;
223 if ((Value & IValueT(~Imm8Mask)) != 0)
224 return false;
225 Imm8 = Value;
226 switch (ElmtTy) {
227 case IceType_i8:
228 Op = 0;
229 Cmode = 14; // 0b1110
230 return true;
231 case IceType_i16:
232 Op = 0;
233 Cmode = 8; // 0b1000
234 return true;
235 case IceType_i32:
236 Op = 0;
237 Cmode = 0; // 0b0000
238 return true;
239 default:
240 return false;
241 }
242 }
243
244 // Defines layouts of an operand representing a (register) memory address,
245 // possibly modified by an immediate value.
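// Roughly speaking: Imm12Address is used by the word/byte loads and stores
// (ldr/str/ldrb/strb), RotatedImm8Enc3Address by the halfword forms (e.g.
// ldrh/strh), and RotatedImm8Div4Address by the VFP loads and stores, whose
// 8-bit offset is scaled by 4.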
246 enum EncodedImmAddress {
247 // Address modified by a rotated immediate 8-bit value.
248 RotatedImm8Address,
249
250 // Alternate encoding for RotatedImm8Address, where the offset is divided by 4
251 // before encoding.
252 RotatedImm8Div4Address,
253
254 // Address modified by an immediate 12-bit value.
255 Imm12Address,
256
257 // Alternate encoding 3, for an address modified by a rotated immediate 8-bit
258 // value.
259 RotatedImm8Enc3Address,
260
261 // Encoding where no immediate offset is used.
262 NoImmOffsetAddress
263 };
264
265 // The way an operand is encoded into a sequence of bits in functions
266 // encodeOperand and encodeAddress below.
267 enum EncodedOperand {
268 // Unable to encode, value left undefined.
269 CantEncode = 0,
270
271 // Value is register found.
272 EncodedAsRegister,
273
274 // Value=rrrriiiiiiii where rrrr is the rotation, and iiiiiiii is the imm8
275 // value.
276 EncodedAsRotatedImm8,
277
278 // EncodedAsImmRegOffset is a memory operand that can take three forms, based
279 // on type EncodedImmAddress:
280 //
281 // ***** RotatedImm8Address *****
282 //
283 // Value=0000000pu0w0nnnn0000iiiiiiiiiiii where nnnn is the base register Rn,
284 // p=1 if pre-indexed addressing, u=1 if offset positive, w=1 if writeback to
285 // Rn should be used, and iiiiiiiiiiii defines the rotated Imm8 value.
286 //
287 // ***** RotatedImm8Div4Address *****
288 //
  // Value=0000000pu0w0nnnn00000000iiiiiiii where nnnn=Rn, iiiiiiii=Imm8, p=1
290 // if pre-indexed addressing, u=1 if offset positive, and w=1 if writeback to
291 // Rn.
292 //
293 // ***** Imm12Address *****
294 //
295 // Value=0000000pu0w0nnnn0000iiiiiiiiiiii where nnnn is the base register Rn,
296 // p=1 if pre-indexed addressing, u=1 if offset positive, w=1 if writeback to
297 // Rn should be used, and iiiiiiiiiiii defines the immediate 12-bit value.
298 //
299 // ***** NoImmOffsetAddress *****
300 //
301 // Value=000000001000nnnn0000000000000000 where nnnn=Rn.
302 EncodedAsImmRegOffset,
303
  // Value=0000000pu0w0nnnnttttiiiiiss0mmmm where nnnn is the base register Rn,
305 // mmmm is the index register Rm, iiiii is the shift amount, ss is the shift
306 // kind, p=1 if pre-indexed addressing, u=1 if offset positive, and w=1 if
307 // writeback to Rn.
308 EncodedAsShiftRotateImm5,
309
  // Value=00000000000000000000iiiii0000000 where iiiii defines the Imm5 value
311 // to shift.
312 EncodedAsShiftImm5,
313
314 // Value=iiiiiss0mmmm where mmmm is the register to rotate, ss is the shift
315 // kind, and iiiii is the shift amount.
316 EncodedAsShiftedRegister,
317
  // Value=ssss0tt1mmmm where mmmm=Rm, tt is an encoded ShiftKind, and ssss=Rs.
319 EncodedAsRegShiftReg,
320
321 // Value is 32bit integer constant.
322 EncodedAsConstI32
323 };
324
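// An A1 "modified immediate" is an 8-bit value rotated right by twice the
// 4-bit rotation field. Worked example (illustrative): RotateAmt=4 with
// Immed8=0xff denotes the 32-bit constant 0xff ror 8 = 0xff000000.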
// Returns the rotated Imm8 field built from RotateAmt and Immed8.
IValueT encodeRotatedImm8(IValueT RotateAmt, IValueT Immed8) {
327 assert(RotateAmt < (1 << kRotateBits));
328 assert(Immed8 < (1 << kImmed8Bits));
329 return (RotateAmt << kRotateShift) | (Immed8 << kImmed8Shift);
330 }
331
332 // Encodes iiiiitt0mmmm for data-processing (2nd) operands where iiiii=Imm5,
333 // tt=Shift, and mmmm=Rm.
IValueT encodeShiftRotateImm5(IValueT Rm, OperandARM32::ShiftKind Shift,
335 IOffsetT imm5) {
336 (void)kShiftImmBits;
337 assert(imm5 < (1 << kShiftImmBits));
338 return (imm5 << kShiftImmShift) | (encodeShift(Shift) << kShiftShift) | Rm;
339 }
340
// Encodes ssss0tt1mmmm for data-processing operands where mmmm=Rm, ssss=Rs,
// and tt=Shift.
IValueT encodeShiftRotateReg(IValueT Rm, OperandARM32::ShiftKind Shift,
344 IValueT Rs) {
345 return (Rs << kRsShift) | (encodeShift(Shift) << kShiftShift) | B4 |
346 (Rm << kRmShift);
347 }
348
349 // Defines the set of registers expected in an operand.
350 enum RegSetWanted { WantGPRegs, WantSRegs, WantDRegs, WantQRegs };
351
EncodedOperand encodeOperand(const Operand *Opnd, IValueT &Value,
353 RegSetWanted WantedRegSet) {
354 Value = 0; // Make sure initialized.
355 if (const auto *Var = llvm::dyn_cast<Variable>(Opnd)) {
356 if (Var->hasReg()) {
357 switch (WantedRegSet) {
358 case WantGPRegs:
359 Value = getEncodedGPRegNum(Var);
360 break;
361 case WantSRegs:
362 Value = getEncodedSRegNum(Var);
363 break;
364 case WantDRegs:
365 Value = getEncodedDRegNum(Var);
366 break;
367 case WantQRegs:
368 Value = getEncodedQRegNum(Var);
369 break;
370 }
371 return EncodedAsRegister;
372 }
373 return CantEncode;
374 }
375 if (const auto *FlexImm = llvm::dyn_cast<OperandARM32FlexImm>(Opnd)) {
376 const IValueT Immed8 = FlexImm->getImm();
377 const IValueT Rotate = FlexImm->getRotateAmt();
378 if (!((Rotate < (1 << kRotateBits)) && (Immed8 < (1 << kImmed8Bits))))
379 return CantEncode;
380 Value = (Rotate << kRotateShift) | (Immed8 << kImmed8Shift);
381 return EncodedAsRotatedImm8;
382 }
383 if (const auto *Const = llvm::dyn_cast<ConstantInteger32>(Opnd)) {
384 Value = Const->getValue();
385 return EncodedAsConstI32;
386 }
387 if (const auto *FlexReg = llvm::dyn_cast<OperandARM32FlexReg>(Opnd)) {
388 Operand *Amt = FlexReg->getShiftAmt();
389 IValueT Rm;
390 if (encodeOperand(FlexReg->getReg(), Rm, WantGPRegs) != EncodedAsRegister)
391 return CantEncode;
392 if (const auto *Var = llvm::dyn_cast<Variable>(Amt)) {
393 IValueT Rs;
394 if (encodeOperand(Var, Rs, WantGPRegs) != EncodedAsRegister)
395 return CantEncode;
396 Value = encodeShiftRotateReg(Rm, FlexReg->getShiftOp(), Rs);
397 return EncodedAsRegShiftReg;
398 }
399 // If reached, the amount is a shifted amount by some 5-bit immediate.
400 uint32_t Imm5;
401 if (const auto *ShAmt = llvm::dyn_cast<OperandARM32ShAmtImm>(Amt)) {
402 Imm5 = ShAmt->getShAmtImm();
403 } else if (const auto *IntConst = llvm::dyn_cast<ConstantInteger32>(Amt)) {
404 int32_t Val = IntConst->getValue();
405 if (Val < 0)
406 return CantEncode;
407 Imm5 = static_cast<uint32_t>(Val);
408 } else
409 return CantEncode;
410 Value = encodeShiftRotateImm5(Rm, FlexReg->getShiftOp(), Imm5);
411 return EncodedAsShiftedRegister;
412 }
413 if (const auto *ShImm = llvm::dyn_cast<OperandARM32ShAmtImm>(Opnd)) {
414 const IValueT Immed5 = ShImm->getShAmtImm();
415 assert(Immed5 < (1 << kShiftImmBits));
416 Value = (Immed5 << kShiftImmShift);
417 return EncodedAsShiftImm5;
418 }
419 return CantEncode;
420 }
421
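// Encodes a base register plus a signed immediate offset. A negative offset is
// stored as its magnitude with the U bit cleared; e.g. an offset of -4 becomes
// Offset=4 with U=0, i.e. "[rN, #-4]".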
IValueT encodeImmRegOffset(IValueT Reg, IOffsetT Offset,
423 OperandARM32Mem::AddrMode Mode, IOffsetT MaxOffset,
424 IValueT OffsetShift) {
425 IValueT Value = Mode | (Reg << kRnShift);
426 if (Offset < 0) {
427 Offset = -Offset;
428 Value ^= U; // Flip U to adjust sign.
429 }
430 assert(Offset <= MaxOffset);
431 (void)MaxOffset;
432 return Value | (Offset >> OffsetShift);
433 }
434
435 // Encodes immediate register offset using encoding 3.
IValueT encodeImmRegOffsetEnc3(IValueT Rn, IOffsetT Imm8,
437 OperandARM32Mem::AddrMode Mode) {
438 IValueT Value = Mode | (Rn << kRnShift);
439 if (Imm8 < 0) {
440 Imm8 = -Imm8;
441 Value = (Value ^ U);
442 }
443 assert(Imm8 < (1 << 8));
444 Value = Value | B22 | ((Imm8 & 0xf0) << 4) | (Imm8 & 0x0f);
445 return Value;
446 }
447
IValueT encodeImmRegOffset(EncodedImmAddress ImmEncoding, IValueT Reg,
449 IOffsetT Offset, OperandARM32Mem::AddrMode Mode) {
450 switch (ImmEncoding) {
451 case RotatedImm8Address: {
452 constexpr IOffsetT MaxOffset = (1 << 8) - 1;
453 constexpr IValueT NoRightShift = 0;
454 return encodeImmRegOffset(Reg, Offset, Mode, MaxOffset, NoRightShift);
455 }
456 case RotatedImm8Div4Address: {
457 assert((Offset & 0x3) == 0);
458 constexpr IOffsetT MaxOffset = (1 << 8) - 1;
459 constexpr IValueT RightShift2 = 2;
460 return encodeImmRegOffset(Reg, Offset, Mode, MaxOffset, RightShift2);
461 }
462 case Imm12Address: {
463 constexpr IOffsetT MaxOffset = (1 << 12) - 1;
464 constexpr IValueT NoRightShift = 0;
465 return encodeImmRegOffset(Reg, Offset, Mode, MaxOffset, NoRightShift);
466 }
467 case RotatedImm8Enc3Address:
468 return encodeImmRegOffsetEnc3(Reg, Offset, Mode);
469 case NoImmOffsetAddress: {
470 assert(Offset == 0);
471 assert(Mode == OperandARM32Mem::Offset);
472 return Reg << kRnShift;
473 }
474 }
475 llvm_unreachable("(silence g++ warning)");
476 }
477
478 // Encodes memory address Opnd, and encodes that information into Value, based
479 // on how ARM represents the address. Returns how the value was encoded.
EncodedOperand encodeAddress(const Operand *Opnd, IValueT &Value,
481 const AssemblerARM32::TargetInfo &TInfo,
482 EncodedImmAddress ImmEncoding) {
483 Value = 0; // Make sure initialized.
484 if (const auto *Var = llvm::dyn_cast<Variable>(Opnd)) {
485 // Should be a stack variable, with an offset.
486 if (Var->hasReg())
487 return CantEncode;
488 IOffsetT Offset = Var->getStackOffset();
489 if (!Utils::IsAbsoluteUint(12, Offset))
490 return CantEncode;
491 const auto BaseRegNum =
492 Var->hasReg() ? Var->getBaseRegNum() : TInfo.FrameOrStackReg;
493 Value = encodeImmRegOffset(ImmEncoding, BaseRegNum, Offset,
494 OperandARM32Mem::Offset);
495 return EncodedAsImmRegOffset;
496 }
497 if (const auto *Mem = llvm::dyn_cast<OperandARM32Mem>(Opnd)) {
498 Variable *Var = Mem->getBase();
499 if (!Var->hasReg())
500 return CantEncode;
501 IValueT Rn = getEncodedGPRegNum(Var);
502 if (Mem->isRegReg()) {
503 const Variable *Index = Mem->getIndex();
      if (Index == nullptr)
505 return CantEncode;
506 Value = (Rn << kRnShift) | Mem->getAddrMode() |
507 encodeShiftRotateImm5(getEncodedGPRegNum(Index),
508 Mem->getShiftOp(), Mem->getShiftAmt());
509 return EncodedAsShiftRotateImm5;
510 }
511 // Encoded as immediate register offset.
512 ConstantInteger32 *Offset = Mem->getOffset();
513 Value = encodeImmRegOffset(ImmEncoding, Rn, Offset->getValue(),
514 Mem->getAddrMode());
515 return EncodedAsImmRegOffset;
516 }
517 return CantEncode;
518 }
519
520 // Checks that Offset can fit in imm24 constant of branch (b) instruction.
void assertCanEncodeBranchOffset(IOffsetT Offset) {
522 (void)Offset;
523 (void)kBranchOffsetBits;
524 assert(Utils::IsAligned(Offset, 4) &&
525 Utils::IsInt(kBranchOffsetBits, Offset >> 2));
526 }
527
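// Example (illustrative): a branch at buffer offset 0x1000 whose target is at
// offset 0x1010 is passed Offset=0x10 and ends up with
// (0x10 - kPCReadOffset) >> 2 = 2 in its imm24 field.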
IValueT encodeBranchOffset(IOffsetT Offset, IValueT Inst) {
529 // Adjust offset to the way ARM CPUs read PC.
530 Offset -= kPCReadOffset;
531
532 assertCanEncodeBranchOffset(Offset);
533
534 // Properly preserve only the bits supported in the instruction.
535 Offset >>= 2;
536 Offset &= kBranchOffsetMask;
537 return (Inst & ~kBranchOffsetMask) | Offset;
538 }
539
IValueT encodeRegister(const Operand *OpReg, RegSetWanted WantedRegSet,
541 const char *RegName, const char *InstName) {
542 IValueT Reg = 0;
543 if (encodeOperand(OpReg, Reg, WantedRegSet) != EncodedAsRegister)
544 llvm::report_fatal_error(std::string(InstName) + ": Can't find register " +
545 RegName);
546 return Reg;
547 }
548
IValueT encodeGPRegister(const Operand *OpReg, const char *RegName,
550 const char *InstName) {
551 return encodeRegister(OpReg, WantGPRegs, RegName, InstName);
552 }
553
IValueT encodeSRegister(const Operand *OpReg, const char *RegName,
555 const char *InstName) {
556 return encodeRegister(OpReg, WantSRegs, RegName, InstName);
557 }
558
IValueT encodeDRegister(const Operand *OpReg, const char *RegName,
560 const char *InstName) {
561 return encodeRegister(OpReg, WantDRegs, RegName, InstName);
562 }
563
IValueT encodeQRegister(const Operand *OpReg, const char *RegName,
565 const char *InstName) {
566 return encodeRegister(OpReg, WantQRegs, RegName, InstName);
567 }
568
void verifyPOrNotW(IValueT Address, const char *InstName) {
570 if (BuildDefs::minimal())
571 return;
572 if (!isBitSet(P, Address) && isBitSet(W, Address))
573 llvm::report_fatal_error(std::string(InstName) +
574 ": P=0 when W=1 not allowed");
575 }
576
void verifyRegsNotEq(IValueT Reg1, const char *Reg1Name, IValueT Reg2,
578 const char *Reg2Name, const char *InstName) {
579 if (BuildDefs::minimal())
580 return;
581 if (Reg1 == Reg2)
582 llvm::report_fatal_error(std::string(InstName) + ": " + Reg1Name + "=" +
583 Reg2Name + " not allowed");
584 }
585
void verifyRegNotPc(IValueT Reg, const char *RegName, const char *InstName) {
587 verifyRegsNotEq(Reg, RegName, RegARM32::Encoded_Reg_pc, "pc", InstName);
588 }
589
void verifyAddrRegNotPc(IValueT RegShift, IValueT Address, const char *RegName,
591 const char *InstName) {
592 if (BuildDefs::minimal())
593 return;
594 if (getGPRReg(RegShift, Address) == RegARM32::Encoded_Reg_pc)
595 llvm::report_fatal_error(std::string(InstName) + ": " + RegName +
596 "=pc not allowed");
597 }
598
void verifyRegNotPcWhenSetFlags(IValueT Reg, bool SetFlags,
600 const char *InstName) {
601 if (BuildDefs::minimal())
602 return;
603 if (SetFlags && (Reg == RegARM32::Encoded_Reg_pc))
604 llvm::report_fatal_error(std::string(InstName) + ": " +
605 RegARM32::getRegName(RegARM32::Reg_pc) +
606 "=pc not allowed when CC=1");
607 }
608
609 enum SIMDShiftType { ST_Vshl, ST_Vshr };
610
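// For the NEON immediate shifts, the 6-bit field encodes both the element
// width and the shift amount: vshl stores width + shift, while vshr stores
// 2 * width - shift (e.g. a 32-bit vshr by 3 is encoded as 64 - 3 = 61).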
IValueT encodeSIMDShiftImm6(SIMDShiftType Shift, Type ElmtTy,
612 const IValueT Imm) {
613 assert(Imm > 0);
614 const SizeT MaxShift = getScalarIntBitWidth(ElmtTy);
615 assert(Imm < 2 * MaxShift);
616 assert(ElmtTy == IceType_i8 || ElmtTy == IceType_i16 ||
617 ElmtTy == IceType_i32);
618 const IValueT VshlImm = Imm - MaxShift;
619 const IValueT VshrImm = 2 * MaxShift - Imm;
620 return ((Shift == ST_Vshl) ? VshlImm : VshrImm) & (2 * MaxShift - 1);
621 }
622
IValueT encodeSIMDShiftImm6(SIMDShiftType Shift, Type ElmtTy,
624 const ConstantInteger32 *Imm6) {
625 const IValueT Imm = Imm6->getValue();
626 return encodeSIMDShiftImm6(Shift, ElmtTy, Imm);
627 }
628 } // end of anonymous namespace
629
630 namespace Ice {
631 namespace ARM32 {
632
size_t MoveRelocatableFixup::emit(GlobalContext *Ctx,
634 const Assembler &Asm) const {
635 if (!BuildDefs::dump())
636 return InstARM32::InstSize;
637 Ostream &Str = Ctx->getStrEmit();
638 IValueT Inst = Asm.load<IValueT>(position());
639 const bool IsMovw = kind() == llvm::ELF::R_ARM_MOVW_ABS_NC ||
640 kind() == llvm::ELF::R_ARM_MOVW_PREL_NC;
641 const auto Symbol = symbol().toString();
642 const bool NeedsPCRelSuffix =
643 (Asm.fixupIsPCRel(kind()) || Symbol == GlobalOffsetTable);
644 Str << "\t"
645 "mov"
646 << (IsMovw ? "w" : "t") << "\t"
647 << RegARM32::getRegName(RegNumT::fixme((Inst >> kRdShift) & 0xF))
648 << ", #:" << (IsMovw ? "lower" : "upper") << "16:" << Symbol
649 << (NeedsPCRelSuffix ? " - ." : "")
650 << "\t@ .word "
651 // TODO(jpp): This is broken, it also needs to add a magic constant.
652 << llvm::format_hex_no_prefix(Inst, 8) << "\n";
653 return InstARM32::InstSize;
654 }
655
IValueT AssemblerARM32::encodeElmtType(Type ElmtTy) {
657 switch (ElmtTy) {
658 case IceType_i8:
659 return 0;
660 case IceType_i16:
661 return 1;
662 case IceType_i32:
663 case IceType_f32:
664 return 2;
665 case IceType_i64:
666 return 3;
667 default:
668 llvm::report_fatal_error("SIMD op: Don't understand element type " +
669 typeStdString(ElmtTy));
670 }
671 }
672
673 // This fixup points to an ARM32 instruction with the following format:
void MoveRelocatableFixup::emitOffset(Assembler *Asm) const {
675 // cccc00110T00iiiiddddiiiiiiiiiiii where cccc=Cond, dddd=Rd,
676 // iiiiiiiiiiiiiiii = Imm16, and T=1 for movt.
677
678 const IValueT Inst = Asm->load<IValueT>(position());
679 constexpr IValueT Imm16Mask = 0x000F0FFF;
680 const IValueT Imm16 = offset() & 0xffff;
681 Asm->store(position(),
682 (Inst & ~Imm16Mask) | ((Imm16 >> 12) << 16) | (Imm16 & 0xfff));
683 }
684
MoveRelocatableFixup *AssemblerARM32::createMoveFixup(bool IsMovW,
686 const Constant *Value) {
687 MoveRelocatableFixup *F =
688 new (allocate<MoveRelocatableFixup>()) MoveRelocatableFixup();
689 F->set_kind(IsMovW ? llvm::ELF::R_ARM_MOVW_ABS_NC
690 : llvm::ELF::R_ARM_MOVT_ABS);
691 F->set_value(Value);
692 Buffer.installFixup(F);
693 return F;
694 }
695
size_t BlRelocatableFixup::emit(GlobalContext *Ctx,
697 const Assembler &Asm) const {
698 if (!BuildDefs::dump())
699 return InstARM32::InstSize;
700 Ostream &Str = Ctx->getStrEmit();
701 IValueT Inst = Asm.load<IValueT>(position());
702 Str << "\t"
703 "bl\t"
704 << symbol() << "\t@ .word " << llvm::format_hex_no_prefix(Inst, 8)
705 << "\n";
706 return InstARM32::InstSize;
707 }
708
void BlRelocatableFixup::emitOffset(Assembler *Asm) const {
710 // cccc101liiiiiiiiiiiiiiiiiiiiiiii where cccc=Cond, l=Link, and
711 // iiiiiiiiiiiiiiiiiiiiiiii=
712 // EncodedBranchOffset(cccc101l000000000000000000000000, Offset);
713 const IValueT Inst = Asm->load<IValueT>(position());
714 constexpr IValueT OffsetMask = 0x00FFFFFF;
715 Asm->store(position(), encodeBranchOffset(offset(), Inst & ~OffsetMask));
716 }
717
void AssemblerARM32::padWithNop(intptr_t Padding) {
719 constexpr intptr_t InstWidth = sizeof(IValueT);
720 assert(Padding % InstWidth == 0 &&
721 "Padding not multiple of instruction size");
722 for (intptr_t i = 0; i < Padding; i += InstWidth)
723 nop();
724 }
725
726 BlRelocatableFixup *
AssemblerARM32::createBlFixup(const ConstantRelocatable *BlTarget) {
728 BlRelocatableFixup *F =
729 new (allocate<BlRelocatableFixup>()) BlRelocatableFixup();
730 F->set_kind(llvm::ELF::R_ARM_CALL);
731 F->set_value(BlTarget);
732 Buffer.installFixup(F);
733 return F;
734 }
735
void AssemblerARM32::bindCfgNodeLabel(const CfgNode *Node) {
737 if (BuildDefs::dump() && !getFlags().getDisableHybridAssembly()) {
738 // Generate label name so that branches can find it.
739 constexpr SizeT InstSize = 0;
740 emitTextInst(Node->getAsmName() + ":", InstSize);
741 }
742 SizeT NodeNumber = Node->getIndex();
743 assert(!getPreliminary());
744 Label *L = getOrCreateCfgNodeLabel(NodeNumber);
745 this->bind(L);
746 }
747
Label *AssemblerARM32::getOrCreateLabel(SizeT Number, LabelVector &Labels) {
749 Label *L = nullptr;
750 if (Number == Labels.size()) {
751 L = new (this->allocate<Label>()) Label();
752 Labels.push_back(L);
753 return L;
754 }
755 if (Number > Labels.size()) {
756 Labels.resize(Number + 1);
757 }
758 L = Labels[Number];
759 if (!L) {
760 L = new (this->allocate<Label>()) Label();
761 Labels[Number] = L;
762 }
763 return L;
764 }
765
766 // Pull out offset from branch Inst.
IOffsetT AssemblerARM32::decodeBranchOffset(IValueT Inst) {
768 // Sign-extend, left-shift by 2, and adjust to the way ARM CPUs read PC.
769 const IOffsetT Offset = (Inst & kBranchOffsetMask) << 8;
770 return (Offset >> 6) + kPCReadOffset;
771 }
772
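// Forward branches to an unbound label form a chain threaded through their
// offset fields: each unresolved branch records the position of the previous
// user of the label. Binding walks that chain, patching each branch with the
// real offset and recovering the next link from the old field.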
void AssemblerARM32::bind(Label *L) {
774 IOffsetT BoundPc = Buffer.size();
775 assert(!L->isBound()); // Labels can only be bound once.
776 while (L->isLinked()) {
777 IOffsetT Position = L->getLinkPosition();
778 IOffsetT Dest = BoundPc - Position;
779 IValueT Inst = Buffer.load<IValueT>(Position);
780 Buffer.store<IValueT>(Position, encodeBranchOffset(Dest, Inst));
781 L->setPosition(decodeBranchOffset(Inst));
782 }
783 L->bindTo(BoundPc);
784 }
785
void AssemblerARM32::emitTextInst(const std::string &Text, SizeT InstSize) {
787 AssemblerFixup *F = createTextFixup(Text, InstSize);
788 emitFixup(F);
789 for (SizeT I = 0; I < InstSize; ++I) {
790 AssemblerBuffer::EnsureCapacity ensured(&Buffer);
791 Buffer.emit<char>(0);
792 }
793 }
794
void AssemblerARM32::emitType01(CondARM32::Cond Cond, IValueT InstType,
796 IValueT Opcode, bool SetFlags, IValueT Rn,
797 IValueT Rd, IValueT Imm12,
798 EmitChecks RuleChecks, const char *InstName) {
799 switch (RuleChecks) {
800 case NoChecks:
801 break;
802 case RdIsPcAndSetFlags:
803 verifyRegNotPcWhenSetFlags(Rd, SetFlags, InstName);
804 break;
805 }
806 assert(Rd < RegARM32::getNumGPRegs());
807 assert(CondARM32::isDefined(Cond));
808 const IValueT Encoding = (encodeCondition(Cond) << kConditionShift) |
809 (InstType << kTypeShift) | (Opcode << kOpcodeShift) |
810 (encodeBool(SetFlags) << kSShift) |
811 (Rn << kRnShift) | (Rd << kRdShift) | Imm12;
812 emitInst(Encoding);
813 }
814
void AssemblerARM32::emitType01(CondARM32::Cond Cond, IValueT Opcode,
816 const Operand *OpRd, const Operand *OpRn,
817 const Operand *OpSrc1, bool SetFlags,
818 EmitChecks RuleChecks, const char *InstName) {
819 IValueT Rd = encodeGPRegister(OpRd, "Rd", InstName);
820 IValueT Rn = encodeGPRegister(OpRn, "Rn", InstName);
821 emitType01(Cond, Opcode, Rd, Rn, OpSrc1, SetFlags, RuleChecks, InstName);
822 }
823
void AssemblerARM32::emitType01(CondARM32::Cond Cond, IValueT Opcode,
825 IValueT Rd, IValueT Rn, const Operand *OpSrc1,
826 bool SetFlags, EmitChecks RuleChecks,
827 const char *InstName) {
828 IValueT Src1Value;
829 // TODO(kschimpf) Other possible decodings of data operations.
830 switch (encodeOperand(OpSrc1, Src1Value, WantGPRegs)) {
831 default:
832 llvm::report_fatal_error(std::string(InstName) +
833 ": Can't encode instruction");
834 return;
835 case EncodedAsRegister: {
836 // XXX (register)
    // xxx{s}<c> <Rd>, <Rn>, <Rm>{, <shift>}
838 //
839 // cccc000xxxxsnnnnddddiiiiitt0mmmm where cccc=Cond, xxxx=Opcode, dddd=Rd,
840 // nnnn=Rn, mmmm=Rm, iiiii=Shift, tt=ShiftKind, and s=SetFlags.
841 constexpr IValueT Imm5 = 0;
842 Src1Value = encodeShiftRotateImm5(Src1Value, OperandARM32::kNoShift, Imm5);
843 emitType01(Cond, kInstTypeDataRegister, Opcode, SetFlags, Rn, Rd, Src1Value,
844 RuleChecks, InstName);
845 return;
846 }
847 case EncodedAsShiftedRegister: {
848 // Form is defined in case EncodedAsRegister. (i.e. XXX (register)).
849 emitType01(Cond, kInstTypeDataRegister, Opcode, SetFlags, Rn, Rd, Src1Value,
850 RuleChecks, InstName);
851 return;
852 }
853 case EncodedAsConstI32: {
854 // See if we can convert this to an XXX (immediate).
855 IValueT RotateAmt;
856 IValueT Imm8;
857 if (!OperandARM32FlexImm::canHoldImm(Src1Value, &RotateAmt, &Imm8))
858 llvm::report_fatal_error(std::string(InstName) +
859 ": Immediate rotated constant not valid");
860 Src1Value = encodeRotatedImm8(RotateAmt, Imm8);
861 // Intentionally fall to next case!
862 }
863 case EncodedAsRotatedImm8: {
864 // XXX (Immediate)
865 // xxx{s}<c> <Rd>, <Rn>, #<RotatedImm8>
866 //
867 // cccc001xxxxsnnnnddddiiiiiiiiiiii where cccc=Cond, xxxx=Opcode, dddd=Rd,
868 // nnnn=Rn, s=SetFlags and iiiiiiiiiiii=Src1Value defining RotatedImm8.
869 emitType01(Cond, kInstTypeDataImmediate, Opcode, SetFlags, Rn, Rd,
870 Src1Value, RuleChecks, InstName);
871 return;
872 }
873 case EncodedAsRegShiftReg: {
874 // XXX (register-shifted reg)
875 // xxx{s}<c> <Rd>, <Rn>, <Rm>, <type> <Rs>
876 //
877 // cccc000xxxxfnnnnddddssss0tt1mmmm where cccc=Cond, xxxx=Opcode, dddd=Rd,
878 // nnnn=Rn, ssss=Rs, f=SetFlags, tt is encoding of type, and
    // Src1Value=ssss0tt1mmmm.
880 emitType01(Cond, kInstTypeDataRegShift, Opcode, SetFlags, Rn, Rd, Src1Value,
881 RuleChecks, InstName);
882 return;
883 }
884 }
885 }
886
void AssemblerARM32::emitType05(CondARM32::Cond Cond, IOffsetT Offset,
888 bool Link) {
889 // cccc101liiiiiiiiiiiiiiiiiiiiiiii where cccc=Cond, l=Link, and
890 // iiiiiiiiiiiiiiiiiiiiiiii=
891 // EncodedBranchOffset(cccc101l000000000000000000000000, Offset);
892 assert(CondARM32::isDefined(Cond));
893 IValueT Encoding = static_cast<int32_t>(Cond) << kConditionShift |
894 5 << kTypeShift | (Link ? 1 : 0) << kLinkShift;
895 Encoding = encodeBranchOffset(Offset, Encoding);
896 emitInst(Encoding);
897 }
898
void AssemblerARM32::emitBranch(Label *L, CondARM32::Cond Cond, bool Link) {
900 // TODO(kschimpf): Handle far jumps.
901 if (L->isBound()) {
902 const int32_t Dest = L->getPosition() - Buffer.size();
903 emitType05(Cond, Dest, Link);
904 return;
905 }
906 const IOffsetT Position = Buffer.size();
907 // Use the offset field of the branch instruction for linking the sites.
908 emitType05(Cond, L->getEncodedPosition(), Link);
909 L->linkTo(*this, Position);
910 }
911
void AssemblerARM32::emitCompareOp(CondARM32::Cond Cond, IValueT Opcode,
913 const Operand *OpRn, const Operand *OpSrc1,
914 const char *InstName) {
915 // XXX (register)
916 // XXX<c> <Rn>, <Rm>{, <shift>}
917 //
918 // ccccyyyxxxx1nnnn0000iiiiitt0mmmm where cccc=Cond, nnnn=Rn, mmmm=Rm, iiiii
919 // defines shift constant, tt=ShiftKind, yyy=kInstTypeDataRegister, and
920 // xxxx=Opcode.
921 //
922 // XXX (immediate)
923 // XXX<c> <Rn>, #<RotatedImm8>
924 //
  // ccccyyyxxxx1nnnn0000iiiiiiiiiiii where cccc=Cond, nnnn=Rn,
  // yyy=kInstTypeDataImmediate, xxxx=Opcode, and iiiiiiiiiiii=Src1Value
927 // defining RotatedImm8.
928 constexpr bool SetFlags = true;
929 constexpr IValueT Rd = RegARM32::Encoded_Reg_r0;
930 IValueT Rn = encodeGPRegister(OpRn, "Rn", InstName);
931 emitType01(Cond, Opcode, Rd, Rn, OpSrc1, SetFlags, NoChecks, InstName);
932 }
933
void AssemblerARM32::emitMemOp(CondARM32::Cond Cond, IValueT InstType,
935 bool IsLoad, bool IsByte, IValueT Rt,
936 IValueT Address) {
937 assert(Rt < RegARM32::getNumGPRegs());
938 assert(CondARM32::isDefined(Cond));
939 const IValueT Encoding = (encodeCondition(Cond) << kConditionShift) |
940 (InstType << kTypeShift) | (IsLoad ? L : 0) |
941 (IsByte ? B : 0) | (Rt << kRdShift) | Address;
942 emitInst(Encoding);
943 }
944
void AssemblerARM32::emitMemOp(CondARM32::Cond Cond, bool IsLoad, bool IsByte,
946 IValueT Rt, const Operand *OpAddress,
947 const TargetInfo &TInfo, const char *InstName) {
948 IValueT Address;
949 switch (encodeAddress(OpAddress, Address, TInfo, Imm12Address)) {
950 default:
951 llvm::report_fatal_error(std::string(InstName) +
952 ": Memory address not understood");
953 case EncodedAsImmRegOffset: {
954 // XXX{B} (immediate):
955 // xxx{b}<c> <Rt>, [<Rn>{, #+/-<imm12>}] ; p=1, w=0
    // xxx{b}<c> <Rt>, [<Rn>], #+/-<imm12> ; p=0, w=0
    // xxx{b}<c> <Rt>, [<Rn>, #+/-<imm12>]! ; p=1, w=1
958 //
959 // cccc010pubwlnnnnttttiiiiiiiiiiii where cccc=Cond, tttt=Rt, nnnn=Rn,
960 // iiiiiiiiiiii=imm12, b=IsByte, pu0w<<21 is a BlockAddr, l=IsLoad, and
961 // pu0w0nnnn0000iiiiiiiiiiii=Address.
962 RegARM32::GPRRegister Rn = getGPRReg(kRnShift, Address);
963
964 // Check if conditions of rules violated.
965 verifyRegNotPc(Rn, "Rn", InstName);
966 verifyPOrNotW(Address, InstName);
967 if (!IsByte && (Rn == RegARM32::Encoded_Reg_sp) && !isBitSet(P, Address) &&
968 isBitSet(U, Address) && !isBitSet(W, Address) &&
969 (mask(Address, kImm12Shift, kImmed12Bits) == 0x8 /* 000000000100 */))
970 llvm::report_fatal_error(std::string(InstName) +
971 ": Use push/pop instead");
972
973 emitMemOp(Cond, kInstTypeMemImmediate, IsLoad, IsByte, Rt, Address);
974 return;
975 }
976 case EncodedAsShiftRotateImm5: {
977 // XXX{B} (register)
978 // xxx{b}<c> <Rt>, [<Rn>, +/-<Rm>{, <shift>}]{!}
979 // xxx{b}<c> <Rt>, [<Rn>], +/-<Rm>{, <shift>}
980 //
981 // cccc011pubwlnnnnttttiiiiiss0mmmm where cccc=Cond, tttt=Rt,
982 // b=IsByte, U=1 if +, pu0b is a BlockAddr, l=IsLoad, and
983 // pu0w0nnnn0000iiiiiss0mmmm=Address.
984 RegARM32::GPRRegister Rn = getGPRReg(kRnShift, Address);
985 RegARM32::GPRRegister Rm = getGPRReg(kRmShift, Address);
986
987 // Check if conditions of rules violated.
988 verifyPOrNotW(Address, InstName);
989 verifyRegNotPc(Rm, "Rm", InstName);
990 if (IsByte)
991 verifyRegNotPc(Rt, "Rt", InstName);
992 if (isBitSet(W, Address)) {
993 verifyRegNotPc(Rn, "Rn", InstName);
994 verifyRegsNotEq(Rn, "Rn", Rt, "Rt", InstName);
995 }
996 emitMemOp(Cond, kInstTypeRegisterShift, IsLoad, IsByte, Rt, Address);
997 return;
998 }
999 }
1000 }
1001
void AssemblerARM32::emitMemOpEnc3(CondARM32::Cond Cond, IValueT Opcode,
1003 IValueT Rt, const Operand *OpAddress,
1004 const TargetInfo &TInfo,
1005 const char *InstName) {
1006 IValueT Address;
1007 switch (encodeAddress(OpAddress, Address, TInfo, RotatedImm8Enc3Address)) {
1008 default:
1009 llvm::report_fatal_error(std::string(InstName) +
1010 ": Memory address not understood");
1011 case EncodedAsImmRegOffset: {
1012 // XXXH (immediate)
    // xxxh<c> <Rt>, [<Rn>{, #+/-<Imm8>}]
    // xxxh<c> <Rt>, [<Rn>], #+/-<Imm8>
    // xxxh<c> <Rt>, [<Rn>, #+/-<Imm8>]!
1016 //
1017 // cccc000pu0wxnnnnttttiiiiyyyyjjjj where cccc=Cond, nnnn=Rn, tttt=Rt,
1018 // iiiijjjj=Imm8, pu0w<<21 is a BlockAddr, x000000000000yyyy0000=Opcode,
1019 // and pu0w0nnnn0000iiii0000jjjj=Address.
1020 assert(Rt < RegARM32::getNumGPRegs());
1021 assert(CondARM32::isDefined(Cond));
1022 verifyPOrNotW(Address, InstName);
1023 verifyRegNotPc(Rt, "Rt", InstName);
1024 if (isBitSet(W, Address))
1025 verifyRegsNotEq(getGPRReg(kRnShift, Address), "Rn", Rt, "Rt", InstName);
1026 const IValueT Encoding = (encodeCondition(Cond) << kConditionShift) |
1027 Opcode | (Rt << kRdShift) | Address;
1028 emitInst(Encoding);
1029 return;
1030 }
1031 case EncodedAsShiftRotateImm5: {
1032 // XXXH (register)
1033 // xxxh<c> <Rt>, [<Rn>, +/-<Rm>]{!}
1034 // xxxh<c> <Rt>, [<Rn>], +/-<Rm>
1035 //
1036 // cccc000pu0wxnnnntttt00001011mmmm where cccc=Cond, tttt=Rt, nnnn=Rn,
1037 // mmmm=Rm, pu0w<<21 is a BlockAddr, x000000000000yyyy0000=Opcode, and
1038 // pu0w0nnnn000000000000mmmm=Address.
1039 assert(Rt < RegARM32::getNumGPRegs());
1040 assert(CondARM32::isDefined(Cond));
1041 verifyPOrNotW(Address, InstName);
1042 verifyRegNotPc(Rt, "Rt", InstName);
1043 verifyAddrRegNotPc(kRmShift, Address, "Rm", InstName);
1044 const RegARM32::GPRRegister Rn = getGPRReg(kRnShift, Address);
1045 if (isBitSet(W, Address)) {
1046 verifyRegNotPc(Rn, "Rn", InstName);
1047 verifyRegsNotEq(Rn, "Rn", Rt, "Rt", InstName);
1048 }
1049 if (mask(Address, kShiftImmShift, 5) != 0)
1050 // For encoding 3, no shift is allowed.
1051 llvm::report_fatal_error(std::string(InstName) +
1052 ": Shift constant not allowed");
1053 const IValueT Encoding = (encodeCondition(Cond) << kConditionShift) |
1054 Opcode | (Rt << kRdShift) | Address;
1055 emitInst(Encoding);
1056 return;
1057 }
1058 }
1059 }
1060
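// sdiv/udiv share this layout; B15|B14|B13|B12 fill the all-ones field
// required in bits 15..12 of the divide encodings.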
void AssemblerARM32::emitDivOp(CondARM32::Cond Cond, IValueT Opcode, IValueT Rd,
1062 IValueT Rn, IValueT Rm) {
1063 assert(Rd < RegARM32::getNumGPRegs());
1064 assert(Rn < RegARM32::getNumGPRegs());
1065 assert(Rm < RegARM32::getNumGPRegs());
1066 assert(CondARM32::isDefined(Cond));
1067 const IValueT Encoding = Opcode | (encodeCondition(Cond) << kConditionShift) |
1068 (Rn << kDivRnShift) | (Rd << kDivRdShift) | B26 |
1069 B25 | B24 | B20 | B15 | B14 | B13 | B12 | B4 |
1070 (Rm << kDivRmShift);
1071 emitInst(Encoding);
1072 }
1073
void AssemblerARM32::emitInsertExtractInt(CondARM32::Cond Cond,
1075 const Operand *OpQn, uint32_t Index,
1076 const Operand *OpRt, bool IsExtract,
1077 const char *InstName) {
1078 const IValueT Rt = encodeGPRegister(OpRt, "Rt", InstName);
1079 IValueT Dn = mapQRegToDReg(encodeQRegister(OpQn, "Qn", InstName));
1080 assert(Rt != RegARM32::Encoded_Reg_pc);
1081 assert(Rt != RegARM32::Encoded_Reg_sp);
1082 assert(CondARM32::isDefined(Cond));
1083 const uint32_t BitSize = typeWidthInBytes(OpRt->getType()) * CHAR_BIT;
1084 IValueT Opcode1 = 0;
1085 IValueT Opcode2 = 0;
1086 switch (BitSize) {
1087 default:
1088 llvm::report_fatal_error(std::string(InstName) +
1089 ": Unable to process type " +
1090 typeStdString(OpRt->getType()));
1091 case 8:
1092 assert(Index < 16);
1093 Dn = Dn | mask(Index, 3, 1);
1094 Opcode1 = B1 | mask(Index, 2, 1);
1095 Opcode2 = mask(Index, 0, 2);
1096 break;
1097 case 16:
1098 assert(Index < 8);
1099 Dn = Dn | mask(Index, 2, 1);
1100 Opcode1 = mask(Index, 1, 1);
1101 Opcode2 = (mask(Index, 0, 1) << 1) | B0;
1102 break;
1103 case 32:
1104 assert(Index < 4);
1105 Dn = Dn | mask(Index, 1, 1);
1106 Opcode1 = mask(Index, 0, 1);
1107 break;
1108 }
1109 const IValueT Encoding = B27 | B26 | B25 | B11 | B9 | B8 | B4 |
1110 (encodeCondition(Cond) << kConditionShift) |
1111 (Opcode1 << 21) |
1112 (getXXXXInRegYXXXX(Dn) << kRnShift) | (Rt << 12) |
1113 (encodeBool(IsExtract) << 20) |
1114 (getYInRegYXXXX(Dn) << 7) | (Opcode2 << 5);
1115 emitInst(Encoding);
1116 }
1117
void AssemblerARM32::emitMoveSS(CondARM32::Cond Cond, IValueT Sd, IValueT Sm) {
1119 // VMOV (register) - ARM section A8.8.340, encoding A2:
1120 // vmov<c>.f32 <Sd>, <Sm>
1121 //
1122 // cccc11101D110000dddd101001M0mmmm where cccc=Cond, ddddD=Sd, and mmmmM=Sm.
1123 constexpr IValueT VmovssOpcode = B23 | B21 | B20 | B6;
1124 constexpr IValueT S0 = 0;
1125 emitVFPsss(Cond, VmovssOpcode, Sd, S0, Sm);
1126 }
1127
void AssemblerARM32::emitMulOp(CondARM32::Cond Cond, IValueT Opcode, IValueT Rd,
1129 IValueT Rn, IValueT Rm, IValueT Rs,
1130 bool SetFlags) {
1131 assert(Rd < RegARM32::getNumGPRegs());
1132 assert(Rn < RegARM32::getNumGPRegs());
1133 assert(Rm < RegARM32::getNumGPRegs());
1134 assert(Rs < RegARM32::getNumGPRegs());
1135 assert(CondARM32::isDefined(Cond));
1136 IValueT Encoding = Opcode | (encodeCondition(Cond) << kConditionShift) |
1137 (encodeBool(SetFlags) << kSShift) | (Rn << kRnShift) |
1138 (Rd << kRdShift) | (Rs << kRsShift) | B7 | B4 |
1139 (Rm << kRmShift);
1140 emitInst(Encoding);
1141 }
1142
void AssemblerARM32::emitMultiMemOp(CondARM32::Cond Cond,
1144 BlockAddressMode AddressMode, bool IsLoad,
1145 IValueT BaseReg, IValueT Registers) {
1146 assert(CondARM32::isDefined(Cond));
1147 assert(BaseReg < RegARM32::getNumGPRegs());
1148 assert(Registers < (1 << RegARM32::getNumGPRegs()));
1149 IValueT Encoding = (encodeCondition(Cond) << kConditionShift) | B27 |
1150 AddressMode | (IsLoad ? L : 0) | (BaseReg << kRnShift) |
1151 Registers;
1152 emitInst(Encoding);
1153 }
1154
void AssemblerARM32::emitSignExtend(CondARM32::Cond Cond, IValueT Opcode,
1156 const Operand *OpRd, const Operand *OpSrc0,
1157 const char *InstName) {
1158 IValueT Rd = encodeGPRegister(OpRd, "Rd", InstName);
1159 IValueT Rm = encodeGPRegister(OpSrc0, "Rm", InstName);
1160 // Note: For the moment, we assume no rotation is specified.
1161 RotationValue Rotation = kRotateNone;
1162 constexpr IValueT Rn = RegARM32::Encoded_Reg_pc;
1163 const Type Ty = OpSrc0->getType();
1164 switch (Ty) {
1165 default:
1166 llvm::report_fatal_error(std::string(InstName) + ": Type " +
1167 typeString(Ty) + " not allowed");
1168 break;
1169 case IceType_i1:
1170 case IceType_i8: {
1171 // SXTB/UXTB - Arm sections A8.8.233 and A8.8.274, encoding A1:
1172 // sxtb<c> <Rd>, <Rm>{, <rotate>}
1173 // uxtb<c> <Rd>, <Rm>{, <rotate>}
1174 //
1175 // ccccxxxxxxxx1111ddddrr000111mmmm where cccc=Cond, xxxxxxxx<<20=Opcode,
1176 // dddd=Rd, mmmm=Rm, and rr defined (RotationValue) rotate.
1177 break;
1178 }
1179 case IceType_i16: {
1180 // SXTH/UXTH - ARM sections A8.8.235 and A8.8.276, encoding A1:
    // sxth<c> <Rd>, <Rm>{, <rotate>}
    // uxth<c> <Rd>, <Rm>{, <rotate>}
1182 //
1183 // cccc01101111nnnnddddrr000111mmmm where cccc=Cond, dddd=Rd, mmmm=Rm, and
1184 // rr defined (RotationValue) rotate.
1185 Opcode |= B20;
1186 break;
1187 }
1188 }
1189
1190 assert(CondARM32::isDefined(Cond));
1191 IValueT Rot = encodeRotation(Rotation);
1192 if (!Utils::IsUint(2, Rot))
1193 llvm::report_fatal_error(std::string(InstName) +
1194 ": Illegal rotation value");
1195 IValueT Encoding = (encodeCondition(Cond) << kConditionShift) | Opcode |
1196 (Rn << kRnShift) | (Rd << kRdShift) |
1197 (Rot << kRotationShift) | B6 | B5 | B4 | (Rm << kRmShift);
1198 emitInst(Encoding);
1199 }
1200
void AssemblerARM32::emitSIMDBase(IValueT Opcode, IValueT Dd, IValueT Dn,
1202 IValueT Dm, bool UseQRegs, bool IsFloatTy) {
1203 const IValueT Encoding =
1204 Opcode | B25 | (encodeCondition(CondARM32::kNone) << kConditionShift) |
1205 (getYInRegYXXXX(Dd) << 22) | (getXXXXInRegYXXXX(Dn) << 16) |
1206 (getXXXXInRegYXXXX(Dd) << 12) | (IsFloatTy ? B10 : 0) |
1207 (getYInRegYXXXX(Dn) << 7) | (encodeBool(UseQRegs) << 6) |
1208 (getYInRegYXXXX(Dm) << 5) | getXXXXInRegYXXXX(Dm);
1209 emitInst(Encoding);
1210 }
1211
void AssemblerARM32::emitSIMD(IValueT Opcode, Type ElmtTy, IValueT Dd,
1213 IValueT Dn, IValueT Dm, bool UseQRegs) {
1214 constexpr IValueT ElmtShift = 20;
1215 const IValueT ElmtSize = encodeElmtType(ElmtTy);
1216 assert(Utils::IsUint(2, ElmtSize));
1217 emitSIMDBase(Opcode | (ElmtSize << ElmtShift), Dd, Dn, Dm, UseQRegs,
1218 isFloatingType(ElmtTy));
1219 }
1220
void AssemblerARM32::emitSIMDqqqBase(IValueT Opcode, const Operand *OpQd,
1222 const Operand *OpQn, const Operand *OpQm,
1223 bool IsFloatTy, const char *OpcodeName) {
1224 const IValueT Qd = encodeQRegister(OpQd, "Qd", OpcodeName);
1225 const IValueT Qn = encodeQRegister(OpQn, "Qn", OpcodeName);
1226 const IValueT Qm = encodeQRegister(OpQm, "Qm", OpcodeName);
1227 constexpr bool UseQRegs = true;
1228 emitSIMDBase(Opcode, mapQRegToDReg(Qd), mapQRegToDReg(Qn), mapQRegToDReg(Qm),
1229 UseQRegs, IsFloatTy);
1230 }
1231
void AssemblerARM32::emitSIMDqqq(IValueT Opcode, Type ElmtTy,
1233 const Operand *OpQd, const Operand *OpQn,
1234 const Operand *OpQm, const char *OpcodeName) {
1235 constexpr IValueT ElmtShift = 20;
1236 const IValueT ElmtSize = encodeElmtType(ElmtTy);
1237 assert(Utils::IsUint(2, ElmtSize));
1238 emitSIMDqqqBase(Opcode | (ElmtSize << ElmtShift), OpQd, OpQn, OpQm,
1239 isFloatingType(ElmtTy), OpcodeName);
1240 }
1241
void AssemblerARM32::emitSIMDShiftqqc(IValueT Opcode, const Operand *OpQd,
1243 const Operand *OpQm, const IValueT Imm6,
1244 const char *OpcodeName) {
1245 const IValueT Qd = encodeQRegister(OpQd, "Qd", OpcodeName);
1246 const IValueT Qn = 0;
1247 const IValueT Qm = encodeQRegister(OpQm, "Qm", OpcodeName);
1248 constexpr bool UseQRegs = true;
1249 constexpr bool IsFloatTy = false;
1250 constexpr IValueT ElmtShift = 16;
1251 emitSIMDBase(Opcode | (Imm6 << ElmtShift), mapQRegToDReg(Qd),
1252 mapQRegToDReg(Qn), mapQRegToDReg(Qm), UseQRegs, IsFloatTy);
1253 }
1254
void AssemblerARM32::emitSIMDCvtqq(IValueT Opcode, const Operand *OpQd,
1256 const Operand *OpQm,
1257 const char *OpcodeName) {
1258 const IValueT SIMDOpcode =
1259 B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B9 | Opcode;
1260 constexpr bool UseQRegs = true;
1261 constexpr bool IsFloatTy = false;
1262 const IValueT Qd = encodeQRegister(OpQd, "Qd", OpcodeName);
1263 constexpr IValueT Qn = 0;
1264 const IValueT Qm = encodeQRegister(OpQm, "Qm", OpcodeName);
1265 emitSIMDBase(SIMDOpcode, mapQRegToDReg(Qd), mapQRegToDReg(Qn),
1266 mapQRegToDReg(Qm), UseQRegs, IsFloatTy);
1267 }
1268
void AssemblerARM32::emitVFPddd(CondARM32::Cond Cond, IValueT Opcode,
1270 IValueT Dd, IValueT Dn, IValueT Dm) {
1271 assert(Dd < RegARM32::getNumDRegs());
1272 assert(Dn < RegARM32::getNumDRegs());
1273 assert(Dm < RegARM32::getNumDRegs());
1274 assert(CondARM32::isDefined(Cond));
1275 constexpr IValueT VFPOpcode = B27 | B26 | B25 | B11 | B9 | B8;
1276 const IValueT Encoding =
1277 Opcode | VFPOpcode | (encodeCondition(Cond) << kConditionShift) |
1278 (getYInRegYXXXX(Dd) << 22) | (getXXXXInRegYXXXX(Dn) << 16) |
1279 (getXXXXInRegYXXXX(Dd) << 12) | (getYInRegYXXXX(Dn) << 7) |
1280 (getYInRegYXXXX(Dm) << 5) | getXXXXInRegYXXXX(Dm);
1281 emitInst(Encoding);
1282 }
1283
void AssemblerARM32::emitVFPddd(CondARM32::Cond Cond, IValueT Opcode,
1285 const Operand *OpDd, const Operand *OpDn,
1286 const Operand *OpDm, const char *InstName) {
1287 IValueT Dd = encodeDRegister(OpDd, "Dd", InstName);
1288 IValueT Dn = encodeDRegister(OpDn, "Dn", InstName);
1289 IValueT Dm = encodeDRegister(OpDm, "Dm", InstName);
1290 emitVFPddd(Cond, Opcode, Dd, Dn, Dm);
1291 }
1292
void AssemblerARM32::emitVFPsss(CondARM32::Cond Cond, IValueT Opcode,
1294 IValueT Sd, IValueT Sn, IValueT Sm) {
1295 assert(Sd < RegARM32::getNumSRegs());
1296 assert(Sn < RegARM32::getNumSRegs());
1297 assert(Sm < RegARM32::getNumSRegs());
1298 assert(CondARM32::isDefined(Cond));
1299 constexpr IValueT VFPOpcode = B27 | B26 | B25 | B11 | B9;
1300 const IValueT Encoding =
1301 Opcode | VFPOpcode | (encodeCondition(Cond) << kConditionShift) |
1302 (getYInRegXXXXY(Sd) << 22) | (getXXXXInRegXXXXY(Sn) << 16) |
1303 (getXXXXInRegXXXXY(Sd) << 12) | (getYInRegXXXXY(Sn) << 7) |
1304 (getYInRegXXXXY(Sm) << 5) | getXXXXInRegXXXXY(Sm);
1305 emitInst(Encoding);
1306 }
1307
void AssemblerARM32::emitVFPsss(CondARM32::Cond Cond, IValueT Opcode,
1309 const Operand *OpSd, const Operand *OpSn,
1310 const Operand *OpSm, const char *InstName) {
1311 const IValueT Sd = encodeSRegister(OpSd, "Sd", InstName);
1312 const IValueT Sn = encodeSRegister(OpSn, "Sn", InstName);
1313 const IValueT Sm = encodeSRegister(OpSm, "Sm", InstName);
1314 emitVFPsss(Cond, Opcode, Sd, Sn, Sm);
1315 }
1316
void AssemblerARM32::adc(const Operand *OpRd, const Operand *OpRn,
1318 const Operand *OpSrc1, bool SetFlags,
1319 CondARM32::Cond Cond) {
1320 // ADC (register) - ARM section 18.8.2, encoding A1:
1321 // adc{s}<c> <Rd>, <Rn>, <Rm>{, <shift>}
1322 //
1323 // cccc0000101snnnnddddiiiiitt0mmmm where cccc=Cond, dddd=Rd, nnnn=Rn,
1324 // mmmm=Rm, iiiii=Shift, tt=ShiftKind, and s=SetFlags.
1325 //
1326 // ADC (Immediate) - ARM section A8.8.1, encoding A1:
1327 // adc{s}<c> <Rd>, <Rn>, #<RotatedImm8>
1328 //
1329 // cccc0010101snnnnddddiiiiiiiiiiii where cccc=Cond, dddd=Rd, nnnn=Rn,
1330 // s=SetFlags and iiiiiiiiiiii=Src1Value defining RotatedImm8.
1331 constexpr const char *AdcName = "adc";
1332 constexpr IValueT AdcOpcode = B2 | B0; // 0101
1333 emitType01(Cond, AdcOpcode, OpRd, OpRn, OpSrc1, SetFlags, RdIsPcAndSetFlags,
1334 AdcName);
1335 }
1336
void AssemblerARM32::add(const Operand *OpRd, const Operand *OpRn,
1338 const Operand *OpSrc1, bool SetFlags,
1339 CondARM32::Cond Cond) {
1340 // ADD (register) - ARM section A8.8.7, encoding A1:
  // add{s}<c> <Rd>, <Rn>, <Rm>{, <shift>}
  // ADD (Sp plus register) - ARM section A8.8.11, encoding A1:
  // add{s}<c> sp, <Rn>, <Rm>{, <shift>}
1344 //
1345 // cccc0000100snnnnddddiiiiitt0mmmm where cccc=Cond, dddd=Rd, nnnn=Rn,
1346 // mmmm=Rm, iiiii=Shift, tt=ShiftKind, and s=SetFlags.
1347 //
1348 // ADD (Immediate) - ARM section A8.8.5, encoding A1:
1349 // add{s}<c> <Rd>, <Rn>, #<RotatedImm8>
1350 // ADD (SP plus immediate) - ARM section A8.8.9, encoding A1.
1351 // add{s}<c> <Rd>, sp, #<RotatedImm8>
1352 //
1353 // cccc0010100snnnnddddiiiiiiiiiiii where cccc=Cond, dddd=Rd, nnnn=Rn,
1354 // s=SetFlags and iiiiiiiiiiii=Src1Value defining RotatedImm8.
1355 constexpr const char *AddName = "add";
1356 constexpr IValueT Add = B2; // 0100
1357 emitType01(Cond, Add, OpRd, OpRn, OpSrc1, SetFlags, RdIsPcAndSetFlags,
1358 AddName);
1359 }
1360
void AssemblerARM32::and_(const Operand *OpRd, const Operand *OpRn,
1362 const Operand *OpSrc1, bool SetFlags,
1363 CondARM32::Cond Cond) {
1364 // AND (register) - ARM section A8.8.14, encoding A1:
  //   and{s}<c> <Rd>, <Rn>, <Rm>{, <shift>}
1366 //
1367 // cccc0000000snnnnddddiiiiitt0mmmm where cccc=Cond, dddd=Rd, nnnn=Rn,
1368 // mmmm=Rm, iiiii=Shift, tt=ShiftKind, and s=SetFlags.
1369 //
1370 // AND (Immediate) - ARM section A8.8.13, encoding A1:
1371 // and{s}<c> <Rd>, <Rn>, #<RotatedImm8>
1372 //
  // cccc0010000snnnnddddiiiiiiiiiiii where cccc=Cond, dddd=Rd, nnnn=Rn,
1374 // s=SetFlags and iiiiiiiiiiii=Src1Value defining RotatedImm8.
1375 constexpr const char *AndName = "and";
1376 constexpr IValueT And = 0; // 0000
1377 emitType01(Cond, And, OpRd, OpRn, OpSrc1, SetFlags, RdIsPcAndSetFlags,
1378 AndName);
1379 }
1380
void AssemblerARM32::b(Label *L, CondARM32::Cond Cond) {
1382 emitBranch(L, Cond, false);
1383 }
1384
void AssemblerARM32::bkpt(uint16_t Imm16) {
  // BKPT - ARM section A8.8.24 - encoding A1:
1387 // bkpt #<Imm16>
1388 //
1389 // cccc00010010iiiiiiiiiiii0111iiii where cccc=AL and iiiiiiiiiiiiiiii=Imm16
1390 const IValueT Encoding = (CondARM32::AL << kConditionShift) | B24 | B21 |
1391 ((Imm16 >> 4) << 8) | B6 | B5 | B4 | (Imm16 & 0xf);
1392 emitInst(Encoding);
1393 }
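// Worked example (illustrative): for "bkpt #0x1234", the upper 12 bits of
// Imm16 (0x123) land at bit 8 and the low nibble (0x4) at bit 0, giving
// 0xE1212374.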
1394
void AssemblerARM32::bic(const Operand *OpRd, const Operand *OpRn,
                         const Operand *OpSrc1, bool SetFlags,
                         CondARM32::Cond Cond) {
1398 // BIC (register) - ARM section A8.8.22, encoding A1:
1399 // bic{s}<c> <Rd>, <Rn>, <Rm>{, <shift>}
1400 //
1401 // cccc0001110snnnnddddiiiiitt0mmmm where cccc=Cond, dddd=Rd, nnnn=Rn,
1402 // mmmm=Rm, iiiii=Shift, tt=ShiftKind, and s=SetFlags.
1403 //
1404 // BIC (immediate) - ARM section A8.8.21, encoding A1:
1405 // bic{s}<c> <Rd>, <Rn>, #<RotatedImm8>
1406 //
  // cccc0011110snnnnddddiiiiiiiiiiii where cccc=Cond, dddd=Rd, nnnn=Rn,
1408 // s=SetFlags, and iiiiiiiiiiii=Src1Value defining RotatedImm8.
1409 constexpr const char *BicName = "bic";
1410 constexpr IValueT BicOpcode = B3 | B2 | B1; // i.e. 1110
1411 emitType01(Cond, BicOpcode, OpRd, OpRn, OpSrc1, SetFlags, RdIsPcAndSetFlags,
1412 BicName);
1413 }
1414
void AssemblerARM32::bl(const ConstantRelocatable *Target) {
1416 // BL (immediate) - ARM section A8.8.25, encoding A1:
1417 // bl<c> <label>
1418 //
1419 // cccc1011iiiiiiiiiiiiiiiiiiiiiiii where cccc=Cond (not currently allowed)
1420 // and iiiiiiiiiiiiiiiiiiiiiiii is the (encoded) Target to branch to.
1421 emitFixup(createBlFixup(Target));
1422 constexpr CondARM32::Cond Cond = CondARM32::AL;
1423 constexpr IValueT Immed = 0;
1424 constexpr bool Link = true;
1425 emitType05(Cond, Immed, Link);
1426 }
1427
void AssemblerARM32::blx(const Operand *Target) {
1429 // BLX (register) - ARM section A8.8.26, encoding A1:
1430 // blx<c> <Rm>
1431 //
1432 // cccc000100101111111111110011mmmm where cccc=Cond (not currently allowed)
1433 // and mmmm=Rm.
1434 constexpr const char *BlxName = "Blx";
1435 IValueT Rm = encodeGPRegister(Target, "Rm", BlxName);
1436 verifyRegNotPc(Rm, "Rm", BlxName);
1437 constexpr CondARM32::Cond Cond = CondARM32::AL;
1438 int32_t Encoding = (encodeCondition(Cond) << kConditionShift) | B24 | B21 |
1439 (0xfff << 8) | B5 | B4 | (Rm << kRmShift);
1440 emitInst(Encoding);
1441 }
1442
void AssemblerARM32::bx(RegARM32::GPRRegister Rm, CondARM32::Cond Cond) {
1444 // BX - ARM section A8.8.27, encoding A1:
1445 // bx<c> <Rm>
1446 //
1447 // cccc000100101111111111110001mmmm where mmmm=rm and cccc=Cond.
1448 assert(CondARM32::isDefined(Cond));
1449 const IValueT Encoding = (encodeCondition(Cond) << kConditionShift) | B24 |
1450 B21 | (0xfff << 8) | B4 |
1451 (encodeGPRRegister(Rm) << kRmShift);
1452 emitInst(Encoding);
1453 }
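// Worked example (illustrative): "bx lr" (Rm=14, AL) assembles to 0xE12FFF1E;
// the blx form above differs only in bit 5, e.g. "blx r3" is 0xE12FFF33.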
1454
void AssemblerARM32::clz(const Operand *OpRd, const Operand *OpSrc,
                         CondARM32::Cond Cond) {
1457 // CLZ - ARM section A8.8.33, encoding A1:
  //   clz<c> <Rd>, <Rm>
1459 //
1460 // cccc000101101111dddd11110001mmmm where cccc=Cond, dddd=Rd, and mmmm=Rm.
1461 constexpr const char *ClzName = "clz";
1462 constexpr const char *RdName = "Rd";
1463 constexpr const char *RmName = "Rm";
1464 IValueT Rd = encodeGPRegister(OpRd, RdName, ClzName);
1465 assert(Rd < RegARM32::getNumGPRegs());
1466 verifyRegNotPc(Rd, RdName, ClzName);
1467 IValueT Rm = encodeGPRegister(OpSrc, RmName, ClzName);
1468 assert(Rm < RegARM32::getNumGPRegs());
1469 verifyRegNotPc(Rm, RmName, ClzName);
1470 assert(CondARM32::isDefined(Cond));
1471 constexpr IValueT PredefinedBits =
1472 B24 | B22 | B21 | (0xF << 16) | (0xf << 8) | B4;
1473 const IValueT Encoding = PredefinedBits | (Cond << kConditionShift) |
1474 (Rd << kRdShift) | (Rm << kRmShift);
1475 emitInst(Encoding);
1476 }
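// Worked example (illustrative): "clz r0, r1" under AL assembles to
// 0xE16F0F11 (PredefinedBits = 0x016F0F10 plus Rd=0 and Rm=1).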
1477
void AssemblerARM32::cmn(const Operand *OpRn, const Operand *OpSrc1,
                         CondARM32::Cond Cond) {
1480 // CMN (immediate) - ARM section A8.8.34, encoding A1:
1481 // cmn<c> <Rn>, #<RotatedImm8>
1482 //
  // cccc00110111nnnn0000iiiiiiiiiiii where cccc=Cond, nnnn=Rn, and
  // iiiiiiiiiiii=Src1Value defining RotatedImm8.
  //
  // CMN (register) - ARM section A8.8.35, encoding A1:
1487 // cmn<c> <Rn>, <Rm>{, <shift>}
1488 //
1489 // cccc00010111nnnn0000iiiiitt0mmmm where cccc=Cond, nnnn=Rn, mmmm=Rm,
1490 // iiiii=Shift, and tt=ShiftKind.
1491 constexpr const char *CmnName = "cmn";
1492 constexpr IValueT CmnOpcode = B3 | B1 | B0; // ie. 1011
1493 emitCompareOp(Cond, CmnOpcode, OpRn, OpSrc1, CmnName);
1494 }
1495
void AssemblerARM32::cmp(const Operand *OpRn, const Operand *OpSrc1,
                         CondARM32::Cond Cond) {
1498 // CMP (register) - ARM section A8.8.38, encoding A1:
1499 // cmp<c> <Rn>, <Rm>{, <shift>}
1500 //
1501 // cccc00010101nnnn0000iiiiitt0mmmm where cccc=Cond, nnnn=Rn, mmmm=Rm,
1502 // iiiii=Shift, and tt=ShiftKind.
1503 //
1504 // CMP (immediate) - ARM section A8.8.37
  //   cmp<c> <Rn>, #<RotatedImm8>
  //
  // cccc00110101nnnn0000iiiiiiiiiiii where cccc=Cond, nnnn=Rn, and
  // iiiiiiiiiiii=Src1Value defining RotatedImm8.
1509 constexpr const char *CmpName = "cmp";
1510 constexpr IValueT CmpOpcode = B3 | B1; // ie. 1010
1511 emitCompareOp(Cond, CmpOpcode, OpRn, OpSrc1, CmpName);
1512 }
1513
void AssemblerARM32::dmb(IValueT Option) {
1515 // DMB - ARM section A8.8.43, encoding A1:
1516 // dmb <option>
1517 //
1518 // 1111010101111111111100000101xxxx where xxxx=Option.
1519 assert(Utils::IsUint(4, Option) && "Bad dmb option");
1520 const IValueT Encoding =
1521 (encodeCondition(CondARM32::kNone) << kConditionShift) | B26 | B24 | B22 |
1522 B21 | B20 | B19 | B18 | B17 | B16 | B15 | B14 | B13 | B12 | B6 | B4 |
1523 Option;
1524 emitInst(Encoding);
1525 }
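// Worked example (illustrative): Option=0xF is "dmb sy", which assembles to
// 0xF57FF05F (DMB is unconditional, so the condition field is 1111).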
1526
void AssemblerARM32::eor(const Operand *OpRd, const Operand *OpRn,
                         const Operand *OpSrc1, bool SetFlags,
                         CondARM32::Cond Cond) {
  // EOR (register) - ARM section A8.8.47, encoding A1:
1531 // eor{s}<c> <Rd>, <Rn>, <Rm>{, <shift>}
1532 //
1533 // cccc0000001snnnnddddiiiiitt0mmmm where cccc=Cond, dddd=Rd, nnnn=Rn,
1534 // mmmm=Rm, iiiii=Shift, tt=ShiftKind, and s=SetFlags.
1535 //
  // EOR (Immediate) - ARM section A8.8.46, encoding A1:
1537 // eor{s}<c> <Rd>, <Rn>, #RotatedImm8
1538 //
1539 // cccc0010001snnnnddddiiiiiiiiiiii where cccc=Cond, dddd=Rd, nnnn=Rn,
1540 // s=SetFlags and iiiiiiiiiiii=Src1Value defining RotatedImm8.
1541 constexpr const char *EorName = "eor";
1542 constexpr IValueT EorOpcode = B0; // 0001
1543 emitType01(Cond, EorOpcode, OpRd, OpRn, OpSrc1, SetFlags, RdIsPcAndSetFlags,
1544 EorName);
1545 }
1546
void AssemblerARM32::ldr(const Operand *OpRt, const Operand *OpAddress,
                         CondARM32::Cond Cond, const TargetInfo &TInfo) {
1549 constexpr const char *LdrName = "ldr";
1550 constexpr bool IsLoad = true;
1551 IValueT Rt = encodeGPRegister(OpRt, "Rt", LdrName);
1552 const Type Ty = OpRt->getType();
1553 switch (Ty) {
1554 case IceType_i64:
1555 // LDRD is not implemented because target lowering handles i64 and double by
1556 // using two (32-bit) load instructions. Note: Intentionally drop to default
1557 // case.
1558 llvm::report_fatal_error(std::string("ldr : Type ") + typeString(Ty) +
1559 " not implemented");
1560 default:
1561 llvm::report_fatal_error(std::string("ldr : Type ") + typeString(Ty) +
1562 " not allowed");
1563 case IceType_i1:
1564 case IceType_i8: {
1565 // LDRB (immediate) - ARM section A8.8.68, encoding A1:
1566 // ldrb<c> <Rt>, [<Rn>{, #+/-<imm12>}] ; p=1, w=0
1567 // ldrb<c> <Rt>, [<Rn>], #+/-<imm12> ; p=1, w=1
1568 // ldrb<c> <Rt>, [<Rn>, #+/-<imm12>]! ; p=0, w=1
1569 //
1570 // cccc010pu1w1nnnnttttiiiiiiiiiiii where cccc=Cond, tttt=Rt, nnnn=Rn,
1571 // iiiiiiiiiiii=imm12, u=1 if +, pu0w is a BlockAddr, and
1572 // pu0w0nnnn0000iiiiiiiiiiii=Address.
1573 //
1574 // LDRB (register) - ARM section A8.8.66, encoding A1:
1575 // ldrb<c> <Rt>, [<Rn>, +/-<Rm>{, <shift>}]{!}
1576 // ldrb<c> <Rt>, [<Rn>], +/-<Rm>{, <shift>}
1577 //
1578 // cccc011pu1w1nnnnttttiiiiiss0mmmm where cccc=Cond, tttt=Rt, U=1 if +, pu0b
1579 // is a BlockAddr, and pu0w0nnnn0000iiiiiss0mmmm=Address.
1580 constexpr bool IsByte = true;
1581 emitMemOp(Cond, IsLoad, IsByte, Rt, OpAddress, TInfo, LdrName);
1582 return;
1583 }
1584 case IceType_i16: {
1585 // LDRH (immediate) - ARM section A8.8.80, encoding A1:
1586 // ldrh<c> <Rt>, [<Rn>{, #+/-<Imm8>}]
1587 // ldrh<c> <Rt>, [<Rn>], #+/-<Imm8>
1588 // ldrh<c> <Rt>, [<Rn>, #+/-<Imm8>]!
1589 //
1590 // cccc000pu1w1nnnnttttiiii1011iiii where cccc=Cond, tttt=Rt, nnnn=Rn,
1591 // iiiiiiii=Imm8, u=1 if +, pu0w is a BlockAddr, and
1592 // pu0w0nnnn0000iiiiiiiiiiii=Address.
1593 constexpr const char *Ldrh = "ldrh";
1594 emitMemOpEnc3(Cond, L | B7 | B5 | B4, Rt, OpAddress, TInfo, Ldrh);
1595 return;
1596 }
1597 case IceType_i32: {
1598 // LDR (immediate) - ARM section A8.8.63, encoding A1:
1599 // ldr<c> <Rt>, [<Rn>{, #+/-<imm12>}] ; p=1, w=0
1600 // ldr<c> <Rt>, [<Rn>], #+/-<imm12> ; p=1, w=1
1601 // ldr<c> <Rt>, [<Rn>, #+/-<imm12>]! ; p=0, w=1
1602 //
1603 // cccc010pu0w1nnnnttttiiiiiiiiiiii where cccc=Cond, tttt=Rt, nnnn=Rn,
1604 // iiiiiiiiiiii=imm12, u=1 if +, pu0w is a BlockAddr, and
1605 //
1606 // LDR (register) - ARM section A8.8.70, encoding A1:
    //   ldr<c> <Rt>, [<Rn>, +/-<Rm>{, <shift>}]{!}
    //   ldr<c> <Rt>, [<Rn>], +/-<Rm>{, <shift>}
1609 //
1610 // cccc011pu0w1nnnnttttiiiiiss0mmmm where cccc=Cond, tttt=Rt, U=1 if +, pu0b
1611 // is a BlockAddr, and pu0w0nnnn0000iiiiiss0mmmm=Address.
1612 constexpr bool IsByte = false;
1613 emitMemOp(Cond, IsLoad, IsByte, Rt, OpAddress, TInfo, LdrName);
1614 return;
1615 }
1616 }
1617 }
1618
void AssemblerARM32::emitMemExOp(CondARM32::Cond Cond, Type Ty, bool IsLoad,
                                 const Operand *OpRd, IValueT Rt,
                                 const Operand *OpAddress,
                                 const TargetInfo &TInfo,
                                 const char *InstName) {
1624 IValueT Rd = encodeGPRegister(OpRd, "Rd", InstName);
1625 IValueT MemExOpcode = IsLoad ? B0 : 0;
1626 switch (Ty) {
1627 default:
1628 llvm::report_fatal_error(std::string(InstName) + ": Type " +
1629 typeString(Ty) + " not allowed");
1630 case IceType_i1:
1631 case IceType_i8:
1632 MemExOpcode |= B2;
1633 break;
1634 case IceType_i16:
1635 MemExOpcode |= B2 | B1;
1636 break;
1637 case IceType_i32:
1638 break;
1639 case IceType_i64:
1640 MemExOpcode |= B1;
1641 }
1642 IValueT AddressRn;
1643 if (encodeAddress(OpAddress, AddressRn, TInfo, NoImmOffsetAddress) !=
1644 EncodedAsImmRegOffset)
1645 llvm::report_fatal_error(std::string(InstName) +
1646 ": Can't extract Rn from address");
1647 assert(Utils::IsAbsoluteUint(3, MemExOpcode));
1648 assert(Rd < RegARM32::getNumGPRegs());
1649 assert(Rt < RegARM32::getNumGPRegs());
1650 assert(CondARM32::isDefined(Cond));
1651 IValueT Encoding = (Cond << kConditionShift) | B24 | B23 | B11 | B10 | B9 |
1652 B8 | B7 | B4 | (MemExOpcode << kMemExOpcodeShift) |
1653 AddressRn | (Rd << kRdShift) | (Rt << kRmShift);
1654 emitInst(Encoding);
1655 return;
1656 }
1657
void AssemblerARM32::ldrex(const Operand *OpRt, const Operand *OpAddress,
                           CondARM32::Cond Cond, const TargetInfo &TInfo) {
1660 // LDREXB - ARM section A8.8.76, encoding A1:
1661 // ldrexb<c> <Rt>, [<Rn>]
1662 //
1663 // cccc00011101nnnntttt111110011111 where cccc=Cond, tttt=Rt, and nnnn=Rn.
1664 //
1665 // LDREXH - ARM section A8.8.78, encoding A1:
1666 // ldrexh<c> <Rt>, [<Rn>]
1667 //
1668 // cccc00011111nnnntttt111110011111 where cccc=Cond, tttt=Rt, and nnnn=Rn.
1669 //
1670 // LDREX - ARM section A8.8.75, encoding A1:
1671 // ldrex<c> <Rt>, [<Rn>]
1672 //
1673 // cccc00011001nnnntttt111110011111 where cccc=Cond, tttt=Rt, and nnnn=Rn.
1674 //
1675 // LDREXD - ARM section A8.
1676 // ldrexd<c> <Rt>, [<Rn>]
1677 //
1678 // cccc00011001nnnntttt111110011111 where cccc=Cond, tttt=Rt, and nnnn=Rn.
1679 constexpr const char *LdrexName = "ldrex";
1680 const Type Ty = OpRt->getType();
1681 constexpr bool IsLoad = true;
1682 constexpr IValueT Rm = RegARM32::Encoded_Reg_pc;
1683 emitMemExOp(Cond, Ty, IsLoad, OpRt, Rm, OpAddress, TInfo, LdrexName);
1684 }
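// Worked example (illustrative): "ldrex r0, [r1]" under AL assembles to
// 0xE1910F9F.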
1685
void AssemblerARM32::emitShift(const CondARM32::Cond Cond,
                               const OperandARM32::ShiftKind Shift,
                               const Operand *OpRd, const Operand *OpRm,
                               const Operand *OpSrc1, const bool SetFlags,
                               const char *InstName) {
1691 constexpr IValueT ShiftOpcode = B3 | B2 | B0; // 1101
1692 IValueT Rd = encodeGPRegister(OpRd, "Rd", InstName);
1693 IValueT Rm = encodeGPRegister(OpRm, "Rm", InstName);
1694 IValueT Value;
1695 switch (encodeOperand(OpSrc1, Value, WantGPRegs)) {
1696 default:
1697 llvm::report_fatal_error(std::string(InstName) +
1698 ": Last operand not understood");
1699 case EncodedAsShiftImm5: {
1700 // XXX (immediate)
1701 // xxx{s}<c> <Rd>, <Rm>, #imm5
1702 //
1703 // cccc0001101s0000ddddiiiii000mmmm where cccc=Cond, s=SetFlags, dddd=Rd,
1704 // iiiii=imm5, and mmmm=Rm.
1705 constexpr IValueT Rn = 0; // Rn field is not used.
1706 Value = Value | (Rm << kRmShift) | (Shift << kShiftShift);
1707 emitType01(Cond, kInstTypeDataRegShift, ShiftOpcode, SetFlags, Rn, Rd,
1708 Value, RdIsPcAndSetFlags, InstName);
1709 return;
1710 }
1711 case EncodedAsRegister: {
1712 // XXX (register)
1713 // xxx{S}<c> <Rd>, <Rm>, <Rs>
1714 //
1715 // cccc0001101s0000ddddssss0001mmmm where cccc=Cond, s=SetFlags, dddd=Rd,
1716 // mmmm=Rm, and ssss=Rs.
1717 constexpr IValueT Rn = 0; // Rn field is not used.
1718 IValueT Rs = encodeGPRegister(OpSrc1, "Rs", InstName);
1719 verifyRegNotPc(Rd, "Rd", InstName);
1720 verifyRegNotPc(Rm, "Rm", InstName);
1721 verifyRegNotPc(Rs, "Rs", InstName);
1722 emitType01(Cond, kInstTypeDataRegShift, ShiftOpcode, SetFlags, Rn, Rd,
1723 encodeShiftRotateReg(Rm, Shift, Rs), NoChecks, InstName);
1724 return;
1725 }
1726 }
1727 }
1728
void AssemblerARM32::asr(const Operand *OpRd, const Operand *OpRm,
                         const Operand *OpSrc1, bool SetFlags,
                         CondARM32::Cond Cond) {
1732 constexpr const char *AsrName = "asr";
1733 emitShift(Cond, OperandARM32::ASR, OpRd, OpRm, OpSrc1, SetFlags, AsrName);
1734 }
1735
void AssemblerARM32::lsl(const Operand *OpRd, const Operand *OpRm,
                         const Operand *OpSrc1, bool SetFlags,
                         CondARM32::Cond Cond) {
1739 constexpr const char *LslName = "lsl";
1740 emitShift(Cond, OperandARM32::LSL, OpRd, OpRm, OpSrc1, SetFlags, LslName);
1741 }
1742
void AssemblerARM32::lsr(const Operand *OpRd, const Operand *OpRm,
                         const Operand *OpSrc1, bool SetFlags,
                         CondARM32::Cond Cond) {
1746 constexpr const char *LsrName = "lsr";
1747 emitShift(Cond, OperandARM32::LSR, OpRd, OpRm, OpSrc1, SetFlags, LsrName);
1748 }
1749
void AssemblerARM32::mov(const Operand *OpRd, const Operand *OpSrc,
                         CondARM32::Cond Cond) {
1752 // MOV (register) - ARM section A8.8.104, encoding A1:
1753 // mov{S}<c> <Rd>, <Rn>
1754 //
1755 // cccc0001101s0000dddd00000000mmmm where cccc=Cond, s=SetFlags, dddd=Rd,
1756 // and nnnn=Rn.
1757 //
1758 // MOV (immediate) - ARM section A8.8.102, encoding A1:
1759 // mov{S}<c> <Rd>, #<RotatedImm8>
1760 //
1761 // cccc0011101s0000ddddiiiiiiiiiiii where cccc=Cond, s=SetFlags, dddd=Rd,
1762 // and iiiiiiiiiiii=RotatedImm8=Src. Note: We don't use movs in this
1763 // assembler.
1764 constexpr const char *MovName = "mov";
1765 IValueT Rd = encodeGPRegister(OpRd, "Rd", MovName);
1766 constexpr bool SetFlags = false;
1767 constexpr IValueT Rn = 0;
1768 constexpr IValueT MovOpcode = B3 | B2 | B0; // 1101.
1769 emitType01(Cond, MovOpcode, Rd, Rn, OpSrc, SetFlags, RdIsPcAndSetFlags,
1770 MovName);
1771 }
1772
void AssemblerARM32::emitMovwt(CondARM32::Cond Cond, bool IsMovW,
                               const Operand *OpRd, const Operand *OpSrc,
                               const char *MovName) {
1776 IValueT Opcode = B25 | B24 | (IsMovW ? 0 : B22);
1777 IValueT Rd = encodeGPRegister(OpRd, "Rd", MovName);
1778 IValueT Imm16;
1779 if (const auto *Src = llvm::dyn_cast<ConstantRelocatable>(OpSrc)) {
1780 emitFixup(createMoveFixup(IsMovW, Src));
1781 // Use 0 for the lower 16 bits of the relocatable, and add a fixup to
1782 // install the correct bits.
1783 Imm16 = 0;
1784 } else if (encodeOperand(OpSrc, Imm16, WantGPRegs) != EncodedAsConstI32) {
1785 llvm::report_fatal_error(std::string(MovName) + ": Not i32 constant");
1786 }
1787 assert(CondARM32::isDefined(Cond));
1788 if (!Utils::IsAbsoluteUint(16, Imm16))
1789 llvm::report_fatal_error(std::string(MovName) + ": Constant not i16");
1790 const IValueT Encoding = encodeCondition(Cond) << kConditionShift | Opcode |
1791 ((Imm16 >> 12) << 16) | Rd << kRdShift |
1792 (Imm16 & 0xfff);
1793 emitInst(Encoding);
1794 }
1795
void AssemblerARM32::movw(const Operand *OpRd, const Operand *OpSrc,
                          CondARM32::Cond Cond) {
1798 // MOV (immediate) - ARM section A8.8.102, encoding A2:
1799 // movw<c> <Rd>, #<imm16>
1800 //
1801 // cccc00110000iiiiddddiiiiiiiiiiii where cccc=Cond, dddd=Rd, and
1802 // iiiiiiiiiiiiiiii=imm16.
1803 constexpr const char *MovwName = "movw";
1804 constexpr bool IsMovW = true;
1805 emitMovwt(Cond, IsMovW, OpRd, OpSrc, MovwName);
1806 }
1807
void AssemblerARM32::movt(const Operand *OpRd, const Operand *OpSrc,
                          CondARM32::Cond Cond) {
1810 // MOVT - ARM section A8.8.106, encoding A1:
1811 // movt<c> <Rd>, #<imm16>
1812 //
1813 // cccc00110100iiiiddddiiiiiiiiiiii where cccc=Cond, dddd=Rd, and
1814 // iiiiiiiiiiiiiiii=imm16.
1815 constexpr const char *MovtName = "movt";
1816 constexpr bool IsMovW = false;
1817 emitMovwt(Cond, IsMovW, OpRd, OpSrc, MovtName);
1818 }
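// Usage note (illustrative): a full 32-bit constant is typically materialized
// with a movw/movt pair, e.g. loading 0x12345678 into r0 emits
// "movw r0, #0x5678" (0xE3050678) followed by "movt r0, #0x1234" (0xE3401234).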
1819
void AssemblerARM32::mvn(const Operand *OpRd, const Operand *OpSrc,
                         CondARM32::Cond Cond) {
1822 // MVN (immediate) - ARM section A8.8.115, encoding A1:
1823 // mvn{s}<c> <Rd>, #<const>
1824 //
1825 // cccc0011111s0000ddddiiiiiiiiiiii where cccc=Cond, s=SetFlags=0, dddd=Rd,
1826 // and iiiiiiiiiiii=const
1827 //
1828 // MVN (register) - ARM section A8.8.116, encoding A1:
1829 // mvn{s}<c> <Rd>, <Rm>{, <shift>
1830 //
1831 // cccc0001111s0000ddddiiiiitt0mmmm where cccc=Cond, s=SetFlags=0, dddd=Rd,
1832 // mmmm=Rm, iiii defines shift constant, and tt=ShiftKind.
1833 constexpr const char *MvnName = "mvn";
1834 IValueT Rd = encodeGPRegister(OpRd, "Rd", MvnName);
1835 constexpr IValueT MvnOpcode = B3 | B2 | B1 | B0; // i.e. 1111
1836 constexpr IValueT Rn = 0;
1837 constexpr bool SetFlags = false;
1838 emitType01(Cond, MvnOpcode, Rd, Rn, OpSrc, SetFlags, RdIsPcAndSetFlags,
1839 MvnName);
1840 }
1841
void AssemblerARM32::nop() {
1843 // NOP - Section A8.8.119, encoding A1:
1844 // nop<c>
1845 //
1846 // cccc0011001000001111000000000000 where cccc=Cond.
1847 constexpr CondARM32::Cond Cond = CondARM32::AL;
1848 const IValueT Encoding = (encodeCondition(Cond) << kConditionShift) | B25 |
1849 B24 | B21 | B15 | B14 | B13 | B12;
1850 emitInst(Encoding);
1851 }
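// Worked example (illustrative): under AL this is the canonical ARM NOP word
// 0xE320F000.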
1852
void AssemblerARM32::sbc(const Operand *OpRd, const Operand *OpRn,
                         const Operand *OpSrc1, bool SetFlags,
                         CondARM32::Cond Cond) {
  // SBC (register) - ARM section A8.8.162, encoding A1:
1857 // sbc{s}<c> <Rd>, <Rn>, <Rm>{, <shift>}
1858 //
1859 // cccc0000110snnnnddddiiiiitt0mmmm where cccc=Cond, dddd=Rd, nnnn=Rn,
1860 // mmmm=Rm, iiiii=Shift, tt=ShiftKind, and s=SetFlags.
1861 //
1862 // SBC (Immediate) - ARM section A8.8.161, encoding A1:
1863 // sbc{s}<c> <Rd>, <Rn>, #<RotatedImm8>
1864 //
1865 // cccc0010110snnnnddddiiiiiiiiiiii where cccc=Cond, dddd=Rd, nnnn=Rn,
1866 // s=SetFlags and iiiiiiiiiiii=Src1Value defining RotatedImm8.
1867 constexpr const char *SbcName = "sbc";
1868 constexpr IValueT SbcOpcode = B2 | B1; // 0110
1869 emitType01(Cond, SbcOpcode, OpRd, OpRn, OpSrc1, SetFlags, RdIsPcAndSetFlags,
1870 SbcName);
1871 }
1872
void AssemblerARM32::sdiv(const Operand *OpRd, const Operand *OpRn,
                          const Operand *OpSrc1, CondARM32::Cond Cond) {
1875 // SDIV - ARM section A8.8.165, encoding A1.
1876 // sdiv<c> <Rd>, <Rn>, <Rm>
1877 //
1878 // cccc01110001dddd1111mmmm0001nnnn where cccc=Cond, dddd=Rd, nnnn=Rn, and
1879 // mmmm=Rm.
1880 constexpr const char *SdivName = "sdiv";
1881 IValueT Rd = encodeGPRegister(OpRd, "Rd", SdivName);
1882 IValueT Rn = encodeGPRegister(OpRn, "Rn", SdivName);
1883 IValueT Rm = encodeGPRegister(OpSrc1, "Rm", SdivName);
1884 verifyRegNotPc(Rd, "Rd", SdivName);
1885 verifyRegNotPc(Rn, "Rn", SdivName);
1886 verifyRegNotPc(Rm, "Rm", SdivName);
1887 // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
1888 constexpr IValueT SdivOpcode = 0;
1889 emitDivOp(Cond, SdivOpcode, Rd, Rn, Rm);
1890 }
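// Worked example (illustrative): "sdiv r0, r1, r2" under AL assembles to
// 0xE710F211; udiv (defined below) differs only by B21, so "udiv r0, r1, r2"
// is 0xE730F211.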
1891
void AssemblerARM32::str(const Operand *OpRt, const Operand *OpAddress,
                         CondARM32::Cond Cond, const TargetInfo &TInfo) {
1894 constexpr const char *StrName = "str";
1895 constexpr bool IsLoad = false;
1896 IValueT Rt = encodeGPRegister(OpRt, "Rt", StrName);
1897 const Type Ty = OpRt->getType();
1898 switch (Ty) {
1899 case IceType_i64:
1900 // STRD is not implemented because target lowering handles i64 and double by
1901 // using two (32-bit) store instructions. Note: Intentionally drop to
1902 // default case.
1903 llvm::report_fatal_error(std::string(StrName) + ": Type " + typeString(Ty) +
1904 " not implemented");
1905 default:
1906 llvm::report_fatal_error(std::string(StrName) + ": Type " + typeString(Ty) +
1907 " not allowed");
1908 case IceType_i1:
1909 case IceType_i8: {
1910 // STRB (immediate) - ARM section A8.8.207, encoding A1:
1911 // strb<c> <Rt>, [<Rn>{, #+/-<imm12>}] ; p=1, w=0
1912 // strb<c> <Rt>, [<Rn>], #+/-<imm12> ; p=1, w=1
1913 // strb<c> <Rt>, [<Rn>, #+/-<imm12>]! ; p=0, w=1
1914 //
1915 // cccc010pu1w0nnnnttttiiiiiiiiiiii where cccc=Cond, tttt=Rt, nnnn=Rn,
1916 // iiiiiiiiiiii=imm12, u=1 if +.
1917 constexpr bool IsByte = true;
1918 emitMemOp(Cond, IsLoad, IsByte, Rt, OpAddress, TInfo, StrName);
1919 return;
1920 }
1921 case IceType_i16: {
    // STRH (immediate) - ARM section A8.8.217, encoding A1:
1923 // strh<c> <Rt>, [<Rn>{, #+/-<Imm8>}]
1924 // strh<c> <Rt>, [<Rn>], #+/-<Imm8>
1925 // strh<c> <Rt>, [<Rn>, #+/-<Imm8>]!
1926 //
1927 // cccc000pu1w0nnnnttttiiii1011iiii where cccc=Cond, tttt=Rt, nnnn=Rn,
1928 // iiiiiiii=Imm8, u=1 if +, pu0w is a BlockAddr, and
1929 // pu0w0nnnn0000iiiiiiiiiiii=Address.
1930 constexpr const char *Strh = "strh";
1931 emitMemOpEnc3(Cond, B7 | B5 | B4, Rt, OpAddress, TInfo, Strh);
1932 return;
1933 }
1934 case IceType_i32: {
1935 // Note: Handles i32 and float stores. Target lowering handles i64 and
1936 // double by using two (32 bit) store instructions.
1937 //
1938 // STR (immediate) - ARM section A8.8.207, encoding A1:
1939 // str<c> <Rt>, [<Rn>{, #+/-<imm12>}] ; p=1, w=0
1940 // str<c> <Rt>, [<Rn>], #+/-<imm12> ; p=1, w=1
1941 // str<c> <Rt>, [<Rn>, #+/-<imm12>]! ; p=0, w=1
1942 //
1943 // cccc010pu1w0nnnnttttiiiiiiiiiiii where cccc=Cond, tttt=Rt, nnnn=Rn,
1944 // iiiiiiiiiiii=imm12, u=1 if +.
1945 constexpr bool IsByte = false;
1946 emitMemOp(Cond, IsLoad, IsByte, Rt, OpAddress, TInfo, StrName);
1947 return;
1948 }
1949 }
1950 }
1951
void AssemblerARM32::strex(const Operand *OpRd, const Operand *OpRt,
                           const Operand *OpAddress, CondARM32::Cond Cond,
                           const TargetInfo &TInfo) {
1955 // STREXB - ARM section A8.8.213, encoding A1:
1956 // strexb<c> <Rd>, <Rt>, [<Rn>]
1957 //
1958 // cccc00011100nnnndddd11111001tttt where cccc=Cond, dddd=Rd, tttt=Rt, and
1959 // nnnn=Rn.
1960 //
1961 // STREXH - ARM section A8.8.215, encoding A1:
1962 // strexh<c> <Rd>, <Rt>, [<Rn>]
1963 //
1964 // cccc00011110nnnndddd11111001tttt where cccc=Cond, dddd=Rd, tttt=Rt, and
1965 // nnnn=Rn.
1966 //
1967 // STREX - ARM section A8.8.212, encoding A1:
1968 // strex<c> <Rd>, <Rt>, [<Rn>]
1969 //
1970 // cccc00011000nnnndddd11111001tttt where cccc=Cond, dddd=Rd, tttt=Rt, and
1971 // nnnn=Rn.
1972 //
1973 // STREXD - ARM section A8.8.214, encoding A1:
1974 // strexd<c> <Rd>, <Rt>, [<Rn>]
1975 //
1976 // cccc00011010nnnndddd11111001tttt where cccc=Cond, dddd=Rd, tttt=Rt, and
1977 // nnnn=Rn.
1978 constexpr const char *StrexName = "strex";
1979 // Note: Rt uses Rm shift in encoding.
1980 IValueT Rt = encodeGPRegister(OpRt, "Rt", StrexName);
1981 const Type Ty = OpRt->getType();
1982 constexpr bool IsLoad = true;
1983 emitMemExOp(Cond, Ty, !IsLoad, OpRd, Rt, OpAddress, TInfo, StrexName);
1984 }
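// Worked example (illustrative): "strex r0, r1, [r2]" under AL assembles to
// 0xE1820F91 (Rd in the usual Rd slot, Rt in the Rm slot, as noted above).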
1985
void AssemblerARM32::orr(const Operand *OpRd, const Operand *OpRn,
                         const Operand *OpSrc1, bool SetFlags,
                         CondARM32::Cond Cond) {
1989 // ORR (register) - ARM Section A8.8.123, encoding A1:
1990 // orr{s}<c> <Rd>, <Rn>, <Rm>
1991 //
1992 // cccc0001100snnnnddddiiiiitt0mmmm where cccc=Cond, dddd=Rd, nnnn=Rn,
  // mmmm=Rm, iiiii=shift, tt=ShiftKind, and s=SetFlags.
1994 //
  // ORR (immediate) - ARM Section A8.8.122, encoding A1:
  //   orr{s}<c> <Rd>, <Rn>, #<RotatedImm8>
  //
  // cccc0011100snnnnddddiiiiiiiiiiii where cccc=Cond, dddd=Rd, nnnn=Rn,
1999 // s=SetFlags and iiiiiiiiiiii=Src1Value defining RotatedImm8.
2000 constexpr const char *OrrName = "orr";
2001 constexpr IValueT OrrOpcode = B3 | B2; // i.e. 1100
2002 emitType01(Cond, OrrOpcode, OpRd, OpRn, OpSrc1, SetFlags, RdIsPcAndSetFlags,
2003 OrrName);
2004 }
2005
void AssemblerARM32::pop(const Variable *OpRt, CondARM32::Cond Cond) {
2007 // POP - ARM section A8.8.132, encoding A2:
2008 // pop<c> {Rt}
2009 //
2010 // cccc010010011101dddd000000000100 where dddd=Rt and cccc=Cond.
2011 constexpr const char *Pop = "pop";
2012 IValueT Rt = encodeGPRegister(OpRt, "Rt", Pop);
2013 verifyRegsNotEq(Rt, "Rt", RegARM32::Encoded_Reg_sp, "sp", Pop);
2014 // Same as load instruction.
2015 constexpr bool IsLoad = true;
2016 constexpr bool IsByte = false;
2017 constexpr IOffsetT MaxOffset = (1 << 8) - 1;
2018 constexpr IValueT NoShiftRight = 0;
2019 IValueT Address =
2020 encodeImmRegOffset(RegARM32::Encoded_Reg_sp, kWordSize,
2021 OperandARM32Mem::PostIndex, MaxOffset, NoShiftRight);
2022 emitMemOp(Cond, kInstTypeMemImmediate, IsLoad, IsByte, Rt, Address);
2023 }
2024
void AssemblerARM32::popList(const IValueT Registers, CondARM32::Cond Cond) {
  // POP - ARM section A8.8.131, encoding A1:
2027 // pop<c> <registers>
2028 //
2029 // cccc100010111101rrrrrrrrrrrrrrrr where cccc=Cond and
2030 // rrrrrrrrrrrrrrrr=Registers (one bit for each GP register).
2031 constexpr bool IsLoad = true;
2032 emitMultiMemOp(Cond, IA_W, IsLoad, RegARM32::Encoded_Reg_sp, Registers);
2033 }
2034
void AssemblerARM32::push(const Operand *OpRt, CondARM32::Cond Cond) {
2036 // PUSH - ARM section A8.8.133, encoding A2:
2037 // push<c> {Rt}
2038 //
2039 // cccc010100101101dddd000000000100 where dddd=Rt and cccc=Cond.
2040 constexpr const char *Push = "push";
2041 IValueT Rt = encodeGPRegister(OpRt, "Rt", Push);
2042 verifyRegsNotEq(Rt, "Rt", RegARM32::Encoded_Reg_sp, "sp", Push);
2043 // Same as store instruction.
2044 constexpr bool isLoad = false;
2045 constexpr bool isByte = false;
2046 constexpr IOffsetT MaxOffset = (1 << 8) - 1;
2047 constexpr IValueT NoShiftRight = 0;
2048 IValueT Address =
2049 encodeImmRegOffset(RegARM32::Encoded_Reg_sp, -kWordSize,
2050 OperandARM32Mem::PreIndex, MaxOffset, NoShiftRight);
2051 emitMemOp(Cond, kInstTypeMemImmediate, isLoad, isByte, Rt, Address);
2052 }
2053
void AssemblerARM32::pushList(const IValueT Registers, CondARM32::Cond Cond) {
2055 // PUSH - ARM section A8.8.133, encoding A1:
2056 // push<c> <Registers>
2057 //
2058 // cccc100100101101rrrrrrrrrrrrrrrr where cccc=Cond and
2059 // rrrrrrrrrrrrrrrr=Registers (one bit for each GP register).
2060 constexpr bool IsLoad = false;
2061 emitMultiMemOp(Cond, DB_W, IsLoad, RegARM32::Encoded_Reg_sp, Registers);
2062 }
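// Worked example (illustrative): Registers is a bit mask with one bit per GP
// register, so "push {r4, lr}" passes 0x4010 and assembles to 0xE92D4010,
// while the matching "pop {r4, pc}" passes 0x8010 and assembles to 0xE8BD8010.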
2063
void AssemblerARM32::mla(const Operand *OpRd, const Operand *OpRn,
                         const Operand *OpRm, const Operand *OpRa,
                         CondARM32::Cond Cond) {
2067 // MLA - ARM section A8.8.114, encoding A1.
2068 // mla{s}<c> <Rd>, <Rn>, <Rm>, <Ra>
2069 //
2070 // cccc0000001sddddaaaammmm1001nnnn where cccc=Cond, s=SetFlags, dddd=Rd,
2071 // aaaa=Ra, mmmm=Rm, and nnnn=Rn.
2072 constexpr const char *MlaName = "mla";
2073 IValueT Rd = encodeGPRegister(OpRd, "Rd", MlaName);
2074 IValueT Rn = encodeGPRegister(OpRn, "Rn", MlaName);
2075 IValueT Rm = encodeGPRegister(OpRm, "Rm", MlaName);
2076 IValueT Ra = encodeGPRegister(OpRa, "Ra", MlaName);
2077 verifyRegNotPc(Rd, "Rd", MlaName);
2078 verifyRegNotPc(Rn, "Rn", MlaName);
2079 verifyRegNotPc(Rm, "Rm", MlaName);
2080 verifyRegNotPc(Ra, "Ra", MlaName);
2081 constexpr IValueT MlaOpcode = B21;
2082 constexpr bool SetFlags = true;
2083 // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
2084 emitMulOp(Cond, MlaOpcode, Ra, Rd, Rn, Rm, !SetFlags);
2085 }
2086
void AssemblerARM32::mls(const Operand *OpRd, const Operand *OpRn,
                         const Operand *OpRm, const Operand *OpRa,
                         CondARM32::Cond Cond) {
2090 constexpr const char *MlsName = "mls";
2091 IValueT Rd = encodeGPRegister(OpRd, "Rd", MlsName);
2092 IValueT Rn = encodeGPRegister(OpRn, "Rn", MlsName);
2093 IValueT Rm = encodeGPRegister(OpRm, "Rm", MlsName);
2094 IValueT Ra = encodeGPRegister(OpRa, "Ra", MlsName);
2095 verifyRegNotPc(Rd, "Rd", MlsName);
2096 verifyRegNotPc(Rn, "Rn", MlsName);
2097 verifyRegNotPc(Rm, "Rm", MlsName);
2098 verifyRegNotPc(Ra, "Ra", MlsName);
2099 constexpr IValueT MlsOpcode = B22 | B21;
2100 constexpr bool SetFlags = true;
2101 // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
2102 emitMulOp(Cond, MlsOpcode, Ra, Rd, Rn, Rm, !SetFlags);
2103 }
2104
void AssemblerARM32::mul(const Operand *OpRd, const Operand *OpRn,
                         const Operand *OpSrc1, bool SetFlags,
                         CondARM32::Cond Cond) {
2108 // MUL - ARM section A8.8.114, encoding A1.
2109 // mul{s}<c> <Rd>, <Rn>, <Rm>
2110 //
2111 // cccc0000000sdddd0000mmmm1001nnnn where cccc=Cond, dddd=Rd, nnnn=Rn,
2112 // mmmm=Rm, and s=SetFlags.
2113 constexpr const char *MulName = "mul";
2114 IValueT Rd = encodeGPRegister(OpRd, "Rd", MulName);
2115 IValueT Rn = encodeGPRegister(OpRn, "Rn", MulName);
2116 IValueT Rm = encodeGPRegister(OpSrc1, "Rm", MulName);
2117 verifyRegNotPc(Rd, "Rd", MulName);
2118 verifyRegNotPc(Rn, "Rn", MulName);
2119 verifyRegNotPc(Rm, "Rm", MulName);
2120 // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
2121 constexpr IValueT MulOpcode = 0;
2122 emitMulOp(Cond, MulOpcode, RegARM32::Encoded_Reg_r0, Rd, Rn, Rm, SetFlags);
2123 }
2124
void AssemblerARM32::emitRdRm(CondARM32::Cond Cond, IValueT Opcode,
                              const Operand *OpRd, const Operand *OpRm,
                              const char *InstName) {
2128 IValueT Rd = encodeGPRegister(OpRd, "Rd", InstName);
2129 IValueT Rm = encodeGPRegister(OpRm, "Rm", InstName);
2130 IValueT Encoding =
2131 (Cond << kConditionShift) | Opcode | (Rd << kRdShift) | (Rm << kRmShift);
2132 emitInst(Encoding);
2133 }
2134
void AssemblerARM32::rbit(const Operand *OpRd, const Operand *OpRm,
                          CondARM32::Cond Cond) {
2137 // RBIT - ARM section A8.8.144, encoding A1:
2138 // rbit<c> <Rd>, <Rm>
2139 //
  // cccc011011111111dddd11110011mmmm where cccc=Cond, dddd=Rd, and mmmm=Rm.
  constexpr const char *RbitName = "rbit";
2142 constexpr IValueT RbitOpcode = B26 | B25 | B23 | B22 | B21 | B20 | B19 | B18 |
2143 B17 | B16 | B11 | B10 | B9 | B8 | B5 | B4;
2144 emitRdRm(Cond, RbitOpcode, OpRd, OpRm, RbitName);
2145 }
2146
void AssemblerARM32::rev(const Operand *OpRd, const Operand *OpRm,
                         CondARM32::Cond Cond) {
2149 // REV - ARM section A8.8.145, encoding A1:
2150 // rev<c> <Rd>, <Rm>
2151 //
  // cccc011010111111dddd11110011mmmm where cccc=Cond, dddd=Rd, and mmmm=Rm.
2153 constexpr const char *RevName = "rev";
2154 constexpr IValueT RevOpcode = B26 | B25 | B23 | B21 | B20 | B19 | B18 | B17 |
2155 B16 | B11 | B10 | B9 | B8 | B5 | B4;
2156 emitRdRm(Cond, RevOpcode, OpRd, OpRm, RevName);
2157 }
2158
void AssemblerARM32::rsb(const Operand *OpRd, const Operand *OpRn,
                         const Operand *OpSrc1, bool SetFlags,
                         CondARM32::Cond Cond) {
2162 // RSB (immediate) - ARM section A8.8.152, encoding A1.
2163 // rsb{s}<c> <Rd>, <Rn>, #<RotatedImm8>
2164 //
2165 // cccc0010011snnnnddddiiiiiiiiiiii where cccc=Cond, dddd=Rd, nnnn=Rn,
2166 // s=setFlags and iiiiiiiiiiii defines the RotatedImm8 value.
2167 //
2168 // RSB (register) - ARM section A8.8.163, encoding A1.
2169 // rsb{s}<c> <Rd>, <Rn>, <Rm>{, <Shift>}
2170 //
2171 // cccc0000011snnnnddddiiiiitt0mmmm where cccc=Cond, dddd=Rd, nnnn=Rn,
2172 // mmmm=Rm, iiiii=shift, tt==ShiftKind, and s=SetFlags.
2173 constexpr const char *RsbName = "rsb";
2174 constexpr IValueT RsbOpcode = B1 | B0; // 0011
2175 emitType01(Cond, RsbOpcode, OpRd, OpRn, OpSrc1, SetFlags, RdIsPcAndSetFlags,
2176 RsbName);
2177 }
2178
void AssemblerARM32::rsc(const Operand *OpRd, const Operand *OpRn,
                         const Operand *OpSrc1, bool SetFlags,
                         CondARM32::Cond Cond) {
2182 // RSC (immediate) - ARM section A8.8.155, encoding A1:
2183 // rsc{s}<c> <Rd>, <Rn>, #<RotatedImm8>
2184 //
2185 // cccc0010111snnnnddddiiiiiiiiiiii where cccc=Cond, dddd=Rd, nnnn=Rn,
2186 // mmmm=Rm, iiiii=shift, tt=ShiftKind, and s=SetFlags.
2187 //
2188 // RSC (register) - ARM section A8.8.156, encoding A1:
2189 // rsc{s}<c> <Rd>, <Rn>, <Rm>{, <shift>}
2190 //
2191 // cccc0000111snnnnddddiiiiitt0mmmm where cccc=Cond, dddd=Rd, nnnn=Rn,
2192 // mmmm=Rm, iiiii=shift, tt=ShiftKind, and s=SetFlags.
2193 //
2194 // RSC (register-shifted register) - ARM section A8.8.157, encoding A1:
2195 // rsc{s}<c> <Rd>, <Rn>, <Rm>, <type> <Rs>
2196 //
2197 // cccc0000111fnnnnddddssss0tt1mmmm where cccc=Cond, dddd=Rd, nnnn=Rn,
2198 // mmmm=Rm, ssss=Rs, tt defined <type>, and f=SetFlags.
2199 constexpr const char *RscName = "rsc";
2200 constexpr IValueT RscOpcode = B2 | B1 | B0; // i.e. 0111.
2201 emitType01(Cond, RscOpcode, OpRd, OpRn, OpSrc1, SetFlags, RdIsPcAndSetFlags,
2202 RscName);
2203 }
2204
void AssemblerARM32::sxt(const Operand *OpRd, const Operand *OpSrc0,
                         CondARM32::Cond Cond) {
2207 constexpr const char *SxtName = "sxt";
2208 constexpr IValueT SxtOpcode = B26 | B25 | B23 | B21;
2209 emitSignExtend(Cond, SxtOpcode, OpRd, OpSrc0, SxtName);
2210 }
2211
void AssemblerARM32::sub(const Operand *OpRd, const Operand *OpRn,
                         const Operand *OpSrc1, bool SetFlags,
                         CondARM32::Cond Cond) {
2215 // SUB (register) - ARM section A8.8.223, encoding A1:
2216 // sub{s}<c> <Rd>, <Rn>, <Rm>{, <shift>}
2217 // SUB (SP minus register): See ARM section 8.8.226, encoding A1:
2218 // sub{s}<c> <Rd>, sp, <Rm>{, <Shift>}
2219 //
2220 // cccc0000010snnnnddddiiiiitt0mmmm where cccc=Cond, dddd=Rd, nnnn=Rn,
2221 // mmmm=Rm, iiiii=shift, tt=ShiftKind, and s=SetFlags.
2222 //
2223 // Sub (Immediate) - ARM section A8.8.222, encoding A1:
2224 // sub{s}<c> <Rd>, <Rn>, #<RotatedImm8>
2225 // Sub (Sp minus immediate) - ARM section A8.8.225, encoding A1:
2226 // sub{s}<c> sp, <Rn>, #<RotatedImm8>
2227 //
2228 // cccc0010010snnnnddddiiiiiiiiiiii where cccc=Cond, dddd=Rd, nnnn=Rn,
2229 // s=SetFlags and iiiiiiiiiiii=Src1Value defining RotatedImm8
2230 constexpr const char *SubName = "sub";
2231 constexpr IValueT SubOpcode = B1; // 0010
2232 emitType01(Cond, SubOpcode, OpRd, OpRn, OpSrc1, SetFlags, RdIsPcAndSetFlags,
2233 SubName);
2234 }
2235
2236 namespace {
2237
2238 // Use a particular UDF encoding -- TRAPNaCl in LLVM: 0xE7FEDEF0
2239 // http://llvm.org/viewvc/llvm-project?view=revision&revision=173943
2240 const uint8_t TrapBytesRaw[] = {0xE7, 0xFE, 0xDE, 0xF0};
2241
2242 const auto TrapBytes =
2243 llvm::ArrayRef<uint8_t>(TrapBytesRaw, llvm::array_lengthof(TrapBytesRaw));
2244
2245 } // end of anonymous namespace
2246
llvm::ArrayRef<uint8_t> AssemblerARM32::getNonExecBundlePadding() const {
2248 return TrapBytes;
2249 }
2250
void AssemblerARM32::trap() {
2252 AssemblerBuffer::EnsureCapacity ensured(&Buffer);
2253 for (const uint8_t &Byte : reverse_range(TrapBytes))
2254 Buffer.emit<uint8_t>(Byte);
2255 }
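// Note (illustrative): the trap bytes are emitted in reverse so that the
// little-endian instruction word read back from memory is 0xE7FEDEF0, the UDF
// encoding listed above.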
2256
void AssemblerARM32::tst(const Operand *OpRn, const Operand *OpSrc1,
                         CondARM32::Cond Cond) {
2259 // TST (register) - ARM section A8.8.241, encoding A1:
  //   tst<c> <Rn>, <Rm>{, <shift>}
2261 //
2262 // cccc00010001nnnn0000iiiiitt0mmmm where cccc=Cond, nnnn=Rn, mmmm=Rm,
2263 // iiiii=Shift, and tt=ShiftKind.
2264 //
2265 // TST (immediate) - ARM section A8.8.240, encoding A1:
2266 // tst<c> <Rn>, #<RotatedImm8>
2267 //
2268 // cccc00110001nnnn0000iiiiiiiiiiii where cccc=Cond, nnnn=Rn, and
2269 // iiiiiiiiiiii defines RotatedImm8.
2270 constexpr const char *TstName = "tst";
2271 constexpr IValueT TstOpcode = B3; // ie. 1000
2272 emitCompareOp(Cond, TstOpcode, OpRn, OpSrc1, TstName);
2273 }
2274
void AssemblerARM32::udiv(const Operand *OpRd, const Operand *OpRn,
                          const Operand *OpSrc1, CondARM32::Cond Cond) {
2277 // UDIV - ARM section A8.8.248, encoding A1.
2278 // udiv<c> <Rd>, <Rn>, <Rm>
2279 //
2280 // cccc01110011dddd1111mmmm0001nnnn where cccc=Cond, dddd=Rd, nnnn=Rn, and
2281 // mmmm=Rm.
2282 constexpr const char *UdivName = "udiv";
2283 IValueT Rd = encodeGPRegister(OpRd, "Rd", UdivName);
2284 IValueT Rn = encodeGPRegister(OpRn, "Rn", UdivName);
2285 IValueT Rm = encodeGPRegister(OpSrc1, "Rm", UdivName);
2286 verifyRegNotPc(Rd, "Rd", UdivName);
2287 verifyRegNotPc(Rn, "Rn", UdivName);
2288 verifyRegNotPc(Rm, "Rm", UdivName);
2289 // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
2290 constexpr IValueT UdivOpcode = B21;
2291 emitDivOp(Cond, UdivOpcode, Rd, Rn, Rm);
2292 }
2293
void AssemblerARM32::umull(const Operand *OpRdLo, const Operand *OpRdHi,
                           const Operand *OpRn, const Operand *OpRm,
                           CondARM32::Cond Cond) {
2297 // UMULL - ARM section A8.8.257, encoding A1:
2298 // umull<c> <RdLo>, <RdHi>, <Rn>, <Rm>
2299 //
2300 // cccc0000100shhhhllllmmmm1001nnnn where hhhh=RdHi, llll=RdLo, nnnn=Rn,
2301 // mmmm=Rm, and s=SetFlags
2302 constexpr const char *UmullName = "umull";
2303 IValueT RdLo = encodeGPRegister(OpRdLo, "RdLo", UmullName);
2304 IValueT RdHi = encodeGPRegister(OpRdHi, "RdHi", UmullName);
2305 IValueT Rn = encodeGPRegister(OpRn, "Rn", UmullName);
2306 IValueT Rm = encodeGPRegister(OpRm, "Rm", UmullName);
2307 verifyRegNotPc(RdLo, "RdLo", UmullName);
2308 verifyRegNotPc(RdHi, "RdHi", UmullName);
2309 verifyRegNotPc(Rn, "Rn", UmullName);
2310 verifyRegNotPc(Rm, "Rm", UmullName);
2311 verifyRegsNotEq(RdHi, "RdHi", RdLo, "RdLo", UmullName);
2312 constexpr IValueT UmullOpcode = B23;
2313 constexpr bool SetFlags = false;
2314 emitMulOp(Cond, UmullOpcode, RdLo, RdHi, Rn, Rm, SetFlags);
2315 }
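// Worked example (illustrative): "umull r0, r1, r2, r3" under AL assembles to
// 0xE0810392 (RdLo=r0, RdHi=r1, Rn=r2, Rm=r3).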
2316
void AssemblerARM32::uxt(const Operand *OpRd, const Operand *OpSrc0,
                         CondARM32::Cond Cond) {
2319 constexpr const char *UxtName = "uxt";
2320 constexpr IValueT UxtOpcode = B26 | B25 | B23 | B22 | B21;
2321 emitSignExtend(Cond, UxtOpcode, OpRd, OpSrc0, UxtName);
2322 }
2323
void AssemblerARM32::vabss(const Operand *OpSd, const Operand *OpSm,
                           CondARM32::Cond Cond) {
2326 // VABS - ARM section A8.8.280, encoding A2:
2327 // vabs<c>.f32 <Sd>, <Sm>
2328 //
2329 // cccc11101D110000dddd101011M0mmmm where cccc=Cond, ddddD=Sd, and mmmmM=Sm.
2330 constexpr const char *Vabss = "vabss";
2331 IValueT Sd = encodeSRegister(OpSd, "Sd", Vabss);
2332 IValueT Sm = encodeSRegister(OpSm, "Sm", Vabss);
2333 constexpr IValueT S0 = 0;
2334 constexpr IValueT VabssOpcode = B23 | B21 | B20 | B7 | B6;
2335 emitVFPsss(Cond, VabssOpcode, Sd, S0, Sm);
2336 }
2337
void AssemblerARM32::vabsd(const Operand *OpDd, const Operand *OpDm,
                           CondARM32::Cond Cond) {
2340 // VABS - ARM section A8.8.280, encoding A2:
2341 // vabs<c>.f64 <Dd>, <Dm>
2342 //
2343 // cccc11101D110000dddd101111M0mmmm where cccc=Cond, Ddddd=Dd, and Mmmmm=Dm.
2344 constexpr const char *Vabsd = "vabsd";
2345 const IValueT Dd = encodeDRegister(OpDd, "Dd", Vabsd);
2346 const IValueT Dm = encodeDRegister(OpDm, "Dm", Vabsd);
2347 constexpr IValueT D0 = 0;
2348 constexpr IValueT VabsdOpcode = B23 | B21 | B20 | B7 | B6;
2349 emitVFPddd(Cond, VabsdOpcode, Dd, D0, Dm);
2350 }
2351
void AssemblerARM32::vabsq(const Operand *OpQd, const Operand *OpQm) {
2353 // VABS - ARM section A8.8.280, encoding A1:
2354 // vabs.<dt> <Qd>, <Qm>
2355 //
2356 // 111100111D11ss01ddd0f1101M0mmm0 where Dddd=OpQd, Mddd=OpQm, and
2357 // <dt> in {s8, s16, s32, f32} and ss is the encoding of <dt>.
2358 const Type ElmtTy = typeElementType(OpQd->getType());
2359 assert(ElmtTy != IceType_i64 && "vabsq doesn't allow i64!");
2360 constexpr const char *Vabsq = "vabsq";
2361 const IValueT Dd = mapQRegToDReg(encodeQRegister(OpQd, "Qd", Vabsq));
2362 const IValueT Dm = mapQRegToDReg(encodeQRegister(OpQm, "Qm", Vabsq));
2363 constexpr IValueT Dn = 0;
2364 const IValueT VabsqOpcode =
2365 B24 | B23 | B21 | B20 | B16 | B9 | B8 | (encodeElmtType(ElmtTy) << 18);
2366 constexpr bool UseQRegs = true;
2367 emitSIMDBase(VabsqOpcode, Dd, Dn, Dm, UseQRegs, isFloatingType(ElmtTy));
2368 }
2369
void AssemblerARM32::vadds(const Operand *OpSd, const Operand *OpSn,
                           const Operand *OpSm, CondARM32::Cond Cond) {
2372 // VADD (floating-point) - ARM section A8.8.283, encoding A2:
2373 // vadd<c>.f32 <Sd>, <Sn>, <Sm>
2374 //
2375 // cccc11100D11nnnndddd101sN0M0mmmm where cccc=Cond, s=0, ddddD=Rd, nnnnN=Rn,
2376 // and mmmmM=Rm.
2377 constexpr const char *Vadds = "vadds";
2378 constexpr IValueT VaddsOpcode = B21 | B20;
2379 emitVFPsss(Cond, VaddsOpcode, OpSd, OpSn, OpSm, Vadds);
2380 }
2381
void AssemblerARM32::vaddqi(Type ElmtTy, const Operand *OpQd,
                            const Operand *OpQm, const Operand *OpQn) {
2384 // VADD (integer) - ARM section A8.8.282, encoding A1:
2385 // vadd.<dt> <Qd>, <Qn>, <Qm>
2386 //
2387 // 111100100Dssnnn0ddd01000N1M0mmm0 where Dddd=OpQd, Nnnn=OpQm, Mmmm=OpQm,
2388 // and dt in [i8, i16, i32, i64] where ss is the index.
2389 assert(isScalarIntegerType(ElmtTy) &&
2390 "vaddqi expects vector with integer element type");
2391 constexpr const char *Vaddqi = "vaddqi";
2392 constexpr IValueT VaddqiOpcode = B11;
2393 emitSIMDqqq(VaddqiOpcode, ElmtTy, OpQd, OpQm, OpQn, Vaddqi);
2394 }
2395
void AssemblerARM32::vaddqf(const Operand *OpQd, const Operand *OpQn,
                            const Operand *OpQm) {
2398 // VADD (floating-point) - ARM section A8.8.283, Encoding A1:
2399 // vadd.f32 <Qd>, <Qn>, <Qm>
2400 //
2401 // 111100100D00nnn0ddd01101N1M0mmm0 where Dddd=Qd, Nnnn=Qn, and Mmmm=Qm.
2402 assert(OpQd->getType() == IceType_v4f32 && "vaddqf expects type <4 x float>");
2403 constexpr const char *Vaddqf = "vaddqf";
2404 constexpr IValueT VaddqfOpcode = B11 | B8;
2405 constexpr bool IsFloatTy = true;
2406 emitSIMDqqqBase(VaddqfOpcode, OpQd, OpQn, OpQm, IsFloatTy, Vaddqf);
2407 }
2408
void AssemblerARM32::vaddd(const Operand *OpDd, const Operand *OpDn,
                           const Operand *OpDm, CondARM32::Cond Cond) {
2411 // VADD (floating-point) - ARM section A8.8.283, encoding A2:
2412 // vadd<c>.f64 <Dd>, <Dn>, <Dm>
2413 //
2414 // cccc11100D11nnnndddd101sN0M0mmmm where cccc=Cond, s=1, Ddddd=Rd, Nnnnn=Rn,
2415 // and Mmmmm=Rm.
2416 constexpr const char *Vaddd = "vaddd";
2417 constexpr IValueT VadddOpcode = B21 | B20;
2418 emitVFPddd(Cond, VadddOpcode, OpDd, OpDn, OpDm, Vaddd);
2419 }
2420
void AssemblerARM32::vandq(const Operand *OpQd, const Operand *OpQm,
                           const Operand *OpQn) {
2423 // VAND (register) - ARM section A8.8.287, encoding A1:
2424 // vand <Qd>, <Qn>, <Qm>
2425 //
2426 // 111100100D00nnn0ddd00001N1M1mmm0 where Dddd=OpQd, Nnnn=OpQm, and Mmmm=OpQm.
2427 constexpr const char *Vandq = "vandq";
2428 constexpr IValueT VandqOpcode = B8 | B4;
2429 constexpr Type ElmtTy = IceType_i8;
2430 emitSIMDqqq(VandqOpcode, ElmtTy, OpQd, OpQm, OpQn, Vandq);
2431 }
2432
void AssemblerARM32::vbslq(const Operand *OpQd, const Operand *OpQm,
                           const Operand *OpQn) {
2435 // VBSL (register) - ARM section A8.8.290, encoding A1:
2436 // vbsl <Qd>, <Qn>, <Qm>
2437 //
2438 // 111100110D01nnn0ddd00001N1M1mmm0 where Dddd=OpQd, Nnnn=OpQm, and Mmmm=OpQm.
2439 constexpr const char *Vbslq = "vbslq";
2440 constexpr IValueT VbslqOpcode = B24 | B20 | B8 | B4;
2441 constexpr Type ElmtTy = IceType_i8; // emits sz=0
2442 emitSIMDqqq(VbslqOpcode, ElmtTy, OpQd, OpQm, OpQn, Vbslq);
2443 }
2444
void AssemblerARM32::vceqqi(const Type ElmtTy, const Operand *OpQd,
                            const Operand *OpQm, const Operand *OpQn) {
2447 // vceq (register) - ARM section A8.8.291, encoding A1:
2448 // vceq.<st> <Qd>, <Qn>, <Qm>
2449 //
2450 // 111100110Dssnnnndddd1000NQM1mmmm where Dddd=OpQd, Nnnn=OpQm, Mmmm=OpQm, and
2451 // st in [i8, i16, i32] where ss is the index.
2452 constexpr const char *Vceq = "vceq";
2453 constexpr IValueT VceqOpcode = B24 | B11 | B4;
2454 emitSIMDqqq(VceqOpcode, ElmtTy, OpQd, OpQm, OpQn, Vceq);
2455 }
2456
void AssemblerARM32::vceqqs(const Operand *OpQd, const Operand *OpQm,
                            const Operand *OpQn) {
2459 // vceq (register) - ARM section A8.8.291, encoding A2:
2460 // vceq.f32 <Qd>, <Qn>, <Qm>
2461 //
2462 // 111100100D00nnnndddd1110NQM0mmmm where Dddd=OpQd, Nnnn=OpQm, and Mmmm=OpQm.
2463 constexpr const char *Vceq = "vceq";
2464 constexpr IValueT VceqOpcode = B11 | B10 | B9;
2465 constexpr Type ElmtTy = IceType_i8; // encoded as 0b00
2466 emitSIMDqqq(VceqOpcode, ElmtTy, OpQd, OpQm, OpQn, Vceq);
2467 }
2468
void AssemblerARM32::vcgeqi(const Type ElmtTy, const Operand *OpQd,
                            const Operand *OpQm, const Operand *OpQn) {
2471 // vcge (register) - ARM section A8.8.293, encoding A1:
2472 // vcge.<st> <Qd>, <Qn>, <Qm>
2473 //
2474 // 1111001U0Dssnnnndddd0011NQM1mmmm where Dddd=OpQd, Nnnn=OpQm, Mmmm=OpQm,
2475 // 0=U, and st in [s8, s16, s32] where ss is the index.
2476 constexpr const char *Vcge = "vcge";
2477 constexpr IValueT VcgeOpcode = B9 | B8 | B4;
2478 emitSIMDqqq(VcgeOpcode, ElmtTy, OpQd, OpQm, OpQn, Vcge);
2479 }
2480
void AssemblerARM32::vcugeqi(const Type ElmtTy, const Operand *OpQd,
                             const Operand *OpQm, const Operand *OpQn) {
2483 // vcge (register) - ARM section A8.8.293, encoding A1:
2484 // vcge.<st> <Qd>, <Qn>, <Qm>
2485 //
2486 // 1111001U0Dssnnnndddd0011NQM1mmmm where Dddd=OpQd, Nnnn=OpQm, Mmmm=OpQm,
2487 // 1=U, and st in [u8, u16, u32] where ss is the index.
2488 constexpr const char *Vcge = "vcge";
2489 constexpr IValueT VcgeOpcode = B24 | B9 | B8 | B4;
2490 emitSIMDqqq(VcgeOpcode, ElmtTy, OpQd, OpQm, OpQn, Vcge);
2491 }
2492
void AssemblerARM32::vcgeqs(const Operand *OpQd, const Operand *OpQm,
                            const Operand *OpQn) {
2495 // vcge (register) - ARM section A8.8.293, encoding A2:
2496 // vcge.f32 <Qd>, <Qn>, <Qm>
2497 //
2498 // 111100110D00nnnndddd1110NQM0mmmm where Dddd=OpQd, Nnnn=OpQm, and Mmmm=OpQm.
2499 constexpr const char *Vcge = "vcge";
2500 constexpr IValueT VcgeOpcode = B24 | B11 | B10 | B9;
2501 constexpr Type ElmtTy = IceType_i8; // encoded as 0b00.
2502 emitSIMDqqq(VcgeOpcode, ElmtTy, OpQd, OpQm, OpQn, Vcge);
2503 }
2504
void AssemblerARM32::vcgtqi(const Type ElmtTy, const Operand *OpQd,
                            const Operand *OpQm, const Operand *OpQn) {
2507 // vcgt (register) - ARM section A8.8.295, encoding A1:
2508 // vcgt.<st> <Qd>, <Qn>, <Qm>
2509 //
2510 // 1111001U0Dssnnnndddd0011NQM0mmmm where Dddd=OpQd, Nnnn=OpQm, Mmmm=OpQm,
2511 // 0=U, and st in [s8, s16, s32] where ss is the index.
2512 constexpr const char *Vcge = "vcgt";
2513 constexpr IValueT VcgeOpcode = B9 | B8;
2514 emitSIMDqqq(VcgeOpcode, ElmtTy, OpQd, OpQm, OpQn, Vcge);
2515 }
2516
void AssemblerARM32::vcugtqi(const Type ElmtTy, const Operand *OpQd,
                             const Operand *OpQm, const Operand *OpQn) {
2519 // vcgt (register) - ARM section A8.8.295, encoding A1:
2520 // vcgt.<st> <Qd>, <Qn>, <Qm>
2521 //
2522 // 111100110Dssnnnndddd0011NQM0mmmm where Dddd=OpQd, Nnnn=OpQm, Mmmm=OpQm,
2523 // 1=U, and st in [u8, u16, u32] where ss is the index.
2524 constexpr const char *Vcge = "vcgt";
2525 constexpr IValueT VcgeOpcode = B24 | B9 | B8;
2526 emitSIMDqqq(VcgeOpcode, ElmtTy, OpQd, OpQm, OpQn, Vcge);
2527 }
2528
void AssemblerARM32::vcgtqs(const Operand *OpQd, const Operand *OpQm,
                            const Operand *OpQn) {
2531 // vcgt (register) - ARM section A8.8.295, encoding A2:
2532 // vcgt.f32 <Qd>, <Qn>, <Qm>
2533 //
2534 // 111100110D10nnnndddd1110NQM0mmmm where Dddd=OpQd, Nnnn=OpQm, and Mmmm=OpQm.
2535 constexpr const char *Vcge = "vcgt";
2536 constexpr IValueT VcgeOpcode = B24 | B21 | B11 | B10 | B9;
2537 constexpr Type ElmtTy = IceType_i8; // encoded as 0b00.
2538 emitSIMDqqq(VcgeOpcode, ElmtTy, OpQd, OpQm, OpQn, Vcge);
2539 }
2540
void AssemblerARM32::vcmpd(const Operand *OpDd, const Operand *OpDm,
                           CondARM32::Cond Cond) {
2543 constexpr const char *Vcmpd = "vcmpd";
2544 IValueT Dd = encodeDRegister(OpDd, "Dd", Vcmpd);
2545 IValueT Dm = encodeDRegister(OpDm, "Dm", Vcmpd);
2546 constexpr IValueT VcmpdOpcode = B23 | B21 | B20 | B18 | B6;
2547 constexpr IValueT Dn = 0;
2548 emitVFPddd(Cond, VcmpdOpcode, Dd, Dn, Dm);
2549 }
2550
void AssemblerARM32::vcmpdz(const Operand *OpDd, CondARM32::Cond Cond) {
2552 constexpr const char *Vcmpdz = "vcmpdz";
2553 IValueT Dd = encodeDRegister(OpDd, "Dd", Vcmpdz);
2554 constexpr IValueT VcmpdzOpcode = B23 | B21 | B20 | B18 | B16 | B6;
2555 constexpr IValueT Dn = 0;
2556 constexpr IValueT Dm = 0;
2557 emitVFPddd(Cond, VcmpdzOpcode, Dd, Dn, Dm);
2558 }
2559
void AssemblerARM32::vcmps(const Operand *OpSd, const Operand *OpSm,
                           CondARM32::Cond Cond) {
2562 constexpr const char *Vcmps = "vcmps";
2563 IValueT Sd = encodeSRegister(OpSd, "Sd", Vcmps);
2564 IValueT Sm = encodeSRegister(OpSm, "Sm", Vcmps);
2565 constexpr IValueT VcmpsOpcode = B23 | B21 | B20 | B18 | B6;
2566 constexpr IValueT Sn = 0;
2567 emitVFPsss(Cond, VcmpsOpcode, Sd, Sn, Sm);
2568 }
2569
void AssemblerARM32::vcmpsz(const Operand *OpSd, CondARM32::Cond Cond) {
2571 constexpr const char *Vcmpsz = "vcmps";
2572 IValueT Sd = encodeSRegister(OpSd, "Sd", Vcmpsz);
2573 constexpr IValueT VcmpszOpcode = B23 | B21 | B20 | B18 | B16 | B6;
2574 constexpr IValueT Sn = 0;
2575 constexpr IValueT Sm = 0;
2576 emitVFPsss(Cond, VcmpszOpcode, Sd, Sn, Sm);
2577 }
2578
void AssemblerARM32::emitVFPsd(CondARM32::Cond Cond, IValueT Opcode, IValueT Sd,
                               IValueT Dm) {
2581 assert(Sd < RegARM32::getNumSRegs());
2582 assert(Dm < RegARM32::getNumDRegs());
2583 assert(CondARM32::isDefined(Cond));
2584 constexpr IValueT VFPOpcode = B27 | B26 | B25 | B11 | B9;
2585 const IValueT Encoding =
2586 Opcode | VFPOpcode | (encodeCondition(Cond) << kConditionShift) |
2587 (getYInRegXXXXY(Sd) << 22) | (getXXXXInRegXXXXY(Sd) << 12) |
2588 (getYInRegYXXXX(Dm) << 5) | getXXXXInRegYXXXX(Dm);
2589 emitInst(Encoding);
2590 }
2591
void AssemblerARM32::vcvtdi(const Operand *OpDd, const Operand *OpSm,
                            CondARM32::Cond Cond) {
2594 // VCVT (between floating-point and integer, Floating-point)
2595 // - ARM Section A8.8.306, encoding A1:
2596 // vcvt<c>.f64.s32 <Dd>, <Sm>
2597 //
2598 // cccc11101D111000dddd10111M0mmmm where cccc=Cond, Ddddd=Dd, and mmmmM=Sm.
2599 constexpr const char *Vcvtdi = "vcvtdi";
2600 IValueT Dd = encodeDRegister(OpDd, "Dd", Vcvtdi);
2601 IValueT Sm = encodeSRegister(OpSm, "Sm", Vcvtdi);
2602 constexpr IValueT VcvtdiOpcode = B23 | B21 | B20 | B19 | B8 | B7 | B6;
2603 emitVFPds(Cond, VcvtdiOpcode, Dd, Sm);
2604 }
2605
2606 void AssemblerARM32::vcvtdu(const Operand *OpDd, const Operand *OpSm,
2607 CondARM32::Cond Cond) {
2608 // VCVT (between floating-point and integer, Floating-point)
2609 // - ARM Section A8.8.306, encoding A1:
2610 // vcvt<c>.f64.u32 <Dd>, <Sm>
2611 //
2612 // cccc11101D111000dddd10101M0mmmm where cccc=Cond, Ddddd=Dd, and mmmmM=Sm.
2613 constexpr const char *Vcvtdu = "vcvtdu";
2614 IValueT Dd = encodeDRegister(OpDd, "Dd", Vcvtdu);
2615 IValueT Sm = encodeSRegister(OpSm, "Sm", Vcvtdu);
2616 constexpr IValueT VcvtduOpcode = B23 | B21 | B20 | B19 | B8 | B6;
2617 emitVFPds(Cond, VcvtduOpcode, Dd, Sm);
2618 }
2619
2620 void AssemblerARM32::vcvtsd(const Operand *OpSd, const Operand *OpDm,
2621 CondARM32::Cond Cond) {
2622 constexpr const char *Vcvtsd = "vcvtsd";
2623 IValueT Sd = encodeSRegister(OpSd, "Sd", Vcvtsd);
2624 IValueT Dm = encodeDRegister(OpDm, "Dm", Vcvtsd);
2625 constexpr IValueT VcvtsdOpcode =
2626 B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6;
2627 emitVFPsd(Cond, VcvtsdOpcode, Sd, Dm);
2628 }
2629
2630 void AssemblerARM32::vcvtis(const Operand *OpSd, const Operand *OpSm,
2631 CondARM32::Cond Cond) {
2632 // VCVT (between floating-point and integer, Floating-point)
2633 // - ARM Section A8.8.306, encoding A1:
2634 // vcvt<c>.s32.f32 <Sd>, <Sm>
2635 //
2636 // cccc11101D111101dddd10011M0mmmm where cccc=Cond, ddddD=Sd, and mmmmM=Sm.
2637 constexpr const char *Vcvtis = "vcvtis";
2638 IValueT Sd = encodeSRegister(OpSd, "Sd", Vcvtis);
2639 IValueT Sm = encodeSRegister(OpSm, "Sm", Vcvtis);
2640 constexpr IValueT VcvtisOpcode = B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6;
2641 constexpr IValueT S0 = 0;
2642 emitVFPsss(Cond, VcvtisOpcode, Sd, S0, Sm);
2643 }
2644
2645 void AssemblerARM32::vcvtid(const Operand *OpSd, const Operand *OpDm,
2646 CondARM32::Cond Cond) {
2647 // VCVT (between floating-point and integer, Floating-point)
2648 // - ARM Section A8.8.306, encoding A1:
2649 // vcvt<c>.s32.f64 <Sd>, <Dm>
2650 //
2651 // cccc11101D111101dddd10111M0mmmm where cccc=Cond, ddddD=Sd, and Mmmmm=Dm.
2652 constexpr const char *Vcvtid = "vcvtid";
2653 IValueT Sd = encodeSRegister(OpSd, "Sd", Vcvtid);
2654 IValueT Dm = encodeDRegister(OpDm, "Dm", Vcvtid);
2655 constexpr IValueT VcvtidOpcode =
2656 B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6;
2657 emitVFPsd(Cond, VcvtidOpcode, Sd, Dm);
2658 }
2659
2660 void AssemblerARM32::vcvtsi(const Operand *OpSd, const Operand *OpSm,
2661 CondARM32::Cond Cond) {
2662 // VCVT (between floating-point and integer, Floating-point)
2663 // - ARM Section A8.8.306, encoding A1:
2664 // vcvt<c>.f32.s32 <Sd>, <Sm>
2665 //
2666 // cccc11101D111000dddd10011M0mmmm where cccc=Cond, ddddD=Sd, and mmmmM=Sm.
2667 constexpr const char *Vcvtsi = "vcvtsi";
2668 IValueT Sd = encodeSRegister(OpSd, "Sd", Vcvtsi);
2669 IValueT Sm = encodeSRegister(OpSm, "Sm", Vcvtsi);
2670 constexpr IValueT VcvtsiOpcode = B23 | B21 | B20 | B19 | B7 | B6;
2671 constexpr IValueT S0 = 0;
2672 emitVFPsss(Cond, VcvtsiOpcode, Sd, S0, Sm);
2673 }
2674
2675 void AssemblerARM32::vcvtsu(const Operand *OpSd, const Operand *OpSm,
2676 CondARM32::Cond Cond) {
2677 // VCVT (between floating-point and integer, Floating-point)
2678 // - ARM Section A8.8.306, encoding A1:
2679 // vcvt<c>.f32.u32 <Sd>, <Sm>
2680 //
2681 // cccc11101D111000dddd10001M0mmmm where cccc=Cond, ddddD=Sd, and mmmmM=Sm.
2682 constexpr const char *Vcvtsu = "vcvtsu";
2683 IValueT Sd = encodeSRegister(OpSd, "Sd", Vcvtsu);
2684 IValueT Sm = encodeSRegister(OpSm, "Sm", Vcvtsu);
2685 constexpr IValueT VcvtsuOpcode = B23 | B21 | B20 | B19 | B6;
2686 constexpr IValueT S0 = 0;
2687 emitVFPsss(Cond, VcvtsuOpcode, Sd, S0, Sm);
2688 }
2689
2690 void AssemblerARM32::vcvtud(const Operand *OpSd, const Operand *OpDm,
2691 CondARM32::Cond Cond) {
2692 // VCVT (between floating-point and integer, Floating-point)
2693 // - ARM Section A8.8.306, encoding A1:
2694 // vcvt<c>.u32.f64 <Sd>, <Dm>
2695 //
2696 // cccc11101D111100dddd10111M0mmmm where cccc=Cond, ddddD=Sd, and Mmmmm=Dm.
2697 constexpr const char *Vcvtud = "vcvtud";
2698 IValueT Sd = encodeSRegister(OpSd, "Sd", Vcvtud);
2699 IValueT Dm = encodeDRegister(OpDm, "Dm", Vcvtud);
2700 constexpr IValueT VcvtudOpcode = B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6;
2701 emitVFPsd(Cond, VcvtudOpcode, Sd, Dm);
2702 }
2703
2704 void AssemblerARM32::vcvtus(const Operand *OpSd, const Operand *OpSm,
2705 CondARM32::Cond Cond) {
2706 // VCVT (between floating-point and integer, Floating-point)
2707 // - ARM Section A8.8.306, encoding A1:
2708 // vcvt<c>.u32.f32 <Sd>, <Sm>
2709 //
2710 // cccc11101D111100dddd10011M0mmmm where cccc=Cond, ddddD=Sd, and mmmmM=Sm.
2711 constexpr const char *Vcvtus = "vcvtus";
2712 IValueT Sd = encodeSRegister(OpSd, "Sd", Vcvtus);
2713 IValueT Sm = encodeSRegister(OpSm, "Sm", Vcvtus);
2714 constexpr IValueT VcvtsiOpcode = B23 | B21 | B20 | B19 | B18 | B7 | B6;
2715 constexpr IValueT S0 = 0;
2716 emitVFPsss(Cond, VcvtsiOpcode, Sd, S0, Sm);
2717 }
2718
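// The four Advanced SIMD VCVT helpers below share one encoding and differ
// only in the two "op" bits (B8 and B7): as written here, B8 selects
// f32->s32, B8|B7 selects f32->u32, 0 selects s32->f32, and B7 selects
// u32->f32.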
2719 void AssemblerARM32::vcvtqsi(const Operand *OpQd, const Operand *OpQm) {
2720 // VCVT (between floating-point and integer, Advanced SIMD)
2721 // - ARM Section A8.8.305, encoding A1:
2722 // vcvt<c>.f32.s32 <Qd>, <Qm>
2723 //
2724 // 111100111D11ss11dddd011ooQM0mmmm where Ddddd=Qd, Mmmmm=Qm, and 10=op.
2725 constexpr const char *Vcvtqsi = "vcvt.s32.f32";
2726 constexpr IValueT VcvtqsiOpcode = B8;
2727 emitSIMDCvtqq(VcvtqsiOpcode, OpQd, OpQm, Vcvtqsi);
2728 }
2729
2730 void AssemblerARM32::vcvtqsu(const Operand *OpQd, const Operand *OpQm) {
2731 // VCVT (between floating-point and integer, Advanced SIMD)
2732 // - ARM Section A8.8.305, encoding A1:
2733 // vcvt<c>.f32.u32 <Qd>, <Qm>
2734 //
2735 // 111100111D11ss11dddd011ooQM0mmmm where Ddddd=Qd, Mmmmm=Qm, and 11=op.
2736 constexpr const char *Vcvtqsu = "vcvt.u32.f32";
2737 constexpr IValueT VcvtqsuOpcode = B8 | B7;
2738 emitSIMDCvtqq(VcvtqsuOpcode, OpQd, OpQm, Vcvtqsu);
2739 }
2740
2741 void AssemblerARM32::vcvtqis(const Operand *OpQd, const Operand *OpQm) {
2742 // VCVT (between floating-point and integer, Advanced SIMD)
2743 // - ARM Section A8.8.305, encoding A1:
2744 // vcvt<c>.f32.s32 <Qd>, <Qm>
2745 //
2746 // 111100111D11ss11dddd011ooQM0mmmm where Ddddd=Qd, Mmmmm=Qm, and 00=op.
2747 constexpr const char *Vcvtqis = "vcvt.f32.s32";
2748 constexpr IValueT VcvtqisOpcode = 0;
2749 emitSIMDCvtqq(VcvtqisOpcode, OpQd, OpQm, Vcvtqis);
2750 }
2751
2752 void AssemblerARM32::vcvtqus(const Operand *OpQd, const Operand *OpQm) {
2753 // VCVT (between floating-point and integer, Advanced SIMD)
2754 // - ARM Section A8.8.305, encoding A1:
2755 // vcvt<c>.f32.u32 <Qd>, <Qm>
2756 //
2757 // 111100111D11ss11dddd011ooQM0mmmm where Ddddd=Qd, Mmmmm=Qm, and 01=op.
2758 constexpr const char *Vcvtqus = "vcvt.f32.u32";
2759 constexpr IValueT VcvtqusOpcode = B7;
2760 emitSIMDCvtqq(VcvtqusOpcode, OpQd, OpQm, Vcvtqus);
2761 }
2762
2763 void AssemblerARM32::emitVFPds(CondARM32::Cond Cond, IValueT Opcode, IValueT Dd,
2764 IValueT Sm) {
2765 assert(Dd < RegARM32::getNumDRegs());
2766 assert(Sm < RegARM32::getNumSRegs());
2767 assert(CondARM32::isDefined(Cond));
2768 constexpr IValueT VFPOpcode = B27 | B26 | B25 | B11 | B9;
2769 const IValueT Encoding =
2770 Opcode | VFPOpcode | (encodeCondition(Cond) << kConditionShift) |
2771 (getYInRegYXXXX(Dd) << 22) | (getXXXXInRegYXXXX(Dd) << 12) |
2772 (getYInRegXXXXY(Sm) << 5) | getXXXXInRegXXXXY(Sm);
2773 emitInst(Encoding);
2774 }
2775
2776 void AssemblerARM32::vcvtds(const Operand *OpDd, const Operand *OpSm,
2777 CondARM32::Cond Cond) {
2778 constexpr const char *Vcvtds = "vcvtds";
2779 IValueT Dd = encodeDRegister(OpDd, "Dd", Vcvtds);
2780 IValueT Sm = encodeSRegister(OpSm, "Sm", Vcvtds);
2781 constexpr IValueT VcvtdsOpcode = B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6;
2782 emitVFPds(Cond, VcvtdsOpcode, Dd, Sm);
2783 }
2784
2785 void AssemblerARM32::vdivs(const Operand *OpSd, const Operand *OpSn,
2786 const Operand *OpSm, CondARM32::Cond Cond) {
2787 // VDIV (floating-point) - ARM section A8.8.283, encoding A2:
2788 // vdiv<c>.f32 <Sd>, <Sn>, <Sm>
2789 //
2790 // cccc11101D00nnnndddd101sN0M0mmmm where cccc=Cond, s=0, ddddD=Rd, nnnnN=Rn,
2791 // and mmmmM=Rm.
2792 constexpr const char *Vdivs = "vdivs";
2793 constexpr IValueT VdivsOpcode = B23;
2794 emitVFPsss(Cond, VdivsOpcode, OpSd, OpSn, OpSm, Vdivs);
2795 }
2796
2797 void AssemblerARM32::vdivd(const Operand *OpDd, const Operand *OpDn,
2798 const Operand *OpDm, CondARM32::Cond Cond) {
2799 // VDIV (floating-point) - ARM section A8.8.283, encoding A2:
2800 // vdiv<c>.f64 <Dd>, <Dn>, <Dm>
2801 //
2802 // cccc11101D00nnnndddd101sN0M0mmmm where cccc=Cond, s=1, Ddddd=Rd, Nnnnn=Rn,
2803 // and Mmmmm=Rm.
2804 constexpr const char *Vdivd = "vdivd";
2805 constexpr IValueT VdivdOpcode = B23;
2806 emitVFPddd(Cond, VdivdOpcode, OpDd, OpDn, OpDm, Vdivd);
2807 }
2808
2809 void AssemblerARM32::veord(const Operand *OpDd, const Operand *OpDn,
2810 const Operand *OpDm) {
2811 // VEOR - ARM section A8.8.315, encoding A1:
2812 // veor<c> <Dd>, <Dn>, <Dm>
2813 //
2814 // 111100110D00nnnndddd0001N0M1mmmm where Ddddd=Dd, Nnnnn=Dn, and Mmmmm=Dm.
2815 constexpr const char *Veord = "veord";
2816 IValueT Dd = encodeDRegister(OpDd, "Dd", Veord);
2817 IValueT Dn = encodeDRegister(OpDn, "Dn", Veord);
2818 IValueT Dm = encodeDRegister(OpDm, "Dm", Veord);
2819 const IValueT Encoding =
2820 B25 | B24 | B8 | B4 |
2821 (encodeCondition(CondARM32::Cond::kNone) << kConditionShift) |
2822 (getYInRegYXXXX(Dd) << 22) | (getXXXXInRegYXXXX(Dn) << 16) |
2823 (getXXXXInRegYXXXX(Dd) << 12) | (getYInRegYXXXX(Dn) << 7) |
2824 (getYInRegYXXXX(Dm) << 5) | getXXXXInRegYXXXX(Dm);
2825 emitInst(Encoding);
2826 }
2827
2828 void AssemblerARM32::veorq(const Operand *OpQd, const Operand *OpQn,
2829 const Operand *OpQm) {
2830 // VEOR - ARM section A8.8.316, encoding A1:
2831 // veor <Qd>, <Qn>, <Qm>
2832 //
2833 // 111100110D00nnn0ddd00001N1M1mmm0 where Dddd=Qd, Nnnn=Qn, and Mmmm=Qm.
2834 constexpr const char *Veorq = "veorq";
2835 constexpr IValueT VeorqOpcode = B24 | B8 | B4;
2836 emitSIMDqqq(VeorqOpcode, IceType_i8, OpQd, OpQn, OpQm, Veorq);
2837 }
2838
2839 void AssemblerARM32::vldrd(const Operand *OpDd, const Operand *OpAddress,
2840 CondARM32::Cond Cond, const TargetInfo &TInfo) {
2841 // VLDR - ARM section A8.8.333, encoding A1.
2842 // vldr<c> <Dd>, [<Rn>{, #+/-<imm>}]
2843 //
2844 // cccc1101UD01nnnndddd1011iiiiiiii where cccc=Cond, nnnn=Rn, Ddddd=Rd,
2845 // iiiiiiii=abs(Imm >> 2), and U=1 if Imm >= 0.
2846 constexpr const char *Vldrd = "vldrd";
2847 IValueT Dd = encodeDRegister(OpDd, "Dd", Vldrd);
2848 assert(CondARM32::isDefined(Cond));
2849 IValueT Address;
2850 EncodedOperand AddressEncoding =
2851 encodeAddress(OpAddress, Address, TInfo, RotatedImm8Div4Address);
2852 (void)AddressEncoding;
2853 assert(AddressEncoding == EncodedAsImmRegOffset);
2854 IValueT Encoding = B27 | B26 | B24 | B20 | B11 | B9 | B8 |
2855 (encodeCondition(Cond) << kConditionShift) |
2856 (getYInRegYXXXX(Dd) << 22) |
2857 (getXXXXInRegYXXXX(Dd) << 12) | Address;
2858 emitInst(Encoding);
2859 }
2860
2861 void AssemblerARM32::vldrq(const Operand *OpQd, const Operand *OpAddress,
2862 CondARM32::Cond Cond, const TargetInfo &TInfo) {
2863 // This is a pseudo-instruction which loads 64-bit data into a quadword
2864 // vector register. It is implemented by loading into the lower doubleword.
2865
2866 // VLDR - ARM section A8.8.333, encoding A1.
2867 // vldr<c> <Dd>, [<Rn>{, #+/-<imm>}]
2868 //
2869 // cccc1101UD01nnnndddd1011iiiiiiii where cccc=Cond, nnnn=Rn, Ddddd=Rd,
2870 // iiiiiiii=abs(Imm >> 2), and U=1 if Imm >= 0.
2871 constexpr const char *Vldrq = "vldrq";
2872 IValueT Dd = mapQRegToDReg(encodeQRegister(OpQd, "Qd", Vldrq));
2873 assert(CondARM32::isDefined(Cond));
2874 IValueT Address;
2875 EncodedOperand AddressEncoding =
2876 encodeAddress(OpAddress, Address, TInfo, RotatedImm8Div4Address);
2877 (void)AddressEncoding;
2878 assert(AddressEncoding == EncodedAsImmRegOffset);
2879 IValueT Encoding = B27 | B26 | B24 | B20 | B11 | B9 | B8 |
2880 (encodeCondition(Cond) << kConditionShift) |
2881 (getYInRegYXXXX(Dd) << 22) |
2882 (getXXXXInRegYXXXX(Dd) << 12) | Address;
2883 emitInst(Encoding);
2884 }
2885
2886 void AssemblerARM32::vldrs(const Operand *OpSd, const Operand *OpAddress,
2887 CondARM32::Cond Cond, const TargetInfo &TInfo) {
2888 // VLDR - ARM section A8.8.333, encoding A2.
2889 // vldr<c> <Sd>, [<Rn>{, #+/-<imm>}]
2890 //
2891 // cccc1101UD01nnnndddd1010iiiiiiii where cccc=Cond, nnnn=Rn, ddddD=Sd,
2892 // iiiiiiii=abs(Imm >> 2), and U=1 if Imm >= 0.
2893 constexpr const char *Vldrs = "vldrs";
2894 IValueT Sd = encodeSRegister(OpSd, "Sd", Vldrs);
2895 assert(CondARM32::isDefined(Cond));
2896 IValueT Address;
2897 EncodedOperand AddressEncoding =
2898 encodeAddress(OpAddress, Address, TInfo, RotatedImm8Div4Address);
2899 (void)AddressEncoding;
2900 assert(AddressEncoding == EncodedAsImmRegOffset);
2901 IValueT Encoding = B27 | B26 | B24 | B20 | B11 | B9 |
2902 (encodeCondition(Cond) << kConditionShift) |
2903 (getYInRegXXXXY(Sd) << 22) |
2904 (getXXXXInRegXXXXY(Sd) << 12) | Address;
2905 emitInst(Encoding);
2906 }
2907
2908 void AssemblerARM32::emitVMem1Op(IValueT Opcode, IValueT Dd, IValueT Rn,
2909 IValueT Rm, DRegListSize NumDRegs,
2910 size_t ElmtSize, IValueT Align,
2911 const char *InstName) {
2912 assert(Utils::IsAbsoluteUint(2, Align));
2913 IValueT EncodedElmtSize;
2914 switch (ElmtSize) {
2915 default: {
2916 std::string Buffer;
2917 llvm::raw_string_ostream StrBuf(Buffer);
2918 StrBuf << InstName << ": found invalid vector element size " << ElmtSize;
2919 llvm::report_fatal_error(StrBuf.str());
2920 }
2921 case 8:
2922 EncodedElmtSize = 0;
2923 break;
2924 case 16:
2925 EncodedElmtSize = 1;
2926 break;
2927 case 32:
2928 EncodedElmtSize = 2;
2929 break;
2930 case 64:
2931 EncodedElmtSize = 3;
2932 }
2933 const IValueT Encoding =
2934 Opcode | (encodeCondition(CondARM32::kNone) << kConditionShift) |
2935 (getYInRegYXXXX(Dd) << 22) | (Rn << kRnShift) |
2936 (getXXXXInRegYXXXX(Dd) << kRdShift) | (NumDRegs << 8) |
2937 (EncodedElmtSize << 6) | (Align << 4) | Rm;
2938 emitInst(Encoding);
2939 }
2940
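// A second emitVMem1Op form: the overload above (used by vld1qr/vst1qr for
// the "multiple single elements" encodings) carries a D-register-list count
// in bits 9:8 and the element size in bits 7:6, whereas this one places the
// encoded element size in bits 11:10, matching the single-element-to-one-lane
// forms emitted by vld1() and vst1() below.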
2941 void AssemblerARM32::emitVMem1Op(IValueT Opcode, IValueT Dd, IValueT Rn,
2942 IValueT Rm, size_t ElmtSize, IValueT Align,
2943 const char *InstName) {
2944 assert(Utils::IsAbsoluteUint(2, Align));
2945 IValueT EncodedElmtSize;
2946 switch (ElmtSize) {
2947 default: {
2948 std::string Buffer;
2949 llvm::raw_string_ostream StrBuf(Buffer);
2950 StrBuf << InstName << ": found invalid vector element size " << ElmtSize;
2951 llvm::report_fatal_error(StrBuf.str());
2952 }
2953 case 8:
2954 EncodedElmtSize = 0;
2955 break;
2956 case 16:
2957 EncodedElmtSize = 1;
2958 break;
2959 case 32:
2960 EncodedElmtSize = 2;
2961 break;
2962 case 64:
2963 EncodedElmtSize = 3;
2964 }
2965 const IValueT Encoding =
2966 Opcode | (encodeCondition(CondARM32::kNone) << kConditionShift) |
2967 (getYInRegYXXXX(Dd) << 22) | (Rn << kRnShift) |
2968 (getXXXXInRegYXXXX(Dd) << kRdShift) | (EncodedElmtSize << 10) |
2969 (Align << 4) | Rm;
2970 emitInst(Encoding);
2971 }
2972
2973 void AssemblerARM32::vld1qr(size_t ElmtSize, const Operand *OpQd,
2974 const Operand *OpAddress, const TargetInfo &TInfo) {
2975 // VLD1 (multiple single elements) - ARM section A8.8.320, encoding A1:
2976 // vld1.<size> <Qd>, [<Rn>]
2977 //
2978 // 111101000D10nnnnddd0ttttssaammmm where tttt=DRegListSize2, Dddd=Qd,
2979 // nnnn=Rn, aa=0 (use default alignment), size=ElmtSize, and ss is the
2980 // encoding of ElmtSize.
2981 constexpr const char *Vld1qr = "vld1qr";
2982 const IValueT Qd = encodeQRegister(OpQd, "Qd", Vld1qr);
2983 const IValueT Dd = mapQRegToDReg(Qd);
2984 IValueT Address;
2985 if (encodeAddress(OpAddress, Address, TInfo, NoImmOffsetAddress) !=
2986 EncodedAsImmRegOffset)
2987 llvm::report_fatal_error(std::string(Vld1qr) + ": malformed memory address");
2988 const IValueT Rn = mask(Address, kRnShift, 4);
2989 constexpr IValueT Rm = RegARM32::Reg_pc;
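// Using pc (0b1111) in the Rm field selects the plain "[<Rn>]" addressing
// form of VLD1/VST1, i.e. no writeback and no post-increment register.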
2990 constexpr IValueT Opcode = B26 | B21;
2991 constexpr IValueT Align = 0; // use default alignment.
2992 emitVMem1Op(Opcode, Dd, Rn, Rm, DRegListSize2, ElmtSize, Align, Vld1qr);
2993 }
2994
2995 void AssemblerARM32::vld1(size_t ElmtSize, const Operand *OpQd,
2996 const Operand *OpAddress, const TargetInfo &TInfo) {
2997 // This is a pseudo-instruction for loading a single element of a quadword
2998 // vector. For 64-bit the lower doubleword vector is loaded.
2999
3000 if (ElmtSize == 64) {
3001 return vldrq(OpQd, OpAddress, Ice::CondARM32::AL, TInfo);
3002 }
3003
3004 // VLD1 (single element to one lane) - ARMv7-A/R section A8.6.308, encoding
3005 // A1:
3006 // VLD1<c>.<size> <list>, [<Rn>{@<align>}], <Rm>
3007 //
3008 // 111101001D10nnnnddddss00aaaammmm where Dddd=Qd, nnnn=Rn,
3009 // aaaa=0 (use default alignment), size=ElmtSize, and ss is the
3010 // encoding of ElmtSize.
3011 constexpr const char *Vld1qr = "vld1qr";
3012 const IValueT Qd = encodeQRegister(OpQd, "Qd", Vld1qr);
3013 const IValueT Dd = mapQRegToDReg(Qd);
3014 IValueT Address;
3015 if (encodeAddress(OpAddress, Address, TInfo, NoImmOffsetAddress) !=
3016 EncodedAsImmRegOffset)
3017 llvm::report_fatal_error(std::string(Vld1qr) + ": malformed memory address");
3018 const IValueT Rn = mask(Address, kRnShift, 4);
3019 constexpr IValueT Rm = RegARM32::Reg_pc;
3020 constexpr IValueT Opcode = B26 | B23 | B21;
3021 constexpr IValueT Align = 0; // use default alignment.
3022 emitVMem1Op(Opcode, Dd, Rn, Rm, ElmtSize, Align, Vld1qr);
3023 }
3024
3025 bool AssemblerARM32::vmovqc(const Operand *OpQd, const ConstantInteger32 *Imm) {
3026 // VMOV (immediate) - ARM section A8.8.339, encoding A1:
3027 // VMOV.<dt> <Qd>, #<Imm>
3028 // 1111001x1D000yyyddddcccc01p1zzzz where Qd=Ddddd, Imm=xyyyzzzz, cmode=cccc,
3029 // and Op=p.
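// Returns false when the constant cannot be expressed as an Advanced SIMD
// modified immediate for this element type, in which case the caller is
// presumably expected to materialize the vector constant some other way.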
3030 constexpr const char *Vmovc = "vmovc";
3031 const IValueT Dd = mapQRegToDReg(encodeQRegister(OpQd, "Qd", Vmovc));
3032 IValueT Value = Imm->getValue();
3033 const Type VecTy = OpQd->getType();
3034 if (!isVectorType(VecTy))
3035 return false;
3036
3037 IValueT Op;
3038 IValueT Cmode;
3039 IValueT Imm8;
3040 if (!encodeAdvSIMDExpandImm(Value, typeElementType(VecTy), Op, Cmode, Imm8))
3041 return false;
3042 if (Op == 0 && mask(Cmode, 0, 1) == 1)
3043 return false;
3044 if (Op == 1 && Cmode != 13)
3045 return false;
3046 const IValueT Encoding =
3047 (0xF << kConditionShift) | B25 | B23 | B6 | B4 |
3048 (mask(Imm8, 7, 1) << 24) | (getYInRegYXXXX(Dd) << 22) |
3049 (mask(Imm8, 4, 3) << 16) | (getXXXXInRegYXXXX(Dd) << 12) | (Cmode << 8) |
3050 (Op << 5) | mask(Imm8, 0, 4);
3051 emitInst(Encoding);
3052 return true;
3053 }
3054
3055 void AssemblerARM32::vmovd(const Operand *OpDd,
3056 const OperandARM32FlexFpImm *OpFpImm,
3057 CondARM32::Cond Cond) {
3058 // VMOV (immediate) - ARM section A8.8.339, encoding A2:
3059 // vmov<c>.f64 <Dd>, #<imm>
3060 //
3061 // cccc11101D11xxxxdddd10110000yyyy where cccc=Cond, Ddddd=Dd, xxxxyyyy=imm.
3062 constexpr const char *Vmovd = "vmovd";
3063 IValueT Dd = encodeDRegister(OpDd, "Dd", Vmovd);
3064 IValueT Imm8 = OpFpImm->getModifiedImm();
3065 assert(Imm8 < (1 << 8));
3066 constexpr IValueT VmovdOpcode = B23 | B21 | B20 | B8;
3067 IValueT OpcodePlusImm8 = VmovdOpcode | ((Imm8 >> 4) << 16) | (Imm8 & 0xf);
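// For illustration: with Imm8 = 0xA5, the line above places 0xA (the upper
// nibble) in bits 19:16 and 0x5 (the lower nibble) in bits 3:0.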
3068 constexpr IValueT D0 = 0;
3069 emitVFPddd(Cond, OpcodePlusImm8, Dd, D0, D0);
3070 }
3071
3072 void AssemblerARM32::vmovdd(const Operand *OpDd, const Variable *OpDm,
3073 CondARM32::Cond Cond) {
3074 // VMOV (register) - ARM section A8.8.340, encoding A2:
3075 // vmov<c>.f64 <Dd>, <Dm>
3076 //
3077 // cccc11101D110000dddd101101M0mmmm where cccc=Cond, Ddddd=Dd, and Mmmmm=Dm.
3078 constexpr const char *Vmovdd = "vmovdd";
3079 IValueT Dd = encodeDRegister(OpDd, "Dd", Vmovdd);
3080 IValueT Dm = encodeDRegister(OpDm, "Dm", Vmovdd);
3081 constexpr IValueT VmovddOpcode = B23 | B21 | B20 | B6;
3082 constexpr IValueT D0 = 0;
3083 emitVFPddd(Cond, VmovddOpcode, Dd, D0, Dm);
3084 }
3085
3086 void AssemblerARM32::vmovdrr(const Operand *OpDm, const Operand *OpRt,
3087 const Operand *OpRt2, CondARM32::Cond Cond) {
3088 // VMOV (between two ARM core registers and a doubleword extension register).
3089 // ARM section A8.8.345, encoding A1:
3090 // vmov<c> <Dm>, <Rt>, <Rt2>
3091 //
3092 // cccc11000100xxxxyyyy101100M1mmmm where cccc=Cond, xxxx=Rt, yyyy=Rt2, and
3093 // Mmmmm=Dm.
3094 constexpr const char *Vmovdrr = "vmovdrr";
3095 IValueT Dm = encodeDRegister(OpDm, "Dm", Vmovdrr);
3096 IValueT Rt = encodeGPRegister(OpRt, "Rt", Vmovdrr);
3097 IValueT Rt2 = encodeGPRegister(OpRt2, "Rt", Vmovdrr);
3098 assert(Rt != RegARM32::Encoded_Reg_sp);
3099 assert(Rt != RegARM32::Encoded_Reg_pc);
3100 assert(Rt2 != RegARM32::Encoded_Reg_sp);
3101 assert(Rt2 != RegARM32::Encoded_Reg_pc);
3102 assert(Rt != Rt2);
3103 assert(CondARM32::isDefined(Cond));
3104 IValueT Encoding = B27 | B26 | B22 | B11 | B9 | B8 | B4 |
3105 (encodeCondition(Cond) << kConditionShift) | (Rt2 << 16) |
3106 (Rt << 12) | (getYInRegYXXXX(Dm) << 5) |
3107 getXXXXInRegYXXXX(Dm);
3108 emitInst(Encoding);
3109 }
3110
3111 void AssemblerARM32::vmovqir(const Operand *OpQn, uint32_t Index,
3112 const Operand *OpRt, CondARM32::Cond Cond) {
3113 // VMOV (ARM core register to scalar) - ARM section A8.8.341, encoding A1:
3114 // vmov<c>.<size> <Dn[x]>, <Rt>
3115 constexpr const char *Vmovdr = "vmovdr";
3116 constexpr bool IsExtract = true;
3117 emitInsertExtractInt(Cond, OpQn, Index, OpRt, !IsExtract, Vmovdr);
3118 }
3119
3120 void AssemblerARM32::vmovqis(const Operand *OpQd, uint32_t Index,
3121 const Operand *OpSm, CondARM32::Cond Cond) {
3122 constexpr const char *Vmovqis = "vmovqis";
3123 assert(Index < 4);
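// The low Q registers alias four consecutive S registers each; assuming
// mapQRegToSReg() returns the first alias, adding Index selects the requested
// 32-bit lane, so the insertion reduces to a plain S-to-S move.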
3124 IValueT Sd = mapQRegToSReg(encodeQRegister(OpQd, "Qd", Vmovqis)) + Index;
3125 IValueT Sm = encodeSRegister(OpSm, "Sm", Vmovqis);
3126 emitMoveSS(Cond, Sd, Sm);
3127 }
3128
3129 void AssemblerARM32::vmovrqi(const Operand *OpRt, const Operand *OpQn,
3130 uint32_t Index, CondARM32::Cond Cond) {
3131 // VMOV (scalar to ARM core register) - ARM section A8.8.342, encoding A1:
3132 // vmov<c>.<dt> <Rt>, <Dn[x]>
3133 constexpr const char *Vmovrd = "vmovrd";
3134 constexpr bool IsExtract = true;
3135 emitInsertExtractInt(Cond, OpQn, Index, OpRt, IsExtract, Vmovrd);
3136 }
3137
3138 void AssemblerARM32::vmovrrd(const Operand *OpRt, const Operand *OpRt2,
3139 const Operand *OpDm, CondARM32::Cond Cond) {
3140 // VMOV (between two ARM core registers and a doubleword extension register).
3141 // ARM section A8.8.345, encoding A1:
3142 // vmov<c> <Rt>, <Rt2>, <Dm>
3143 //
3144 // cccc11000101xxxxyyyy101100M1mmmm where cccc=Cond, xxxx=Rt, yyyy=Rt2, and
3145 // Mmmmm=Dm.
3146 constexpr const char *Vmovrrd = "vmovrrd";
3147 IValueT Rt = encodeGPRegister(OpRt, "Rt", Vmovrrd);
3148 IValueT Rt2 = encodeGPRegister(OpRt2, "Rt", Vmovrrd);
3149 IValueT Dm = encodeDRegister(OpDm, "Dm", Vmovrrd);
3150 assert(Rt != RegARM32::Encoded_Reg_sp);
3151 assert(Rt != RegARM32::Encoded_Reg_pc);
3152 assert(Rt2 != RegARM32::Encoded_Reg_sp);
3153 assert(Rt2 != RegARM32::Encoded_Reg_pc);
3154 assert(Rt != Rt2);
3155 assert(CondARM32::isDefined(Cond));
3156 IValueT Encoding = B27 | B26 | B22 | B20 | B11 | B9 | B8 | B4 |
3157 (encodeCondition(Cond) << kConditionShift) | (Rt2 << 16) |
3158 (Rt << 12) | (getYInRegYXXXX(Dm) << 5) |
3159 getXXXXInRegYXXXX(Dm);
3160 emitInst(Encoding);
3161 }
3162
3163 void AssemblerARM32::vmovrs(const Operand *OpRt, const Operand *OpSn,
3164 CondARM32::Cond Cond) {
3165 // VMOV (between ARM core register and single-precision register)
3166 // ARM section A8.8.343, encoding A1.
3167 //
3168 // vmov<c> <Rt>, <Sn>
3169 //
3170 // cccc11100001nnnntttt1010N0010000 where cccc=Cond, nnnnN = Sn, and tttt=Rt.
3171 constexpr const char *Vmovrs = "vmovrs";
3172 IValueT Rt = encodeGPRegister(OpRt, "Rt", Vmovrs);
3173 IValueT Sn = encodeSRegister(OpSn, "Sn", Vmovrs);
3174 assert(CondARM32::isDefined(Cond));
3175 IValueT Encoding = (encodeCondition(Cond) << kConditionShift) | B27 | B26 |
3176 B25 | B20 | B11 | B9 | B4 | (getXXXXInRegXXXXY(Sn) << 16) |
3177 (Rt << kRdShift) | (getYInRegXXXXY(Sn) << 7);
3178 emitInst(Encoding);
3179 }
3180
3181 void AssemblerARM32::vmovs(const Operand *OpSd,
3182 const OperandARM32FlexFpImm *OpFpImm,
3183 CondARM32::Cond Cond) {
3184 // VMOV (immediate) - ARM section A8.8.339, encoding A2:
3185 // vmov<c>.f32 <Sd>, #<imm>
3186 //
3187 // cccc11101D11xxxxdddd10100000yyyy where cccc=Cond, ddddD=Sn, xxxxyyyy=imm.
3188 constexpr const char *Vmovs = "vmovs";
3189 IValueT Sd = encodeSRegister(OpSd, "Sd", Vmovs);
3190 IValueT Imm8 = OpFpImm->getModifiedImm();
3191 assert(Imm8 < (1 << 8));
3192 constexpr IValueT VmovsOpcode = B23 | B21 | B20;
3193 IValueT OpcodePlusImm8 = VmovsOpcode | ((Imm8 >> 4) << 16) | (Imm8 & 0xf);
3194 constexpr IValueT S0 = 0;
3195 emitVFPsss(Cond, OpcodePlusImm8, Sd, S0, S0);
3196 }
3197
3198 void AssemblerARM32::vmovss(const Operand *OpSd, const Variable *OpSm,
3199 CondARM32::Cond Cond) {
3200 constexpr const char *Vmovss = "vmovss";
3201 IValueT Sd = encodeSRegister(OpSd, "Sd", Vmovss);
3202 IValueT Sm = encodeSRegister(OpSm, "Sm", Vmovss);
3203 emitMoveSS(Cond, Sd, Sm);
3204 }
3205
3206 void AssemblerARM32::vmovsqi(const Operand *OpSd, const Operand *OpQm,
3207 uint32_t Index, CondARM32::Cond Cond) {
3208 constexpr const char *Vmovsqi = "vmovsqi";
3209 const IValueT Sd = encodeSRegister(OpSd, "Sd", Vmovsqi);
3210 assert(Index < 4);
3211 const IValueT Sm =
3212 mapQRegToSReg(encodeQRegister(OpQm, "Qm", Vmovsqi)) + Index;
3213 emitMoveSS(Cond, Sd, Sm);
3214 }
3215
3216 void AssemblerARM32::vmovsr(const Operand *OpSn, const Operand *OpRt,
3217 CondARM32::Cond Cond) {
3218 // VMOV (between ARM core register and single-precision register)
3219 // ARM section A8.8.343, encoding A1.
3220 //
3221 // vmov<c> <Sn>, <Rt>
3222 //
3223 // cccc11100000nnnntttt1010N0010000 where cccc=Cond, nnnnN = Sn, and tttt=Rt.
3224 constexpr const char *Vmovsr = "vmovsr";
3225 IValueT Sn = encodeSRegister(OpSn, "Sn", Vmovsr);
3226 IValueT Rt = encodeGPRegister(OpRt, "Rt", Vmovsr);
3227 assert(Sn < RegARM32::getNumSRegs());
3228 assert(Rt < RegARM32::getNumGPRegs());
3229 assert(CondARM32::isDefined(Cond));
3230 IValueT Encoding = (encodeCondition(Cond) << kConditionShift) | B27 | B26 |
3231 B25 | B11 | B9 | B4 | (getXXXXInRegXXXXY(Sn) << 16) |
3232 (Rt << kRdShift) | (getYInRegXXXXY(Sn) << 7);
3233 emitInst(Encoding);
3234 }
3235
3236 void AssemblerARM32::vmlad(const Operand *OpDd, const Operand *OpDn,
3237 const Operand *OpDm, CondARM32::Cond Cond) {
3238 // VMLA, VMLS (floating-point), ARM section A8.8.337, encoding A2:
3239 // vmla<c>.f64 <Dd>, <Dn>, <Dm>
3240 //
3241 // cccc11100d00nnnndddd1011n0M0mmmm where cccc=Cond, Ddddd=Dd, Nnnnn=Dn, and
3242 // Mmmmm=Dm
3243 constexpr const char *Vmlad = "vmlad";
3244 constexpr IValueT VmladOpcode = 0;
3245 emitVFPddd(Cond, VmladOpcode, OpDd, OpDn, OpDm, Vmlad);
3246 }
3247
3248 void AssemblerARM32::vmlas(const Operand *OpSd, const Operand *OpSn,
3249 const Operand *OpSm, CondARM32::Cond Cond) {
3250 // VMLA, VMLS (floating-point), ARM section A8.8.337, encoding A2:
3251 // vmla<c>.f32 <Sd>, <Sn>, <Sm>
3252 //
3253 // cccc11100d00nnnndddd1010n0M0mmmm where cccc=Cond, ddddD=Sd, nnnnN=Sn, and
3254 // mmmmM=Sm
3255 constexpr const char *Vmlas = "vmlas";
3256 constexpr IValueT VmlasOpcode = 0;
3257 emitVFPsss(Cond, VmlasOpcode, OpSd, OpSn, OpSm, Vmlas);
3258 }
3259
3260 void AssemblerARM32::vmlsd(const Operand *OpDd, const Operand *OpDn,
3261 const Operand *OpDm, CondARM32::Cond Cond) {
3262 // VMLA, VMLS (floating-point), ARM section A8.8.337, encoding A2:
3263 // vmls<c>.f64 <Dd>, <Dn>, <Dm>
3264 //
3265 // cccc11100d00nnnndddd1011n1M0mmmm where cccc=Cond, Ddddd=Dd, Nnnnn=Dn, and
3266 // Mmmmm=Dm
3267 constexpr const char *Vmlad = "vmlad";
3268 constexpr IValueT VmladOpcode = B6;
3269 emitVFPddd(Cond, VmladOpcode, OpDd, OpDn, OpDm, Vmlad);
3270 }
3271
3272 void AssemblerARM32::vmlss(const Operand *OpSd, const Operand *OpSn,
3273 const Operand *OpSm, CondARM32::Cond Cond) {
3274 // VMLA, VMLS (floating-point), ARM section A8.8.337, encoding A2:
3275 // vmls<c>.f32 <Sd>, <Sn>, <Sm>
3276 //
3277 // cccc11100d00nnnndddd1010n1M0mmmm where cccc=Cond, ddddD=Sd, nnnnN=Sn, and
3278 // mmmmM=Sm
3279 constexpr const char *Vmlas = "vmlas";
3280 constexpr IValueT VmlasOpcode = B6;
3281 emitVFPsss(Cond, VmlasOpcode, OpSd, OpSn, OpSm, Vmlas);
3282 }
3283
3284 void AssemblerARM32::vmrsAPSR_nzcv(CondARM32::Cond Cond) {
3285 // VMRS - ARM section A8.8.348, encoding A1:
3286 // vmrs<c> APSR_nzcv, FPSCR
3287 //
3288 // cccc111011110001tttt101000010000 where tttt=0b1111 (i.e. when Rt=pc, use
3289 // APSR_nzcv instead).
3290 assert(CondARM32::isDefined(Cond));
3291 IValueT Encoding = B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 | B15 | B14 |
3292 B13 | B12 | B11 | B9 | B4 |
3293 (encodeCondition(Cond) << kConditionShift);
3294 emitInst(Encoding);
3295 }
3296
3297 void AssemblerARM32::vmuls(const Operand *OpSd, const Operand *OpSn,
3298 const Operand *OpSm, CondARM32::Cond Cond) {
3299 // VMUL (floating-point) - ARM section A8.8.351, encoding A2:
3300 // vmul<c>.f32 <Sd>, <Sn>, <Sm>
3301 //
3302 // cccc11100D10nnnndddd101sN0M0mmmm where cccc=Cond, s=0, ddddD=Rd, nnnnN=Rn,
3303 // and mmmmM=Rm.
3304 constexpr const char *Vmuls = "vmuls";
3305 constexpr IValueT VmulsOpcode = B21;
3306 emitVFPsss(Cond, VmulsOpcode, OpSd, OpSn, OpSm, Vmuls);
3307 }
3308
3309 void AssemblerARM32::vmuld(const Operand *OpDd, const Operand *OpDn,
3310 const Operand *OpDm, CondARM32::Cond Cond) {
3311 // VMUL (floating-point) - ARM section A8.8.351, encoding A2:
3312 // vmul<c>.f64 <Dd>, <Dn>, <Dm>
3313 //
3314 // cccc11100D10nnnndddd101sN0M0mmmm where cccc=Cond, s=1, Ddddd=Rd, Nnnnn=Rn,
3315 // and Mmmmm=Rm.
3316 constexpr const char *Vmuld = "vmuld";
3317 constexpr IValueT VmuldOpcode = B21;
3318 emitVFPddd(Cond, VmuldOpcode, OpDd, OpDn, OpDm, Vmuld);
3319 }
3320
3321 void AssemblerARM32::vmulqi(Type ElmtTy, const Operand *OpQd,
3322 const Operand *OpQn, const Operand *OpQm) {
3323 // VMUL, VMULL (integer and polynomial) - ARM section A8.8.350, encoding A1:
3324 // vmul<c>.<dt> <Qd>, <Qn>, <Qm>
3325 //
3326 // 111100100Dssnnn0ddd01001NqM1mmm0 where Dddd=Qd, Nnnn=Qn, Mmmm=Qm, and
3327 // dt in [i8, i16, i32] where ss is the index.
3328 assert(isScalarIntegerType(ElmtTy) &&
3329 "vmulqi expects vector with integer element type");
3330 assert(ElmtTy != IceType_i64 && "vmulqi on i64 vector not allowed");
3331 constexpr const char *Vmulqi = "vmulqi";
3332 constexpr IValueT VmulqiOpcode = B11 | B8 | B4;
3333 emitSIMDqqq(VmulqiOpcode, ElmtTy, OpQd, OpQn, OpQm, Vmulqi);
3334 }
3335
3336 void AssemblerARM32::vmulh(Type ElmtTy, const Operand *OpQd,
3337 const Operand *OpQn, const Operand *OpQm,
3338 bool Unsigned) {
3339 // Pseudo-instruction for multiplying the corresponding elements in the lower
3340 // halves of two quadword vectors, and returning the high halves.
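// A rough sketch of the expansion emitted below: a widening VMULL of the two
// low doubleword halves into Qd, followed by a shift-right-and-narrow (VSHRN
// by 16, as encoded further down) back into Dd to keep the upper bits of each
// product.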
3341
3342 // VMULL (integer and polynomial) - ARMv7-A/R section A8.6.337, encoding A1:
3343 // VMUL<c>.<dt> <Dd>, <Dn>, <Dm>
3344 //
3345 // 1111001U1Dssnnnndddd11o0N0M0mmmm
3346 assert(isScalarIntegerType(ElmtTy) &&
3347 "vmull expects vector with integer element type");
3348 assert(ElmtTy != IceType_i64 && "vmull on i64 vector not allowed");
3349 constexpr const char *Vmull = "vmull";
3350
3351 constexpr IValueT ElmtShift = 20;
3352 const IValueT ElmtSize = encodeElmtType(ElmtTy);
3353 assert(Utils::IsUint(2, ElmtSize));
3354
3355 const IValueT VmullOpcode =
3356 B25 | (Unsigned ? B24 : 0) | B23 | (B20) | B11 | B10;
3357
3358 const IValueT Qd = encodeQRegister(OpQd, "Qd", Vmull);
3359 const IValueT Qn = encodeQRegister(OpQn, "Qn", Vmull);
3360 const IValueT Qm = encodeQRegister(OpQm, "Qm", Vmull);
3361
3362 const IValueT Dd = mapQRegToDReg(Qd);
3363 const IValueT Dn = mapQRegToDReg(Qn);
3364 const IValueT Dm = mapQRegToDReg(Qm);
3365
3366 constexpr bool UseQRegs = false;
3367 constexpr bool IsFloatTy = false;
3368 emitSIMDBase(VmullOpcode | (ElmtSize << ElmtShift), Dd, Dn, Dm, UseQRegs,
3369 IsFloatTy);
3370
3371 // Shift and narrow to obtain high halves.
3372 constexpr IValueT VshrnOpcode = B25 | B23 | B11 | B4;
3373 const IValueT Imm6 = encodeSIMDShiftImm6(ST_Vshr, IceType_i16, 16);
3374 constexpr IValueT ImmShift = 16;
3375
3376 emitSIMDBase(VshrnOpcode | (Imm6 << ImmShift), Dd, 0, Dd, UseQRegs,
3377 IsFloatTy);
3378 }
3379
3380 void AssemblerARM32::vmlap(Type ElmtTy, const Operand *OpQd,
3381 const Operand *OpQn, const Operand *OpQm) {
3382 // Pseudo-instruction for multiplying the corresponding elements in the lower
3383 // halves of two quadword vectors, and pairwise-adding the results.
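// Sketch of the expansion: a widening VMULL of the low halves into Qd,
// followed by "vpadd Dd, Dd, Dd+1", which pairwise-adds adjacent products
// from the two halves of Qd back into Dd.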
3384
3385 // VMULL (integer and polynomial) - ARM section A8.8.350, encoding A1:
3386 // vmull<c>.<dt> <Qd>, <Qn>, <Qm>
3387 //
3388 // 1111001U1Dssnnnndddd11o0N0M0mmmm
3389 assert(isScalarIntegerType(ElmtTy) &&
3390 "vmull expects vector with integer element type");
3391 assert(ElmtTy != IceType_i64 && "vmull on i64 vector not allowed");
3392 constexpr const char *Vmull = "vmull";
3393
3394 constexpr IValueT ElmtShift = 20;
3395 const IValueT ElmtSize = encodeElmtType(ElmtTy);
3396 assert(Utils::IsUint(2, ElmtSize));
3397
3398 bool Unsigned = false;
3399 const IValueT VmullOpcode =
3400 B25 | (Unsigned ? B24 : 0) | B23 | (B20) | B11 | B10;
3401
3402 const IValueT Dd = mapQRegToDReg(encodeQRegister(OpQd, "Qd", Vmull));
3403 const IValueT Dn = mapQRegToDReg(encodeQRegister(OpQn, "Qn", Vmull));
3404 const IValueT Dm = mapQRegToDReg(encodeQRegister(OpQm, "Qm", Vmull));
3405
3406 constexpr bool UseQRegs = false;
3407 constexpr bool IsFloatTy = false;
3408 emitSIMDBase(VmullOpcode | (ElmtSize << ElmtShift), Dd, Dn, Dm, UseQRegs,
3409 IsFloatTy);
3410
3411 // VPADD - ARM section A8.8.280, encoding A1:
3412 // vpadd.<dt> <Dd>, <Dm>, <Dn>
3413 //
3414 // 111100100Dssnnnndddd1011NQM1mmmm where Ddddd=<Dd>, Mmmmm=<Dm>, and
3415 // Nnnnn=<Dn> and ss is the encoding of <dt>.
3416 assert(ElmtTy != IceType_i64 && "vpadd doesn't allow i64!");
3417 const IValueT VpaddOpcode =
3418 B25 | B11 | B9 | B8 | B4 | ((encodeElmtType(ElmtTy) + 1) << 20);
3419 emitSIMDBase(VpaddOpcode, Dd, Dd, Dd + 1, UseQRegs, IsFloatTy);
3420 }
3421
3422 void AssemblerARM32::vdup(Type ElmtTy, const Operand *OpQd, const Operand *OpQn,
3423 IValueT Idx) {
3424 // VDUP (scalar) - ARMv7-A/R section A8.6.302, encoding A1:
3425 // VDUP<c>.<size> <Qd>, <Dm[x]>
3426 //
3427 // 111100111D11iiiiddd011000QM0mmmm where Dddd=<Qd>, Mmmmm=<Dm>, and
3428 // iiii=imm4 encodes <size> and [x].
3429 constexpr const char *Vdup = "vdup";
3430
3431 const IValueT VdupOpcode = B25 | B24 | B23 | B21 | B20 | B11 | B10;
3432
3433 const IValueT Dd = mapQRegToDReg(encodeQRegister(OpQd, "Qd", Vdup));
3434 const IValueT Dn = mapQRegToDReg(encodeQRegister(OpQn, "Qn", Vdup));
3435
3436 constexpr bool UseQRegs = true;
3437 constexpr bool IsFloatTy = false;
3438
3439 IValueT Imm4 = 0;
3440 bool Lower = true;
3441 switch (ElmtTy) {
3442 case IceType_i8:
3443 assert(Idx < 16);
3444 Lower = Idx < 8;
3445 Imm4 = 1 | ((Idx & 0x7) << 1);
3446 break;
3447 case IceType_i16:
3448 assert(Idx < 8);
3449 Lower = Idx < 4;
3450 Imm4 = 2 | ((Idx & 0x3) << 2);
3451 break;
3452 case IceType_i32:
3453 case IceType_f32:
3454 assert(Idx < 4);
3455 Lower = Idx < 2;
3456 Imm4 = 4 | ((Idx & 0x1) << 3);
3457 break;
3458 default:
3459 assert(false && "vdup only supports 8, 16, and 32-bit elements");
3460 break;
3461 }
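// Worked example of the Imm4 computation above: for IceType_i16 with Idx = 5,
// Lower is false (5 >= 4) and Imm4 = 2 | ((5 & 0x3) << 2) = 0b0110, so the
// odd doubleword (Dn + 1) supplies the scalar.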
3462
3463 emitSIMDBase(VdupOpcode, Dd, Imm4, Dn + (Lower ? 0 : 1), UseQRegs, IsFloatTy);
3464 }
3465
3466 void AssemblerARM32::vzip(Type ElmtTy, const Operand *OpQd, const Operand *OpQn,
3467 const Operand *OpQm) {
3468 // Pseudo-instruction which interleaves the elements of the lower halves of
3469 // two quadword registers.
3470
3471 // Vzip - ARMv7-A/R section A8.6.410, encoding A1:
3472 // VZIP<c>.<size> <Dd>, <Dm>
3473 //
3474 // 111100111D11ss10dddd00011QM0mmmm where Ddddd=<Dd>, Mmmmm=<Dm>, and
3475 // ss=<size>
3476 assert(ElmtTy != IceType_i64 && "vzip on i64 vector not allowed");
3477
3478 constexpr const char *Vzip = "vzip";
3479 const IValueT Dd = mapQRegToDReg(encodeQRegister(OpQd, "Qd", Vzip));
3480 const IValueT Dn = mapQRegToDReg(encodeQRegister(OpQn, "Qn", Vzip));
3481 const IValueT Dm = mapQRegToDReg(encodeQRegister(OpQm, "Qm", Vzip));
3482
3483 constexpr bool UseQRegs = false;
3484 constexpr bool IsFloatTy = false;
3485
3486 // VMOV Dd, Dm
3487 // 111100100D10mmmmdddd0001MQM1mmmm
3488 constexpr IValueT VmovOpcode = B25 | B21 | B8 | B4;
3489
3490 // Copy lower half of second source to upper half of destination.
3491 emitSIMDBase(VmovOpcode, Dd + 1, Dm, Dm, UseQRegs, IsFloatTy);
3492
3493 // Copy lower half of first source to lower half of destination.
3494 if (Dd != Dn)
3495 emitSIMDBase(VmovOpcode, Dd, Dn, Dn, UseQRegs, IsFloatTy);
3496
3497 constexpr IValueT ElmtShift = 18;
3498 const IValueT ElmtSize = encodeElmtType(ElmtTy);
3499 assert(Utils::IsUint(2, ElmtSize));
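// For 8- and 16-bit elements a doubleword VZIP interleaves Dd and Dd+1 in
// place; for 32-bit elements the same interleave is obtained with VTRN, which
// is presumably why the opcode is switched on the element type below.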
3500
3501 if (ElmtTy != IceType_i32 && ElmtTy != IceType_f32) {
3502 constexpr IValueT VzipOpcode = B25 | B24 | B23 | B21 | B20 | B17 | B8 | B7;
3503 // Zip the lower and upper half of destination.
3504 emitSIMDBase(VzipOpcode | (ElmtSize << ElmtShift), Dd, 0, Dd + 1, UseQRegs,
3505 IsFloatTy);
3506 } else {
3507 constexpr IValueT VtrnOpcode = B25 | B24 | B23 | B21 | B20 | B17 | B7;
3508 emitSIMDBase(VtrnOpcode | (ElmtSize << ElmtShift), Dd, 0, Dd + 1, UseQRegs,
3509 IsFloatTy);
3510 }
3511 }
3512
3513 void AssemblerARM32::vmulqf(const Operand *OpQd, const Operand *OpQn,
3514 const Operand *OpQm) {
3515 // VMUL (floating-point) - ARM section A8.8.351, encoding A1:
3516 // vmul.f32 <Qd>, <Qn>, <Qm>
3517 //
3518 // 111100110D00nnn0ddd01101MqM1mmm0 where Dddd=Qd, Nnnn=Qn, and Mmmm=Qm.
3519 assert(OpQd->getType() == IceType_v4f32 && "vmulqf expects type <4 x float>");
3520 constexpr const char *Vmulqf = "vmulqf";
3521 constexpr IValueT VmulqfOpcode = B24 | B11 | B8 | B4;
3522 constexpr bool IsFloatTy = true;
3523 emitSIMDqqqBase(VmulqfOpcode, OpQd, OpQn, OpQm, IsFloatTy, Vmulqf);
3524 }
3525
3526 void AssemblerARM32::vmvnq(const Operand *OpQd, const Operand *OpQm) {
3527 // VMVN (integer) - ARM section A8.8.354, encoding A1:
3528 // vmvn <Qd>, <Qm>
3529 //
3530 // 111100111D110000dddd01011QM0mmmm where Dddd=Qd, Mmmm=Qm, and 1=Q.
3531 // TODO(jpp) xxx: unify
3532 constexpr const char *Vmvn = "vmvn";
3533 constexpr IValueT VmvnOpcode = B24 | B23 | B21 | B20 | B10 | B8 | B7;
3534 const IValueT Qd = encodeQRegister(OpQd, "Qd", Vmvn);
3535 constexpr IValueT Qn = 0;
3536 const IValueT Qm = encodeQRegister(OpQm, "Qm", Vmvn);
3537 constexpr bool UseQRegs = true;
3538 constexpr bool IsFloat = false;
3539 emitSIMDBase(VmvnOpcode, mapQRegToDReg(Qd), mapQRegToDReg(Qn),
3540 mapQRegToDReg(Qm), UseQRegs, IsFloat);
3541 }
3542
3543 void AssemblerARM32::vmovlq(const Operand *OpQd, const Operand *OpQn,
3544 const Operand *OpQm) {
3545 // Pseudo-instruction to copy the first source operand and insert the lower
3546 // half of the second operand into the lower half of the destination.
3547
3548 // VMOV (register) - ARMv7-A/R section A8.6.327, encoding A1:
3549 // VMOV<c> <Dd>, <Dm>
3550 //
3551 // 111100111D110000ddd001011QM0mmm0 where Dddd=Qd, Mmmm=Qm, and Q=0.
3552
3553 constexpr const char *Vmov = "vmov";
3554 const IValueT Dd = mapQRegToDReg(encodeQRegister(OpQd, "Qd", Vmov));
3555 const IValueT Dn = mapQRegToDReg(encodeQRegister(OpQn, "Qn", Vmov));
3556 const IValueT Dm = mapQRegToDReg(encodeQRegister(OpQm, "Qm", Vmov));
3557
3558 constexpr bool UseQRegs = false;
3559 constexpr bool IsFloat = false;
3560
3561 const IValueT VmovOpcode = B25 | B21 | B8 | B4;
3562
3563 if (Dd != Dm)
3564 emitSIMDBase(VmovOpcode, Dd, Dm, Dm, UseQRegs, IsFloat);
3565 if (Dd + 1 != Dn + 1)
3566 emitSIMDBase(VmovOpcode, Dd + 1, Dn + 1, Dn + 1, UseQRegs, IsFloat);
3567 }
3568
3569 void AssemblerARM32::vmovhq(const Operand *OpQd, const Operand *OpQn,
3570 const Operand *OpQm) {
3571 // Pseudo-instruction to copy the first source operand and insert the high
3572 // half of the second operand into the high half of the destination.
3573
3574 // VMOV (register) - ARMv7-A/R section A8.6.327, encoding A1:
3575 // VMOV<c> <Dd>, <Dm>
3576 //
3577 // 111100111D110000ddd001011QM0mmm0 where Dddd=Qd, Mmmm=Qm, and Q=0.
3578
3579 constexpr const char *Vmov = "vmov";
3580 const IValueT Dd = mapQRegToDReg(encodeQRegister(OpQd, "Qd", Vmov));
3581 const IValueT Dn = mapQRegToDReg(encodeQRegister(OpQn, "Qn", Vmov));
3582 const IValueT Dm = mapQRegToDReg(encodeQRegister(OpQm, "Qm", Vmov));
3583
3584 constexpr bool UseQRegs = false;
3585 constexpr bool IsFloat = false;
3586
3587 const IValueT VmovOpcode = B25 | B21 | B8 | B4;
3588
3589 if (Dd != Dn)
3590 emitSIMDBase(VmovOpcode, Dd, Dn, Dn, UseQRegs, IsFloat);
3591 if (Dd + 1 != Dm + 1)
3592 emitSIMDBase(VmovOpcode, Dd + 1, Dm + 1, Dm + 1, UseQRegs, IsFloat);
3593 }
3594
3595 void AssemblerARM32::vmovhlq(const Operand *OpQd, const Operand *OpQn,
3596 const Operand *OpQm) {
3597 // Pseudo-instruction to copy the first source operand and insert the high
3598 // half of the second operand into the lower half of the destination.
3599
3600 // VMOV (register) - ARMv7-A/R section A8.6.327, encoding A1:
3601 // VMOV<c> <Dd>, <Dm>
3602 //
3603 // 111100111D110000ddd001011QM0mmm0 where Dddd=Qd, Mmmm=Qm, and Q=0.
3604
3605 constexpr const char *Vmov = "vmov";
3606 const IValueT Dd = mapQRegToDReg(encodeQRegister(OpQd, "Qd", Vmov));
3607 const IValueT Dn = mapQRegToDReg(encodeQRegister(OpQn, "Qn", Vmov));
3608 const IValueT Dm = mapQRegToDReg(encodeQRegister(OpQm, "Qm", Vmov));
3609
3610 constexpr bool UseQRegs = false;
3611 constexpr bool IsFloat = false;
3612
3613 const IValueT VmovOpcode = B25 | B21 | B8 | B4;
3614
3615 if (Dd != Dm + 1)
3616 emitSIMDBase(VmovOpcode, Dd, Dm + 1, Dm + 1, UseQRegs, IsFloat);
3617 if (Dd + 1 != Dn + 1)
3618 emitSIMDBase(VmovOpcode, Dd + 1, Dn + 1, Dn + 1, UseQRegs, IsFloat);
3619 }
3620
3621 void AssemblerARM32::vmovlhq(const Operand *OpQd, const Operand *OpQn,
3622 const Operand *OpQm) {
3623 // Pseudo-instruction to copy the first source operand and insert the lower
3624 // half of the second operand into the high half of the destination.
3625
3626 // VMOV (register) - ARMv7-A/R section A8.6.327, encoding A1:
3627 // VMOV<c> <Dd>, <Dm>
3628 //
3629 // 111100111D110000ddd001011QM0mmm0 where Dddd=Qd, Mmmm=Qm, and Q=0.
3630
3631 constexpr const char *Vmov = "vmov";
3632 const IValueT Dd = mapQRegToDReg(encodeQRegister(OpQd, "Qd", Vmov));
3633 const IValueT Dn = mapQRegToDReg(encodeQRegister(OpQn, "Qn", Vmov));
3634 const IValueT Dm = mapQRegToDReg(encodeQRegister(OpQm, "Qm", Vmov));
3635
3636 constexpr bool UseQRegs = false;
3637 constexpr bool IsFloat = false;
3638
3639 const IValueT VmovOpcode = B25 | B21 | B8 | B4;
3640
3641 if (Dd + 1 != Dm)
3642 emitSIMDBase(VmovOpcode, Dd + 1, Dm, Dm, UseQRegs, IsFloat);
3643 if (Dd != Dn)
3644 emitSIMDBase(VmovOpcode, Dd, Dn, Dn, UseQRegs, IsFloat);
3645 }
3646
3647 void AssemblerARM32::vnegqs(Type ElmtTy, const Operand *OpQd,
3648 const Operand *OpQm) {
3649 // VNEG - ARM section A8.8.355, encoding A1:
3650 // vneg.<dt> <Qd>, <Qm>
3651 //
3652 // 111100111D11ss01dddd0F111QM0mmmm where Dddd=Qd, and Mmmm=Qm, and:
3653 // * dt=s8 -> 00=ss, 0=F
3654 // * dt=s16 -> 01=ss, 0=F
3655 // * dt=s32 -> 10=ss, 0=F
3656 // * dt=f32 -> 10=ss, 1=F
3657 constexpr const char *Vneg = "vneg";
3658 constexpr IValueT VnegOpcode = B24 | B23 | B21 | B20 | B16 | B9 | B8 | B7;
3659 const IValueT Qd = encodeQRegister(OpQd, "Qd", Vneg);
3660 constexpr IValueT Qn = 0;
3661 const IValueT Qm = encodeQRegister(OpQm, "Qm", Vneg);
3662 constexpr bool UseQRegs = true;
3663 constexpr IValueT ElmtShift = 18;
3664 const IValueT ElmtSize = encodeElmtType(ElmtTy);
3665 assert(Utils::IsUint(2, ElmtSize));
3666 emitSIMDBase(VnegOpcode | (ElmtSize << ElmtShift), mapQRegToDReg(Qd),
3667 mapQRegToDReg(Qn), mapQRegToDReg(Qm), UseQRegs,
3668 isFloatingType(ElmtTy));
3669 }
3670
3671 void AssemblerARM32::vorrq(const Operand *OpQd, const Operand *OpQm,
3672 const Operand *OpQn) {
3673 // VORR (register) - ARM section A8.8.360, encoding A1:
3674 // vorr <Qd>, <Qn>, <Qm>
3675 //
3676 // 111100100D10nnn0ddd00001N1M1mmm0 where Dddd=OpQd, Nnnn=OpQm, and Mmmm=OpQn.
3677 constexpr const char *Vorrq = "vorrq";
3678 constexpr IValueT VorrqOpcode = B21 | B8 | B4;
3679 constexpr Type ElmtTy = IceType_i8;
3680 emitSIMDqqq(VorrqOpcode, ElmtTy, OpQd, OpQm, OpQn, Vorrq);
3681 }
3682
3683 void AssemblerARM32::vstrd(const Operand *OpDd, const Operand *OpAddress,
3684 CondARM32::Cond Cond, const TargetInfo &TInfo) {
3685 // VSTR - ARM section A8.8.413, encoding A1:
3686 // vstr<c> <Dd>, [<Rn>{, #+/-<Imm>}]
3687 //
3688 // cccc1101UD00nnnndddd1011iiiiiiii where cccc=Cond, nnnn=Rn, Ddddd=Rd,
3689 // iiiiiiii=abs(Imm >> 2), and U=1 if Imm>=0.
3690 constexpr const char *Vstrd = "vstrd";
3691 IValueT Dd = encodeDRegister(OpDd, "Dd", Vstrd);
3692 assert(CondARM32::isDefined(Cond));
3693 IValueT Address;
3694 IValueT AddressEncoding =
3695 encodeAddress(OpAddress, Address, TInfo, RotatedImm8Div4Address);
3696 (void)AddressEncoding;
3697 assert(AddressEncoding == EncodedAsImmRegOffset);
3698 IValueT Encoding = B27 | B26 | B24 | B11 | B9 | B8 |
3699 (encodeCondition(Cond) << kConditionShift) |
3700 (getYInRegYXXXX(Dd) << 22) |
3701 (getXXXXInRegYXXXX(Dd) << 12) | Address;
3702 emitInst(Encoding);
3703 }
3704
3705 void AssemblerARM32::vstrq(const Operand *OpQd, const Operand *OpAddress,
3706 CondARM32::Cond Cond, const TargetInfo &TInfo) {
3707 // This is a pseudo-instruction which stores 64-bit data into a quadword
3708 // vector register. It is implemented by storing into the lower doubleword.
3709
3710 // VSTR - ARM section A8.8.413, encoding A1:
3711 // vstr<c> <Dd>, [<Rn>{, #+/-<Imm>}]
3712 //
3713 // cccc1101UD00nnnndddd1011iiiiiiii where cccc=Cond, nnnn=Rn, Ddddd=Rd,
3714 // iiiiiiii=abs(Imm >> 2), and U=1 if Imm>=0.
3715 constexpr const char *Vstrq = "vstrq";
3716 IValueT Dd = mapQRegToDReg(encodeQRegister(OpQd, "Qd", Vstrq));
3717 assert(CondARM32::isDefined(Cond));
3718 IValueT Address;
3719 IValueT AddressEncoding =
3720 encodeAddress(OpAddress, Address, TInfo, RotatedImm8Div4Address);
3721 (void)AddressEncoding;
3722 assert(AddressEncoding == EncodedAsImmRegOffset);
3723 IValueT Encoding = B27 | B26 | B24 | B11 | B9 | B8 |
3724 (encodeCondition(Cond) << kConditionShift) |
3725 (getYInRegYXXXX(Dd) << 22) |
3726 (getXXXXInRegYXXXX(Dd) << 12) | Address;
3727 emitInst(Encoding);
3728 }
3729
3730 void AssemblerARM32::vstrs(const Operand *OpSd, const Operand *OpAddress,
3731 CondARM32::Cond Cond, const TargetInfo &TInfo) {
3732 // VSTR - ARM section A8.8.413, encoding A2:
3733 // vstr<c> <Sd>, [<Rn>{, #+/-<imm>}]
3734 //
3735 // cccc1101UD00nnnndddd1010iiiiiiii where cccc=Cond, nnnn=Rn, ddddD=Sd,
3736 // iiiiiiii=abs(Imm >> 2), and U=1 if Imm >= 0.
3737 constexpr const char *Vstrs = "vstrs";
3738 IValueT Sd = encodeSRegister(OpSd, "Sd", Vstrs);
3739 assert(CondARM32::isDefined(Cond));
3740 IValueT Address;
3741 IValueT AddressEncoding =
3742 encodeAddress(OpAddress, Address, TInfo, RotatedImm8Div4Address);
3743 (void)AddressEncoding;
3744 assert(AddressEncoding == EncodedAsImmRegOffset);
3745 IValueT Encoding =
3746 B27 | B26 | B24 | B11 | B9 | (encodeCondition(Cond) << kConditionShift) |
3747 (getYInRegXXXXY(Sd) << 22) | (getXXXXInRegXXXXY(Sd) << 12) | Address;
3748 emitInst(Encoding);
3749 }
3750
3751 void AssemblerARM32::vst1qr(size_t ElmtSize, const Operand *OpQd,
3752 const Operand *OpAddress, const TargetInfo &TInfo) {
3753 // VST1 (multiple single elements) - ARM section A8.8.404, encoding A1:
3754 // vst1.<size> <Qd>, [<Rn>]
3755 //
3756 // 111101000D00nnnnddd0ttttssaammmm where tttt=DRegListSize2, Dddd=Qd,
3757 // nnnn=Rn, aa=0 (use default alignment), size=ElmtSize, and ss is the
3758 // encoding of ElmtSize.
3759 constexpr const char *Vst1qr = "vst1qr";
3760 const IValueT Qd = encodeQRegister(OpQd, "Qd", Vst1qr);
3761 const IValueT Dd = mapQRegToDReg(Qd);
3762 IValueT Address;
3763 if (encodeAddress(OpAddress, Address, TInfo, NoImmOffsetAddress) !=
3764 EncodedAsImmRegOffset)
3765 llvm::report_fatal_error(std::string(Vst1qr) + ": malformed memory address");
3766 const IValueT Rn = mask(Address, kRnShift, 4);
3767 constexpr IValueT Rm = RegARM32::Reg_pc;
3768 constexpr IValueT Opcode = B26;
3769 constexpr IValueT Align = 0; // use default alignment.
3770 emitVMem1Op(Opcode, Dd, Rn, Rm, DRegListSize2, ElmtSize, Align, Vst1qr);
3771 }
3772
3773 void AssemblerARM32::vst1(size_t ElmtSize, const Operand *OpQd,
3774 const Operand *OpAddress, const TargetInfo &TInfo) {
3775
3776 // This is a pseudo-instruction for storing a single element of a quadword
3777 // vector. For 64-bit the lower doubleword vector is stored.
3778
3779 if (ElmtSize == 64) {
3780 return vstrq(OpQd, OpAddress, Ice::CondARM32::AL, TInfo);
3781 }
3782
3783 // VST1 (single element from one lane) - ARMv7-A/R section A8.6.392, encoding
3784 // A1:
3785 // VST1<c>.<size> <list>, [<Rn>{@<align>}], <Rm>
3786 //
3787 // 111101001D00nnnnddd0ss00aaaammmm where Dddd=Qd, nnnn=Rn,
3788 // aaaa=0 (use default alignment), size=ElmtSize, and ss is the
3789 // encoding of ElmtSize.
3790 constexpr const char *Vst1qr = "vst1qr";
3791 const IValueT Qd = encodeQRegister(OpQd, "Qd", Vst1qr);
3792 const IValueT Dd = mapQRegToDReg(Qd);
3793 IValueT Address;
3794 if (encodeAddress(OpAddress, Address, TInfo, NoImmOffsetAddress) !=
3795 EncodedAsImmRegOffset)
3796 llvm::report_fatal_error(std::string(Vst1qr) + ": malformed memory address");
3797 const IValueT Rn = mask(Address, kRnShift, 4);
3798 constexpr IValueT Rm = RegARM32::Reg_pc;
3799 constexpr IValueT Opcode = B26 | B23;
3800 constexpr IValueT Align = 0; // use default alignment.
3801 emitVMem1Op(Opcode, Dd, Rn, Rm, ElmtSize, Align, Vst1qr);
3802 }
3803
3804 void AssemblerARM32::vsubs(const Operand *OpSd, const Operand *OpSn,
3805 const Operand *OpSm, CondARM32::Cond Cond) {
3806 // VSUB (floating-point) - ARM section A8.8.415, encoding A2:
3807 // vsub<c>.f32 <Sd>, <Sn>, <Sm>
3808 //
3809 // cccc11100D11nnnndddd101sN1M0mmmm where cccc=Cond, s=0, ddddD=Rd, nnnnN=Rn,
3810 // and mmmmM=Rm.
3811 constexpr const char *Vsubs = "vsubs";
3812 constexpr IValueT VsubsOpcode = B21 | B20 | B6;
3813 emitVFPsss(Cond, VsubsOpcode, OpSd, OpSn, OpSm, Vsubs);
3814 }
3815
3816 void AssemblerARM32::vsubd(const Operand *OpDd, const Operand *OpDn,
3817 const Operand *OpDm, CondARM32::Cond Cond) {
3818 // VSUB (floating-point) - ARM section A8.8.415, encoding A2:
3819 // vsub<c>.f64 <Dd>, <Dn>, <Dm>
3820 //
3821 // cccc11100D11nnnndddd101sN1M0mmmm where cccc=Cond, s=1, Ddddd=Rd, Nnnnn=Rn,
3822 // and Mmmmm=Rm.
3823 constexpr const char *Vsubd = "vsubd";
3824 constexpr IValueT VsubdOpcode = B21 | B20 | B6;
3825 emitVFPddd(Cond, VsubdOpcode, OpDd, OpDn, OpDm, Vsubd);
3826 }
3827
3828 void AssemblerARM32::vqaddqi(Type ElmtTy, const Operand *OpQd,
3829 const Operand *OpQm, const Operand *OpQn) {
3830 // VQADD (integer) - ARM section A8.6.369, encoding A1:
3831 // vqadd<c><q>.s<size> {<Qd>,} <Qn>, <Qm>
3832 //
3833 // 111100100Dssnnn0ddd00000N1M1mmm0 where Dddd=OpQd, Nnnn=OpQn, Mmmm=OpQm,
3834 // size is 8, 16, 32, or 64.
3835 assert(isScalarIntegerType(ElmtTy) &&
3836 "vqaddqi expects vector with integer element type");
3837 constexpr const char *Vqaddqi = "vqaddqi";
3838 constexpr IValueT VqaddqiOpcode = B4;
3839 emitSIMDqqq(VqaddqiOpcode, ElmtTy, OpQd, OpQm, OpQn, Vqaddqi);
3840 }
3841
void AssemblerARM32::vqaddqu(Type ElmtTy, const Operand *OpQd,
                             const Operand *OpQm, const Operand *OpQn) {
  // VQADD (integer) - ARM section A8.6.369, encoding A1:
  //   vqadd<c><q>.u<size> {<Qd>,} <Qn>, <Qm>
  //
  // 111100110Dssnnn0ddd00000N1M1mmm0 where Dddd=OpQd, Nnnn=OpQn, Mmmm=OpQm,
  // size is 8, 16, 32, or 64.
  assert(isScalarIntegerType(ElmtTy) &&
         "vqaddqu expects vector with integer element type");
  constexpr const char *Vqaddqu = "vqaddqu";
  constexpr IValueT VqaddquOpcode = B24 | B4;
  emitSIMDqqq(VqaddquOpcode, ElmtTy, OpQd, OpQm, OpQn, Vqaddqu);
}

void AssemblerARM32::vqsubqi(Type ElmtTy, const Operand *OpQd,
                             const Operand *OpQm, const Operand *OpQn) {
  // VQSUB (integer) - ARM section A8.6.369, encoding A1:
  //   vqsub<c><q>.s<size> {<Qd>,} <Qn>, <Qm>
  //
  // 111100100Dssnnn0ddd00010N1M1mmm0 where Dddd=OpQd, Nnnn=OpQn, Mmmm=OpQm,
  // size is 8, 16, 32, or 64.
  assert(isScalarIntegerType(ElmtTy) &&
         "vqsubqi expects vector with integer element type");
  constexpr const char *Vqsubqi = "vqsubqi";
  constexpr IValueT VqsubqiOpcode = B9 | B4;
  emitSIMDqqq(VqsubqiOpcode, ElmtTy, OpQd, OpQm, OpQn, Vqsubqi);
}

void AssemblerARM32::vqsubqu(Type ElmtTy, const Operand *OpQd,
                             const Operand *OpQm, const Operand *OpQn) {
  // VQSUB (integer) - ARM section A8.6.369, encoding A1:
  //   vqsub<c><q>.u<size> {<Qd>,} <Qn>, <Qm>
  //
  // 111100110Dssnnn0ddd00010N1M1mmm0 where Dddd=OpQd, Nnnn=OpQn, Mmmm=OpQm,
  // size is 8, 16, 32, or 64.
  assert(isScalarIntegerType(ElmtTy) &&
         "vqsubqu expects vector with integer element type");
  constexpr const char *Vqsubqu = "vqsubqu";
  constexpr IValueT VqsubquOpcode = B24 | B9 | B4;
  emitSIMDqqq(VqsubquOpcode, ElmtTy, OpQd, OpQm, OpQn, Vqsubqu);
}

void AssemblerARM32::vsubqi(Type ElmtTy, const Operand *OpQd,
                            const Operand *OpQm, const Operand *OpQn) {
  // VSUB (integer) - ARM section A8.8.414, encoding A1:
  //   vsub.<dt> <Qd>, <Qn>, <Qm>
  //
  // 111100110Dssnnn0ddd01000N1M0mmm0 where Dddd=OpQd, Nnnn=OpQn, Mmmm=OpQm,
  // and dt is one of i8, i16, i32, i64, selected by ss.
  assert(isScalarIntegerType(ElmtTy) &&
         "vsubqi expects vector with integer element type");
  constexpr const char *Vsubqi = "vsubqi";
  constexpr IValueT VsubqiOpcode = B24 | B11;
  emitSIMDqqq(VsubqiOpcode, ElmtTy, OpQd, OpQm, OpQn, Vsubqi);
}

void AssemblerARM32::vqmovn2(Type DestElmtTy, const Operand *OpQd,
                             const Operand *OpQm, const Operand *OpQn,
                             bool Unsigned, bool Saturating) {
  // Pseudo-instruction for packing two quadword vectors into one quadword
  // vector, narrowing each element using saturation or truncation.

  // VQMOVN - ARMv7-A/R section A8.6.361, encoding A1:
  //   V{Q}MOV{U}N<c>.<type><size> <Dd>, <Qm>
  //
  // 111100111D11ss10dddd0010opM0mmm0 where Ddddd=OpQd, Mmmm=OpQm, op selects
  // the variant, and ss is 00 (16-bit), 01 (32-bit), or 10 (64-bit).

  assert(DestElmtTy != IceType_i64 &&
         "vmovn doesn't allow i64 destination vector elements!");

  constexpr const char *Vqmovn = "vqmovn";
  constexpr bool UseQRegs = false;
  constexpr bool IsFloatTy = false;
  const IValueT Qd = encodeQRegister(OpQd, "Qd", Vqmovn);
  const IValueT Qm = encodeQRegister(OpQm, "Qm", Vqmovn);
  const IValueT Qn = encodeQRegister(OpQn, "Qn", Vqmovn);
  const IValueT Dd = mapQRegToDReg(Qd);
  const IValueT Dm = mapQRegToDReg(Qm);
  const IValueT Dn = mapQRegToDReg(Qn);

  IValueT VqmovnOpcode = B25 | B24 | B23 | B21 | B20 | B17 | B9 |
                         (Saturating ? (Unsigned ? B6 : B7) : 0);
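  // The op field (bits 7:6) follows from the flags: 00 emits the plain
  // truncating VMOVN, 01 (Saturating && Unsigned) the VQMOVUN form, and
  // 10 (Saturating && !Unsigned) the signed VQMOVN form.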

  constexpr IValueT ElmtShift = 18;
  VqmovnOpcode |= (encodeElmtType(DestElmtTy) << ElmtShift);

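  // Emit the two narrows in an order that never overwrites a source half
  // before it has been read, since Qd may alias Qm and/or Qn.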
  if (Qm != Qd) {
    // Narrow second source operand to upper half of destination.
    emitSIMDBase(VqmovnOpcode, Dd + 1, 0, Dn, UseQRegs, IsFloatTy);
    // Narrow first source operand to lower half of destination.
    emitSIMDBase(VqmovnOpcode, Dd + 0, 0, Dm, UseQRegs, IsFloatTy);
  } else if (Qn != Qd) {
    // Narrow first source operand to lower half of destination.
    emitSIMDBase(VqmovnOpcode, Dd + 0, 0, Dm, UseQRegs, IsFloatTy);
    // Narrow second source operand to upper half of destination.
    emitSIMDBase(VqmovnOpcode, Dd + 1, 0, Dn, UseQRegs, IsFloatTy);
  } else {
    // Narrow first source operand to lower half of destination.
    emitSIMDBase(VqmovnOpcode, Dd, 0, Dm, UseQRegs, IsFloatTy);

    // VMOV Dd, Dm
    // 111100100D10mmmmdddd0001MQM1mmmm
    const IValueT VmovOpcode = B25 | B21 | B8 | B4;
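    // VMOV <Dd>, <Dm> is an alias of VORR <Dd>, <Dm>, <Dm>, which is why the
    // source register is passed in both operand slots below; it copies the
    // just-narrowed low half Dd into the high half Dd + 1.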

    emitSIMDBase(VmovOpcode, Dd + 1, Dd, Dd, UseQRegs, IsFloatTy);
  }
}

void AssemblerARM32::vsubqf(const Operand *OpQd, const Operand *OpQn,
                            const Operand *OpQm) {
  // VSUB (floating-point) - ARM section A8.8.415, encoding A1:
  //   vsub.f32 <Qd>, <Qn>, <Qm>
  //
  // 111100100D10nnn0ddd01101N1M0mmm0 where Dddd=Qd, Nnnn=Qn, and Mmmm=Qm.
  assert(OpQd->getType() == IceType_v4f32 && "vsubqf expects type <4 x float>");
  constexpr const char *Vsubqf = "vsubqf";
  constexpr IValueT VsubqfOpcode = B21 | B11 | B8;
  emitSIMDqqq(VsubqfOpcode, IceType_f32, OpQd, OpQn, OpQm, Vsubqf);
}

void AssemblerARM32::emitVStackOp(CondARM32::Cond Cond, IValueT Opcode,
                                  const Variable *OpBaseReg,
                                  SizeT NumConsecRegs) {
  const IValueT BaseReg = getEncodedSRegNum(OpBaseReg);
  const IValueT DLastBit = mask(BaseReg, 0, 1); // Last bit of base register.
  const IValueT Rd = mask(BaseReg, 1, 4);       // Top 4 bits of base register.
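  // For example, a base register of s20 (register number 20 = 0b10100) splits
  // into DLastBit = 0 and Rd = 0b1010, matching the ddddD field layout in the
  // VPUSH/VPOP encodings below.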
  assert(0 < NumConsecRegs);
  (void)VpushVpopMaxConsecRegs;
  assert(NumConsecRegs <= VpushVpopMaxConsecRegs);
  assert((BaseReg + NumConsecRegs) <= RegARM32::getNumSRegs());
  assert(CondARM32::isDefined(Cond));
  // The D bit of the ddddD base register field sits at bit 22 (see the
  // vpop/vpush encodings below).
  const IValueT Encoding = Opcode | (Cond << kConditionShift) |
                           (DLastBit << 22) | (Rd << kRdShift) | NumConsecRegs;
  emitInst(Encoding);
}

void AssemblerARM32::vpop(const Variable *OpBaseReg, SizeT NumConsecRegs,
                          CondARM32::Cond Cond) {
  // Note: Current implementation assumes that OpBaseReg is defined using S
  // registers. It doesn't implement the D register form.
  //
  // VPOP - ARM section A8.8.367, encoding A2:
  //   vpop<c> <RegList>
  //
  // cccc11001D111101dddd1010iiiiiiii where cccc=Cond, ddddD=BaseReg, and
  // iiiiiiii=NumConsecRegs.
  constexpr IValueT VpopOpcode =
      B27 | B26 | B23 | B21 | B20 | B19 | B18 | B16 | B11 | B9;
  emitVStackOp(Cond, VpopOpcode, OpBaseReg, NumConsecRegs);
}

void AssemblerARM32::vpush(const Variable *OpBaseReg, SizeT NumConsecRegs,
                           CondARM32::Cond Cond) {
  // Note: Current implementation assumes that OpBaseReg is defined using S
  // registers. It doesn't implement the D register form.
  //
  // VPUSH - ARM section A8.8.368, encoding A2:
  //   vpush<c> <RegList>
  //
  // cccc11010D101101dddd1010iiiiiiii where cccc=Cond, ddddD=BaseReg, and
  // iiiiiiii=NumConsecRegs.
  constexpr IValueT VpushOpcode =
      B27 | B26 | B24 | B21 | B19 | B18 | B16 | B11 | B9;
  emitVStackOp(Cond, VpushOpcode, OpBaseReg, NumConsecRegs);
}
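
// Usage sketch (hypothetical names): saving and restoring the callee-saved
// single-precision registers s16-s23, assuming BaseRegVar is a Variable that
// has been assigned register s16 and Asm points at this assembler:
//   Asm->vpush(BaseRegVar, 8, CondARM32::AL); // vpush {s16-s23}
//   ...
//   Asm->vpop(BaseRegVar, 8, CondARM32::AL);  // vpop {s16-s23}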

void AssemblerARM32::vshlqi(Type ElmtTy, const Operand *OpQd,
                            const Operand *OpQm, const Operand *OpQn) {
  // VSHL - ARM section A8.8.396, encoding A1:
  //   vshl Qd, Qm, Qn
  //
  // 1111001U0Dssnnnndddd0100NQM0mmmm where Ddddd=Qd, Mmmmm=Qm, Nnnnn=Qn, U=0,
  // and Q=1.
  assert(isScalarIntegerType(ElmtTy) &&
         "vshl expects vector with integer element type");
  constexpr const char *Vshl = "vshl";
  constexpr IValueT VshlOpcode = B10 | B6;
  emitSIMDqqq(VshlOpcode, ElmtTy, OpQd, OpQn, OpQm, Vshl);
}

void AssemblerARM32::vshlqc(Type ElmtTy, const Operand *OpQd,
                            const Operand *OpQm,
                            const ConstantInteger32 *Imm6) {
  // VSHL - ARM section A8.8.395, encoding A1:
  //   vshl Qd, Qm, #Imm
  //
  // 1111001U1Diiiiiidddd0101LQM1mmmm where Ddddd=Qd, Mmmmm=Qm, iiiiii=Imm6,
  // U=0, Q=1, and L=0.
  assert(isScalarIntegerType(ElmtTy) &&
         "vshl expects vector with integer element type");
  constexpr const char *Vshl = "vshl";
  constexpr IValueT VshlOpcode = B23 | B10 | B8 | B4;
  emitSIMDShiftqqc(VshlOpcode, OpQd, OpQm,
                   encodeSIMDShiftImm6(ST_Vshl, ElmtTy, Imm6), Vshl);
}

void AssemblerARM32::vshrqc(Type ElmtTy, const Operand *OpQd,
                            const Operand *OpQm, const ConstantInteger32 *Imm6,
                            InstARM32::FPSign Sign) {
  // VSHR - ARM section A8.8.398, encoding A1:
  //   vshr Qd, Qm, #Imm
  //
  // 1111001U1Diiiiiidddd0000LQM1mmmm where Ddddd=Qd, Mmmmm=Qm, iiiiii=Imm6,
  // U=Unsigned, Q=1, and L=0.
  assert(isScalarIntegerType(ElmtTy) &&
         "vshr expects vector with integer element type");
  constexpr const char *Vshr = "vshr";
  const IValueT VshrOpcode =
      (Sign == InstARM32::FS_Unsigned ? B24 : 0) | B23 | B4;
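  // U=1 selects the logical (unsigned) right shift; U=0 the arithmetic
  // (signed) right shift, matching the Sign argument.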
  emitSIMDShiftqqc(VshrOpcode, OpQd, OpQm,
                   encodeSIMDShiftImm6(ST_Vshr, ElmtTy, Imm6), Vshr);
}

void AssemblerARM32::vshlqu(Type ElmtTy, const Operand *OpQd,
                            const Operand *OpQm, const Operand *OpQn) {
  // VSHL - ARM section A8.8.396, encoding A1:
  //   vshl Qd, Qm, Qn
  //
  // 1111001U0Dssnnnndddd0100NQM0mmmm where Ddddd=Qd, Mmmmm=Qm, Nnnnn=Qn, U=1,
  // and Q=1.
  assert(isScalarIntegerType(ElmtTy) &&
         "vshl expects vector with integer element type");
  constexpr const char *Vshl = "vshl";
  constexpr IValueT VshlOpcode = B24 | B10 | B6;
  emitSIMDqqq(VshlOpcode, ElmtTy, OpQd, OpQn, OpQm, Vshl);
}

void AssemblerARM32::vsqrtd(const Operand *OpDd, const Operand *OpDm,
                            CondARM32::Cond Cond) {
  // VSQRT - ARM section A8.8.401, encoding A1:
  //   vsqrt<c>.f64 <Dd>, <Dm>
  //
  // cccc11101D110001dddd101111M0mmmm where cccc=Cond, Ddddd=Dd, and Mmmmm=Dm.
  constexpr const char *Vsqrtd = "vsqrtd";
  IValueT Dd = encodeDRegister(OpDd, "Dd", Vsqrtd);
  IValueT Dm = encodeDRegister(OpDm, "Dm", Vsqrtd);
  constexpr IValueT VsqrtdOpcode = B23 | B21 | B20 | B16 | B7 | B6;
  constexpr IValueT D0 = 0;
  emitVFPddd(Cond, VsqrtdOpcode, Dd, D0, Dm);
}

void AssemblerARM32::vsqrts(const Operand *OpSd, const Operand *OpSm,
                            CondARM32::Cond Cond) {
  // VSQRT - ARM section A8.8.401, encoding A1:
  //   vsqrt<c>.f32 <Sd>, <Sm>
  //
  // cccc11101D110001dddd101011M0mmmm where cccc=Cond, ddddD=Sd, and mmmmM=Sm.
  constexpr const char *Vsqrts = "vsqrts";
  IValueT Sd = encodeSRegister(OpSd, "Sd", Vsqrts);
  IValueT Sm = encodeSRegister(OpSm, "Sm", Vsqrts);
  constexpr IValueT VsqrtsOpcode = B23 | B21 | B20 | B16 | B7 | B6;
  constexpr IValueT S0 = 0;
  emitVFPsss(Cond, VsqrtsOpcode, Sd, S0, Sm);
}

} // end of namespace ARM32
} // end of namespace Ice