1 /*
2 * Copyright (c) 2023 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "aarch64_cg.h"
17 #include "aarch64_cgfunc.h"
18
19 namespace maplebe {
20 using namespace maple;
/* Condition-code operand table, indexed by the ConditionCode enum values
 * (CC_EQ ... CC_AL); one entry per code, kCcLast entries in enum order. */
CondOperand AArch64CGFunc::ccOperands[kCcLast] = {
    CondOperand(CC_EQ), CondOperand(CC_NE), CondOperand(CC_CS), CondOperand(CC_HS), CondOperand(CC_CC),
    CondOperand(CC_LO), CondOperand(CC_MI), CondOperand(CC_PL), CondOperand(CC_VS), CondOperand(CC_VC),
    CondOperand(CC_HI), CondOperand(CC_LS), CondOperand(CC_GE), CondOperand(CC_LT), CondOperand(CC_GT),
    CondOperand(CC_LE), CondOperand(CC_AL),
};
27
28 namespace {
/* Dimensions and lookup tables for PickLdStInsn / PickExtInsn below. */
constexpr int32 kSignedDimension = 2; /* signed and unsigned */
constexpr int32 kIntByteSizeDimension = 4; /* 1 byte, 2 byte, 4 bytes, 8 bytes */
constexpr int32 kFloatByteSizeDimension = 3; /* 4 bytes, 8 bytes, 16 bytes(vector) */
constexpr int32 kShiftAmount12 = 12; /* for instruction that can use shift, shift amount must be 0 or 12 */

/* Integer load opcodes, indexed by [signedness][log2(byte size)]. */
MOperator ldIs[kSignedDimension][kIntByteSizeDimension] = {
    /* unsigned == 0 */
    {MOP_wldrb, MOP_wldrh, MOP_wldr, MOP_xldr},
    /* signed == 1 */
    {MOP_wldrsb, MOP_wldrsh, MOP_wldr, MOP_xldr}};

/* Integer store opcodes; stores do not depend on signedness, so both rows match. */
MOperator stIs[kSignedDimension][kIntByteSizeDimension] = {
    /* unsigned == 0 */
    {MOP_wstrb, MOP_wstrh, MOP_wstr, MOP_xstr},
    /* signed == 1 */
    {MOP_wstrb, MOP_wstrh, MOP_wstr, MOP_xstr}};

/* FP/SIMD load/store opcodes, indexed 0..2 for 32/64/128-bit accesses. */
MOperator ldFs[kFloatByteSizeDimension] = {MOP_sldr, MOP_dldr, MOP_qldr};
MOperator stFs[kFloatByteSizeDimension] = {MOP_sstr, MOP_dstr, MOP_qstr};

/* extended to unsigned ints; rows = source size, columns = destination size */
MOperator uextIs[kIntByteSizeDimension][kIntByteSizeDimension] = {
    /* u8 u16 u32 u64 */
    {MOP_undef, MOP_xuxtb32, MOP_xuxtb32, MOP_xuxtb32}, /* u8/i8 */
    {MOP_undef, MOP_undef, MOP_xuxth32, MOP_xuxth32}, /* u16/i16 */
    {MOP_undef, MOP_undef, MOP_xuxtw64, MOP_xuxtw64}, /* u32/i32 */
    {MOP_undef, MOP_undef, MOP_undef, MOP_undef} /* u64/u64 */
};

/* extended to signed ints; rows = source size, columns = destination size */
MOperator extIs[kIntByteSizeDimension][kIntByteSizeDimension] = {
    /* i8 i16 i32 i64 */
    {MOP_undef, MOP_xsxtb32, MOP_xsxtb32, MOP_xsxtb64}, /* u8/i8 */
    {MOP_undef, MOP_undef, MOP_xsxth32, MOP_xsxth64}, /* u16/i16 */
    {MOP_undef, MOP_undef, MOP_undef, MOP_xsxtw64}, /* u32/i32 */
    {MOP_undef, MOP_undef, MOP_undef, MOP_undef} /* u64/u64 */
};
66
PickLdStInsn(bool isLoad,uint32 bitSize,PrimType primType)67 MOperator PickLdStInsn(bool isLoad, uint32 bitSize, PrimType primType)
68 {
69 DEBUG_ASSERT(bitSize >= k8BitSize, "PTY_u1 should have been lowered?");
70 DEBUG_ASSERT(__builtin_popcount(bitSize) == 1, "PTY_u1 should have been lowered?");
71
72 /* __builtin_ffs(x) returns: 0 -> 0, 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4 */
73 if ((IsPrimitiveInteger(primType))) {
74 auto *table = isLoad ? ldIs : stIs;
75 int32 signedUnsigned = IsUnsignedInteger(primType) ? 0 : 1;
76
77 /* __builtin_ffs(x) returns: 8 -> 4, 16 -> 5, 32 -> 6, 64 -> 7 */
78 uint32 size = static_cast<uint32>(__builtin_ffs(static_cast<int32>(bitSize))) - k4BitSize;
79 DEBUG_ASSERT(size <= 3, "wrong bitSize"); // size must <= 3
80 return table[signedUnsigned][size];
81 } else {
82 MOperator *table = isLoad ? ldFs : stFs;
83 /* __builtin_ffs(x) returns: 32 -> 6, 64 -> 7, 128 -> 8 */
84 uint32 size = static_cast<uint32>(__builtin_ffs(static_cast<int32>(bitSize))) - k6BitSize;
85 DEBUG_ASSERT(size <= k2BitSize, "size must be 0 to 2");
86 return table[size];
87 }
88 }
89 } // namespace
90
GetOrCreateResOperand(const BaseNode & parent,PrimType primType)91 RegOperand &AArch64CGFunc::GetOrCreateResOperand(const BaseNode &parent, PrimType primType)
92 {
93 RegOperand *resOpnd = nullptr;
94 if (parent.GetOpCode() == OP_regassign) {
95 auto ®AssignNode = static_cast<const RegassignNode &>(parent);
96 PregIdx pregIdx = regAssignNode.GetRegIdx();
97 if (IsSpecialPseudoRegister(pregIdx)) {
98 /* if it is one of special registers */
99 resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, primType);
100 } else {
101 resOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx));
102 }
103 } else {
104 resOpnd = &CreateRegisterOperandOfType(primType);
105 }
106 return *resOpnd;
107 }
108
/* Pick the load opcode for the given width and type; see PickLdStInsn. */
MOperator AArch64CGFunc::PickLdInsn(uint32 bitSize, PrimType primType) const
{
    return PickLdStInsn(true, bitSize, primType);
}
113
/* Pick the store opcode for the given width and type; see PickLdStInsn. */
MOperator AArch64CGFunc::PickStInsn(uint32 bitSize, PrimType primType) const
{
    return PickLdStInsn(false, bitSize, primType);
}
118
PickExtInsn(PrimType dtype,PrimType stype) const119 MOperator AArch64CGFunc::PickExtInsn(PrimType dtype, PrimType stype) const
120 {
121 int32 sBitSize = static_cast<int32>(GetPrimTypeBitSize(stype));
122 int32 dBitSize = static_cast<int32>(GetPrimTypeBitSize(dtype));
123 /* __builtin_ffs(x) returns: 0 -> 0, 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4 */
124 if (IsPrimitiveInteger(stype) && IsPrimitiveInteger(dtype)) {
125 MOperator(*table)[kIntByteSizeDimension];
126 table = IsUnsignedInteger(stype) ? uextIs : extIs;
127 /* __builtin_ffs(x) returns: 8 -> 4, 16 -> 5, 32 -> 6, 64 -> 7 */
128 uint32 row = static_cast<uint32>(__builtin_ffs(sBitSize)) - k4BitSize;
129 DEBUG_ASSERT(row <= k3BitSize, "wrong bitSize");
130 uint32 col = static_cast<uint32>(__builtin_ffs(dBitSize)) - k4BitSize;
131 DEBUG_ASSERT(col <= k3BitSize, "wrong bitSize");
132 return table[row][col];
133 }
134 CHECK_FATAL(0, "extend not primitive integer");
135 return MOP_undef;
136 }
137
PickMovBetweenRegs(PrimType destType,PrimType srcType) const138 MOperator AArch64CGFunc::PickMovBetweenRegs(PrimType destType, PrimType srcType) const
139 {
140 if (IsPrimitiveInteger(destType) && IsPrimitiveInteger(srcType)) {
141 return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_wmovrr : MOP_xmovrr;
142 }
143 if (IsPrimitiveFloat(destType) && IsPrimitiveFloat(srcType)) {
144 return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovs : MOP_xvmovd;
145 }
146 if (IsPrimitiveInteger(destType) && IsPrimitiveFloat(srcType)) {
147 return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovrs : MOP_xvmovrd;
148 }
149 if (IsPrimitiveFloat(destType) && IsPrimitiveInteger(srcType)) {
150 return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovsr : MOP_xvmovdr;
151 }
152 CHECK_FATAL(false, "unexpected operand primtype for mov");
153 return MOP_undef;
154 }
155
SelectCopyImm(Operand & dest,PrimType dType,ImmOperand & src,PrimType sType)156 void AArch64CGFunc::SelectCopyImm(Operand &dest, PrimType dType, ImmOperand &src, PrimType sType)
157 {
158 if (IsPrimitiveInteger(dType) != IsPrimitiveInteger(sType)) {
159 RegOperand &tempReg = CreateRegisterOperandOfType(sType);
160 SelectCopyImm(tempReg, src, sType);
161 SelectCopy(dest, dType, tempReg, sType);
162 } else {
163 SelectCopyImm(dest, src, sType);
164 }
165 }
166
SelectCopyImm(Operand & dest,ImmOperand & src,PrimType dtype)167 void AArch64CGFunc::SelectCopyImm(Operand &dest, ImmOperand &src, PrimType dtype)
168 {
169 uint32 dsize = GetPrimTypeBitSize(dtype);
170 // If the type size of the parent node is smaller than the type size of the child node,
171 // the number of child node needs to be truncated.
172 if (dsize < src.GetSize()) {
173 uint64 value = static_cast<uint64>(src.GetValue());
174 uint64 mask = (1UL << dsize) - 1;
175 int64 newValue = static_cast<int64>(value & mask);
176 src.SetValue(newValue);
177 }
178 DEBUG_ASSERT(IsPrimitiveInteger(dtype), "The type of destination operand must be Integer");
179 DEBUG_ASSERT(((dsize == k8BitSize) || (dsize == k16BitSize) || (dsize == k32BitSize) || (dsize == k64BitSize)),
180 "The destination operand must be >= 8-bit");
181 if (src.GetSize() == k32BitSize && dsize == k64BitSize && src.IsSingleInstructionMovable()) {
182 auto tempReg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k32BitSize), k32BitSize, kRegTyInt);
183 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, *tempReg, src));
184 SelectCopy(dest, dtype, *tempReg, PTY_u32);
185 return;
186 }
187 if (src.IsSingleInstructionMovable()) {
188 MOperator mOp = (dsize <= k32BitSize) ? MOP_wmovri32 : MOP_xmovri64;
189 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, src));
190 return;
191 }
192 uint64 srcVal = static_cast<uint64>(src.GetValue());
193 /* using mov/movk to load the immediate value */
194 if (dsize == k8BitSize) {
195 /* compute lower 8 bits value */
196 if (dtype == PTY_u8) {
197 /* zero extend */
198 srcVal = (srcVal << k56BitSize) >> k56BitSize;
199 dtype = PTY_u16;
200 } else {
201 /* sign extend */
202 srcVal = (static_cast<int64>(srcVal) << k56BitSize) >> k56BitSize;
203 dtype = PTY_i16;
204 }
205 dsize = k16BitSize;
206 }
207 if (dsize == k16BitSize) {
208 if (dtype == PTY_u16) {
209 /* check lower 16 bits and higher 16 bits respectively */
210 DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0, "unexpected value");
211 DEBUG_ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) == 0, "unexpected value");
212 DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0xFFFFULL, "unexpected value");
213 /* create an imm opereand which represents lower 16 bits of the immediate */
214 ImmOperand &srcLower = CreateImmOperand(static_cast<int64>(srcVal & 0x0000FFFFULL), k16BitSize, false);
215 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, dest, srcLower));
216 return;
217 } else {
218 /* sign extend and let `dsize == 32` case take care of it */
219 srcVal = (static_cast<int64>(srcVal) << k48BitSize) >> k48BitSize;
220 dsize = k32BitSize;
221 }
222 }
223 if (dsize == k32BitSize) {
224 /* check lower 16 bits and higher 16 bits respectively */
225 DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0, "unexpected val");
226 DEBUG_ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) != 0, "unexpected val");
227 DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0xFFFFULL, "unexpected val");
228 DEBUG_ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) != 0xFFFFULL, "unexpected val");
229 /* create an imm opereand which represents lower 16 bits of the immediate */
230 ImmOperand &srcLower = CreateImmOperand(static_cast<int64>(srcVal & 0x0000FFFFULL), k16BitSize, false);
231 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, dest, srcLower));
232 /* create an imm opereand which represents upper 16 bits of the immediate */
233 ImmOperand &srcUpper =
234 CreateImmOperand(static_cast<int64>((srcVal >> k16BitSize) & 0x0000FFFFULL), k16BitSize, false);
235 BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(k16BitSize, false);
236 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovkri16, dest, srcUpper, *lslOpnd));
237 } else {
238 /*
239 * partition it into 4 16-bit chunks
240 * if more 0's than 0xFFFF's, use movz as the initial instruction.
241 * otherwise, movn.
242 */
243 bool useMovz = BetterUseMOVZ(srcVal);
244 bool useMovk = false;
245 /* get lower 32 bits of the immediate */
246 uint64 chunkLval = srcVal & 0xFFFFFFFFULL;
247 /* get upper 32 bits of the immediate */
248 uint64 chunkHval = (srcVal >> k32BitSize) & 0xFFFFFFFFULL;
249 int32 maxLoopTime = 4;
250
251 if (chunkLval == chunkHval) {
252 /* compute lower 32 bits, and then copy to higher 32 bits, so only 2 chunks need be processed */
253 maxLoopTime = 2;
254 }
255
256 uint64 sa = 0;
257
258 for (int64 i = 0; i < maxLoopTime; ++i, sa += k16BitSize) {
259 /* create an imm opereand which represents the i-th 16-bit chunk of the immediate */
260 uint64 chunkVal = (srcVal >> (static_cast<uint64>(sa))) & 0x0000FFFFULL;
261 if (useMovz ? (chunkVal == 0) : (chunkVal == 0x0000FFFFULL)) {
262 continue;
263 }
264 ImmOperand &src16 = CreateImmOperand(static_cast<int64>(chunkVal), k16BitSize, false);
265 BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(sa, true);
266 if (!useMovk) {
267 /* use movz or movn */
268 if (!useMovz) {
269 src16.BitwiseNegate();
270 }
271 GetCurBB()->AppendInsn(
272 GetInsnBuilder()->BuildInsn(useMovz ? MOP_xmovzri16 : MOP_xmovnri16, dest, src16, *lslOpnd));
273 useMovk = true;
274 } else {
275 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xmovkri16, dest, src16, *lslOpnd));
276 }
277 }
278
279 if (maxLoopTime == 2) { /* as described above, only 2 chunks need be processed */
280 /* copy lower 32 bits to higher 32 bits */
281 ImmOperand &immOpnd = CreateImmOperand(k32BitSize, k8BitSize, false);
282 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xbfirri6i6, dest, dest, immOpnd, immOpnd));
283 }
284 }
285 }
286
SelectCopyMemOpnd(Operand & dest,PrimType dtype,uint32 dsize,Operand & src,PrimType stype)287 void AArch64CGFunc::SelectCopyMemOpnd(Operand &dest, PrimType dtype, uint32 dsize, Operand &src, PrimType stype)
288 {
289 Insn *insn = nullptr;
290 uint32 ssize = src.GetSize();
291 PrimType regTy = PTY_void;
292 RegOperand *loadReg = nullptr;
293 MOperator mop = MOP_undef;
294 if (IsPrimitiveFloat(stype)) {
295 CHECK_FATAL(dsize == ssize, "dsize %u expect equals ssize %u", dtype, ssize);
296 insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), dest, src);
297 } else {
298 mop = PickExtInsn(dtype, stype);
299 if (ssize == (GetPrimTypeSize(dtype) * kBitsPerByte) || mop == MOP_undef) {
300 insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), dest, src);
301 } else {
302 regTy = dsize == k64BitSize ? dtype : PTY_i32;
303 loadReg = &CreateRegisterOperandOfType(regTy);
304 insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), *loadReg, src);
305 }
306 }
307
308 GetCurBB()->AppendInsn(*insn);
309 if (regTy != PTY_void && mop != MOP_undef) {
310 DEBUG_ASSERT(loadReg != nullptr, "loadReg should not be nullptr");
311 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, dest, *loadReg));
312 }
313 }
314
IsImmediateValueInRange(MOperator mOp,int64 immVal,bool is64Bits,bool isIntactIndexed,bool isPostIndexed,bool isPreIndexed) const315 bool AArch64CGFunc::IsImmediateValueInRange(MOperator mOp, int64 immVal, bool is64Bits, bool isIntactIndexed,
316 bool isPostIndexed, bool isPreIndexed) const
317 {
318 bool isInRange = false;
319 switch (mOp) {
320 case MOP_xstr:
321 case MOP_wstr:
322 isInRange =
323 (isIntactIndexed &&
324 ((!is64Bits && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrLdrImm32UpperBound)) ||
325 (is64Bits && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrLdrImm64UpperBound)))) ||
326 ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) &&
327 (immVal <= kStrLdrPerPostUpperBound));
328 break;
329 case MOP_wstrb:
330 isInRange =
331 (isIntactIndexed && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrbLdrbImmUpperBound)) ||
332 ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) &&
333 (immVal <= kStrLdrPerPostUpperBound));
334 break;
335 case MOP_wstrh:
336 isInRange =
337 (isIntactIndexed && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrhLdrhImmUpperBound)) ||
338 ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) &&
339 (immVal <= kStrLdrPerPostUpperBound));
340 break;
341 default:
342 break;
343 }
344 return isInRange;
345 }
346
IsStoreMop(MOperator mOp) const347 bool AArch64CGFunc::IsStoreMop(MOperator mOp) const
348 {
349 switch (mOp) {
350 case MOP_sstr:
351 case MOP_dstr:
352 case MOP_qstr:
353 case MOP_xstr:
354 case MOP_wstr:
355 case MOP_wstrb:
356 case MOP_wstrh:
357 return true;
358 default:
359 return false;
360 }
361 }
362
/* Copy register `src` into `dest`. A non-memory destination gets a plain
 * register move; a memory destination gets a store, with the offset split via
 * an extra ADD when it does not fit the store's immediate field. */
void AArch64CGFunc::SelectCopyRegOpnd(Operand &dest, PrimType dtype, Operand::OperandType opndType, uint32 dsize,
                                      Operand &src, PrimType stype)
{
    if (opndType != Operand::kOpdMem) {
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickMovBetweenRegs(dtype, stype), dest, src));
        return;
    }
    bool is64Bits = (dest.GetSize() == k64BitSize) ? true : false;
    MOperator strMop = PickStInsn(dsize, stype);
    if (!dest.IsMemoryAccessOperand()) {
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest));
        return;
    }

    MemOperand *memOpnd = static_cast<MemOperand *>(&dest);
    DEBUG_ASSERT(memOpnd != nullptr, "memOpnd should not be nullptr");
    /* lo12-relocated addresses need no offset range check here */
    if (memOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li) {
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest));
        return;
    }
    if (memOpnd->GetOffsetOperand() == nullptr) {
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest));
        return;
    }
    ImmOperand *immOpnd = static_cast<ImmOperand *>(memOpnd->GetOffsetOperand());
    DEBUG_ASSERT(immOpnd != nullptr, "immOpnd should not be nullptr");
    int64 immVal = immOpnd->GetValue();
    bool isIntactIndexed = memOpnd->IsIntactIndexed();
    bool isPostIndexed = memOpnd->IsPostIndexed();
    bool isPreIndexed = memOpnd->IsPreIndexed();
    DEBUG_ASSERT(!isPostIndexed, "memOpnd should not be post-index type");
    DEBUG_ASSERT(!isPreIndexed, "memOpnd should not be pre-index type");
    bool isInRange = false;
    isInRange = IsImmediateValueInRange(strMop, immVal, is64Bits, isIntactIndexed, isPostIndexed, isPreIndexed);
    bool isMopStr = IsStoreMop(strMop);
    if (isInRange || !isMopStr) {
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest));
        return;
    }
    DEBUG_ASSERT(memOpnd->GetBaseRegister() != nullptr, "nullptr check");
    if (isIntactIndexed) {
        /* offset out of range: rebase via ADD, then store with a small offset */
        memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dsize);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, *memOpnd));
    } else if (isPostIndexed || isPreIndexed) {
        /* NOTE(review): unreachable when the DEBUG_ASSERTs above are enabled —
         * they reject pre/post-indexed operands; presumably kept for release
         * builds. Emulates the indexed store with mov + store + add. */
        RegOperand &reg = CreateRegisterOperandOfType(PTY_i64);
        MOperator mopMov = MOP_xmovri64;
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopMov, reg, *immOpnd));
        MOperator mopAdd = MOP_xaddrrr;
        MemOperand &newDest =
            GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(dtype), memOpnd->GetBaseRegister(), nullptr,
                               &GetOrCreateOfstOpnd(0, k32BitSize), nullptr);
        Insn &insn1 = GetInsnBuilder()->BuildInsn(strMop, src, newDest);
        Insn &insn2 = GetInsnBuilder()->BuildInsn(mopAdd, *newDest.GetBaseRegister(), *newDest.GetBaseRegister(), reg);
        if (isPostIndexed) {
            GetCurBB()->AppendInsn(insn1);
            GetCurBB()->AppendInsn(insn2);
        } else {
            /* isPreIndexed */
            GetCurBB()->AppendInsn(insn2);
            GetCurBB()->AppendInsn(insn1);
        }
    }
}
426
SelectCopy(Operand & dest,PrimType dtype,Operand & src,PrimType stype,BaseNode * baseNode)427 void AArch64CGFunc::SelectCopy(Operand &dest, PrimType dtype, Operand &src, PrimType stype, BaseNode *baseNode)
428 {
429 DEBUG_ASSERT(dest.IsRegister() || dest.IsMemoryAccessOperand(), "");
430 uint32 dsize = GetPrimTypeBitSize(dtype);
431 if (dest.IsRegister()) {
432 dsize = dest.GetSize();
433 }
434 Operand::OperandType opnd0Type = dest.GetKind();
435 Operand::OperandType opnd1Type = src.GetKind();
436 DEBUG_ASSERT(((dsize >= src.GetSize()) || (opnd0Type == Operand::kOpdRegister) || (opnd0Type == Operand::kOpdMem)),
437 "NYI");
438 DEBUG_ASSERT(((opnd0Type == Operand::kOpdRegister) || (src.GetKind() == Operand::kOpdRegister)),
439 "either src or dest should be register");
440
441 switch (opnd1Type) {
442 case Operand::kOpdMem:
443 SelectCopyMemOpnd(dest, dtype, dsize, src, stype);
444 break;
445 case Operand::kOpdOffset:
446 case Operand::kOpdImmediate:
447 SelectCopyImm(dest, dtype, static_cast<ImmOperand &>(src), stype);
448 break;
449 case Operand::kOpdFPImmediate:
450 CHECK_FATAL(static_cast<ImmOperand &>(src).GetValue() == 0, "NIY");
451 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn((dsize == k32BitSize) ? MOP_xvmovsr : MOP_xvmovdr, dest,
452 GetZeroOpnd(dsize)));
453 break;
454 case Operand::kOpdRegister: {
455 if (dest.IsRegister()) {
456 RegOperand &desReg = static_cast<RegOperand &>(dest);
457 RegOperand &srcReg = static_cast<RegOperand &>(src);
458 if (desReg.GetRegisterNumber() == srcReg.GetRegisterNumber()) {
459 break;
460 }
461 }
462 SelectCopyRegOpnd(dest, dtype, opnd0Type, dsize, src, stype);
463 break;
464 }
465 default:
466 CHECK_FATAL(false, "NYI");
467 }
468 }
469
470 /* This function copies src to a register, the src can be an imm, mem or a label */
SelectCopy(Operand & src,PrimType stype,PrimType dtype)471 RegOperand &AArch64CGFunc::SelectCopy(Operand &src, PrimType stype, PrimType dtype)
472 {
473 RegOperand &dest = CreateRegisterOperandOfType(dtype);
474 SelectCopy(dest, dtype, src, stype);
475 return dest;
476 }
477
478 /*
479 * We need to adjust the offset of a stack allocated local variable
480 * if we store FP/SP before any other local variables to save an instruction.
481 * See AArch64CGFunc::OffsetAdjustmentForFPLR() in aarch64_cgfunc.cpp
482 *
483 * That is when we !UsedStpSubPairForCallFrameAllocation().
484 *
485 * Because we need to use the STP/SUB instruction pair to store FP/SP 'after'
486 * local variables when the call frame size is greater that the max offset
487 * value allowed for the STP instruction (we cannot use STP w/ prefix, LDP w/
488 * postfix), if UsedStpSubPairForCallFrameAllocation(), we don't need to
489 * adjust the offsets.
490 */
IsImmediateOffsetOutOfRange(const MemOperand & memOpnd,uint32 bitLen)491 bool AArch64CGFunc::IsImmediateOffsetOutOfRange(const MemOperand &memOpnd, uint32 bitLen)
492 {
493 DEBUG_ASSERT(bitLen >= k8BitSize, "bitlen error");
494 DEBUG_ASSERT(bitLen <= k128BitSize, "bitlen error");
495
496 if (bitLen >= k8BitSize) {
497 bitLen = static_cast<uint32>(RoundUp(bitLen, k8BitSize));
498 }
499 DEBUG_ASSERT((bitLen & (bitLen - 1)) == 0, "bitlen error");
500
501 MemOperand::AArch64AddressingMode mode = memOpnd.GetAddrMode();
502 if ((mode == MemOperand::kAddrModeBOi) && memOpnd.IsIntactIndexed()) {
503 OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate();
504 int32 offsetValue = ofstOpnd ? static_cast<int32>(ofstOpnd->GetOffsetValue()) : 0;
505 if (ofstOpnd && ofstOpnd->GetVary() == kUnAdjustVary) {
506 offsetValue +=
507 static_cast<int32>(static_cast<AArch64MemLayout *>(GetMemlayout())->RealStackFrameSize() + 0xff);
508 }
509 offsetValue += kAarch64IntregBytelen << 1; /* Refer to the above comment */
510 return MemOperand::IsPIMMOffsetOutOfRange(offsetValue, bitLen);
511 } else {
512 return false;
513 }
514 }
515
516 // This api is used to judge whether opnd is legal for mop.
517 // It is implemented by calling verify api of mop (InsnDesc -> Verify).
// This api is used to judge whether opnd is legal for mop.
// It is implemented by calling verify api of mop (InsnDesc -> Verify).
bool AArch64CGFunc::IsOperandImmValid(MOperator mOp, Operand *o, uint32 opndIdx) const
{
    const InsnDesc *md = &AArch64CG::kMd[mOp];
    auto *opndProp = md->opndMD[opndIdx];
    /* temporary mempool/allocator only used to build the operand vector below;
     * both are released on every return path */
    MemPool *localMp = memPoolCtrler.NewMemPool("opnd verify mempool", true);
    auto *localAlloc = new MapleAllocator(localMp);
    MapleVector<Operand *> testOpnds(md->opndMD.size(), localAlloc->Adapter());
    testOpnds[opndIdx] = o;
    bool flag = true;
    Operand::OperandType opndTy = opndProp->GetOperandType();
    if (opndTy == Operand::kOpdMem) {
        auto *memOpnd = static_cast<MemOperand *>(o);
        CHECK_FATAL(memOpnd != nullptr, "memOpnd should not be nullptr");
        /* base + register-offset addressing carries no immediate: always valid */
        if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOrX &&
            (!memOpnd->IsPostIndexed() && !memOpnd->IsPreIndexed())) {
            delete localAlloc;
            memPoolCtrler.DeleteMemPool(localMp);
            return true;
        }
        OfstOperand *ofStOpnd = memOpnd->GetOffsetImmediate();
        int64 offsetValue = ofStOpnd ? ofStOpnd->GetOffsetValue() : 0LL;
        if (md->IsLoadStorePair() || (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi)) {
            flag = md->Verify(testOpnds);
        } else if (memOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li) {
            /* :lo12: addressing only tolerates a zero extra offset */
            if (offsetValue == 0) {
                flag = md->Verify(testOpnds);
            } else {
                flag = false;
            }
        } else if (memOpnd->IsPostIndexed() || memOpnd->IsPreIndexed()) {
            /* pre/post-index offsets must lie in [kNegative256BitSize, k256BitSizeInt] */
            flag = (offsetValue <= static_cast<int64>(k256BitSizeInt) && offsetValue >= kNegative256BitSize);
        }
    } else if (opndTy == Operand::kOpdImmediate) {
        flag = md->Verify(testOpnds);
    }
    delete localAlloc;
    memPoolCtrler.DeleteMemPool(localMp);
    return flag;
}
557
/* Build the replacement memory operand [baseReg, #offset] used after an
 * out-of-range offset has been split onto a new base register. */
MemOperand &AArch64CGFunc::CreateReplacementMemOperand(uint32 bitLen, RegOperand &baseReg, int64 offset)
{
    return CreateMemOpnd(baseReg, offset, bitLen);
}
562
CheckIfSplitOffsetWithAdd(const MemOperand & memOpnd,uint32 bitLen) const563 bool AArch64CGFunc::CheckIfSplitOffsetWithAdd(const MemOperand &memOpnd, uint32 bitLen) const
564 {
565 if (memOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd.IsIntactIndexed()) {
566 return false;
567 }
568 OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate();
569 int32 opndVal = static_cast<int32>(ofstOpnd->GetOffsetValue());
570 int32 maxPimm = memOpnd.GetMaxPIMM(bitLen);
571 int32 q0 = opndVal / maxPimm;
572 int32 addend = q0 * maxPimm;
573 int32 r0 = opndVal - addend;
574 int32 alignment = static_cast<int32_t>(memOpnd.GetImmediateOffsetAlignment(bitLen));
575 int32 r1 = static_cast<uint32>(r0) & ((1u << static_cast<uint32>(alignment)) - 1);
576 addend = addend + r1;
577 return (addend > 0);
578 }
579
GetBaseRegForSplit(uint32 baseRegNum)580 RegOperand *AArch64CGFunc::GetBaseRegForSplit(uint32 baseRegNum)
581 {
582 RegOperand *resOpnd = nullptr;
583 if (baseRegNum == AArch64reg::kRinvalid) {
584 resOpnd = &CreateRegisterOperandOfType(PTY_i64);
585 } else if (AArch64isa::IsPhysicalRegister(baseRegNum)) {
586 resOpnd = &GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(baseRegNum),
587 GetPointerSize() * kBitsPerByte, kRegTyInt);
588 } else {
589 resOpnd = &GetOrCreateVirtualRegisterOperand(baseRegNum);
590 }
591 return resOpnd;
592 }
593
/* Compute the immediate addend for splitting an out-of-range offset into
 * ADD base', base, #addend  +  LDR/STR [base', #remained].
 * Returns the addend as an ImmOperand; the caller derives `remained` as
 * ofstVal - addend.
 * NOTE(review): resOpnd, isDest and insn are unused in this body —
 * presumably kept for interface symmetry with SplitOffsetWithAddInstruction;
 * confirm before removing. */
ImmOperand &AArch64CGFunc::SplitAndGetRemained(const MemOperand &memOpnd, uint32 bitLen, RegOperand *resOpnd,
                                               int64 ofstVal, bool isDest, Insn *insn, bool forPair)
{
    /* drop the about-to-be-split operand from the cache so the stale
       (un-split) version is not reused */
    auto it = hashMemOpndTable.find(memOpnd);
    if (it != hashMemOpndTable.end()) {
        hashMemOpndTable.erase(memOpnd);
    }
    /*
     * opndVal == Q0 * 32760(16380) + R0
     * R0 == Q1 * 8(4) + R1
     * ADDEND == Q0 * 32760(16380) + R1
     * NEW_OFFSET = Q1 * 8(4)
     * we want to generate two instructions:
     * ADD TEMP_REG, X29, ADDEND
     * LDR/STR TEMP_REG, [ TEMP_REG, #NEW_OFFSET ]
     */
    int32 maxPimm = 0;
    if (!forPair) {
        maxPimm = MemOperand::GetMaxPIMM(bitLen);
    } else {
        maxPimm = MemOperand::GetMaxPairPIMM(bitLen);
    }
    DEBUG_ASSERT(maxPimm != 0, "get max pimm failed");

    /* round toward negative infinity so the remainder is non-negative */
    int64 q0 = ofstVal / maxPimm + (ofstVal < 0 ? -1 : 0);
    int64 addend = q0 * maxPimm;
    int64 r0 = ofstVal - addend;
    int64 alignment = MemOperand::GetImmediateOffsetAlignment(bitLen);
    auto q1 = static_cast<int64>(static_cast<uint64>(r0) >> static_cast<uint64>(alignment));
    auto r1 = static_cast<int64>(static_cast<uint64>(r0) & ((1u << static_cast<uint64>(alignment)) - 1));
    auto remained = static_cast<int64>(static_cast<uint64>(q1) << static_cast<uint64>(alignment));
    addend = addend + r1;
    if (addend > 0) {
        /* try folding the addend's low 12 bits (8 for pair ops) back into the
           load/store offset when the result still fits and stays aligned */
        int64 suffixClear = 0xfff;
        if (forPair) {
            suffixClear = 0xff;
        }
        int64 remainedTmp = remained + (addend & suffixClear);
        if (!MemOperand::IsPIMMOffsetOutOfRange(static_cast<int32>(remainedTmp), bitLen) &&
            ((static_cast<uint64>(remainedTmp) & ((1u << static_cast<uint64>(alignment)) - 1)) == 0)) {
            remained = remainedTmp;
            addend = (addend & ~suffixClear);
        }
    }
    ImmOperand &immAddend = CreateImmOperand(addend, k64BitSize, true);
    if (memOpnd.GetOffsetImmediate()->GetVary() == kUnAdjustVary) {
        immAddend.SetVary(kUnAdjustVary);
    }
    return immAddend;
}
644
/* Rewrite a kAddrModeBOi memory operand whose offset does not fit the
 * load/store immediate field: emit `add newBase, origBase, #addend` and
 * return [newBase, #remained], which addresses the same location. */
MemOperand &AArch64CGFunc::SplitOffsetWithAddInstruction(const MemOperand &memOpnd, uint32 bitLen, uint32 baseRegNum,
                                                         bool isDest, Insn *insn, bool forPair)
{
    DEBUG_ASSERT((memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi), "expect kAddrModeBOi memOpnd");
    DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "expect intactIndexed memOpnd");
    int64 offsetVal = memOpnd.GetOffsetImmediate()->GetOffsetValue();
    RegOperand *newBaseReg = GetBaseRegForSplit(baseRegNum);
    ImmOperand &addendOpnd = SplitAndGetRemained(memOpnd, bitLen, newBaseReg, offsetVal, isDest, insn, forPair);
    int64 remainedOfst = (offsetVal - addendOpnd.GetValue());
    RegOperand *origBaseReg = memOpnd.GetBaseRegister();
    DEBUG_ASSERT(origBaseReg != nullptr, "nullptr check");
    /* materialize newBaseReg = origBaseReg + addend, either at the current
       insertion point or relative to the given insn */
    if (insn != nullptr) {
        SelectAddAfterInsn(*newBaseReg, *origBaseReg, addendOpnd, PTY_i64, isDest, *insn);
    } else {
        SelectAdd(*newBaseReg, *origBaseReg, addendOpnd, PTY_i64);
    }
    MemOperand &rebuiltMemOpnd = CreateReplacementMemOperand(bitLen, *newBaseReg, remainedOfst);
    rebuiltMemOpnd.SetStackMem(memOpnd.IsStackMem());
    return rebuiltMemOpnd;
}
666
SelectDassign(DassignNode & stmt,Operand & opnd0)667 void AArch64CGFunc::SelectDassign(DassignNode &stmt, Operand &opnd0)
668 {
669 SelectDassign(stmt.GetStIdx(), stmt.GetFieldID(), stmt.GetRHS()->GetPrimType(), opnd0);
670 }
671
/*
 * NOTE: I divided SelectDassign so that we can create "virtual" assignments
 * when selecting other complex Maple IR instructions. For example, the atomic
 * exchange and other intrinsics will need to assign their results to local
 * variables. Such Maple IR instructions are platform-specific (e.g.
 * atomic_exchange can be implemented as one single machine instruction on x86_64
 * and ARMv8.1, but ARMv8.0 needs an LL/SC loop), therefore they cannot (in
 * principle) be lowered at BELowerer or CGLowerer.
 */
/* Store `opnd0` into the local/global symbol identified by stIdx.
 * rhsPType: primitive type of the value being assigned.
 * NOTE(review): in this version `offset` is fixed at 0 and `parmCopy` at
 * false, so fieldId is effectively unused and the LoadStructCopyBase path
 * never runs — presumably trimmed from a fuller implementation; confirm. */
void AArch64CGFunc::SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opnd0)
{
    MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(stIdx);
    int32 offset = 0;
    bool parmCopy = false;
    uint32 regSize = GetPrimTypeBitSize(rhsPType);
    MIRType *type = symbol->GetType();
    /* ensure the value to store sits in a register of the right class/width */
    Operand &stOpnd = LoadIntoRegister(opnd0, IsPrimitiveInteger(rhsPType),
                                       regSize, IsSignedInteger(type->GetPrimType()));
    MOperator mOp = MOP_undef;

    uint32 dataSize = GetPrimTypeBitSize(type->GetPrimType());
    MemOperand *memOpnd = nullptr;
    if (parmCopy) {
        memOpnd = &LoadStructCopyBase(*symbol, offset, static_cast<int>(dataSize));
    } else {
        memOpnd = &GetOrCreateMemOpnd(*symbol, offset, dataSize);
    }
    /* split the offset when it does not fit the store's immediate field */
    if ((memOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) {
        memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize);
    }

    /* In bpl mode, a func symbol's type is represented as a MIRFuncType instead of a MIRPtrType (pointing to
     * MIRFuncType), so we allow `kTypeFunction` to appear here */
    DEBUG_ASSERT(((type->GetKind() == kTypeScalar) || (type->GetKind() == kTypePointer) ||
                  (type->GetKind() == kTypeFunction) || (type->GetKind() == kTypeArray)), "NYI dassign type");
    PrimType ptyp = type->GetPrimType();

    mOp = PickStInsn(GetPrimTypeBitSize(ptyp), ptyp);
    Insn &insn = GetInsnBuilder()->BuildInsn(mOp, stOpnd, *memOpnd);
    GetCurBB()->AppendInsn(insn);
}
713
/*
 * Lower a regassign: copy the evaluated rhs (opnd0) into the pseudo register
 * named by the statement.  At -O0 the value is additionally written back to
 * the pseudo register's spill slot; otherwise, assignments to return
 * registers (R0/R1, V0..V3) get a pseudo "ret" marker so later phases treat
 * the value as live-out.
 */
void AArch64CGFunc::SelectRegassign(RegassignNode &stmt, Operand &opnd0)
{
    RegOperand *regOpnd = nullptr;
    PregIdx pregIdx = stmt.GetRegIdx();
    if (IsSpecialPseudoRegister(pregIdx)) {
        /* special pseudo registers use negated indices */
        regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, stmt.GetPrimType());
    } else {
        regOpnd = GetOrCreateRegOpndFromPregIdx(pregIdx, stmt.GetPrimType());
    }
    /* look at rhs */
    PrimType rhsType = stmt.Opnd(0)->GetPrimType();
    DEBUG_ASSERT(regOpnd != nullptr, "null ptr check!");
    Operand *srcOpnd = &opnd0;
    if (GetPrimTypeSize(stmt.GetPrimType()) > GetPrimTypeSize(rhsType) && IsPrimitiveInteger(rhsType)) {
        /* widen the integer rhs first so the copy below happens at the lhs width */
        CHECK_FATAL(IsPrimitiveInteger(stmt.GetPrimType()), "NIY");
        srcOpnd = &CreateRegisterOperandOfType(stmt.GetPrimType());
        SelectCvtInt2Int(nullptr, srcOpnd, &opnd0, rhsType, stmt.GetPrimType());
    }
    SelectCopy(*regOpnd, stmt.GetPrimType(), *srcOpnd, rhsType, stmt.GetRHS());

    if (GetCG()->GenerateVerboseCG()) {
        /* annotate the emitted copy (or the previous BB's last insn) for readable asm */
        if (GetCurBB()->GetLastInsn()) {
            GetCurBB()->GetLastInsn()->AppendComment(" regassign %" + std::to_string(pregIdx) + "; ");
        } else if (GetCurBB()->GetPrev()->GetLastInsn()) {
            GetCurBB()->GetPrev()->GetLastInsn()->AppendComment(" regassign %" + std::to_string(pregIdx) + "; ");
        }
    }

    if ((Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) && (pregIdx >= 0)) {
        /* at -O0 pseudo registers live on the stack: store the value to its spill slot */
        MemOperand *dest = GetPseudoRegisterSpillMemoryOperand(pregIdx);
        PrimType stype = GetTypeFromPseudoRegIdx(pregIdx);
        MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx);
        uint32 srcBitLength = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte;
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(srcBitLength, stype), *regOpnd, *dest));
    } else if (regOpnd->GetRegisterNumber() == R0 || regOpnd->GetRegisterNumber() == R1) {
        Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, *regOpnd);
        GetCurBB()->AppendInsn(pseudo);
    } else if (regOpnd->GetRegisterNumber() >= V0 && regOpnd->GetRegisterNumber() <= V3) {
        Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_float, *regOpnd);
        GetCurBB()->AppendInsn(pseudo);
    }
    /* record the derived-ref -> base-ref association for this preg, if any
     * (NOTE(review): presumably used for GC base/derived pointer tracking — confirm) */
    const auto &derived2BaseRef = GetFunction().GetDerived2BaseRef();
    auto itr = derived2BaseRef.find(pregIdx);
    if (itr != derived2BaseRef.end()) {
        auto *derivedRegOpnd = GetOrCreateRegOpndFromPregIdx(itr->first, PTY_ref);
        auto *baseRegOpnd = GetOrCreateRegOpndFromPregIdx(itr->second, PTY_ref);
        derivedRegOpnd->SetBaseRefOpnd(*baseRegOpnd);
    }
}
763
GetOrCreateLocator(CallConvKind cc)764 CCImpl *AArch64CGFunc::GetOrCreateLocator(CallConvKind cc)
765 {
766 auto it = hashCCTable.find(cc);
767 if (it != hashCCTable.end()) {
768 it->second->Init();
769 return it->second;
770 }
771 CCImpl *res = nullptr;
772 if (cc == kCCall) {
773 res = memPool->New<AArch64CallConvImpl>(GetBecommon());
774 } else if (cc == kWebKitJS) {
775 res = memPool->New<AArch64WebKitJSCC>(GetBecommon());
776 } else {
777 CHECK_FATAL(false, "unsupported yet");
778 }
779 hashCCTable[cc] = res;
780 return res;
781 }
782
GetPointedToType(const MIRPtrType & pointerType)783 static MIRType *GetPointedToType(const MIRPtrType &pointerType)
784 {
785 MIRType *aType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType.GetPointedTyIdx());
786 if (aType->GetKind() == kTypeArray) {
787 MIRArrayType *arrayType = static_cast<MIRArrayType *>(aType);
788 return GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx());
789 }
790 return aType;
791 }
792
/*
 * Lower an iassign: evaluate the rhs and store it through the pointer computed
 * by stmt.Opnd(0).  The pointee type of the iassign's pointer type decides the
 * store width.
 */
void AArch64CGFunc::SelectIassign(IassignNode &stmt)
{
    int32 offset = 0;
    MIRPtrType *pointerType = static_cast<MIRPtrType *>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx()));
    DEBUG_ASSERT(pointerType != nullptr, "expect a pointer type at iassign node");
    MIRType *pointedType = nullptr;
    bool isRefField = false;
    pointedType = GetPointedToType(*pointerType);

    PrimType styp = stmt.GetRHS()->GetPrimType();
    Operand *valOpnd = HandleExpr(stmt, *stmt.GetRHS());
    /* the store instruction needs its source in a register */
    Operand &srcOpnd = LoadIntoRegister(*valOpnd, (IsPrimitiveInteger(styp)),
                                        GetPrimTypeBitSize(styp));

    PrimType destType = pointedType->GetPrimType();
    DEBUG_ASSERT(stmt.Opnd(0) != nullptr, "null ptr check");
    MemOperand &memOpnd = CreateMemOpnd(destType, stmt, *stmt.Opnd(0), offset);
    SelectCopy(memOpnd, destType, srcOpnd, destType);
    /* isRefField is always false in this path; the marking is kept for parity with upstream */
    if (GetCurBB() && GetCurBB()->GetLastMachineInsn()) {
        GetCurBB()->GetLastMachineInsn()->MarkAsAccessRefField(isRefField);
    }
}
815
/*
 * Lower a dread: load the value of a local/global symbol into a result
 * register and return it.
 * NOTE(review): aggSize is fixed at 0 and parmCopy at false in this version,
 * so the aggregate/volatile branch and the LoadStructCopyBase path below are
 * dead — confirm against the upstream source before depending on them.
 */
Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr)
{
    MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx());

    PrimType symType = symbol->GetType()->GetPrimType();
    uint32 offset = 0;
    bool parmCopy = false;

    uint32 dataSize = GetPrimTypeBitSize(symType);
    uint32 aggSize = 0;
    PrimType resultType = expr.GetPrimType();
    MemOperand *memOpnd = nullptr;
    if (aggSize > k8ByteSize) {
        if (parent.op == OP_eval) {
            if (symbol->GetAttr(ATTR_volatile)) {
                /* Need to generate loads for the upper parts of the struct. */
                Operand &dest = GetZeroOpnd(k64BitSize);
                uint32 numLoads = static_cast<uint32>(RoundUp(aggSize, k64BitSize) / k64BitSize);
                for (uint32 o = 0; o < numLoads; ++o) {
                    if (parmCopy) {
                        memOpnd = &LoadStructCopyBase(*symbol, offset + o * GetPointerSize(), GetPointerSize());
                    } else {
                        memOpnd = &GetOrCreateMemOpnd(*symbol, offset + o * GetPointerSize(), GetPointerSize());
                    }
                    if (IsImmediateOffsetOutOfRange(*memOpnd, GetPointerSize())) {
                        memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, GetPointerSize());
                    }
                    SelectCopy(dest, PTY_u64, *memOpnd, PTY_u64);
                }
            } else {
                /* No side-effects. No need to generate anything for eval. */
            }
        } else {
            if (expr.GetFieldID() != 0) {
                CHECK_FATAL(false, "SelectDread: Illegal agg size");
            }
        }
    }
    if (parmCopy) {
        memOpnd = &LoadStructCopyBase(*symbol, offset, static_cast<int>(dataSize));
    } else {
        memOpnd = &GetOrCreateMemOpnd(*symbol, offset, dataSize);
    }
    /* offsets the load cannot encode directly are split into an explicit add */
    if ((memOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) {
        memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize);
    }

    RegOperand &resOpnd = GetOrCreateResOperand(parent, symType);
    /* the copy converts from the symbol's type to the expression's result type */
    SelectCopy(resOpnd, resultType, *memOpnd, symType);
    return &resOpnd;
}
867
SelectRegread(RegreadNode & expr)868 RegOperand *AArch64CGFunc::SelectRegread(RegreadNode &expr)
869 {
870 PregIdx pregIdx = expr.GetRegIdx();
871 if (IsSpecialPseudoRegister(pregIdx)) {
872 /* if it is one of special registers */
873 return &GetOrCreateSpecialRegisterOperand(-pregIdx, expr.GetPrimType());
874 }
875 RegOperand ® = *GetOrCreateRegOpndFromPregIdx(pregIdx, expr.GetPrimType());
876 if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) {
877 MemOperand *src = GetPseudoRegisterSpillMemoryOperand(pregIdx);
878 MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx);
879 PrimType stype = preg->GetPrimType();
880 uint32 srcBitLength = GetPrimTypeSize(stype) * kBitsPerByte;
881 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(srcBitLength, stype), reg, *src));
882 }
883 return ®
884 }
885
/*
 * Lower an iread: load a value of the pointee type from the address computed
 * by expr.Opnd(0) (plus extraOffset).  finalBitFieldDestType, when not
 * kPtyInvalid, overrides the access type for bit-field extraction.  Under an
 * OP_eval parent the load may target the zero register, serving only as an
 * implicit null check.
 */
Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset,
                                    PrimType finalBitFieldDestType)
{
    int32 offset = 0;
    MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(expr.GetTyIdx());
    MIRPtrType *pointerType = static_cast<MIRPtrType *>(type);
    DEBUG_ASSERT(pointerType != nullptr, "expect a pointer type at iread node");
    MIRType *pointedType = nullptr;
    bool isRefField = false;

    pointedType = GetPointedToType(*pointerType);

    RegType regType = GetRegTyFromPrimTy(expr.GetPrimType());
    uint32 regSize = GetPrimTypeSize(expr.GetPrimType());
    if (regSize < k4ByteSize) {
        regSize = k4ByteSize; /* 32-bit */
    }
    Operand *result = nullptr;
    constexpr int regSizeMax = 8;
    if (parent.GetOpCode() == OP_eval && regSize <= regSizeMax) {
        /* regSize << 3, that is regSize * 8, change bytes to bits */
        result = &GetZeroOpnd(regSize << 3);
    } else {
        result = &GetOrCreateResOperand(parent, expr.GetPrimType());
    }

    PrimType destType = pointedType->GetPrimType();

    uint32 bitSize = GetPrimTypeBitSize(destType);
    if (regType == kRegTyFloat) {
        /* for FP results the expression's own type decides the load width */
        destType = expr.GetPrimType();
        bitSize = GetPrimTypeBitSize(destType);
    }

    PrimType memType = (finalBitFieldDestType == kPtyInvalid ? destType : finalBitFieldDestType);
    MemOperand *memOpnd = CreateMemOpndOrNull(memType, expr, *expr.Opnd(0),
                                              static_cast<int64>(static_cast<int>(offset) + extraOffset));
    if (aggParamReg != nullptr) {
        /* value already sits in an aggregate-parameter register: no load needed */
        isAggParamInReg = false;
        return aggParamReg;
    }
    DEBUG_ASSERT(memOpnd != nullptr, "memOpnd should not be nullptr");
    MOperator mOp = 0;
    if (finalBitFieldDestType == kPtyInvalid) {
        mOp = PickLdInsn(bitSize, destType);
    } else {
        mOp = PickLdInsn(GetPrimTypeBitSize(finalBitFieldDestType), finalBitFieldDestType);
    }
    /* split offsets the chosen load cannot encode into an explicit add */
    if ((memOpnd->GetMemVaryType() == kNotVary) && !IsOperandImmValid(mOp, memOpnd, 1)) {
        memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, bitSize);
    }
    Insn &insn = GetInsnBuilder()->BuildInsn(mOp, *result, *memOpnd);
    if (parent.GetOpCode() == OP_eval && result->IsRegister() &&
        static_cast<RegOperand *>(result)->GetRegisterNumber() == RZR) {
        /* a load into xzr/wzr only checks that the address is dereferenceable */
        insn.SetComment("null-check");
    }
    GetCurBB()->AppendInsn(insn);

    if (parent.op != OP_eval) {
        const InsnDesc *md = &AArch64CG::kMd[insn.GetMachineOpcode()];
        auto *prop = md->GetOpndDes(0);
        if ((prop->GetSize()) < insn.GetOperand(0).GetSize()) {
            /* the load wrote fewer bits than the result register holds: extend explicitly */
            switch (destType) {
                case PTY_i8:
                    mOp = MOP_xsxtb64;
                    break;
                case PTY_i16:
                    mOp = MOP_xsxth64;
                    break;
                case PTY_i32:
                    mOp = MOP_xsxtw64;
                    break;
                case PTY_u1:
                case PTY_u8:
                    mOp = MOP_xuxtb32;
                    break;
                case PTY_u16:
                    mOp = MOP_xuxth32;
                    break;
                case PTY_u32:
                    mOp = MOP_xuxtw64;
                    break;
                default:
                    break;
            }
            if (destType == PTY_u1) {
                /* normalize a boolean to 0/1 before the zero-extension */
                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, insn.GetOperand(0),
                                                                   insn.GetOperand(0), CreateImmOperand(1, kMaxImmVal5Bits, false)));
            }

            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, insn.GetOperand(0), insn.GetOperand(0)));
        }
    }
    /* isRefField is always false in this path; the marking is kept for parity with upstream */
    if (GetCurBB() && GetCurBB()->GetLastMachineInsn()) {
        GetCurBB()->GetLastMachineInsn()->MarkAsAccessRefField(isRefField);
    }
    return result;
}
984
SelectIntConst(const MIRIntConst & intConst,const BaseNode & parent)985 Operand *AArch64CGFunc::SelectIntConst(const MIRIntConst &intConst, const BaseNode &parent)
986 {
987 auto primType = intConst.GetType().GetPrimType();
988 if (kOpcodeInfo.IsCompare(parent.GetOpCode())) {
989 primType = static_cast<const CompareNode &>(parent).GetOpndType();
990 }
991 return &CreateImmOperand(intConst.GetExtValue(), GetPrimTypeBitSize(primType), false);
992 }
993
/*
 * Materialize a floating-point constant whose raw bit pattern is `val`.
 * If the pattern fits the AArch64 FMOV (scalar, immediate) modified-immediate
 * form (sign bit, a replicated exponent-top pattern, a small fraction, and all
 * remaining mantissa bits zero), emit a single fmov with the 8-bit immediate.
 * Otherwise build the bit pattern in a GPR and move it to the FP register.
 * NOTE(review): the masks/shifts below mirror the FMOV imm8 field layout —
 * verify bit positions against the Arm ARM before modifying.
 */
Operand *AArch64CGFunc::HandleFmovImm(PrimType stype, int64 val, MIRConst &mirConst, const BaseNode &parent)
{
    Operand *result;
    bool is64Bits = (GetPrimTypeBitSize(stype) == k64BitSize);
    /* low mantissa bits (48 for f64, 19 for f32) must all be zero for imm8 */
    uint64 canRepreset = is64Bits ? (val & 0xffffffffffff) : (val & 0x7ffff);
    /* val1: top two exponent bits (must differ); val2: the replicated exponent run */
    uint32 val1 = is64Bits ? (val >> 61) & 0x3 : (val >> 29) & 0x3;
    uint32 val2 = is64Bits ? (val >> 54) & 0xff : (val >> 25) & 0x1f;
    bool isSame = is64Bits ? ((val2 == 0) || (val2 == 0xff)) : ((val2 == 0) || (val2 == 0x1f));
    canRepreset = (canRepreset == 0) && ((val1 & 0x1) ^ ((val1 & 0x2) >> 1)) && isSame;
    if (canRepreset) {
        /* pack sign (bit 7) and the exponent/fraction bits into the imm8 */
        uint64 temp1 = is64Bits ? (val >> 63) << 7 : (val >> 31) << 7;
        uint64 temp2 = is64Bits ? val >> 48 : val >> 19;
        int64 imm8 = (temp2 & 0x7f) | temp1;
        Operand *newOpnd0 = &CreateImmOperand(imm8, k8BitSize, true, kNotVary, true);
        result = &GetOrCreateResOperand(parent, stype);
        MOperator mopFmov = (is64Bits ? MOP_xdfmovri : MOP_wsfmovri);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *result, *newOpnd0));
    } else {
        /* not encodable: load the raw bits into a GPR, then fmov GPR -> FPR */
        Operand *newOpnd0 = &CreateImmOperand(val, GetPrimTypeSize(stype) * kBitsPerByte, false);
        PrimType itype = (stype == PTY_f32) ? PTY_i32 : PTY_i64;
        RegOperand &regOpnd = LoadIntoRegister(*newOpnd0, itype);

        result = &GetOrCreateResOperand(parent, stype);
        MOperator mopFmov = (is64Bits ? MOP_xvmovdr : MOP_xvmovsr);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *result, regOpnd));
    }
    return result;
}
1022
SelectFloatConst(MIRFloatConst & floatConst,const BaseNode & parent)1023 Operand *AArch64CGFunc::SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent)
1024 {
1025 PrimType stype = floatConst.GetType().GetPrimType();
1026 int32 val = floatConst.GetIntValue();
1027 /* according to aarch64 encoding format, convert int to float expression */
1028 Operand *result;
1029 result = HandleFmovImm(stype, val, floatConst, parent);
1030 return result;
1031 }
1032
SelectDoubleConst(MIRDoubleConst & doubleConst,const BaseNode & parent)1033 Operand *AArch64CGFunc::SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent)
1034 {
1035 PrimType stype = doubleConst.GetType().GetPrimType();
1036 int64 val = doubleConst.GetIntValue();
1037 /* according to aarch64 encoding format, convert int to float expression */
1038 Operand *result;
1039 result = HandleFmovImm(stype, val, doubleConst, parent);
1040 return result;
1041 }
1042
1043 /*
1044 * Returns the number of leading 0-bits in x, starting at the most significant bit position.
1045 * If x is 0, the result is -1.
1046 */
GetHead0BitNum(int64 val)1047 static int32 GetHead0BitNum(int64 val)
1048 {
1049 uint32 bitNum = 0;
1050 for (; bitNum < k64BitSize; bitNum++) {
1051 if ((0x8000000000000000ULL >> static_cast<uint32>(bitNum)) & static_cast<uint64>(val)) {
1052 break;
1053 }
1054 }
1055 if (bitNum == k64BitSize) {
1056 return -1;
1057 }
1058 return bitNum;
1059 }
1060
1061 /*
1062 * Returns the number of trailing 0-bits in x, starting at the least significant bit position.
1063 * If x is 0, the result is -1.
1064 */
GetTail0BitNum(int64 val)1065 static int32 GetTail0BitNum(int64 val)
1066 {
1067 uint32 bitNum = 0;
1068 for (; bitNum < k64BitSize; bitNum++) {
1069 if ((static_cast<uint64>(1) << static_cast<uint32>(bitNum)) & static_cast<uint64>(val)) {
1070 break;
1071 }
1072 }
1073 if (bitNum == k64BitSize) {
1074 return -1;
1075 }
1076 return bitNum;
1077 }
1078
1079 /*
1080 * If the input integer is power of 2, return log2(input)
1081 * else return -1
1082 */
GetLog2(uint64 val)1083 static inline int32 GetLog2(uint64 val)
1084 {
1085 if (__builtin_popcountll(val) == 1) {
1086 return __builtin_ffsll(static_cast<int64>(val)) - 1;
1087 }
1088 return -1;
1089 }
1090
/*
 * Pick the conditional-branch opcode for a brtrue/brfalse whose condition is
 * the compare `cmpOp`, assuming the flags were set by a preceding cmp/fcmp.
 * For brfalse the condition is negated; the distinct isFloat variants exist
 * because negating a float compare must account for the unordered result
 * (e.g. !(a < b) maps to b.pl rather than b.ge).
 * NOTE(review): float/unordered mapping inferred from the opcode choices —
 * verify against the AArch64 condition-code table before changing.
 */
MOperator AArch64CGFunc::PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSigned) const
{
    switch (cmpOp) {
        case OP_ne:
            return (brOp == OP_brtrue) ? MOP_bne : MOP_beq;
        case OP_eq:
            return (brOp == OP_brtrue) ? MOP_beq : MOP_bne;
        case OP_lt:
            return (brOp == OP_brtrue) ? (isSigned ? MOP_blt : MOP_blo)
                                       : (isFloat ? MOP_bpl : (isSigned ? MOP_bge : MOP_bhs));
        case OP_le:
            return (brOp == OP_brtrue) ? (isSigned ? MOP_ble : MOP_bls)
                                       : (isFloat ? MOP_bhi : (isSigned ? MOP_bgt : MOP_bhi));
        case OP_gt:
            return (brOp == OP_brtrue) ? (isFloat ? MOP_bgt : (isSigned ? MOP_bgt : MOP_bhi))
                                       : (isSigned ? MOP_ble : MOP_bls);
        case OP_ge:
            return (brOp == OP_brtrue) ? (isFloat ? MOP_bpl : (isSigned ? MOP_bge : MOP_bhs))
                                       : (isSigned ? MOP_blt : MOP_blo);
        default:
            CHECK_FATAL(false, "PickJmpInsn error");
    }
}
1114
GenerateCompareWithZeroInstruction(Opcode jmpOp,Opcode cmpOp,bool is64Bits,PrimType primType,LabelOperand & targetOpnd,Operand & opnd0)1115 bool AArch64CGFunc::GenerateCompareWithZeroInstruction(Opcode jmpOp, Opcode cmpOp, bool is64Bits, PrimType primType,
1116 LabelOperand &targetOpnd, Operand &opnd0)
1117 {
1118 bool finish = true;
1119 MOperator mOpCode = MOP_undef;
1120 switch (cmpOp) {
1121 case OP_ne: {
1122 if (jmpOp == OP_brtrue) {
1123 mOpCode = is64Bits ? MOP_xcbnz : MOP_wcbnz;
1124 } else {
1125 mOpCode = is64Bits ? MOP_xcbz : MOP_wcbz;
1126 }
1127 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, targetOpnd));
1128 break;
1129 }
1130 case OP_eq: {
1131 if (jmpOp == OP_brtrue) {
1132 mOpCode = is64Bits ? MOP_xcbz : MOP_wcbz;
1133 } else {
1134 mOpCode = is64Bits ? MOP_xcbnz : MOP_wcbnz;
1135 }
1136 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, targetOpnd));
1137 break;
1138 }
1139 /*
1140 * TBZ/TBNZ instruction have a range of +/-32KB, need to check if the jump target is reachable in a later
1141 * phase. If the branch target is not reachable, then we change tbz/tbnz into combination of ubfx and
1142 * cbz/cbnz, which will clobber one extra register. With LSRA under O2, we can use of the reserved registers
1143 * for that purpose.
1144 */
1145 case OP_lt: {
1146 if (primType == PTY_u64 || primType == PTY_u32) {
1147 return false;
1148 }
1149 ImmOperand &signBit =
1150 CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits, k8BitSize, false);
1151 if (jmpOp == OP_brtrue) {
1152 mOpCode = is64Bits ? MOP_xtbnz : MOP_wtbnz;
1153 } else {
1154 mOpCode = is64Bits ? MOP_xtbz : MOP_wtbz;
1155 }
1156 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, signBit, targetOpnd));
1157 break;
1158 }
1159 case OP_ge: {
1160 if (primType == PTY_u64 || primType == PTY_u32) {
1161 return false;
1162 }
1163 ImmOperand &signBit =
1164 CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits, k8BitSize, false);
1165 if (jmpOp == OP_brtrue) {
1166 mOpCode = is64Bits ? MOP_xtbz : MOP_wtbz;
1167 } else {
1168 mOpCode = is64Bits ? MOP_xtbnz : MOP_wtbnz;
1169 }
1170 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, signBit, targetOpnd));
1171 break;
1172 }
1173 default:
1174 finish = false;
1175 break;
1176 }
1177 return finish;
1178 }
1179
/*
 * Emit a compare + conditional branch for brtrue/brfalse.  origOpnd0 is always
 * forced into a register; origOpnd1 may remain an immediate if it fits a cmp
 * encoding.  Above -O0, compares against zero are turned into cbz/cbnz or
 * tbz/tbnz via GenerateCompareWithZeroInstruction and skip the cmp entirely.
 */
void AArch64CGFunc::SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcode cmpOp, Operand &origOpnd0,
                                   Operand &origOpnd1, PrimType primType, bool signedCond)
{
    Operand *opnd0 = &origOpnd0;
    Operand *opnd1 = &origOpnd1;
    opnd0 = &LoadIntoRegister(origOpnd0, primType);

    bool is64Bits = GetPrimTypeBitSize(primType) == k64BitSize;
    bool isFloat = IsPrimitiveFloat(primType);
    Operand &rflag = GetOrCreateRflag();
    if (isFloat) {
        /* float compares are register-register; pick by operand width (64/32/16) */
        opnd1 = &LoadIntoRegister(origOpnd1, primType);
        MOperator mOp =
            is64Bits ? MOP_dcmperr : ((GetPrimTypeBitSize(primType) == k32BitSize) ? MOP_scmperr : MOP_hcmperr);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, rflag, *opnd0, *opnd1));
    } else {
        bool isImm = ((origOpnd1.GetKind() == Operand::kOpdImmediate) || (origOpnd1.GetKind() == Operand::kOpdOffset));
        if ((origOpnd1.GetKind() != Operand::kOpdRegister) && !isImm) {
            opnd1 = &SelectCopy(origOpnd1, primType, primType);
        }
        MOperator mOp = is64Bits ? MOP_xcmprr : MOP_wcmprr;

        if (isImm) {
            /* a compare against 0 may collapse into a single cbz/cbnz/tbz/tbnz */
            if (static_cast<ImmOperand *>(opnd1)->IsZero() &&
                (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0)) {
                bool finish = GenerateCompareWithZeroInstruction(jmpOp, cmpOp, is64Bits, primType, targetOpnd, *opnd0);
                if (finish) {
                    return;
                }
            }

            /*
             * aarch64 assembly takes up to 24-bits immediate, generating
             * either cmp or cmp with shift 12 encoding
             */
            ImmOperand *immOpnd = static_cast<ImmOperand *>(opnd1);
            if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) {
                mOp = is64Bits ? MOP_xcmpri : MOP_wcmpri;
            } else {
                /* immediate does not fit the cmp encoding: move it into a register */
                opnd1 = &SelectCopy(*opnd1, primType, primType);
            }
        }
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, rflag, *opnd0, *opnd1));
    }

    /* non-integer compares fall back to signedCond for the branch condition */
    bool isSigned = IsPrimitiveInteger(primType) ? IsSignedInteger(primType) : (signedCond ? true : false);
    MOperator jmpOperator = PickJmpInsn(jmpOp, cmpOp, isFloat, isSigned);
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(jmpOperator, rflag, targetOpnd));
}
1229
1230 /*
1231 * brtrue @label0 (ge u8 i32 (
1232 * cmp i32 i64 (dread i64 %Reg2_J, dread i64 %Reg4_J),
1233 * constval i32 0))
1234 * ===>
1235 * cmp r1, r2
1236 * bge Cond, label0
1237 */
/*
 * Lower brtrue/brfalse whose condition is a bare OP_cmp (see the pattern in
 * the comment above): emit the compare for the cmp's own operands, then the
 * conditional branch derived from the enclosing condgoto.
 */
void AArch64CGFunc::SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &expr)
{
    DEBUG_ASSERT(expr.GetOpCode() == OP_cmp, "unexpect opcode");
    Operand *opnd0 = HandleExpr(expr, *expr.Opnd(0));
    Operand *opnd1 = HandleExpr(expr, *expr.Opnd(1));
    CompareNode *node = static_cast<CompareNode *>(&expr);
    bool isFloat = IsPrimitiveFloat(node->GetOpndType());
    opnd0 = &LoadIntoRegister(*opnd0, node->GetOpndType());
    /*
     * most of FP constants are passed as MemOperand
     * except 0.0 which is passed as kOpdFPImmediate
     */
    Operand::OperandType opnd1Type = opnd1->GetKind();
    if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) &&
        (opnd1Type != Operand::kOpdOffset)) {
        opnd1 = &LoadIntoRegister(*opnd1, node->GetOpndType());
    }
    SelectAArch64Cmp(*opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(node->GetOpndType()));
    /* handle condgoto now. */
    LabelIdx labelIdx = stmt.GetOffset();
    BaseNode *condNode = stmt.Opnd(0);
    LabelOperand &targetOpnd = GetOrCreateLabelOperand(labelIdx);
    Opcode cmpOp = condNode->GetOpCode();
    /* the branch condition comes from the condgoto's own compare node */
    PrimType pType = static_cast<CompareNode *>(condNode)->GetOpndType();
    isFloat = IsPrimitiveFloat(pType);
    Operand &rflag = GetOrCreateRflag();
    bool isSigned =
        IsPrimitiveInteger(pType) ? IsSignedInteger(pType) : (IsSignedInteger(condNode->GetPrimType()) ? true : false);
    MOperator jmpOp = PickJmpInsn(stmt.GetOpCode(), cmpOp, isFloat, isSigned);
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(jmpOp, rflag, targetOpnd));
}
1269
1270 /*
1271 * Special case:
1272 * brfalse(ge (cmpg (op0, op1), 0) ==>
1273 * fcmp op1, op2
1274 * blo
1275 */
/*
 * Lower the brfalse(ge(cmpg(op0, op1), 0)) pattern (see comment above): a
 * quiet float compare followed by b.lo, which branches when op0 < op1 or the
 * operands are unordered.
 */
void AArch64CGFunc::SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &expr)
{
    auto &cmpNode = static_cast<CompareNode &>(expr);
    Operand *opnd0 = HandleExpr(cmpNode, *cmpNode.Opnd(0));
    Operand *opnd1 = HandleExpr(cmpNode, *cmpNode.Opnd(1));
    PrimType operandType = cmpNode.GetOpndType();
    opnd0 = opnd0->IsRegister() ? static_cast<RegOperand *>(opnd0) : &SelectCopy(*opnd0, operandType, operandType);
    /* immediates (incl. the FP 0.0 special case) may feed the compare directly */
    Operand::OperandType opnd1Type = opnd1->GetKind();
    if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) &&
        (opnd1Type != Operand::kOpdOffset)) {
        opnd1 = opnd1->IsRegister() ? static_cast<RegOperand *>(opnd1) : &SelectCopy(*opnd1, operandType, operandType);
    }
#ifdef DEBUG
    bool isFloat = IsPrimitiveFloat(operandType);
    if (!isFloat) {
        DEBUG_ASSERT(false, "incorrect operand types");
    }
#endif
    SelectTargetFPCmpQuiet(*opnd0, *opnd1, GetPrimTypeBitSize(operandType));
    Operand &rFlag = GetOrCreateRflag();
    LabelIdx tempLabelIdx = stmt.GetOffset();
    LabelOperand &targetOpnd = GetOrCreateLabelOperand(tempLabelIdx);
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_blo, rFlag, targetOpnd));
}
1300
SelectCondGoto(CondGotoNode & stmt,Operand & opnd0,Operand & opnd1)1301 void AArch64CGFunc::SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1)
1302 {
1303 /*
1304 * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node
1305 * such as a dread for example
1306 */
1307 LabelIdx labelIdx = stmt.GetOffset();
1308 BaseNode *condNode = stmt.Opnd(0);
1309 LabelOperand &targetOpnd = GetOrCreateLabelOperand(labelIdx);
1310 Opcode cmpOp;
1311
1312 PrimType pType;
1313 if (kOpcodeInfo.IsCompare(condNode->GetOpCode())) {
1314 cmpOp = condNode->GetOpCode();
1315 pType = static_cast<CompareNode *>(condNode)->GetOpndType();
1316 } else {
1317 /* not a compare node; dread for example, take its pType */
1318 cmpOp = OP_ne;
1319 pType = condNode->GetPrimType();
1320 }
1321 bool signedCond = IsSignedInteger(pType) || IsPrimitiveFloat(pType);
1322 SelectCondGoto(targetOpnd, stmt.GetOpCode(), cmpOp, opnd0, opnd1, pType, signedCond);
1323 }
1324
SelectGoto(GotoNode & stmt)1325 void AArch64CGFunc::SelectGoto(GotoNode &stmt)
1326 {
1327 Operand &targetOpnd = GetOrCreateLabelOperand(stmt.GetOffset());
1328 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd));
1329 GetCurBB()->SetKind(BB::kBBGoto);
1330 }
1331
SelectAdd(BinaryNode & node,Operand & opnd0,Operand & opnd1,const BaseNode & parent)1332 Operand *AArch64CGFunc::SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1333 {
1334 PrimType dtype = node.GetPrimType();
1335 bool isSigned = IsSignedInteger(dtype);
1336 uint32 dsize = GetPrimTypeBitSize(dtype);
1337 bool is64Bits = (dsize == k64BitSize);
1338 bool isFloat = IsPrimitiveFloat(dtype);
1339 RegOperand *resOpnd = nullptr;
1340 /* promoted type */
1341 PrimType primType =
1342 isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)));
1343 if (parent.GetOpCode() == OP_regassign) {
1344 auto ®AssignNode = static_cast<const RegassignNode &>(parent);
1345 PregIdx pregIdx = regAssignNode.GetRegIdx();
1346 if (IsSpecialPseudoRegister(pregIdx)) {
1347 resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, dtype);
1348 } else {
1349 resOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx));
1350 }
1351 } else {
1352 resOpnd = &CreateRegisterOperandOfType(primType);
1353 }
1354 SelectAdd(*resOpnd, opnd0, opnd1, primType);
1355 return resOpnd;
1356 }
1357
/*
 * Emit resOpnd = opnd0 + opnd1 for the given (already promoted) type.
 * Handles all operand-kind combinations: imm+imm / imm+reg are normalized by
 * recursion; reg+reg emits a plain add; reg+stImm emits the adrp:lo12 form;
 * reg+imm uses the 12/24-bit immediate encodings when possible and otherwise
 * materializes the constant (possibly with a shifted-register add).
 */
void AArch64CGFunc::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
{
    Operand::OperandType opnd0Type = opnd0.GetKind();
    Operand::OperandType opnd1Type = opnd1.GetKind();
    uint32 dsize = GetPrimTypeBitSize(primType);
    bool is64Bits = (dsize == k64BitSize);
    if (opnd0Type != Operand::kOpdRegister) {
        /* add #imm, #imm */
        if (opnd1Type != Operand::kOpdRegister) {
            SelectAdd(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType);
            return;
        }
        /* add #imm, reg */
        SelectAdd(resOpnd, opnd1, opnd0, primType); /* commutative */
        return;
    }
    /* add reg, reg */
    if (opnd1Type == Operand::kOpdRegister) {
        DEBUG_ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI add");
        MOperator mOp =
            IsPrimitiveFloat(primType) ? (is64Bits ? MOP_dadd : MOP_sadd) : (is64Bits ? MOP_xaddrrr : MOP_waddrrr);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
        return;
    } else if (opnd1Type == Operand::kOpdStImmediate) {
        CHECK_FATAL(is64Bits, "baseReg of mem in aarch64 must be 64bit size");
        /* add reg, reg, #:lo12:sym+offset */
        StImmOperand &stImmOpnd = static_cast<StImmOperand &>(opnd1);
        Insn &newInsn = GetInsnBuilder()->BuildInsn(MOP_xadrpl12, resOpnd, opnd0, stImmOpnd);
        GetCurBB()->AppendInsn(newInsn);
        return;
    } else if (!((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset))) {
        /* add reg, otheregType */
        SelectAdd(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType);
        return;
    } else {
        /* add reg, #imm */
        ImmOperand *immOpnd = static_cast<ImmOperand *>(&opnd1);
        if (immOpnd->IsNegative()) {
            /* adding a negative constant is a subtraction of its magnitude */
            immOpnd->Negate();
            SelectSub(resOpnd, opnd0, *immOpnd, primType);
            return;
        }
        if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) {
            /*
             * ADD Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers
             * ADD Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers
             * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12
             * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0
             */
            MOperator mOpCode = MOP_undef;
            Operand *newOpnd0 = &opnd0;
            if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) ||
                  immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) {
                /* process higher 12 bits */
                ImmOperand &immOpnd2 =
                    CreateImmOperand(static_cast<int64>(static_cast<uint64>(immOpnd->GetValue()) >> kMaxImmVal12Bits),
                                     immOpnd->GetSize(), immOpnd->IsSignedValue());
                mOpCode = is64Bits ? MOP_xaddrri24 : MOP_waddrri24;
                /* after RA no fresh vreg is available: accumulate in resOpnd itself */
                Operand *tmpRes = IsAfterRegAlloc() ? &resOpnd : &CreateRegisterOperandOfType(primType);
                BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize);
                Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, *tmpRes, opnd0, immOpnd2, shiftopnd);
                GetCurBB()->AppendInsn(newInsn);
                immOpnd->ModuloByPow2(static_cast<int32>(kMaxImmVal12Bits));
                newOpnd0 = tmpRes;
            }
            /* process lower 12 bits */
            mOpCode = is64Bits ? MOP_xaddrri12 : MOP_waddrri12;
            Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *newOpnd0, *immOpnd);
            GetCurBB()->AppendInsn(newInsn);
            return;
        }
        /* load into register */
        int64 immVal = immOpnd->GetValue();
        int32 tail0bitNum = GetTail0BitNum(immVal);
        int32 head0bitNum = GetHead0BitNum(immVal);
        const int32 bitNum = (k64BitSizeInt - head0bitNum) - tail0bitNum;
        RegOperand &regOpnd = CreateRegisterOperandOfType(primType);
        if (isAfterRegAlloc) {
            /* after RA, use the reserved scratch register R16 to hold the constant */
            RegType regty = GetRegTyFromPrimTy(primType);
            uint32 bytelen = GetPrimTypeSize(primType);
            regOpnd = GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(R16), bytelen, regty);
        }
        regno_t regNO0 = static_cast<RegOperand &>(opnd0).GetRegisterNumber();
        /* addrrrs do not support sp */
        if (bitNum <= k16ValidBit && regNO0 != RSP) {
            /* significant bits fit in 16: mov the compact value, add with LSL by its offset */
            int64 newImm = (static_cast<uint64>(immVal) >> static_cast<uint32>(tail0bitNum)) & 0xFFFF;
            ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false);
            SelectCopyImm(regOpnd, immOpnd1, primType);
            uint32 mopBadd = is64Bits ? MOP_xaddrrrs : MOP_waddrrrs;
            int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
            BitShiftOperand &bitShiftOpnd =
                CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(tail0bitNum), bitLen);
            Insn &newInsn = GetInsnBuilder()->BuildInsn(mopBadd, resOpnd, opnd0, regOpnd, bitShiftOpnd);
            GetCurBB()->AppendInsn(newInsn);
            return;
        }

        /* general fallback: materialize the full constant, then add registers */
        SelectCopyImm(regOpnd, *immOpnd, primType);
        MOperator mOpCode = is64Bits ? MOP_xaddrrr : MOP_waddrrr;
        Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, regOpnd);
        GetCurBB()->AppendInsn(newInsn);
    }
}
1461
/*
 * Generate code computing "resOpnd = opnd0 - opnd1" for primType.
 * opnd0 is always materialized into a register first. Depending on opnd1:
 *   - register: a single sub/fsub instruction;
 *   - negative immediate: negated in place, then delegated to SelectAdd;
 *   - non-negative immediate encodable in 12 or 24 bits: immediate sub,
 *     possibly split into a shifted-by-12 sub plus a fix-up add;
 *   - otherwise: the constant is loaded into a scratch register (R16 after
 *     register allocation) and a register-register sub is emitted, using a
 *     shifted form when the significant bits of the constant fit in 16.
 */
void AArch64CGFunc::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
{
    Operand::OperandType opnd1Type = opnd1.GetKind();
    uint32 dsize = GetPrimTypeBitSize(primType);
    bool is64Bits = (dsize == k64BitSize);
    bool isFloat = IsPrimitiveFloat(primType);
    Operand *opnd0Bak = &LoadIntoRegister(opnd0, primType);
    if (opnd1Type == Operand::kOpdRegister) {
        /* reg - reg: one instruction */
        MOperator mOp = isFloat ? (is64Bits ? MOP_dsub : MOP_ssub) : (is64Bits ? MOP_xsubrrr : MOP_wsubrrr);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, *opnd0Bak, opnd1));
        return;
    }

    if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdOffset)) {
        /* neither register nor immediate: copy into a register and retry */
        SelectSub(resOpnd, *opnd0Bak, SelectCopy(opnd1, primType, primType), primType);
        return;
    }

    ImmOperand *immOpnd = static_cast<ImmOperand *>(&opnd1);
    if (immOpnd->IsNegative()) {
        /* a - (-c) == a + c; note: immOpnd is negated in place */
        immOpnd->Negate();
        SelectAdd(resOpnd, *opnd0Bak, *immOpnd, primType);
        return;
    }

    int64 higher12BitVal = static_cast<int64>(static_cast<uint64>(immOpnd->GetValue()) >> kMaxImmVal12Bits);
    if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0) && higher12BitVal + 1 <= kMaxPimm8) {
        /*
         * SUB Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers
         * SUB Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers
         * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12
         * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0
         * large offset is treated as sub (higher 12 bits + 4096) + add
         * it gives opportunities for combining add + ldr due to the characteristics of aarch64's load/store
         */
        MOperator mOpCode = MOP_undef;
        bool isSplitSub = false;
        if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) {
            isSplitSub = true;
            /* process higher 12 bits: subtract one extra 2^12 chunk ... */
            ImmOperand &immOpnd2 = CreateImmOperand(higher12BitVal + 1, immOpnd->GetSize(), immOpnd->IsSignedValue());

            mOpCode = is64Bits ? MOP_xsubrri24 : MOP_wsubrri24;
            BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize);
            Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, immOpnd2, shiftopnd);
            GetCurBB()->AppendInsn(newInsn);
            /* ... then add back the complement of the low 12 bits (immOpnd is rewritten in place) */
            immOpnd->ModuloByPow2(static_cast<int64>(kMaxImmVal12Bits));
            immOpnd->SetValue(static_cast<int64>(kMax12UnsignedImm) - immOpnd->GetValue());
            opnd0Bak = &resOpnd;
        }
        /* process lower 12 bits */
        mOpCode = isSplitSub ? (is64Bits ? MOP_xaddrri12 : MOP_waddrri12) : (is64Bits ? MOP_xsubrri12 : MOP_wsubrri12);
        Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, *immOpnd);
        GetCurBB()->AppendInsn(newInsn);
        return;
    }

    /* load into register */
    int64 immVal = immOpnd->GetValue();
    int32 tail0bitNum = GetTail0BitNum(immVal);
    int32 head0bitNum = GetHead0BitNum(immVal);
    const int32 bitNum = (k64BitSizeInt - head0bitNum) - tail0bitNum;
    RegOperand &regOpnd = CreateRegisterOperandOfType(primType);
    if (isAfterRegAlloc) {
        /* after register allocation no virtual regs may be created; use scratch register R16 */
        RegType regty = GetRegTyFromPrimTy(primType);
        uint32 bytelen = GetPrimTypeSize(primType);
        regOpnd = GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(R16), bytelen, regty);
    }

    if (bitNum <= k16ValidBit) {
        /* significant bits fit in 16: movz the shifted value, then sub with LSL #tail0bitNum */
        int64 newImm = (static_cast<uint64>(immVal) >> static_cast<uint32>(tail0bitNum)) & 0xFFFF;
        ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false);
        SelectCopyImm(regOpnd, immOpnd1, primType);
        uint32 mopBsub = is64Bits ? MOP_xsubrrrs : MOP_wsubrrrs;
        int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
        BitShiftOperand &bitShiftOpnd =
            CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(tail0bitNum), bitLen);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBsub, resOpnd, *opnd0Bak, regOpnd, bitShiftOpnd));
        return;
    }

    /* general case: materialize the full constant, then reg-reg sub */
    SelectCopyImm(regOpnd, *immOpnd, primType);
    MOperator mOpCode = is64Bits ? MOP_xsubrrr : MOP_wsubrrr;
    Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, regOpnd);
    GetCurBB()->AppendInsn(newInsn);
}
1548
SelectSub(BinaryNode & node,Operand & opnd0,Operand & opnd1,const BaseNode & parent)1549 Operand *AArch64CGFunc::SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1550 {
1551 PrimType dtype = node.GetPrimType();
1552 bool isSigned = IsSignedInteger(dtype);
1553 uint32 dsize = GetPrimTypeBitSize(dtype);
1554 bool is64Bits = (dsize == k64BitSize);
1555 bool isFloat = IsPrimitiveFloat(dtype);
1556 RegOperand *resOpnd = nullptr;
1557 PrimType primType =
1558 isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)));
1559 resOpnd = &GetOrCreateResOperand(parent, primType);
1560 SelectSub(*resOpnd, opnd0, opnd1, primType);
1561 return resOpnd;
1562 }
1563
SelectMpy(BinaryNode & node,Operand & opnd0,Operand & opnd1,const BaseNode & parent)1564 Operand *AArch64CGFunc::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1565 {
1566 PrimType dtype = node.GetPrimType();
1567 bool isSigned = IsSignedInteger(dtype);
1568 uint32 dsize = GetPrimTypeBitSize(dtype);
1569 bool is64Bits = (dsize == k64BitSize);
1570 bool isFloat = IsPrimitiveFloat(dtype);
1571 RegOperand *resOpnd = nullptr;
1572 PrimType primType =
1573 isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)));
1574 resOpnd = &GetOrCreateResOperand(parent, primType);
1575 SelectMpy(*resOpnd, opnd0, opnd1, primType);
1576 return resOpnd;
1577 }
1578
/*
 * Generate code computing "resOpnd = opnd0 * opnd1" for primType.
 * Integer multiplications with a constant operand are strength-reduced:
 *   - |imm| == 2^n:            lsl #n (then neg if imm < 0);
 *   - |imm| == (2^m + 1)*2^n:  lsl #m + add, then lsl #n (then neg if imm < 0);
 * otherwise the operands are forced into registers and a mul/fmul is emitted.
 */
void AArch64CGFunc::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
{
    Operand::OperandType opnd0Type = opnd0.GetKind();
    Operand::OperandType opnd1Type = opnd1.GetKind();
    uint32 dsize = GetPrimTypeBitSize(primType);
    bool is64Bits = (dsize == k64BitSize);

    if (((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset) ||
         (opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) &&
        IsPrimitiveInteger(primType)) {
        /* imm is whichever operand is constant; otherOp is the remaining one */
        ImmOperand *imm = ((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset))
                              ? static_cast<ImmOperand *>(&opnd0)
                              : static_cast<ImmOperand *>(&opnd1);
        Operand *otherOp =
            ((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset)) ? &opnd1 : &opnd0;
        int64 immValue = llabs(imm->GetValue());
        if (immValue != 0 && (static_cast<uint64>(immValue) & (static_cast<uint64>(immValue) - 1)) == 0) {
            /* immValue is 1 << n */
            if (otherOp->GetKind() != Operand::kOpdRegister) {
                otherOp = &SelectCopy(*otherOp, primType, primType);
            }
            /* __builtin_ffsll: 1-based index of the lowest set bit */
            int64 shiftVal = __builtin_ffsll(immValue);
            ImmOperand &shiftNum = CreateImmOperand(shiftVal - 1, dsize, false);
            SelectShift(resOpnd, *otherOp, shiftNum, kShiftLeft, primType);
            /* if the shift reaches the sign bit, negating would not change the result */
            bool reachSignBit = (is64Bits && (shiftVal == k64BitSize)) || (!is64Bits && (shiftVal == k32BitSize));
            if (imm->GetValue() < 0 && !reachSignBit) {
                SelectNeg(resOpnd, resOpnd, primType);
            }

            return;
        } else if (immValue > 2) { // immValue should larger than 2
            uint32 zeroNum = static_cast<uint32>(__builtin_ffsll(immValue) - 1);
            int64 headVal = static_cast<uint64>(immValue) >> zeroNum;
            /*
             * if (headVal - 1) & (headVal - 2) == 0, that is (immVal >> zeroNum) - 1 == 1 << n
             * otherOp * immVal = (otherOp * (immVal >> zeroNum) * (1 << zeroNum)
             * = (otherOp * ((immVal >> zeroNum) - 1) + otherOp) * (1 << zeroNum)
             */
            CHECK_FATAL(static_cast<uint64>(headVal) >= 2, "value overflow");
            // 2 see comment above
            if (((static_cast<uint64>(headVal) - 1) & (static_cast<uint64>(headVal) - 2)) == 0) {
                if (otherOp->GetKind() != Operand::kOpdRegister) {
                    otherOp = &SelectCopy(*otherOp, primType, primType);
                }
                /* tmp = otherOp << log2(headVal - 1); res = (otherOp + tmp) << zeroNum */
                ImmOperand &shiftNum1 = CreateImmOperand(__builtin_ffsll(headVal - 1) - 1, dsize, false);
                RegOperand &tmpOpnd = CreateRegisterOperandOfType(primType);
                SelectShift(tmpOpnd, *otherOp, shiftNum1, kShiftLeft, primType);
                SelectAdd(resOpnd, *otherOp, tmpOpnd, primType);
                ImmOperand &shiftNum2 = CreateImmOperand(zeroNum, dsize, false);
                SelectShift(resOpnd, resOpnd, shiftNum2, kShiftLeft, primType);
                if (imm->GetValue() < 0) {
                    SelectNeg(resOpnd, resOpnd, primType);
                }

                return;
            }
        }
    }

    if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) {
        SelectMpy(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType);
    } else if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) {
        SelectMpy(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType);
    } else if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) {
        /* multiplication commutes: swap so the register operand comes first */
        SelectMpy(resOpnd, opnd1, opnd0, primType);
    } else {
        DEBUG_ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI Mpy");
        MOperator mOp =
            IsPrimitiveFloat(primType) ? (is64Bits ? MOP_xvmuld : MOP_xvmuls) : (is64Bits ? MOP_xmulrrr : MOP_wmulrrr);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
    }
}
1651
/*
 * Generate code computing "resOpnd = origOpnd0 / opnd1" for primType.
 * At -O1 and above a division by a power-of-two constant is strength-reduced:
 *   signed:   x / 2^n -> add a sign correction (2^n - 1 when x < 0) then asr #n,
 *             negated when the divisor is negative; x / +-1 -> mov / neg;
 *   unsigned: x / 2^n -> lsr #n; a divisor whose signed value is negative
 *             (i.e. unsigned value > 2^(w-1)) makes the quotient 0 or 1,
 *             computed with cmp + cset CS (unsigned >=).
 * Otherwise sdiv/udiv/fdiv is emitted.
 */
void AArch64CGFunc::SelectDiv(Operand &resOpnd, Operand &origOpnd0, Operand &opnd1, PrimType primType)
{
    Operand &opnd0 = LoadIntoRegister(origOpnd0, primType);
    Operand::OperandType opnd0Type = opnd0.GetKind();
    Operand::OperandType opnd1Type = opnd1.GetKind();
    uint32 dsize = GetPrimTypeBitSize(primType);
    bool is64Bits = (dsize == k64BitSize);

    if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) {
        if (((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) &&
            IsSignedInteger(primType)) {
            ImmOperand *imm = static_cast<ImmOperand *>(&opnd1);
            int64 immValue = llabs(imm->GetValue());
            if ((immValue != 0) && (static_cast<uint64>(immValue) & (static_cast<uint64>(immValue) - 1)) == 0) {
                if (immValue == 1) {
                    /* x / 1 == x; x / -1 == -x */
                    if (imm->GetValue() > 0) {
                        uint32 mOp = is64Bits ? MOP_xmovrr : MOP_wmovrr;
                        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0));
                    } else {
                        SelectNeg(resOpnd, opnd0, primType);
                    }

                    return;
                }
                int32 shiftNumber = __builtin_ffsll(immValue) - 1;
                ImmOperand &shiftNum = CreateImmOperand(shiftNumber, dsize, false);
                Operand &tmpOpnd = CreateRegisterOperandOfType(primType);
                /* tmp = x >> (w-1): all-ones when x < 0, zero otherwise */
                SelectShift(tmpOpnd, opnd0, CreateImmOperand(dsize - 1, dsize, false), kShiftAright, primType);
                uint32 mopBadd = is64Bits ? MOP_xaddrrrs : MOP_waddrrrs;
                int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
                /* tmp = x + (tmp LSR (w - n)) == x + (2^n - 1 when x < 0 else 0): round toward zero */
                BitShiftOperand &shiftOpnd = CreateBitShiftOperand(BitShiftOperand::kLSR, dsize - shiftNumber, bitLen);
                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBadd, tmpOpnd, opnd0, tmpOpnd, shiftOpnd));
                SelectShift(resOpnd, tmpOpnd, shiftNum, kShiftAright, primType);
                if (imm->GetValue() < 0) {
                    SelectNeg(resOpnd, resOpnd, primType);
                }

                return;
            }
        } else if (((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) &&
                   IsUnsignedInteger(primType)) {
            ImmOperand *imm = static_cast<ImmOperand *>(&opnd1);
            if (imm->GetValue() != 0) {
                if ((imm->GetValue() > 0) &&
                    ((static_cast<uint64>(imm->GetValue()) & (static_cast<uint64>(imm->GetValue()) - 1)) == 0)) {
                    /* unsigned x / 2^n == x >> n */
                    ImmOperand &shiftNum = CreateImmOperand(__builtin_ffsll(imm->GetValue()) - 1, dsize, false);
                    SelectShift(resOpnd, opnd0, shiftNum, kShiftLright, primType);

                    return;
                } else if (imm->GetValue() < 0) {
                    /* divisor's unsigned value exceeds half the range: quotient is (x >= divisor) ? 1 : 0 */
                    SelectAArch64Cmp(opnd0, *imm, true, dsize);
                    SelectAArch64CSet(resOpnd, GetCondOperand(CC_CS), is64Bits);

                    return;
                }
            }
        }
    }

    if (opnd0Type != Operand::kOpdRegister) {
        SelectDiv(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType);
    } else if (opnd1Type != Operand::kOpdRegister) {
        SelectDiv(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType);
    } else {
        DEBUG_ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI Div");
        MOperator mOp = IsPrimitiveFloat(primType)
                            ? (is64Bits ? MOP_ddivrrr : MOP_sdivrrr)
                            : (IsSignedInteger(primType) ? (is64Bits ? MOP_xsdivrrr : MOP_wsdivrrr)
                                                         : (is64Bits ? MOP_xudivrrr : MOP_wudivrrr));
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
    }
}
1724
SelectDiv(BinaryNode & node,Operand & opnd0,Operand & opnd1,const BaseNode & parent)1725 Operand *AArch64CGFunc::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1726 {
1727 PrimType dtype = node.GetPrimType();
1728 bool isSigned = IsSignedInteger(dtype);
1729 uint32 dsize = GetPrimTypeBitSize(dtype);
1730 bool is64Bits = (dsize == k64BitSize);
1731 bool isFloat = IsPrimitiveFloat(dtype);
1732 /* promoted type */
1733 PrimType primType =
1734 isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)));
1735 RegOperand &resOpnd = GetOrCreateResOperand(parent, primType);
1736 SelectDiv(resOpnd, opnd0, opnd1, primType);
1737 return &resOpnd;
1738 }
1739
SelectRem(Operand & resOpnd,Operand & lhsOpnd,Operand & rhsOpnd,PrimType primType,bool isSigned,bool is64Bits)1740 void AArch64CGFunc::SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, PrimType primType, bool isSigned,
1741 bool is64Bits)
1742 {
1743 Operand &opnd0 = LoadIntoRegister(lhsOpnd, primType);
1744 Operand &opnd1 = LoadIntoRegister(rhsOpnd, primType);
1745
1746 DEBUG_ASSERT(IsPrimitiveInteger(primType), "Wrong type for REM");
1747 /*
1748 * printf("%d \n", 29 % 7 );
1749 * -> 1
1750 * printf("%u %d \n", (unsigned)-7, (unsigned)(-7) % 7 );
1751 * -> 4294967289 4
1752 * printf("%d \n", (-7) % 7 );
1753 * -> 0
1754 * printf("%d \n", 237 % -7 );
1755 * 6->
1756 * printf("implicit i->u conversion %d \n", ((unsigned)237) % -7 );
1757 * implicit conversion 237
1758
1759 * http://stackoverflow.com/questions/35351470/obtaining-remainder-using-single-aarch64-instruction
1760 * input: x0=dividend, x1=divisor
1761 * udiv|sdiv x2, x0, x1
1762 * msub x3, x2, x1, x0 -- multply-sub : x3 <- x0 - x2*x1
1763 * result: x2=quotient, x3=remainder
1764 *
1765 * allocate temporary register
1766 */
1767 RegOperand &temp = CreateRegisterOperandOfType(primType);
1768 /*
1769 * mov w1, #2
1770 * sdiv wTemp, w0, w1
1771 * msub wRespond, wTemp, w1, w0
1772 * ========>
1773 * asr wTemp, w0, #31
1774 * lsr wTemp, wTemp, #31 (#30 for 4, #29 for 8, ...)
1775 * add wRespond, w0, wTemp
1776 * and wRespond, wRespond, #1 (#3 for 4, #7 for 8, ...)
1777 * sub wRespond, wRespond, w2
1778 *
1779 * if divde by 2
1780 * ========>
1781 * lsr wTemp, w0, #31
1782 * add wRespond, w0, wTemp
1783 * and wRespond, wRespond, #1
1784 * sub wRespond, wRespond, w2
1785 *
1786 * for unsigned rem op, just use and
1787 */
1788 if ((Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2)) {
1789 ImmOperand *imm = nullptr;
1790 Insn *movImmInsn = GetCurBB()->GetLastMachineInsn();
1791 if (movImmInsn &&
1792 ((movImmInsn->GetMachineOpcode() == MOP_wmovri32) || (movImmInsn->GetMachineOpcode() == MOP_xmovri64)) &&
1793 movImmInsn->GetOperand(0).Equals(opnd1)) {
1794 /*
1795 * mov w1, #2
1796 * rem res, w0, w1
1797 */
1798 imm = static_cast<ImmOperand *>(&movImmInsn->GetOperand(kInsnSecondOpnd));
1799 } else if (opnd1.IsImmediate()) {
1800 /*
1801 * rem res, w0, #2
1802 */
1803 imm = static_cast<ImmOperand *>(&opnd1);
1804 }
1805 /* positive or negative do not have effect on the result */
1806 int64 dividor = 0;
1807 if (imm && (imm->GetValue() != LONG_MIN)) {
1808 dividor = abs(imm->GetValue());
1809 }
1810 const int64 Log2OfDividor = GetLog2(static_cast<uint64>(dividor));
1811 if ((dividor != 0) && (Log2OfDividor > 0)) {
1812 if (is64Bits) {
1813 CHECK_FATAL(Log2OfDividor < k64BitSize, "imm out of bound");
1814 if (isSigned) {
1815 ImmOperand &rightShiftValue = CreateImmOperand(k64BitSize - Log2OfDividor, k64BitSize, isSigned);
1816 if (Log2OfDividor != 1) {
1817 /* 63->shift ALL , 32 ->32bit register */
1818 ImmOperand &rightShiftAll = CreateImmOperand(63, k64BitSize, isSigned);
1819 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xasrrri6, temp, opnd0, rightShiftAll));
1820
1821 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xlsrrri6, temp, temp, rightShiftValue));
1822 } else {
1823 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xlsrrri6, temp, opnd0, rightShiftValue));
1824 }
1825 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrrr, resOpnd, opnd0, temp));
1826 ImmOperand &remBits = CreateImmOperand(dividor - 1, k64BitSize, isSigned);
1827 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, resOpnd, resOpnd, remBits));
1828 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xsubrrr, resOpnd, resOpnd, temp));
1829 return;
1830 } else if (imm && imm->GetValue() > 0) {
1831 ImmOperand &remBits = CreateImmOperand(dividor - 1, k64BitSize, isSigned);
1832 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, resOpnd, opnd0, remBits));
1833 return;
1834 }
1835 } else {
1836 CHECK_FATAL(Log2OfDividor < k32BitSize, "imm out of bound");
1837 if (isSigned) {
1838 ImmOperand &rightShiftValue = CreateImmOperand(k32BitSize - Log2OfDividor, k32BitSize, isSigned);
1839 if (Log2OfDividor != 1) {
1840 /* 31->shift ALL , 32 ->32bit register */
1841 ImmOperand &rightShiftAll = CreateImmOperand(31, k32BitSize, isSigned);
1842 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wasrrri5, temp, opnd0, rightShiftAll));
1843
1844 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wlsrrri5, temp, temp, rightShiftValue));
1845 } else {
1846 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wlsrrri5, temp, opnd0, rightShiftValue));
1847 }
1848
1849 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_waddrrr, resOpnd, opnd0, temp));
1850 ImmOperand &remBits = CreateImmOperand(dividor - 1, k32BitSize, isSigned);
1851 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, resOpnd, resOpnd, remBits));
1852
1853 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wsubrrr, resOpnd, resOpnd, temp));
1854 return;
1855 } else if (imm && imm->GetValue() > 0) {
1856 ImmOperand &remBits = CreateImmOperand(dividor - 1, k32BitSize, isSigned);
1857 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, resOpnd, opnd0, remBits));
1858 return;
1859 }
1860 }
1861 }
1862 }
1863
1864 uint32 mopDiv = is64Bits ? (isSigned ? MOP_xsdivrrr : MOP_xudivrrr) : (isSigned ? MOP_wsdivrrr : MOP_wudivrrr);
1865 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopDiv, temp, opnd0, opnd1));
1866
1867 uint32 mopSub = is64Bits ? MOP_xmsubrrrr : MOP_wmsubrrrr;
1868 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopSub, resOpnd, temp, opnd1, opnd0));
1869 }
1870
SelectRem(BinaryNode & node,Operand & opnd0,Operand & opnd1,const BaseNode & parent)1871 Operand *AArch64CGFunc::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1872 {
1873 PrimType dtype = node.GetPrimType();
1874 DEBUG_ASSERT(IsPrimitiveInteger(dtype), "wrong type for rem");
1875 bool isSigned = IsSignedInteger(dtype);
1876 uint32 dsize = GetPrimTypeBitSize(dtype);
1877 bool is64Bits = (dsize == k64BitSize);
1878
1879 /* promoted type */
1880 PrimType primType = ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)));
1881 RegOperand &resOpnd = GetOrCreateResOperand(parent, primType);
1882 SelectRem(resOpnd, opnd0, opnd1, primType, isSigned, is64Bits);
1883 return &resOpnd;
1884 }
1885
/*
 * Generate code producing the integer result of "opnd0 <opcode> opnd1" in resOpnd.
 * OP_cmp (three-way compare) yields -1/0/1 via csinv + csinc; all other compare
 * opcodes yield 0/1 via cmp/fcmp + cset with a condition code picked from the
 * opcode and signedness. "signed x < 0" is reduced to extracting x's sign bit,
 * and "unsigned x < 0" folds to the constant 0.
 */
void AArch64CGFunc::SelectCmpOp(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, Opcode opcode, PrimType primType,
                                const BaseNode &parent)
{
    uint32 dsize = resOpnd.GetSize();
    bool isFloat = IsPrimitiveFloat(primType);
    Operand &opnd0 = LoadIntoRegister(lhsOpnd, primType);

    /*
     * most of FP constants are passed as MemOperand
     * except 0.0 which is passed as kOpdFPImmediate
     */
    Operand::OperandType opnd1Type = rhsOpnd.GetKind();
    Operand *opnd1 = &rhsOpnd;
    if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) &&
        (opnd1Type != Operand::kOpdOffset)) {
        opnd1 = &LoadIntoRegister(rhsOpnd, primType);
    }

    bool unsignedIntegerComparison = !isFloat && !IsSignedInteger(primType);
    /*
     * OP_cmp, OP_cmpl, OP_cmpg
     * <cmp> OP0, OP1 ; fcmp for OP_cmpl/OP_cmpg, cmp/fcmpe for OP_cmp
     * CSINV RES, WZR, WZR, GE
     * CSINC RES, RES, WZR, LE
     * if OP_cmpl, CSINV RES, RES, WZR, VC (no overflow)
     * if OP_cmpg, CSINC RES, RES, WZR, VC (no overflow)
     */
    RegOperand &xzr = GetZeroOpnd(dsize);
    if (opcode == OP_cmp) {
        SelectAArch64Cmp(opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(primType));
        /* csinv: res = -1 if "less"; csinc: res = res + (0 if "greater") -> -1/0/1 */
        if (unsignedIntegerComparison) {
            SelectAArch64CSINV(resOpnd, xzr, xzr, GetCondOperand(CC_HS), (dsize == k64BitSize));
            SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LS), (dsize == k64BitSize));
        } else {
            SelectAArch64CSINV(resOpnd, xzr, xzr, GetCondOperand(CC_GE), (dsize == k64BitSize));
            SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LE), (dsize == k64BitSize));
        }
        return;
    }

    // lt u8 i32 ( xxx, 0 ) => get sign bit
    if ((opcode == OP_lt) && opnd0.IsRegister() && opnd1->IsImmediate() &&
        (static_cast<ImmOperand *>(opnd1)->GetValue() == 0) && !isFloat) {
        bool is64Bits = (opnd0.GetSize() == k64BitSize);
        if (!unsignedIntegerComparison) {
            /* signed x < 0 == sign bit of x: logical shift right by width - 1 */
            int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
            ImmOperand &shiftNum = CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits,
                                                   static_cast<uint32>(bitLen), false);
            MOperator mOpCode = is64Bits ? MOP_xlsrrri6 : MOP_wlsrrri5;
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, shiftNum));
            return;
        }
        /* unsigned x < 0 is always false */
        ImmOperand &constNum = CreateImmOperand(0, is64Bits ? k64BitSize : k32BitSize, false);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(is64Bits ? MOP_xmovri64 : MOP_wmovri32, resOpnd, constNum));
        return;
    }
    SelectAArch64Cmp(opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(primType));

    ConditionCode cc = CC_EQ;
    // need to handle unordered situation here.
    switch (opcode) {
        case OP_eq:
            cc = CC_EQ;
            break;
        case OP_ne:
            /* float != starts from MI and is widened by the extra csinc below */
            cc = isFloat ? CC_MI : CC_NE;
            break;
        case OP_le:
            cc = isFloat ? CC_LS : unsignedIntegerComparison ? CC_LS : CC_LE;
            break;
        case OP_ge:
            cc = unsignedIntegerComparison ? CC_HS : CC_GE;
            break;
        case OP_gt:
            cc = unsignedIntegerComparison ? CC_HI : CC_GT;
            break;
        case OP_lt:
            cc = isFloat ? CC_MI : unsignedIntegerComparison ? CC_LO : CC_LT;
            break;
        default:
            CHECK_FATAL(false, "illegal logical operator");
    }
    SelectAArch64CSet(resOpnd, GetCondOperand(cc), (dsize == k64BitSize));
    if (isFloat && opcode == OP_ne) {
        /* NOTE(review): extra csinc on LE patches float != for flag states MI misses — confirm unordered semantics */
        SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LE), (dsize == k64BitSize));
    }
}
1973
SelectCmpOp(CompareNode & node,Operand & opnd0,Operand & opnd1,const BaseNode & parent)1974 Operand *AArch64CGFunc::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1975 {
1976 RegOperand *resOpnd = &GetOrCreateResOperand(parent, node.GetPrimType());
1977 SelectCmpOp(*resOpnd, opnd0, opnd1, node.GetOpCode(), node.GetOpndType(), parent);
1978 return resOpnd;
1979 }
1980
SelectTargetFPCmpQuiet(Operand & o0,Operand & o1,uint32 dsize)1981 void AArch64CGFunc::SelectTargetFPCmpQuiet(Operand &o0, Operand &o1, uint32 dsize)
1982 {
1983 MOperator mOpCode = 0;
1984 if (o1.GetKind() == Operand::kOpdFPImmediate) {
1985 CHECK_FATAL(static_cast<ImmOperand &>(o0).GetValue() == 0, "NIY");
1986 mOpCode = (dsize == k64BitSize) ? MOP_dcmpqri : (dsize == k32BitSize) ? MOP_scmpqri : MOP_hcmpqri;
1987 } else if (o1.GetKind() == Operand::kOpdRegister) {
1988 mOpCode = (dsize == k64BitSize) ? MOP_dcmpqrr : (dsize == k32BitSize) ? MOP_scmpqrr : MOP_hcmpqrr;
1989 } else {
1990 CHECK_FATAL(false, "unsupported operand type");
1991 }
1992 Operand &rflag = GetOrCreateRflag();
1993 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, rflag, o0, o1));
1994 }
1995
SelectAArch64Cmp(Operand & o0,Operand & o1,bool isIntType,uint32 dsize)1996 void AArch64CGFunc::SelectAArch64Cmp(Operand &o0, Operand &o1, bool isIntType, uint32 dsize)
1997 {
1998 MOperator mOpCode = 0;
1999 Operand *newO1 = &o1;
2000 if (isIntType) {
2001 if ((o1.GetKind() == Operand::kOpdImmediate) || (o1.GetKind() == Operand::kOpdOffset)) {
2002 ImmOperand *immOpnd = static_cast<ImmOperand *>(&o1);
2003 /*
2004 * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12
2005 * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0
2006 */
2007 if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) {
2008 mOpCode = (dsize == k64BitSize) ? MOP_xcmpri : MOP_wcmpri;
2009 } else {
2010 /* load into register */
2011 PrimType ptype = (dsize == k64BitSize) ? PTY_i64 : PTY_i32;
2012 newO1 = &SelectCopy(o1, ptype, ptype);
2013 mOpCode = (dsize == k64BitSize) ? MOP_xcmprr : MOP_wcmprr;
2014 }
2015 } else if (o1.GetKind() == Operand::kOpdRegister) {
2016 mOpCode = (dsize == k64BitSize) ? MOP_xcmprr : MOP_wcmprr;
2017 } else {
2018 CHECK_FATAL(false, "unsupported operand type");
2019 }
2020 } else { /* float */
2021 if (o1.GetKind() == Operand::kOpdFPImmediate) {
2022 CHECK_FATAL(static_cast<ImmOperand &>(o1).GetValue() == 0, "NIY");
2023 mOpCode = (dsize == k64BitSize) ? MOP_dcmperi : ((dsize == k32BitSize) ? MOP_scmperi : MOP_hcmperi);
2024 } else if (o1.GetKind() == Operand::kOpdRegister) {
2025 mOpCode = (dsize == k64BitSize) ? MOP_dcmperr : ((dsize == k32BitSize) ? MOP_scmperr : MOP_hcmperr);
2026 } else {
2027 CHECK_FATAL(false, "unsupported operand type");
2028 }
2029 }
2030 DEBUG_ASSERT(mOpCode != 0, "mOpCode undefined");
2031 Operand &rflag = GetOrCreateRflag();
2032 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, rflag, o0, *newO1));
2033 }
2034
SelectAArch64CSet(Operand & r,CondOperand & cond,bool is64Bits)2035 void AArch64CGFunc::SelectAArch64CSet(Operand &r, CondOperand &cond, bool is64Bits)
2036 {
2037 MOperator mOpCode = is64Bits ? MOP_xcsetrc : MOP_wcsetrc;
2038 Operand &rflag = GetOrCreateRflag();
2039 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, r, cond, rflag));
2040 }
2041
SelectAArch64CSINV(Operand & res,Operand & o0,Operand & o1,CondOperand & cond,bool is64Bits)2042 void AArch64CGFunc::SelectAArch64CSINV(Operand &res, Operand &o0, Operand &o1, CondOperand &cond, bool is64Bits)
2043 {
2044 MOperator mOpCode = is64Bits ? MOP_xcsinvrrrc : MOP_wcsinvrrrc;
2045 Operand &rflag = GetOrCreateRflag();
2046 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, res, o0, o1, cond, rflag));
2047 }
2048
SelectAArch64CSINC(Operand & res,Operand & o0,Operand & o1,CondOperand & cond,bool is64Bits)2049 void AArch64CGFunc::SelectAArch64CSINC(Operand &res, Operand &o0, Operand &o1, CondOperand &cond, bool is64Bits)
2050 {
2051 MOperator mOpCode = is64Bits ? MOP_xcsincrrrc : MOP_wcsincrrrc;
2052 Operand &rflag = GetOrCreateRflag();
2053 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, res, o0, o1, cond, rflag));
2054 }
2055
SelectBand(BinaryNode & node,Operand & opnd0,Operand & opnd1,const BaseNode & parent)2056 Operand *AArch64CGFunc::SelectBand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
2057 {
2058 return SelectRelationOperator(kAND, node, opnd0, opnd1, parent);
2059 }
2060
SelectBand(Operand & resOpnd,Operand & opnd0,Operand & opnd1,PrimType primType)2061 void AArch64CGFunc::SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
2062 {
2063 SelectRelationOperator(kAND, resOpnd, opnd0, opnd1, primType);
2064 }
2065
SelectRelationOperator(RelationOperator operatorCode,const BinaryNode & node,Operand & opnd0,Operand & opnd1,const BaseNode & parent)2066 Operand *AArch64CGFunc::SelectRelationOperator(RelationOperator operatorCode, const BinaryNode &node, Operand &opnd0,
2067 Operand &opnd1, const BaseNode &parent)
2068 {
2069 PrimType dtype = node.GetPrimType();
2070 bool isSigned = IsSignedInteger(dtype);
2071 uint32 dsize = GetPrimTypeBitSize(dtype);
2072 bool is64Bits = (dsize == k64BitSize);
2073 PrimType primType =
2074 is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32); /* promoted type */
2075 RegOperand *resOpnd = &GetOrCreateResOperand(parent, primType);
2076 SelectRelationOperator(operatorCode, *resOpnd, opnd0, opnd1, primType);
2077 return resOpnd;
2078 }
2079
SelectRelationMop(RelationOperator operatorCode,RelationOperatorOpndPattern opndPattern,bool is64Bits,bool isBitmaskImmediate,bool isBitNumLessThan16) const2080 MOperator AArch64CGFunc::SelectRelationMop(RelationOperator operatorCode, RelationOperatorOpndPattern opndPattern,
2081 bool is64Bits, bool isBitmaskImmediate, bool isBitNumLessThan16) const
2082 {
2083 MOperator mOp = MOP_undef;
2084 if (opndPattern == kRegReg) {
2085 switch (operatorCode) {
2086 case kAND:
2087 mOp = is64Bits ? MOP_xandrrr : MOP_wandrrr;
2088 break;
2089 case kIOR:
2090 mOp = is64Bits ? MOP_xiorrrr : MOP_wiorrrr;
2091 break;
2092 case kEOR:
2093 mOp = is64Bits ? MOP_xeorrrr : MOP_weorrrr;
2094 break;
2095 default:
2096 break;
2097 }
2098 return mOp;
2099 }
2100 /* opndPattern == KRegImm */
2101 if (isBitmaskImmediate) {
2102 switch (operatorCode) {
2103 case kAND:
2104 mOp = is64Bits ? MOP_xandrri13 : MOP_wandrri12;
2105 break;
2106 case kIOR:
2107 mOp = is64Bits ? MOP_xiorrri13 : MOP_wiorrri12;
2108 break;
2109 case kEOR:
2110 mOp = is64Bits ? MOP_xeorrri13 : MOP_weorrri12;
2111 break;
2112 default:
2113 break;
2114 }
2115 return mOp;
2116 }
2117 /* normal imm value */
2118 if (isBitNumLessThan16) {
2119 switch (operatorCode) {
2120 case kAND:
2121 mOp = is64Bits ? MOP_xandrrrs : MOP_wandrrrs;
2122 break;
2123 case kIOR:
2124 mOp = is64Bits ? MOP_xiorrrrs : MOP_wiorrrrs;
2125 break;
2126 case kEOR:
2127 mOp = is64Bits ? MOP_xeorrrrs : MOP_weorrrrs;
2128 break;
2129 default:
2130 break;
2131 }
2132 return mOp;
2133 }
2134 return mOp;
2135 }
2136
/*
 * Lower a bitwise relation operation (kAND / kIOR / kEOR) into AArch64 code:
 *   resOpnd = opnd0 <op> opnd1
 * Operands may be registers or immediates.  Non-register forms are first
 * normalized by recursion (imm/imm loads the left side; imm/reg is swapped,
 * which is valid because these operators are commutative), then the cheapest
 * encoding is chosen for an immediate right-hand side.
 */
void AArch64CGFunc::SelectRelationOperator(RelationOperator operatorCode, Operand &resOpnd, Operand &opnd0,
                                           Operand &opnd1, PrimType primType)
{
    Operand::OperandType opnd0Type = opnd0.GetKind();
    Operand::OperandType opnd1Type = opnd1.GetKind();
    uint32 dsize = GetPrimTypeBitSize(primType);
    bool is64Bits = (dsize == k64BitSize);
    /* op #imm, #imm : materialize the first immediate into a register and retry */
    if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) {
        SelectRelationOperator(operatorCode, resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType);
        return;
    }
    /* op #imm, reg -> op reg, #imm (AND/OR/XOR are commutative) */
    if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) {
        SelectRelationOperator(operatorCode, resOpnd, opnd1, opnd0, primType);
        return;
    }
    /* op reg, reg */
    if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) {
        DEBUG_ASSERT(IsPrimitiveInteger(primType), "NYI band");
        MOperator mOp = SelectRelationMop(operatorCode, kRegReg, is64Bits, false, false);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
        return;
    }
    /* op reg, #imm */
    if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) {
        /* right side is neither immediate nor offset (e.g. memory): load it, then retry */
        if (!((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset))) {
            SelectRelationOperator(operatorCode, resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType);
            return;
        }

        ImmOperand *immOpnd = static_cast<ImmOperand *>(&opnd1);
        if (immOpnd->IsZero()) {
            /* algebraic identities: x & 0 == 0, x | 0 == x, x ^ 0 == x */
            if (operatorCode == kAND) {
                uint32 mopMv = is64Bits ? MOP_xmovrr : MOP_wmovrr;
                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopMv, resOpnd, GetZeroOpnd(dsize)));
            } else if ((operatorCode == kIOR) || (operatorCode == kEOR)) {
                SelectCopy(resOpnd, primType, opnd0, primType);
            }
        } else if ((immOpnd->IsAllOnes()) || (!is64Bits && immOpnd->IsAllOnes32bit())) {
            /* all-ones mask: x & ~0 == x, x | ~0 == ~0 (movn #0), x ^ ~0 == ~x (mvn) */
            if (operatorCode == kAND) {
                SelectCopy(resOpnd, primType, opnd0, primType);
            } else if (operatorCode == kIOR) {
                uint32 mopMovn = is64Bits ? MOP_xmovnri16 : MOP_wmovnri16;
                ImmOperand &src16 = CreateImmOperand(0, k16BitSize, false);
                BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(0, is64Bits);
                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopMovn, resOpnd, src16, *lslOpnd));
            } else if (operatorCode == kEOR) {
                SelectMvn(resOpnd, opnd0, primType);
            }
        } else if (immOpnd->IsBitmaskImmediate()) {
            /* encodable as an A64 logical immediate: single and/orr/eor instruction */
            MOperator mOp = SelectRelationMop(operatorCode, kRegImm, is64Bits, true, false);
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
        } else {
            /* general immediate: materialize it into a register, possibly shifted */
            int64 immVal = immOpnd->GetValue();
            int32 tail0BitNum = GetTail0BitNum(immVal);
            int32 head0BitNum = GetHead0BitNum(immVal);
            const int32 bitNum = (k64BitSizeInt - head0BitNum) - tail0BitNum;
            RegOperand &regOpnd = CreateRegisterOperandOfType(primType);

            if (bitNum <= k16ValidBit) {
                /* significant bits fit in 16: mov the 16-bit chunk, combine with op ... , LSL #tail0BitNum */
                int64 newImm = (static_cast<uint64>(immVal) >> static_cast<uint32>(tail0BitNum)) & 0xFFFF;
                ImmOperand &immOpnd1 = CreateImmOperand(newImm, k32BitSize, false);
                SelectCopyImm(regOpnd, immOpnd1, primType);
                MOperator mOp = SelectRelationMop(operatorCode, kRegImm, is64Bits, false, true);
                int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
                BitShiftOperand &shiftOpnd =
                    CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(tail0BitNum), bitLen);
                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, regOpnd, shiftOpnd));
            } else {
                /* otherwise load the full immediate and fall back to the reg/reg form */
                SelectCopyImm(regOpnd, *immOpnd, primType);
                MOperator mOp = SelectRelationMop(operatorCode, kRegReg, is64Bits, false, false);
                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, regOpnd));
            }
        }
    }
}
2214
SelectBior(BinaryNode & node,Operand & opnd0,Operand & opnd1,const BaseNode & parent)2215 Operand *AArch64CGFunc::SelectBior(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
2216 {
2217 return SelectRelationOperator(kIOR, node, opnd0, opnd1, parent);
2218 }
2219
SelectBior(Operand & resOpnd,Operand & opnd0,Operand & opnd1,PrimType primType)2220 void AArch64CGFunc::SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
2221 {
2222 SelectRelationOperator(kIOR, resOpnd, opnd0, opnd1, primType);
2223 }
2224
SelectMinOrMax(bool isMin,const BinaryNode & node,Operand & opnd0,Operand & opnd1,const BaseNode & parent)2225 Operand *AArch64CGFunc::SelectMinOrMax(bool isMin, const BinaryNode &node, Operand &opnd0, Operand &opnd1,
2226 const BaseNode &parent)
2227 {
2228 PrimType dtype = node.GetPrimType();
2229 bool isSigned = IsSignedInteger(dtype);
2230 uint32 dsize = GetPrimTypeBitSize(dtype);
2231 bool is64Bits = (dsize == k64BitSize);
2232 bool isFloat = IsPrimitiveFloat(dtype);
2233 /* promoted type */
2234 PrimType primType = isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32));
2235 RegOperand &resOpnd = GetOrCreateResOperand(parent, primType);
2236 SelectMinOrMax(isMin, resOpnd, opnd0, opnd1, primType);
2237 return &resOpnd;
2238 }
2239
SelectMinOrMax(bool isMin,Operand & resOpnd,Operand & opnd0,Operand & opnd1,PrimType primType)2240 void AArch64CGFunc::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
2241 {
2242 uint32 dsize = GetPrimTypeBitSize(primType);
2243 bool is64Bits = (dsize == k64BitSize);
2244 if (IsPrimitiveInteger(primType)) {
2245 RegOperand ®Opnd0 = LoadIntoRegister(opnd0, primType);
2246 Operand ®Opnd1 = LoadIntoRegister(opnd1, primType);
2247 SelectAArch64Cmp(regOpnd0, regOpnd1, true, dsize);
2248 Operand &newResOpnd = LoadIntoRegister(resOpnd, primType);
2249 if (isMin) {
2250 CondOperand &cc = IsSignedInteger(primType) ? GetCondOperand(CC_LT) : GetCondOperand(CC_LO);
2251 SelectAArch64Select(newResOpnd, regOpnd0, regOpnd1, cc, true, dsize);
2252 } else {
2253 CondOperand &cc = IsSignedInteger(primType) ? GetCondOperand(CC_GT) : GetCondOperand(CC_HI);
2254 SelectAArch64Select(newResOpnd, regOpnd0, regOpnd1, cc, true, dsize);
2255 }
2256 } else if (IsPrimitiveFloat(primType)) {
2257 RegOperand ®Opnd0 = LoadIntoRegister(opnd0, primType);
2258 RegOperand ®Opnd1 = LoadIntoRegister(opnd1, primType);
2259 SelectFMinFMax(resOpnd, regOpnd0, regOpnd1, is64Bits, isMin);
2260 } else {
2261 CHECK_FATAL(false, "NIY type max or min");
2262 }
2263 }
2264
SelectMin(BinaryNode & node,Operand & opnd0,Operand & opnd1,const BaseNode & parent)2265 Operand *AArch64CGFunc::SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
2266 {
2267 return SelectMinOrMax(true, node, opnd0, opnd1, parent);
2268 }
2269
SelectMin(Operand & resOpnd,Operand & opnd0,Operand & opnd1,PrimType primType)2270 void AArch64CGFunc::SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
2271 {
2272 SelectMinOrMax(true, resOpnd, opnd0, opnd1, primType);
2273 }
2274
SelectMax(BinaryNode & node,Operand & opnd0,Operand & opnd1,const BaseNode & parent)2275 Operand *AArch64CGFunc::SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
2276 {
2277 return SelectMinOrMax(false, node, opnd0, opnd1, parent);
2278 }
2279
SelectMax(Operand & resOpnd,Operand & opnd0,Operand & opnd1,PrimType primType)2280 void AArch64CGFunc::SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
2281 {
2282 SelectMinOrMax(false, resOpnd, opnd0, opnd1, primType);
2283 }
2284
SelectFMinFMax(Operand & resOpnd,Operand & opnd0,Operand & opnd1,bool is64Bits,bool isMin)2285 void AArch64CGFunc::SelectFMinFMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, bool is64Bits, bool isMin)
2286 {
2287 uint32 mOpCode = isMin ? (is64Bits ? MOP_xfminrrr : MOP_wfminrrr) : (is64Bits ? MOP_xfmaxrrr : MOP_wfmaxrrr);
2288 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, opnd1));
2289 }
2290
SelectBxor(BinaryNode & node,Operand & opnd0,Operand & opnd1,const BaseNode & parent)2291 Operand *AArch64CGFunc::SelectBxor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
2292 {
2293 return SelectRelationOperator(kEOR, node, opnd0, opnd1, parent);
2294 }
2295
SelectBxor(Operand & resOpnd,Operand & opnd0,Operand & opnd1,PrimType primType)2296 void AArch64CGFunc::SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
2297 {
2298 SelectRelationOperator(kEOR, resOpnd, opnd0, opnd1, primType);
2299 }
2300
SelectShift(BinaryNode & node,Operand & opnd0,Operand & opnd1,const BaseNode & parent)2301 Operand *AArch64CGFunc::SelectShift(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
2302 {
2303 PrimType dtype = node.GetPrimType();
2304 bool isSigned = IsSignedInteger(dtype);
2305 uint32 dsize = GetPrimTypeBitSize(dtype);
2306 bool is64Bits = (dsize == k64BitSize);
2307 bool isFloat = IsPrimitiveFloat(dtype);
2308 RegOperand *resOpnd = nullptr;
2309 Opcode opcode = node.GetOpCode();
2310
2311 PrimType primType =
2312 isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32));
2313 resOpnd = &GetOrCreateResOperand(parent, primType);
2314 ShiftDirection direct = (opcode == OP_lshr) ? kShiftLright : ((opcode == OP_ashr) ? kShiftAright : kShiftLeft);
2315 SelectShift(*resOpnd, opnd0, opnd1, direct, primType);
2316
2317 if (dtype == PTY_i16) {
2318 MOperator exOp = is64Bits ? MOP_xsxth64 : MOP_xsxth32;
2319 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(exOp, *resOpnd, *resOpnd));
2320 } else if (dtype == PTY_i8) {
2321 MOperator exOp = is64Bits ? MOP_xsxtb64 : MOP_xsxtb32;
2322 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(exOp, *resOpnd, *resOpnd));
2323 }
2324 return resOpnd;
2325 }
2326
SelectBxorShift(Operand & resOpnd,Operand * opnd0,Operand * opnd1,Operand & opnd2,PrimType primType)2327 void AArch64CGFunc::SelectBxorShift(Operand &resOpnd, Operand *opnd0, Operand *opnd1, Operand &opnd2, PrimType primType)
2328 {
2329 opnd0 = &LoadIntoRegister(*opnd0, primType);
2330 opnd1 = &LoadIntoRegister(*opnd1, primType);
2331 uint32 dsize = GetPrimTypeBitSize(primType);
2332 bool is64Bits = (dsize == k64BitSize);
2333 MOperator mopBxor = is64Bits ? MOP_xeorrrrs : MOP_weorrrrs;
2334 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBxor, resOpnd, *opnd0, *opnd1, opnd2));
2335 }
2336
SelectShift(Operand & resOpnd,Operand & opnd0,Operand & opnd1,ShiftDirection direct,PrimType primType)2337 void AArch64CGFunc::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, ShiftDirection direct,
2338 PrimType primType)
2339 {
2340 Operand::OperandType opnd1Type = opnd1.GetKind();
2341 uint32 dsize = GetPrimTypeBitSize(primType);
2342 bool is64Bits = (dsize == k64BitSize);
2343 Operand *firstOpnd = &LoadIntoRegister(opnd0, primType);
2344
2345 MOperator mopShift;
2346 if ((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) {
2347 ImmOperand *immOpnd1 = static_cast<ImmOperand *>(&opnd1);
2348 const int64 kVal = immOpnd1->GetValue();
2349 const uint32 kShiftamt = is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits;
2350 if (kVal == 0) {
2351 SelectCopy(resOpnd, primType, *firstOpnd, primType);
2352 return;
2353 }
2354 /* e.g. a >> -1 */
2355 if ((kVal < 0) || (kVal > kShiftamt)) {
2356 SelectShift(resOpnd, *firstOpnd, SelectCopy(opnd1, primType, primType), direct, primType);
2357 return;
2358 }
2359 switch (direct) {
2360 case kShiftLeft:
2361 mopShift = is64Bits ? MOP_xlslrri6 : MOP_wlslrri5;
2362 break;
2363 case kShiftAright:
2364 mopShift = is64Bits ? MOP_xasrrri6 : MOP_wasrrri5;
2365 break;
2366 case kShiftLright:
2367 mopShift = is64Bits ? MOP_xlsrrri6 : MOP_wlsrrri5;
2368 break;
2369 }
2370 } else if (opnd1Type != Operand::kOpdRegister) {
2371 SelectShift(resOpnd, *firstOpnd, SelectCopy(opnd1, primType, primType), direct, primType);
2372 return;
2373 } else {
2374 switch (direct) {
2375 case kShiftLeft:
2376 mopShift = is64Bits ? MOP_xlslrrr : MOP_wlslrrr;
2377 break;
2378 case kShiftAright:
2379 mopShift = is64Bits ? MOP_xasrrrr : MOP_wasrrrr;
2380 break;
2381 case kShiftLright:
2382 mopShift = is64Bits ? MOP_xlsrrrr : MOP_wlsrrrr;
2383 break;
2384 }
2385 }
2386
2387 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopShift, resOpnd, *firstOpnd, opnd1));
2388 }
2389
/*
 * Fuse abs(a - b): rewrite the preceding sub instruction (lastInsn) into its
 * flag-setting subs form, then emit a csneg on MI so the difference is negated
 * exactly when it is negative.  newOpnd0 is the register holding the sub
 * result; returns the register holding |a - b|.
 */
Operand *AArch64CGFunc::SelectAbsSub(Insn &lastInsn, const UnaryNode &node, Operand &newOpnd0)
{
    PrimType dtyp = node.GetPrimType();
    bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize);
    /* promoted type */
    PrimType primType = is64Bits ? (PTY_i64) : (PTY_i32);
    RegOperand &resOpnd = CreateRegisterOperandOfType(primType);
    uint32 mopCsneg = is64Bits ? MOP_xcnegrrrc : MOP_wcnegrrrc;
    /* ABS requires the operand be interpreted as a signed integer */
    CondOperand &condOpnd = GetCondOperand(CC_MI);
    MOperator newMop = AArch64isa::GetMopSub2Subs(lastInsn);
    Operand &rflag = GetOrCreateRflag();
    /* rebuild the sub as subs: the flag register is prepended to its operand list */
    std::vector<Operand *> opndVec;
    opndVec.push_back(&rflag);
    for (uint32 i = 0; i < lastInsn.GetOperandSize(); i++) {
        opndVec.push_back(&lastInsn.GetOperand(i));
    }
    Insn *subsInsn = &GetInsnBuilder()->BuildInsn(newMop, opndVec);
    GetCurBB()->ReplaceInsn(lastInsn, *subsInsn);
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopCsneg, resOpnd, newOpnd0, condOpnd, rflag));
    return &resOpnd;
}
2412
SelectAbs(UnaryNode & node,Operand & opnd0)2413 Operand *AArch64CGFunc::SelectAbs(UnaryNode &node, Operand &opnd0)
2414 {
2415 PrimType dtyp = node.GetPrimType();
2416 if (IsPrimitiveFloat(dtyp)) {
2417 CHECK_FATAL(GetPrimTypeBitSize(dtyp) >= k32BitSize, "We don't support hanf-word FP operands yet");
2418 bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize);
2419 Operand &newOpnd0 = LoadIntoRegister(opnd0, dtyp);
2420 RegOperand &resOpnd = CreateRegisterOperandOfType(dtyp);
2421 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(is64Bits ? MOP_dabsrr : MOP_sabsrr, resOpnd, newOpnd0));
2422 return &resOpnd;
2423 } else {
2424 bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize);
2425 /* promoted type */
2426 PrimType primType = is64Bits ? (PTY_i64) : (PTY_i32);
2427 Operand &newOpnd0 = LoadIntoRegister(opnd0, primType);
2428 Insn *lastInsn = GetCurBB()->GetLastMachineInsn();
2429 if (lastInsn != nullptr && AArch64isa::IsSub(*lastInsn)) {
2430 Operand &dest = lastInsn->GetOperand(kInsnFirstOpnd);
2431 Operand &opd1 = lastInsn->GetOperand(kInsnSecondOpnd);
2432 Operand &opd2 = lastInsn->GetOperand(kInsnThirdOpnd);
2433 regno_t absReg = static_cast<RegOperand &>(newOpnd0).GetRegisterNumber();
2434 if ((dest.IsRegister() && static_cast<RegOperand &>(dest).GetRegisterNumber() == absReg) ||
2435 (opd1.IsRegister() && static_cast<RegOperand &>(opd1).GetRegisterNumber() == absReg) ||
2436 (opd2.IsRegister() && static_cast<RegOperand &>(opd2).GetRegisterNumber() == absReg)) {
2437 return SelectAbsSub(*lastInsn, node, newOpnd0);
2438 }
2439 }
2440 RegOperand &resOpnd = CreateRegisterOperandOfType(primType);
2441 SelectAArch64Cmp(newOpnd0, CreateImmOperand(0, is64Bits ? PTY_u64 : PTY_u32, false), true,
2442 GetPrimTypeBitSize(dtyp));
2443 uint32 mopCsneg = is64Bits ? MOP_xcsnegrrrc : MOP_wcsnegrrrc;
2444 /* ABS requires the operand be interpreted as a signed integer */
2445 CondOperand &condOpnd = GetCondOperand(CC_GE);
2446 Operand &rflag = GetOrCreateRflag();
2447 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopCsneg, resOpnd, newOpnd0, newOpnd0, condOpnd, rflag));
2448 return &resOpnd;
2449 }
2450 }
2451
SelectBnot(UnaryNode & node,Operand & opnd0,const BaseNode & parent)2452 Operand *AArch64CGFunc::SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent)
2453 {
2454 PrimType dtype = node.GetPrimType();
2455 DEBUG_ASSERT(IsPrimitiveInteger(dtype), "bnot expect integer or NYI");
2456 uint32 bitSize = GetPrimTypeBitSize(dtype);
2457 bool is64Bits = (bitSize == k64BitSize);
2458 bool isSigned = IsSignedInteger(dtype);
2459 RegOperand *resOpnd = nullptr;
2460 PrimType primType = is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32);
2461 resOpnd = &GetOrCreateResOperand(parent, primType);
2462
2463 Operand &newOpnd0 = LoadIntoRegister(opnd0, primType);
2464
2465 uint32 mopBnot = is64Bits ? MOP_xnotrr : MOP_wnotrr;
2466 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBnot, *resOpnd, newOpnd0));
2467 /* generate and resOpnd, resOpnd, 0x1/0xFF/0xFFFF for PTY_u1/PTY_u8/PTY_u16 */
2468 int64 immValue = 0;
2469 if (bitSize == k1BitSize) {
2470 immValue = 1;
2471 } else if (bitSize == k8BitSize) {
2472 immValue = 0xFF;
2473 } else if (bitSize == k16BitSize) {
2474 immValue = 0xFFFF;
2475 }
2476 if (immValue != 0) {
2477 ImmOperand &imm = CreateImmOperand(PTY_u32, immValue);
2478 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, *resOpnd, *resOpnd, imm));
2479 }
2480 return resOpnd;
2481 }
2482
SelectRegularBitFieldLoad(ExtractbitsNode & node,const BaseNode & parent)2483 Operand *AArch64CGFunc::SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent)
2484 {
2485 PrimType dtype = node.GetPrimType();
2486 bool isSigned = IsSignedInteger(dtype);
2487 uint8 bitOffset = node.GetBitsOffset();
2488 uint8 bitSize = node.GetBitsSize();
2489 bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
2490 CHECK_FATAL(!is64Bits, "dest opnd should not be 64bit");
2491 PrimType destType = GetIntegerPrimTypeBySizeAndSign(bitSize, isSigned);
2492 Operand *result =
2493 SelectIread(parent, *static_cast<IreadNode *>(node.Opnd(0)), static_cast<int>(bitOffset / k8BitSize), destType);
2494 return result;
2495 }
2496
/*
 * Extract a bit field of bitSize bits at bitOffset from srcOpnd into a new
 * result register.  sext/zext opcodes force the signedness; otherwise it
 * follows the node's prim type.  Fields at offset 0 get fast paths (and-mask,
 * sxt/uxt, mov); the general case uses sbfx/ubfx.
 */
Operand *AArch64CGFunc::SelectExtractbits(ExtractbitsNode &node, Operand &srcOpnd, const BaseNode &parent)
{
    uint8 bitOffset = node.GetBitsOffset();
    uint8 bitSize = node.GetBitsSize();
    PrimType dtype = node.GetPrimType();
    RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype);
    bool isSigned =
        (node.GetOpCode() == OP_sext) ? true : (node.GetOpCode() == OP_zext) ? false : IsSignedInteger(dtype);
    bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
    uint32 immWidth = is64Bits ? kMaxImmVal13Bits : kMaxImmVal12Bits;
    Operand &opnd0 = LoadIntoRegister(srcOpnd, dtype);
    if (bitOffset == 0) {
        if (!isSigned && (bitSize < immWidth)) {
            /* unsigned low field: and with the (2^bitSize - 1) mask */
            SelectBand(resOpnd, opnd0,
                       CreateImmOperand(static_cast<int64>((static_cast<uint64>(1) << bitSize) - 1), immWidth, false),
                       dtype);
            return &resOpnd;
        } else {
            /* byte/half/word-sized low field: a single extend (or mov) suffices */
            MOperator mOp = MOP_undef;
            if (bitSize == k8BitSize) {
                mOp = is64Bits ? (isSigned ? MOP_xsxtb64 : MOP_undef)
                               : (isSigned ? MOP_xsxtb32 : (opnd0.GetSize() == k32BitSize ? MOP_xuxtb32 : MOP_undef));
            } else if (bitSize == k16BitSize) {
                mOp = is64Bits ? (isSigned ? MOP_xsxth64 : MOP_undef)
                               : (isSigned ? MOP_xsxth32 : (opnd0.GetSize() == k32BitSize ? MOP_xuxth32 : MOP_undef));
            } else if (bitSize == k32BitSize) {
                mOp = is64Bits ? (isSigned ? MOP_xsxtw64 : MOP_xuxtw64) : MOP_wmovrr;
            }
            if (mOp != MOP_undef) {
                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0));
                return &resOpnd;
            }
        }
    }
    /* general case: signed/unsigned bit-field extract (sbfx/ubfx) */
    uint32 mopBfx =
        is64Bits ? (isSigned ? MOP_xsbfxrri6i6 : MOP_xubfxrri6i6) : (isSigned ? MOP_wsbfxrri5i5 : MOP_wubfxrri5i5);
    ImmOperand &immOpnd1 = CreateImmOperand(bitOffset, k8BitSize, false);
    ImmOperand &immOpnd2 = CreateImmOperand(bitSize, k8BitSize, false);
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBfx, resOpnd, opnd0, immOpnd1, immOpnd2));
    return &resOpnd;
}
2538
SelectLnot(UnaryNode & node,Operand & srcOpnd,const BaseNode & parent)2539 Operand *AArch64CGFunc::SelectLnot(UnaryNode &node, Operand &srcOpnd, const BaseNode &parent)
2540 {
2541 PrimType dtype = node.GetPrimType();
2542 RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype);
2543 bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
2544 Operand &opnd0 = LoadIntoRegister(srcOpnd, dtype);
2545 SelectAArch64Cmp(opnd0, CreateImmOperand(0, is64Bits ? PTY_u64 : PTY_u32, false), true, GetPrimTypeBitSize(dtype));
2546 SelectAArch64CSet(resOpnd, GetCondOperand(CC_EQ), is64Bits);
2547 return &resOpnd;
2548 }
2549
SelectNeg(UnaryNode & node,Operand & opnd0,const BaseNode & parent)2550 Operand *AArch64CGFunc::SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent)
2551 {
2552 PrimType dtype = node.GetPrimType();
2553 bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
2554 RegOperand *resOpnd = nullptr;
2555 PrimType primType;
2556 if (IsPrimitiveFloat(dtype)) {
2557 primType = dtype;
2558 } else {
2559 primType = is64Bits ? (PTY_i64) : (PTY_i32); /* promoted type */
2560 }
2561 resOpnd = &GetOrCreateResOperand(parent, primType);
2562 SelectNeg(*resOpnd, opnd0, primType);
2563 return resOpnd;
2564 }
2565
SelectNeg(Operand & dest,Operand & srcOpnd,PrimType primType)2566 void AArch64CGFunc::SelectNeg(Operand &dest, Operand &srcOpnd, PrimType primType)
2567 {
2568 Operand &opnd0 = LoadIntoRegister(srcOpnd, primType);
2569 bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize);
2570 MOperator mOp;
2571 if (IsPrimitiveFloat(primType)) {
2572 mOp = is64Bits ? MOP_xfnegrr : MOP_wfnegrr;
2573 } else {
2574 mOp = is64Bits ? MOP_xinegrr : MOP_winegrr;
2575 }
2576 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, opnd0));
2577 }
2578
SelectMvn(Operand & dest,Operand & src,PrimType primType)2579 void AArch64CGFunc::SelectMvn(Operand &dest, Operand &src, PrimType primType)
2580 {
2581 Operand &opnd0 = LoadIntoRegister(src, primType);
2582 bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize);
2583 MOperator mOp;
2584 DEBUG_ASSERT(!IsPrimitiveFloat(primType), "Instruction 'mvn' do not have float version.");
2585 mOp = is64Bits ? MOP_xnotrr : MOP_wnotrr;
2586 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, opnd0));
2587 }
2588
SelectSqrt(UnaryNode & node,Operand & src,const BaseNode & parent)2589 Operand *AArch64CGFunc::SelectSqrt(UnaryNode &node, Operand &src, const BaseNode &parent)
2590 {
2591 /*
2592 * gcc generates code like below for better accurate
2593 * fsqrts s15, s0
2594 * fcmps s15, s15
2595 * fmstat
2596 * beq .L4
2597 * push {r3, lr}
2598 * bl sqrtf
2599 * pop {r3, pc}
2600 * .L4:
2601 * fcpys s0, s15
2602 * bx lr
2603 */
2604 PrimType dtype = node.GetPrimType();
2605 if (!IsPrimitiveFloat(dtype)) {
2606 DEBUG_ASSERT(false, "should be float type");
2607 return nullptr;
2608 }
2609 bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
2610 Operand &opnd0 = LoadIntoRegister(src, dtype);
2611 RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype);
2612 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(is64Bits ? MOP_vsqrtd : MOP_vsqrts, resOpnd, opnd0));
2613 return &resOpnd;
2614 }
2615
SelectCvtFloat2Int(Operand & resOpnd,Operand & srcOpnd,PrimType itype,PrimType ftype)2616 void AArch64CGFunc::SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype)
2617 {
2618 bool is64BitsFloat = (ftype == PTY_f64);
2619 MOperator mOp = 0;
2620
2621 DEBUG_ASSERT(((ftype == PTY_f64) || (ftype == PTY_f32)), "wrong from type");
2622 Operand &opnd0 = LoadIntoRegister(srcOpnd, ftype);
2623 switch (itype) {
2624 case PTY_i32:
2625 mOp = !is64BitsFloat ? MOP_vcvtrf : MOP_vcvtrd;
2626 break;
2627 case PTY_u32:
2628 mOp = !is64BitsFloat ? MOP_vcvturf : MOP_vcvturd;
2629 break;
2630 case PTY_i64:
2631 mOp = !is64BitsFloat ? MOP_xvcvtrf : MOP_xvcvtrd;
2632 break;
2633 case PTY_u64:
2634 case PTY_a64:
2635 mOp = !is64BitsFloat ? MOP_xvcvturf : MOP_xvcvturd;
2636 break;
2637 default:
2638 CHECK_FATAL(false, "unexpected type");
2639 }
2640 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0));
2641 }
2642
/*
 * Convert an integer to f32/f64 (scvtf/ucvtf variants).  Signed sources
 * narrower than 32 bits are sign-extended into a fresh register first;
 * unsigned sub-word sources get no explicit extension here (presumably the
 * register load leaves the upper bits zero — confirm LoadIntoRegister's
 * contract).
 */
void AArch64CGFunc::SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType)
{
    DEBUG_ASSERT((toType == PTY_f32) || (toType == PTY_f64), "unexpected type");
    bool is64BitsFloat = (toType == PTY_f64);
    MOperator mOp = 0;
    uint32 fsize = GetPrimTypeBitSize(fromType);

    /* itype: the 32/64-bit integer form the source is viewed as */
    PrimType itype = (GetPrimTypeBitSize(fromType) == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64)
                                                                  : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32);

    Operand *opnd0 = &LoadIntoRegister(origOpnd0, itype);

    /* need extension before cvt */
    DEBUG_ASSERT(opnd0->IsRegister(), "opnd should be a register operand");
    Operand *srcOpnd = opnd0;
    if (IsSignedInteger(fromType) && (fsize < k32BitSize)) {
        /* sxtb/sxth so the i8/i16 value converts with its proper sign */
        srcOpnd = &CreateRegisterOperandOfType(itype);
        mOp = (fsize == k8BitSize) ? MOP_xsxtb32 : MOP_xsxth32;
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *srcOpnd, *opnd0));
    }

    switch (itype) {
        case PTY_i32:
            mOp = !is64BitsFloat ? MOP_vcvtfr : MOP_vcvtdr;
            break;
        case PTY_u32:
            mOp = !is64BitsFloat ? MOP_vcvtufr : MOP_vcvtudr;
            break;
        case PTY_i64:
            mOp = !is64BitsFloat ? MOP_xvcvtfr : MOP_xvcvtdr;
            break;
        case PTY_u64:
            mOp = !is64BitsFloat ? MOP_xvcvtufr : MOP_xvcvtudr;
            break;
        default:
            CHECK_FATAL(false, "unexpected type");
    }
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, *srcOpnd));
}
2682
/*
 * Common lowering for ceil/floor/trunc/round.  itype is the node's result
 * type, ftype the floating-point source type; float-result forms use
 * frintp/frintm/frintz, integer-result forms use fcvtps/fcvtms/fcvtas.
 * NOTE(review): isFloat is computed from ftype, which the assert above pins
 * to f32/f64, so it is always true when the assert is compiled in and the
 * integer-result branches look unreachable — confirm whether isFloat was
 * meant to test itype instead.
 */
Operand *AArch64CGFunc::SelectRoundOperator(RoundType roundType, const TypeCvtNode &node, Operand &opnd0,
                                            const BaseNode &parent)
{
    PrimType itype = node.GetPrimType();
    PrimType ftype = node.FromType();
    DEBUG_ASSERT(((ftype == PTY_f64) || (ftype == PTY_f32)), "wrong float type");
    bool is64Bits = (ftype == PTY_f64);
    bool isFloat = (ftype == PTY_f64) || (ftype == PTY_f32);
    RegOperand &resOpnd = GetOrCreateResOperand(parent, itype);
    RegOperand &regOpnd0 = LoadIntoRegister(opnd0, ftype);
    MOperator mop = MOP_undef;
    if (roundType == kCeil) {
        /* round towards +infinity */
        if (isFloat) {
            mop = is64Bits ? MOP_dfrintprr : MOP_sfrintprr;
        } else {
            mop = is64Bits ? MOP_xvcvtps : MOP_vcvtps;
        }
    } else if (roundType == kFloor) {
        /* round towards -infinity */
        if (isFloat) {
            mop = is64Bits ? MOP_dfrintmrr : MOP_sfrintmrr;
        } else {
            mop = is64Bits ? MOP_xvcvtms : MOP_vcvtms;
        }
    } else if (roundType == kTrunc) {
        /* round towards zero */
        if (isFloat) {
            mop = is64Bits ? MOP_dfrintzrr : MOP_sfrintzrr;
        } else {
            CHECK_FATAL(false, "not support here!");
        }
    } else {
        /* round to nearest, ties away from zero */
        CHECK_FATAL(!isFloat, "not support float here!");
        mop = is64Bits ? MOP_xvcvtas : MOP_vcvtas;
    }
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, resOpnd, regOpnd0));
    return &resOpnd;
}
2719
SelectCeil(TypeCvtNode & node,Operand & opnd0,const BaseNode & parent)2720 Operand *AArch64CGFunc::SelectCeil(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent)
2721 {
2722 return SelectRoundOperator(kCeil, node, opnd0, parent);
2723 }
2724
2725 /* float to int floor */
SelectFloor(TypeCvtNode & node,Operand & opnd0,const BaseNode & parent)2726 Operand *AArch64CGFunc::SelectFloor(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent)
2727 {
2728 return SelectRoundOperator(kFloor, node, opnd0, parent);
2729 }
2730
LIsPrimitivePointer(PrimType ptype)2731 static bool LIsPrimitivePointer(PrimType ptype)
2732 {
2733 return ((ptype >= PTY_ptr) && (ptype <= PTY_a64));
2734 }
2735
/*
 * Reinterpret the bits of opnd0 as toType (same width, no value conversion).
 * Pointer-to-pointer retypes are plain register moves; memory operands are
 * simply re-read with the destination type; int<->float retypes become fmov.
 * An integer immediate that matches the AArch64 fmov 8-bit immediate encoding
 * is folded directly into fmov #imm instead of a load + register fmov.
 */
Operand *AArch64CGFunc::SelectRetype(TypeCvtNode &node, Operand &opnd0)
{
    PrimType fromType = node.Opnd(0)->GetPrimType();
    PrimType toType = node.GetPrimType();
    DEBUG_ASSERT(GetPrimTypeSize(fromType) == GetPrimTypeSize(toType), "retype bit widith doesn' match");
    if (LIsPrimitivePointer(fromType) && LIsPrimitivePointer(toType)) {
        return &LoadIntoRegister(opnd0, toType);
    }
    // if source operand is in memory,
    // simply read it as a value of 'toType 'into the dest operand and return
    if (opnd0.IsMemoryAccessOperand()) {
        return &SelectCopy(opnd0, toType, toType);
    }

    bool isFromInt = IsPrimitiveInteger(fromType);
    bool is64Bits = GetPrimTypeBitSize(fromType) == k64BitSize;
    bool isImm = false;
    Operand *newOpnd0 = &opnd0;
    if (opnd0.IsImmediate()) {
        // according to aarch64 encoding format, convert int to float expression
        ImmOperand *imm = static_cast<ImmOperand *>(&opnd0);
        uint64 val = static_cast<uint64>(imm->GetValue());
        /* encodable only if the low mantissa bits (48 for f64, 19 for f32) are zero */
        uint64 canRepreset = is64Bits ? (val & 0xffffffffffff) : (val & 0x7ffff);
        /* val1: top two exponent bits (must differ); val2: the following exponent bits (must be all-0 or all-1) */
        uint32 val1 = is64Bits ? (val >> 61) & 0x3 : (val >> 29) & 0x3;
        uint32 val2 = is64Bits ? (val >> 54) & 0xff : (val >> 25) & 0x1f;
        bool isSame = is64Bits ? ((val2 == 0) || (val2 == 0xff)) : ((val2 == 0) || (val2 == 0x1f));
        canRepreset = (canRepreset == 0) && ((val1 & 0x1) ^ ((val1 & 0x2) >> 1)) && isSame;
        if (IsPrimitiveInteger(fromType) && IsPrimitiveFloat(toType) && canRepreset) {
            /* pack sign bit + low exponent bit + top 4 mantissa bits into the 8-bit fmov immediate */
            uint64 temp1 = is64Bits ? (val >> 63) << 7 : (val >> 31) << 7;
            uint64 temp2 = is64Bits ? val >> 48 : val >> 19;
            int64 imm8 = (temp2 & 0x7f) | temp1;
            newOpnd0 = &CreateImmOperand(imm8, k8BitSize, false, kNotVary, true);
            isImm = true;
        }
    }
    if (!isImm) {
        /* not a foldable immediate: bring the value into a register of its own kind */
        bool isSigned = IsSignedInteger(fromType);
        PrimType itype = isFromInt ? (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32))
                                   : (is64Bits ? PTY_f64 : PTY_f32);
        newOpnd0 = &LoadIntoRegister(opnd0, itype);
    }
    if ((IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) ||
        (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType))) {
        /* cross-bank retype: fmov between general and FP/SIMD registers (or fmov #imm) */
        MOperator mopFmov = isImm ? (is64Bits ? MOP_xdfmovri : MOP_wsfmovri)
                                  : (isFromInt ? (is64Bits ? MOP_xvmovdr : MOP_xvmovsr)
                                               : (is64Bits ? MOP_xvmovrd : MOP_xvmovrs));
        RegOperand *resOpnd = &CreateRegisterOperandOfType(toType);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *resOpnd, *newOpnd0));
        return resOpnd;
    }
    /* same register bank: the value is already a valid representation of toType */
    return newOpnd0;
}
2788
SelectCvtFloat2Float(Operand & resOpnd,Operand & srcOpnd,PrimType fromType,PrimType toType)2789 void AArch64CGFunc::SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType)
2790 {
2791 Operand &opnd0 = LoadIntoRegister(srcOpnd, fromType);
2792 MOperator mOp = 0;
2793 switch (toType) {
2794 case PTY_f32: {
2795 CHECK_FATAL(fromType == PTY_f64, "unexpected cvt from type");
2796 mOp = MOP_xvcvtfd;
2797 break;
2798 }
2799 case PTY_f64: {
2800 CHECK_FATAL(fromType == PTY_f32, "unexpected cvt from type");
2801 mOp = MOP_xvcvtdf;
2802 break;
2803 }
2804 default:
2805 CHECK_FATAL(false, "unexpected cvt to type");
2806 }
2807
2808 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0));
2809 }
2810
2811 /*
2812 * This should be regarded only as a reference.
2813 *
2814 * C11 specification.
2815 * 6.3.1.3 Signed and unsigned integers
2816 * 1 When a value with integer type is converted to another integer
2817 * type other than _Bool, if the value can be represented by the
2818 * new type, it is unchanged.
2819 * 2 Otherwise, if the new type is unsigned, the value is converted
2820 * by repeatedly adding or subtracting one more than the maximum
2821 * value that can be represented in the new type until the value
2822 * is in the range of the new type.60)
2823 * 3 Otherwise, the new type is signed and the value cannot be
2824 * represented in it; either the result is implementation-defined
2825 * or an implementation-defined signal is raised.
2826 */
SelectCvtInt2Int(const BaseNode * parent,Operand * & resOpnd,Operand * opnd0,PrimType fromType,PrimType toType)2827 void AArch64CGFunc::SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd, Operand *opnd0, PrimType fromType,
2828 PrimType toType)
2829 {
2830 uint32 fsize = GetPrimTypeBitSize(fromType);
2831 uint32 tsize = GetPrimTypeBitSize(toType);
2832 bool isExpand = tsize > fsize;
2833 bool is64Bit = (tsize == k64BitSize);
2834 if ((parent != nullptr) && opnd0->IsIntImmediate() &&
2835 ((parent->GetOpCode() == OP_band) || (parent->GetOpCode() == OP_bior) || (parent->GetOpCode() == OP_bxor) ||
2836 (parent->GetOpCode() == OP_ashr) || (parent->GetOpCode() == OP_lshr) || (parent->GetOpCode() == OP_shl))) {
2837 ImmOperand *simm = static_cast<ImmOperand *>(opnd0);
2838 DEBUG_ASSERT(simm != nullptr, "simm is nullptr in AArch64CGFunc::SelectCvtInt2Int");
2839 bool isSign = false;
2840 int64 origValue = simm->GetValue();
2841 int64 newValue = origValue;
2842 int64 signValue = 0;
2843 if (!isExpand) {
2844 /* 64--->32 */
2845 if (fsize > tsize) {
2846 if (IsSignedInteger(toType)) {
2847 if (origValue < 0) {
2848 signValue = static_cast<int64>(0xFFFFFFFFFFFFFFFFLL & (1ULL << static_cast<uint32>(tsize)));
2849 }
2850 newValue = static_cast<int64>(
2851 (static_cast<uint64>(origValue) & ((1ULL << static_cast<uint32>(tsize)) - 1u)) |
2852 static_cast<uint64>(signValue));
2853 } else {
2854 newValue = static_cast<uint64>(origValue) & ((1ULL << static_cast<uint32>(tsize)) - 1u);
2855 }
2856 }
2857 }
2858 if (IsSignedInteger(toType)) {
2859 isSign = true;
2860 }
2861 resOpnd = &static_cast<Operand &>(CreateImmOperand(newValue, GetPrimTypeSize(toType) * kBitsPerByte, isSign));
2862 return;
2863 }
2864 if (isExpand) { /* Expansion */
2865 /* if cvt expr's parent is add,and,xor and some other,we can use the imm version */
2866 PrimType primType = ((fsize == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64)
2867 : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32));
2868 opnd0 = &LoadIntoRegister(*opnd0, primType);
2869
2870 if (IsSignedInteger(fromType)) {
2871 DEBUG_ASSERT((is64Bit || (fsize == k8BitSize || fsize == k16BitSize)), "incorrect from size");
2872
2873 MOperator mOp =
2874 (is64Bit ? ((fsize == k8BitSize) ? MOP_xsxtb64 : ((fsize == k16BitSize) ? MOP_xsxth64 : MOP_xsxtw64))
2875 : ((fsize == k8BitSize) ? MOP_xsxtb32 : MOP_xsxth32));
2876 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0));
2877 } else {
2878 /* Unsigned */
2879 auto mOp =
2880 (is64Bit ? ((fsize == k8BitSize) ? MOP_xuxtb32 : ((fsize == k16BitSize) ? MOP_xuxth32 : MOP_xuxtw64))
2881 : ((fsize == k8BitSize) ? MOP_xuxtb32 : MOP_xuxth32));
2882 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, LoadIntoRegister(*opnd0, fromType)));
2883 }
2884 } else { /* Same size or truncate */
2885 #ifdef CNV_OPTIMIZE
2886 /*
2887 * No code needed for aarch64 with same reg.
2888 * Just update regno.
2889 */
2890 RegOperand *reg = static_cast<RegOperand *>(resOpnd);
2891 reg->regNo = static_cast<RegOperand *>(opnd0)->regNo;
2892 #else
2893 /*
2894 * This is not really needed if opnd0 is result from a load.
2895 * Hopefully the FE will get rid of the redundant conversions for loads.
2896 */
2897 PrimType primType = ((fsize == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64)
2898 : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32));
2899 opnd0 = &LoadIntoRegister(*opnd0, primType);
2900
2901 if (fsize > tsize) {
2902 if (tsize == k8BitSize) {
2903 MOperator mOp = IsSignedInteger(toType) ? MOP_xsxtb32 : MOP_xuxtb32;
2904 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0));
2905 } else if (tsize == k16BitSize) {
2906 MOperator mOp = IsSignedInteger(toType) ? MOP_xsxth32 : MOP_xuxth32;
2907 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0));
2908 } else {
2909 MOperator mOp = IsSignedInteger(toType) ? MOP_xsbfxrri6i6 : MOP_xubfxrri6i6;
2910 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0,
2911 CreateImmOperand(0, k8BitSize, false),
2912 CreateImmOperand(tsize, k8BitSize, false)));
2913 }
2914 } else {
2915 /* same size, so resOpnd can be set */
2916 if ((IsSignedInteger(fromType) == IsSignedInteger(toType)) ||
2917 (GetPrimTypeSize(toType) >= k4BitSize)) {
2918 resOpnd = opnd0;
2919 } else if (IsUnsignedInteger(toType)) {
2920 MOperator mop;
2921 switch (toType) {
2922 case PTY_u8:
2923 mop = MOP_xuxtb32;
2924 break;
2925 case PTY_u16:
2926 mop = MOP_xuxth32;
2927 break;
2928 default:
2929 CHECK_FATAL(0, "Unhandled unsigned convert");
2930 }
2931 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, *resOpnd, *opnd0));
2932 } else {
2933 /* signed target */
2934 uint32 size = GetPrimTypeSize(toType);
2935 MOperator mop;
2936 switch (toType) {
2937 case PTY_i8:
2938 mop = (size > k4BitSize) ? MOP_xsxtb64 : MOP_xsxtb32;
2939 break;
2940 case PTY_i16:
2941 mop = (size > k4BitSize) ? MOP_xsxth64 : MOP_xsxth32;
2942 break;
2943 default:
2944 CHECK_FATAL(0, "Unhandled unsigned convert");
2945 }
2946 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, *resOpnd, *opnd0));
2947 }
2948 }
2949 #endif
2950 }
2951 }
2952
SelectCvt(const BaseNode & parent,TypeCvtNode & node,Operand & opnd0)2953 Operand *AArch64CGFunc::SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0)
2954 {
2955 PrimType fromType = node.FromType();
2956 PrimType toType = node.GetPrimType();
2957 if (fromType == toType) {
2958 return &opnd0; /* noop */
2959 }
2960 Operand *resOpnd = &GetOrCreateResOperand(parent, toType);
2961 if (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType)) {
2962 SelectCvtInt2Float(*resOpnd, opnd0, toType, fromType);
2963 } else if (IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) {
2964 SelectCvtFloat2Int(*resOpnd, opnd0, toType, fromType);
2965 } else if (IsPrimitiveInteger(fromType) && IsPrimitiveInteger(toType)) {
2966 SelectCvtInt2Int(&parent, resOpnd, &opnd0, fromType, toType);
2967 } else { /* both are float type */
2968 SelectCvtFloat2Float(*resOpnd, opnd0, fromType, toType);
2969 }
2970 return resOpnd;
2971 }
2972
SelectTrunc(TypeCvtNode & node,Operand & opnd0,const BaseNode & parent)2973 Operand *AArch64CGFunc::SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent)
2974 {
2975 PrimType ftype = node.FromType();
2976 PrimType nodeType = node.GetPrimType();
2977 bool is64Bits = (GetPrimTypeBitSize(node.GetPrimType()) == k64BitSize);
2978 bool isFloat = (IsPrimitiveFloat(nodeType));
2979 if (isFloat) {
2980 CHECK_FATAL(nodeType == PTY_f32 || nodeType == PTY_f64, "only support f32, f64");
2981 return SelectRoundOperator(kTrunc, node, opnd0, parent);
2982 }
2983 PrimType itype = (is64Bits) ? (IsSignedInteger(node.GetPrimType()) ? PTY_i64 : PTY_u64)
2984 : (IsSignedInteger(node.GetPrimType()) ? PTY_i32 : PTY_u32); /* promoted type */
2985 RegOperand &resOpnd = GetOrCreateResOperand(parent, itype);
2986 SelectCvtFloat2Int(resOpnd, opnd0, itype, ftype);
2987 return &resOpnd;
2988 }
2989
2990 /*
2991 * syntax: select <prim-type> (<opnd0>, <opnd1>, <opnd2>)
2992 * <opnd0> must be of integer type.
2993 * <opnd1> and <opnd2> must be of the type given by <prim-type>.
2994 * If <opnd0> is not 0, return <opnd1>. Otherwise, return <opnd2>.
2995 */
SelectAArch64Select(Operand & dest,Operand & o0,Operand & o1,CondOperand & cond,bool isIntType,uint32 dsize)2996 void AArch64CGFunc::SelectAArch64Select(Operand &dest, Operand &o0, Operand &o1, CondOperand &cond, bool isIntType,
2997 uint32 dsize)
2998 {
2999 uint32 mOpCode =
3000 isIntType ? ((dsize == k64BitSize) ? MOP_xcselrrrc : MOP_wcselrrrc)
3001 : ((dsize == k64BitSize) ? MOP_dcselrrrc : ((dsize == k32BitSize) ? MOP_scselrrrc : MOP_hcselrrrc));
3002 Operand &rflag = GetOrCreateRflag();
3003 if (o1.IsImmediate()) {
3004 uint32 movOp = (dsize == k64BitSize ? MOP_xmovri64 : MOP_wmovri32);
3005 RegOperand &movDest =
3006 CreateVirtualRegisterOperand(NewVReg(kRegTyInt, (dsize == k64BitSize) ? k8ByteSize : k4ByteSize));
3007 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(movOp, movDest, o1));
3008 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, dest, o0, movDest, cond, rflag));
3009 return;
3010 }
3011 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, dest, o0, o1, cond, rflag));
3012 }
3013
/*
 * Lower a rangegoto (dense switch) into a jump-table dispatch:
 * build a label table as a local read-only symbol, bias the selector to a
 * zero-based index, load the table entry at base + index*8, and branch.
 */
void AArch64CGFunc::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd)
{
    const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable();
    MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_a64));
    /*
     * we store 8-byte displacement ( jump_label - offset_table_address )
     * in the table. Refer to AArch64Emit::Emit() in aarch64emit.cpp
     */
    std::vector<uint64> sizeArray;
    sizeArray.emplace_back(switchTable.size());
    /* aggregate constant holding one label constant per switch case */
    MIRArrayType *arrayType = memPool->New<MIRArrayType>(etype->GetTypeIndex(), sizeArray);
    MIRAggConst *arrayConst = memPool->New<MIRAggConst>(mirModule, *arrayType);
    for (const auto &itPair : switchTable) {
        LabelIdx labelIdx = itPair.second;
        GetCurBB()->PushBackRangeGotoLabel(labelIdx);
        MIRConst *mirConst = memPool->New<MIRLblConst>(labelIdx, GetFunction().GetPuidx(), *etype);
        arrayConst->AddItem(mirConst, 0);
    }

    /* materialize the table as a function-static symbol named ".LB_<func><n>" */
    MIRSymbol *lblSt = GetFunction().GetSymTab()->CreateSymbol(kScopeLocal);
    lblSt->SetStorageClass(kScFstatic);
    lblSt->SetSKind(kStConst);
    lblSt->SetTyIdx(arrayType->GetTypeIndex());
    lblSt->SetKonst(arrayConst);
    std::string lblStr(".LB_");
    MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(GetFunction().GetStIdx().Idx());
    CHECK_FATAL(funcSt != nullptr, "funcSt should not be nullptr");
    uint32 labelIdxTmp = GetLabelIdx();
    lblStr += funcSt->GetName();
    lblStr += std::to_string(labelIdxTmp++);
    SetLabelIdx(labelIdxTmp); /* consume one label index for the table's unique name */
    lblSt->SetNameStrIdx(lblStr);
    AddEmitSt(GetCurBB()->GetId(), *lblSt);

    PrimType itype = rangeGotoNode.Opnd(0)->GetPrimType();
    Operand &opnd0 = LoadIntoRegister(srcOpnd, itype);

    regno_t vRegNO = NewVReg(kRegTyInt, 8u);
    RegOperand *addOpnd = &CreateVirtualRegisterOperand(vRegNO);

    /* bias the selector so the smallest case value maps to table index 0 */
    int32 minIdx = switchTable[0].first;
    SelectAdd(*addOpnd, opnd0,
              CreateImmOperand(-static_cast<int64>(minIdx) - static_cast<int64>(rangeGotoNode.GetTagOffset()),
                               GetPrimTypeBitSize(itype), true),
              itype);

    /* contains the index */
    if (addOpnd->GetSize() != GetPrimTypeBitSize(PTY_u64)) {
        addOpnd = static_cast<RegOperand *>(&SelectCopy(*addOpnd, PTY_u64, PTY_u64));
    }

    RegOperand &baseOpnd = CreateRegisterOperandOfType(PTY_u64);
    StImmOperand &stOpnd = CreateStImmOperand(*lblSt, 0, 0);

    /* load the address of the switch table */
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, baseOpnd, stOpnd));
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, baseOpnd, baseOpnd, stOpnd));

    /* load the displacement into a register by accessing memory at base + index*8 */
    Operand *disp = CreateMemOperand(MemOperand::kAddrModeBOrX, k64BitSize, baseOpnd, *addOpnd, k8BitShift);
    RegOperand &tgt = CreateRegisterOperandOfType(PTY_a64);
    /* final target = table base + displacement, then indirect branch */
    SelectAdd(tgt, baseOpnd, *disp, PTY_u64);
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xbr, tgt));
}
3078
GetZeroOpnd(uint32 bitLen)3079 RegOperand &AArch64CGFunc::GetZeroOpnd(uint32 bitLen)
3080 {
3081 /*
3082 * It is possible to have a bitLen < 32, eg stb.
3083 * Set it to 32 if it is less than 32.
3084 */
3085 if (bitLen < k32BitSize) {
3086 bitLen = k32BitSize;
3087 }
3088 DEBUG_ASSERT((bitLen == k32BitSize || bitLen == k64BitSize), "illegal bit length = %d", bitLen);
3089 return (bitLen == k32BitSize) ? GetOrCreatePhysicalRegisterOperand(RZR, k32BitSize, kRegTyInt)
3090 : GetOrCreatePhysicalRegisterOperand(RZR, k64BitSize, kRegTyInt);
3091 }
3092
3093 /* if offset < 0, allocation; otherwise, deallocation */
CreateCallFrameOperand(int32 offset,uint32 size)3094 MemOperand &AArch64CGFunc::CreateCallFrameOperand(int32 offset, uint32 size)
3095 {
3096 MemOperand *memOpnd = CreateStackMemOpnd(RSP, offset, size);
3097 memOpnd->SetIndexOpt((offset < 0) ? MemOperand::kPreIndex : MemOperand::kPostIndex);
3098 return *memOpnd;
3099 }
3100
GetLogicalShiftLeftOperand(uint32 shiftAmount,bool is64bits) const3101 BitShiftOperand *AArch64CGFunc::GetLogicalShiftLeftOperand(uint32 shiftAmount, bool is64bits) const
3102 {
3103 /* num(0, 16, 32, 48) >> 4 is num1(0, 1, 2, 3), num1 & (~3) == 0 */
3104 DEBUG_ASSERT((!shiftAmount || ((shiftAmount >> 4) & ~static_cast<uint32>(3)) == 0),
3105 "shift amount should be one of 0, 16, 32, 48");
3106 /* movkLslOperands[4]~movkLslOperands[7] is for 64 bits */
3107 return &movkLslOperands[(shiftAmount >> 4) + (is64bits ? 4 : 0)];
3108 }
3109
/*
 * Shared LSL operands for movk/movz immediates, indexed by (shift >> 4) with
 * a +4 offset for 64-bit registers; see GetLogicalShiftLeftOperand().
 * Entries [0..3] are for 32-bit use (shift 32/48 invalid on w-registers),
 * entries [4..7] for 64-bit use (shift 0/16/32/48).
 */
AArch64CGFunc::MovkLslOperandArray AArch64CGFunc::movkLslOperands = {
    BitShiftOperand(BitShiftOperand::kLSL, 0, 4),
    BitShiftOperand(BitShiftOperand::kLSL, 16, 4),
    BitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(-1), 0), /* invalid entry */
    BitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(-1), 0), /* invalid entry */
    BitShiftOperand(BitShiftOperand::kLSL, 0, 6),
    BitShiftOperand(BitShiftOperand::kLSL, 16, 6),
    BitShiftOperand(BitShiftOperand::kLSL, 32, 6),
    BitShiftOperand(BitShiftOperand::kLSL, 48, 6),
};
3120
CreateStkTopOpnd(uint32 offset,uint32 size)3121 MemOperand &AArch64CGFunc::CreateStkTopOpnd(uint32 offset, uint32 size)
3122 {
3123 MemOperand *memOp = CreateStackMemOpnd(RFP, static_cast<int32>(offset), size);
3124 return *memOp;
3125 }
3126
CreateStackMemOpnd(regno_t preg,int32 offset,uint32 size)3127 MemOperand *AArch64CGFunc::CreateStackMemOpnd(regno_t preg, int32 offset, uint32 size)
3128 {
3129 auto *memOp =
3130 memPool->New<MemOperand>(memPool->New<RegOperand>(preg, k64BitSize, kRegTyInt),
3131 &CreateOfstOpnd(static_cast<uint64>(static_cast<int64>(offset)), k32BitSize), size);
3132 if (preg == RFP || preg == RSP) {
3133 memOp->SetStackMem(true);
3134 }
3135 return memOp;
3136 }
3137
3138 /* Mem mod BOI || PreIndex || PostIndex */
CreateMemOperand(uint32 size,RegOperand & base,ImmOperand & ofstOp,bool isVolatile,MemOperand::AArch64AddressingMode mode) const3139 MemOperand *AArch64CGFunc::CreateMemOperand(uint32 size, RegOperand &base, ImmOperand &ofstOp, bool isVolatile,
3140 MemOperand::AArch64AddressingMode mode) const
3141 {
3142 auto *memOp = memPool->New<MemOperand>(size, base, ofstOp, mode);
3143 memOp->SetVolatile(isVolatile);
3144 if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) {
3145 memOp->SetStackMem(true);
3146 }
3147 return memOp;
3148 }
3149
CreateMemOperand(MemOperand::AArch64AddressingMode mode,uint32 size,RegOperand & base,RegOperand * index,ImmOperand * offset,const MIRSymbol * symbol) const3150 MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand &base,
3151 RegOperand *index, ImmOperand *offset, const MIRSymbol *symbol) const
3152 {
3153 auto *memOp = memPool->New<MemOperand>(mode, size, base, index, offset, symbol);
3154 if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) {
3155 memOp->SetStackMem(true);
3156 }
3157 return memOp;
3158 }
3159
CreateMemOperand(MemOperand::AArch64AddressingMode mode,uint32 size,RegOperand & base,RegOperand & index,ImmOperand * offset,const MIRSymbol & symbol,bool noExtend)3160 MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand &base,
3161 RegOperand &index, ImmOperand *offset, const MIRSymbol &symbol,
3162 bool noExtend)
3163 {
3164 auto *memOp = memPool->New<MemOperand>(mode, size, base, index, offset, symbol, noExtend);
3165 if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) {
3166 memOp->SetStackMem(true);
3167 }
3168 return memOp;
3169 }
3170
CreateMemOperand(MemOperand::AArch64AddressingMode mode,uint32 dSize,RegOperand & base,RegOperand & indexOpnd,uint32 shift,bool isSigned) const3171 MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, RegOperand &base,
3172 RegOperand &indexOpnd, uint32 shift, bool isSigned) const
3173 {
3174 auto *memOp = memPool->New<MemOperand>(mode, dSize, base, indexOpnd, shift, isSigned);
3175 if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) {
3176 memOp->SetStackMem(true);
3177 }
3178 return memOp;
3179 }
3180
CreateMemOperand(MemOperand::AArch64AddressingMode mode,uint32 dSize,const MIRSymbol & sym)3181 MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, const MIRSymbol &sym)
3182 {
3183 auto *memOp = memPool->New<MemOperand>(mode, dSize, sym);
3184 return memOp;
3185 }
3186
CreateRegisterOperandOfType(PrimType primType)3187 RegOperand &AArch64CGFunc::CreateRegisterOperandOfType(PrimType primType)
3188 {
3189 RegType regType = GetRegTyFromPrimTy(primType);
3190 uint32 byteLength = GetPrimTypeSize(primType);
3191 return CreateRegisterOperandOfType(regType, byteLength);
3192 }
3193
CreateRegisterOperandOfType(RegType regty,uint32 byteLen)3194 RegOperand &AArch64CGFunc::CreateRegisterOperandOfType(RegType regty, uint32 byteLen)
3195 {
3196 /* BUG: if half-precision floating point operations are supported? */
3197 /* AArch64 has 32-bit and 64-bit registers only */
3198 if (byteLen < k4ByteSize) {
3199 byteLen = k4ByteSize;
3200 }
3201 regno_t vRegNO = NewVReg(regty, byteLen);
3202 return CreateVirtualRegisterOperand(vRegNO);
3203 }
3204
CreateRflagOperand()3205 RegOperand &AArch64CGFunc::CreateRflagOperand()
3206 {
3207 /* AArch64 has Status register that is 32-bit wide. */
3208 regno_t vRegNO = NewVRflag();
3209 return CreateVirtualRegisterOperand(vRegNO);
3210 }
3211
/*
 * Merge all return BBs into a single return BB: each former return BB becomes
 * a goto (or fallthrough, for a single exit) into one shared kBBReturn block.
 */
void AArch64CGFunc::MergeReturn()
{
    uint32 exitBBSize = GetExitBBsVec().size();
    if (exitBBSize == 0) {
        return;
    }
    /* single exit that is already the current BB: nothing to merge */
    if ((exitBBSize == 1) && GetExitBB(0) == GetCurBB()) {
        return;
    }
    if (exitBBSize == 1) {
        /* single exit elsewhere: append a fresh return BB right after it */
        BB *onlyExitBB = GetExitBB(0);
        LabelIdx labidx = CreateLabel();
        BB *retBB = CreateNewBB(labidx, onlyExitBB->IsUnreachable(), BB::kBBReturn, onlyExitBB->GetFrequency());
        onlyExitBB->AppendBB(*retBB);
        /* modify the original return BB. */
        DEBUG_ASSERT(onlyExitBB->GetKind() == BB::kBBReturn, "Error: suppose to merge multi return bb");
        onlyExitBB->SetKind(BB::kBBFallthru);

        /* the new BB replaces the old one as the function's sole exit */
        GetExitBBsVec().pop_back();
        GetExitBBsVec().emplace_back(retBB);
        return;
    }

    /* multiple exits: redirect each one via an unconditional branch */
    LabelIdx labidx = CreateLabel();
    LabelOperand &targetOpnd = GetOrCreateLabelOperand(labidx);
    uint32 freq = 0; /* merged BB inherits the summed frequency of all exits */
    for (auto *tmpBB : GetExitBBsVec()) {
        DEBUG_ASSERT(tmpBB->GetKind() == BB::kBBReturn, "Error: suppose to merge multi return bb");
        tmpBB->SetKind(BB::kBBGoto);
        tmpBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd));
        freq += tmpBB->GetFrequency();
    }
    BB *retBB = CreateNewBB(labidx, false, BB::kBBReturn, freq);
    GetLastBB()->PrependBB(*retBB);
    GetExitBBsVec().clear();
    GetExitBBsVec().emplace_back(retBB);
}
3249
CreateVirtualRegisterOperand(regno_t vRegNO,uint32 size,RegType kind,uint32 flg) const3250 RegOperand *AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO, uint32 size, RegType kind, uint32 flg) const
3251 {
3252 RegOperand *res = memPool->New<RegOperand>(vRegNO, size, kind, flg);
3253 maplebe::VregInfo::vRegOperandTable[vRegNO] = res;
3254 return res;
3255 }
3256
CreateVirtualRegisterOperand(regno_t vRegNO)3257 RegOperand &AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO)
3258 {
3259 DEBUG_ASSERT((vReg.vRegOperandTable.find(vRegNO) == vReg.vRegOperandTable.end()), "already exist");
3260 DEBUG_ASSERT(vRegNO < vReg.VRegTableSize(), "index out of range");
3261 uint8 bitSize = static_cast<uint8>((static_cast<uint32>(vReg.VRegTableGetSize(vRegNO))) * kBitsPerByte);
3262 RegOperand *res = CreateVirtualRegisterOperand(vRegNO, bitSize, vReg.VRegTableGetType(vRegNO));
3263 return *res;
3264 }
3265
GetOrCreateVirtualRegisterOperand(regno_t vRegNO)3266 RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(regno_t vRegNO)
3267 {
3268 auto it = maplebe::VregInfo::vRegOperandTable.find(vRegNO);
3269 return (it != maplebe::VregInfo::vRegOperandTable.end()) ? *(it->second) : CreateVirtualRegisterOperand(vRegNO);
3270 }
3271
// Stage B - Pre-padding and extension of arguments
/*
 * Collect a ParamDesc (MIR type + argument expression) for each actual
 * argument of 'naryNode' starting at index 'start'.
 * Returns whether any "special" argument was found; in this implementation
 * nothing sets hasSpecialArg, so the result is always false.
 * NOTE(review): 'callee' is unused here — presumably kept for signature
 * compatibility with callers/other variants; confirm before removing.
 */
bool AArch64CGFunc::SelectParmListPreprocess(StmtNode &naryNode, size_t start, std::vector<ParamDesc> &argsDesc,
                                             const MIRFunction *callee)
{
    bool hasSpecialArg = false;
    for (size_t i = start; i < naryNode.NumOpnds(); ++i) {
        BaseNode *argExpr = naryNode.Opnd(i);
        DEBUG_ASSERT(argExpr != nullptr, "not null check");
        PrimType primType = argExpr->GetPrimType();
        DEBUG_ASSERT(primType != PTY_void, "primType should not be void");
        auto *mirType = GlobalTables::GetTypeTable().GetPrimType(primType);
        (void)argsDesc.emplace_back(mirType, argExpr);
    }
    return hasSpecialArg;
}
3287
GetCalleeFunction(StmtNode & naryNode) const3288 std::pair<MIRFunction *, MIRFuncType *> AArch64CGFunc::GetCalleeFunction(StmtNode &naryNode) const
3289 {
3290 MIRFunction *callee = nullptr;
3291 MIRFuncType *calleeType = nullptr;
3292 if (dynamic_cast<CallNode *>(&naryNode) != nullptr) {
3293 auto calleePuIdx = static_cast<CallNode &>(naryNode).GetPUIdx();
3294 callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx);
3295 calleeType = callee->GetMIRFuncType();
3296 } else if (naryNode.GetOpCode() == OP_icallproto) {
3297 auto *iCallNode = &static_cast<IcallNode &>(naryNode);
3298 MIRType *protoType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iCallNode->GetRetTyIdx());
3299 if (protoType->IsMIRPtrType()) {
3300 calleeType = static_cast<MIRPtrType *>(protoType)->GetPointedFuncType();
3301 } else if (protoType->IsMIRFuncType()) {
3302 calleeType = static_cast<MIRFuncType *>(protoType);
3303 }
3304 }
3305 return {callee, calleeType};
3306 }
3307
SelectParmListPassByStack(const MIRType & mirType,Operand & opnd,uint32 memOffset,bool preCopyed,std::vector<Insn * > & insnForStackArgs)3308 void AArch64CGFunc::SelectParmListPassByStack(const MIRType &mirType, Operand &opnd, uint32 memOffset, bool preCopyed,
3309 std::vector<Insn *> &insnForStackArgs)
3310 {
3311 PrimType primType = preCopyed ? PTY_a64 : mirType.GetPrimType();
3312 auto &valReg = LoadIntoRegister(opnd, primType);
3313 auto &actMemOpnd = CreateMemOpnd(RSP, memOffset, GetPrimTypeBitSize(primType));
3314 Insn &strInsn = GetInsnBuilder()->BuildInsn(PickStInsn(GetPrimTypeBitSize(primType), primType), valReg, actMemOpnd);
3315 actMemOpnd.SetStackArgMem(true);
3316 if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel2 && insnForStackArgs.size() < kShiftAmount12) {
3317 (void)insnForStackArgs.emplace_back(&strInsn);
3318 } else {
3319 GetCurBB()->AppendInsn(strInsn);
3320 }
3321 }
3322
3323 /*
3324 SelectParmList generates an instrunction for each of the parameters
3325 to load the parameter value into the corresponding register.
3326 We return a list of registers to the call instruction because
3327 they may be needed in the register allocation phase.
3328 */
SelectParmList(StmtNode & naryNode,ListOperand & srcOpnds,bool isCallNative)3329 void AArch64CGFunc::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative)
3330 {
3331 size_t opndIdx = 0;
3332 // the first opnd of ICallNode is not parameter of function
3333 if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto || isCallNative) {
3334 opndIdx++;
3335 }
3336 auto [callee, calleeType] = GetCalleeFunction(naryNode);
3337
3338 std::vector<ParamDesc> argsDesc;
3339 std::vector<RegMapForPhyRegCpy> regMapForTmpBB;
3340 bool hasSpecialArg = SelectParmListPreprocess(naryNode, opndIdx, argsDesc, callee);
3341 BB *curBBrecord = GetCurBB();
3342 BB *tmpBB = nullptr;
3343 if (hasSpecialArg) {
3344 tmpBB = CreateNewBB();
3345 }
3346
3347 AArch64CallConvImpl parmLocator(GetBecommon());
3348 CCLocInfo ploc;
3349 std::vector<Insn *> insnForStackArgs;
3350
3351 for (size_t i = 0; i < argsDesc.size(); ++i) {
3352 if (hasSpecialArg) {
3353 DEBUG_ASSERT(tmpBB, "need temp bb for lower priority args");
3354 SetCurBB(argsDesc[i].isSpecialArg ? *curBBrecord : *tmpBB);
3355 }
3356
3357 auto *mirType = argsDesc[i].mirType;
3358
3359 // get param opnd, for unpreCody agg, opnd must be mem opnd
3360 Operand *opnd = nullptr;
3361 auto preCopyed = argsDesc[i].preCopyed;
3362 if (preCopyed) { // preCopyed agg, passed by address
3363 naryNode.SetMayTailcall(false); // has preCopyed arguments, don't do tailcall
3364 opnd = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
3365 auto &spReg = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt);
3366 SelectAdd(*opnd, spReg, CreateImmOperand(argsDesc[i].offset, k64BitSize, false), PTY_a64);
3367 } else { // base type, clac true val
3368 opnd = &LoadIntoRegister(*HandleExpr(naryNode, *argsDesc[i].argExpr), mirType->GetPrimType());
3369 }
3370 parmLocator.LocateNextParm(*mirType, ploc, (i == 0), calleeType);
3371
3372 // skip unused args
3373 if (callee && callee->GetFuncDesc().IsArgUnused(i)) {
3374 continue;
3375 }
3376
3377 if (ploc.reg0 != kRinvalid) { // load to the register.
3378 CHECK_FATAL(ploc.reg1 == kRinvalid, "NIY");
3379 auto &phyReg = GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(ploc.reg0),
3380 GetPrimTypeBitSize(ploc.primTypeOfReg0),
3381 GetRegTyFromPrimTy(ploc.primTypeOfReg0));
3382 DEBUG_ASSERT(opnd->IsRegister(), "NIY, must be reg");
3383 if (!DoCallerEnsureValidParm(phyReg, static_cast<RegOperand &>(*opnd), ploc.primTypeOfReg0)) {
3384 if (argsDesc[i].isSpecialArg) {
3385 regMapForTmpBB.emplace_back(RegMapForPhyRegCpy(
3386 &phyReg, ploc.primTypeOfReg0, static_cast<RegOperand *>(opnd), ploc.primTypeOfReg0));
3387 } else {
3388 SelectCopy(phyReg, ploc.primTypeOfReg0, *opnd, ploc.primTypeOfReg0);
3389 }
3390 }
3391 srcOpnds.PushOpnd(phyReg);
3392 continue;
3393 }
3394
3395 // store to the memory segment for stack-passsed arguments.
3396 if (CGOptions::IsBigEndian() && ploc.memSize < static_cast<int32>(k8ByteSize)) {
3397 ploc.memOffset = ploc.memOffset + static_cast<int32>(k4ByteSize);
3398 }
3399 SelectParmListPassByStack(*mirType, *opnd, static_cast<uint32>(ploc.memOffset), preCopyed, insnForStackArgs);
3400 }
3401 // if we have stack-passed arguments, don't do tailcall
3402 parmLocator.InitCCLocInfo(ploc);
3403 if (ploc.memOffset != 0) {
3404 naryNode.SetMayTailcall(false);
3405 }
3406 if (hasSpecialArg) {
3407 DEBUG_ASSERT(tmpBB, "need temp bb for lower priority args");
3408 SetCurBB(*tmpBB);
3409 for (auto it : regMapForTmpBB) {
3410 SelectCopy(*it.destReg, it.destType, *it.srcReg, it.srcType);
3411 }
3412 curBBrecord->InsertAtEnd(*tmpBB);
3413 SetCurBB(*curBBrecord);
3414 }
3415 for (auto &strInsn : insnForStackArgs) {
3416 GetCurBB()->AppendInsn(*strInsn);
3417 }
3418 }
3419
DoCallerEnsureValidParm(RegOperand & destOpnd,RegOperand & srcOpnd,PrimType formalPType)3420 bool AArch64CGFunc::DoCallerEnsureValidParm(RegOperand &destOpnd, RegOperand &srcOpnd, PrimType formalPType)
3421 {
3422 Insn *insn = nullptr;
3423 switch (formalPType) {
3424 case PTY_u1: {
3425 ImmOperand &lsbOpnd = CreateImmOperand(maplebe::k0BitSize, srcOpnd.GetSize(), false);
3426 ImmOperand &widthOpnd = CreateImmOperand(maplebe::k1BitSize, srcOpnd.GetSize(), false);
3427 bool is64Bit = (srcOpnd.GetSize() == maplebe::k64BitSize);
3428 insn = &GetInsnBuilder()->BuildInsn(is64Bit ? MOP_xubfxrri6i6 : MOP_wubfxrri5i5, destOpnd, srcOpnd, lsbOpnd,
3429 widthOpnd);
3430 break;
3431 }
3432 case PTY_u8:
3433 case PTY_i8:
3434 insn = &GetInsnBuilder()->BuildInsn(MOP_xuxtb32, destOpnd, srcOpnd);
3435 break;
3436 case PTY_u16:
3437 case PTY_i16:
3438 insn = &GetInsnBuilder()->BuildInsn(MOP_xuxth32, destOpnd, srcOpnd);
3439 break;
3440 default:
3441 break;
3442 }
3443 if (insn != nullptr) {
3444 GetCurBB()->AppendInsn(*insn);
3445 return true;
3446 }
3447 return false;
3448 }
3449
/*
 * Prepare arguments for non-C calling conventions (WebKitJS/GHC): each
 * argument is loaded into its convention-assigned register, or stored to its
 * stack slot (with a bounded number of stores deferred at -O1).
 */
void AArch64CGFunc::SelectParmListNotC(StmtNode &naryNode, ListOperand &srcOpnds)
{
    size_t i = 0;
    // the first operand of an icall is the target, not an argument
    if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto) {
        i++;
    }

    CCImpl &parmLocator = *GetOrCreateLocator(CCImpl::GetCallConvKind(naryNode));
    CCLocInfo ploc;
    std::vector<Insn *> insnForStackArgs;
    uint32 stackArgsCount = 0;
    for (uint32 pnum = 0; i < naryNode.NumOpnds(); ++i, ++pnum) {
        MIRType *ty = nullptr;
        BaseNode *argExpr = naryNode.Opnd(i);
        DEBUG_ASSERT(argExpr != nullptr, "argExpr should not be nullptr");
        PrimType primType = argExpr->GetPrimType();
        DEBUG_ASSERT(primType != PTY_void, "primType should not be void");
        /* use alloca */
        ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast<uint32>(primType)];
        RegOperand *expRegOpnd = nullptr;
        Operand *opnd = HandleExpr(naryNode, *argExpr);
        // force the argument value into a register before placing it
        if (!opnd->IsRegister()) {
            opnd = &LoadIntoRegister(*opnd, primType);
        }
        expRegOpnd = static_cast<RegOperand *>(opnd);

        parmLocator.LocateNextParm(*ty, ploc);
        PrimType destPrimType = primType;
        if (ploc.reg0 != kRinvalid) { /* load to the register. */
            CHECK_FATAL(expRegOpnd != nullptr, "null ptr check");
            RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand(
                static_cast<AArch64reg>(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(destPrimType));
            SelectCopy(parmRegOpnd, destPrimType, *expRegOpnd, primType);
            srcOpnds.PushOpnd(parmRegOpnd);
        } else { /* store to the memory segment for stack-passsed arguments. */
            if (CGOptions::IsBigEndian()) {
                // big-endian: sub-8-byte values occupy the high half of the slot
                if (GetPrimTypeBitSize(primType) < k64BitSize) {
                    ploc.memOffset = ploc.memOffset + static_cast<int32>(k4BitSize);
                }
            }
            MemOperand &actMemOpnd = CreateMemOpnd(RSP, ploc.memOffset, GetPrimTypeBitSize(primType));
            Insn &strInsn = GetInsnBuilder()->BuildInsn(PickStInsn(GetPrimTypeBitSize(primType), primType), *expRegOpnd,
                                                        actMemOpnd);
            actMemOpnd.SetStackArgMem(true);
            // defer up to kShiftAmount12 stores at -O1 so they follow the register moves
            if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel1 && stackArgsCount < kShiftAmount12) {
                (void)insnForStackArgs.emplace_back(&strInsn);
                stackArgsCount++;
            } else {
                GetCurBB()->AppendInsn(strInsn);
            }
        }
        DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NYI");
    }
    // emit the deferred stack-argument stores
    for (auto &strInsn : insnForStackArgs) {
        GetCurBB()->AppendInsn(*strInsn);
    }
}
3507
3508 // based on call conv, choose how to prepare args
SelectParmListWrapper(StmtNode & naryNode,ListOperand & srcOpnds,bool isCallNative)3509 void AArch64CGFunc::SelectParmListWrapper(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative)
3510 {
3511 if (CCImpl::GetCallConvKind(naryNode) == kCCall) {
3512 SelectParmList(naryNode, srcOpnds, isCallNative);
3513 } else if (CCImpl::GetCallConvKind(naryNode) == kWebKitJS || CCImpl::GetCallConvKind(naryNode) == kGHC) {
3514 SelectParmListNotC(naryNode, srcOpnds);
3515 } else {
3516 CHECK_FATAL(false, "niy");
3517 }
3518 }
3519
/*
 * Lower a direct call: prepare the argument list, append the call insn,
 * record return-size/signedness, attach deopt bundle info and stack maps,
 * and flag stack-slot returns for stack protection.
 */
void AArch64CGFunc::SelectCall(CallNode &callNode)
{
    MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx());
    // NOTE(review): fsym is dereferenced below without a null check — presumably
    // guaranteed non-null for direct calls; confirm.
    MIRSymbol *fsym = GetFunction().GetLocalOrGlobalSymbol(fn->GetStIdx(), false);
    MIRType *retType = fn->GetReturnType();

    if (GetCG()->GenerateVerboseCG()) {
        // emit the callee name as a comment for readable assembly
        const std::string &comment = fsym->GetName();
        GetCurBB()->AppendInsn(CreateCommentInsn(comment));
    }

    ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator());
    SelectParmListWrapper(callNode, *srcOpnds, false);

    Insn &callInsn = AppendCall(*fsym, *srcOpnds);
    GetCurBB()->SetHasCall();
    if (retType != nullptr) {
        callInsn.SetRetSize(static_cast<uint32>(retType->GetSize()));
        callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
    }
    // attach deopt bundle entries (pregs or constants) to the call insn
    const auto &deoptBundleInfo = callNode.GetDeoptBundleInfo();
    for (const auto &elem : deoptBundleInfo) {
        auto valueKind = elem.second.GetMapleValueKind();
        if (valueKind == MapleValue::kPregKind) {
            auto *opnd = GetOrCreateRegOpndFromPregIdx(elem.second.GetPregIdx(), PTY_ref);
            callInsn.AddDeoptBundleInfo(elem.first, *opnd);
        } else if (valueKind == MapleValue::kConstKind) {
            auto *opnd = SelectIntConst(static_cast<const MIRIntConst &>(elem.second.GetConstValue()), callNode);
            callInsn.AddDeoptBundleInfo(elem.first, *opnd);
        } else {
            CHECK_FATAL(false, "not supported currently");
        }
    }
    AppendStackMapInsn(callInsn);

    /* check if this call use stack slot to return */
    if (fn->IsFirstArgReturn()) {
        SetStackProtectInfo(kRetureStackSlot);
    }

    GetFunction().SetHasCall();
}
3562
/* Lower an indirect call through a function pointer: marshal arguments, load
 * the target address into a register, emit a blr, and record return-value,
 * stack-protect, and deopt-bundle information. */
void AArch64CGFunc::SelectIcall(IcallNode &icallNode)
{
    /* lower the actual parameters into the call's operand list */
    ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator());
    SelectParmListWrapper(icallNode, *srcOpnds, false);

    /* operand 0 is the function pointer; it must be in a register for blr */
    Operand *srcOpnd = HandleExpr(icallNode, *icallNode.GetNopndAt(0));
    Operand *fptrOpnd = srcOpnd;
    if (fptrOpnd->GetKind() != Operand::kOpdRegister) {
        PrimType ty = icallNode.Opnd(0)->GetPrimType();
        fptrOpnd = &SelectCopy(*srcOpnd, ty, ty);
    }
    DEBUG_ASSERT(fptrOpnd->IsRegister(), "SelectIcall: function pointer not RegOperand");
    RegOperand *regOpnd = static_cast<RegOperand *>(fptrOpnd);
    Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_xblr, *regOpnd, *srcOpnds);

    MIRType *retType = icallNode.GetCallReturnType();
    if (retType != nullptr) {
        callInsn.SetRetSize(static_cast<uint32>(retType->GetSize()));
        callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
    }

    /* check if this icall use stack slot to return */
    CallReturnVector *p2nrets = &icallNode.GetReturnVec();
    if (p2nrets->size() == k1ByteSize) {
        StIdx stIdx = (*p2nrets)[0].first;
        CHECK_NULL_FATAL(mirModule.CurFunction());
        MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx());
        /* results larger than 16 bytes come back through a stack slot */
        if (sym != nullptr && (GetBecommon().GetTypeSize(sym->GetTyIdx().GetIdx()) > k16ByteSize)) {
            SetStackProtectInfo(kRetureStackSlot);
        }
    }

    GetCurBB()->AppendInsn(callInsn);
    GetCurBB()->SetHasCall();
    DEBUG_ASSERT(GetCurBB()->GetLastMachineInsn()->IsCall(), "lastInsn should be a call");
    GetFunction().SetHasCall();
    /* attach deoptimization bundle operands (pregs or int constants) to the call */
    const auto &deoptBundleInfo = icallNode.GetDeoptBundleInfo();
    for (const auto &elem : deoptBundleInfo) {
        auto valueKind = elem.second.GetMapleValueKind();
        if (valueKind == MapleValue::kPregKind) {
            auto *opnd = GetOrCreateRegOpndFromPregIdx(elem.second.GetPregIdx(), PTY_ref);
            callInsn.AddDeoptBundleInfo(elem.first, *opnd);
        } else if (valueKind == MapleValue::kConstKind) {
            auto *opnd = SelectIntConst(static_cast<const MIRIntConst &>(elem.second.GetConstValue()), icallNode);
            callInsn.AddDeoptBundleInfo(elem.first, *opnd);
        } else {
            CHECK_FATAL(false, "not supported currently");
        }
    }
    AppendStackMapInsn(callInsn);
}
3614
SelectComment(CommentNode & comment)3615 void AArch64CGFunc::SelectComment(CommentNode &comment)
3616 {
3617 GetCurBB()->AppendInsn(CreateCommentInsn(comment.GetComment()));
3618 }
3619
/* Move the return value (if any) into the ABI-designated return register and
 * record the current BB as an exit block. opnd0 may be a register, a memory
 * operand, an immediate, or nullptr (no value to place). */
void AArch64CGFunc::SelectReturn(Operand *opnd0)
{
    /* functions marked oneelem_simd return their value through a double register */
    bool is64x1vec = GetFunction().GetAttr(FUNCATTR_oneelem_simd) ? true : false;
    MIRType *floatType = GlobalTables::GetTypeTable().GetDouble();
    MIRType *retTyp = is64x1vec ? floatType : GetFunction().GetReturnType();
    CCImpl &retLocator = *GetOrCreateLocator(GetCurCallConvKind());
    CCLocInfo retMech;
    retLocator.LocateRetVal(*retTyp, retMech);
    if ((retMech.GetRegCount() > 0) && (opnd0 != nullptr)) {
        RegType regTyp = is64x1vec ? kRegTyFloat : GetRegTyFromPrimTy(retMech.GetPrimTypeOfReg0());
        PrimType oriPrimType = is64x1vec ? GetFunction().GetReturnType()->GetPrimType() : retMech.GetPrimTypeOfReg0();
        AArch64reg retReg = static_cast<AArch64reg>(retMech.GetReg0());
        if (opnd0->IsRegister()) {
            /* copy only when the value is not already in the return register */
            RegOperand *regOpnd = static_cast<RegOperand *>(opnd0);
            if (regOpnd->GetRegisterNumber() != retMech.GetReg0()) {
                RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, regOpnd->GetSize(), regTyp);
                SelectCopy(retOpnd, retMech.GetPrimTypeOfReg0(), *regOpnd, oriPrimType);
            }
        } else if (opnd0->IsMemoryAccessOperand()) {
            /* load the value from memory straight into the return register */
            auto *memopnd = static_cast<MemOperand *>(opnd0);
            RegOperand &retOpnd =
                GetOrCreatePhysicalRegisterOperand(retReg, GetPrimTypeBitSize(retMech.GetPrimTypeOfReg0()), regTyp);
            MOperator mOp = PickLdInsn(memopnd->GetSize(), retMech.GetPrimTypeOfReg0());
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, retOpnd, *memopnd));
        } else if (opnd0->IsConstImmediate()) {
            ImmOperand *immOpnd = static_cast<ImmOperand *>(opnd0);
            if (!is64x1vec) {
                RegOperand &retOpnd =
                    GetOrCreatePhysicalRegisterOperand(retReg, GetPrimTypeBitSize(retMech.GetPrimTypeOfReg0()),
                                                       GetRegTyFromPrimTy(retMech.GetPrimTypeOfReg0()));
                SelectCopy(retOpnd, retMech.GetPrimTypeOfReg0(), *immOpnd, retMech.GetPrimTypeOfReg0());
            } else {
                /* materialize the immediate in a GPR, then move it into the FP return register */
                PrimType rType = GetFunction().GetReturnType()->GetPrimType();
                RegOperand *reg = &CreateRegisterOperandOfType(rType);
                SelectCopy(*reg, rType, *immOpnd, rType);
                RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, GetPrimTypeBitSize(PTY_f64),
                                                                         GetRegTyFromPrimTy(PTY_f64));
                Insn &insn = GetInsnBuilder()->BuildInsn(MOP_xvmovdr, retOpnd, *reg);
                GetCurBB()->AppendInsn(insn);
            }
        } else {
            CHECK_FATAL(false, "nyi");
        }
    }
    GetExitBBsVec().emplace_back(GetCurBB());
}
3666
GetOrCreateSpecialRegisterOperand(PregIdx sregIdx,PrimType primType)3667 RegOperand &AArch64CGFunc::GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, PrimType primType)
3668 {
3669 switch (sregIdx) {
3670 case kSregSp:
3671 return GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt);
3672 case kSregFp:
3673 return GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt);
3674 default:
3675 break;
3676 }
3677
3678 bool useFpReg = !IsPrimitiveInteger(primType);
3679 AArch64reg pReg = RLAST_INT_REG;
3680 switch (sregIdx) {
3681 case kSregRetval0:
3682 pReg = useFpReg ? V0 : R0;
3683 break;
3684 case kSregRetval1:
3685 pReg = useFpReg ? V1 : R1;
3686 break;
3687 case kSregRetval2:
3688 pReg = V2;
3689 break;
3690 case kSregRetval3:
3691 pReg = V3;
3692 break;
3693 default:
3694 DEBUG_ASSERT(false, "Special pseudo registers NYI");
3695 break;
3696 }
3697 uint32 bitSize = GetPrimTypeBitSize(primType);
3698 bitSize = bitSize <= k32BitSize ? k32BitSize : bitSize;
3699 auto &phyOpnd = GetOrCreatePhysicalRegisterOperand(pReg, bitSize, GetRegTyFromPrimTy(primType));
3700 return SelectCopy(phyOpnd, primType, primType); // most opt only deal vreg, so return a vreg
3701 }
3702
GetOrCreatePhysicalRegisterOperand(AArch64reg regNO,uint32 size,RegType kind,uint32 flag)3703 RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(AArch64reg regNO, uint32 size, RegType kind, uint32 flag)
3704 {
3705 uint64 aarch64PhyRegIdx = regNO;
3706 DEBUG_ASSERT(flag == 0, "Do not expect flag here");
3707 if (size <= k32BitSize) {
3708 size = k32BitSize;
3709 aarch64PhyRegIdx = aarch64PhyRegIdx << 1;
3710 } else if (size <= k64BitSize) {
3711 size = k64BitSize;
3712 aarch64PhyRegIdx = (aarch64PhyRegIdx << 1) + 1;
3713 } else {
3714 size = (size == k128BitSize) ? k128BitSize : k64BitSize;
3715 aarch64PhyRegIdx = aarch64PhyRegIdx << k4BitShift;
3716 }
3717 RegOperand *phyRegOpnd = nullptr;
3718 auto phyRegIt = phyRegOperandTable.find(aarch64PhyRegIdx);
3719 if (phyRegIt != phyRegOperandTable.end()) {
3720 phyRegOpnd = phyRegOperandTable[aarch64PhyRegIdx];
3721 } else {
3722 phyRegOpnd = memPool->New<RegOperand>(regNO, size, kind, flag);
3723 phyRegOperandTable.emplace(aarch64PhyRegIdx, phyRegOpnd);
3724 }
3725 return *phyRegOpnd;
3726 }
3727
GetLabelOperand(LabelIdx labIdx) const3728 const LabelOperand *AArch64CGFunc::GetLabelOperand(LabelIdx labIdx) const
3729 {
3730 const MapleUnorderedMap<LabelIdx, LabelOperand *>::const_iterator it = hashLabelOpndTable.find(labIdx);
3731 if (it != hashLabelOpndTable.end()) {
3732 return it->second;
3733 }
3734 return nullptr;
3735 }
3736
GetOrCreateLabelOperand(LabelIdx labIdx)3737 LabelOperand &AArch64CGFunc::GetOrCreateLabelOperand(LabelIdx labIdx)
3738 {
3739 MapleUnorderedMap<LabelIdx, LabelOperand *>::iterator it = hashLabelOpndTable.find(labIdx);
3740 if (it != hashLabelOpndTable.end()) {
3741 return *(it->second);
3742 }
3743 LabelOperand *res = memPool->New<LabelOperand>(GetShortFuncName().c_str(), labIdx, *memPool);
3744 hashLabelOpndTable[labIdx] = res;
3745 return *res;
3746 }
3747
GetOrCreateLabelOperand(BB & bb)3748 LabelOperand &AArch64CGFunc::GetOrCreateLabelOperand(BB &bb)
3749 {
3750 LabelIdx labelIdx = bb.GetLabIdx();
3751 if (labelIdx == MIRLabelTable::GetDummyLabel()) {
3752 labelIdx = CreateLabel();
3753 bb.AddLabel(labelIdx);
3754 SetLab2BBMap(labelIdx, bb);
3755 }
3756 return GetOrCreateLabelOperand(labelIdx);
3757 }
3758
GetOrCreateOfstOpnd(uint64 offset,uint32 size)3759 OfstOperand &AArch64CGFunc::GetOrCreateOfstOpnd(uint64 offset, uint32 size)
3760 {
3761 uint64 aarch64OfstRegIdx = offset;
3762 aarch64OfstRegIdx = (aarch64OfstRegIdx << 1);
3763 if (size == k64BitSize) {
3764 ++aarch64OfstRegIdx;
3765 }
3766 DEBUG_ASSERT(size == k32BitSize || size == k64BitSize, "ofStOpnd size check");
3767 auto it = hashOfstOpndTable.find(aarch64OfstRegIdx);
3768 if (it != hashOfstOpndTable.end()) {
3769 return *it->second;
3770 }
3771 OfstOperand *res = &CreateOfstOpnd(offset, size);
3772 hashOfstOpndTable[aarch64OfstRegIdx] = res;
3773 return *res;
3774 }
3775
/* Build (or fetch a cached) memory operand addressing "symbol" at byte
 * "offset" with access width "size" bits. Only kScAuto/kScFormal storage is
 * supported here; other storage classes hit the NYI fatal. The base register
 * is FP/SP or the "vary" register depending on the symbol's memory segment.
 * NOTE(review): needLow12 and regOp are unused on this path — presumably for
 * storage classes not yet implemented; confirm before relying on them. */
MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(const MIRSymbol &symbol, int64 offset, uint32 size, bool forLocalRef,
                                              bool needLow12, RegOperand *regOp)
{
    MIRStorageClass storageClass = symbol.GetStorageClass();
    if ((storageClass == kScAuto) || (storageClass == kScFormal)) {
        AArch64SymbolAlloc *symLoc =
            static_cast<AArch64SymbolAlloc *>(GetMemlayout()->GetSymAllocInfo(symbol.GetStIndex()));
        if (forLocalRef) {
            /* local-ref symbols have their own location map */
            auto p = GetMemlayout()->GetLocalRefLocMap().find(symbol.GetStIdx());
            CHECK_FATAL(p != GetMemlayout()->GetLocalRefLocMap().end(), "sym loc should have been defined");
            symLoc = static_cast<AArch64SymbolAlloc *>(p->second);
        }
        DEBUG_ASSERT(symLoc != nullptr, "sym loc should have been defined");
        /* At this point, we don't know which registers the callee needs to save. */
        DEBUG_ASSERT((IsFPLRAddedToCalleeSavedList() || (SizeOfCalleeSaved() == 0)),
                     "CalleeSaved won't be known until after Register Allocation");
        StIdx idx = symbol.GetStIdx();
        /* first consult the cache of operands whose offsets may still need adjustment */
        auto it = memOpndsRequiringOffsetAdjustment.find(idx);
        DEBUG_ASSERT((!IsFPLRAddedToCalleeSavedList() ||
                      ((it != memOpndsRequiringOffsetAdjustment.end()) || (storageClass == kScFormal))),
                     "Memory operand of this symbol should have been added to the hash table");
        int32 stOffset = GetBaseOffset(*symLoc);
        if (it != memOpndsRequiringOffsetAdjustment.end()) {
            if (GetMemlayout()->IsLocalRefLoc(symbol)) {
                if (!forLocalRef) {
                    return *(it->second);
                }
            } else {
                /* reuse only when both the cached offset and the size match */
                Operand *offOpnd = (it->second)->GetOffset();
                DEBUG_ASSERT(static_cast<OfstOperand *>(offOpnd) != nullptr,
                             "static cast of offOpnd should not be nullptr");
                if (((static_cast<OfstOperand *>(offOpnd))->GetOffsetValue() == (stOffset + offset)) &&
                    (it->second->GetSize() == size)) {
                    return *(it->second);
                }
            }
        }
        /* then consult the cache of operands for stack-passed arguments */
        it = memOpndsForStkPassedArguments.find(idx);
        if (it != memOpndsForStkPassedArguments.end()) {
            if (GetMemlayout()->IsLocalRefLoc(symbol)) {
                if (!forLocalRef) {
                    return *(it->second);
                }
            } else {
                return *(it->second);
            }
        }

        RegOperand *baseOpnd = static_cast<RegOperand *>(GetBaseReg(*symLoc));
        int32 totalOffset = stOffset + static_cast<int32>(offset);
        /* needs a fresh copy of ImmOperand as we may adjust its offset at a later stage. */
        OfstOperand *offsetOpnd = nullptr;
        if (CGOptions::IsBigEndian()) {
            /* big-endian: sub-64-bit stack-passed args are offset by 4 within the slot */
            if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed && size < k64BitSize) {
                offsetOpnd = &CreateOfstOpnd(k4BitSize + static_cast<uint32>(totalOffset), k64BitSize);
            } else {
                offsetOpnd = &CreateOfstOpnd(static_cast<uint64>(static_cast<int64>(totalOffset)), k64BitSize);
            }
        } else {
            offsetOpnd = &CreateOfstOpnd(static_cast<uint64>(static_cast<int64>(totalOffset)), k64BitSize);
        }
        if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed &&
            MemOperand::IsPIMMOffsetOutOfRange(totalOffset, size)) {
            /* offset not encodable as an immediate: move it into a register
             * (marked kUnAdjustVary so it is patched after frame finalization) */
            ImmOperand *offsetOprand;
            offsetOprand = &CreateImmOperand(totalOffset, k64BitSize, true, kUnAdjustVary);
            Operand *resImmOpnd = &SelectCopy(*offsetOprand, PTY_i64, PTY_i64);
            return *CreateMemOperand(MemOperand::kAddrModeBOrX, size, *baseOpnd, static_cast<RegOperand &>(*resImmOpnd),
                                     nullptr, symbol, true);
        } else {
            if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) {
                offsetOpnd->SetVary(kUnAdjustVary);
            }
            MemOperand *res = CreateMemOperand(MemOperand::kAddrModeBOi, size, *baseOpnd, nullptr, offsetOpnd, &symbol);
            if (!forLocalRef) {
                /* cache for later offset-adjustment passes */
                memOpndsRequiringOffsetAdjustment[idx] = res;
            }
            return *res;
        }
    } else {
        CHECK_FATAL(false, "NYI");
    }
}
3858
HashMemOpnd(MemOperand & tMemOpnd)3859 MemOperand &AArch64CGFunc::HashMemOpnd(MemOperand &tMemOpnd)
3860 {
3861 auto it = hashMemOpndTable.find(tMemOpnd);
3862 if (it != hashMemOpndTable.end()) {
3863 return *(it->second);
3864 }
3865 auto *res = memPool->New<MemOperand>(tMemOpnd);
3866 hashMemOpndTable[tMemOpnd] = res;
3867 return *res;
3868 }
3869
GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode,uint32 size,RegOperand * base,RegOperand * index,ImmOperand * offset,const MIRSymbol * st)3870 MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand *base,
3871 RegOperand *index, ImmOperand *offset, const MIRSymbol *st)
3872 {
3873 DEBUG_ASSERT(base != nullptr, "nullptr check");
3874 MemOperand tMemOpnd(mode, size, *base, index, offset, st);
3875 if (base->GetRegisterNumber() == RFP || base->GetRegisterNumber() == RSP) {
3876 tMemOpnd.SetStackMem(true);
3877 }
3878 return HashMemOpnd(tMemOpnd);
3879 }
3880
GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode,uint32 size,RegOperand * base,RegOperand * index,int32 shift,bool isSigned)3881 MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand *base,
3882 RegOperand *index, int32 shift, bool isSigned)
3883 {
3884 DEBUG_ASSERT(base != nullptr, "nullptr check");
3885 MemOperand tMemOpnd(mode, size, *base, *index, shift, isSigned);
3886 if (base->GetRegisterNumber() == RFP || base->GetRegisterNumber() == RSP) {
3887 tMemOpnd.SetStackMem(true);
3888 }
3889 return HashMemOpnd(tMemOpnd);
3890 }
3891
GetOrCreateMemOpnd(MemOperand & oldMem)3892 MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand &oldMem)
3893 {
3894 return HashMemOpnd(oldMem);
3895 }
3896
/* offset: base offset from FP or SP */
/* Build a [base + offset] memory operand of "size" bits. Offsets that cannot
 * be encoded as an immediate are moved into a register and base+index
 * addressing is used instead. */
MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size)
{
    OfstOperand &offsetOpnd = CreateOfstOpnd(static_cast<uint64>(offset), k32BitSize);
    /* do not need to check bit size rotate of sign immediate */
    bool checkSimm = (offset > kMinSimm64 && offset < kMaxSimm64Pair);
    if (!checkSimm && !ImmOperand::IsInBitSizeRot(kMaxImmVal12Bits, offset)) {
        /* unencodable offset: materialize it in a register (base + index form) */
        Operand *resImmOpnd = &SelectCopy(CreateImmOperand(offset, k32BitSize, true), PTY_i32, PTY_i32);
        return *CreateMemOperand(MemOperand::kAddrModeBOrX, size, baseOpnd, static_cast<RegOperand *>(resImmOpnd),
                                 nullptr, nullptr);
    } else {
        return *CreateMemOperand(MemOperand::kAddrModeBOi, size, baseOpnd, nullptr, &offsetOpnd, nullptr);
    }
}
3911
3912 /* offset: base offset + #:lo12:Label+immediate */
CreateMemOpnd(RegOperand & baseOpnd,int64 offset,uint32 size,const MIRSymbol & sym)3913 MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size, const MIRSymbol &sym)
3914 {
3915 OfstOperand &offsetOpnd = CreateOfstOpnd(static_cast<uint64>(offset), k32BitSize);
3916 DEBUG_ASSERT(ImmOperand::IsInBitSizeRot(kMaxImmVal12Bits, offset), "");
3917 return *CreateMemOperand(MemOperand::kAddrModeBOi, size, baseOpnd, nullptr, &offsetOpnd, &sym);
3918 }
3919
3920 /*
3921 * case 1: iread a64 <* <* void>> 0 (add a64 (
3922 * addrof a64 $__reg_jni_func_tab$$libcore_all_bytecode,
3923 * mul a64 (
3924 * cvt a64 i32 (constval i32 21),
3925 * constval a64 8)))
3926 *
3927 * case 2 : iread u32 <* u8> 0 (add a64 (regread a64 %61, constval a64 3))
3928 * case 3 : iread u32 <* u8> 0 (add a64 (regread a64 %61, regread a64 %65))
3929 * case 4 : iread u32 <* u8> 0 (add a64 (cvt a64 i32(regread %n)))
3930 */
/* Try to build an aarch64 extended/indexed addressing-mode memory operand for
 * the address expression (see the case list above). Returns nullptr when the
 * expression does not match a supported pattern, in which case the caller
 * falls back to plain base+offset addressing. */
MemOperand *AArch64CGFunc::CheckAndCreateExtendMemOpnd(PrimType ptype, const BaseNode &addrExpr, int64 offset)
{
    aggParamReg = nullptr;
    /* only plain "add" address trees with no extra byte offset qualify */
    if (addrExpr.GetOpCode() != OP_add || offset != 0) {
        return nullptr;
    }
    BaseNode *baseExpr = addrExpr.Opnd(0);
    BaseNode *addendExpr = addrExpr.Opnd(1);

    if (baseExpr->GetOpCode() == OP_regread) {
        /* case 2 */
        if (addendExpr->GetOpCode() == OP_constval) {
            DEBUG_ASSERT(addrExpr.GetNumOpnds() == kOpndNum2, "Unepect expr operand in CheckAndCreateExtendMemOpnd");
            ConstvalNode *constOfstNode = static_cast<ConstvalNode *>(addendExpr);
            DEBUG_ASSERT(constOfstNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst");
            MIRIntConst *intOfst = safe_cast<MIRIntConst>(constOfstNode->GetConstVal());
            CHECK_FATAL(intOfst != nullptr, "just checking");
            /* discard large offset and negative offset */
            if (intOfst->GetExtValue() > INT32_MAX || intOfst->IsNegative()) {
                return nullptr;
            }
            uint32 scale = static_cast<uint32>(intOfst->GetExtValue());
            OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(scale, k32BitSize);
            uint32 dsize = GetPrimTypeBitSize(ptype);
            MemOperand *memOpnd =
                &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(ptype),
                                    SelectRegread(*static_cast<RegreadNode *>(baseExpr)), nullptr, &ofstOpnd, nullptr);
            /* only usable if the offset is encodable in the load instruction */
            return IsOperandImmValid(PickLdInsn(dsize, ptype), memOpnd, kInsnSecondOpnd) ? memOpnd : nullptr;
            /* case 3 */
        } else if (addendExpr->GetOpCode() == OP_regread) {
            CHECK_FATAL(addrExpr.GetNumOpnds() == kOpndNum2, "Unepect expr operand in CheckAndCreateExtendMemOpnd");
            /* base and index must have the same width for base+index addressing */
            if (GetPrimTypeSize(baseExpr->GetPrimType()) != GetPrimTypeSize(addendExpr->GetPrimType())) {
                return nullptr;
            }

            auto *baseReg = SelectRegread(*static_cast<RegreadNode *>(baseExpr));
            auto *indexReg = SelectRegread(*static_cast<RegreadNode *>(addendExpr));
            MemOperand *memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), baseReg,
                                                      indexReg, nullptr, nullptr);
            /* under ILP32, a signed 32-bit index needs sign extension */
            if (CGOptions::IsArm64ilp32() && IsSignedInteger(addendExpr->GetPrimType())) {
                memOpnd->SetExtend(memOpnd->GetExtend() | MemOperand::ExtendInfo::kSignExtend);
            }
            return memOpnd;
            /* case 4 */
        } else if (addendExpr->GetOpCode() == OP_cvt && addendExpr->GetNumOpnds() == 1) {
            int shiftAmount = 0;
            BaseNode *cvtRegreadNode = addendExpr->Opnd(kInsnFirstOpnd);
            if (cvtRegreadNode->GetOpCode() == OP_regread && cvtRegreadNode->IsLeaf()) {
                uint32 fromSize = GetPrimTypeBitSize(cvtRegreadNode->GetPrimType());
                uint32 toSize = GetPrimTypeBitSize(addendExpr->GetPrimType());

                /* truncating conversions cannot be folded into the addressing mode */
                if (toSize < fromSize) {
                    return nullptr;
                }

                /* a widening cvt becomes an extend on the index register (no shift) */
                MemOperand *memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype),
                                                          SelectRegread(*static_cast<RegreadNode *>(baseExpr)),
                                                          SelectRegread(*static_cast<RegreadNode *>(cvtRegreadNode)),
                                                          shiftAmount, toSize != fromSize);
                return memOpnd;
            }
        }
    }
    /* case 1: base + (index * scale); the scale must be a non-negative constant */
    if (addendExpr->GetOpCode() != OP_mul || !IsPrimitiveInteger(ptype)) {
        return nullptr;
    }
    BaseNode *indexExpr, *scaleExpr;
    indexExpr = addendExpr->Opnd(0);
    scaleExpr = addendExpr->Opnd(1);
    if (scaleExpr->GetOpCode() != OP_constval) {
        return nullptr;
    }
    ConstvalNode *constValNode = static_cast<ConstvalNode *>(scaleExpr);
    CHECK_FATAL(constValNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst");
    MIRIntConst *mirIntConst = safe_cast<MIRIntConst>(constValNode->GetConstVal());
    CHECK_FATAL(mirIntConst != nullptr, "just checking");
    int32 scale = mirIntConst->GetExtValue();
    if (scale < 0) {
        return nullptr;
    }
    uint32 unsignedScale = static_cast<uint32>(scale);
    /* the scale must equal the access size so it can become a shifted index */
    if (unsignedScale != GetPrimTypeSize(ptype) || indexExpr->GetOpCode() != OP_cvt) {
        return nullptr;
    }
    /* 8 is 1 << 3; 4 is 1 << 2; 2 is 1 << 1; 1 is 1 << 0 */
    int32 shift = (unsignedScale == 8) ? 3 : ((unsignedScale == 4) ? 2 : ((unsignedScale == 2) ? 1 : 0));
    RegOperand &base = static_cast<RegOperand &>(LoadIntoRegister(*HandleExpr(addrExpr, *baseExpr), PTY_a64));
    TypeCvtNode *typeCvtNode = static_cast<TypeCvtNode *>(indexExpr);
    PrimType fromType = typeCvtNode->FromType();
    PrimType toType = typeCvtNode->GetPrimType();
    MemOperand *memOpnd = nullptr;
    if ((fromType == PTY_i32) && (toType == PTY_a64)) {
        /* i32 index: sign-extended */
        RegOperand &index =
            static_cast<RegOperand &>(LoadIntoRegister(*HandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_i32));
        memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), &base, &index, shift, true);
    } else if ((fromType == PTY_u32) && (toType == PTY_a64)) {
        /* u32 index: zero-extended */
        RegOperand &index =
            static_cast<RegOperand &>(LoadIntoRegister(*HandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_u32));
        memOpnd =
            &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), &base, &index, shift, false);
    }
    return memOpnd;
}
4034
CreateNonExtendMemOpnd(PrimType ptype,const BaseNode & parent,BaseNode & addrExpr,int64 offset)4035 MemOperand &AArch64CGFunc::CreateNonExtendMemOpnd(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr,
4036 int64 offset)
4037 {
4038 Operand *addrOpnd = nullptr;
4039 if ((addrExpr.GetOpCode() == OP_add || addrExpr.GetOpCode() == OP_sub) &&
4040 addrExpr.Opnd(1)->GetOpCode() == OP_constval) {
4041 addrOpnd = HandleExpr(addrExpr, *addrExpr.Opnd(0));
4042 ConstvalNode *constOfstNode = static_cast<ConstvalNode *>(addrExpr.Opnd(1));
4043 DEBUG_ASSERT(constOfstNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst");
4044 MIRIntConst *intOfst = safe_cast<MIRIntConst>(constOfstNode->GetConstVal());
4045 CHECK_FATAL(intOfst != nullptr, "just checking");
4046 offset = (addrExpr.GetOpCode() == OP_add) ? offset + intOfst->GetSXTValue() : offset - intOfst->GetSXTValue();
4047 } else {
4048 addrOpnd = HandleExpr(parent, addrExpr);
4049 }
4050 addrOpnd = static_cast<RegOperand *>(&LoadIntoRegister(*addrOpnd, PTY_a64));
4051 {
4052 OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast<uint64>(offset), k64BitSize);
4053 return GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(ptype),
4054 static_cast<RegOperand *>(addrOpnd), nullptr, &ofstOpnd, nullptr);
4055 }
4056 }
4057
4058 /*
4059 * Create a memory operand with specified data type and memory ordering, making
4060 * use of aarch64 extend register addressing mode when possible.
4061 */
CreateMemOpnd(PrimType ptype,const BaseNode & parent,BaseNode & addrExpr,int64 offset)4062 MemOperand &AArch64CGFunc::CreateMemOpnd(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset)
4063 {
4064 MemOperand *memOpnd = CheckAndCreateExtendMemOpnd(ptype, addrExpr, offset);
4065 if (memOpnd != nullptr) {
4066 return *memOpnd;
4067 }
4068 return CreateNonExtendMemOpnd(ptype, parent, addrExpr, offset);
4069 }
4070
CreateMemOpndOrNull(PrimType ptype,const BaseNode & parent,BaseNode & addrExpr,int64 offset)4071 MemOperand *AArch64CGFunc::CreateMemOpndOrNull(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset)
4072 {
4073 MemOperand *memOpnd = CheckAndCreateExtendMemOpnd(ptype, addrExpr, offset);
4074 if (memOpnd != nullptr) {
4075 return memOpnd;
4076 } else if (aggParamReg != nullptr) {
4077 return nullptr;
4078 }
4079 return &CreateNonExtendMemOpnd(ptype, parent, addrExpr, offset);
4080 }
4081
GetOrCreateFuncNameOpnd(const MIRSymbol & symbol) const4082 Operand &AArch64CGFunc::GetOrCreateFuncNameOpnd(const MIRSymbol &symbol) const
4083 {
4084 return *memPool->New<FuncNameOperand>(symbol);
4085 }
4086
GetOrCreateRflag()4087 Operand &AArch64CGFunc::GetOrCreateRflag()
4088 {
4089 if (rcc == nullptr) {
4090 rcc = &CreateRflagOperand();
4091 }
4092 return *rcc;
4093 }
4094
GetRflag() const4095 const Operand *AArch64CGFunc::GetRflag() const
4096 {
4097 return rcc;
4098 }
4099
GetOrCreatevaryreg()4100 RegOperand &AArch64CGFunc::GetOrCreatevaryreg()
4101 {
4102 if (vary == nullptr) {
4103 regno_t vRegNO = NewVReg(kRegTyVary, k8ByteSize);
4104 vary = &CreateVirtualRegisterOperand(vRegNO);
4105 }
4106 return *vary;
4107 }
4108
GetBaseReg(const SymbolAlloc & symAlloc)4109 RegOperand *AArch64CGFunc::GetBaseReg(const SymbolAlloc &symAlloc)
4110 {
4111 MemSegmentKind sgKind = symAlloc.GetMemSegment()->GetMemSegmentKind();
4112 DEBUG_ASSERT(((sgKind == kMsArgsRegPassed) || (sgKind == kMsLocals) || (sgKind == kMsRefLocals) ||
4113 (sgKind == kMsArgsToStkPass) || (sgKind == kMsArgsStkPassed)),
4114 "NYI");
4115
4116 if (sgKind == kMsArgsStkPassed || sgKind == kMsCold) {
4117 return &GetOrCreatevaryreg();
4118 }
4119
4120 if (fsp == nullptr) {
4121 fsp = &GetOrCreatePhysicalRegisterOperand(RFP, GetPointerSize() * kBitsPerByte, kRegTyInt);
4122 }
4123 return fsp;
4124 }
4125
GetBaseOffset(const SymbolAlloc & symbolAlloc)4126 int32 AArch64CGFunc::GetBaseOffset(const SymbolAlloc &symbolAlloc)
4127 {
4128 const AArch64SymbolAlloc *symAlloc = static_cast<const AArch64SymbolAlloc *>(&symbolAlloc);
4129 // Call Frame layout of AArch64
4130 // Refer to V2 in aarch64_memlayout.h.
4131 // Do Not change this unless you know what you do
4132 // O2 mode refer to V2.1 in aarch64_memlayout.cpp
4133 const int32 sizeofFplr = static_cast<int32>(2 * kAarch64IntregBytelen);
4134 MemSegmentKind sgKind = symAlloc->GetMemSegment()->GetMemSegmentKind();
4135 AArch64MemLayout *memLayout = static_cast<AArch64MemLayout *>(this->GetMemlayout());
4136 if (sgKind == kMsArgsStkPassed) { /* for callees */
4137 int32 offset = static_cast<int32>(symAlloc->GetOffset());
4138 offset += static_cast<int32>(memLayout->GetSizeOfColdToStk());
4139 return offset;
4140 } else if (sgKind == kMsCold) {
4141 int offset = static_cast<int32>(symAlloc->GetOffset());
4142 return offset;
4143 } else if (sgKind == kMsArgsRegPassed) {
4144 int32 baseOffset = static_cast<int32>(memLayout->GetSizeOfLocals() + memLayout->GetSizeOfRefLocals()) +
4145 static_cast<int32>(symAlloc->GetOffset());
4146 return baseOffset + sizeofFplr;
4147 } else if (sgKind == kMsRefLocals) {
4148 int32 baseOffset = static_cast<int32>(symAlloc->GetOffset()) + static_cast<int32>(memLayout->GetSizeOfLocals());
4149 return baseOffset + sizeofFplr;
4150 } else if (sgKind == kMsLocals) {
4151 int32 baseOffset = symAlloc->GetOffset();
4152 return baseOffset + sizeofFplr;
4153 } else if (sgKind == kMsSpillReg) {
4154 int32 baseOffset = static_cast<int32>(symAlloc->GetOffset()) +
4155 static_cast<int32>(memLayout->SizeOfArgsRegisterPassed() + memLayout->GetSizeOfLocals() +
4156 memLayout->GetSizeOfRefLocals());
4157 return baseOffset + sizeofFplr;
4158 } else if (sgKind == kMsArgsToStkPass) { /* this is for callers */
4159 return static_cast<int32>(symAlloc->GetOffset());
4160 } else {
4161 CHECK_FATAL(false, "sgKind check");
4162 }
4163 return 0;
4164 }
4165
AppendCall(const MIRSymbol & funcSymbol)4166 void AArch64CGFunc::AppendCall(const MIRSymbol &funcSymbol)
4167 {
4168 ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator());
4169 AppendCall(funcSymbol, *srcOpnds);
4170 }
4171
/* Emit "resOpnd = opnd0 + opnd1" (opnd1 an immediate) relative to an existing
 * insn: new instructions go after insn when isDest is true, otherwise before.
 * Immediates up to 24 bits are split into a shifted add (high 12 bits) plus a
 * plain add (low 12 bits); anything wider is materialized in scratch R16. */
void AArch64CGFunc::SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, bool isDest,
                                       Insn &insn)
{
    uint32 dsize = GetPrimTypeBitSize(primType);
    bool is64Bits = (dsize == k64BitSize);
    DEBUG_ASSERT(opnd0.GetKind() == Operand::kOpdRegister, "Spill memory operand should based on register");
    DEBUG_ASSERT((opnd1.GetKind() == Operand::kOpdImmediate || opnd1.GetKind() == Operand::kOpdOffset),
                 "Spill memory operand should be with a immediate offset.");

    ImmOperand *immOpnd = static_cast<ImmOperand *>(&opnd1);

    MOperator mOpCode = MOP_undef;
    Insn *curInsn = &insn;
    /* lower 24 bits has 1, higher bits are all 0 */
    if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) {
        /* lower 12 bits and higher 12 bits both has 1 */
        Operand *newOpnd0 = &opnd0;
        if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) {
            /* process higher 12 bits */
            ImmOperand &immOpnd2 =
                CreateImmOperand(static_cast<int64>(static_cast<uint64>(immOpnd->GetValue()) >> kMaxImmVal12Bits),
                                 immOpnd->GetSize(), immOpnd->IsSignedValue());
            mOpCode = is64Bits ? MOP_xaddrri24 : MOP_waddrri24;
            BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize);
            Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, immOpnd2, shiftopnd);
            DEBUG_ASSERT(IsOperandImmValid(mOpCode, &immOpnd2, kInsnThirdOpnd), "immOpnd2 appears invalid");
            if (isDest) {
                insn.GetBB()->InsertInsnAfter(insn, newInsn);
            } else {
                insn.GetBB()->InsertInsnBefore(insn, newInsn);
            }
            /* get lower 12 bits value */
            immOpnd->ModuloByPow2(static_cast<int32>(kMaxImmVal12Bits));
            /* the second add must chain off the partial result in resOpnd */
            newOpnd0 = &resOpnd;
            curInsn = &newInsn;
        }
        /* process lower 12 bits value */
        mOpCode = is64Bits ? MOP_xaddrri12 : MOP_waddrri12;
        Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *newOpnd0, *immOpnd);
        DEBUG_ASSERT(IsOperandImmValid(mOpCode, immOpnd, kInsnThirdOpnd), "immOpnd appears invalid");
        if (isDest) {
            /* insert after curInsn so the two adds stay in order */
            insn.GetBB()->InsertInsnAfter(*curInsn, newInsn);
        } else {
            insn.GetBB()->InsertInsnBefore(insn, newInsn);
        }
    } else {
        /* load into register */
        RegOperand &movOpnd = GetOrCreatePhysicalRegisterOperand(R16, dsize, kRegTyInt);
        mOpCode = is64Bits ? MOP_xmovri64 : MOP_wmovri32;
        Insn &movInsn = GetInsnBuilder()->BuildInsn(mOpCode, movOpnd, *immOpnd);
        mOpCode = is64Bits ? MOP_xaddrrr : MOP_waddrrr;
        Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, movOpnd);
        if (isDest) {
            /* inserted in reverse so the final order after insn is mov, then add */
            (void)insn.GetBB()->InsertInsnAfter(insn, newInsn);
            (void)insn.GetBB()->InsertInsnAfter(insn, movInsn);
        } else {
            (void)insn.GetBB()->InsertInsnBefore(insn, movInsn);
            (void)insn.GetBB()->InsertInsnBefore(insn, newInsn);
        }
    }
}
4233
AdjustMemOperandIfOffsetOutOfRange(MemOperand * memOpnd,regno_t vrNum,bool isDest,Insn & insn,AArch64reg regNum,bool & isOutOfRange)4234 MemOperand *AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, bool isDest,
4235 Insn &insn, AArch64reg regNum, bool &isOutOfRange)
4236 {
4237 if (vrNum >= vReg.VRegTableSize()) {
4238 CHECK_FATAL(false, "index out of range in AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange");
4239 }
4240 uint32 dataSize = GetOrCreateVirtualRegisterOperand(vrNum).GetSize();
4241 if (IsImmediateOffsetOutOfRange(*memOpnd, dataSize) && CheckIfSplitOffsetWithAdd(*memOpnd, dataSize)) {
4242 isOutOfRange = true;
4243 memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize, regNum, isDest, &insn);
4244 } else {
4245 isOutOfRange = false;
4246 }
4247 return memOpnd;
4248 }
4249
FreeSpillRegMem(regno_t vrNum)4250 void AArch64CGFunc::FreeSpillRegMem(regno_t vrNum)
4251 {
4252 MemOperand *memOpnd = nullptr;
4253
4254 auto p = spillRegMemOperands.find(vrNum);
4255 if (p != spillRegMemOperands.end()) {
4256 memOpnd = p->second;
4257 }
4258
4259 if ((memOpnd == nullptr) && IsVRegNOForPseudoRegister(vrNum)) {
4260 auto pSecond = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum));
4261 if (pSecond != pRegSpillMemOperands.end()) {
4262 memOpnd = pSecond->second;
4263 }
4264 }
4265
4266 if (memOpnd == nullptr) {
4267 DEBUG_ASSERT(false, "free spillreg have no mem");
4268 return;
4269 }
4270
4271 uint32 size = memOpnd->GetSize();
4272 MapleUnorderedMap<uint32, SpillMemOperandSet *>::iterator iter;
4273 if ((iter = reuseSpillLocMem.find(size)) != reuseSpillLocMem.end()) {
4274 iter->second->Add(*memOpnd);
4275 } else {
4276 reuseSpillLocMem[size] = memPool->New<SpillMemOperandSet>(*GetFuncScopeAllocator());
4277 reuseSpillLocMem[size]->Add(*memOpnd);
4278 }
4279 }
4280
GetOrCreatSpillMem(regno_t vrNum,uint32 memSize)4281 MemOperand *AArch64CGFunc::GetOrCreatSpillMem(regno_t vrNum, uint32 memSize)
4282 {
4283 /* NOTES: must used in RA, not used in other place. */
4284 if (IsVRegNOForPseudoRegister(vrNum)) {
4285 auto p = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum));
4286 if (p != pRegSpillMemOperands.end()) {
4287 return p->second;
4288 }
4289 }
4290
4291 auto p = spillRegMemOperands.find(vrNum);
4292 if (p == spillRegMemOperands.end()) {
4293 if (vrNum >= vReg.VRegTableSize()) {
4294 CHECK_FATAL(false, "index out of range in AArch64CGFunc::FreeSpillRegMem");
4295 }
4296 uint32 memBitSize = (memSize <= k32BitSize) ? k32BitSize : (memSize <= k64BitSize) ? k64BitSize : k128BitSize;
4297 auto it = reuseSpillLocMem.find(memBitSize);
4298 if (it != reuseSpillLocMem.end()) {
4299 MemOperand *memOpnd = it->second->GetOne();
4300 if (memOpnd != nullptr) {
4301 (void)spillRegMemOperands.emplace(std::pair<regno_t, MemOperand *>(vrNum, memOpnd));
4302 return memOpnd;
4303 }
4304 }
4305
4306 RegOperand &baseOpnd = GetOrCreateStackBaseRegOperand();
4307 int64 offset = GetOrCreatSpillRegLocation(vrNum, memBitSize / kBitsPerByte);
4308 MemOperand *memOpnd = nullptr;
4309 OfstOperand *offsetOpnd = &CreateOfstOpnd(static_cast<uint64>(offset), k64BitSize);
4310 memOpnd = CreateMemOperand(MemOperand::kAddrModeBOi, memBitSize, baseOpnd, nullptr, offsetOpnd, nullptr);
4311 (void)spillRegMemOperands.emplace(std::pair<regno_t, MemOperand *>(vrNum, memOpnd));
4312 return memOpnd;
4313 } else {
4314 return p->second;
4315 }
4316 }
4317
GetPseudoRegisterSpillMemoryOperand(PregIdx i)4318 MemOperand *AArch64CGFunc::GetPseudoRegisterSpillMemoryOperand(PregIdx i)
4319 {
4320 MapleUnorderedMap<PregIdx, MemOperand *>::iterator p;
4321 if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) {
4322 p = pRegSpillMemOperands.end();
4323 } else {
4324 p = pRegSpillMemOperands.find(i);
4325 }
4326 if (p != pRegSpillMemOperands.end()) {
4327 return p->second;
4328 }
4329 int64 offset = GetPseudoRegisterSpillLocation(i);
4330 MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(i);
4331 uint32 bitLen = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte;
4332 RegOperand &base = GetOrCreateFramePointerRegOperand();
4333
4334 OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast<uint64>(offset), k32BitSize);
4335 MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitLen, &base, nullptr, &ofstOpnd, nullptr);
4336 if (IsImmediateOffsetOutOfRange(memOpnd, bitLen)) {
4337 MemOperand &newMemOpnd = SplitOffsetWithAddInstruction(memOpnd, bitLen);
4338 (void)pRegSpillMemOperands.emplace(std::pair<PregIdx, MemOperand *>(i, &newMemOpnd));
4339 return &newMemOpnd;
4340 }
4341 (void)pRegSpillMemOperands.emplace(std::pair<PregIdx, MemOperand *>(i, &memOpnd));
4342 return &memOpnd;
4343 }
4344
AppendCall(const MIRSymbol & sym,ListOperand & srcOpnds)4345 Insn &AArch64CGFunc::AppendCall(const MIRSymbol &sym, ListOperand &srcOpnds)
4346 {
4347 Insn *callInsn = nullptr;
4348 Operand &targetOpnd = GetOrCreateFuncNameOpnd(sym);
4349 callInsn = &GetInsnBuilder()->BuildInsn(MOP_xbl, targetOpnd, srcOpnds);
4350 GetCurBB()->AppendInsn(*callInsn);
4351 GetCurBB()->SetHasCall();
4352 return *callInsn;
4353 }
4354
4355 // output
4356 // add_with_overflow/ sub_with_overflow:
4357 // w1: parm1
4358 // w2: parm2
4359 // adds/subs w0, w1, w2
4360 // cset w3, vs
4361
4362 // mul_with_overflow:
4363 // w1: parm1
4364 // w2: parm2
4365 // smull x0, w0, w1
4366 // cmp x0, w0, sxtw
4367 // cset w4, ne
SelectOverFlowCall(const IntrinsiccallNode & intrnNode)4368 void AArch64CGFunc::SelectOverFlowCall(const IntrinsiccallNode &intrnNode)
4369 {
4370 DEBUG_ASSERT(intrnNode.NumOpnds() == 2, "must be 2 operands"); // must be 2 operands
4371 MIRIntrinsicID intrinsic = intrnNode.GetIntrinsic();
4372 PrimType type = intrnNode.Opnd(0)->GetPrimType();
4373 PrimType type2 = intrnNode.Opnd(1)->GetPrimType();
4374 CHECK_FATAL(type == PTY_i32 || type == PTY_u32, "only support i32 or u32 here");
4375 CHECK_FATAL(type2 == PTY_i32 || type2 == PTY_u32, "only support i32 or u32 here");
4376 // deal with parms
4377 RegOperand &opnd0 = LoadIntoRegister(*HandleExpr(intrnNode, *intrnNode.Opnd(0)),
4378 intrnNode.Opnd(0)->GetPrimType()); /* first argument of intrinsic */
4379 RegOperand &opnd1 = LoadIntoRegister(*HandleExpr(intrnNode, *intrnNode.Opnd(1)),
4380 intrnNode.Opnd(1)->GetPrimType()); /* first argument of intrinsic */
4381 auto *retVals = &intrnNode.GetReturnVec();
4382 CHECK_FATAL(retVals->size() == k2ByteSize, "there must be two return values");
4383 PregIdx pregIdx = (*retVals)[0].second.GetPregIdx();
4384 PregIdx pregIdx2 = (*retVals)[1].second.GetPregIdx();
4385 RegOperand &resReg = GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx));
4386 RegOperand &resReg2 = GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx2));
4387 Operand &rflag = GetOrCreateRflag();
4388 // arith operation with set flag
4389 if (intrinsic == INTRN_ADD_WITH_OVERFLOW) {
4390 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_waddsrrr, rflag, resReg, opnd0, opnd1));
4391 SelectAArch64CSet(resReg2, GetCondOperand(CC_VS), false);
4392 } else if (intrinsic == INTRN_SUB_WITH_OVERFLOW) {
4393 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wsubsrrr, rflag, resReg, opnd0, opnd1));
4394 SelectAArch64CSet(resReg2, GetCondOperand(CC_VS), false);
4395 } else if (intrinsic == INTRN_MUL_WITH_OVERFLOW) {
4396 // smull
4397 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xsmullrrr, resReg, opnd0, opnd1));
4398 Operand &sxtw = CreateExtendShiftOperand(ExtendShiftOperand::kSXTW, 0, k3BitSize);
4399 Insn &cmpInsn = GetInsnBuilder()->BuildInsn(MOP_xwcmprre, rflag, resReg, resReg, sxtw);
4400 GetCurBB()->AppendInsn(cmpInsn);
4401 SelectAArch64CSet(resReg2, GetCondOperand(CC_NE), false);
4402 } else {
4403 CHECK_FATAL(false, "niy");
4404 }
4405 }
4406
LoadOpndIntoPhysicalRegister(const IntrinsiccallNode & intrnNode,uint32 index)4407 RegOperand &AArch64CGFunc::LoadOpndIntoPhysicalRegister(const IntrinsiccallNode &intrnNode, uint32 index)
4408 {
4409 auto &opnd = *intrnNode.Opnd(index);
4410 auto ptyp = opnd.GetPrimType();
4411 RegOperand &opndReg = LoadIntoRegister(*HandleExpr(intrnNode, opnd), ptyp);
4412 AArch64reg regId;
4413 switch (index - 1) {
4414 case kFirstReg:
4415 regId = static_cast<AArch64reg>(R0);
4416 break;
4417 case kSecondReg:
4418 regId = static_cast<AArch64reg>(R1);
4419 break;
4420 case kThirdReg:
4421 regId = static_cast<AArch64reg>(R2);
4422 break;
4423 case kFourthReg:
4424 regId = static_cast<AArch64reg>(R3);
4425 break;
4426 case kFifthReg:
4427 regId = static_cast<AArch64reg>(R4);
4428 break;
4429 case kSixthReg:
4430 regId = static_cast<AArch64reg>(R5);
4431 break;
4432 default:
4433 CHECK_FATAL_FALSE("Unreachable!");
4434 }
4435 RegOperand &realReg = GetOrCreatePhysicalRegisterOperand(regId, opndReg.GetSize(), GetRegTyFromPrimTy(ptyp));
4436 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickMovBetweenRegs(ptyp, ptyp), realReg, opndReg));
4437 return realReg;
4438 }
4439
4440
SelectPureCall(const IntrinsiccallNode & intrnNode)4441 void AArch64CGFunc::SelectPureCall(const IntrinsiccallNode &intrnNode)
4442 {
4443 DEBUG_ASSERT(intrnNode.NumOpnds() == 7, "must be 7 operands"); // must be 7 operands
4444 // deal with parms
4445 ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator());
4446 auto &callee = *intrnNode.Opnd(0);
4447 auto ptyp = callee.GetPrimType();
4448 RegOperand &calleeReg = LoadIntoRegister(*HandleExpr(intrnNode, callee), ptyp);
4449 uint32 i = 1;
4450 for (; i < kSeventhReg; i++) {
4451 srcOpnds->PushOpnd(LoadOpndIntoPhysicalRegister(intrnNode, i));
4452 }
4453 // R15 is used in asm call
4454 srcOpnds->PushOpnd(GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(R15),
4455 GetPointerSize() * kBitsPerByte, kRegTyInt));
4456 Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_pure_call, calleeReg, *srcOpnds);
4457 GetCurBB()->AppendInsn(callInsn);
4458 }
4459
SelectIntrinsicCall(IntrinsiccallNode & intrinsiccallNode)4460 void AArch64CGFunc::SelectIntrinsicCall(IntrinsiccallNode &intrinsiccallNode)
4461 {
4462 MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic();
4463
4464 if (GetCG()->GenerateVerboseCG()) {
4465 std::string comment = GetIntrinsicName(intrinsic);
4466 GetCurBB()->AppendInsn(CreateCommentInsn(comment));
4467 }
4468 if (intrinsic == INTRN_ADD_WITH_OVERFLOW || intrinsic == INTRN_SUB_WITH_OVERFLOW ||
4469 intrinsic == INTRN_MUL_WITH_OVERFLOW) {
4470 SelectOverFlowCall(intrinsiccallNode);
4471 return;
4472 }
4473 if (intrinsic == maple::INTRN_JS_PURE_CALL) {
4474 SelectPureCall(intrinsiccallNode);
4475 return;
4476 }
4477 }
4478
SelectCclz(IntrinsicopNode & intrnNode)4479 Operand *AArch64CGFunc::SelectCclz(IntrinsicopNode &intrnNode)
4480 {
4481 BaseNode *argexpr = intrnNode.Opnd(0);
4482 PrimType ptype = argexpr->GetPrimType();
4483 Operand *opnd = HandleExpr(intrnNode, *argexpr);
4484 MOperator mop;
4485
4486 RegOperand &ldDest = CreateRegisterOperandOfType(ptype);
4487 if (opnd->IsMemoryAccessOperand()) {
4488 Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd);
4489 GetCurBB()->AppendInsn(insn);
4490 opnd = &ldDest;
4491 } else if (opnd->IsImmediate()) {
4492 SelectCopyImm(ldDest, *static_cast<ImmOperand *>(opnd), ptype);
4493 opnd = &ldDest;
4494 }
4495
4496 if (GetPrimTypeSize(ptype) == k4ByteSize) {
4497 mop = MOP_wclz;
4498 } else {
4499 mop = MOP_xclz;
4500 }
4501 RegOperand &dst = CreateRegisterOperandOfType(ptype);
4502 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, dst, *opnd));
4503 return &dst;
4504 }
4505
GetRegisterType(regno_t reg) const4506 RegType AArch64CGFunc::GetRegisterType(regno_t reg) const
4507 {
4508 if (AArch64isa::IsPhysicalRegister(reg)) {
4509 return AArch64isa::GetRegType(static_cast<AArch64reg>(reg));
4510 } else if (reg == kRFLAG) {
4511 return kRegTyCc;
4512 } else {
4513 return CGFunc::GetRegisterType(reg);
4514 }
4515 }
4516
LoadStructCopyBase(const MIRSymbol & symbol,int64 offset,int dataSize)4517 MemOperand &AArch64CGFunc::LoadStructCopyBase(const MIRSymbol &symbol, int64 offset, int dataSize)
4518 {
4519 /* For struct formals > 16 bytes, this is the pointer to the struct copy. */
4520 /* Load the base pointer first. */
4521 RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
4522 MemOperand *baseMemOpnd = &GetOrCreateMemOpnd(symbol, 0, k64BitSize);
4523 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *baseMemOpnd));
4524 /* Create the indirect load mem opnd from the base pointer. */
4525 return CreateMemOpnd(*vreg, offset, static_cast<uint32>(dataSize));
4526 }
4527
/* For long branch, insert an unconditional branch.
 * From                           To
 *   cond_br target_label           reverse_cond_br fallthru_label
 *   fallthruBB                     unconditional br target_label
 *                                fallthru_label:
 *                                  fallthruBB
 */
void AArch64CGFunc::InsertJumpPad(Insn *insn)
{
    /* Rewrite a conditional branch (see comment above): flip its condition so
     * it jumps to the fallthrough block, and insert a new goto BB (longBrBB)
     * holding an unconditional branch to the original target. */
    BB *bb = insn->GetBB();
    DEBUG_ASSERT(bb, "instruction has no bb");
    DEBUG_ASSERT(bb->GetKind() == BB::kBBIf || bb->GetKind() == BB::kBBGoto,
                 "instruction is in neither if bb nor goto bb");
    if (bb->GetKind() == BB::kBBGoto) {
        return;
    }
    DEBUG_ASSERT(bb->NumSuccs() == k2ByteSize, "if bb should have 2 successors");

    BB *longBrBB = CreateNewBB();

    /* Make sure the fallthrough block has a label the flipped branch can target. */
    BB *fallthruBB = bb->GetNext();
    LabelIdx fallthruLBL = fallthruBB->GetLabIdx();
    if (fallthruLBL == 0) {
        fallthruLBL = CreateLabel();
        SetLab2BBMap(static_cast<int32>(fallthruLBL), *fallthruBB);
        fallthruBB->AddLabel(fallthruLBL);
    }

    /* The non-fallthrough successor is the original branch target. */
    BB *targetBB;
    if (bb->GetSuccs().front() == fallthruBB) {
        targetBB = bb->GetSuccs().back();
    } else {
        targetBB = bb->GetSuccs().front();
    }
    LabelIdx targetLBL = targetBB->GetLabIdx();
    if (targetLBL == 0) {
        targetLBL = CreateLabel();
        SetLab2BBMap(static_cast<int32>(targetLBL), *targetBB);
        targetBB->AddLabel(targetLBL);
    }

    // Adjustment on br and CFG
    bb->RemoveSuccs(*targetBB);
    bb->PushBackSuccs(*longBrBB);
    bb->SetNext(longBrBB);
    // reverse cond br targeting fallthruBB
    uint32 targetIdx = AArch64isa::GetJumpTargetIdx(*insn);
    MOperator mOp = AArch64isa::FlipConditionOp(insn->GetMachineOpcode());
    insn->SetMOP(AArch64CG::kMd[mOp]);
    LabelOperand &fallthruBBLBLOpnd = GetOrCreateLabelOperand(fallthruLBL);
    insn->SetOperand(targetIdx, fallthruBBLBLOpnd);

    /* Wire longBrBB into the CFG and into the bb chain between bb and fallthruBB. */
    longBrBB->PushBackPreds(*bb);
    longBrBB->PushBackSuccs(*targetBB);
    LabelOperand &targetLBLOpnd = GetOrCreateLabelOperand(targetLBL);
    longBrBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetLBLOpnd));
    longBrBB->SetPrev(bb);
    longBrBB->SetNext(fallthruBB);
    longBrBB->SetKind(BB::kBBGoto);

    fallthruBB->SetPrev(longBrBB);

    /* bb no longer reaches targetBB directly; longBrBB does. */
    targetBB->RemovePreds(*bb);
    targetBB->PushBackPreds(*longBrBB);
}
4593
/* Check the distance between the first insn of the BB with the label (targLabIdx)
 * and the insn with id targId. If the distance is greater than or equal to
 * maxDistance, return false.
 */
bool AArch64CGFunc::DistanceCheck(const BB &bb, LabelIdx targLabIdx, uint32 targId, uint32 maxDistance) const
{
    /* Find the successor of bb that carries the target label. */
    for (auto *tBB : bb.GetSuccs()) {
        if (tBB->GetLabIdx() != targLabIdx) {
            continue;
        }
        /* Skip forward to the first real machine instruction, walking into the
         * next block whenever the current one has no instructions at all. */
        Insn *tInsn = tBB->GetFirstInsn();
        while (tInsn == nullptr || !tInsn->IsMachineInstruction()) {
            if (tInsn == nullptr) {
                tBB = tBB->GetNext();
                if (tBB == nullptr) { /* tailcallopt may make the target block empty */
                    return true;
                }
                tInsn = tBB->GetFirstInsn();
            } else {
                tInsn = tInsn->GetNext();
            }
        }
        /* Absolute id distance between that instruction and targId. */
        uint32 tmp = (tInsn->GetId() > targId) ? (tInsn->GetId() - targId) : (targId - tInsn->GetId());
        return (tmp < maxDistance);
    }
    /* No successor carries the label: the CFG is inconsistent. */
    CHECK_FATAL(false, "CFG error");
}
4621 } /* namespace maplebe */
4622