/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "aarch64_cg.h"
#include "aarch64_cgfunc.h"

namespace maplebe {
using namespace maple;
CondOperand AArch64CGFunc::ccOperands[kCcLast] = {
    CondOperand(CC_EQ), CondOperand(CC_NE), CondOperand(CC_CS), CondOperand(CC_HS), CondOperand(CC_CC),
    CondOperand(CC_LO), CondOperand(CC_MI), CondOperand(CC_PL), CondOperand(CC_VS), CondOperand(CC_VC),
    CondOperand(CC_HI), CondOperand(CC_LS), CondOperand(CC_GE), CondOperand(CC_LT), CondOperand(CC_GT),
    CondOperand(CC_LE), CondOperand(CC_AL),
};

namespace {
constexpr int32 kSignedDimension = 2;        /* signed and unsigned */
constexpr int32 kIntByteSizeDimension = 4;   /* 1 byte, 2 bytes, 4 bytes, 8 bytes */
constexpr int32 kFloatByteSizeDimension = 3; /* 4 bytes, 8 bytes, 16 bytes(vector) */
constexpr int32 kShiftAmount12 = 12;         /* for instructions that can use a shift, the amount must be 0 or 12 */

MOperator ldIs[kSignedDimension][kIntByteSizeDimension] = {
    /* unsigned == 0 */
    {MOP_wldrb, MOP_wldrh, MOP_wldr, MOP_xldr},
    /* signed == 1 */
    {MOP_wldrsb, MOP_wldrsh, MOP_wldr, MOP_xldr}};

MOperator stIs[kSignedDimension][kIntByteSizeDimension] = {
    /* unsigned == 0 */
    {MOP_wstrb, MOP_wstrh, MOP_wstr, MOP_xstr},
    /* signed == 1 */
    {MOP_wstrb, MOP_wstrh, MOP_wstr, MOP_xstr}};

MOperator ldFs[kFloatByteSizeDimension] = {MOP_sldr, MOP_dldr, MOP_qldr};
MOperator stFs[kFloatByteSizeDimension] = {MOP_sstr, MOP_dstr, MOP_qstr};

/* extended to unsigned ints */
MOperator uextIs[kIntByteSizeDimension][kIntByteSizeDimension] = {
    /* u8         u16          u32          u64 */
    {MOP_undef, MOP_xuxtb32, MOP_xuxtb32, MOP_xuxtb32}, /* u8/i8 */
    {MOP_undef, MOP_undef, MOP_xuxth32, MOP_xuxth32},   /* u16/i16 */
    {MOP_undef, MOP_undef, MOP_xuxtw64, MOP_xuxtw64},   /* u32/i32 */
    {MOP_undef, MOP_undef, MOP_undef, MOP_undef}        /* u64/u64 */
};

/* extended to signed ints */
MOperator extIs[kIntByteSizeDimension][kIntByteSizeDimension] = {
    /* i8         i16          i32          i64 */
    {MOP_undef, MOP_xsxtb32, MOP_xsxtb32, MOP_xsxtb64}, /* u8/i8 */
    {MOP_undef, MOP_undef, MOP_xsxth32, MOP_xsxth64},   /* u16/i16 */
    {MOP_undef, MOP_undef, MOP_undef, MOP_xsxtw64},     /* u32/i32 */
    {MOP_undef, MOP_undef, MOP_undef, MOP_undef}        /* u64/u64 */
};
MOperator PickLdStInsn(bool isLoad, uint32 bitSize, PrimType primType)
{
    DEBUG_ASSERT(bitSize >= k8BitSize, "PTY_u1 should have been lowered?");
    DEBUG_ASSERT(__builtin_popcount(bitSize) == 1, "bitSize must be a power of 2");

    /* __builtin_ffs(x) returns: 0 -> 0, 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4 */
    if (IsPrimitiveInteger(primType)) {
        auto *table = isLoad ? ldIs : stIs;
        int32 signedUnsigned = IsUnsignedInteger(primType) ? 0 : 1;

        /* __builtin_ffs(x) returns: 8 -> 4, 16 -> 5, 32 -> 6, 64 -> 7 */
        uint32 size = static_cast<uint32>(__builtin_ffs(static_cast<int32>(bitSize))) - k4BitSize;
        DEBUG_ASSERT(size <= 3, "wrong bitSize"); // table column index must be 0..3
        return table[signedUnsigned][size];
    } else {
        MOperator *table = isLoad ? ldFs : stFs;
        /* __builtin_ffs(x) returns: 32 -> 6, 64 -> 7, 128 -> 8 */
        uint32 size = static_cast<uint32>(__builtin_ffs(static_cast<int32>(bitSize))) - k6BitSize;
        DEBUG_ASSERT(size <= k2BitSize, "size must be 0 to 2");
        return table[size];
    }
}
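
/*
 * Reader aid (a sketch derived from the ldIs/ldFs tables above, not exercised
 * by this file): sample mappings produced by PickLdStInsn are
 *   PickLdStInsn(true,  8,  PTY_u8)  -> MOP_wldrb  (zero-extending byte load)
 *   PickLdStInsn(true,  16, PTY_i16) -> MOP_wldrsh (sign-extending halfword load)
 *   PickLdStInsn(false, 64, PTY_i64) -> MOP_xstr   (64-bit store)
 *   PickLdStInsn(true,  64, PTY_f64) -> MOP_dldr   (double-precision FP load)
 */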
} // namespace

RegOperand &AArch64CGFunc::GetOrCreateResOperand(const BaseNode &parent, PrimType primType)
{
    RegOperand *resOpnd = nullptr;
    if (parent.GetOpCode() == OP_regassign) {
        auto &regAssignNode = static_cast<const RegassignNode &>(parent);
        PregIdx pregIdx = regAssignNode.GetRegIdx();
        if (IsSpecialPseudoRegister(pregIdx)) {
            /* if it is one of special registers */
            resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, primType);
        } else {
            resOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx));
        }
    } else {
        resOpnd = &CreateRegisterOperandOfType(primType);
    }
    return *resOpnd;
}

MOperator AArch64CGFunc::PickLdInsn(uint32 bitSize, PrimType primType) const
{
    return PickLdStInsn(true, bitSize, primType);
}

MOperator AArch64CGFunc::PickStInsn(uint32 bitSize, PrimType primType) const
{
    return PickLdStInsn(false, bitSize, primType);
}

MOperator AArch64CGFunc::PickExtInsn(PrimType dtype, PrimType stype) const
{
    int32 sBitSize = static_cast<int32>(GetPrimTypeBitSize(stype));
    int32 dBitSize = static_cast<int32>(GetPrimTypeBitSize(dtype));
    /* __builtin_ffs(x) returns: 0 -> 0, 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4 */
    if (IsPrimitiveInteger(stype) && IsPrimitiveInteger(dtype)) {
        MOperator (*table)[kIntByteSizeDimension];
        table = IsUnsignedInteger(stype) ? uextIs : extIs;
        /* __builtin_ffs(x) returns: 8 -> 4, 16 -> 5, 32 -> 6, 64 -> 7 */
        uint32 row = static_cast<uint32>(__builtin_ffs(sBitSize)) - k4BitSize;
        DEBUG_ASSERT(row <= k3BitSize, "wrong bitSize");
        uint32 col = static_cast<uint32>(__builtin_ffs(dBitSize)) - k4BitSize;
        DEBUG_ASSERT(col <= k3BitSize, "wrong bitSize");
        return table[row][col];
    }
    CHECK_FATAL(0, "extend not primitive integer");
    return MOP_undef;
}

MOperator AArch64CGFunc::PickMovBetweenRegs(PrimType destType, PrimType srcType) const
{
    if (IsPrimitiveInteger(destType) && IsPrimitiveInteger(srcType)) {
        return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_wmovrr : MOP_xmovrr;
    }
    if (IsPrimitiveFloat(destType) && IsPrimitiveFloat(srcType)) {
        return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovs : MOP_xvmovd;
    }
    if (IsPrimitiveInteger(destType) && IsPrimitiveFloat(srcType)) {
        return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovrs : MOP_xvmovrd;
    }
    if (IsPrimitiveFloat(destType) && IsPrimitiveInteger(srcType)) {
        return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovsr : MOP_xvmovdr;
    }
    CHECK_FATAL(false, "unexpected operand primtype for mov");
    return MOP_undef;
}

void AArch64CGFunc::SelectCopyImm(Operand &dest, PrimType dType, ImmOperand &src, PrimType sType)
{
    if (IsPrimitiveInteger(dType) != IsPrimitiveInteger(sType)) {
        RegOperand &tempReg = CreateRegisterOperandOfType(sType);
        SelectCopyImm(tempReg, src, sType);
        SelectCopy(dest, dType, tempReg, sType);
    } else {
        SelectCopyImm(dest, src, sType);
    }
}

void AArch64CGFunc::SelectCopyImm(Operand &dest, ImmOperand &src, PrimType dtype)
{
    uint32 dsize = GetPrimTypeBitSize(dtype);
    // If the type size of the parent node is smaller than the type size of the child node,
    // the value of the child node needs to be truncated.
    if (dsize < src.GetSize()) {
        uint64 value = static_cast<uint64>(src.GetValue());
        uint64 mask = (1UL << dsize) - 1;
        int64 newValue = static_cast<int64>(value & mask);
        src.SetValue(newValue);
    }
    DEBUG_ASSERT(IsPrimitiveInteger(dtype), "The type of destination operand must be Integer");
    DEBUG_ASSERT(((dsize == k8BitSize) || (dsize == k16BitSize) || (dsize == k32BitSize) || (dsize == k64BitSize)),
                 "The destination operand must be 8-bit, 16-bit, 32-bit or 64-bit");
    if (src.GetSize() == k32BitSize && dsize == k64BitSize && src.IsSingleInstructionMovable()) {
        auto tempReg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k32BitSize), k32BitSize, kRegTyInt);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, *tempReg, src));
        SelectCopy(dest, dtype, *tempReg, PTY_u32);
        return;
    }
    if (src.IsSingleInstructionMovable()) {
        MOperator mOp = (dsize <= k32BitSize) ? MOP_wmovri32 : MOP_xmovri64;
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, src));
        return;
    }
    uint64 srcVal = static_cast<uint64>(src.GetValue());
    /* using mov/movk to load the immediate value */
    if (dsize == k8BitSize) {
        /* compute lower 8 bits value */
        if (dtype == PTY_u8) {
            /* zero extend */
            srcVal = (srcVal << k56BitSize) >> k56BitSize;
            dtype = PTY_u16;
        } else {
            /* sign extend */
            srcVal = (static_cast<int64>(srcVal) << k56BitSize) >> k56BitSize;
            dtype = PTY_i16;
        }
        dsize = k16BitSize;
    }
    if (dsize == k16BitSize) {
        if (dtype == PTY_u16) {
            /* check lower 16 bits and higher 16 bits respectively */
            DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0, "unexpected value");
            DEBUG_ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) == 0, "unexpected value");
            DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0xFFFFULL, "unexpected value");
            /* create an imm operand which represents lower 16 bits of the immediate */
            ImmOperand &srcLower = CreateImmOperand(static_cast<int64>(srcVal & 0x0000FFFFULL), k16BitSize, false);
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, dest, srcLower));
            return;
        } else {
            /* sign extend and let `dsize == 32` case take care of it */
            srcVal = (static_cast<int64>(srcVal) << k48BitSize) >> k48BitSize;
            dsize = k32BitSize;
        }
    }
    if (dsize == k32BitSize) {
        /* check lower 16 bits and higher 16 bits respectively */
        DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0, "unexpected val");
        DEBUG_ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) != 0, "unexpected val");
        DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0xFFFFULL, "unexpected val");
        DEBUG_ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) != 0xFFFFULL, "unexpected val");
        /* create an imm operand which represents lower 16 bits of the immediate */
        ImmOperand &srcLower = CreateImmOperand(static_cast<int64>(srcVal & 0x0000FFFFULL), k16BitSize, false);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, dest, srcLower));
        /* create an imm operand which represents upper 16 bits of the immediate */
        ImmOperand &srcUpper =
            CreateImmOperand(static_cast<int64>((srcVal >> k16BitSize) & 0x0000FFFFULL), k16BitSize, false);
        BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(k16BitSize, false);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovkri16, dest, srcUpper, *lslOpnd));
    } else {
        /*
         * partition it into 4 16-bit chunks
         * if more 0's than 0xFFFF's, use movz as the initial instruction.
         * otherwise, movn.
         */
        bool useMovz = BetterUseMOVZ(srcVal);
        bool useMovk = false;
        /* get lower 32 bits of the immediate */
        uint64 chunkLval = srcVal & 0xFFFFFFFFULL;
        /* get upper 32 bits of the immediate */
        uint64 chunkHval = (srcVal >> k32BitSize) & 0xFFFFFFFFULL;
        int32 maxLoopTime = 4;

        if (chunkLval == chunkHval) {
            /* compute lower 32 bits, and then copy to higher 32 bits, so only 2 chunks need be processed */
            maxLoopTime = 2;
        }

        uint64 sa = 0;

        for (int64 i = 0; i < maxLoopTime; ++i, sa += k16BitSize) {
            /* create an imm operand which represents the i-th 16-bit chunk of the immediate */
            uint64 chunkVal = (srcVal >> (static_cast<uint64>(sa))) & 0x0000FFFFULL;
            if (useMovz ? (chunkVal == 0) : (chunkVal == 0x0000FFFFULL)) {
                continue;
            }
            ImmOperand &src16 = CreateImmOperand(static_cast<int64>(chunkVal), k16BitSize, false);
            BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(sa, true);
            if (!useMovk) {
                /* use movz or movn */
                if (!useMovz) {
                    src16.BitwiseNegate();
                }
                GetCurBB()->AppendInsn(
                    GetInsnBuilder()->BuildInsn(useMovz ? MOP_xmovzri16 : MOP_xmovnri16, dest, src16, *lslOpnd));
                useMovk = true;
            } else {
                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xmovkri16, dest, src16, *lslOpnd));
            }
        }

        if (maxLoopTime == 2) { /* as described above, only 2 chunks need be processed */
            /* copy lower 32 bits to higher 32 bits */
            ImmOperand &immOpnd = CreateImmOperand(k32BitSize, k8BitSize, false);
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xbfirri6i6, dest, dest, immOpnd, immOpnd));
        }
    }
}
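
/*
 * Worked example for the movz/movk path above (the value is illustrative, not
 * taken from this file): materializing 0x0001234500012345 in a 64-bit
 * register. Both 32-bit halves are equal, so maxLoopTime == 2:
 *   movz xd, #0x2345             // chunk 0, LSL #0
 *   movk xd, #0x0001, LSL #16    // chunk 1
 *   bfi  xd, xd, #32, #32        // MOP_xbfirri6i6: copy low half to high half
 * All-zero chunks are skipped, and when BetterUseMOVZ() prefers movn the first
 * emitted chunk is bitwise-negated and encoded with movn instead.
 */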

void AArch64CGFunc::SelectCopyMemOpnd(Operand &dest, PrimType dtype, uint32 dsize, Operand &src, PrimType stype)
{
    Insn *insn = nullptr;
    uint32 ssize = src.GetSize();
    PrimType regTy = PTY_void;
    RegOperand *loadReg = nullptr;
    MOperator mop = MOP_undef;
    if (IsPrimitiveFloat(stype)) {
        CHECK_FATAL(dsize == ssize, "dsize %u expect equals ssize %u", dsize, ssize);
        insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), dest, src);
    } else {
        mop = PickExtInsn(dtype, stype);
        if (ssize == (GetPrimTypeSize(dtype) * kBitsPerByte) || mop == MOP_undef) {
            insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), dest, src);
        } else {
            regTy = dsize == k64BitSize ? dtype : PTY_i32;
            loadReg = &CreateRegisterOperandOfType(regTy);
            insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), *loadReg, src);
        }
    }

    GetCurBB()->AppendInsn(*insn);
    if (regTy != PTY_void && mop != MOP_undef) {
        DEBUG_ASSERT(loadReg != nullptr, "loadReg should not be nullptr");
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, dest, *loadReg));
    }
}
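
/*
 * Reader aid for the two-instruction path above (illustrative, assuming an
 * i16 source and a wider integer destination): the value is first loaded with
 * the PickLdInsn-chosen load (e.g. ldrsh into a temp register) and then
 * widened by the PickExtInsn-chosen extension; when source and destination
 * widths match, or no extension applies, it collapses to the single load.
 */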

bool AArch64CGFunc::IsImmediateValueInRange(MOperator mOp, int64 immVal, bool is64Bits, bool isIntactIndexed,
                                            bool isPostIndexed, bool isPreIndexed) const
{
    bool isInRange = false;
    switch (mOp) {
        case MOP_xstr:
        case MOP_wstr:
            isInRange =
                (isIntactIndexed &&
                 ((!is64Bits && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrLdrImm32UpperBound)) ||
                  (is64Bits && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrLdrImm64UpperBound)))) ||
                ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) &&
                 (immVal <= kStrLdrPerPostUpperBound));
            break;
        case MOP_wstrb:
            isInRange =
                (isIntactIndexed && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrbLdrbImmUpperBound)) ||
                ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) &&
                 (immVal <= kStrLdrPerPostUpperBound));
            break;
        case MOP_wstrh:
            isInRange =
                (isIntactIndexed && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrhLdrhImmUpperBound)) ||
                ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) &&
                 (immVal <= kStrLdrPerPostUpperBound));
            break;
        default:
            break;
    }
    return isInRange;
}
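
/*
 * Background sketch for the bounds above (standard AArch64 encoding rules;
 * the kStr... and kLdr... constants themselves are defined elsewhere):
 * pre/post-indexed STR/LDR forms take a signed 9-bit offset (-256..255),
 * while intact-indexed forms take an unsigned 12-bit offset scaled by the
 * access size, e.g. 0..4095 for strb, 0..16380 for 32-bit str and 0..32760
 * for 64-bit str.
 */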

bool AArch64CGFunc::IsStoreMop(MOperator mOp) const
{
    switch (mOp) {
        case MOP_sstr:
        case MOP_dstr:
        case MOP_qstr:
        case MOP_xstr:
        case MOP_wstr:
        case MOP_wstrb:
        case MOP_wstrh:
            return true;
        default:
            return false;
    }
}

void AArch64CGFunc::SelectCopyRegOpnd(Operand &dest, PrimType dtype, Operand::OperandType opndType, uint32 dsize,
                                      Operand &src, PrimType stype)
{
    if (opndType != Operand::kOpdMem) {
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickMovBetweenRegs(dtype, stype), dest, src));
        return;
    }
    bool is64Bits = (dest.GetSize() == k64BitSize);
    MOperator strMop = PickStInsn(dsize, stype);
    if (!dest.IsMemoryAccessOperand()) {
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest));
        return;
    }

    MemOperand *memOpnd = static_cast<MemOperand *>(&dest);
    DEBUG_ASSERT(memOpnd != nullptr, "memOpnd should not be nullptr");
    if (memOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li) {
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest));
        return;
    }
    if (memOpnd->GetOffsetOperand() == nullptr) {
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest));
        return;
    }
    ImmOperand *immOpnd = static_cast<ImmOperand *>(memOpnd->GetOffsetOperand());
    DEBUG_ASSERT(immOpnd != nullptr, "immOpnd should not be nullptr");
    int64 immVal = immOpnd->GetValue();
    bool isIntactIndexed = memOpnd->IsIntactIndexed();
    bool isPostIndexed = memOpnd->IsPostIndexed();
    bool isPreIndexed = memOpnd->IsPreIndexed();
    DEBUG_ASSERT(!isPostIndexed, "memOpnd should not be post-index type");
    DEBUG_ASSERT(!isPreIndexed, "memOpnd should not be pre-index type");
    bool isInRange = IsImmediateValueInRange(strMop, immVal, is64Bits, isIntactIndexed, isPostIndexed, isPreIndexed);
    bool isMopStr = IsStoreMop(strMop);
    if (isInRange || !isMopStr) {
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest));
        return;
    }
    DEBUG_ASSERT(memOpnd->GetBaseRegister() != nullptr, "nullptr check");
    if (isIntactIndexed) {
        memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dsize);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, *memOpnd));
    } else if (isPostIndexed || isPreIndexed) {
        RegOperand &reg = CreateRegisterOperandOfType(PTY_i64);
        MOperator mopMov = MOP_xmovri64;
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopMov, reg, *immOpnd));
        MOperator mopAdd = MOP_xaddrrr;
        MemOperand &newDest =
            GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(dtype), memOpnd->GetBaseRegister(),
                               nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), nullptr);
        Insn &insn1 = GetInsnBuilder()->BuildInsn(strMop, src, newDest);
        Insn &insn2 = GetInsnBuilder()->BuildInsn(mopAdd, *newDest.GetBaseRegister(), *newDest.GetBaseRegister(), reg);
        if (isPostIndexed) {
            GetCurBB()->AppendInsn(insn1);
            GetCurBB()->AppendInsn(insn2);
        } else {
            /* isPreIndexed */
            GetCurBB()->AppendInsn(insn2);
            GetCurBB()->AppendInsn(insn1);
        }
    }
}

void AArch64CGFunc::SelectCopy(Operand &dest, PrimType dtype, Operand &src, PrimType stype, BaseNode *baseNode)
{
    DEBUG_ASSERT(dest.IsRegister() || dest.IsMemoryAccessOperand(), "");
    uint32 dsize = GetPrimTypeBitSize(dtype);
    if (dest.IsRegister()) {
        dsize = dest.GetSize();
    }
    Operand::OperandType opnd0Type = dest.GetKind();
    Operand::OperandType opnd1Type = src.GetKind();
    DEBUG_ASSERT(((dsize >= src.GetSize()) || (opnd0Type == Operand::kOpdRegister) || (opnd0Type == Operand::kOpdMem)),
                 "NYI");
    DEBUG_ASSERT(((opnd0Type == Operand::kOpdRegister) || (src.GetKind() == Operand::kOpdRegister)),
                 "either src or dest should be register");

    switch (opnd1Type) {
        case Operand::kOpdMem:
            SelectCopyMemOpnd(dest, dtype, dsize, src, stype);
            break;
        case Operand::kOpdOffset:
        case Operand::kOpdImmediate:
            SelectCopyImm(dest, dtype, static_cast<ImmOperand &>(src), stype);
            break;
        case Operand::kOpdFPImmediate:
            CHECK_FATAL(static_cast<ImmOperand &>(src).GetValue() == 0, "NIY");
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn((dsize == k32BitSize) ? MOP_xvmovsr : MOP_xvmovdr, dest,
                                                               GetZeroOpnd(dsize)));
            break;
        case Operand::kOpdRegister: {
            if (dest.IsRegister()) {
                RegOperand &desReg = static_cast<RegOperand &>(dest);
                RegOperand &srcReg = static_cast<RegOperand &>(src);
                if (desReg.GetRegisterNumber() == srcReg.GetRegisterNumber()) {
                    break;
                }
            }
            SelectCopyRegOpnd(dest, dtype, opnd0Type, dsize, src, stype);
            break;
        }
        default:
            CHECK_FATAL(false, "NYI");
    }
}

/* This function copies src to a register, the src can be an imm, mem or a label */
RegOperand &AArch64CGFunc::SelectCopy(Operand &src, PrimType stype, PrimType dtype)
{
    RegOperand &dest = CreateRegisterOperandOfType(dtype);
    SelectCopy(dest, dtype, src, stype);
    return dest;
}

/*
 * We need to adjust the offset of a stack allocated local variable
 * if we store FP/SP before any other local variables to save an instruction.
 * See AArch64CGFunc::OffsetAdjustmentForFPLR() in aarch64_cgfunc.cpp
 *
 * That is, when !UsedStpSubPairForCallFrameAllocation().
 *
 * Because we need to use the STP/SUB instruction pair to store FP/SP 'after'
 * local variables when the call frame size is greater than the max offset
 * value allowed for the STP instruction (we cannot use pre-indexed STP or
 * post-indexed LDP), if UsedStpSubPairForCallFrameAllocation(), we don't need
 * to adjust the offsets.
 */
bool AArch64CGFunc::IsImmediateOffsetOutOfRange(const MemOperand &memOpnd, uint32 bitLen)
{
    DEBUG_ASSERT(bitLen >= k8BitSize, "bitlen error");
    DEBUG_ASSERT(bitLen <= k128BitSize, "bitlen error");

    if (bitLen >= k8BitSize) {
        bitLen = static_cast<uint32>(RoundUp(bitLen, k8BitSize));
    }
    DEBUG_ASSERT((bitLen & (bitLen - 1)) == 0, "bitlen error");

    MemOperand::AArch64AddressingMode mode = memOpnd.GetAddrMode();
    if ((mode == MemOperand::kAddrModeBOi) && memOpnd.IsIntactIndexed()) {
        OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate();
        int32 offsetValue = ofstOpnd ? static_cast<int32>(ofstOpnd->GetOffsetValue()) : 0;
        if (ofstOpnd && ofstOpnd->GetVary() == kUnAdjustVary) {
            offsetValue +=
                static_cast<int32>(static_cast<AArch64MemLayout *>(GetMemlayout())->RealStackFrameSize() + 0xff);
        }
        offsetValue += kAarch64IntregBytelen << 1; /* Refer to the above comment */
        return MemOperand::IsPIMMOffsetOutOfRange(offsetValue, bitLen);
    } else {
        return false;
    }
}
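
/*
 * Note (reader aid, assuming kAarch64IntregBytelen == 8): the
 * `kAarch64IntregBytelen << 1` slack above reserves 16 bytes, room for the
 * FP/LR pair mentioned in the comment before this function, so offsets near
 * the encoding limit are conservatively reported as out of range and split.
 */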

// Checks whether operand o is a legal operand for mop at index opndIdx.
// It is implemented by calling the verify API of the mop (InsnDesc -> Verify).
bool AArch64CGFunc::IsOperandImmValid(MOperator mOp, Operand *o, uint32 opndIdx) const
{
    const InsnDesc *md = &AArch64CG::kMd[mOp];
    auto *opndProp = md->opndMD[opndIdx];
    MemPool *localMp = memPoolCtrler.NewMemPool("opnd verify mempool", true);
    auto *localAlloc = new MapleAllocator(localMp);
    MapleVector<Operand *> testOpnds(md->opndMD.size(), localAlloc->Adapter());
    testOpnds[opndIdx] = o;
    bool flag = true;
    Operand::OperandType opndTy = opndProp->GetOperandType();
    if (opndTy == Operand::kOpdMem) {
        auto *memOpnd = static_cast<MemOperand *>(o);
        CHECK_FATAL(memOpnd != nullptr, "memOpnd should not be nullptr");
        if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOrX &&
            (!memOpnd->IsPostIndexed() && !memOpnd->IsPreIndexed())) {
            delete localAlloc;
            memPoolCtrler.DeleteMemPool(localMp);
            return true;
        }
        OfstOperand *ofStOpnd = memOpnd->GetOffsetImmediate();
        int64 offsetValue = ofStOpnd ? ofStOpnd->GetOffsetValue() : 0LL;
        if (md->IsLoadStorePair() || (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi)) {
            flag = md->Verify(testOpnds);
        } else if (memOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li) {
            if (offsetValue == 0) {
                flag = md->Verify(testOpnds);
            } else {
                flag = false;
            }
        } else if (memOpnd->IsPostIndexed() || memOpnd->IsPreIndexed()) {
            flag = (offsetValue <= static_cast<int64>(k256BitSizeInt) && offsetValue >= kNegative256BitSize);
        }
    } else if (opndTy == Operand::kOpdImmediate) {
        flag = md->Verify(testOpnds);
    }
    delete localAlloc;
    memPoolCtrler.DeleteMemPool(localMp);
    return flag;
}

MemOperand &AArch64CGFunc::CreateReplacementMemOperand(uint32 bitLen, RegOperand &baseReg, int64 offset)
{
    return CreateMemOpnd(baseReg, offset, bitLen);
}

bool AArch64CGFunc::CheckIfSplitOffsetWithAdd(const MemOperand &memOpnd, uint32 bitLen) const
{
    if (memOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd.IsIntactIndexed()) {
        return false;
    }
    OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate();
    int32 opndVal = static_cast<int32>(ofstOpnd->GetOffsetValue());
    int32 maxPimm = memOpnd.GetMaxPIMM(bitLen);
    int32 q0 = opndVal / maxPimm;
    int32 addend = q0 * maxPimm;
    int32 r0 = opndVal - addend;
    int32 alignment = static_cast<int32>(memOpnd.GetImmediateOffsetAlignment(bitLen));
    int32 r1 = static_cast<int32>(static_cast<uint32>(r0) & ((1u << static_cast<uint32>(alignment)) - 1));
    addend = addend + r1;
    return (addend > 0);
}

RegOperand *AArch64CGFunc::GetBaseRegForSplit(uint32 baseRegNum)
{
    RegOperand *resOpnd = nullptr;
    if (baseRegNum == AArch64reg::kRinvalid) {
        resOpnd = &CreateRegisterOperandOfType(PTY_i64);
    } else if (AArch64isa::IsPhysicalRegister(baseRegNum)) {
        resOpnd = &GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(baseRegNum),
                                                      GetPointerSize() * kBitsPerByte, kRegTyInt);
    } else {
        resOpnd = &GetOrCreateVirtualRegisterOperand(baseRegNum);
    }
    return resOpnd;
}

ImmOperand &AArch64CGFunc::SplitAndGetRemained(const MemOperand &memOpnd, uint32 bitLen, RegOperand *resOpnd,
                                               int64 ofstVal, bool isDest, Insn *insn, bool forPair)
{
    auto it = hashMemOpndTable.find(memOpnd);
    if (it != hashMemOpndTable.end()) {
        hashMemOpndTable.erase(memOpnd);
    }
    /*
     * ofstVal == Q0 * 32760(16380) + R0
     * R0 == Q1 * 8(4) + R1
     * ADDEND == Q0 * 32760(16380) + R1
     * NEW_OFFSET == Q1 * 8(4)
     * we want to generate two instructions:
     * ADD TEMP_REG, X29, ADDEND
     * LDR/STR TEMP_REG, [ TEMP_REG, #NEW_OFFSET ]
     */
    int32 maxPimm = 0;
    if (!forPair) {
        maxPimm = MemOperand::GetMaxPIMM(bitLen);
    } else {
        maxPimm = MemOperand::GetMaxPairPIMM(bitLen);
    }
    DEBUG_ASSERT(maxPimm != 0, "get max pimm failed");

    int64 q0 = ofstVal / maxPimm + (ofstVal < 0 ? -1 : 0);
    int64 addend = q0 * maxPimm;
    int64 r0 = ofstVal - addend;
    int64 alignment = MemOperand::GetImmediateOffsetAlignment(bitLen);
    auto q1 = static_cast<int64>(static_cast<uint64>(r0) >> static_cast<uint64>(alignment));
    auto r1 = static_cast<int64>(static_cast<uint64>(r0) & ((1u << static_cast<uint64>(alignment)) - 1));
    auto remained = static_cast<int64>(static_cast<uint64>(q1) << static_cast<uint64>(alignment));
    addend = addend + r1;
    if (addend > 0) {
        int64 suffixClear = 0xfff;
        if (forPair) {
            suffixClear = 0xff;
        }
        int64 remainedTmp = remained + (addend & suffixClear);
        if (!MemOperand::IsPIMMOffsetOutOfRange(static_cast<int32>(remainedTmp), bitLen) &&
            ((static_cast<uint64>(remainedTmp) & ((1u << static_cast<uint64>(alignment)) - 1)) == 0)) {
            remained = remainedTmp;
            addend = (addend & ~suffixClear);
        }
    }
    ImmOperand &immAddend = CreateImmOperand(addend, k64BitSize, true);
    if (memOpnd.GetOffsetImmediate()->GetVary() == kUnAdjustVary) {
        immAddend.SetVary(kUnAdjustVary);
    }
    return immAddend;
}
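
/*
 * Worked example (illustrative numbers): a 64-bit access, so maxPimm == 32760
 * and alignment == 3, with ofstVal == 40000:
 *   q0 = 1, addend = 32760, r0 = 7240, r1 = 0, remained = 7240
 *   addend & 0xfff == 4088, so remainedTmp = 11328 (aligned and in range)
 *   => addend = 28672 (0x7000, a single shifted 12-bit ADD immediate)
 *      and remained = 11328, with 28672 + 11328 == 40000
 * i.e. the caller emits: add Rtmp, Rbase, #28672 ; ldr/str ..., [Rtmp, #11328]
 */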

MemOperand &AArch64CGFunc::SplitOffsetWithAddInstruction(const MemOperand &memOpnd, uint32 bitLen, uint32 baseRegNum,
                                                         bool isDest, Insn *insn, bool forPair)
{
    DEBUG_ASSERT((memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi), "expect kAddrModeBOi memOpnd");
    DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "expect intactIndexed memOpnd");
    OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate();
    int64 ofstVal = ofstOpnd->GetOffsetValue();
    RegOperand *resOpnd = GetBaseRegForSplit(baseRegNum);
    ImmOperand &immAddend = SplitAndGetRemained(memOpnd, bitLen, resOpnd, ofstVal, isDest, insn, forPair);
    int64 remained = (ofstVal - immAddend.GetValue());
    RegOperand *origBaseReg = memOpnd.GetBaseRegister();
    DEBUG_ASSERT(origBaseReg != nullptr, "nullptr check");
    if (insn == nullptr) {
        SelectAdd(*resOpnd, *origBaseReg, immAddend, PTY_i64);
    } else {
        SelectAddAfterInsn(*resOpnd, *origBaseReg, immAddend, PTY_i64, isDest, *insn);
    }
    MemOperand &newMemOpnd = CreateReplacementMemOperand(bitLen, *resOpnd, remained);
    newMemOpnd.SetStackMem(memOpnd.IsStackMem());
    return newMemOpnd;
}

void AArch64CGFunc::SelectDassign(DassignNode &stmt, Operand &opnd0)
{
    SelectDassign(stmt.GetStIdx(), stmt.GetFieldID(), stmt.GetRHS()->GetPrimType(), opnd0);
}

/*
 * NOTE: I divided SelectDassign so that we can create "virtual" assignments
 * when selecting other complex Maple IR instructions. For example, the atomic
 * exchange and other intrinsics will need to assign their results to local
 * variables. Such Maple IR instructions are platform-specific (e.g.
 * atomic_exchange can be implemented as one single machine instruction on x86_64
 * and ARMv8.1, but ARMv8.0 needs an LL/SC loop), therefore they cannot (in
 * principle) be lowered at BELowerer or CGLowerer.
 */
void AArch64CGFunc::SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opnd0)
{
    MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(stIdx);
    int32 offset = 0;
    bool parmCopy = false;
    uint32 regSize = GetPrimTypeBitSize(rhsPType);
    MIRType *type = symbol->GetType();
    Operand &stOpnd =
        LoadIntoRegister(opnd0, IsPrimitiveInteger(rhsPType), regSize, IsSignedInteger(type->GetPrimType()));
    MOperator mOp = MOP_undef;

    uint32 dataSize = GetPrimTypeBitSize(type->GetPrimType());
    MemOperand *memOpnd = nullptr;
    if (parmCopy) {
        memOpnd = &LoadStructCopyBase(*symbol, offset, static_cast<int>(dataSize));
    } else {
        memOpnd = &GetOrCreateMemOpnd(*symbol, offset, dataSize);
    }
    if ((memOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) {
        memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize);
    }

    /* In bpl mode, a func symbol's type is represented as a MIRFuncType instead of a MIRPtrType (pointing to
     * MIRFuncType), so we allow `kTypeFunction` to appear here */
    DEBUG_ASSERT(((type->GetKind() == kTypeScalar) || (type->GetKind() == kTypePointer) ||
                  (type->GetKind() == kTypeFunction) || (type->GetKind() == kTypeArray)),
                 "NYI dassign type");
    PrimType ptyp = type->GetPrimType();

    mOp = PickStInsn(GetPrimTypeBitSize(ptyp), ptyp);
    Insn &insn = GetInsnBuilder()->BuildInsn(mOp, stOpnd, *memOpnd);
    GetCurBB()->AppendInsn(insn);
}

void AArch64CGFunc::SelectRegassign(RegassignNode &stmt, Operand &opnd0)
{
    RegOperand *regOpnd = nullptr;
    PregIdx pregIdx = stmt.GetRegIdx();
    if (IsSpecialPseudoRegister(pregIdx)) {
        regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, stmt.GetPrimType());
    } else {
        regOpnd = GetOrCreateRegOpndFromPregIdx(pregIdx, stmt.GetPrimType());
    }
    /* look at rhs */
    PrimType rhsType = stmt.Opnd(0)->GetPrimType();
    DEBUG_ASSERT(regOpnd != nullptr, "null ptr check!");
    Operand *srcOpnd = &opnd0;
    if (GetPrimTypeSize(stmt.GetPrimType()) > GetPrimTypeSize(rhsType) && IsPrimitiveInteger(rhsType)) {
        CHECK_FATAL(IsPrimitiveInteger(stmt.GetPrimType()), "NIY");
        srcOpnd = &CreateRegisterOperandOfType(stmt.GetPrimType());
        SelectCvtInt2Int(nullptr, srcOpnd, &opnd0, rhsType, stmt.GetPrimType());
    }
    SelectCopy(*regOpnd, stmt.GetPrimType(), *srcOpnd, rhsType, stmt.GetRHS());

    if (GetCG()->GenerateVerboseCG()) {
        if (GetCurBB()->GetLastInsn()) {
            GetCurBB()->GetLastInsn()->AppendComment(" regassign %" + std::to_string(pregIdx) + "; ");
        } else if (GetCurBB()->GetPrev()->GetLastInsn()) {
            GetCurBB()->GetPrev()->GetLastInsn()->AppendComment(" regassign %" + std::to_string(pregIdx) + "; ");
        }
    }

    if ((Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) && (pregIdx >= 0)) {
        MemOperand *dest = GetPseudoRegisterSpillMemoryOperand(pregIdx);
        PrimType stype = GetTypeFromPseudoRegIdx(pregIdx);
        MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx);
        uint32 srcBitLength = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte;
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(srcBitLength, stype), *regOpnd, *dest));
    } else if (regOpnd->GetRegisterNumber() == R0 || regOpnd->GetRegisterNumber() == R1) {
        Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, *regOpnd);
        GetCurBB()->AppendInsn(pseudo);
    } else if (regOpnd->GetRegisterNumber() >= V0 && regOpnd->GetRegisterNumber() <= V3) {
        Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_float, *regOpnd);
        GetCurBB()->AppendInsn(pseudo);
    }
    const auto &derived2BaseRef = GetFunction().GetDerived2BaseRef();
    auto itr = derived2BaseRef.find(pregIdx);
    if (itr != derived2BaseRef.end()) {
        auto *derivedRegOpnd = GetOrCreateRegOpndFromPregIdx(itr->first, PTY_ref);
        auto *baseRegOpnd = GetOrCreateRegOpndFromPregIdx(itr->second, PTY_ref);
        derivedRegOpnd->SetBaseRefOpnd(*baseRegOpnd);
    }
}

CCImpl *AArch64CGFunc::GetOrCreateLocator(CallConvKind cc)
{
    auto it = hashCCTable.find(cc);
    if (it != hashCCTable.end()) {
        it->second->Init();
        return it->second;
    }
    CCImpl *res = nullptr;
    if (cc == kCCall) {
        res = memPool->New<AArch64CallConvImpl>(GetBecommon());
    } else if (cc == kWebKitJS) {
        res = memPool->New<AArch64WebKitJSCC>(GetBecommon());
    } else {
        CHECK_FATAL(false, "unsupported yet");
    }
    hashCCTable[cc] = res;
    return res;
}

static MIRType *GetPointedToType(const MIRPtrType &pointerType)
{
    MIRType *aType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType.GetPointedTyIdx());
    if (aType->GetKind() == kTypeArray) {
        MIRArrayType *arrayType = static_cast<MIRArrayType *>(aType);
        return GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx());
    }
    return aType;
}

void AArch64CGFunc::SelectIassign(IassignNode &stmt)
{
    int32 offset = 0;
    MIRPtrType *pointerType =
        static_cast<MIRPtrType *>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx()));
    DEBUG_ASSERT(pointerType != nullptr, "expect a pointer type at iassign node");
    MIRType *pointedType = nullptr;
    bool isRefField = false;
    pointedType = GetPointedToType(*pointerType);

    PrimType styp = stmt.GetRHS()->GetPrimType();
    Operand *valOpnd = HandleExpr(stmt, *stmt.GetRHS());
    Operand &srcOpnd = LoadIntoRegister(*valOpnd, (IsPrimitiveInteger(styp)), GetPrimTypeBitSize(styp));

    PrimType destType = pointedType->GetPrimType();
    DEBUG_ASSERT(stmt.Opnd(0) != nullptr, "null ptr check");
    MemOperand &memOpnd = CreateMemOpnd(destType, stmt, *stmt.Opnd(0), offset);
    SelectCopy(memOpnd, destType, srcOpnd, destType);
    if (GetCurBB() && GetCurBB()->GetLastMachineInsn()) {
        GetCurBB()->GetLastMachineInsn()->MarkAsAccessRefField(isRefField);
    }
}

Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr)
{
    MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx());

    PrimType symType = symbol->GetType()->GetPrimType();
    uint32 offset = 0;
    uint32 dataSize = GetPrimTypeBitSize(symType);
    PrimType resultType = expr.GetPrimType();
    MemOperand *memOpnd = nullptr;

    memOpnd = &GetOrCreateMemOpnd(*symbol, offset, dataSize);
    if ((memOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) {
        memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize);
    }

    RegOperand &resOpnd = GetOrCreateResOperand(parent, symType);
    SelectCopy(resOpnd, resultType, *memOpnd, symType);
    return &resOpnd;
}

RegOperand *AArch64CGFunc::SelectRegread(RegreadNode &expr)
{
    PregIdx pregIdx = expr.GetRegIdx();
    if (IsSpecialPseudoRegister(pregIdx)) {
        /* if it is one of special registers */
        return &GetOrCreateSpecialRegisterOperand(-pregIdx, expr.GetPrimType());
    }
    RegOperand &reg = *GetOrCreateRegOpndFromPregIdx(pregIdx, expr.GetPrimType());
    if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) {
        MemOperand *src = GetPseudoRegisterSpillMemoryOperand(pregIdx);
        MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx);
        PrimType stype = preg->GetPrimType();
        uint32 srcBitLength = GetPrimTypeSize(stype) * kBitsPerByte;
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(srcBitLength, stype), reg, *src));
    }
    return &reg;
}

Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset,
                                    PrimType finalBitFieldDestType)
{
    int32 offset = 0;
    MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(expr.GetTyIdx());
    MIRPtrType *pointerType = static_cast<MIRPtrType *>(type);
    DEBUG_ASSERT(pointerType != nullptr, "expect a pointer type at iread node");
    MIRType *pointedType = nullptr;
    bool isRefField = false;

    pointedType = GetPointedToType(*pointerType);

    RegType regType = GetRegTyFromPrimTy(expr.GetPrimType());
    uint32 regSize = GetPrimTypeSize(expr.GetPrimType());
    if (regSize < k4ByteSize) {
        regSize = k4ByteSize; /* 32-bit */
    }
    Operand *result = nullptr;
    constexpr int regSizeMax = 8;
    if (parent.GetOpCode() == OP_eval && regSize <= regSizeMax) {
        /* regSize << 3, that is regSize * 8, change bytes to bits */
        result = &GetZeroOpnd(regSize << 3);
    } else {
        result = &GetOrCreateResOperand(parent, expr.GetPrimType());
    }

    PrimType destType = pointedType->GetPrimType();

    uint32 bitSize = GetPrimTypeBitSize(destType);
    if (regType == kRegTyFloat) {
        destType = expr.GetPrimType();
        bitSize = GetPrimTypeBitSize(destType);
    }

    PrimType memType = (finalBitFieldDestType == kPtyInvalid ? destType : finalBitFieldDestType);
    MemOperand *memOpnd =
        CreateMemOpndOrNull(memType, expr, *expr.Opnd(0), static_cast<int64>(static_cast<int>(offset) + extraOffset));
    if (aggParamReg != nullptr) {
        isAggParamInReg = false;
        return aggParamReg;
    }
    DEBUG_ASSERT(memOpnd != nullptr, "memOpnd should not be nullptr");
    MOperator mOp = 0;
    if (finalBitFieldDestType == kPtyInvalid) {
        mOp = PickLdInsn(bitSize, destType);
    } else {
        mOp = PickLdInsn(GetPrimTypeBitSize(finalBitFieldDestType), finalBitFieldDestType);
    }
    if ((memOpnd->GetMemVaryType() == kNotVary) && !IsOperandImmValid(mOp, memOpnd, 1)) {
        memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, bitSize);
    }
    Insn &insn = GetInsnBuilder()->BuildInsn(mOp, *result, *memOpnd);
    if (parent.GetOpCode() == OP_eval && result->IsRegister() &&
        static_cast<RegOperand *>(result)->GetRegisterNumber() == RZR) {
        insn.SetComment("null-check");
    }
    GetCurBB()->AppendInsn(insn);

    if (parent.GetOpCode() != OP_eval) {
        const InsnDesc *md = &AArch64CG::kMd[insn.GetMachineOpcode()];
        auto *prop = md->GetOpndDes(0);
        if ((prop->GetSize()) < insn.GetOperand(0).GetSize()) {
            switch (destType) {
                case PTY_i8:
                    mOp = MOP_xsxtb64;
                    break;
                case PTY_i16:
                    mOp = MOP_xsxth64;
                    break;
                case PTY_i32:
                    mOp = MOP_xsxtw64;
                    break;
                case PTY_u1:
                case PTY_u8:
                    mOp = MOP_xuxtb32;
                    break;
                case PTY_u16:
                    mOp = MOP_xuxth32;
                    break;
                case PTY_u32:
                    mOp = MOP_xuxtw64;
                    break;
                default:
                    break;
            }
            if (destType == PTY_u1) {
                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(
                    MOP_wandrri12, insn.GetOperand(0), insn.GetOperand(0),
                    CreateImmOperand(1, kMaxImmVal5Bits, false)));
            }

            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, insn.GetOperand(0), insn.GetOperand(0)));
        }
    }
    if (GetCurBB() && GetCurBB()->GetLastMachineInsn()) {
        GetCurBB()->GetLastMachineInsn()->MarkAsAccessRefField(isRefField);
    }
    return result;
}

Operand *AArch64CGFunc::SelectIntConst(const MIRIntConst &intConst, const BaseNode &parent)
{
    auto primType = intConst.GetType().GetPrimType();
    if (kOpcodeInfo.IsCompare(parent.GetOpCode())) {
        primType = static_cast<const CompareNode &>(parent).GetOpndType();
    }
    return &CreateImmOperand(intConst.GetExtValue(), GetPrimTypeBitSize(primType), false);
}

Operand *AArch64CGFunc::HandleFmovImm(PrimType stype, int64 val, MIRConst &mirConst, const BaseNode &parent)
{
    Operand *result;
    bool is64Bits = (GetPrimTypeBitSize(stype) == k64BitSize);
    uint64 canRepreset = is64Bits ? (val & 0xffffffffffff) : (val & 0x7ffff);
    uint32 val1 = is64Bits ? (val >> 61) & 0x3 : (val >> 29) & 0x3;
    uint32 val2 = is64Bits ? (val >> 54) & 0xff : (val >> 25) & 0x1f;
    bool isSame = is64Bits ? ((val2 == 0) || (val2 == 0xff)) : ((val2 == 0) || (val2 == 0x1f));
    canRepreset = (canRepreset == 0) && ((val1 & 0x1) ^ ((val1 & 0x2) >> 1)) && isSame;
    if (canRepreset) {
        uint64 temp1 = is64Bits ? (val >> 63) << 7 : (val >> 31) << 7;
        uint64 temp2 = is64Bits ? val >> 48 : val >> 19;
        int64 imm8 = (temp2 & 0x7f) | temp1;
        Operand *newOpnd0 = &CreateImmOperand(imm8, k8BitSize, true, kNotVary, true);
        result = &GetOrCreateResOperand(parent, stype);
        MOperator mopFmov = (is64Bits ? MOP_xdfmovri : MOP_wsfmovri);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *result, *newOpnd0));
    } else {
        Operand *newOpnd0 = &CreateImmOperand(val, GetPrimTypeSize(stype) * kBitsPerByte, false);
        PrimType itype = (stype == PTY_f32) ? PTY_i32 : PTY_i64;
        RegOperand &regOpnd = LoadIntoRegister(*newOpnd0, itype);

        result = &GetOrCreateResOperand(parent, stype);
        MOperator mopFmov = (is64Bits ? MOP_xvmovdr : MOP_xvmovsr);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *result, regOpnd));
    }
    return result;
}
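
/*
 * Reader aid (standard AArch64 FMOV-immediate rule; the sample value is ours,
 * not from this file): a double is FMOV-encodable when its low 48 bits are
 * zero, exponent bits 54..61 are uniform, and bits 61/62 differ. For 1.0
 * (0x3FF0000000000000): temp1 = 0, temp2 = 0x3FF0, imm8 = 0x70, emitting
 * `fmov dN, #1.0`. A value such as 1.1 fails the check and instead takes the
 * integer-mov plus fmov-from-GPR fallback path.
 */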

Operand *AArch64CGFunc::SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent)
{
    PrimType stype = floatConst.GetType().GetPrimType();
    int32 val = floatConst.GetIntValue();
    /* according to aarch64 encoding format, convert int to float expression */
    return HandleFmovImm(stype, val, floatConst, parent);
}

Operand *AArch64CGFunc::SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent)
{
    PrimType stype = doubleConst.GetType().GetPrimType();
    int64 val = doubleConst.GetIntValue();
    /* according to aarch64 encoding format, convert int to float expression */
    return HandleFmovImm(stype, val, doubleConst, parent);
}

/*
 * Returns the number of leading 0-bits in val, starting at the most significant bit position.
 * If val is 0, the result is -1.
 */
static int32 GetHead0BitNum(int64 val)
{
    uint32 bitNum = 0;
    for (; bitNum < k64BitSize; bitNum++) {
        if ((0x8000000000000000ULL >> static_cast<uint32>(bitNum)) & static_cast<uint64>(val)) {
            break;
        }
    }
    if (bitNum == k64BitSize) {
        return -1;
    }
    return bitNum;
}

/*
 * Returns the number of trailing 0-bits in val, starting at the least significant bit position.
 * If val is 0, the result is -1.
 */
static int32 GetTail0BitNum(int64 val)
{
    uint32 bitNum = 0;
    for (; bitNum < k64BitSize; bitNum++) {
        if ((static_cast<uint64>(1) << static_cast<uint32>(bitNum)) & static_cast<uint64>(val)) {
            break;
        }
    }
    if (bitNum == k64BitSize) {
        return -1;
    }
    return bitNum;
}

/*
 * If the input integer is a power of 2, return log2(input);
 * else return -1
 */
static inline int32 GetLog2(uint64 val)
{
    if (__builtin_popcountll(val) == 1) {
        return __builtin_ffsll(static_cast<int64>(val)) - 1;
    }
    return -1;
}
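
/*
 * Illustrative values for the three helpers above (reader aid only):
 * GetLog2(8) == 3 and GetLog2(6) == -1; GetHead0BitNum(1) == 63 and
 * GetTail0BitNum(8) == 3; both bit-scan helpers return -1 for 0.
 */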

MOperator AArch64CGFunc::PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSigned) const
{
    switch (cmpOp) {
        case OP_ne:
            return (brOp == OP_brtrue) ? MOP_bne : MOP_beq;
        case OP_eq:
            return (brOp == OP_brtrue) ? MOP_beq : MOP_bne;
        case OP_lt:
            return (brOp == OP_brtrue) ? (isSigned ? MOP_blt : MOP_blo)
                                       : (isFloat ? MOP_bpl : (isSigned ? MOP_bge : MOP_bhs));
        case OP_le:
            return (brOp == OP_brtrue) ? (isSigned ? MOP_ble : MOP_bls)
                                       : (isFloat ? MOP_bhi : (isSigned ? MOP_bgt : MOP_bhi));
        case OP_gt:
            return (brOp == OP_brtrue) ? (isFloat ? MOP_bgt : (isSigned ? MOP_bgt : MOP_bhi))
                                       : (isSigned ? MOP_ble : MOP_bls);
        case OP_ge:
            return (brOp == OP_brtrue) ? (isFloat ? MOP_bpl : (isSigned ? MOP_bge : MOP_bhs))
                                       : (isSigned ? MOP_blt : MOP_blo);
        default:
            CHECK_FATAL(false, "PickJmpInsn error");
    }
}
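
/*
 * Example rows of the mapping above (reader aid only): `brtrue` of a signed
 * `lt` picks MOP_blt while the unsigned form picks MOP_blo; `brfalse` of a
 * signed `lt` branches on the negated condition, MOP_bge, or MOP_bpl for
 * floats so that an unordered (NaN) comparison still takes the branch.
 */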

bool AArch64CGFunc::GenerateCompareWithZeroInstruction(Opcode jmpOp, Opcode cmpOp, bool is64Bits, PrimType primType,
                                                       LabelOperand &targetOpnd, Operand &opnd0)
{
    bool finish = true;
    MOperator mOpCode = MOP_undef;
    switch (cmpOp) {
        case OP_ne: {
            if (jmpOp == OP_brtrue) {
                mOpCode = is64Bits ? MOP_xcbnz : MOP_wcbnz;
            } else {
                mOpCode = is64Bits ? MOP_xcbz : MOP_wcbz;
            }
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, targetOpnd));
            break;
        }
        case OP_eq: {
            if (jmpOp == OP_brtrue) {
                mOpCode = is64Bits ? MOP_xcbz : MOP_wcbz;
            } else {
                mOpCode = is64Bits ? MOP_xcbnz : MOP_wcbnz;
            }
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, targetOpnd));
            break;
        }
        /*
         * TBZ/TBNZ instructions have a range of +/-32KB, so a later phase needs to check whether the jump
         * target is reachable. If the branch target is not reachable, tbz/tbnz is changed into a combination
         * of ubfx and cbz/cbnz, which clobbers one extra register. With LSRA under O2, we can make use of the
         * reserved registers for that purpose.
         */
        case OP_lt: {
            if (primType == PTY_u64 || primType == PTY_u32) {
                return false;
            }
            ImmOperand &signBit =
                CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits, k8BitSize, false);
            if (jmpOp == OP_brtrue) {
                mOpCode = is64Bits ? MOP_xtbnz : MOP_wtbnz;
            } else {
                mOpCode = is64Bits ? MOP_xtbz : MOP_wtbz;
            }
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, signBit, targetOpnd));
            break;
        }
        case OP_ge: {
            if (primType == PTY_u64 || primType == PTY_u32) {
                return false;
            }
            ImmOperand &signBit =
                CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits, k8BitSize, false);
            if (jmpOp == OP_brtrue) {
                mOpCode = is64Bits ? MOP_xtbz : MOP_wtbz;
            } else {
                mOpCode = is64Bits ? MOP_xtbnz : MOP_wtbnz;
            }
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, signBit, targetOpnd));
            break;
        }
        default:
            finish = false;
            break;
    }
    return finish;
}
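
/*
 * Examples of the compare-with-zero shortcut above (reader aid only):
 *   brtrue (ne x, 0)      -> cbnz x, label       (no cmp needed)
 *   brfalse (ge i32 x, 0) -> tbnz w, #31, label  (test the sign bit)
 * For unsigned lt/ge against zero the function returns false, and the caller
 * falls back to the generic cmp + b.cond sequence.
 */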

void AArch64CGFunc::SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcode cmpOp, Operand &origOpnd0,
                                   Operand &origOpnd1, PrimType primType, bool signedCond)
{
    Operand *opnd0 = &origOpnd0;
    Operand *opnd1 = &origOpnd1;
    opnd0 = &LoadIntoRegister(origOpnd0, primType);

    bool is64Bits = GetPrimTypeBitSize(primType) == k64BitSize;
    bool isFloat = IsPrimitiveFloat(primType);
    Operand &rflag = GetOrCreateRflag();
    if (isFloat) {
        opnd1 = &LoadIntoRegister(origOpnd1, primType);
        MOperator mOp =
            is64Bits ? MOP_dcmperr : ((GetPrimTypeBitSize(primType) == k32BitSize) ? MOP_scmperr : MOP_hcmperr);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, rflag, *opnd0, *opnd1));
    } else {
        bool isImm = ((origOpnd1.GetKind() == Operand::kOpdImmediate) || (origOpnd1.GetKind() == Operand::kOpdOffset));
        if ((origOpnd1.GetKind() != Operand::kOpdRegister) && !isImm) {
            opnd1 = &SelectCopy(origOpnd1, primType, primType);
        }
        MOperator mOp = is64Bits ? MOP_xcmprr : MOP_wcmprr;

        if (isImm) {
            if (static_cast<ImmOperand *>(opnd1)->IsZero() &&
                (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0)) {
                bool finish = GenerateCompareWithZeroInstruction(jmpOp, cmpOp, is64Bits, primType, targetOpnd, *opnd0);
                if (finish) {
                    return;
                }
            }

            /*
             * aarch64 assembly takes up to 24-bit immediates, generating
             * either cmp or cmp with shift 12 encoding
             */
            ImmOperand *immOpnd = static_cast<ImmOperand *>(opnd1);
            if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) ||
                immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) {
                mOp = is64Bits ? MOP_xcmpri : MOP_wcmpri;
            } else {
                opnd1 = &SelectCopy(*opnd1, primType, primType);
            }
        }
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, rflag, *opnd0, *opnd1));
    }

    bool isSigned = IsPrimitiveInteger(primType) ? IsSignedInteger(primType) : (signedCond ? true : false);
    MOperator jmpOperator = PickJmpInsn(jmpOp, cmpOp, isFloat, isSigned);
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(jmpOperator, rflag, targetOpnd));
}
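
/*
 * End-to-end sketch of the selection above (reader aid only):
 *   brtrue @lab (ge i32 (dread i32 %x, constval i32 10))
 * loads %x into a register, emits `cmp w0, #10` (10 fits the 12-bit form,
 * MOP_wcmpri) and then `b.ge .lab` via PickJmpInsn; a zero immediate above
 * -O0 is diverted to the cbz/tbz shortcut first.
 */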

/*
 * brtrue @label0 (ge u8 i32 (
 *     cmp i32 i64 (dread i64 %Reg2_J, dread i64 %Reg4_J),
 *     constval i32 0))
 * ===>
 * cmp r1, r2
 * bge Cond, label0
 */
void AArch64CGFunc::SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &expr)
{
    DEBUG_ASSERT(expr.GetOpCode() == OP_cmp, "unexpected opcode");
    Operand *opnd0 = HandleExpr(expr, *expr.Opnd(0));
    Operand *opnd1 = HandleExpr(expr, *expr.Opnd(1));
    CompareNode *node = static_cast<CompareNode *>(&expr);
    bool isFloat = IsPrimitiveFloat(node->GetOpndType());
    opnd0 = &LoadIntoRegister(*opnd0, node->GetOpndType());
    /*
     * most FP constants are passed as MemOperand,
     * except 0.0 which is passed as kOpdFPImmediate
     */
    Operand::OperandType opnd1Type = opnd1->GetKind();
    if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) &&
        (opnd1Type != Operand::kOpdOffset)) {
        opnd1 = &LoadIntoRegister(*opnd1, node->GetOpndType());
    }
    SelectAArch64Cmp(*opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(node->GetOpndType()));
    /* handle condgoto now. */
    LabelIdx labelIdx = stmt.GetOffset();
    BaseNode *condNode = stmt.Opnd(0);
    LabelOperand &targetOpnd = GetOrCreateLabelOperand(labelIdx);
    Opcode cmpOp = condNode->GetOpCode();
    PrimType pType = static_cast<CompareNode *>(condNode)->GetOpndType();
    isFloat = IsPrimitiveFloat(pType);
    Operand &rflag = GetOrCreateRflag();
    bool isSigned =
        IsPrimitiveInteger(pType) ? IsSignedInteger(pType) : (IsSignedInteger(condNode->GetPrimType()) ? true : false);
    MOperator jmpOp = PickJmpInsn(stmt.GetOpCode(), cmpOp, isFloat, isSigned);
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(jmpOp, rflag, targetOpnd));
}

/*
 * Special case:
 * brfalse(ge (cmpg (op0, op1), 0)) ==>
 * fcmp op0, op1
 * blo
 */
void AArch64CGFunc::SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &expr)
{
    auto &cmpNode = static_cast<CompareNode &>(expr);
    Operand *opnd0 = HandleExpr(cmpNode, *cmpNode.Opnd(0));
    Operand *opnd1 = HandleExpr(cmpNode, *cmpNode.Opnd(1));
    PrimType operandType = cmpNode.GetOpndType();
    opnd0 = opnd0->IsRegister() ? static_cast<RegOperand *>(opnd0) : &SelectCopy(*opnd0, operandType, operandType);
    Operand::OperandType opnd1Type = opnd1->GetKind();
    if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) &&
        (opnd1Type != Operand::kOpdOffset)) {
        opnd1 = opnd1->IsRegister() ? static_cast<RegOperand *>(opnd1) : &SelectCopy(*opnd1, operandType, operandType);
    }
#ifdef DEBUG
    bool isFloat = IsPrimitiveFloat(operandType);
    if (!isFloat) {
        DEBUG_ASSERT(false, "incorrect operand types");
    }
#endif
    SelectTargetFPCmpQuiet(*opnd0, *opnd1, GetPrimTypeBitSize(operandType));
    Operand &rFlag = GetOrCreateRflag();
    LabelIdx tempLabelIdx = stmt.GetOffset();
    LabelOperand &targetOpnd = GetOrCreateLabelOperand(tempLabelIdx);
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_blo, rFlag, targetOpnd));
}

void AArch64CGFunc::SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1)
{
    /*
     * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node
     * such as a dread for example
     */
    LabelIdx labelIdx = stmt.GetOffset();
    BaseNode *condNode = stmt.Opnd(0);
    LabelOperand &targetOpnd = GetOrCreateLabelOperand(labelIdx);
    Opcode cmpOp;

    PrimType pType;
    if (kOpcodeInfo.IsCompare(condNode->GetOpCode())) {
        cmpOp = condNode->GetOpCode();
        pType = static_cast<CompareNode *>(condNode)->GetOpndType();
    } else {
        /* not a compare node; dread for example, take its pType */
        cmpOp = OP_ne;
        pType = condNode->GetPrimType();
    }
    bool signedCond = IsSignedInteger(pType) || IsPrimitiveFloat(pType);
    SelectCondGoto(targetOpnd, stmt.GetOpCode(), cmpOp, opnd0, opnd1, pType, signedCond);
}

void AArch64CGFunc::SelectGoto(GotoNode &stmt)
{
    Operand &targetOpnd = GetOrCreateLabelOperand(stmt.GetOffset());
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd));
    GetCurBB()->SetKind(BB::kBBGoto);
}

Operand *AArch64CGFunc::SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
{
    PrimType dtype = node.GetPrimType();
    bool isSigned = IsSignedInteger(dtype);
    uint32 dsize = GetPrimTypeBitSize(dtype);
    bool is64Bits = (dsize == k64BitSize);
    bool isFloat = IsPrimitiveFloat(dtype);
    RegOperand *resOpnd = nullptr;
    /* promoted type */
    PrimType primType =
        isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)));
    if (parent.GetOpCode() == OP_regassign) {
        auto &regAssignNode = static_cast<const RegassignNode &>(parent);
        PregIdx pregIdx = regAssignNode.GetRegIdx();
        if (IsSpecialPseudoRegister(pregIdx)) {
            resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, dtype);
        } else {
            resOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx));
        }
    } else {
        resOpnd = &CreateRegisterOperandOfType(primType);
    }
    SelectAdd(*resOpnd, opnd0, opnd1, primType);
    return resOpnd;
}
1321
1322 void AArch64CGFunc::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
1323 {
1324 Operand::OperandType opnd0Type = opnd0.GetKind();
1325 Operand::OperandType opnd1Type = opnd1.GetKind();
1326 uint32 dsize = GetPrimTypeBitSize(primType);
1327 bool is64Bits = (dsize == k64BitSize);
1328 if (opnd0Type != Operand::kOpdRegister) {
1329 /* add #imm, #imm */
1330 if (opnd1Type != Operand::kOpdRegister) {
1331 SelectAdd(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType);
1332 return;
1333 }
1334 /* add #imm, reg */
1335 SelectAdd(resOpnd, opnd1, opnd0, primType); /* commutative */
1336 return;
1337 }
1338 /* add reg, reg */
1339 if (opnd1Type == Operand::kOpdRegister) {
1340 DEBUG_ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI add");
1341 MOperator mOp =
1342 IsPrimitiveFloat(primType) ? (is64Bits ? MOP_dadd : MOP_sadd) : (is64Bits ? MOP_xaddrrr : MOP_waddrrr);
1343 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
1344 return;
1345 } else if (opnd1Type == Operand::kOpdStImmediate) {
1346 CHECK_FATAL(is64Bits, "baseReg of mem in aarch64 must be 64bit size");
1347 /* add reg, reg, #:lo12:sym+offset */
1348 StImmOperand &stImmOpnd = static_cast<StImmOperand &>(opnd1);
1349 Insn &newInsn = GetInsnBuilder()->BuildInsn(MOP_xadrpl12, resOpnd, opnd0, stImmOpnd);
1350 GetCurBB()->AppendInsn(newInsn);
1351 return;
1352 } else if (!((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset))) {
1353 /* add reg, other operand type */
1354 SelectAdd(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType);
1355 return;
1356 } else {
1357 /* add reg, #imm */
1358 ImmOperand *immOpnd = static_cast<ImmOperand *>(&opnd1);
1359 if (immOpnd->IsNegative()) {
1360 immOpnd->Negate();
1361 SelectSub(resOpnd, opnd0, *immOpnd, primType);
1362 return;
1363 }
1364 if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) {
1365 /*
1366 * ADD Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers
1367 * ADD Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers
1368 * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12
1369 * aarch64 assembly accepts an immediate of up to 24 bits if the lower 12 bits are all 0
1370 */
1371 MOperator mOpCode = MOP_undef;
1372 Operand *newOpnd0 = &opnd0;
1373 if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) ||
1374 immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) {
1375 /* process higher 12 bits */
1376 ImmOperand &immOpnd2 =
1377 CreateImmOperand(static_cast<int64>(static_cast<uint64>(immOpnd->GetValue()) >> kMaxImmVal12Bits),
1378 immOpnd->GetSize(), immOpnd->IsSignedValue());
1379 mOpCode = is64Bits ? MOP_xaddrri24 : MOP_waddrri24;
1380 Operand *tmpRes = IsAfterRegAlloc() ? &resOpnd : &CreateRegisterOperandOfType(primType);
1381 BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize);
1382 Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, *tmpRes, opnd0, immOpnd2, shiftopnd);
1383 GetCurBB()->AppendInsn(newInsn);
1384 immOpnd->ModuloByPow2(static_cast<int32>(kMaxImmVal12Bits));
1385 newOpnd0 = tmpRes;
1386 }
1387 /* process lower 12 bits */
1388 mOpCode = is64Bits ? MOP_xaddrri12 : MOP_waddrri12;
1389 Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *newOpnd0, *immOpnd);
1390 GetCurBB()->AppendInsn(newInsn);
1391 return;
1392 }
1393 /* load into register */
1394 int64 immVal = immOpnd->GetValue();
1395 int32 tail0bitNum = GetTail0BitNum(immVal);
1396 int32 head0bitNum = GetHead0BitNum(immVal);
1397 const int32 bitNum = (k64BitSizeInt - head0bitNum) - tail0bitNum;
1398 RegOperand ®Opnd = CreateRegisterOperandOfType(primType);
1399 if (isAfterRegAlloc) {
1400 RegType regty = GetRegTyFromPrimTy(primType);
1401 uint32 bytelen = GetPrimTypeSize(primType);
1402 regOpnd = GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(R16), bytelen, regty);
1403 }
1404 regno_t regNO0 = static_cast<RegOperand &>(opnd0).GetRegisterNumber();
1405 /* addrrrs does not support sp */
1406 if (bitNum <= k16ValidBit && regNO0 != RSP) {
1407 int64 newImm = (static_cast<uint64>(immVal) >> static_cast<uint32>(tail0bitNum)) & 0xFFFF;
1408 ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false);
1409 SelectCopyImm(regOpnd, immOpnd1, primType);
1410 uint32 mopBadd = is64Bits ? MOP_xaddrrrs : MOP_waddrrrs;
1411 int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
1412 BitShiftOperand &bitShiftOpnd =
1413 CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(tail0bitNum), bitLen);
1414 Insn &newInsn = GetInsnBuilder()->BuildInsn(mopBadd, resOpnd, opnd0, regOpnd, bitShiftOpnd);
1415 GetCurBB()->AppendInsn(newInsn);
1416 return;
1417 }
1418
1419 SelectCopyImm(regOpnd, *immOpnd, primType);
1420 MOperator mOpCode = is64Bits ? MOP_xaddrrr : MOP_waddrrr;
1421 Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, regOpnd);
1422 GetCurBB()->AppendInsn(newInsn);
1423 }
1424 }
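/*
 * Worked example (illustrative; register names and the constant are chosen for
 * exposition, not taken from the original source): for res = x0 + 0x45678 the
 * immediate fits in 24 bits but not in one 12-bit (possibly shifted) field, so
 * the path above splits it:
 *   add res, x0, #0x45, LSL #12 // high 12 bits: 0x45 << 12 = 0x45000
 *   add res, res, #0x678 // low 12 bits
 * An immediate with at most 16 significant bits after stripping trailing zeros
 * instead goes through a mov into a scratch register plus add with a
 * shifted-register operand (xaddrrrs/waddrrrs).
 */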
1425
1426 void AArch64CGFunc::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
1427 {
1428 Operand::OperandType opnd1Type = opnd1.GetKind();
1429 uint32 dsize = GetPrimTypeBitSize(primType);
1430 bool is64Bits = (dsize == k64BitSize);
1431 bool isFloat = IsPrimitiveFloat(primType);
1432 Operand *opnd0Bak = &LoadIntoRegister(opnd0, primType);
1433 if (opnd1Type == Operand::kOpdRegister) {
1434 MOperator mOp = isFloat ? (is64Bits ? MOP_dsub : MOP_ssub) : (is64Bits ? MOP_xsubrrr : MOP_wsubrrr);
1435 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, *opnd0Bak, opnd1));
1436 return;
1437 }
1438
1439 if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdOffset)) {
1440 SelectSub(resOpnd, *opnd0Bak, SelectCopy(opnd1, primType, primType), primType);
1441 return;
1442 }
1443
1444 ImmOperand *immOpnd = static_cast<ImmOperand *>(&opnd1);
1445 if (immOpnd->IsNegative()) {
1446 immOpnd->Negate();
1447 SelectAdd(resOpnd, *opnd0Bak, *immOpnd, primType);
1448 return;
1449 }
1450
1451 int64 higher12BitVal = static_cast<int64>(static_cast<uint64>(immOpnd->GetValue()) >> kMaxImmVal12Bits);
1452 if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0) && higher12BitVal + 1 <= kMaxPimm8) {
1453 /*
1454 * SUB Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers
1455 * SUB Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers
1456 * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12
1457 * aarch64 assembly accepts an immediate of up to 24 bits if the lower 12 bits are all 0
1458 * large offset is treated as sub (higher 12 bits + 4096) + add
1459 * it gives opportunities for combining add + ldr due to the characteristics of aarch64's load/store
1460 */
1461 MOperator mOpCode = MOP_undef;
1462 bool isSplitSub = false;
1463 if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) {
1464 isSplitSub = true;
1465 /* process higher 12 bits */
1466 ImmOperand &immOpnd2 = CreateImmOperand(higher12BitVal + 1, immOpnd->GetSize(), immOpnd->IsSignedValue());
1467
1468 mOpCode = is64Bits ? MOP_xsubrri24 : MOP_wsubrri24;
1469 BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize);
1470 Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, immOpnd2, shiftopnd);
1471 GetCurBB()->AppendInsn(newInsn);
1472 immOpnd->ModuloByPow2(static_cast<int64>(kMaxImmVal12Bits));
1473 immOpnd->SetValue(static_cast<int64>(kMax12UnsignedImm) - immOpnd->GetValue());
1474 opnd0Bak = &resOpnd;
1475 }
1476 /* process lower 12 bits */
1477 mOpCode = isSplitSub ? (is64Bits ? MOP_xaddrri12 : MOP_waddrri12) : (is64Bits ? MOP_xsubrri12 : MOP_wsubrri12);
1478 Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, *immOpnd);
1479 GetCurBB()->AppendInsn(newInsn);
1480 return;
1481 }
1482
1483 /* load into register */
1484 int64 immVal = immOpnd->GetValue();
1485 int32 tail0bitNum = GetTail0BitNum(immVal);
1486 int32 head0bitNum = GetHead0BitNum(immVal);
1487 const int32 bitNum = (k64BitSizeInt - head0bitNum) - tail0bitNum;
1488 RegOperand ®Opnd = CreateRegisterOperandOfType(primType);
1489 if (isAfterRegAlloc) {
1490 RegType regty = GetRegTyFromPrimTy(primType);
1491 uint32 bytelen = GetPrimTypeSize(primType);
1492 regOpnd = GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(R16), bytelen, regty);
1493 }
1494
1495 if (bitNum <= k16ValidBit) {
1496 int64 newImm = (static_cast<uint64>(immVal) >> static_cast<uint32>(tail0bitNum)) & 0xFFFF;
1497 ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false);
1498 SelectCopyImm(regOpnd, immOpnd1, primType);
1499 uint32 mopBsub = is64Bits ? MOP_xsubrrrs : MOP_wsubrrrs;
1500 int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
1501 BitShiftOperand &bitShiftOpnd =
1502 CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(tail0bitNum), bitLen);
1503 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBsub, resOpnd, *opnd0Bak, regOpnd, bitShiftOpnd));
1504 return;
1505 }
1506
1507 SelectCopyImm(regOpnd, *immOpnd, primType);
1508 MOperator mOpCode = is64Bits ? MOP_xsubrrr : MOP_wsubrrr;
1509 Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, regOpnd);
1510 GetCurBB()->AppendInsn(newInsn);
1511 }
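/*
 * Worked example (illustrative; registers and constant chosen for exposition,
 * not from the original source): res = x0 - 0x45678 takes the split-sub path:
 *   sub res, x0, #0x46, LSL #12 // (high 12 bits + 1) << 12 = 0x46000
 *   add res, res, #0x988 // 0x1000 - low 12 bits (0x1000 - 0x678)
 * since x - (hi << 12) - lo == x - ((hi + 1) << 12) + (0x1000 - lo); ending on
 * an add preserves the add+ldr combining opportunity mentioned above.
 */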
1512
1513 Operand *AArch64CGFunc::SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1514 {
1515 PrimType dtype = node.GetPrimType();
1516 bool isSigned = IsSignedInteger(dtype);
1517 uint32 dsize = GetPrimTypeBitSize(dtype);
1518 bool is64Bits = (dsize == k64BitSize);
1519 bool isFloat = IsPrimitiveFloat(dtype);
1520 RegOperand *resOpnd = nullptr;
1521 PrimType primType =
1522 isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)));
1523 resOpnd = &GetOrCreateResOperand(parent, primType);
1524 SelectSub(*resOpnd, opnd0, opnd1, primType);
1525 return resOpnd;
1526 }
1527
1528 Operand *AArch64CGFunc::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1529 {
1530 PrimType dtype = node.GetPrimType();
1531 bool isSigned = IsSignedInteger(dtype);
1532 uint32 dsize = GetPrimTypeBitSize(dtype);
1533 bool is64Bits = (dsize == k64BitSize);
1534 bool isFloat = IsPrimitiveFloat(dtype);
1535 RegOperand *resOpnd = nullptr;
1536 PrimType primType =
1537 isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)));
1538 resOpnd = &GetOrCreateResOperand(parent, primType);
1539 SelectMpy(*resOpnd, opnd0, opnd1, primType);
1540 return resOpnd;
1541 }
1542
1543 void AArch64CGFunc::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
1544 {
1545 Operand::OperandType opnd0Type = opnd0.GetKind();
1546 Operand::OperandType opnd1Type = opnd1.GetKind();
1547 uint32 dsize = GetPrimTypeBitSize(primType);
1548 bool is64Bits = (dsize == k64BitSize);
1549
1550 if (((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset) ||
1551 (opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) &&
1552 IsPrimitiveInteger(primType)) {
1553 ImmOperand *imm = ((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset))
1554 ? static_cast<ImmOperand *>(&opnd0)
1555 : static_cast<ImmOperand *>(&opnd1);
1556 Operand *otherOp =
1557 ((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset)) ? &opnd1 : &opnd0;
1558 int64 immValue = llabs(imm->GetValue());
1559 if (immValue != 0 && (static_cast<uint64>(immValue) & (static_cast<uint64>(immValue) - 1)) == 0) {
1560 /* immValue is 1 << n */
1561 if (otherOp->GetKind() != Operand::kOpdRegister) {
1562 otherOp = &SelectCopy(*otherOp, primType, primType);
1563 }
1564 int64 shiftVal = __builtin_ffsll(immValue);
1565 ImmOperand &shiftNum = CreateImmOperand(shiftVal - 1, dsize, false);
1566 SelectShift(resOpnd, *otherOp, shiftNum, kShiftLeft, primType);
1567 bool reachSignBit = (is64Bits && (shiftVal == k64BitSize)) || (!is64Bits && (shiftVal == k32BitSize));
1568 if (imm->GetValue() < 0 && !reachSignBit) {
1569 SelectNeg(resOpnd, resOpnd, primType);
1570 }
1571
1572 return;
1573 } else if (immValue > 2) { // immValue should be larger than 2
1574 uint32 zeroNum = static_cast<uint32>(__builtin_ffsll(immValue) - 1);
1575 int64 headVal = static_cast<uint64>(immValue) >> zeroNum;
1576 /*
1577 * if (headVal - 1) & (headVal - 2) == 0, that is (immVal >> zeroNum) - 1 == 1 << n
1578 * otherOp * immVal = (otherOp * (immVal >> zeroNum) * (1 << zeroNum)
1579 * = (otherOp * ((immVal >> zeroNum) - 1) + otherOp) * (1 << zeroNum)
1580 */
1581 CHECK_FATAL(static_cast<uint64>(headVal) >= 2, "value overflow");
1582 // the constant 2: see the comment above
1583 if (((static_cast<uint64>(headVal) - 1) & (static_cast<uint64>(headVal) - 2)) == 0) {
1584 if (otherOp->GetKind() != Operand::kOpdRegister) {
1585 otherOp = &SelectCopy(*otherOp, primType, primType);
1586 }
1587 ImmOperand &shiftNum1 = CreateImmOperand(__builtin_ffsll(headVal - 1) - 1, dsize, false);
1588 RegOperand &tmpOpnd = CreateRegisterOperandOfType(primType);
1589 SelectShift(tmpOpnd, *otherOp, shiftNum1, kShiftLeft, primType);
1590 SelectAdd(resOpnd, *otherOp, tmpOpnd, primType);
1591 ImmOperand &shiftNum2 = CreateImmOperand(zeroNum, dsize, false);
1592 SelectShift(resOpnd, resOpnd, shiftNum2, kShiftLeft, primType);
1593 if (imm->GetValue() < 0) {
1594 SelectNeg(resOpnd, resOpnd, primType);
1595 }
1596
1597 return;
1598 }
1599 }
1600 }
1601
1602 if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) {
1603 SelectMpy(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType);
1604 } else if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) {
1605 SelectMpy(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType);
1606 } else if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) {
1607 SelectMpy(resOpnd, opnd1, opnd0, primType);
1608 } else {
1609 DEBUG_ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI Mpy");
1610 MOperator mOp =
1611 IsPrimitiveFloat(primType) ? (is64Bits ? MOP_xvmuld : MOP_xvmuls) : (is64Bits ? MOP_xmulrrr : MOP_wmulrrr);
1612 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
1613 }
1614 }
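/*
 * Worked example (illustrative; registers chosen for exposition, not from the
 * original source): res = x * 10. Here zeroNum = 1 and headVal = 5, and
 * headVal - 1 = 4 is a power of two, so the selector emits
 *   lsl tmp, x0, #2 // tmp = x * 4
 *   add res, x0, tmp // res = x * 5
 *   lsl res, res, #1 // res = x * 10
 * with a trailing neg when the multiplier is negative.
 */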
1615
1616 void AArch64CGFunc::SelectDiv(Operand &resOpnd, Operand &origOpnd0, Operand &opnd1, PrimType primType)
1617 {
1618 Operand &opnd0 = LoadIntoRegister(origOpnd0, primType);
1619 Operand::OperandType opnd0Type = opnd0.GetKind();
1620 Operand::OperandType opnd1Type = opnd1.GetKind();
1621 uint32 dsize = GetPrimTypeBitSize(primType);
1622 bool is64Bits = (dsize == k64BitSize);
1623
1624 if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) {
1625 if (((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) &&
1626 IsSignedInteger(primType)) {
1627 ImmOperand *imm = static_cast<ImmOperand *>(&opnd1);
1628 int64 immValue = llabs(imm->GetValue());
1629 if ((immValue != 0) && (static_cast<uint64>(immValue) & (static_cast<uint64>(immValue) - 1)) == 0) {
1630 if (immValue == 1) {
1631 if (imm->GetValue() > 0) {
1632 uint32 mOp = is64Bits ? MOP_xmovrr : MOP_wmovrr;
1633 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0));
1634 } else {
1635 SelectNeg(resOpnd, opnd0, primType);
1636 }
1637
1638 return;
1639 }
1640 int32 shiftNumber = __builtin_ffsll(immValue) - 1;
1641 ImmOperand &shiftNum = CreateImmOperand(shiftNumber, dsize, false);
1642 Operand &tmpOpnd = CreateRegisterOperandOfType(primType);
1643 SelectShift(tmpOpnd, opnd0, CreateImmOperand(dsize - 1, dsize, false), kShiftAright, primType);
1644 uint32 mopBadd = is64Bits ? MOP_xaddrrrs : MOP_waddrrrs;
1645 int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
1646 BitShiftOperand &shiftOpnd = CreateBitShiftOperand(BitShiftOperand::kLSR, dsize - shiftNumber, bitLen);
1647 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBadd, tmpOpnd, opnd0, tmpOpnd, shiftOpnd));
1648 SelectShift(resOpnd, tmpOpnd, shiftNum, kShiftAright, primType);
1649 if (imm->GetValue() < 0) {
1650 SelectNeg(resOpnd, resOpnd, primType);
1651 }
1652
1653 return;
1654 }
1655 } else if (((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) &&
1656 IsUnsignedInteger(primType)) {
1657 ImmOperand *imm = static_cast<ImmOperand *>(&opnd1);
1658 if (imm->GetValue() != 0) {
1659 if ((imm->GetValue() > 0) &&
1660 ((static_cast<uint64>(imm->GetValue()) & (static_cast<uint64>(imm->GetValue()) - 1)) == 0)) {
1661 ImmOperand &shiftNum = CreateImmOperand(__builtin_ffsll(imm->GetValue()) - 1, dsize, false);
1662 SelectShift(resOpnd, opnd0, shiftNum, kShiftLright, primType);
1663
1664 return;
1665 } else if (imm->GetValue() < 0) {
1666 SelectAArch64Cmp(opnd0, *imm, true, dsize);
1667 SelectAArch64CSet(resOpnd, GetCondOperand(CC_CS), is64Bits);
1668
1669 return;
1670 }
1671 }
1672 }
1673 }
1674
1675 if (opnd0Type != Operand::kOpdRegister) {
1676 SelectDiv(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType);
1677 } else if (opnd1Type != Operand::kOpdRegister) {
1678 SelectDiv(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType);
1679 } else {
1680 DEBUG_ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI Div");
1681 MOperator mOp = IsPrimitiveFloat(primType)
1682 ? (is64Bits ? MOP_ddivrrr : MOP_sdivrrr)
1683 : (IsSignedInteger(primType) ? (is64Bits ? MOP_xsdivrrr : MOP_wsdivrrr)
1684 : (is64Bits ? MOP_xudivrrr : MOP_wudivrrr));
1685 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
1686 }
1687 }
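/*
 * Worked example (illustrative; registers chosen for exposition, not from the
 * original source): signed res = x0 / 8 (64-bit) takes the power-of-two path:
 *   asr tmp, x0, #63 // tmp = 0 or -1 (sign mask)
 *   add tmp, x0, tmp, LSR #61 // adds the bias 7 only for negative x0
 *   asr res, tmp, #3 // quotient, rounded toward zero
 * Unsigned division by a positive power of two is a single lsr; an unsigned
 * divisor whose immediate is negative as a signed value reduces to cmp + cset.
 */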
1688
1689 Operand *AArch64CGFunc::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1690 {
1691 PrimType dtype = node.GetPrimType();
1692 bool isSigned = IsSignedInteger(dtype);
1693 uint32 dsize = GetPrimTypeBitSize(dtype);
1694 bool is64Bits = (dsize == k64BitSize);
1695 bool isFloat = IsPrimitiveFloat(dtype);
1696 /* promoted type */
1697 PrimType primType =
1698 isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)));
1699 RegOperand &resOpnd = GetOrCreateResOperand(parent, primType);
1700 SelectDiv(resOpnd, opnd0, opnd1, primType);
1701 return &resOpnd;
1702 }
1703
1704 void AArch64CGFunc::SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, PrimType primType, bool isSigned,
1705 bool is64Bits)
1706 {
1707 Operand &opnd0 = LoadIntoRegister(lhsOpnd, primType);
1708 Operand &opnd1 = LoadIntoRegister(rhsOpnd, primType);
1709
1710 DEBUG_ASSERT(IsPrimitiveInteger(primType), "Wrong type for REM");
1711 /*
1712 * printf("%d \n", 29 % 7 );
1713 * -> 1
1714 * printf("%u %d \n", (unsigned)-7, (unsigned)(-7) % 7 );
1715 * -> 4294967289 4
1716 * printf("%d \n", (-7) % 7 );
1717 * -> 0
1718 * printf("%d \n", 237 % -7 );
1719 * -> 6
1720 * printf("implicit i->u conversion %d \n", ((unsigned)237) % -7 );
1721 * implicit conversion 237
1722 *
1723 * http://stackoverflow.com/questions/35351470/obtaining-remainder-using-single-aarch64-instruction
1724 * input: x0=dividend, x1=divisor
1725 * udiv|sdiv x2, x0, x1
1726 * msub x3, x2, x1, x0 -- multiply-sub : x3 <- x0 - x2*x1
1727 * result: x2=quotient, x3=remainder
1728 *
1729 * allocate temporary register
1730 */
1731 RegOperand &temp = CreateRegisterOperandOfType(primType);
1732 /*
1733 * mov w1, #2
1734 * sdiv wTemp, w0, w1
1735 * msub wRespond, wTemp, w1, w0
1736 * ========>
1737 * asr wTemp, w0, #31
1738 * lsr wTemp, wTemp, #31 (#30 for 4, #29 for 8, ...)
1739 * add wRespond, w0, wTemp
1740 * and wRespond, wRespond, #1 (#3 for 4, #7 for 8, ...)
1741 * sub wRespond, wRespond, wTemp
1742 *
1743 * if dividing by 2
1744 * ========>
1745 * lsr wTemp, w0, #31
1746 * add wRespond, w0, wTemp
1747 * and wRespond, wRespond, #1
1748 * sub wRespond, wRespond, wTemp
1749 *
1750 * for unsigned rem op, just use and
1751 */
1752 if ((Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2)) {
1753 ImmOperand *imm = nullptr;
1754 Insn *movImmInsn = GetCurBB()->GetLastMachineInsn();
1755 if (movImmInsn &&
1756 ((movImmInsn->GetMachineOpcode() == MOP_wmovri32) || (movImmInsn->GetMachineOpcode() == MOP_xmovri64)) &&
1757 movImmInsn->GetOperand(0).Equals(opnd1)) {
1758 /*
1759 * mov w1, #2
1760 * rem res, w0, w1
1761 */
1762 imm = static_cast<ImmOperand *>(&movImmInsn->GetOperand(kInsnSecondOpnd));
1763 } else if (opnd1.IsImmediate()) {
1764 /*
1765 * rem res, w0, #2
1766 */
1767 imm = static_cast<ImmOperand *>(&opnd1);
1768 }
1769 /* the sign of the divisor has no effect on the result */
1770 int64 dividor = 0;
1771 if (imm && (imm->GetValue() != LONG_MIN)) {
1772 dividor = llabs(imm->GetValue());
1773 }
1774 const int64 Log2OfDividor = GetLog2(static_cast<uint64>(dividor));
1775 if ((dividor != 0) && (Log2OfDividor > 0)) {
1776 if (is64Bits) {
1777 CHECK_FATAL(Log2OfDividor < k64BitSize, "imm out of bound");
1778 if (isSigned) {
1779 ImmOperand &rightShiftValue = CreateImmOperand(k64BitSize - Log2OfDividor, k64BitSize, isSigned);
1780 if (Log2OfDividor != 1) {
1781 /* shift right by 63 to smear the sign bit across the 64-bit register */
1782 ImmOperand &rightShiftAll = CreateImmOperand(63, k64BitSize, isSigned);
1783 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xasrrri6, temp, opnd0, rightShiftAll));
1784
1785 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xlsrrri6, temp, temp, rightShiftValue));
1786 } else {
1787 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xlsrrri6, temp, opnd0, rightShiftValue));
1788 }
1789 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrrr, resOpnd, opnd0, temp));
1790 ImmOperand &remBits = CreateImmOperand(dividor - 1, k64BitSize, isSigned);
1791 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, resOpnd, resOpnd, remBits));
1792 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xsubrrr, resOpnd, resOpnd, temp));
1793 return;
1794 } else if (imm && imm->GetValue() > 0) {
1795 ImmOperand &remBits = CreateImmOperand(dividor - 1, k64BitSize, isSigned);
1796 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, resOpnd, opnd0, remBits));
1797 return;
1798 }
1799 } else {
1800 CHECK_FATAL(Log2OfDividor < k32BitSize, "imm out of bound");
1801 if (isSigned) {
1802 ImmOperand &rightShiftValue = CreateImmOperand(k32BitSize - Log2OfDividor, k32BitSize, isSigned);
1803 if (Log2OfDividor != 1) {
1804 /* shift right by 31 to smear the sign bit across the 32-bit register */
1805 ImmOperand &rightShiftAll = CreateImmOperand(31, k32BitSize, isSigned);
1806 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wasrrri5, temp, opnd0, rightShiftAll));
1807
1808 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wlsrrri5, temp, temp, rightShiftValue));
1809 } else {
1810 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wlsrrri5, temp, opnd0, rightShiftValue));
1811 }
1812
1813 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_waddrrr, resOpnd, opnd0, temp));
1814 ImmOperand &remBits = CreateImmOperand(dividor - 1, k32BitSize, isSigned);
1815 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, resOpnd, resOpnd, remBits));
1816
1817 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wsubrrr, resOpnd, resOpnd, temp));
1818 return;
1819 } else if (imm && imm->GetValue() > 0) {
1820 ImmOperand &remBits = CreateImmOperand(dividor - 1, k32BitSize, isSigned);
1821 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, resOpnd, opnd0, remBits));
1822 return;
1823 }
1824 }
1825 }
1826 }
1827
1828 uint32 mopDiv = is64Bits ? (isSigned ? MOP_xsdivrrr : MOP_xudivrrr) : (isSigned ? MOP_wsdivrrr : MOP_wudivrrr);
1829 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopDiv, temp, opnd0, opnd1));
1830
1831 uint32 mopSub = is64Bits ? MOP_xmsubrrrr : MOP_wmsubrrrr;
1832 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopSub, resOpnd, temp, opnd1, opnd0));
1833 }
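/*
 * Worked example (illustrative; registers chosen for exposition, not from the
 * original source): signed res = w0 % 4 (32-bit) through the constant path:
 *   asr w16, w0, #31 // w16 = 0 or -1
 *   lsr w16, w16, #30 // w16 = 0 or 3 (divisor - 1, the bias)
 *   add w1, w0, w16
 *   and w1, w1, #3
 *   sub w1, w1, w16 // remainder carries the dividend's sign
 * Anything else uses the generic sdiv/udiv + msub pair.
 */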
1834
1835 Operand *AArch64CGFunc::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1836 {
1837 PrimType dtype = node.GetPrimType();
1838 DEBUG_ASSERT(IsPrimitiveInteger(dtype), "wrong type for rem");
1839 bool isSigned = IsSignedInteger(dtype);
1840 uint32 dsize = GetPrimTypeBitSize(dtype);
1841 bool is64Bits = (dsize == k64BitSize);
1842
1843 /* promoted type */
1844 PrimType primType = ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)));
1845 RegOperand &resOpnd = GetOrCreateResOperand(parent, primType);
1846 SelectRem(resOpnd, opnd0, opnd1, primType, isSigned, is64Bits);
1847 return &resOpnd;
1848 }
1849
1850 void AArch64CGFunc::SelectCmpOp(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, Opcode opcode, PrimType primType,
1851 const BaseNode &parent)
1852 {
1853 uint32 dsize = resOpnd.GetSize();
1854 bool isFloat = IsPrimitiveFloat(primType);
1855 Operand &opnd0 = LoadIntoRegister(lhsOpnd, primType);
1856
1857 /*
1858 * most of FP constants are passed as MemOperand
1859 * except 0.0 which is passed as kOpdFPImmediate
1860 */
1861 Operand::OperandType opnd1Type = rhsOpnd.GetKind();
1862 Operand *opnd1 = &rhsOpnd;
1863 if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) &&
1864 (opnd1Type != Operand::kOpdOffset)) {
1865 opnd1 = &LoadIntoRegister(rhsOpnd, primType);
1866 }
1867
1868 bool unsignedIntegerComparison = !isFloat && !IsSignedInteger(primType);
1869 /*
1870 * OP_cmp, OP_cmpl, OP_cmpg
1871 * <cmp> OP0, OP1 ; fcmp for OP_cmpl/OP_cmpg, cmp/fcmpe for OP_cmp
1872 * CSINV RES, WZR, WZR, GE
1873 * CSINC RES, RES, WZR, LE
1874 * if OP_cmpl, CSINV RES, RES, WZR, VC (no overflow)
1875 * if OP_cmpg, CSINC RES, RES, WZR, VC (no overflow)
1876 */
1877 RegOperand &xzr = GetZeroOpnd(dsize);
1878 if (opcode == OP_cmp) {
1879 SelectAArch64Cmp(opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(primType));
1880 if (unsignedIntegerComparison) {
1881 SelectAArch64CSINV(resOpnd, xzr, xzr, GetCondOperand(CC_HS), (dsize == k64BitSize));
1882 SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LS), (dsize == k64BitSize));
1883 } else {
1884 SelectAArch64CSINV(resOpnd, xzr, xzr, GetCondOperand(CC_GE), (dsize == k64BitSize));
1885 SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LE), (dsize == k64BitSize));
1886 }
1887 return;
1888 }
1889
1890 // lt u8 i32 (x, 0) => extract the sign bit
1891 if ((opcode == OP_lt) && opnd0.IsRegister() && opnd1->IsImmediate() &&
1892 (static_cast<ImmOperand *>(opnd1)->GetValue() == 0) && !isFloat) {
1893 bool is64Bits = (opnd0.GetSize() == k64BitSize);
1894 if (!unsignedIntegerComparison) {
1895 int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
1896 ImmOperand &shiftNum = CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits,
1897 static_cast<uint32>(bitLen), false);
1898 MOperator mOpCode = is64Bits ? MOP_xlsrrri6 : MOP_wlsrrri5;
1899 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, shiftNum));
1900 return;
1901 }
1902 ImmOperand &constNum = CreateImmOperand(0, is64Bits ? k64BitSize : k32BitSize, false);
1903 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(is64Bits ? MOP_xmovri64 : MOP_wmovri32, resOpnd, constNum));
1904 return;
1905 }
1906 SelectAArch64Cmp(opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(primType));
1907
1908 ConditionCode cc = CC_EQ;
1909 // need to handle unordered situation here.
1910 switch (opcode) {
1911 case OP_eq:
1912 cc = CC_EQ;
1913 break;
1914 case OP_ne:
1915 cc = isFloat ? CC_MI : CC_NE;
1916 break;
1917 case OP_le:
1918 cc = isFloat ? CC_LS : unsignedIntegerComparison ? CC_LS : CC_LE;
1919 break;
1920 case OP_ge:
1921 cc = unsignedIntegerComparison ? CC_HS : CC_GE;
1922 break;
1923 case OP_gt:
1924 cc = unsignedIntegerComparison ? CC_HI : CC_GT;
1925 break;
1926 case OP_lt:
1927 cc = isFloat ? CC_MI : unsignedIntegerComparison ? CC_LO : CC_LT;
1928 break;
1929 default:
1930 CHECK_FATAL(false, "illegal logical operator");
1931 }
1932 SelectAArch64CSet(resOpnd, GetCondOperand(cc), (dsize == k64BitSize));
1933 if (isFloat && opcode == OP_ne) {
1934 SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LE), (dsize == k64BitSize));
1935 }
1936 }
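/*
 * Worked example (illustrative; registers chosen for exposition, not from the
 * original source): a three-way signed OP_cmp lowers to
 *   cmp x0, x1
 *   csinv res, xzr, xzr, GE // res = (x0 < x1) ? -1 : 0
 *   csinc res, res, xzr, LE // res = (x0 > x1) ? 1 : res
 * so res ends up -1, 0 or 1; unsigned compares swap in HS/LS.
 */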
1937
1938 Operand *AArch64CGFunc::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1939 {
1940 RegOperand *resOpnd = &GetOrCreateResOperand(parent, node.GetPrimType());
1941 SelectCmpOp(*resOpnd, opnd0, opnd1, node.GetOpCode(), node.GetOpndType(), parent);
1942 return resOpnd;
1943 }
1944
1945 void AArch64CGFunc::SelectTargetFPCmpQuiet(Operand &o0, Operand &o1, uint32 dsize)
1946 {
1947 MOperator mOpCode = 0;
1948 if (o1.GetKind() == Operand::kOpdFPImmediate) {
1949 CHECK_FATAL(static_cast<ImmOperand &>(o1).GetValue() == 0, "NIY");
1950 mOpCode = (dsize == k64BitSize) ? MOP_dcmpqri : (dsize == k32BitSize) ? MOP_scmpqri : MOP_hcmpqri;
1951 } else if (o1.GetKind() == Operand::kOpdRegister) {
1952 mOpCode = (dsize == k64BitSize) ? MOP_dcmpqrr : (dsize == k32BitSize) ? MOP_scmpqrr : MOP_hcmpqrr;
1953 } else {
1954 CHECK_FATAL(false, "unsupported operand type");
1955 }
1956 Operand &rflag = GetOrCreateRflag();
1957 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, rflag, o0, o1));
1958 }
1959
1960 void AArch64CGFunc::SelectAArch64Cmp(Operand &o0, Operand &o1, bool isIntType, uint32 dsize)
1961 {
1962 MOperator mOpCode = 0;
1963 Operand *newO1 = &o1;
1964 if (isIntType) {
1965 if ((o1.GetKind() == Operand::kOpdImmediate) || (o1.GetKind() == Operand::kOpdOffset)) {
1966 ImmOperand *immOpnd = static_cast<ImmOperand *>(&o1);
1967 /*
1968 * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12
1969 * aarch64 assembly accepts an immediate of up to 24 bits if the lower 12 bits are all 0
1970 */
1971 if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) {
1972 mOpCode = (dsize == k64BitSize) ? MOP_xcmpri : MOP_wcmpri;
1973 } else {
1974 /* load into register */
1975 PrimType ptype = (dsize == k64BitSize) ? PTY_i64 : PTY_i32;
1976 newO1 = &SelectCopy(o1, ptype, ptype);
1977 mOpCode = (dsize == k64BitSize) ? MOP_xcmprr : MOP_wcmprr;
1978 }
1979 } else if (o1.GetKind() == Operand::kOpdRegister) {
1980 mOpCode = (dsize == k64BitSize) ? MOP_xcmprr : MOP_wcmprr;
1981 } else {
1982 CHECK_FATAL(false, "unsupported operand type");
1983 }
1984 } else { /* float */
1985 if (o1.GetKind() == Operand::kOpdFPImmediate) {
1986 CHECK_FATAL(static_cast<ImmOperand &>(o1).GetValue() == 0, "NIY");
1987 mOpCode = (dsize == k64BitSize) ? MOP_dcmperi : ((dsize == k32BitSize) ? MOP_scmperi : MOP_hcmperi);
1988 } else if (o1.GetKind() == Operand::kOpdRegister) {
1989 mOpCode = (dsize == k64BitSize) ? MOP_dcmperr : ((dsize == k32BitSize) ? MOP_scmperr : MOP_hcmperr);
1990 } else {
1991 CHECK_FATAL(false, "unsupported operand type");
1992 }
1993 }
1994 DEBUG_ASSERT(mOpCode != 0, "mOpCode undefined");
1995 Operand &rflag = GetOrCreateRflag();
1996 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, rflag, o0, *newO1));
1997 }
1998
1999 void AArch64CGFunc::SelectAArch64CSet(Operand &r, CondOperand &cond, bool is64Bits)
2000 {
2001 MOperator mOpCode = is64Bits ? MOP_xcsetrc : MOP_wcsetrc;
2002 Operand &rflag = GetOrCreateRflag();
2003 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, r, cond, rflag));
2004 }
2005
2006 void AArch64CGFunc::SelectAArch64CSINV(Operand &res, Operand &o0, Operand &o1, CondOperand &cond, bool is64Bits)
2007 {
2008 MOperator mOpCode = is64Bits ? MOP_xcsinvrrrc : MOP_wcsinvrrrc;
2009 Operand &rflag = GetOrCreateRflag();
2010 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, res, o0, o1, cond, rflag));
2011 }
2012
2013 void AArch64CGFunc::SelectAArch64CSINC(Operand &res, Operand &o0, Operand &o1, CondOperand &cond, bool is64Bits)
2014 {
2015 MOperator mOpCode = is64Bits ? MOP_xcsincrrrc : MOP_wcsincrrrc;
2016 Operand &rflag = GetOrCreateRflag();
2017 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, res, o0, o1, cond, rflag));
2018 }
2019
2020 Operand *AArch64CGFunc::SelectBand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
2021 {
2022 return SelectRelationOperator(kAND, node, opnd0, opnd1, parent);
2023 }
2024
2025 void AArch64CGFunc::SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
2026 {
2027 SelectRelationOperator(kAND, resOpnd, opnd0, opnd1, primType);
2028 }
2029
2030 Operand *AArch64CGFunc::SelectRelationOperator(RelationOperator operatorCode, const BinaryNode &node, Operand &opnd0,
2031 Operand &opnd1, const BaseNode &parent)
2032 {
2033 PrimType dtype = node.GetPrimType();
2034 bool isSigned = IsSignedInteger(dtype);
2035 uint32 dsize = GetPrimTypeBitSize(dtype);
2036 bool is64Bits = (dsize == k64BitSize);
2037 PrimType primType =
2038 is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32); /* promoted type */
2039 RegOperand *resOpnd = &GetOrCreateResOperand(parent, primType);
2040 SelectRelationOperator(operatorCode, *resOpnd, opnd0, opnd1, primType);
2041 return resOpnd;
2042 }
2043
2044 MOperator AArch64CGFunc::SelectRelationMop(RelationOperator operatorCode, RelationOperatorOpndPattern opndPattern,
2045 bool is64Bits, bool isBitmaskImmediate, bool isBitNumLessThan16) const
2046 {
2047 MOperator mOp = MOP_undef;
2048 if (opndPattern == kRegReg) {
2049 switch (operatorCode) {
2050 case kAND:
2051 mOp = is64Bits ? MOP_xandrrr : MOP_wandrrr;
2052 break;
2053 case kIOR:
2054 mOp = is64Bits ? MOP_xiorrrr : MOP_wiorrrr;
2055 break;
2056 case kEOR:
2057 mOp = is64Bits ? MOP_xeorrrr : MOP_weorrrr;
2058 break;
2059 default:
2060 break;
2061 }
2062 return mOp;
2063 }
2064 /* opndPattern == kRegImm */
2065 if (isBitmaskImmediate) {
2066 switch (operatorCode) {
2067 case kAND:
2068 mOp = is64Bits ? MOP_xandrri13 : MOP_wandrri12;
2069 break;
2070 case kIOR:
2071 mOp = is64Bits ? MOP_xiorrri13 : MOP_wiorrri12;
2072 break;
2073 case kEOR:
2074 mOp = is64Bits ? MOP_xeorrri13 : MOP_weorrri12;
2075 break;
2076 default:
2077 break;
2078 }
2079 return mOp;
2080 }
2081 /* normal imm value */
2082 if (isBitNumLessThan16) {
2083 switch (operatorCode) {
2084 case kAND:
2085 mOp = is64Bits ? MOP_xandrrrs : MOP_wandrrrs;
2086 break;
2087 case kIOR:
2088 mOp = is64Bits ? MOP_xiorrrrs : MOP_wiorrrrs;
2089 break;
2090 case kEOR:
2091 mOp = is64Bits ? MOP_xeorrrrs : MOP_weorrrrs;
2092 break;
2093 default:
2094 break;
2095 }
2096 return mOp;
2097 }
2098 return mOp;
2099 }
2100
2101 void AArch64CGFunc::SelectRelationOperator(RelationOperator operatorCode, Operand &resOpnd, Operand &opnd0,
2102 Operand &opnd1, PrimType primType)
2103 {
2104 Operand::OperandType opnd0Type = opnd0.GetKind();
2105 Operand::OperandType opnd1Type = opnd1.GetKind();
2106 uint32 dsize = GetPrimTypeBitSize(primType);
2107 bool is64Bits = (dsize == k64BitSize);
2108 /* op #imm. #imm */
2109 if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) {
2110 SelectRelationOperator(operatorCode, resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType);
2111 return;
2112 }
2113 /* op #imm, reg -> op reg, #imm */
2114 if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) {
2115 SelectRelationOperator(operatorCode, resOpnd, opnd1, opnd0, primType);
2116 return;
2117 }
2118 /* op reg, reg */
2119 if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) {
2120 DEBUG_ASSERT(IsPrimitiveInteger(primType), "NYI band");
2121 MOperator mOp = SelectRelationMop(operatorCode, kRegReg, is64Bits, false, false);
2122 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
2123 return;
2124 }
2125 /* op reg, #imm */
2126 if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) {
2127 if (!((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset))) {
2128 SelectRelationOperator(operatorCode, resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType);
2129 return;
2130 }
2131
2132 ImmOperand *immOpnd = static_cast<ImmOperand *>(&opnd1);
2133 if (immOpnd->IsZero()) {
2134 if (operatorCode == kAND) {
2135 uint32 mopMv = is64Bits ? MOP_xmovrr : MOP_wmovrr;
2136 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopMv, resOpnd, GetZeroOpnd(dsize)));
2137 } else if ((operatorCode == kIOR) || (operatorCode == kEOR)) {
2138 SelectCopy(resOpnd, primType, opnd0, primType);
2139 }
2140 } else if ((immOpnd->IsAllOnes()) || (!is64Bits && immOpnd->IsAllOnes32bit())) {
2141 if (operatorCode == kAND) {
2142 SelectCopy(resOpnd, primType, opnd0, primType);
2143 } else if (operatorCode == kIOR) {
2144 uint32 mopMovn = is64Bits ? MOP_xmovnri16 : MOP_wmovnri16;
2145 ImmOperand &src16 = CreateImmOperand(0, k16BitSize, false);
2146 BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(0, is64Bits);
2147 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopMovn, resOpnd, src16, *lslOpnd));
2148 } else if (operatorCode == kEOR) {
2149 SelectMvn(resOpnd, opnd0, primType);
2150 }
2151 } else if (immOpnd->IsBitmaskImmediate()) {
2152 MOperator mOp = SelectRelationMop(operatorCode, kRegImm, is64Bits, true, false);
2153 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
2154 } else {
2155 int64 immVal = immOpnd->GetValue();
2156 int32 tail0BitNum = GetTail0BitNum(immVal);
2157 int32 head0BitNum = GetHead0BitNum(immVal);
2158 const int32 bitNum = (k64BitSizeInt - head0BitNum) - tail0BitNum;
2159 RegOperand ®Opnd = CreateRegisterOperandOfType(primType);
2160
2161 if (bitNum <= k16ValidBit) {
2162 int64 newImm = (static_cast<uint64>(immVal) >> static_cast<uint32>(tail0BitNum)) & 0xFFFF;
2163 ImmOperand &immOpnd1 = CreateImmOperand(newImm, k32BitSize, false);
2164 SelectCopyImm(regOpnd, immOpnd1, primType);
2165 MOperator mOp = SelectRelationMop(operatorCode, kRegImm, is64Bits, false, true);
2166 int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
2167 BitShiftOperand &shiftOpnd =
2168 CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(tail0BitNum), bitLen);
2169 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, regOpnd, shiftOpnd));
2170 } else {
2171 SelectCopyImm(regOpnd, *immOpnd, primType);
2172 MOperator mOp = SelectRelationMop(operatorCode, kRegReg, is64Bits, false, false);
2173 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, regOpnd));
2174 }
2175 }
2176 }
2177 }
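/*
 * Worked example (illustrative; registers and constant chosen for exposition,
 * not from the original source): res = x0 | 0x12340000. The constant is not a
 * valid bitmask immediate, but only 11 bits are significant once the 18
 * trailing zeros are stripped, so the shifted-register form applies:
 *   mov w16, #0x48d // 0x12340000 >> 18
 *   orr res, x0, x16, LSL #18
 * Constants with more than 16 significant bits are materialized in full and
 * combined register-register.
 */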
2178
2179 Operand *AArch64CGFunc::SelectBior(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
2180 {
2181 return SelectRelationOperator(kIOR, node, opnd0, opnd1, parent);
2182 }
2183
2184 void AArch64CGFunc::SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
2185 {
2186 SelectRelationOperator(kIOR, resOpnd, opnd0, opnd1, primType);
2187 }
2188
2189 Operand *AArch64CGFunc::SelectMinOrMax(bool isMin, const BinaryNode &node, Operand &opnd0, Operand &opnd1,
2190 const BaseNode &parent)
2191 {
2192 PrimType dtype = node.GetPrimType();
2193 bool isSigned = IsSignedInteger(dtype);
2194 uint32 dsize = GetPrimTypeBitSize(dtype);
2195 bool is64Bits = (dsize == k64BitSize);
2196 bool isFloat = IsPrimitiveFloat(dtype);
2197 /* promoted type */
2198 PrimType primType = isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32));
2199 RegOperand &resOpnd = GetOrCreateResOperand(parent, primType);
2200 SelectMinOrMax(isMin, resOpnd, opnd0, opnd1, primType);
2201 return &resOpnd;
2202 }
2203
2204 void AArch64CGFunc::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
2205 {
2206 uint32 dsize = GetPrimTypeBitSize(primType);
2207 bool is64Bits = (dsize == k64BitSize);
2208 if (IsPrimitiveInteger(primType)) {
2209 RegOperand ®Opnd0 = LoadIntoRegister(opnd0, primType);
2210 Operand ®Opnd1 = LoadIntoRegister(opnd1, primType);
2211 SelectAArch64Cmp(regOpnd0, regOpnd1, true, dsize);
2212 Operand &newResOpnd = LoadIntoRegister(resOpnd, primType);
2213 if (isMin) {
2214 CondOperand &cc = IsSignedInteger(primType) ? GetCondOperand(CC_LT) : GetCondOperand(CC_LO);
2215 SelectAArch64Select(newResOpnd, regOpnd0, regOpnd1, cc, true, dsize);
2216 } else {
2217 CondOperand &cc = IsSignedInteger(primType) ? GetCondOperand(CC_GT) : GetCondOperand(CC_HI);
2218 SelectAArch64Select(newResOpnd, regOpnd0, regOpnd1, cc, true, dsize);
2219 }
2220 } else if (IsPrimitiveFloat(primType)) {
2221 RegOperand ®Opnd0 = LoadIntoRegister(opnd0, primType);
2222 RegOperand ®Opnd1 = LoadIntoRegister(opnd1, primType);
2223 SelectFMinFMax(resOpnd, regOpnd0, regOpnd1, is64Bits, isMin);
2224 } else {
2225 CHECK_FATAL(false, "NIY type max or min");
2226 }
2227 }
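/*
 * Sketch (illustrative; the conditional-select mnemonic is assumed from the
 * CC_LT/CC_LO/CC_GT/CC_HI conditions above, not quoted from the original
 * source): signed min becomes a compare plus a conditional select,
 *   cmp x0, x1
 *   csel res, x0, x1, LT // LO when unsigned; GT/HI for max
 * while float min/max map directly onto fmin/fmax.
 */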
2228
2229 Operand *AArch64CGFunc::SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
2230 {
2231 return SelectMinOrMax(true, node, opnd0, opnd1, parent);
2232 }
2233
2234 void AArch64CGFunc::SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
2235 {
2236 SelectMinOrMax(true, resOpnd, opnd0, opnd1, primType);
2237 }
2238
2239 Operand *AArch64CGFunc::SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
2240 {
2241 return SelectMinOrMax(false, node, opnd0, opnd1, parent);
2242 }
2243
2244 void AArch64CGFunc::SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
2245 {
2246 SelectMinOrMax(false, resOpnd, opnd0, opnd1, primType);
2247 }
2248
2249 void AArch64CGFunc::SelectFMinFMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, bool is64Bits, bool isMin)
2250 {
2251 uint32 mOpCode = isMin ? (is64Bits ? MOP_xfminrrr : MOP_wfminrrr) : (is64Bits ? MOP_xfmaxrrr : MOP_wfmaxrrr);
2252 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, opnd1));
2253 }
2254
2255 Operand *AArch64CGFunc::SelectBxor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
2256 {
2257 return SelectRelationOperator(kEOR, node, opnd0, opnd1, parent);
2258 }
2259
2260 void AArch64CGFunc::SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
2261 {
2262 SelectRelationOperator(kEOR, resOpnd, opnd0, opnd1, primType);
2263 }
2264
2265 Operand *AArch64CGFunc::SelectShift(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
2266 {
2267 PrimType dtype = node.GetPrimType();
2268 bool isSigned = IsSignedInteger(dtype);
2269 uint32 dsize = GetPrimTypeBitSize(dtype);
2270 bool is64Bits = (dsize == k64BitSize);
2271 bool isFloat = IsPrimitiveFloat(dtype);
2272 RegOperand *resOpnd = nullptr;
2273 Opcode opcode = node.GetOpCode();
2274
2275 PrimType primType =
2276 isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32));
2277 resOpnd = &GetOrCreateResOperand(parent, primType);
2278 ShiftDirection direct = (opcode == OP_lshr) ? kShiftLright : ((opcode == OP_ashr) ? kShiftAright : kShiftLeft);
2279 SelectShift(*resOpnd, opnd0, opnd1, direct, primType);
2280
2281 if (dtype == PTY_i16) {
2282 MOperator exOp = is64Bits ? MOP_xsxth64 : MOP_xsxth32;
2283 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(exOp, *resOpnd, *resOpnd));
2284 } else if (dtype == PTY_i8) {
2285 MOperator exOp = is64Bits ? MOP_xsxtb64 : MOP_xsxtb32;
2286 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(exOp, *resOpnd, *resOpnd));
2287 }
2288 return resOpnd;
2289 }
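/*
 * Note (illustrative; registers chosen for exposition, not from the original
 * source): the shift runs at the promoted 32/64-bit width, so an i8/i16
 * result can overflow its nominal width. For (i8)0x40 << 1:
 *   lsl w0, w0, #1 // w0 = 0x80, not yet a valid signed i8
 *   sxtb w0, w0 // w0 = 0xFFFFFF80 (-128)
 * which is why PTY_i8/PTY_i16 get the trailing sxtb/sxth above.
 */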
2290
2291 void AArch64CGFunc::SelectBxorShift(Operand &resOpnd, Operand *opnd0, Operand *opnd1, Operand &opnd2, PrimType primType)
2292 {
2293 opnd0 = &LoadIntoRegister(*opnd0, primType);
2294 opnd1 = &LoadIntoRegister(*opnd1, primType);
2295 uint32 dsize = GetPrimTypeBitSize(primType);
2296 bool is64Bits = (dsize == k64BitSize);
2297 MOperator mopBxor = is64Bits ? MOP_xeorrrrs : MOP_weorrrrs;
2298 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBxor, resOpnd, *opnd0, *opnd1, opnd2));
2299 }
2300
2301 void AArch64CGFunc::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, ShiftDirection direct,
2302 PrimType primType)
2303 {
2304 Operand::OperandType opnd1Type = opnd1.GetKind();
2305 uint32 dsize = GetPrimTypeBitSize(primType);
2306 bool is64Bits = (dsize == k64BitSize);
2307 Operand *firstOpnd = &LoadIntoRegister(opnd0, primType);
2308
2309 MOperator mopShift;
2310 if ((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) {
2311 ImmOperand *immOpnd1 = static_cast<ImmOperand *>(&opnd1);
2312 const int64 kVal = immOpnd1->GetValue();
2313 const uint32 kShiftamt = is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits;
2314 if (kVal == 0) {
2315 SelectCopy(resOpnd, primType, *firstOpnd, primType);
2316 return;
2317 }
2318 /* e.g. a >> -1 */
2319 if ((kVal < 0) || (kVal > kShiftamt)) {
2320 SelectShift(resOpnd, *firstOpnd, SelectCopy(opnd1, primType, primType), direct, primType);
2321 return;
2322 }
2323 switch (direct) {
2324 case kShiftLeft:
2325 mopShift = is64Bits ? MOP_xlslrri6 : MOP_wlslrri5;
2326 break;
2327 case kShiftAright:
2328 mopShift = is64Bits ? MOP_xasrrri6 : MOP_wasrrri5;
2329 break;
2330 case kShiftLright:
2331 mopShift = is64Bits ? MOP_xlsrrri6 : MOP_wlsrrri5;
2332 break;
2333 }
2334 } else if (opnd1Type != Operand::kOpdRegister) {
2335 SelectShift(resOpnd, *firstOpnd, SelectCopy(opnd1, primType, primType), direct, primType);
2336 return;
2337 } else {
2338 switch (direct) {
2339 case kShiftLeft:
2340 mopShift = is64Bits ? MOP_xlslrrr : MOP_wlslrrr;
2341 break;
2342 case kShiftAright:
2343 mopShift = is64Bits ? MOP_xasrrrr : MOP_wasrrrr;
2344 break;
2345 case kShiftLright:
2346 mopShift = is64Bits ? MOP_xlsrrrr : MOP_wlsrrrr;
2347 break;
2348 }
2349 }
2350
2351 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopShift, resOpnd, *firstOpnd, opnd1));
2352 }
2353
2354 Operand *AArch64CGFunc::SelectAbsSub(Insn &lastInsn, const UnaryNode &node, Operand &newOpnd0)
2355 {
2356 PrimType dtyp = node.GetPrimType();
2357 bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize);
2358 /* promoted type */
2359 PrimType primType = is64Bits ? (PTY_i64) : (PTY_i32);
2360 RegOperand &resOpnd = CreateRegisterOperandOfType(primType);
2361 uint32 mopCsneg = is64Bits ? MOP_xcnegrrrc : MOP_wcnegrrrc;
2362 /* ABS requires the operand be interpreted as a signed integer */
2363 CondOperand &condOpnd = GetCondOperand(CC_MI);
2364 MOperator newMop = AArch64isa::GetMopSub2Subs(lastInsn);
2365 Operand &rflag = GetOrCreateRflag();
2366 std::vector<Operand *> opndVec;
2367 opndVec.push_back(&rflag);
2368 for (uint32 i = 0; i < lastInsn.GetOperandSize(); i++) {
2369 opndVec.push_back(&lastInsn.GetOperand(i));
2370 }
2371 Insn *subsInsn = &GetInsnBuilder()->BuildInsn(newMop, opndVec);
2372 GetCurBB()->ReplaceInsn(lastInsn, *subsInsn);
2373 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopCsneg, resOpnd, newOpnd0, condOpnd, rflag));
2374 return &resOpnd;
2375 }
2376
2377 Operand *AArch64CGFunc::SelectAbs(UnaryNode &node, Operand &opnd0)
2378 {
2379 PrimType dtyp = node.GetPrimType();
2380 if (IsPrimitiveFloat(dtyp)) {
2381 CHECK_FATAL(GetPrimTypeBitSize(dtyp) >= k32BitSize, "We don't support half-word FP operands yet");
2382 bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize);
2383 Operand &newOpnd0 = LoadIntoRegister(opnd0, dtyp);
2384 RegOperand &resOpnd = CreateRegisterOperandOfType(dtyp);
2385 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(is64Bits ? MOP_dabsrr : MOP_sabsrr, resOpnd, newOpnd0));
2386 return &resOpnd;
2387 } else {
2388 bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize);
2389 /* promoted type */
2390 PrimType primType = is64Bits ? (PTY_i64) : (PTY_i32);
2391 Operand &newOpnd0 = LoadIntoRegister(opnd0, primType);
2392 Insn *lastInsn = GetCurBB()->GetLastMachineInsn();
2393 if (lastInsn != nullptr && AArch64isa::IsSub(*lastInsn)) {
2394 Operand &dest = lastInsn->GetOperand(kInsnFirstOpnd);
2395 Operand &opd1 = lastInsn->GetOperand(kInsnSecondOpnd);
2396 Operand &opd2 = lastInsn->GetOperand(kInsnThirdOpnd);
2397 regno_t absReg = static_cast<RegOperand &>(newOpnd0).GetRegisterNumber();
2398 if ((dest.IsRegister() && static_cast<RegOperand &>(dest).GetRegisterNumber() == absReg) ||
2399 (opd1.IsRegister() && static_cast<RegOperand &>(opd1).GetRegisterNumber() == absReg) ||
2400 (opd2.IsRegister() && static_cast<RegOperand &>(opd2).GetRegisterNumber() == absReg)) {
2401 return SelectAbsSub(*lastInsn, node, newOpnd0);
2402 }
2403 }
2404 RegOperand &resOpnd = CreateRegisterOperandOfType(primType);
2405 SelectAArch64Cmp(newOpnd0, CreateImmOperand(0, is64Bits ? PTY_u64 : PTY_u32, false), true,
2406 GetPrimTypeBitSize(dtyp));
2407 uint32 mopCsneg = is64Bits ? MOP_xcsnegrrrc : MOP_wcsnegrrrc;
2408 /* ABS requires the operand be interpreted as a signed integer */
2409 CondOperand &condOpnd = GetCondOperand(CC_GE);
2410 Operand &rflag = GetOrCreateRflag();
2411 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopCsneg, resOpnd, newOpnd0, newOpnd0, condOpnd, rflag));
2412 return &resOpnd;
2413 }
2414 }
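/*
 * Worked example (illustrative; registers chosen for exposition, not from the
 * original source): integer abs is branchless:
 *   cmp x0, #0
 *   csneg res, x0, x0, GE // res = (x0 >= 0) ? x0 : -x0
 * When x0 was just defined by a sub, SelectAbsSub instead promotes the sub to
 * subs and emits cneg on MI, saving the explicit compare.
 */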
2415
2416 Operand *AArch64CGFunc::SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent)
2417 {
2418 PrimType dtype = node.GetPrimType();
2419 DEBUG_ASSERT(IsPrimitiveInteger(dtype), "bnot expects an integer type");
2420 uint32 bitSize = GetPrimTypeBitSize(dtype);
2421 bool is64Bits = (bitSize == k64BitSize);
2422 bool isSigned = IsSignedInteger(dtype);
2423 RegOperand *resOpnd = nullptr;
2424 PrimType primType = is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32);
2425 resOpnd = &GetOrCreateResOperand(parent, primType);
2426
2427 Operand &newOpnd0 = LoadIntoRegister(opnd0, primType);
2428
2429 uint32 mopBnot = is64Bits ? MOP_xnotrr : MOP_wnotrr;
2430 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBnot, *resOpnd, newOpnd0));
2431 /* generate and resOpnd, resOpnd, 0x1/0xFF/0xFFFF for PTY_u1/PTY_u8/PTY_u16 */
2432 int64 immValue = 0;
2433 if (bitSize == k1BitSize) {
2434 immValue = 1;
2435 } else if (bitSize == k8BitSize) {
2436 immValue = 0xFF;
2437 } else if (bitSize == k16BitSize) {
2438 immValue = 0xFFFF;
2439 }
2440 if (immValue != 0) {
2441 ImmOperand &imm = CreateImmOperand(PTY_u32, immValue);
2442 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, *resOpnd, *resOpnd, imm));
2443 }
2444 return resOpnd;
2445 }
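/*
 * Worked example (illustrative; registers chosen for exposition, not from the
 * original source): bnot of a PTY_u8 value masks the inverted bits back down
 * to the declared width:
 *   mvn w0, w1 // full 32-bit bitwise not
 *   and w0, w0, #0xff // keep only the low 8 bits
 * so the result remains a valid u8.
 */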
2446
2447 Operand *AArch64CGFunc::SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent)
2448 {
2449 PrimType dtype = node.GetPrimType();
2450 bool isSigned = IsSignedInteger(dtype);
2451 uint8 bitOffset = node.GetBitsOffset();
2452 uint8 bitSize = node.GetBitsSize();
2453 bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
2454 CHECK_FATAL(!is64Bits, "dest opnd should not be 64bit");
2455 PrimType destType = GetIntegerPrimTypeBySizeAndSign(bitSize, isSigned);
2456 Operand *result =
2457 SelectIread(parent, *static_cast<IreadNode *>(node.Opnd(0)), static_cast<int>(bitOffset / k8BitSize), destType);
2458 return result;
2459 }
2460
2461 Operand *AArch64CGFunc::SelectExtractbits(ExtractbitsNode &node, Operand &srcOpnd, const BaseNode &parent)
2462 {
2463 uint8 bitOffset = node.GetBitsOffset();
2464 uint8 bitSize = node.GetBitsSize();
2465 PrimType dtype = node.GetPrimType();
2466 RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype);
2467 bool isSigned =
2468 (node.GetOpCode() == OP_sext) ? true : (node.GetOpCode() == OP_zext) ? false : IsSignedInteger(dtype);
2469 bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
2470 uint32 immWidth = is64Bits ? kMaxImmVal13Bits : kMaxImmVal12Bits;
2471 Operand &opnd0 = LoadIntoRegister(srcOpnd, dtype);
2472 if (bitOffset == 0) {
2473 if (!isSigned && (bitSize < immWidth)) {
2474 SelectBand(resOpnd, opnd0,
2475 CreateImmOperand(static_cast<int64>((static_cast<uint64>(1) << bitSize) - 1), immWidth, false),
2476 dtype);
2477 return &resOpnd;
2478 } else {
2479 MOperator mOp = MOP_undef;
2480 if (bitSize == k8BitSize) {
2481 mOp = is64Bits ? (isSigned ? MOP_xsxtb64 : MOP_undef)
2482 : (isSigned ? MOP_xsxtb32 : (opnd0.GetSize() == k32BitSize ? MOP_xuxtb32 : MOP_undef));
2483 } else if (bitSize == k16BitSize) {
2484 mOp = is64Bits ? (isSigned ? MOP_xsxth64 : MOP_undef)
2485 : (isSigned ? MOP_xsxth32 : (opnd0.GetSize() == k32BitSize ? MOP_xuxth32 : MOP_undef));
2486 } else if (bitSize == k32BitSize) {
2487 mOp = is64Bits ? (isSigned ? MOP_xsxtw64 : MOP_xuxtw64) : MOP_wmovrr;
2488 }
2489 if (mOp != MOP_undef) {
2490 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0));
2491 return &resOpnd;
2492 }
2493 }
2494 }
2495 uint32 mopBfx =
2496 is64Bits ? (isSigned ? MOP_xsbfxrri6i6 : MOP_xubfxrri6i6) : (isSigned ? MOP_wsbfxrri5i5 : MOP_wubfxrri5i5);
2497 ImmOperand &immOpnd1 = CreateImmOperand(bitOffset, k8BitSize, false);
2498 ImmOperand &immOpnd2 = CreateImmOperand(bitSize, k8BitSize, false);
2499 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBfx, resOpnd, opnd0, immOpnd1, immOpnd2));
2500 return &resOpnd;
2501 }
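/*
 * Worked example (illustrative; registers chosen for exposition, not from the
 * original source): extracting a 5-bit field at bit offset 3:
 *   ubfx w0, w1, #3, #5 // unsigned: res = (x >> 3) & 0x1f
 *   sbfx w0, w1, #3, #5 // signed: same field, sign-extended
 * Offset-0 fields take the cheaper and/uxt/sxt forms handled earlier above.
 */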
2502
2503 Operand *AArch64CGFunc::SelectLnot(UnaryNode &node, Operand &srcOpnd, const BaseNode &parent)
2504 {
2505 PrimType dtype = node.GetPrimType();
2506 RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype);
2507 bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
2508 Operand &opnd0 = LoadIntoRegister(srcOpnd, dtype);
2509 SelectAArch64Cmp(opnd0, CreateImmOperand(0, is64Bits ? PTY_u64 : PTY_u32, false), true, GetPrimTypeBitSize(dtype));
2510 SelectAArch64CSet(resOpnd, GetCondOperand(CC_EQ), is64Bits);
2511 return &resOpnd;
2512 }
2513
SelectNeg(UnaryNode & node,Operand & opnd0,const BaseNode & parent)2514 Operand *AArch64CGFunc::SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent)
2515 {
2516 PrimType dtype = node.GetPrimType();
2517 bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
2518 RegOperand *resOpnd = nullptr;
2519 PrimType primType;
2520 if (IsPrimitiveFloat(dtype)) {
2521 primType = dtype;
2522 } else {
2523 primType = is64Bits ? (PTY_i64) : (PTY_i32); /* promoted type */
2524 }
2525 resOpnd = &GetOrCreateResOperand(parent, primType);
2526 SelectNeg(*resOpnd, opnd0, primType);
2527 return resOpnd;
2528 }
2529
SelectNeg(Operand & dest,Operand & srcOpnd,PrimType primType)2530 void AArch64CGFunc::SelectNeg(Operand &dest, Operand &srcOpnd, PrimType primType)
2531 {
2532 Operand &opnd0 = LoadIntoRegister(srcOpnd, primType);
2533 bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize);
2534 MOperator mOp;
2535 if (IsPrimitiveFloat(primType)) {
2536 mOp = is64Bits ? MOP_xfnegrr : MOP_wfnegrr;
2537 } else {
2538 mOp = is64Bits ? MOP_xinegrr : MOP_winegrr;
2539 }
2540 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, opnd0));
2541 }
2542
SelectMvn(Operand & dest,Operand & src,PrimType primType)2543 void AArch64CGFunc::SelectMvn(Operand &dest, Operand &src, PrimType primType)
2544 {
2545 Operand &opnd0 = LoadIntoRegister(src, primType);
2546 bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize);
2547 MOperator mOp;
2548 DEBUG_ASSERT(!IsPrimitiveFloat(primType), "Instruction 'mvn' do not have float version.");
2549 mOp = is64Bits ? MOP_xnotrr : MOP_wnotrr;
2550 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, opnd0));
2551 }
2552
SelectSqrt(UnaryNode & node,Operand & src,const BaseNode & parent)2553 Operand *AArch64CGFunc::SelectSqrt(UnaryNode &node, Operand &src, const BaseNode &parent)
2554 {
2555 /*
2556 * gcc generates code like below for better accurate
2557 * fsqrts s15, s0
2558 * fcmps s15, s15
2559 * fmstat
2560 * beq .L4
2561 * push {r3, lr}
2562 * bl sqrtf
2563 * pop {r3, pc}
2564 * .L4:
2565 * fcpys s0, s15
2566 * bx lr
2567 */
2568 PrimType dtype = node.GetPrimType();
2569 if (!IsPrimitiveFloat(dtype)) {
2570 DEBUG_ASSERT(false, "should be float type");
2571 return nullptr;
2572 }
2573 bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
2574 Operand &opnd0 = LoadIntoRegister(src, dtype);
2575 RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype);
2576 GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(is64Bits ? MOP_vsqrtd : MOP_vsqrts, resOpnd, opnd0));
2577 return &resOpnd;
2578 }
2579
SelectCvtFloat2Int(Operand & resOpnd,Operand & srcOpnd,PrimType itype,PrimType ftype)2580 void AArch64CGFunc::SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype)
2581 {
2582 bool is64BitsFloat = (ftype == PTY_f64);
2583 MOperator mOp = 0;
2584
2585 DEBUG_ASSERT(((ftype == PTY_f64) || (ftype == PTY_f32)), "wrong from type");
2586 Operand &opnd0 = LoadIntoRegister(srcOpnd, ftype);
2587 switch (itype) {
2588 case PTY_i32:
2589 mOp = !is64BitsFloat ? MOP_vcvtrf : MOP_vcvtrd;
2590 break;
2591 case PTY_u32:
2592 mOp = !is64BitsFloat ? MOP_vcvturf : MOP_vcvturd;
2593 break;
2594 case PTY_i64:
2595 mOp = !is64BitsFloat ? MOP_xvcvtrf : MOP_xvcvtrd;
2596 break;
2597 case PTY_u64:
2598 case PTY_a64:
2599 mOp = !is64BitsFloat ? MOP_xvcvturf : MOP_xvcvturd;
2600 break;
2601 default:
2602 CHECK_FATAL(false, "unexpected type");
2603 }
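    /*
     * C requires float-to-int casts to truncate toward zero; the signed/unsigned
     * split above is expected to map to fcvtzs/fcvtzu (an assumption from the
     * MOP naming, not verified against the instruction descriptions).
     */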
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0));
}

void AArch64CGFunc::SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType)
{
    DEBUG_ASSERT((toType == PTY_f32) || (toType == PTY_f64), "unexpected type");
    bool is64BitsFloat = (toType == PTY_f64);
    MOperator mOp = 0;
    uint32 fsize = GetPrimTypeBitSize(fromType);

    PrimType itype = (GetPrimTypeBitSize(fromType) == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64)
                                                                  : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32);

    Operand *opnd0 = &LoadIntoRegister(origOpnd0, itype);

    /* sub-32-bit signed ints need a sign extension before the cvt */
    DEBUG_ASSERT(opnd0->IsRegister(), "opnd should be a register operand");
    Operand *srcOpnd = opnd0;
    if (IsSignedInteger(fromType) && (fsize < k32BitSize)) {
        srcOpnd = &CreateRegisterOperandOfType(itype);
        mOp = (fsize == k8BitSize) ? MOP_xsxtb32 : MOP_xsxth32;
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *srcOpnd, *opnd0));
    }

    switch (itype) {
        case PTY_i32:
            mOp = !is64BitsFloat ? MOP_vcvtfr : MOP_vcvtdr;
            break;
        case PTY_u32:
            mOp = !is64BitsFloat ? MOP_vcvtufr : MOP_vcvtudr;
            break;
        case PTY_i64:
            mOp = !is64BitsFloat ? MOP_xvcvtfr : MOP_xvcvtdr;
            break;
        case PTY_u64:
            mOp = !is64BitsFloat ? MOP_xvcvtufr : MOP_xvcvtudr;
            break;
        default:
            CHECK_FATAL(false, "unexpected type");
    }
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, *srcOpnd));
}

Operand *AArch64CGFunc::SelectRoundOperator(RoundType roundType, const TypeCvtNode &node, Operand &opnd0,
                                            const BaseNode &parent)
{
    PrimType itype = node.GetPrimType();
    PrimType ftype = node.FromType();
    DEBUG_ASSERT(((ftype == PTY_f64) || (ftype == PTY_f32)), "wrong float type");
    bool is64Bits = (ftype == PTY_f64);
    /* the result type decides between frint (float result) and fcvt (int result);
       testing ftype here would always be true given the assert above */
    bool isFloat = (itype == PTY_f64) || (itype == PTY_f32);
    RegOperand &resOpnd = GetOrCreateResOperand(parent, itype);
    RegOperand &regOpnd0 = LoadIntoRegister(opnd0, ftype);
    MOperator mop = MOP_undef;
    if (roundType == kCeil) {
        if (isFloat) {
            mop = is64Bits ? MOP_dfrintprr : MOP_sfrintprr;
        } else {
            mop = is64Bits ? MOP_xvcvtps : MOP_vcvtps;
        }
    } else if (roundType == kFloor) {
        if (isFloat) {
            mop = is64Bits ? MOP_dfrintmrr : MOP_sfrintmrr;
        } else {
            mop = is64Bits ? MOP_xvcvtms : MOP_vcvtms;
        }
    } else if (roundType == kTrunc) {
        if (isFloat) {
            mop = is64Bits ? MOP_dfrintzrr : MOP_sfrintzrr;
        } else {
            CHECK_FATAL(false, "not support here!");
        }
    } else {
        CHECK_FATAL(!isFloat, "not support float here!");
        mop = is64Bits ? MOP_xvcvtas : MOP_vcvtas;
    }
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, resOpnd, regOpnd0));
    return &resOpnd;
}

Operand *AArch64CGFunc::SelectCeil(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent)
{
    return SelectRoundOperator(kCeil, node, opnd0, parent);
}

/* float to int floor */
Operand *AArch64CGFunc::SelectFloor(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent)
{
    return SelectRoundOperator(kFloor, node, opnd0, parent);
}

static bool LIsPrimitivePointer(PrimType ptype)
{
    return ((ptype >= PTY_ptr) && (ptype <= PTY_a64));
}

Operand *AArch64CGFunc::SelectRetype(TypeCvtNode &node, Operand &opnd0)
{
    PrimType fromType = node.Opnd(0)->GetPrimType();
    PrimType toType = node.GetPrimType();
    DEBUG_ASSERT(GetPrimTypeSize(fromType) == GetPrimTypeSize(toType), "retype bit width doesn't match");
    if (LIsPrimitivePointer(fromType) && LIsPrimitivePointer(toType)) {
        return &LoadIntoRegister(opnd0, toType);
    }
    // if the source operand is in memory, simply read it as a value of
    // 'toType' into the dest operand and return
    if (opnd0.IsMemoryAccessOperand()) {
        return &SelectCopy(opnd0, toType, toType);
    }

    bool isFromInt = IsPrimitiveInteger(fromType);
    bool is64Bits = GetPrimTypeBitSize(fromType) == k64BitSize;
    bool isImm = false;
    Operand *newOpnd0 = &opnd0;
    if (opnd0.IsImmediate()) {
        // check whether the bit pattern fits the AArch64 FMOV (immediate) encoding
        ImmOperand *imm = static_cast<ImmOperand *>(&opnd0);
        uint64 val = static_cast<uint64>(imm->GetValue());
        uint64 canRepreset = is64Bits ? (val & 0xffffffffffff) : (val & 0x7ffff);
        uint32 val1 = is64Bits ? (val >> 61) & 0x3 : (val >> 29) & 0x3;
        uint32 val2 = is64Bits ? (val >> 54) & 0xff : (val >> 25) & 0x1f;
        bool isSame = is64Bits ? ((val2 == 0) || (val2 == 0xff)) : ((val2 == 0) || (val2 == 0x1f));
        canRepreset = (canRepreset == 0) && ((val1 & 0x1) ^ ((val1 & 0x2) >> 1)) && isSame;
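        /*
         * e.g. 1.0 is 0x3ff0000000000000: the low 48 bits are zero and the
         * replicated exponent bits check out, so it passes, and imm8 below
         * becomes 0x70, the FMOV (scalar, immediate) encoding of 1.0
         * (an illustrative sanity check, not from the original source).
         */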
        if (IsPrimitiveInteger(fromType) && IsPrimitiveFloat(toType) && canRepreset) {
            uint64 temp1 = is64Bits ? (val >> 63) << 7 : (val >> 31) << 7;
            uint64 temp2 = is64Bits ? val >> 48 : val >> 19;
            int64 imm8 = (temp2 & 0x7f) | temp1;
            newOpnd0 = &CreateImmOperand(imm8, k8BitSize, false, kNotVary, true);
            isImm = true;
        }
    }
    if (!isImm) {
        bool isSigned = IsSignedInteger(fromType);
        PrimType itype = isFromInt ? (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32))
                                   : (is64Bits ? PTY_f64 : PTY_f32);
        newOpnd0 = &LoadIntoRegister(opnd0, itype);
    }
    if ((IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) ||
        (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType))) {
        MOperator mopFmov = isImm ? (is64Bits ? MOP_xdfmovri : MOP_wsfmovri)
                                  : (isFromInt ? (is64Bits ? MOP_xvmovdr : MOP_xvmovsr)
                                               : (is64Bits ? MOP_xvmovrd : MOP_xvmovrs));
        RegOperand *resOpnd = &CreateRegisterOperandOfType(toType);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *resOpnd, *newOpnd0));
        return resOpnd;
    }
    return newOpnd0;
}

void AArch64CGFunc::SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType)
{
    Operand &opnd0 = LoadIntoRegister(srcOpnd, fromType);
    MOperator mOp = 0;
    switch (toType) {
        case PTY_f32: {
            CHECK_FATAL(fromType == PTY_f64, "unexpected cvt from type");
            mOp = MOP_xvcvtfd;
            break;
        }
        case PTY_f64: {
            CHECK_FATAL(fromType == PTY_f32, "unexpected cvt from type");
            mOp = MOP_xvcvtdf;
            break;
        }
        default:
            CHECK_FATAL(false, "unexpected cvt to type");
    }

    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0));
}

/*
 * This should be regarded only as a reference.
 *
 * C11 specification.
 * 6.3.1.3 Signed and unsigned integers
 * 1 When a value with integer type is converted to another integer
 * type other than _Bool, if the value can be represented by the
 * new type, it is unchanged.
 * 2 Otherwise, if the new type is unsigned, the value is converted
 * by repeatedly adding or subtracting one more than the maximum
 * value that can be represented in the new type until the value
 * is in the range of the new type.60)
 * 3 Otherwise, the new type is signed and the value cannot be
 * represented in it; either the result is implementation-defined
 * or an implementation-defined signal is raised.
 */
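/*
 * e.g. by rule 2, (uint8_t)300 == 300 - 256 == 44; the constant-folding mask
 * logic at the top of SelectCvtInt2Int below computes exactly this.
 */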
void AArch64CGFunc::SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd, Operand *opnd0, PrimType fromType,
                                     PrimType toType)
{
    uint32 fsize = GetPrimTypeBitSize(fromType);
    uint32 tsize = GetPrimTypeBitSize(toType);
    bool isExpand = tsize > fsize;
    bool is64Bit = (tsize == k64BitSize);
    if ((parent != nullptr) && opnd0->IsIntImmediate() &&
        ((parent->GetOpCode() == OP_band) || (parent->GetOpCode() == OP_bior) || (parent->GetOpCode() == OP_bxor) ||
         (parent->GetOpCode() == OP_ashr) || (parent->GetOpCode() == OP_lshr) || (parent->GetOpCode() == OP_shl))) {
        ImmOperand *simm = static_cast<ImmOperand *>(opnd0);
        DEBUG_ASSERT(simm != nullptr, "simm is nullptr in AArch64CGFunc::SelectCvtInt2Int");
        bool isSign = false;
        int64 origValue = simm->GetValue();
        int64 newValue = origValue;
        int64 signValue = 0;
        if (!isExpand) {
            /* 64--->32 */
            if (fsize > tsize) {
                if (IsSignedInteger(toType)) {
                    if (origValue < 0) {
                        signValue = static_cast<int64>(0xFFFFFFFFFFFFFFFFLL & (1ULL << static_cast<uint32>(tsize)));
                    }
                    newValue = static_cast<int64>(
                        (static_cast<uint64>(origValue) & ((1ULL << static_cast<uint32>(tsize)) - 1u)) |
                        static_cast<uint64>(signValue));
                } else {
                    newValue = static_cast<uint64>(origValue) & ((1ULL << static_cast<uint32>(tsize)) - 1u);
                }
            }
        }
        if (IsSignedInteger(toType)) {
            isSign = true;
        }
        resOpnd = &static_cast<Operand &>(CreateImmOperand(newValue, GetPrimTypeSize(toType) * kBitsPerByte, isSign));
        return;
    }
    if (isExpand) { /* Expansion */
        /* if the cvt expr's parent is add, and, xor or similar, we can use the imm version */
        PrimType primType = ((fsize == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64)
                                                   : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32));
        opnd0 = &LoadIntoRegister(*opnd0, primType);

        if (IsSignedInteger(fromType)) {
            DEBUG_ASSERT((is64Bit || (fsize == k8BitSize || fsize == k16BitSize)), "incorrect from size");

            MOperator mOp =
                (is64Bit ? ((fsize == k8BitSize) ? MOP_xsxtb64 : ((fsize == k16BitSize) ? MOP_xsxth64 : MOP_xsxtw64))
                         : ((fsize == k8BitSize) ? MOP_xsxtb32 : MOP_xsxth32));
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0));
        } else {
            /* Unsigned */
            auto mOp =
                (is64Bit ? ((fsize == k8BitSize) ? MOP_xuxtb32 : ((fsize == k16BitSize) ? MOP_xuxth32 : MOP_xuxtw64))
                         : ((fsize == k8BitSize) ? MOP_xuxtb32 : MOP_xuxth32));
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, LoadIntoRegister(*opnd0, fromType)));
        }
    } else { /* Same size or truncate */
#ifdef CNV_OPTIMIZE
        /*
         * No code needed for aarch64 with same reg.
         * Just update regno.
         */
        RegOperand *reg = static_cast<RegOperand *>(resOpnd);
        reg->regNo = static_cast<RegOperand *>(opnd0)->regNo;
#else
        /*
         * This is not really needed if opnd0 is the result of a load.
         * Hopefully the FE will get rid of the redundant conversions for loads.
         */
        PrimType primType = ((fsize == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64)
                                                   : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32));
        opnd0 = &LoadIntoRegister(*opnd0, primType);

        if (fsize > tsize) {
            if (tsize == k8BitSize) {
                MOperator mOp = IsSignedInteger(toType) ? MOP_xsxtb32 : MOP_xuxtb32;
                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0));
            } else if (tsize == k16BitSize) {
                MOperator mOp = IsSignedInteger(toType) ? MOP_xsxth32 : MOP_xuxth32;
                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0));
            } else {
                MOperator mOp = IsSignedInteger(toType) ? MOP_xsbfxrri6i6 : MOP_xubfxrri6i6;
                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0,
                                                                   CreateImmOperand(0, k8BitSize, false),
                                                                   CreateImmOperand(tsize, k8BitSize, false)));
            }
        } else {
            /* same size, so resOpnd can be set */
            if ((IsSignedInteger(fromType) == IsSignedInteger(toType)) ||
                (GetPrimTypeSize(toType) >= k4BitSize)) {
                resOpnd = opnd0;
            } else if (IsUnsignedInteger(toType)) {
                MOperator mop;
                switch (toType) {
                    case PTY_u8:
                        mop = MOP_xuxtb32;
                        break;
                    case PTY_u16:
                        mop = MOP_xuxth32;
                        break;
                    default:
                        CHECK_FATAL(0, "Unhandled unsigned convert");
                }
                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, *resOpnd, *opnd0));
            } else {
                /* signed target */
                uint32 size = GetPrimTypeSize(toType);
                MOperator mop;
                switch (toType) {
                    case PTY_i8:
                        mop = (size > k4BitSize) ? MOP_xsxtb64 : MOP_xsxtb32;
                        break;
                    case PTY_i16:
                        mop = (size > k4BitSize) ? MOP_xsxth64 : MOP_xsxth32;
                        break;
                    default:
                        CHECK_FATAL(0, "Unhandled signed convert");
                }
                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, *resOpnd, *opnd0));
            }
        }
#endif
    }
}

Operand *AArch64CGFunc::SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0)
{
    PrimType fromType = node.FromType();
    PrimType toType = node.GetPrimType();
    if (fromType == toType) {
        return &opnd0; /* noop */
    }
    Operand *resOpnd = &GetOrCreateResOperand(parent, toType);
    if (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType)) {
        SelectCvtInt2Float(*resOpnd, opnd0, toType, fromType);
    } else if (IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) {
        SelectCvtFloat2Int(*resOpnd, opnd0, toType, fromType);
    } else if (IsPrimitiveInteger(fromType) && IsPrimitiveInteger(toType)) {
        SelectCvtInt2Int(&parent, resOpnd, &opnd0, fromType, toType);
    } else { /* both are float type */
        SelectCvtFloat2Float(*resOpnd, opnd0, fromType, toType);
    }
    return resOpnd;
}

Operand *AArch64CGFunc::SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent)
{
    PrimType ftype = node.FromType();
    PrimType nodeType = node.GetPrimType();
    bool is64Bits = (GetPrimTypeBitSize(node.GetPrimType()) == k64BitSize);
    bool isFloat = (IsPrimitiveFloat(nodeType));
    if (isFloat) {
        CHECK_FATAL(nodeType == PTY_f32 || nodeType == PTY_f64, "only support f32, f64");
        return SelectRoundOperator(kTrunc, node, opnd0, parent);
    }
    PrimType itype = (is64Bits) ? (IsSignedInteger(node.GetPrimType()) ? PTY_i64 : PTY_u64)
                                : (IsSignedInteger(node.GetPrimType()) ? PTY_i32 : PTY_u32); /* promoted type */
    RegOperand &resOpnd = GetOrCreateResOperand(parent, itype);
    SelectCvtFloat2Int(resOpnd, opnd0, itype, ftype);
    return &resOpnd;
}

/*
 * syntax: select <prim-type> (<opnd0>, <opnd1>, <opnd2>)
 * <opnd0> must be of integer type.
 * <opnd1> and <opnd2> must be of the type given by <prim-type>.
 * If <opnd0> is not 0, return <opnd1>. Otherwise, return <opnd2>.
 */
void AArch64CGFunc::SelectAArch64Select(Operand &dest, Operand &o0, Operand &o1, CondOperand &cond, bool isIntType,
                                        uint32 dsize)
{
    uint32 mOpCode =
        isIntType ? ((dsize == k64BitSize) ? MOP_xcselrrrc : MOP_wcselrrrc)
                  : ((dsize == k64BitSize) ? MOP_dcselrrrc : ((dsize == k32BitSize) ? MOP_scselrrrc : MOP_hcselrrrc));
    Operand &rflag = GetOrCreateRflag();
    if (o1.IsImmediate()) {
        uint32 movOp = (dsize == k64BitSize ? MOP_xmovri64 : MOP_wmovri32);
        RegOperand &movDest =
            CreateVirtualRegisterOperand(NewVReg(kRegTyInt, (dsize == k64BitSize) ? k8ByteSize : k4ByteSize));
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(movOp, movDest, o1));
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, dest, o0, movDest, cond, rflag));
        return;
    }
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, dest, o0, o1, cond, rflag));
}

void AArch64CGFunc::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd)
{
    const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable();
    MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_a64));
    /*
     * we store an 8-byte displacement ( jump_label - offset_table_address )
     * in the table. Refer to AArch64Emit::Emit() in aarch64emit.cpp
     */
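    /*
     * The lookup sequence emitted below is roughly (illustrative):
     *   add  x1, x0, #-minIdx       // normalize the tag to a table index
     *   adrp x2, .LB_<func><n>
     *   add  x2, x2, :lo12:.LB_<func><n>
     *   ldr  x3, [x2, x1, lsl #3]   // displacement = label - table base
     *   add  x3, x3, x2             // absolute target address
     *   br   x3
     */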
    std::vector<uint64> sizeArray;
    sizeArray.emplace_back(switchTable.size());
    MIRArrayType *arrayType = memPool->New<MIRArrayType>(etype->GetTypeIndex(), sizeArray);
    MIRAggConst *arrayConst = memPool->New<MIRAggConst>(mirModule, *arrayType);
    for (const auto &itPair : switchTable) {
        LabelIdx labelIdx = itPair.second;
        GetCurBB()->PushBackRangeGotoLabel(labelIdx);
        MIRConst *mirConst = memPool->New<MIRLblConst>(labelIdx, GetFunction().GetPuidx(), *etype);
        arrayConst->AddItem(mirConst, 0);
    }

    MIRSymbol *lblSt = GetFunction().GetSymTab()->CreateSymbol(kScopeLocal);
    lblSt->SetStorageClass(kScFstatic);
    lblSt->SetSKind(kStConst);
    lblSt->SetTyIdx(arrayType->GetTypeIndex());
    lblSt->SetKonst(arrayConst);
    std::string lblStr(".LB_");
    MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(GetFunction().GetStIdx().Idx());
    CHECK_FATAL(funcSt != nullptr, "funcSt should not be nullptr");
    uint32 labelIdxTmp = GetLabelIdx();
    lblStr += funcSt->GetName();
    lblStr += std::to_string(labelIdxTmp++);
    SetLabelIdx(labelIdxTmp);
    lblSt->SetNameStrIdx(lblStr);
    AddEmitSt(GetCurBB()->GetId(), *lblSt);

    PrimType itype = rangeGotoNode.Opnd(0)->GetPrimType();
    Operand &opnd0 = LoadIntoRegister(srcOpnd, itype);

    regno_t vRegNO = NewVReg(kRegTyInt, 8u);
    RegOperand *addOpnd = &CreateVirtualRegisterOperand(vRegNO);

    int32 minIdx = switchTable[0].first;
    SelectAdd(*addOpnd, opnd0,
              CreateImmOperand(-static_cast<int64>(minIdx) - static_cast<int64>(rangeGotoNode.GetTagOffset()),
                               GetPrimTypeBitSize(itype), true),
              itype);

    /* contains the index */
    if (addOpnd->GetSize() != GetPrimTypeBitSize(PTY_u64)) {
        addOpnd = static_cast<RegOperand *>(&SelectCopy(*addOpnd, PTY_u64, PTY_u64));
    }

    RegOperand &baseOpnd = CreateRegisterOperandOfType(PTY_u64);
    StImmOperand &stOpnd = CreateStImmOperand(*lblSt, 0, 0);

    /* load the address of the switch table */
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, baseOpnd, stOpnd));
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, baseOpnd, baseOpnd, stOpnd));

    /* load the displacement into a register by accessing memory at base + index*8 */
    Operand *disp = CreateMemOperand(MemOperand::kAddrModeBOrX, k64BitSize, baseOpnd, *addOpnd, k8BitShift);
    RegOperand &tgt = CreateRegisterOperandOfType(PTY_a64);
    SelectAdd(tgt, baseOpnd, *disp, PTY_u64);
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xbr, tgt));
}

RegOperand &AArch64CGFunc::GetZeroOpnd(uint32 bitLen)
{
    /*
     * It is possible to have a bitLen < 32, e.g. for stb.
     * Set it to 32 if it is less than 32.
     */
    if (bitLen < k32BitSize) {
        bitLen = k32BitSize;
    }
    DEBUG_ASSERT((bitLen == k32BitSize || bitLen == k64BitSize), "illegal bit length = %d", bitLen);
    return (bitLen == k32BitSize) ? GetOrCreatePhysicalRegisterOperand(RZR, k32BitSize, kRegTyInt)
                                  : GetOrCreatePhysicalRegisterOperand(RZR, k64BitSize, kRegTyInt);
}

/* if offset < 0, allocation; otherwise, deallocation */
MemOperand &AArch64CGFunc::CreateCallFrameOperand(int32 offset, uint32 size)
{
    MemOperand *memOpnd = CreateStackMemOpnd(RSP, offset, size);
    memOpnd->SetIndexOpt((offset < 0) ? MemOperand::kPreIndex : MemOperand::kPostIndex);
    return *memOpnd;
}

BitShiftOperand *AArch64CGFunc::GetLogicalShiftLeftOperand(uint32 shiftAmount, bool is64bits) const
{
    /* num(0, 16, 32, 48) >> 4 is num1(0, 1, 2, 3), num1 & (~3) == 0 */
    DEBUG_ASSERT((!shiftAmount || ((shiftAmount >> 4) & ~static_cast<uint32>(3)) == 0),
                 "shift amount should be one of 0, 16, 32, 48");
    /* movkLslOperands[4]~movkLslOperands[7] is for 64 bits */
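    /* e.g. the LSL #16 entry pairs with "movk x0, #imm16, lsl #16" when a
       64-bit constant is materialized 16 bits at a time (illustrative) */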
    return &movkLslOperands[(shiftAmount >> 4) + (is64bits ? 4 : 0)];
}

AArch64CGFunc::MovkLslOperandArray AArch64CGFunc::movkLslOperands = {
    BitShiftOperand(BitShiftOperand::kLSL, 0, 4),
    BitShiftOperand(BitShiftOperand::kLSL, 16, 4),
    BitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(-1), 0), /* invalid entry */
    BitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(-1), 0), /* invalid entry */
    BitShiftOperand(BitShiftOperand::kLSL, 0, 6),
    BitShiftOperand(BitShiftOperand::kLSL, 16, 6),
    BitShiftOperand(BitShiftOperand::kLSL, 32, 6),
    BitShiftOperand(BitShiftOperand::kLSL, 48, 6),
};

MemOperand &AArch64CGFunc::CreateStkTopOpnd(uint32 offset, uint32 size)
{
    MemOperand *memOp = CreateStackMemOpnd(RFP, static_cast<int32>(offset), size);
    return *memOp;
}

MemOperand *AArch64CGFunc::CreateStackMemOpnd(regno_t preg, int32 offset, uint32 size)
{
    auto *memOp =
        memPool->New<MemOperand>(memPool->New<RegOperand>(preg, k64BitSize, kRegTyInt),
                                 &CreateOfstOpnd(static_cast<uint64>(static_cast<int64>(offset)), k32BitSize), size);
    if (preg == RFP || preg == RSP) {
        memOp->SetStackMem(true);
    }
    return memOp;
}

/* Mem mod BOI || PreIndex || PostIndex */
MemOperand *AArch64CGFunc::CreateMemOperand(uint32 size, RegOperand &base, ImmOperand &ofstOp, bool isVolatile,
                                            MemOperand::AArch64AddressingMode mode) const
{
    auto *memOp = memPool->New<MemOperand>(size, base, ofstOp, mode);
    memOp->SetVolatile(isVolatile);
    if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) {
        memOp->SetStackMem(true);
    }
    return memOp;
}

MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand &base,
                                            RegOperand *index, ImmOperand *offset, const MIRSymbol *symbol) const
{
    auto *memOp = memPool->New<MemOperand>(mode, size, base, index, offset, symbol);
    if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) {
        memOp->SetStackMem(true);
    }
    return memOp;
}

MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand &base,
                                            RegOperand &index, ImmOperand *offset, const MIRSymbol &symbol,
                                            bool noExtend)
{
    auto *memOp = memPool->New<MemOperand>(mode, size, base, index, offset, symbol, noExtend);
    if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) {
        memOp->SetStackMem(true);
    }
    return memOp;
}

MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, RegOperand &base,
                                            RegOperand &indexOpnd, uint32 shift, bool isSigned) const
{
    auto *memOp = memPool->New<MemOperand>(mode, dSize, base, indexOpnd, shift, isSigned);
    if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) {
        memOp->SetStackMem(true);
    }
    return memOp;
}

MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, const MIRSymbol &sym)
{
    auto *memOp = memPool->New<MemOperand>(mode, dSize, sym);
    return memOp;
}

RegOperand &AArch64CGFunc::CreateRegisterOperandOfType(PrimType primType)
{
    RegType regType = GetRegTyFromPrimTy(primType);
    uint32 byteLength = GetPrimTypeSize(primType);
    return CreateRegisterOperandOfType(regType, byteLength);
}

RegOperand &AArch64CGFunc::CreateRegisterOperandOfType(RegType regty, uint32 byteLen)
{
    /* BUG: what if half-precision floating point operations are supported? */
    /* AArch64 has 32-bit and 64-bit registers only */
    if (byteLen < k4ByteSize) {
        byteLen = k4ByteSize;
    }
    regno_t vRegNO = NewVReg(regty, byteLen);
    return CreateVirtualRegisterOperand(vRegNO);
}

RegOperand &AArch64CGFunc::CreateRflagOperand()
{
    /* AArch64 has a status register that is 32 bits wide. */
    regno_t vRegNO = NewVRflag();
    return CreateVirtualRegisterOperand(vRegNO);
}

void AArch64CGFunc::MergeReturn()
{
    uint32 exitBBSize = GetExitBBsVec().size();
    if (exitBBSize == 0) {
        return;
    }
    if ((exitBBSize == 1) && GetExitBB(0) == GetCurBB()) {
        return;
    }
    if (exitBBSize == 1) {
        BB *onlyExitBB = GetExitBB(0);
        LabelIdx labidx = CreateLabel();
        BB *retBB = CreateNewBB(labidx, onlyExitBB->IsUnreachable(), BB::kBBReturn, onlyExitBB->GetFrequency());
        onlyExitBB->AppendBB(*retBB);
        /* modify the original return BB. */
        DEBUG_ASSERT(onlyExitBB->GetKind() == BB::kBBReturn, "Error: supposed to merge multi return bb");
        onlyExitBB->SetKind(BB::kBBFallthru);

        GetExitBBsVec().pop_back();
        GetExitBBsVec().emplace_back(retBB);
        return;
    }

    LabelIdx labidx = CreateLabel();
    LabelOperand &targetOpnd = GetOrCreateLabelOperand(labidx);
    uint32 freq = 0;
    for (auto *tmpBB : GetExitBBsVec()) {
        DEBUG_ASSERT(tmpBB->GetKind() == BB::kBBReturn, "Error: supposed to merge multi return bb");
        tmpBB->SetKind(BB::kBBGoto);
        tmpBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd));
        freq += tmpBB->GetFrequency();
    }
    BB *retBB = CreateNewBB(labidx, false, BB::kBBReturn, freq);
    GetLastBB()->PrependBB(*retBB);
    GetExitBBsVec().clear();
    GetExitBBsVec().emplace_back(retBB);
}

RegOperand *AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO, uint32 size, RegType kind, uint32 flg) const
{
    RegOperand *res = memPool->New<RegOperand>(vRegNO, size, kind, flg);
    maplebe::VregInfo::vRegOperandTable[vRegNO] = res;
    return res;
}

RegOperand &AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO)
{
    DEBUG_ASSERT((vReg.vRegOperandTable.find(vRegNO) == vReg.vRegOperandTable.end()), "already exist");
    DEBUG_ASSERT(vRegNO < vReg.VRegTableSize(), "index out of range");
    uint8 bitSize = static_cast<uint8>((static_cast<uint32>(vReg.VRegTableGetSize(vRegNO))) * kBitsPerByte);
    RegOperand *res = CreateVirtualRegisterOperand(vRegNO, bitSize, vReg.VRegTableGetType(vRegNO));
    return *res;
}

RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(regno_t vRegNO)
{
    auto it = maplebe::VregInfo::vRegOperandTable.find(vRegNO);
    return (it != maplebe::VregInfo::vRegOperandTable.end()) ? *(it->second) : CreateVirtualRegisterOperand(vRegNO);
}

// Stage B - Pre-padding and extension of arguments
bool AArch64CGFunc::SelectParmListPreprocess(StmtNode &naryNode, size_t start, std::vector<ParamDesc> &argsDesc,
                                             const MIRFunction *callee)
{
    bool hasSpecialArg = false;
    for (size_t i = start; i < naryNode.NumOpnds(); ++i) {
        BaseNode *argExpr = naryNode.Opnd(i);
        DEBUG_ASSERT(argExpr != nullptr, "not null check");
        PrimType primType = argExpr->GetPrimType();
        DEBUG_ASSERT(primType != PTY_void, "primType should not be void");
        auto *mirType = GlobalTables::GetTypeTable().GetPrimType(primType);
        (void)argsDesc.emplace_back(mirType, argExpr);
    }
    return hasSpecialArg;
}

std::pair<MIRFunction *, MIRFuncType *> AArch64CGFunc::GetCalleeFunction(StmtNode &naryNode) const
{
    MIRFunction *callee = nullptr;
    MIRFuncType *calleeType = nullptr;
    if (dynamic_cast<CallNode *>(&naryNode) != nullptr) {
        auto calleePuIdx = static_cast<CallNode &>(naryNode).GetPUIdx();
        callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx);
        calleeType = callee->GetMIRFuncType();
    } else if (naryNode.GetOpCode() == OP_icallproto) {
        auto *iCallNode = &static_cast<IcallNode &>(naryNode);
        MIRType *protoType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iCallNode->GetRetTyIdx());
        if (protoType->IsMIRPtrType()) {
            calleeType = static_cast<MIRPtrType *>(protoType)->GetPointedFuncType();
        } else if (protoType->IsMIRFuncType()) {
            calleeType = static_cast<MIRFuncType *>(protoType);
        }
    }
    return {callee, calleeType};
}

void AArch64CGFunc::SelectParmListPassByStack(const MIRType &mirType, Operand &opnd, uint32 memOffset, bool preCopyed,
                                              std::vector<Insn *> &insnForStackArgs)
{
    PrimType primType = preCopyed ? PTY_a64 : mirType.GetPrimType();
    auto &valReg = LoadIntoRegister(opnd, primType);
    auto &actMemOpnd = CreateMemOpnd(RSP, memOffset, GetPrimTypeBitSize(primType));
    Insn &strInsn = GetInsnBuilder()->BuildInsn(PickStInsn(GetPrimTypeBitSize(primType), primType), valReg, actMemOpnd);
    actMemOpnd.SetStackArgMem(true);
    if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel2 && insnForStackArgs.size() < kShiftAmount12) {
        (void)insnForStackArgs.emplace_back(&strInsn);
    } else {
        GetCurBB()->AppendInsn(strInsn);
    }
}

/*
 * SelectParmList generates an instruction for each of the parameters
 * to load the parameter value into the corresponding register.
 * We return a list of registers to the call instruction because
 * they may be needed in the register allocation phase.
 */
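/*
 * Under the standard AArch64 convention the first eight integer arguments go
 * in x0-x7 and the first eight FP arguments in v0-v7; the rest go to the
 * outgoing-argument area on the stack, e.g. (illustrative) the ninth integer
 * argument of a call is stored at [sp, #0] before the bl.
 */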
void AArch64CGFunc::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative)
{
    size_t opndIdx = 0;
    // the first opnd of an ICallNode is not a parameter of the function
    if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto || isCallNative ||
        naryNode.GetOpCode() == OP_tailicall) {
        opndIdx++;
    }
    auto [callee, calleeType] = GetCalleeFunction(naryNode);

    std::vector<ParamDesc> argsDesc;
    std::vector<RegMapForPhyRegCpy> regMapForTmpBB;
    bool hasSpecialArg = SelectParmListPreprocess(naryNode, opndIdx, argsDesc, callee);
    BB *curBBrecord = GetCurBB();
    BB *tmpBB = nullptr;
    if (hasSpecialArg) {
        tmpBB = CreateNewBB();
    }

    AArch64CallConvImpl parmLocator(GetBecommon());
    CCLocInfo ploc;
    std::vector<Insn *> insnForStackArgs;

    for (size_t i = 0; i < argsDesc.size(); ++i) {
        if (hasSpecialArg) {
            DEBUG_ASSERT(tmpBB, "need temp bb for lower priority args");
            SetCurBB(argsDesc[i].isSpecialArg ? *curBBrecord : *tmpBB);
        }

        auto *mirType = argsDesc[i].mirType;

        // get the param opnd; for a non-precopyed agg, the opnd must be a mem opnd
        Operand *opnd = nullptr;
        auto preCopyed = argsDesc[i].preCopyed;
        if (preCopyed) { // preCopyed agg, passed by address
            naryNode.SetMayTailcall(false); // has preCopyed arguments, don't do tailcall
            opnd = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
            auto &spReg = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt);
            SelectAdd(*opnd, spReg, CreateImmOperand(argsDesc[i].offset, k64BitSize, false), PTY_a64);
        } else { // base type, calc the true value
            opnd = &LoadIntoRegister(*HandleExpr(naryNode, *argsDesc[i].argExpr), mirType->GetPrimType());
        }
        parmLocator.LocateNextParm(*mirType, ploc, (i == 0), calleeType);

        // skip unused args
        if (callee && callee->GetFuncDesc().IsArgUnused(i)) {
            continue;
        }

        if (ploc.reg0 != kRinvalid) { // load to the register.
            CHECK_FATAL(ploc.reg1 == kRinvalid, "NIY");
            auto &phyReg = GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(ploc.reg0),
                                                              GetPrimTypeBitSize(ploc.primTypeOfReg0),
                                                              GetRegTyFromPrimTy(ploc.primTypeOfReg0));
            DEBUG_ASSERT(opnd->IsRegister(), "NIY, must be reg");
            if (!DoCallerEnsureValidParm(phyReg, static_cast<RegOperand &>(*opnd), ploc.primTypeOfReg0)) {
                if (argsDesc[i].isSpecialArg) {
                    regMapForTmpBB.emplace_back(RegMapForPhyRegCpy(
                        &phyReg, ploc.primTypeOfReg0, static_cast<RegOperand *>(opnd), ploc.primTypeOfReg0));
                } else {
                    SelectCopy(phyReg, ploc.primTypeOfReg0, *opnd, ploc.primTypeOfReg0);
                }
            }
            srcOpnds.PushOpnd(phyReg);
            continue;
        }

        // store to the memory segment for stack-passed arguments.
        if (CGOptions::IsBigEndian() && ploc.memSize < static_cast<int32>(k8ByteSize)) {
            ploc.memOffset = ploc.memOffset + static_cast<int32>(k4ByteSize);
        }
        SelectParmListPassByStack(*mirType, *opnd, static_cast<uint32>(ploc.memOffset), preCopyed, insnForStackArgs);
    }
    // if we have stack-passed arguments, don't do tailcall
    parmLocator.InitCCLocInfo(ploc);
    if (ploc.memOffset != 0) {
        naryNode.SetMayTailcall(false);
    }
    if (hasSpecialArg) {
        DEBUG_ASSERT(tmpBB, "need temp bb for lower priority args");
        SetCurBB(*tmpBB);
        for (auto it : regMapForTmpBB) {
            SelectCopy(*it.destReg, it.destType, *it.srcReg, it.srcType);
        }
        curBBrecord->InsertAtEnd(*tmpBB);
        SetCurBB(*curBBrecord);
    }
    for (auto &strInsn : insnForStackArgs) {
        GetCurBB()->AppendInsn(*strInsn);
    }
}

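/*
 * Caller-side narrowing of sub-word parameters so the callee sees a
 * well-defined value, e.g. (illustrative):
 *   PTY_u1      -> "ubfx w0, w1, #0, #1"
 *   PTY_u8/i8   -> "uxtb w0, w1"
 *   PTY_u16/i16 -> "uxth w0, w1"
 */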
bool AArch64CGFunc::DoCallerEnsureValidParm(RegOperand &destOpnd, RegOperand &srcOpnd, PrimType formalPType)
{
    Insn *insn = nullptr;
    switch (formalPType) {
        case PTY_u1: {
            ImmOperand &lsbOpnd = CreateImmOperand(maplebe::k0BitSize, srcOpnd.GetSize(), false);
            ImmOperand &widthOpnd = CreateImmOperand(maplebe::k1BitSize, srcOpnd.GetSize(), false);
            bool is64Bit = (srcOpnd.GetSize() == maplebe::k64BitSize);
            insn = &GetInsnBuilder()->BuildInsn(is64Bit ? MOP_xubfxrri6i6 : MOP_wubfxrri5i5, destOpnd, srcOpnd, lsbOpnd,
                                                widthOpnd);
            break;
        }
        case PTY_u8:
        case PTY_i8:
            insn = &GetInsnBuilder()->BuildInsn(MOP_xuxtb32, destOpnd, srcOpnd);
            break;
        case PTY_u16:
        case PTY_i16:
            insn = &GetInsnBuilder()->BuildInsn(MOP_xuxth32, destOpnd, srcOpnd);
            break;
        default:
            break;
    }
    if (insn != nullptr) {
        GetCurBB()->AppendInsn(*insn);
        return true;
    }
    return false;
}

void AArch64CGFunc::DoOptForStackStrInsns(std::vector<Insn *> &insnForStackArgs, std::vector<Insn *> &optInsns)
{
    for (size_t i = 0; i + 1 < insnForStackArgs.size(); i += 2) { // 2: iterate over two insns per loop
        auto &insn1 = insnForStackArgs[i];
        auto &insn2 = insnForStackArgs[i + 1];
        if (!CheckStrPairOpt(insn1, insn2)) {
            optInsns.emplace_back(insn1);
            optInsns.emplace_back(insn2);
        } else {
            auto &reg1 = insn1->GetOperand(kInsnFirstOpnd);
            auto &reg2 = insn2->GetOperand(kInsnFirstOpnd);
            auto &mem1 = static_cast<MemOperand &>(insn1->GetOperand(kInsnSecondOpnd));
            Insn &stpInsn = GetInsnBuilder()->BuildInsn(MOP_xstp, reg1, reg2, mem1);
            optInsns.emplace_back(&stpInsn);
        }
    }
    if (insnForStackArgs.size() % 2 == 1) { // 2: the even case is handled above; this is the odd leftover.
        optInsns.emplace_back(insnForStackArgs.back());
    }
}

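/*
 * CheckStrPairOpt accepts two adjacent 64-bit stores that cover consecutive
 * 8-byte stack slots, e.g. (illustrative):
 *   str x1, [sp, #16]
 *   str x2, [sp, #24]
 * which DoOptForStackStrInsns above then merges into:
 *   stp x1, x2, [sp, #16]
 */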
bool AArch64CGFunc::CheckStrPairOpt(Insn *insn1, Insn *insn2)
{
    if (insn1 == nullptr || insn2 == nullptr) {
        return false;
    }
    if (insn1->GetMachineOpcode() != MOP_xstr || insn2->GetMachineOpcode() != MOP_xstr) {
        return false;
    }
    auto &mem1 = static_cast<MemOperand &>(insn1->GetOperand(kInsnSecondOpnd));
    auto &mem2 = static_cast<MemOperand &>(insn2->GetOperand(kInsnSecondOpnd));
    if (mem1.GetAddrMode() != MemOperand::kAddrModeBOi || mem2.GetAddrMode() != MemOperand::kAddrModeBOi) {
        return false;
    }
    OfstOperand *mem1Offset = mem1.GetOffsetImmediate();
    OfstOperand *mem2Offset = mem2.GetOffsetImmediate();
    int64 mem1OffsetValue = mem1Offset ? mem1Offset->GetOffsetValue() : 0;
    int64 mem2OffsetValue = mem2Offset ? mem2Offset->GetOffsetValue() : 0;
    if (mem1OffsetValue % k8ByteSize != 0) {
        return false;
    }
    if (mem2OffsetValue != mem1OffsetValue + k8ByteSize) {
        return false;
    }
    return true;
}

void AArch64CGFunc::SelectParmListNotC(StmtNode &naryNode, ListOperand &srcOpnds)
{
    size_t i = 0;
    if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto) {
        i++;
    }

    CCImpl &parmLocator = *GetOrCreateLocator(CCImpl::GetCallConvKind(naryNode));
    CCLocInfo ploc;
    std::vector<Insn *> insnForStackArgs;
    uint32 stackArgsCount = 0;
    for (uint32 pnum = 0; i < naryNode.NumOpnds(); ++i, ++pnum) {
        MIRType *ty = nullptr;
        BaseNode *argExpr = naryNode.Opnd(i);
        DEBUG_ASSERT(argExpr != nullptr, "argExpr should not be nullptr");
        PrimType primType = argExpr->GetPrimType();
        DEBUG_ASSERT(primType != PTY_void, "primType should not be void");
        /* use alloca */
        ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast<uint32>(primType)];
        RegOperand *expRegOpnd = nullptr;
        Operand *opnd = HandleExpr(naryNode, *argExpr);
        if (!opnd->IsRegister()) {
            opnd = &LoadIntoRegister(*opnd, primType);
        }
        expRegOpnd = static_cast<RegOperand *>(opnd);

        parmLocator.LocateNextParm(*ty, ploc);
        PrimType destPrimType = primType;
        if (ploc.reg0 != kRinvalid) { /* load to the register. */
            CHECK_FATAL(expRegOpnd != nullptr, "null ptr check");
            RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand(
                static_cast<AArch64reg>(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(destPrimType));
            SelectCopy(parmRegOpnd, destPrimType, *expRegOpnd, primType);
            srcOpnds.PushOpnd(parmRegOpnd);
        } else { /* store to the memory segment for stack-passed arguments. */
            if (CGOptions::IsBigEndian()) {
                if (GetPrimTypeBitSize(primType) < k64BitSize) {
                    ploc.memOffset = ploc.memOffset + static_cast<int32>(k4BitSize);
                }
            }
            MemOperand &actMemOpnd = CreateMemOpnd(RSP, ploc.memOffset, GetPrimTypeBitSize(primType));
            Insn &strInsn = GetInsnBuilder()->BuildInsn(PickStInsn(GetPrimTypeBitSize(primType), primType), *expRegOpnd,
                                                        actMemOpnd);
            actMemOpnd.SetStackArgMem(true);
            if (stackArgsCount < kShiftAmount12) {
                (void)insnForStackArgs.emplace_back(&strInsn);
                stackArgsCount++;
            } else {
                GetCurBB()->AppendInsn(strInsn);
            }
        }
        DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NYI");
    }
    std::vector<Insn *> insnForStackArgsOpt;
    DoOptForStackStrInsns(insnForStackArgs, insnForStackArgsOpt);
    for (auto &strInsn : insnForStackArgsOpt) {
        GetCurBB()->AppendInsn(*strInsn);
    }
}

// based on the calling convention, choose how to prepare the args
void AArch64CGFunc::SelectParmListWrapper(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative)
{
    if (CCImpl::GetCallConvKind(naryNode) == kCCall) {
        SelectParmList(naryNode, srcOpnds, isCallNative);
    } else if (CCImpl::GetCallConvKind(naryNode) == kWebKitJS || CCImpl::GetCallConvKind(naryNode) == kGHC) {
        SelectParmListNotC(naryNode, srcOpnds);
    } else {
        CHECK_FATAL(false, "niy");
    }
}

void AArch64CGFunc::SelectCall(CallNode &callNode)
{
    MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx());
    MIRSymbol *fsym = GetFunction().GetLocalOrGlobalSymbol(fn->GetStIdx(), false);
    MIRType *retType = fn->GetReturnType();

    if (GetCG()->GenerateVerboseCG()) {
        const std::string &comment = fsym->GetName();
        GetCurBB()->AppendInsn(CreateCommentInsn(comment));
    }

    ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator());
    SelectParmListWrapper(callNode, *srcOpnds, false);

    Insn &callInsn = AppendCall(*fsym, *srcOpnds);
    GetCurBB()->SetHasCall();
    if (retType != nullptr) {
        callInsn.SetRetSize(static_cast<uint32>(retType->GetSize()));
        callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
    }
    const auto &deoptBundleInfo = callNode.GetDeoptBundleInfo();
    for (const auto &elem : deoptBundleInfo) {
        auto valueKind = elem.second.GetMapleValueKind();
        if (valueKind == MapleValue::kPregKind) {
            auto *opnd = GetOrCreateRegOpndFromPregIdx(elem.second.GetPregIdx(), PTY_ref);
            callInsn.AddDeoptBundleInfo(elem.first, *opnd);
        } else if (valueKind == MapleValue::kConstKind) {
            auto *opnd = SelectIntConst(static_cast<const MIRIntConst &>(elem.second.GetConstValue()), callNode);
            callInsn.AddDeoptBundleInfo(elem.first, *opnd);
        } else {
            CHECK_FATAL(false, "not supported currently");
        }
    }
    AppendStackMapInsn(callInsn);

    /* check if this call uses a stack slot to return */
    if (fn->IsFirstArgReturn()) {
        SetStackProtectInfo(kRetureStackSlot);
    }

    GetFunction().SetHasCall();
}

void AArch64CGFunc::SelectIcall(IcallNode &icallNode)
{
    ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator());
    SelectParmListWrapper(icallNode, *srcOpnds, false);

    Operand *srcOpnd = HandleExpr(icallNode, *icallNode.GetNopndAt(0));
    Operand *fptrOpnd = srcOpnd;
    if (fptrOpnd->GetKind() != Operand::kOpdRegister) {
        PrimType ty = icallNode.Opnd(0)->GetPrimType();
        fptrOpnd = &SelectCopy(*srcOpnd, ty, ty);
    }
    DEBUG_ASSERT(fptrOpnd->IsRegister(), "SelectIcall: function pointer not RegOperand");
    RegOperand *regOpnd = static_cast<RegOperand *>(fptrOpnd);
    Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_xblr, *regOpnd, *srcOpnds);

    MIRType *retType = icallNode.GetCallReturnType();
    if (retType != nullptr) {
        callInsn.SetRetSize(static_cast<uint32>(retType->GetSize()));
        callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
    }

    /* check if this icall uses a stack slot to return */
    CallReturnVector *p2nrets = &icallNode.GetReturnVec();
    if (p2nrets->size() == k1ByteSize) {
        StIdx stIdx = (*p2nrets)[0].first;
        CHECK_NULL_FATAL(mirModule.CurFunction());
        MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx());
        if (sym != nullptr && (GetBecommon().GetTypeSize(sym->GetTyIdx().GetIdx()) > k16ByteSize)) {
            SetStackProtectInfo(kRetureStackSlot);
        }
    }

    GetCurBB()->AppendInsn(callInsn);
    GetCurBB()->SetHasCall();
    DEBUG_ASSERT(GetCurBB()->GetLastMachineInsn()->IsCall(), "lastInsn should be a call");
    GetFunction().SetHasCall();
    const auto &deoptBundleInfo = icallNode.GetDeoptBundleInfo();
    for (const auto &elem : deoptBundleInfo) {
        auto valueKind = elem.second.GetMapleValueKind();
        if (valueKind == MapleValue::kPregKind) {
            auto *opnd = GetOrCreateRegOpndFromPregIdx(elem.second.GetPregIdx(), PTY_ref);
            callInsn.AddDeoptBundleInfo(elem.first, *opnd);
        } else if (valueKind == MapleValue::kConstKind) {
            auto *opnd = SelectIntConst(static_cast<const MIRIntConst &>(elem.second.GetConstValue()), icallNode);
            callInsn.AddDeoptBundleInfo(elem.first, *opnd);
        } else {
            CHECK_FATAL(false, "not supported currently");
        }
    }
    AppendStackMapInsn(callInsn);
}

void AArch64CGFunc::SelectComment(CommentNode &comment)
{
    GetCurBB()->AppendInsn(CreateCommentInsn(comment.GetComment()));
}

void AArch64CGFunc::SelectReturn(Operand *opnd0)
{
    bool is64x1vec = GetFunction().GetAttr(FUNCATTR_oneelem_simd) ? true : false;
    MIRType *floatType = GlobalTables::GetTypeTable().GetDouble();
    MIRType *retTyp = is64x1vec ? floatType : GetFunction().GetReturnType();
    CCImpl &retLocator = *GetOrCreateLocator(GetCurCallConvKind());
    CCLocInfo retMech;
    retLocator.LocateRetVal(*retTyp, retMech);
    if ((retMech.GetRegCount() > 0) && (opnd0 != nullptr)) {
        RegType regTyp = is64x1vec ? kRegTyFloat : GetRegTyFromPrimTy(retMech.GetPrimTypeOfReg0());
        PrimType oriPrimType = is64x1vec ? GetFunction().GetReturnType()->GetPrimType() : retMech.GetPrimTypeOfReg0();
        AArch64reg retReg = static_cast<AArch64reg>(retMech.GetReg0());
        if (opnd0->IsRegister()) {
            RegOperand *regOpnd = static_cast<RegOperand *>(opnd0);
            if (regOpnd->GetRegisterNumber() != retMech.GetReg0()) {
                RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, regOpnd->GetSize(), regTyp);
                SelectCopy(retOpnd, retMech.GetPrimTypeOfReg0(), *regOpnd, oriPrimType);
            }
        } else if (opnd0->IsMemoryAccessOperand()) {
            auto *memopnd = static_cast<MemOperand *>(opnd0);
            RegOperand &retOpnd =
                GetOrCreatePhysicalRegisterOperand(retReg, GetPrimTypeBitSize(retMech.GetPrimTypeOfReg0()), regTyp);
            MOperator mOp = PickLdInsn(memopnd->GetSize(), retMech.GetPrimTypeOfReg0());
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, retOpnd, *memopnd));
        } else if (opnd0->IsConstImmediate()) {
            ImmOperand *immOpnd = static_cast<ImmOperand *>(opnd0);
            if (!is64x1vec) {
                RegOperand &retOpnd =
                    GetOrCreatePhysicalRegisterOperand(retReg, GetPrimTypeBitSize(retMech.GetPrimTypeOfReg0()),
                                                       GetRegTyFromPrimTy(retMech.GetPrimTypeOfReg0()));
                SelectCopy(retOpnd, retMech.GetPrimTypeOfReg0(), *immOpnd, retMech.GetPrimTypeOfReg0());
            } else {
                PrimType rType = GetFunction().GetReturnType()->GetPrimType();
                RegOperand *reg = &CreateRegisterOperandOfType(rType);
                SelectCopy(*reg, rType, *immOpnd, rType);
                RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, GetPrimTypeBitSize(PTY_f64),
                                                                         GetRegTyFromPrimTy(PTY_f64));
                Insn &insn = GetInsnBuilder()->BuildInsn(MOP_xvmovdr, retOpnd, *reg);
                GetCurBB()->AppendInsn(insn);
            }
        } else {
            CHECK_FATAL(false, "nyi");
        }
    }
    GetExitBBsVec().emplace_back(GetCurBB());
}

RegOperand &AArch64CGFunc::GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, PrimType primType)
{
    switch (sregIdx) {
        case kSregSp:
            return GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt);
        case kSregFp:
            return GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt);
        default:
            break;
    }

    bool useFpReg = !IsPrimitiveInteger(primType);
    AArch64reg pReg = RLAST_INT_REG;
    switch (sregIdx) {
        case kSregRetval0:
            pReg = useFpReg ? V0 : R0;
            break;
        case kSregRetval1:
            pReg = useFpReg ? V1 : R1;
            break;
        case kSregRetval2:
            pReg = V2;
            break;
        case kSregRetval3:
            pReg = V3;
            break;
        default:
            DEBUG_ASSERT(false, "Special pseudo registers NYI");
            break;
    }
    uint32 bitSize = GetPrimTypeBitSize(primType);
    bitSize = bitSize <= k32BitSize ? k32BitSize : bitSize;
    auto &phyOpnd = GetOrCreatePhysicalRegisterOperand(pReg, bitSize, GetRegTyFromPrimTy(primType));
    return SelectCopy(phyOpnd, primType, primType); // most opts only deal with vregs, so return a vreg
}

RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(AArch64reg regNO, uint32 size, RegType kind, uint32 flag)
{
    uint64 aarch64PhyRegIdx = regNO;
    DEBUG_ASSERT(flag == 0, "Do not expect flag here");
    if (size <= k32BitSize) {
        size = k32BitSize;
        aarch64PhyRegIdx = aarch64PhyRegIdx << 1;
    } else if (size <= k64BitSize) {
        size = k64BitSize;
        aarch64PhyRegIdx = (aarch64PhyRegIdx << 1) + 1;
    } else {
        size = (size == k128BitSize) ? k128BitSize : k64BitSize;
        aarch64PhyRegIdx = aarch64PhyRegIdx << k4BitShift;
    }
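    /* the keying keeps the w/x views of one register distinct: e.g. assuming
       R3 has register number 3, its 32-bit view hashes to 6, its 64-bit view
       to 7, and a 128-bit view to 48 (illustrative) */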
3731 RegOperand *phyRegOpnd = nullptr;
3732 auto phyRegIt = phyRegOperandTable.find(aarch64PhyRegIdx);
3733 if (phyRegIt != phyRegOperandTable.end()) {
3734 phyRegOpnd = phyRegOperandTable[aarch64PhyRegIdx];
3735 } else {
3736 phyRegOpnd = memPool->New<RegOperand>(regNO, size, kind, flag);
3737 phyRegOperandTable.emplace(aarch64PhyRegIdx, phyRegOpnd);
3738 }
3739 return *phyRegOpnd;
3740 }
3741
GetLabelOperand(LabelIdx labIdx) const3742 const LabelOperand *AArch64CGFunc::GetLabelOperand(LabelIdx labIdx) const
3743 {
3744 const MapleUnorderedMap<LabelIdx, LabelOperand *>::const_iterator it = hashLabelOpndTable.find(labIdx);
3745 if (it != hashLabelOpndTable.end()) {
3746 return it->second;
3747 }
3748 return nullptr;
3749 }
3750
GetOrCreateLabelOperand(LabelIdx labIdx)3751 LabelOperand &AArch64CGFunc::GetOrCreateLabelOperand(LabelIdx labIdx)
3752 {
3753 MapleUnorderedMap<LabelIdx, LabelOperand *>::iterator it = hashLabelOpndTable.find(labIdx);
3754 if (it != hashLabelOpndTable.end()) {
3755 return *(it->second);
3756 }
3757 LabelOperand *res = memPool->New<LabelOperand>(GetShortFuncName().c_str(), labIdx, *memPool);
3758 hashLabelOpndTable[labIdx] = res;
3759 return *res;
3760 }
3761
GetOrCreateLabelOperand(BB & bb)3762 LabelOperand &AArch64CGFunc::GetOrCreateLabelOperand(BB &bb)
3763 {
3764 LabelIdx labelIdx = bb.GetLabIdx();
3765 if (labelIdx == MIRLabelTable::GetDummyLabel()) {
3766 labelIdx = CreateLabel();
3767 bb.AddLabel(labelIdx);
3768 SetLab2BBMap(labelIdx, bb);
3769 }
3770 return GetOrCreateLabelOperand(labelIdx);
3771 }
3772
GetOrCreateOfstOpnd(uint64 offset,uint32 size)3773 OfstOperand &AArch64CGFunc::GetOrCreateOfstOpnd(uint64 offset, uint32 size)
3774 {
3775 uint64 aarch64OfstRegIdx = offset;
3776 aarch64OfstRegIdx = (aarch64OfstRegIdx << 1);
3777 if (size == k64BitSize) {
3778 ++aarch64OfstRegIdx;
3779 }
3780 DEBUG_ASSERT(size == k32BitSize || size == k64BitSize, "ofStOpnd size check");
3781 auto it = hashOfstOpndTable.find(aarch64OfstRegIdx);
3782 if (it != hashOfstOpndTable.end()) {
3783 return *it->second;
3784 }
3785 OfstOperand *res = &CreateOfstOpnd(offset, size);
3786 hashOfstOpndTable[aarch64OfstRegIdx] = res;
3787 return *res;
3788 }
3789
MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(const MIRSymbol &symbol, int64 offset, uint32 size, bool forLocalRef,
                                              bool needLow12, RegOperand *regOp)
{
    MIRStorageClass storageClass = symbol.GetStorageClass();
    if ((storageClass == kScAuto) || (storageClass == kScFormal)) {
        AArch64SymbolAlloc *symLoc =
            static_cast<AArch64SymbolAlloc *>(GetMemlayout()->GetSymAllocInfo(symbol.GetStIndex()));
        if (forLocalRef) {
            auto p = GetMemlayout()->GetLocalRefLocMap().find(symbol.GetStIdx());
            CHECK_FATAL(p != GetMemlayout()->GetLocalRefLocMap().end(), "sym loc should have been defined");
            symLoc = static_cast<AArch64SymbolAlloc *>(p->second);
        }
        DEBUG_ASSERT(symLoc != nullptr, "sym loc should have been defined");
        /* At this point, we don't know which registers the callee needs to save. */
        DEBUG_ASSERT((IsFPLRAddedToCalleeSavedList() || (SizeOfCalleeSaved() == 0)),
                     "CalleeSaved won't be known until after Register Allocation");
        StIdx idx = symbol.GetStIdx();
        auto it = memOpndsRequiringOffsetAdjustment.find(idx);
        DEBUG_ASSERT((!IsFPLRAddedToCalleeSavedList() ||
                      ((it != memOpndsRequiringOffsetAdjustment.end()) || (storageClass == kScFormal))),
                     "Memory operand of this symbol should have been added to the hash table");
        int32 stOffset = GetBaseOffset(*symLoc);
        if (it != memOpndsRequiringOffsetAdjustment.end()) {
            if (GetMemlayout()->IsLocalRefLoc(symbol)) {
                if (!forLocalRef) {
                    return *(it->second);
                }
            } else {
                Operand *offOpnd = (it->second)->GetOffset();
                DEBUG_ASSERT(offOpnd != nullptr, "offOpnd should not be nullptr");
                if (((static_cast<OfstOperand *>(offOpnd))->GetOffsetValue() == (stOffset + offset)) &&
                    (it->second->GetSize() == size)) {
                    return *(it->second);
                }
            }
        }
        it = memOpndsForStkPassedArguments.find(idx);
        if (it != memOpndsForStkPassedArguments.end()) {
            if (GetMemlayout()->IsLocalRefLoc(symbol)) {
                if (!forLocalRef) {
                    return *(it->second);
                }
            } else {
                return *(it->second);
            }
        }

        RegOperand *baseOpnd = static_cast<RegOperand *>(GetBaseReg(*symLoc));
        int32 totalOffset = stOffset + static_cast<int32>(offset);
        /* needs a fresh copy of OfstOperand, as we may adjust its offset at a later stage */
        OfstOperand *offsetOpnd = nullptr;
        if (CGOptions::IsBigEndian()) {
            if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed && size < k64BitSize) {
                offsetOpnd = &CreateOfstOpnd(k4BitSize + static_cast<uint32>(totalOffset), k64BitSize);
            } else {
                offsetOpnd = &CreateOfstOpnd(static_cast<uint64>(static_cast<int64>(totalOffset)), k64BitSize);
            }
        } else {
            offsetOpnd = &CreateOfstOpnd(static_cast<uint64>(static_cast<int64>(totalOffset)), k64BitSize);
        }
        if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed &&
            MemOperand::IsPIMMOffsetOutOfRange(totalOffset, size)) {
            ImmOperand *offsetOprand = &CreateImmOperand(totalOffset, k64BitSize, true, kUnAdjustVary);
            Operand *resImmOpnd = &SelectCopy(*offsetOprand, PTY_i64, PTY_i64);
            return *CreateMemOperand(MemOperand::kAddrModeBOrX, size, *baseOpnd, static_cast<RegOperand &>(*resImmOpnd),
                                     nullptr, symbol, true);
        } else {
            if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) {
                offsetOpnd->SetVary(kUnAdjustVary);
            }
            MemOperand *res = CreateMemOperand(MemOperand::kAddrModeBOi, size, *baseOpnd, nullptr, offsetOpnd, &symbol);
            if (!forLocalRef) {
                memOpndsRequiringOffsetAdjustment[idx] = res;
            }
            return *res;
        }
    } else {
        CHECK_FATAL(false, "NYI");
    }
}

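/*
 * Canonicalize a memory operand: identical operands (same addressing mode,
 * base, index, offset, and size) are shared through hashMemOpndTable.
 */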
MemOperand &AArch64CGFunc::HashMemOpnd(MemOperand &tMemOpnd)
{
    auto it = hashMemOpndTable.find(tMemOpnd);
    if (it != hashMemOpndTable.end()) {
        return *(it->second);
    }
    auto *res = memPool->New<MemOperand>(tMemOpnd);
    hashMemOpndTable[tMemOpnd] = res;
    return *res;
}

MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand *base,
                                              RegOperand *index, ImmOperand *offset, const MIRSymbol *st)
{
    DEBUG_ASSERT(base != nullptr, "nullptr check");
    MemOperand tMemOpnd(mode, size, *base, index, offset, st);
    if (base->GetRegisterNumber() == RFP || base->GetRegisterNumber() == RSP) {
        tMemOpnd.SetStackMem(true);
    }
    return HashMemOpnd(tMemOpnd);
}

MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand *base,
                                              RegOperand *index, int32 shift, bool isSigned)
{
    DEBUG_ASSERT(base != nullptr, "nullptr check");
    MemOperand tMemOpnd(mode, size, *base, *index, shift, isSigned);
    if (base->GetRegisterNumber() == RFP || base->GetRegisterNumber() == RSP) {
        tMemOpnd.SetStackMem(true);
    }
    return HashMemOpnd(tMemOpnd);
}

MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand &oldMem)
{
    return HashMemOpnd(oldMem);
}

/* offset: base offset from FP or SP */
MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size)
{
    OfstOperand &offsetOpnd = CreateOfstOpnd(static_cast<uint64>(offset), k32BitSize);
    /* signed immediates in this range need no bit-size rotation check */
    bool checkSimm = (offset > kMinSimm64 && offset < kMaxSimm64Pair);
    if (!checkSimm && !ImmOperand::IsInBitSizeRot(kMaxImmVal12Bits, offset)) {
        Operand *resImmOpnd = &SelectCopy(CreateImmOperand(offset, k32BitSize, true), PTY_i32, PTY_i32);
        return *CreateMemOperand(MemOperand::kAddrModeBOrX, size, baseOpnd, static_cast<RegOperand *>(resImmOpnd),
                                 nullptr, nullptr);
    } else {
        return *CreateMemOperand(MemOperand::kAddrModeBOi, size, baseOpnd, nullptr, &offsetOpnd, nullptr);
    }
}

/* offset: base offset + #:lo12:Label+immediate */
MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size, const MIRSymbol &sym)
{
    OfstOperand &offsetOpnd = CreateOfstOpnd(static_cast<uint64>(offset), k32BitSize);
    DEBUG_ASSERT(ImmOperand::IsInBitSizeRot(kMaxImmVal12Bits, offset), "");
    return *CreateMemOperand(MemOperand::kAddrModeBOi, size, baseOpnd, nullptr, &offsetOpnd, &sym);
}

/*
 * case 1: iread a64 <* <* void>> 0 (add a64 (
 *     addrof a64 $__reg_jni_func_tab$$libcore_all_bytecode,
 *     mul a64 (
 *         cvt a64 i32 (constval i32 21),
 *         constval a64 8)))
 *
 * case 2: iread u32 <* u8> 0 (add a64 (regread a64 %61, constval a64 3))
 * case 3: iread u32 <* u8> 0 (add a64 (regread a64 %61, regread a64 %65))
 * case 4: iread u32 <* u8> 0 (add a64 (cvt a64 i32 (regread %n)))
 */
MemOperand *AArch64CGFunc::CheckAndCreateExtendMemOpnd(PrimType ptype, const BaseNode &addrExpr, int64 offset)
{
    aggParamReg = nullptr;
    if (addrExpr.GetOpCode() != OP_add || offset != 0) {
        return nullptr;
    }
    BaseNode *baseExpr = addrExpr.Opnd(0);
    BaseNode *addendExpr = addrExpr.Opnd(1);

    if (baseExpr->GetOpCode() == OP_regread) {
        /* case 2 */
        if (addendExpr->GetOpCode() == OP_constval) {
            DEBUG_ASSERT(addrExpr.GetNumOpnds() == kOpndNum2, "Unexpected expr operand in CheckAndCreateExtendMemOpnd");
            ConstvalNode *constOfstNode = static_cast<ConstvalNode *>(addendExpr);
            DEBUG_ASSERT(constOfstNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst");
            MIRIntConst *intOfst = safe_cast<MIRIntConst>(constOfstNode->GetConstVal());
            CHECK_FATAL(intOfst != nullptr, "just checking");
            /* discard large offsets and negative offsets */
            if (intOfst->GetExtValue() > INT32_MAX || intOfst->IsNegative()) {
                return nullptr;
            }
            uint32 scale = static_cast<uint32>(intOfst->GetExtValue());
            OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(scale, k32BitSize);
            uint32 dsize = GetPrimTypeBitSize(ptype);
            MemOperand *memOpnd =
                &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(ptype),
                                    SelectRegread(*static_cast<RegreadNode *>(baseExpr)), nullptr, &ofstOpnd, nullptr);
            return IsOperandImmValid(PickLdInsn(dsize, ptype), memOpnd, kInsnSecondOpnd) ? memOpnd : nullptr;
            /* case 3 */
        } else if (addendExpr->GetOpCode() == OP_regread) {
            CHECK_FATAL(addrExpr.GetNumOpnds() == kOpndNum2, "Unexpected expr operand in CheckAndCreateExtendMemOpnd");
            if (GetPrimTypeSize(baseExpr->GetPrimType()) != GetPrimTypeSize(addendExpr->GetPrimType())) {
                return nullptr;
            }

            auto *baseReg = SelectRegread(*static_cast<RegreadNode *>(baseExpr));
            auto *indexReg = SelectRegread(*static_cast<RegreadNode *>(addendExpr));
            MemOperand *memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), baseReg,
                                                      indexReg, nullptr, nullptr);
            if (CGOptions::IsArm64ilp32() && IsSignedInteger(addendExpr->GetPrimType())) {
                memOpnd->SetExtend(memOpnd->GetExtend() | MemOperand::ExtendInfo::kSignExtend);
            }
            return memOpnd;
            /* case 4 */
        } else if (addendExpr->GetOpCode() == OP_cvt && addendExpr->GetNumOpnds() == 1) {
            int shiftAmount = 0;
            BaseNode *cvtRegreadNode = addendExpr->Opnd(kInsnFirstOpnd);
            if (cvtRegreadNode->GetOpCode() == OP_regread && cvtRegreadNode->IsLeaf()) {
                uint32 fromSize = GetPrimTypeBitSize(cvtRegreadNode->GetPrimType());
                uint32 toSize = GetPrimTypeBitSize(addendExpr->GetPrimType());

                if (toSize < fromSize) {
                    return nullptr;
                }

                MemOperand *memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype),
                                                          SelectRegread(*static_cast<RegreadNode *>(baseExpr)),
                                                          SelectRegread(*static_cast<RegreadNode *>(cvtRegreadNode)),
                                                          shiftAmount, toSize != fromSize);
                return memOpnd;
            }
        }
    }
    if (addendExpr->GetOpCode() != OP_mul || !IsPrimitiveInteger(ptype)) {
        return nullptr;
    }
    BaseNode *indexExpr, *scaleExpr;
    indexExpr = addendExpr->Opnd(0);
    scaleExpr = addendExpr->Opnd(1);
    if (scaleExpr->GetOpCode() != OP_constval) {
        return nullptr;
    }
    ConstvalNode *constValNode = static_cast<ConstvalNode *>(scaleExpr);
    CHECK_FATAL(constValNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst");
    MIRIntConst *mirIntConst = safe_cast<MIRIntConst>(constValNode->GetConstVal());
    CHECK_FATAL(mirIntConst != nullptr, "just checking");
    int32 scale = mirIntConst->GetExtValue();
    if (scale < 0) {
        return nullptr;
    }
    uint32 unsignedScale = static_cast<uint32>(scale);
    if (unsignedScale != GetPrimTypeSize(ptype) || indexExpr->GetOpCode() != OP_cvt) {
        return nullptr;
    }
    /* 8 is 1 << 3; 4 is 1 << 2; 2 is 1 << 1; 1 is 1 << 0 */
    int32 shift = (unsignedScale == 8) ? 3 : ((unsignedScale == 4) ? 2 : ((unsignedScale == 2) ? 1 : 0));
    RegOperand &base = static_cast<RegOperand &>(LoadIntoRegister(*HandleExpr(addrExpr, *baseExpr), PTY_a64));
    TypeCvtNode *typeCvtNode = static_cast<TypeCvtNode *>(indexExpr);
    PrimType fromType = typeCvtNode->FromType();
    PrimType toType = typeCvtNode->GetPrimType();
    MemOperand *memOpnd = nullptr;
    if ((fromType == PTY_i32) && (toType == PTY_a64)) {
        RegOperand &index =
            static_cast<RegOperand &>(LoadIntoRegister(*HandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_i32));
        memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), &base, &index, shift, true);
    } else if ((fromType == PTY_u32) && (toType == PTY_a64)) {
        RegOperand &index =
            static_cast<RegOperand &>(LoadIntoRegister(*HandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_u32));
        memOpnd =
            &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), &base, &index, shift, false);
    }
    return memOpnd;
}

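/*
 * Fallback when no extended addressing mode applies: compute the address into
 * a register and use a base + immediate-offset (BOi) operand. A trailing
 * "add/sub base, constant" in the address expression is folded into the offset.
 */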
MemOperand &AArch64CGFunc::CreateNonExtendMemOpnd(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr,
                                                  int64 offset)
{
    Operand *addrOpnd = nullptr;
    if ((addrExpr.GetOpCode() == OP_add || addrExpr.GetOpCode() == OP_sub) &&
        addrExpr.Opnd(1)->GetOpCode() == OP_constval) {
        addrOpnd = HandleExpr(addrExpr, *addrExpr.Opnd(0));
        ConstvalNode *constOfstNode = static_cast<ConstvalNode *>(addrExpr.Opnd(1));
        DEBUG_ASSERT(constOfstNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst");
        MIRIntConst *intOfst = safe_cast<MIRIntConst>(constOfstNode->GetConstVal());
        CHECK_FATAL(intOfst != nullptr, "just checking");
        offset = (addrExpr.GetOpCode() == OP_add) ? offset + intOfst->GetSXTValue() : offset - intOfst->GetSXTValue();
    } else {
        addrOpnd = HandleExpr(parent, addrExpr);
    }
    addrOpnd = static_cast<RegOperand *>(&LoadIntoRegister(*addrOpnd, PTY_a64));
    OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast<uint64>(offset), k64BitSize);
    return GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(ptype),
                              static_cast<RegOperand *>(addrOpnd), nullptr, &ofstOpnd, nullptr);
}

/*
 * Create a memory operand with the specified data type, making use of the
 * AArch64 extended-register addressing modes when possible.
 */
MemOperand &AArch64CGFunc::CreateMemOpnd(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset)
{
    MemOperand *memOpnd = CheckAndCreateExtendMemOpnd(ptype, addrExpr, offset);
    if (memOpnd != nullptr) {
        return *memOpnd;
    }
    return CreateNonExtendMemOpnd(ptype, parent, addrExpr, offset);
}

MemOperand *AArch64CGFunc::CreateMemOpndOrNull(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset)
{
    MemOperand *memOpnd = CheckAndCreateExtendMemOpnd(ptype, addrExpr, offset);
    if (memOpnd != nullptr) {
        return memOpnd;
    } else if (aggParamReg != nullptr) {
        return nullptr;
    }
    return &CreateNonExtendMemOpnd(ptype, parent, addrExpr, offset);
}

Operand &AArch64CGFunc::GetOrCreateFuncNameOpnd(const MIRSymbol &symbol) const
{
    return *memPool->New<FuncNameOperand>(symbol);
}

Operand &AArch64CGFunc::GetOrCreateRflag()
{
    if (rcc == nullptr) {
        rcc = &CreateRflagOperand();
    }
    return *rcc;
}

const Operand *AArch64CGFunc::GetRflag() const
{
    return rcc;
}

RegOperand &AArch64CGFunc::GetOrCreatevaryreg()
{
    if (vary == nullptr) {
        regno_t vRegNO = NewVReg(kRegTyVary, k8ByteSize);
        vary = &CreateVirtualRegisterOperand(vRegNO);
    }
    return *vary;
}

RegOperand *AArch64CGFunc::GetBaseReg(const SymbolAlloc &symAlloc)
{
    MemSegmentKind sgKind = symAlloc.GetMemSegment()->GetMemSegmentKind();
    DEBUG_ASSERT(((sgKind == kMsArgsRegPassed) || (sgKind == kMsLocals) || (sgKind == kMsRefLocals) ||
                  (sgKind == kMsArgsToStkPass) || (sgKind == kMsArgsStkPassed)),
                 "NYI");

    if (sgKind == kMsArgsStkPassed || sgKind == kMsCold) {
        return &GetOrCreatevaryreg();
    }

    if (fsp == nullptr) {
        fsp = &GetOrCreatePhysicalRegisterOperand(RFP, GetPointerSize() * kBitsPerByte, kRegTyInt);
    }
    return fsp;
}

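/*
 * Offset of a symbol from its base register, per segment kind (see V2 in
 * aarch64_memlayout.h):
 *   kMsArgsStkPassed:  symOffset + sizeOfColdToStk
 *   kMsCold:           symOffset
 *   kMsArgsRegPassed:  locals + refLocals + symOffset + sizeofFplr
 *   kMsRefLocals:      locals + symOffset + sizeofFplr
 *   kMsLocals:         symOffset + sizeofFplr
 *   kMsSpillReg:       argsRegPassed + locals + refLocals + symOffset + sizeofFplr
 *   kMsArgsToStkPass:  symOffset (the callers' outgoing-argument area)
 */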
int32 AArch64CGFunc::GetBaseOffset(const SymbolAlloc &symbolAlloc)
{
    const AArch64SymbolAlloc *symAlloc = static_cast<const AArch64SymbolAlloc *>(&symbolAlloc);
    // Call frame layout of AArch64.
    // Refer to V2 in aarch64_memlayout.h.
    // Do not change this unless you know what you are doing.
    // For O2 mode, refer to V2.1 in aarch64_memlayout.cpp.
    const int32 sizeofFplr = static_cast<int32>(2 * kAarch64IntregBytelen);
    MemSegmentKind sgKind = symAlloc->GetMemSegment()->GetMemSegmentKind();
    AArch64MemLayout *memLayout = static_cast<AArch64MemLayout *>(this->GetMemlayout());
    if (sgKind == kMsArgsStkPassed) { /* for callees */
        int32 offset = static_cast<int32>(symAlloc->GetOffset());
        offset += static_cast<int32>(memLayout->GetSizeOfColdToStk());
        return offset;
    } else if (sgKind == kMsCold) {
        int32 offset = static_cast<int32>(symAlloc->GetOffset());
        return offset;
    } else if (sgKind == kMsArgsRegPassed) {
        int32 baseOffset = static_cast<int32>(memLayout->GetSizeOfLocals() + memLayout->GetSizeOfRefLocals()) +
                           static_cast<int32>(symAlloc->GetOffset());
        return baseOffset + sizeofFplr;
    } else if (sgKind == kMsRefLocals) {
        int32 baseOffset = static_cast<int32>(symAlloc->GetOffset()) + static_cast<int32>(memLayout->GetSizeOfLocals());
        return baseOffset + sizeofFplr;
    } else if (sgKind == kMsLocals) {
        int32 baseOffset = symAlloc->GetOffset();
        return baseOffset + sizeofFplr;
    } else if (sgKind == kMsSpillReg) {
        int32 baseOffset = static_cast<int32>(symAlloc->GetOffset()) +
                           static_cast<int32>(memLayout->SizeOfArgsRegisterPassed() + memLayout->GetSizeOfLocals() +
                                              memLayout->GetSizeOfRefLocals());
        return baseOffset + sizeofFplr;
    } else if (sgKind == kMsArgsToStkPass) { /* this is for callers */
        return static_cast<int32>(symAlloc->GetOffset());
    } else {
        CHECK_FATAL(false, "sgKind check");
    }
    return 0;
}

void AArch64CGFunc::AppendCall(const MIRSymbol &funcSymbol)
{
    ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator());
    AppendCall(funcSymbol, *srcOpnds);
}

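/*
 * Insert "resOpnd = opnd0 + opnd1" next to insn (after it when isDest is set,
 * before it otherwise). Immediates that fit in 24 bits are split into at most
 * two 12-bit adds (the upper half via LSL #12); larger values are first
 * materialized into R16.
 */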
void AArch64CGFunc::SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, bool isDest,
                                       Insn &insn)
{
    uint32 dsize = GetPrimTypeBitSize(primType);
    bool is64Bits = (dsize == k64BitSize);
    DEBUG_ASSERT(opnd0.GetKind() == Operand::kOpdRegister, "Spill memory operand should be based on a register");
    DEBUG_ASSERT((opnd1.GetKind() == Operand::kOpdImmediate || opnd1.GetKind() == Operand::kOpdOffset),
                 "Spill memory operand should have an immediate offset.");

    ImmOperand *immOpnd = static_cast<ImmOperand *>(&opnd1);

    MOperator mOpCode = MOP_undef;
    Insn *curInsn = &insn;
    /* the value fits in the lower 24 bits; all higher bits are 0 */
    if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) {
        Operand *newOpnd0 = &opnd0;
        /* both the lower 12 bits and the higher 12 bits are non-zero */
        if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) {
            /* process the higher 12 bits */
            ImmOperand &immOpnd2 =
                CreateImmOperand(static_cast<int64>(static_cast<uint64>(immOpnd->GetValue()) >> kMaxImmVal12Bits),
                                 immOpnd->GetSize(), immOpnd->IsSignedValue());
            mOpCode = is64Bits ? MOP_xaddrri24 : MOP_waddrri24;
            BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize);
            Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, immOpnd2, shiftopnd);
            DEBUG_ASSERT(IsOperandImmValid(mOpCode, &immOpnd2, kInsnThirdOpnd), "immOpnd2 appears invalid");
            if (isDest) {
                insn.GetBB()->InsertInsnAfter(insn, newInsn);
            } else {
                insn.GetBB()->InsertInsnBefore(insn, newInsn);
            }
            /* keep only the lower 12 bits of the value */
            immOpnd->ModuloByPow2(static_cast<int32>(kMaxImmVal12Bits));
            newOpnd0 = &resOpnd;
            curInsn = &newInsn;
        }
        /* process the lower 12 bits of the value */
        mOpCode = is64Bits ? MOP_xaddrri12 : MOP_waddrri12;
        Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *newOpnd0, *immOpnd);
        DEBUG_ASSERT(IsOperandImmValid(mOpCode, immOpnd, kInsnThirdOpnd), "immOpnd appears invalid");
        if (isDest) {
            insn.GetBB()->InsertInsnAfter(*curInsn, newInsn);
        } else {
            insn.GetBB()->InsertInsnBefore(insn, newInsn);
        }
    } else {
        /* load the immediate into a register first */
        RegOperand &movOpnd = GetOrCreatePhysicalRegisterOperand(R16, dsize, kRegTyInt);
        mOpCode = is64Bits ? MOP_xmovri64 : MOP_wmovri32;
        Insn &movInsn = GetInsnBuilder()->BuildInsn(mOpCode, movOpnd, *immOpnd);
        mOpCode = is64Bits ? MOP_xaddrrr : MOP_waddrrr;
        Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, movOpnd);
        if (isDest) {
            (void)insn.GetBB()->InsertInsnAfter(insn, newInsn);
            (void)insn.GetBB()->InsertInsnAfter(insn, movInsn);
        } else {
            (void)insn.GetBB()->InsertInsnBefore(insn, movInsn);
            (void)insn.GetBB()->InsertInsnBefore(insn, newInsn);
        }
    }
}

MemOperand *AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, bool isDest,
                                                              Insn &insn, AArch64reg regNum, bool &isOutOfRange)
{
    if (vrNum >= vReg.VRegTableSize()) {
        CHECK_FATAL(false, "index out of range in AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange");
    }
    uint32 dataSize = GetOrCreateVirtualRegisterOperand(vrNum).GetSize();
    if (IsImmediateOffsetOutOfRange(*memOpnd, dataSize) && CheckIfSplitOffsetWithAdd(*memOpnd, dataSize)) {
        isOutOfRange = true;
        memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize, regNum, isDest, &insn);
    } else {
        isOutOfRange = false;
    }
    return memOpnd;
}

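/*
 * Release the spill slot of vrNum into reuseSpillLocMem, a free list keyed by
 * slot size, so later spills of the same size can reuse it.
 */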
void AArch64CGFunc::FreeSpillRegMem(regno_t vrNum)
{
    MemOperand *memOpnd = nullptr;

    auto p = spillRegMemOperands.find(vrNum);
    if (p != spillRegMemOperands.end()) {
        memOpnd = p->second;
    }

    if ((memOpnd == nullptr) && IsVRegNOForPseudoRegister(vrNum)) {
        auto pSecond = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum));
        if (pSecond != pRegSpillMemOperands.end()) {
            memOpnd = pSecond->second;
        }
    }

    if (memOpnd == nullptr) {
        DEBUG_ASSERT(false, "freed spill register has no memory operand");
        return;
    }

    uint32 size = memOpnd->GetSize();
    MapleUnorderedMap<uint32, SpillMemOperandSet *>::iterator iter;
    if ((iter = reuseSpillLocMem.find(size)) != reuseSpillLocMem.end()) {
        iter->second->Add(*memOpnd);
    } else {
        reuseSpillLocMem[size] = memPool->New<SpillMemOperandSet>(*GetFuncScopeAllocator());
        reuseSpillLocMem[size]->Add(*memOpnd);
    }
}

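/*
 * Find or create the spill slot for vrNum, preferring a recycled slot from
 * reuseSpillLocMem before allocating a new stack location.
 */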
MemOperand *AArch64CGFunc::GetOrCreatSpillMem(regno_t vrNum, uint32 memSize)
{
    /* NOTE: must only be used during register allocation. */
    if (IsVRegNOForPseudoRegister(vrNum)) {
        auto p = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum));
        if (p != pRegSpillMemOperands.end()) {
            return p->second;
        }
    }

    auto p = spillRegMemOperands.find(vrNum);
    if (p == spillRegMemOperands.end()) {
        if (vrNum >= vReg.VRegTableSize()) {
            CHECK_FATAL(false, "index out of range in AArch64CGFunc::GetOrCreatSpillMem");
        }
        uint32 memBitSize = (memSize <= k32BitSize) ? k32BitSize : (memSize <= k64BitSize) ? k64BitSize : k128BitSize;
        auto it = reuseSpillLocMem.find(memBitSize);
        if (it != reuseSpillLocMem.end()) {
            MemOperand *memOpnd = it->second->GetOne();
            if (memOpnd != nullptr) {
                (void)spillRegMemOperands.emplace(std::pair<regno_t, MemOperand *>(vrNum, memOpnd));
                return memOpnd;
            }
        }

        RegOperand &baseOpnd = GetOrCreateStackBaseRegOperand();
        int64 offset = GetOrCreatSpillRegLocation(vrNum, memBitSize / kBitsPerByte);
        MemOperand *memOpnd = nullptr;
        OfstOperand *offsetOpnd = &CreateOfstOpnd(static_cast<uint64>(offset), k64BitSize);
        memOpnd = CreateMemOperand(MemOperand::kAddrModeBOi, memBitSize, baseOpnd, nullptr, offsetOpnd, nullptr);
        (void)spillRegMemOperands.emplace(std::pair<regno_t, MemOperand *>(vrNum, memOpnd));
        return memOpnd;
    } else {
        return p->second;
    }
}

MemOperand *AArch64CGFunc::GetPseudoRegisterSpillMemoryOperand(PregIdx i)
{
    MapleUnorderedMap<PregIdx, MemOperand *>::iterator p;
    if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) {
        p = pRegSpillMemOperands.end();
    } else {
        p = pRegSpillMemOperands.find(i);
    }
    if (p != pRegSpillMemOperands.end()) {
        return p->second;
    }
    int64 offset = GetPseudoRegisterSpillLocation(i);
    MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(i);
    uint32 bitLen = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte;
    RegOperand &base = GetOrCreateFramePointerRegOperand();

    OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast<uint64>(offset), k32BitSize);
    MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitLen, &base, nullptr, &ofstOpnd, nullptr);
    if (IsImmediateOffsetOutOfRange(memOpnd, bitLen)) {
        MemOperand &newMemOpnd = SplitOffsetWithAddInstruction(memOpnd, bitLen);
        (void)pRegSpillMemOperands.emplace(std::pair<PregIdx, MemOperand *>(i, &newMemOpnd));
        return &newMemOpnd;
    }
    (void)pRegSpillMemOperands.emplace(std::pair<PregIdx, MemOperand *>(i, &memOpnd));
    return &memOpnd;
}

Insn &AArch64CGFunc::AppendCall(const MIRSymbol &sym, ListOperand &srcOpnds)
{
    Insn *callInsn = nullptr;
    Operand &targetOpnd = GetOrCreateFuncNameOpnd(sym);
    callInsn = &GetInsnBuilder()->BuildInsn(MOP_xbl, targetOpnd, srcOpnds);
    GetCurBB()->AppendInsn(*callInsn);
    GetCurBB()->SetHasCall();
    return *callInsn;
}

// Output:
// add_with_overflow / sub_with_overflow:
//     w1: parm1
//     w2: parm2
//     adds/subs w0, w1, w2
//     cset w3, vs
//
// mul_with_overflow:
//     w1: parm1
//     w2: parm2
//     smull x0, w0, w1
//     cmp x0, w0, sxtw
//     cset w4, ne
void AArch64CGFunc::SelectOverFlowCall(const IntrinsiccallNode &intrnNode)
{
    DEBUG_ASSERT(intrnNode.NumOpnds() == 2, "must be 2 operands");
    MIRIntrinsicID intrinsic = intrnNode.GetIntrinsic();
    PrimType type = intrnNode.Opnd(0)->GetPrimType();
    PrimType type2 = intrnNode.Opnd(1)->GetPrimType();
    CHECK_FATAL(type == PTY_i32 || type == PTY_u32, "only support i32 or u32 here");
    CHECK_FATAL(type2 == PTY_i32 || type2 == PTY_u32, "only support i32 or u32 here");
    /* deal with the parameters */
    RegOperand &opnd0 = LoadIntoRegister(*HandleExpr(intrnNode, *intrnNode.Opnd(0)),
                                         intrnNode.Opnd(0)->GetPrimType()); /* first argument of intrinsic */
    RegOperand &opnd1 = LoadIntoRegister(*HandleExpr(intrnNode, *intrnNode.Opnd(1)),
                                         intrnNode.Opnd(1)->GetPrimType()); /* second argument of intrinsic */
    auto *retVals = &intrnNode.GetReturnVec();
    CHECK_FATAL(retVals->size() == k2ByteSize, "there must be two return values");
    PregIdx pregIdx = (*retVals)[0].second.GetPregIdx();
    PregIdx pregIdx2 = (*retVals)[1].second.GetPregIdx();
    RegOperand &resReg = GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx));
    RegOperand &resReg2 = GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx2));
    Operand &rflag = GetOrCreateRflag();
    /* arithmetic operation that sets the flags */
    if (intrinsic == INTRN_ADD_WITH_OVERFLOW) {
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_waddsrrr, rflag, resReg, opnd0, opnd1));
        SelectAArch64CSet(resReg2, GetCondOperand(CC_VS), false);
    } else if (intrinsic == INTRN_SUB_WITH_OVERFLOW) {
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wsubsrrr, rflag, resReg, opnd0, opnd1));
        SelectAArch64CSet(resReg2, GetCondOperand(CC_VS), false);
    } else if (intrinsic == INTRN_MUL_WITH_OVERFLOW) {
        /* smull, then check that the 64-bit product equals its sign-extended low 32 bits */
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xsmullrrr, resReg, opnd0, opnd1));
        Operand &sxtw = CreateExtendShiftOperand(ExtendShiftOperand::kSXTW, 0, k3BitSize);
        Insn &cmpInsn = GetInsnBuilder()->BuildInsn(MOP_xwcmprre, rflag, resReg, resReg, sxtw);
        GetCurBB()->AppendInsn(cmpInsn);
        SelectAArch64CSet(resReg2, GetCondOperand(CC_NE), false);
    } else {
        CHECK_FATAL(false, "NYI");
    }
}

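/*
 * Move operand `index` of the intrinsic call into its fixed parameter
 * register: operands 1..6 (operand 0 is the callee) map to R0..R5.
 */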
RegOperand &AArch64CGFunc::LoadOpndIntoPhysicalRegister(const IntrinsiccallNode &intrnNode, uint32 index)
{
    auto &opnd = *intrnNode.Opnd(index);
    auto ptyp = opnd.GetPrimType();
    RegOperand &opndReg = LoadIntoRegister(*HandleExpr(intrnNode, opnd), ptyp);
    AArch64reg regId;
    switch (index - 1) {
        case kFirstReg:
            regId = static_cast<AArch64reg>(R0);
            break;
        case kSecondReg:
            regId = static_cast<AArch64reg>(R1);
            break;
        case kThirdReg:
            regId = static_cast<AArch64reg>(R2);
            break;
        case kFourthReg:
            regId = static_cast<AArch64reg>(R3);
            break;
        case kFifthReg:
            regId = static_cast<AArch64reg>(R4);
            break;
        case kSixthReg:
            regId = static_cast<AArch64reg>(R5);
            break;
        default:
            CHECK_FATAL_FALSE("Unreachable!");
    }
    RegOperand &realReg = GetOrCreatePhysicalRegisterOperand(regId, opndReg.GetSize(), GetRegTyFromPrimTy(ptyp));
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickMovBetweenRegs(ptyp, ptyp), realReg, opndReg));
    return realReg;
}

void AArch64CGFunc::SelectPureCall(const IntrinsiccallNode &intrnNode)
{
    DEBUG_ASSERT(intrnNode.NumOpnds() == 6, "must be 6 operands");
    /* operand 0 is the callee; the remaining operands are moved into fixed parameter registers */
    ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator());
    auto &callee = *intrnNode.Opnd(0);
    auto ptyp = callee.GetPrimType();
    RegOperand &calleeReg = LoadIntoRegister(*HandleExpr(intrnNode, callee), ptyp);
    uint32 i = 1;
    for (; i < kSeventhReg; i++) {
        srcOpnds->PushOpnd(LoadOpndIntoPhysicalRegister(intrnNode, i));
    }
    /* R15 is used in the asm call */
    srcOpnds->PushOpnd(GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(R15),
                                                          GetPointerSize() * kBitsPerByte, kRegTyInt));
    Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_pure_call, calleeReg, *srcOpnds);
    GetCurBB()->AppendInsn(callInsn);
}

void AArch64CGFunc::SelectIntrinsicCall(IntrinsiccallNode &intrinsiccallNode)
{
    MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic();

    if (GetCG()->GenerateVerboseCG()) {
        std::string comment = GetIntrinsicName(intrinsic);
        GetCurBB()->AppendInsn(CreateCommentInsn(comment));
    }
    if (intrinsic == INTRN_ADD_WITH_OVERFLOW || intrinsic == INTRN_SUB_WITH_OVERFLOW ||
        intrinsic == INTRN_MUL_WITH_OVERFLOW) {
        SelectOverFlowCall(intrinsiccallNode);
        return;
    }
    if (intrinsic == maple::INTRN_JS_PURE_CALL) {
        SelectPureCall(intrinsiccallNode);
        return;
    }
}

void AArch64CGFunc::SelectDeoptCall(CallNode &callNode)
{
    MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx());
    MIRSymbol *fsym = GetFunction().GetLocalOrGlobalSymbol(fn->GetStIdx(), false);
    if (GetCG()->GenerateVerboseCG()) {
        const std::string &comment = fsym->GetName();
        GetCurBB()->AppendInsn(CreateCommentInsn(comment));
    }
    ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator());
    SelectParmListWrapper(callNode, *srcOpnds, false);

    Insn &callInsn = AppendCall(*fsym, *srcOpnds);
    const auto &deoptBundleInfo = callNode.GetDeoptBundleInfo();
    for (const auto &elem : deoptBundleInfo) {
        auto valueKind = elem.second.GetMapleValueKind();
        if (valueKind == MapleValue::kPregKind) {
            auto *opnd = GetOrCreateRegOpndFromPregIdx(elem.second.GetPregIdx(), PTY_ref);
            callInsn.AddDeoptBundleInfo(elem.first, *opnd);
        } else if (valueKind == MapleValue::kConstKind) {
            auto *opnd = SelectIntConst(static_cast<const MIRIntConst &>(elem.second.GetConstValue()), callNode);
            callInsn.AddDeoptBundleInfo(elem.first, *opnd);
        } else {
            CHECK_FATAL(false, "not supported currently");
        }
    }
    AppendStackMapInsn(callInsn);
    GetFunction().SetHasCall();
}

void AArch64CGFunc::SelectTailICall(IcallNode &icallNode)
{
    ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator());
    SelectParmListWrapper(icallNode, *srcOpnds, false);

    Operand *srcOpnd = HandleExpr(icallNode, *icallNode.GetNopndAt(0));
    Operand *fptrOpnd = srcOpnd;
    if (fptrOpnd->GetKind() != Operand::kOpdRegister) {
        PrimType ty = icallNode.Opnd(0)->GetPrimType();
        fptrOpnd = &SelectCopy(*srcOpnd, ty, ty);
    }
    DEBUG_ASSERT(fptrOpnd->IsRegister(), "SelectTailICall: function pointer is not a RegOperand");
    RegOperand *regOpnd = static_cast<RegOperand *>(fptrOpnd);
    Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_tail_call_opt_xblr, *regOpnd, *srcOpnds);
    GetCurBB()->AppendInsn(callInsn);
}

Operand *AArch64CGFunc::SelectCclz(IntrinsicopNode &intrnNode)
{
    BaseNode *argexpr = intrnNode.Opnd(0);
    PrimType ptype = argexpr->GetPrimType();
    Operand *opnd = HandleExpr(intrnNode, *argexpr);
    MOperator mop;

    RegOperand &ldDest = CreateRegisterOperandOfType(ptype);
    if (opnd->IsMemoryAccessOperand()) {
        Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd);
        GetCurBB()->AppendInsn(insn);
        opnd = &ldDest;
    } else if (opnd->IsImmediate()) {
        SelectCopyImm(ldDest, *static_cast<ImmOperand *>(opnd), ptype);
        opnd = &ldDest;
    }

    if (GetPrimTypeSize(ptype) == k4ByteSize) {
        mop = MOP_wclz;
    } else {
        mop = MOP_xclz;
    }
    RegOperand &dst = CreateRegisterOperandOfType(ptype);
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, dst, *opnd));
    return &dst;
}

RegOperand *AArch64CGFunc::SelectHeapConstant(IntrinsicopNode &node, Operand &opnd0, Operand &opnd1)
{
    PrimType retType = node.GetPrimType();
    RegOperand &destReg = CreateRegisterOperandOfType(retType);
    MOperator mOp = MOP_heap_const;
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, destReg, opnd0, opnd1));
    return &destReg;
}

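/*
 * If the tagged value is a compile-time immediate, fold the heap-object test
 * to a constant 0/1. Otherwise materialize the expected tag mask
 * 0xFFFF000000000006 into a temporary register (movz plus movk, lsl #48) and
 * emit the tagged_is_heapobject pseudo-instruction.
 */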
RegOperand *AArch64CGFunc::SelectTaggedIsHeapObject(IntrinsicopNode &node, Operand &opnd0, Operand &opnd1)
{
    RegOperand &destReg = CreateRegisterOperandOfType(PTY_i64);
    RegOperand &tmpReg = CreateRegisterOperandOfType(PTY_i64);
    MOperator mOp = MOP_tagged_is_heapobject;
    if (opnd0.IsImmediate()) {
        uint64 value = static_cast<uint64>(static_cast<ImmOperand &>(opnd0).GetValue());
        uint64 heapObjectMask = static_cast<uint64>(static_cast<ImmOperand &>(opnd1).GetValue());
        if (!static_cast<int64>(value & heapObjectMask)) {
            ImmOperand &immValue = CreateImmOperand(1, k64BitSize, true);
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xmovri64, destReg, immValue));
        } else {
            ImmOperand &immValue = CreateImmOperand(0, k64BitSize, true);
            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xmovri64, destReg, immValue));
        }
    } else {
        ImmOperand &heapObjectTagMask = static_cast<ImmOperand &>(opnd1);
        CHECK_FATAL(static_cast<uint64>(heapObjectTagMask.GetValue()) == 0xFFFF000000000006,
                    "unexpected heap object tag mask");
        ImmOperand &immValue6 = CreateImmOperand(6, k16BitSize, false);
        Insn &movInsn1 = GetInsnBuilder()->BuildInsn(MOP_xmovri64, tmpReg, immValue6);
        GetCurBB()->AppendInsn(movInsn1);
        ImmOperand &immValue = CreateImmOperand(65535, k16BitSize, false); // 65535: 0xFFFF, top 16 bits
        BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(48, true);   // 48: left shift 48 bits
        Insn &movInsn2 = GetInsnBuilder()->BuildInsn(MOP_xmovkri16, tmpReg, immValue, *lslOpnd);
        GetCurBB()->AppendInsn(movInsn2);
        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, destReg, opnd0, tmpReg));
    }
    return &destReg;
}

RegOperand *AArch64CGFunc::SelectIsStableElements(IntrinsicopNode &node, Operand &opnd0, Operand &opnd1,
                                                  Operand &opnd2)
{
    RegOperand &destReg = CreateRegisterOperandOfType(PTY_i32);
    MOperator mOp = MOP_is_stable_elements;
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, destReg, opnd0, opnd1, opnd2));
    return &destReg;
}

RegOperand *AArch64CGFunc::SelectHasPendingException(IntrinsicopNode &node, Operand &opnd0, Operand &opnd1,
                                                     Operand &opnd2)
{
    RegOperand &destReg = CreateRegisterOperandOfType(PTY_i64);
    MOperator mOp = MOP_has_pending_exception;
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, destReg, opnd0, opnd1, opnd2));
    return &destReg;
}

RegOperand *AArch64CGFunc::SelectGetHeapConstantTable(IntrinsicopNode &node, Operand &opnd0, Operand &opnd1,
                                                      Operand &opnd2)
{
    PrimType retType = node.GetPrimType();
    RegOperand &destReg = CreateRegisterOperandOfType(retType);
    MOperator mOp = MOP_get_heap_const_table;
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, destReg, opnd0, opnd1, opnd2));
    return &destReg;
}

RegOperand *AArch64CGFunc::SelectTaggedObjectIsString(IntrinsicopNode &node, Operand &opnd0, Operand &opnd1,
                                                      Operand &opnd2, Operand &opnd3, Operand &opnd4)
{
    PrimType retType = node.GetPrimType();
    RegOperand &destReg = CreateRegisterOperandOfType(retType);
    MOperator mOp = MOP_tagged_object_is_string;
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, destReg, opnd0, opnd1, opnd2, opnd3, opnd4));
    return &destReg;
}

RegOperand *AArch64CGFunc::SelectIsCOWArray(IntrinsicopNode &node, Operand &opnd0, Operand &opnd1, Operand &opnd2,
                                            Operand &opnd3, Operand &opnd4, Operand &opnd5)
{
    PrimType retType = node.GetPrimType();
    RegOperand &destReg = CreateRegisterOperandOfType(retType);
    MOperator mOp = MOP_is_cow_array;
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, destReg, opnd0, opnd1, opnd2, opnd3, opnd4, opnd5));
    return &destReg;
}

RegType AArch64CGFunc::GetRegisterType(regno_t reg) const
{
    if (AArch64isa::IsPhysicalRegister(reg)) {
        return AArch64isa::GetRegType(static_cast<AArch64reg>(reg));
    } else if (reg == kRFLAG) {
        return kRegTyCc;
    } else {
        return CGFunc::GetRegisterType(reg);
    }
}

MemOperand &AArch64CGFunc::LoadStructCopyBase(const MIRSymbol &symbol, int64 offset, int dataSize)
{
    /* For struct formals > 16 bytes, this is the pointer to the struct copy. */
    /* Load the base pointer first. */
    RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
    MemOperand *baseMemOpnd = &GetOrCreateMemOpnd(symbol, 0, k64BitSize);
    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *baseMemOpnd));
    /* Create the indirect load mem opnd from the base pointer. */
    return CreateMemOpnd(*vreg, offset, static_cast<uint32>(dataSize));
}

/* For a long branch, insert an unconditional branch.
 * From                       To
 *   cond_br target_label       reverse_cond_br fallthru_label
 *   fallthruBB                 unconditional br target_label
 *                            fallthru_label:
 *                              fallthruBB
 */
void AArch64CGFunc::InsertJumpPad(Insn *insn)
{
    BB *bb = insn->GetBB();
    DEBUG_ASSERT(bb, "instruction has no bb");
    DEBUG_ASSERT(bb->GetKind() == BB::kBBIf || bb->GetKind() == BB::kBBGoto,
                 "instruction is in neither an if bb nor a goto bb");
    if (bb->GetKind() == BB::kBBGoto) {
        return;
    }
    DEBUG_ASSERT(bb->NumSuccs() == k2ByteSize, "if bb should have 2 successors");

    BB *longBrBB = CreateNewBB();

    BB *fallthruBB = bb->GetNext();
    LabelIdx fallthruLBL = fallthruBB->GetLabIdx();
    if (fallthruLBL == 0) {
        fallthruLBL = CreateLabel();
        SetLab2BBMap(static_cast<int32>(fallthruLBL), *fallthruBB);
        fallthruBB->AddLabel(fallthruLBL);
    }

    BB *targetBB;
    if (bb->GetSuccs().front() == fallthruBB) {
        targetBB = bb->GetSuccs().back();
    } else {
        targetBB = bb->GetSuccs().front();
    }
    LabelIdx targetLBL = targetBB->GetLabIdx();
    if (targetLBL == 0) {
        targetLBL = CreateLabel();
        SetLab2BBMap(static_cast<int32>(targetLBL), *targetBB);
        targetBB->AddLabel(targetLBL);
    }

    // Adjust the branch and the CFG.
    bb->RemoveSuccs(*targetBB);
    bb->PushBackSuccs(*longBrBB);
    bb->SetNext(longBrBB);
    // Reverse the conditional branch so that it targets fallthruBB.
    uint32 targetIdx = AArch64isa::GetJumpTargetIdx(*insn);
    MOperator mOp = AArch64isa::FlipConditionOp(insn->GetMachineOpcode());
    insn->SetMOP(AArch64CG::kMd[mOp]);
    LabelOperand &fallthruBBLBLOpnd = GetOrCreateLabelOperand(fallthruLBL);
    insn->SetOperand(targetIdx, fallthruBBLBLOpnd);

    longBrBB->PushBackPreds(*bb);
    longBrBB->PushBackSuccs(*targetBB);
    LabelOperand &targetLBLOpnd = GetOrCreateLabelOperand(targetLBL);
    longBrBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetLBLOpnd));
    longBrBB->SetPrev(bb);
    longBrBB->SetNext(fallthruBB);
    longBrBB->SetKind(BB::kBBGoto);

    fallthruBB->SetPrev(longBrBB);

    targetBB->RemovePreds(*bb);
    targetBB->PushBackPreds(*longBrBB);
}

/* Check the distance between the first insn of the BB with label targLabIdx
 * and the insn with id targId. Return false if the distance is greater than
 * or equal to maxDistance.
 */
bool AArch64CGFunc::DistanceCheck(const BB &bb, LabelIdx targLabIdx, uint32 targId, uint32 maxDistance) const
{
    for (auto *tBB : bb.GetSuccs()) {
        if (tBB->GetLabIdx() != targLabIdx) {
            continue;
        }
        Insn *tInsn = tBB->GetFirstInsn();
        while (tInsn == nullptr || !tInsn->IsMachineInstruction()) {
            if (tInsn == nullptr) {
                tBB = tBB->GetNext();
                if (tBB == nullptr) { /* tailcallopt may make the target block empty */
                    return true;
                }
                tInsn = tBB->GetFirstInsn();
            } else {
                tInsn = tInsn->GetNext();
            }
        }
        uint32 tmp = (tInsn->GetId() > targId) ? (tInsn->GetId() - targId) : (targId - tInsn->GetId());
        return (tmp < maxDistance);
    }
    CHECK_FATAL(false, "CFG error");
}
} /* namespace maplebe */