/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "x64_standardize.h"
#include "x64_cg.h"

namespace maplebe {
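/* Table mapping abstract machine opcodes to their x64 counterparts.
 * Each DEFINE_MAPPING entry in x64_abstract_mapping.def expands to one
 * {ABSTRACT_IR, X64_MOP} pair below.
 */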
#define DEFINE_MAPPING(ABSTRACT_IR, X64_MOP, ...) {ABSTRACT_IR, X64_MOP},
std::unordered_map<MOperator, X64MOP_t> x64AbstractMapping = {
#include "x64_abstract_mapping.def"
};

static inline X64MOP_t GetMopFromAbstraceIRMop(MOperator mOp)
{
    auto iter = x64AbstractMapping.find(mOp);
    if (iter == x64AbstractMapping.end()) {
        CHECK_FATAL(false, "NIY mapping");
    }
    CHECK_FATAL(iter->second != x64::MOP_begin, "NIY mapping");
    return iter->second;
}

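/* Lower an abstract mov to the directly mapped x64 mov, commuting the
 * operands from the abstract dest-first order to the x64 src-first order.
 */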
void X64Standardize::StdzMov(maplebe::Insn &insn)
{
    X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode());
    insn.SetMOP(X64CG::kMd[directlyMappingMop]);
    insn.CommuteOperands(kInsnFirstOpnd, kInsnSecondOpnd);
}

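/* Loads and stores map one-to-one onto x64 moves, so they reuse the
 * mov lowering.
 */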
void X64Standardize::StdzStrLdr(Insn &insn)
{
    StdzMov(insn);
}

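/* Lower a three-operand abstract op (dest, src1, src2) to the
 * two-address x64 form "op src2, dest". src1 is dropped here, so it is
 * presumably already placed in dest by an earlier pass.
 */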
void X64Standardize::StdzBasicOp(Insn &insn)
{
    X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode());
    insn.SetMOP(X64CG::kMd[directlyMappingMop]);
    Operand &dest = insn.GetOperand(kInsnFirstOpnd);
    Operand &src2 = insn.GetOperand(kInsnThirdOpnd);
    insn.CleanAllOperand();
    insn.AddOpndChain(src2).AddOpndChain(dest);
}

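/* Lower an abstract unary op to its single-operand x64 form.
 * Floating-point neg has no x64 instruction and is expanded separately
 * in StdzFloatingNeg.
 */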
void X64Standardize::StdzUnaryOp(Insn &insn, CGFunc &cgFunc)
{
    MOperator mOp = insn.GetMachineOpcode();
    if (mOp == abstract::MOP_neg_f_32 || mOp == abstract::MOP_neg_f_64) {
        StdzFloatingNeg(insn, cgFunc);
        return;
    }
    X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(mOp);
    insn.SetMOP(X64CG::kMd[directlyMappingMop]);
    Operand &dest = insn.GetOperand(kInsnFirstOpnd);
    insn.CleanAllOperand();
    insn.AddOpndChain(dest);
}

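/* Lower an abstract conversion. Where x64 has no direct form of the
 * conversion, the switch below first adjusts the register widths the
 * operands are viewed at.
 */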
void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc)
{
    uint32 OpndDesSize = insn.GetDesc()->GetOpndDes(kInsnFirstOpnd)->GetSize();
    uint32 destSize = OpndDesSize;
    uint32 OpndSrcSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize();
    uint32 srcSize = OpndSrcSize;
    switch (insn.GetMachineOpcode()) {
        case abstract::MOP_zext_rr_64_8:
        case abstract::MOP_zext_rr_64_16:
        case abstract::MOP_zext_rr_64_32:
            /* a 32-bit mov implicitly zeroes the upper half of the 64-bit
             * register, so a 32-bit destination suffices */
            destSize = k32BitSize;
            break;
        case abstract::MOP_cvt_f32_u32:
            /* SSE converts only signed integers, so read the
             * zero-extended u32 as a 64-bit source */
            srcSize = k64BitSize;
            break;
        case abstract::MOP_cvt_u32_f32:
            /* likewise, convert into a 64-bit destination and use the
             * low 32 bits */
            destSize = k64BitSize;
            break;
        default:
            break;
    }
    MOperator directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode());
    if (directlyMappingMop != abstract::MOP_undef) {
        insn.SetMOP(X64CG::kMd[directlyMappingMop]);
        Operand *opnd0 = &insn.GetOperand(kInsnSecondOpnd);
        RegOperand *src = static_cast<RegOperand *>(opnd0);
        if (srcSize != OpndSrcSize) {
            src = &cgFunc.GetOpndBuilder()->CreateVReg(src->GetRegisterNumber(), srcSize, src->GetRegisterType());
        }
        Operand *opnd1 = &insn.GetOperand(kInsnFirstOpnd);
        RegOperand *dest = static_cast<RegOperand *>(opnd1);
        if (destSize != OpndDesSize) {
            dest = &cgFunc.GetOpndBuilder()->CreateVReg(dest->GetRegisterNumber(), destSize, dest->GetRegisterType());
        }
        insn.CleanAllOperand();
        insn.AddOpndChain(*src).AddOpndChain(*dest);
    } else {
        CHECK_FATAL(false, "NIY mapping");
    }
}

/* x86 has no floating-point neg instruction, so flip the sign bit in a
 * general-purpose register instead:
 * neg_f operand0 operand1
 * ==>
 * movd/movq xmm0 R1
 * 64: movabsq 0x8000000000000000 R2
 *     xorq R2 R1
 * 32: xorl 0x80000000 R1
 * movd/movq R1 xmm0
 */
void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc)
{
    MOperator mOp = insn.GetMachineOpcode();
    uint32 bitSize = mOp == abstract::MOP_neg_f_32 ? k32BitSize : k64BitSize;

    // mov dest -> tmpOperand0
    MOperator movOp = mOp == abstract::MOP_neg_f_32 ? x64::MOP_movd_fr_r : x64::MOP_movq_fr_r;
    RegOperand *tmpOperand0 = &cgFunc.GetOpndBuilder()->CreateVReg(bitSize, kRegTyInt);
    Insn &movInsn0 = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]);
    Operand &dest = insn.GetOperand(kInsnFirstOpnd);
    movInsn0.AddOpndChain(dest).AddOpndChain(*tmpOperand0);
    insn.GetBB()->InsertInsnBefore(insn, movInsn0);

    // 32 : xorl 0x80000000 tmpOperand0
    // 64 : movabs 0x8000000000000000 tmpOperand1
    //      xorq tmpOperand1 tmpOperand0
    /* imm is the sign-bit mask for the given width */
    ImmOperand &imm = cgFunc.GetOpndBuilder()->CreateImm(bitSize, (static_cast<int64>(1) << (bitSize - 1)));
    if (mOp == abstract::MOP_neg_f_64) {
        /* xor takes no 64-bit immediate, so materialize the mask with movabs first */
        Operand *tmpOperand1 = &cgFunc.GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt);
        Insn &movabs = cgFunc.GetInsnBuilder()->BuildInsn(x64::MOP_movabs_i_r, X64CG::kMd[x64::MOP_movabs_i_r]);
        movabs.AddOpndChain(imm).AddOpndChain(*tmpOperand1);
        insn.GetBB()->InsertInsnBefore(insn, movabs);

        MOperator xorOp = x64::MOP_xorq_r_r;
        Insn &xorq = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]);
        xorq.AddOpndChain(*tmpOperand1).AddOpndChain(*tmpOperand0);
        insn.GetBB()->InsertInsnBefore(insn, xorq);
    } else {
        MOperator xorOp = x64::MOP_xorl_i_r;
        Insn &xorl = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]);
        xorl.AddOpndChain(imm).AddOpndChain(*tmpOperand0);
        insn.GetBB()->InsertInsnBefore(insn, xorl);
    }

    // mov tmpOperand0 -> dest
    Insn &movBack = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]);
    movBack.AddOpndChain(*tmpOperand0).AddOpndChain(dest);
    insn.GetBB()->InsertInsnBefore(insn, movBack);

    insn.GetBB()->RemoveInsn(insn);
}

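/* Lower an abstract shift. x64 variable-count shifts take the count in
 * CL, so the count operand is narrowed to 8 bits and copied into CL
 * before the shift itself is rebuilt as "shift %cl, dest".
 */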
void X64Standardize::StdzShiftOp(Insn &insn, CGFunc &cgFunc)
{
    RegOperand *countOpnd = static_cast<RegOperand *>(&insn.GetOperand(kInsnThirdOpnd));
    /* narrow the count operand to PTY_u8 */
    if (countOpnd->GetSize() != GetPrimTypeBitSize(PTY_u8)) {
        countOpnd = &cgFunc.GetOpndBuilder()->CreateVReg(countOpnd->GetRegisterNumber(), GetPrimTypeBitSize(PTY_u8),
                                                         countOpnd->GetRegisterType());
    }
    /* copy the count operand into cl (the low byte of rcx) */
    RegOperand &clOpnd = cgFunc.GetOpndBuilder()->CreatePReg(x64::RCX, GetPrimTypeBitSize(PTY_u8), kRegTyInt);
    X64MOP_t copyMop = x64::MOP_movb_r_r;
    Insn &copyInsn = cgFunc.GetInsnBuilder()->BuildInsn(copyMop, X64CG::kMd[copyMop]);
    copyInsn.AddOpndChain(*countOpnd).AddOpndChain(clOpnd);
    insn.GetBB()->InsertInsnBefore(insn, copyInsn);
    /* shift OP */
    X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode());
    insn.SetMOP(X64CG::kMd[directlyMappingMop]);
    RegOperand &destOpnd = static_cast<RegOperand &>(insn.GetOperand(kInsnFirstOpnd));
    insn.CleanAllOperand();
    insn.AddOpndChain(clOpnd).AddOpndChain(destOpnd);
}

} // namespace maplebe