/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "x64_standardize.h"
#include "x64_isa.h"
#include "x64_cg.h"
#include "insn.h"

namespace maplebe {
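// Build the abstract-IR -> x64 opcode lookup table: each DEFINE_MAPPING entry
// in x64_abstract_mapping.def expands to one {abstractMop, x64Mop} pair.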
#define DEFINE_MAPPING(ABSTRACT_IR, X64_MOP, ...) {ABSTRACT_IR, X64_MOP},
std::unordered_map<MOperator, X64MOP_t> x64AbstractMapping = {
#include "x64_abstract_mapping.def"
};

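// Resolve an abstract opcode to its x64 counterpart; unmapped or
// not-yet-implemented opcodes abort compilation.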
static inline X64MOP_t GetMopFromAbstraceIRMop(MOperator mOp)
{
    auto iter = x64AbstractMapping.find(mOp);
    CHECK_FATAL(iter != x64AbstractMapping.end(), "NIY mapping");
    CHECK_FATAL(iter->second != x64::MOP_begin, "NIY mapping");
    return iter->second;
}

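// Abstract mov lists the destination first, while x64 (AT&T order) lists the
// source first, so the two operands are commuted after the opcode is mapped.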
void X64Standardize::StdzMov(maplebe::Insn &insn)
{
    X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode());
    insn.SetMOP(X64CG::kMd[directlyMappingMop]);
    insn.CommuteOperands(kInsnFirstOpnd, kInsnSecondOpnd);
}

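// Abstract loads and stores map onto x64 mov forms, so they reuse the mov
// standardization (including the operand commute).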
void X64Standardize::StdzStrLdr(Insn &insn)
{
    StdzMov(insn);
}

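// Abstract basic ops are three-address (dest, src1, src2); x64 arithmetic is
// two-address (op src2, dest), so only src2 and dest are kept and dest is
// expected to already hold src1.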
void X64Standardize::StdzBasicOp(Insn &insn)
{
    X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode());
    insn.SetMOP(X64CG::kMd[directlyMappingMop]);
    Operand &dest = insn.GetOperand(kInsnFirstOpnd);
    Operand &src2 = insn.GetOperand(kInsnThirdOpnd);
    insn.CleanAllOperand();
    insn.AddOpndChain(src2).AddOpndChain(dest);
}

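// Unary ops map directly, except floating-point neg, which has no x64
// instruction and is expanded separately.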
void X64Standardize::StdzUnaryOp(Insn &insn, CGFunc &cgFunc)
{
    MOperator mOp = insn.GetMachineOpcode();
    if (mOp == abstract::MOP_neg_f_32 || mOp == abstract::MOP_neg_f_64) {
        StdzFloatingNeg(insn, cgFunc);
        return;
    }
    X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(mOp);
    insn.SetMOP(X64CG::kMd[directlyMappingMop]);
    Operand &dest = insn.GetOperand(kInsnFirstOpnd);
    insn.CleanAllOperand();
    insn.AddOpndChain(dest);
}

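// Most conversions map one-to-one, but a few adjust operand widths first:
// zero-extension to 64 bits uses a 32-bit destination because writing a 32-bit
// register implicitly clears the upper 32 bits, and unsigned 32-bit <-> f32
// conversions go through 64-bit registers since x64 only provides signed
// convert instructions.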
void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc)
{
    uint32 OpndDesSize = insn.GetDesc()->GetOpndDes(kInsnFirstOpnd)->GetSize();
    uint32 destSize = OpndDesSize;
    uint32 OpndSrcSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize();
    uint32 srcSize = OpndSrcSize;
    switch (insn.GetMachineOpcode()) {
        case abstract::MOP_zext_rr_64_8:
        case abstract::MOP_zext_rr_64_16:
        case abstract::MOP_zext_rr_64_32:
            destSize = k32BitSize;
            break;
        case abstract::MOP_cvt_f32_u32:
            srcSize = k64BitSize;
            break;
        case abstract::MOP_cvt_u32_f32:
            destSize = k64BitSize;
            break;
        default:
            break;
    }
    MOperator directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode());
    if (directlyMappingMop != abstract::MOP_undef) {
        insn.SetMOP(X64CG::kMd[directlyMappingMop]);
        Operand *opnd0 = &insn.GetOperand(kInsnSecondOpnd);
        RegOperand *src = static_cast<RegOperand *>(opnd0);
        if (srcSize != OpndSrcSize) {
            src = &cgFunc.GetOpndBuilder()->CreateVReg(src->GetRegisterNumber(), srcSize, src->GetRegisterType());
        }
        Operand *opnd1 = &insn.GetOperand(kInsnFirstOpnd);
        RegOperand *dest = static_cast<RegOperand *>(opnd1);
        if (destSize != OpndDesSize) {
            dest = &cgFunc.GetOpndBuilder()->CreateVReg(dest->GetRegisterNumber(), destSize, dest->GetRegisterType());
        }
        insn.CleanAllOperand();
        insn.AddOpndChain(*src).AddOpndChain(*dest);
    } else {
        CHECK_FATAL(false, "NIY mapping");
    }
}

/* x86 has no floating-point neg instruction, so neg_f is expanded into an
 * integer xor that flips the sign bit:
 * neg_f operand0 operand1
 * ==>
 * movd xmm0 R1
 * 64: movabsq 0x8000000000000000 R2
 *     xorq R2 R1
 * 32: xorl 0x80000000 R1
 * movd R1 xmm0
 */
void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc)
{
    MOperator mOp = insn.GetMachineOpcode();
    uint32 bitSize = mOp == abstract::MOP_neg_f_32 ? k32BitSize : k64BitSize;

    // mov dest -> tmpOperand0
    MOperator movOp = mOp == abstract::MOP_neg_f_32 ? x64::MOP_movd_fr_r : x64::MOP_movq_fr_r;
    RegOperand *tmpOperand0 = &cgFunc.GetOpndBuilder()->CreateVReg(bitSize, kRegTyInt);
    Insn &movInsn0 = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]);
    Operand &dest = insn.GetOperand(kInsnFirstOpnd);
    movInsn0.AddOpndChain(dest).AddOpndChain(*tmpOperand0);
    insn.GetBB()->InsertInsnBefore(insn, movInsn0);

    // 32 : xorl 0x80000000 tmpOperand0
    // 64 : movabs 0x8000000000000000 tmpOperand1
    //      xorq tmpOperand1 tmpOperand0
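    // Immediate with only the sign bit set (0x80000000 / 0x8000000000000000).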
    ImmOperand &imm = cgFunc.GetOpndBuilder()->CreateImm(bitSize, (static_cast<int64>(1) << (bitSize - 1)));
    if (mOp == abstract::MOP_neg_f_64) {
        Operand *tmpOperand1 = &cgFunc.GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt);
        Insn &movabs = cgFunc.GetInsnBuilder()->BuildInsn(x64::MOP_movabs_i_r, X64CG::kMd[x64::MOP_movabs_i_r]);
        movabs.AddOpndChain(imm).AddOpndChain(*tmpOperand1);
        insn.GetBB()->InsertInsnBefore(insn, movabs);

        MOperator xorOp = x64::MOP_xorq_r_r;
        Insn &xorInsn = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]);
        xorInsn.AddOpndChain(*tmpOperand1).AddOpndChain(*tmpOperand0);
        insn.GetBB()->InsertInsnBefore(insn, xorInsn);
    } else {
        MOperator xorOp = x64::MOP_xorl_i_r;
        Insn &xorInsn = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]);
        xorInsn.AddOpndChain(imm).AddOpndChain(*tmpOperand0);
        insn.GetBB()->InsertInsnBefore(insn, xorInsn);
    }

    // mov tmpOperand0 -> dest
    Insn &movInsn1 = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]);
    movInsn1.AddOpndChain(*tmpOperand0).AddOpndChain(dest);
    insn.GetBB()->InsertInsnBefore(insn, movInsn1);

    insn.GetBB()->RemoveInsn(insn);
}

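// x64 variable-count shifts take the count in CL, so the count operand is
// narrowed to 8 bits and copied into RCX before the shift itself is issued.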
void X64Standardize::StdzShiftOp(Insn &insn, CGFunc &cgFunc)
{
    RegOperand *countOpnd = static_cast<RegOperand *>(&insn.GetOperand(kInsnThirdOpnd));
    /* count operand cvt -> PTY_u8 */
    if (countOpnd->GetSize() != GetPrimTypeBitSize(PTY_u8)) {
        countOpnd = &cgFunc.GetOpndBuilder()->CreateVReg(countOpnd->GetRegisterNumber(), GetPrimTypeBitSize(PTY_u8),
                                                         countOpnd->GetRegisterType());
    }
    /* copy count operand to cl(rcx) register */
    RegOperand &clOpnd = cgFunc.GetOpndBuilder()->CreatePReg(x64::RCX, GetPrimTypeBitSize(PTY_u8), kRegTyInt);
    X64MOP_t copyMop = x64::MOP_movb_r_r;
    Insn &copyInsn = cgFunc.GetInsnBuilder()->BuildInsn(copyMop, X64CG::kMd[copyMop]);
    copyInsn.AddOpndChain(*countOpnd).AddOpndChain(clOpnd);
    insn.GetBB()->InsertInsnBefore(insn, copyInsn);
    /* shift OP */
    X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode());
    insn.SetMOP(X64CG::kMd[directlyMappingMop]);
    RegOperand &destOpnd = static_cast<RegOperand &>(insn.GetOperand(kInsnFirstOpnd));
    insn.CleanAllOperand();
    insn.AddOpndChain(clOpnd).AddOpndChain(destOpnd);
}

} // namespace maplebe