/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "becommon.h"
#include "x64_cgfunc.h"
#include "x64_reg_info.h"

namespace maplebe {
using namespace maple;
using namespace x64;
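/* Build the allocatable register sets: skip the yieldpoint register (if reserved) and any
 * unavailable register, then classify each remaining physical register as integer or FP. */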
void X64RegInfo::Init()
{
    for (regno_t regNO = kRinvalid; regNO < kMaxRegNum; ++regNO) {
        /* When the yieldpoint is enabled, RYP (R12) cannot be used. */
        if (IsYieldPointReg(static_cast<X64reg>(regNO))) {
            continue;
        }
        if (!x64::IsAvailableReg(static_cast<X64reg>(regNO))) {
            continue;
        }
        if (x64::IsGPRegister(static_cast<X64reg>(regNO))) {
            AddToIntRegs(regNO);
        } else {
            AddToFpRegs(regNO);
        }
        AddToAllRegs(regNO);
    }
}

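/* Record each register in savedRegs on the current function's callee-saved list. */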
void X64RegInfo::SaveCalleeSavedReg(MapleSet<regno_t> savedRegs)
{
    X64CGFunc *x64CGFunc = static_cast<X64CGFunc *>(GetCurrFunction());
    for (auto reg : savedRegs) {
        x64CGFunc->AddtoCalleeSaved(static_cast<X64reg>(reg));
    }
}

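/* Defer to the x64 target description to decide whether a register is callee-saved. */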
bool X64RegInfo::IsCalleeSavedReg(regno_t regno) const
{
    return x64::IsCalleeSavedReg(static_cast<X64reg>(regno));
}

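/* x64 does not currently reserve a yieldpoint register, so no register matches. */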
bool X64RegInfo::IsYieldPointReg(regno_t regno) const
{
    return false;
}

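/* Registers the allocator must never assign: the frame pointer, stack pointer, instruction
 * pointer, and the yieldpoint register if one is reserved. */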
bool X64RegInfo::IsUnconcernedReg(regno_t regNO) const
{
    X64reg reg = static_cast<X64reg>(regNO);
    if (reg == RBP || reg == RSP || reg == RIP) {
        return true;
    }

    /* When the yieldpoint is enabled, RYP (R12) cannot be used. */
    if (IsYieldPointReg(reg)) {
        return true;
    }
    return false;
}

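/* Condition-code and vary-type operands are never allocated; otherwise check the register number. */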
bool X64RegInfo::IsUnconcernedReg(const RegOperand &regOpnd) const
{
    RegType regType = regOpnd.GetRegisterType();
    if (regType == kRegTyCc || regType == kRegTyVary) {
        return true;
    }
    uint32 regNO = regOpnd.GetRegisterNumber();
    return IsUnconcernedReg(regNO);
}

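/* No per-function cleanup is needed on x64. */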
void X64RegInfo::Fini() {}

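/* Materialize the physical register operand of the given size and kind; flag is unused on x64. */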
RegOperand *X64RegInfo::GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, RegType kind, uint32 flag)
{
    return &(GetCurrFunction()->GetOpndBuilder()->CreatePReg(regNO, size, kind));
}

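/* Build the store (register to memory) instruction used when spilling a register;
 * 8-bit and 16-bit floating-point stores are not supported and abort. */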
Insn *X64RegInfo::BuildStrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd)
{
    X64MOP_t mOp = x64::MOP_begin;
    switch (regSize) {
        case k8BitSize:
            mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movb_r_m : x64::MOP_begin;
            break;
        case k16BitSize:
            mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movw_r_m : x64::MOP_begin;
            break;
        case k32BitSize:
            mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movl_r_m : x64::MOP_movfs_r_m;
            break;
        case k64BitSize:
            mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movq_r_m : x64::MOP_movfd_r_m;
            break;
        default:
            CHECK_FATAL(false, "NIY");
            break;
    }
    CHECK_FATAL(mOp != x64::MOP_begin, "NIY");
    Insn &insn = GetCurrFunction()->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
    insn.AddOpndChain(phyOpnd).AddOpndChain(memOpnd);
    return &insn;
}

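/* Build the load (memory to register) instruction used when reloading a spilled register;
 * mirrors BuildStrInsn. */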
Insn *X64RegInfo::BuildLdrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd)
{
    X64MOP_t mOp = x64::MOP_begin;
    switch (regSize) {
        case k8BitSize:
            mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movb_m_r : x64::MOP_begin;
            break;
        case k16BitSize:
            mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movw_m_r : x64::MOP_begin;
            break;
        case k32BitSize:
            mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movl_m_r : x64::MOP_movfs_m_r;
            break;
        case k64BitSize:
            mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movq_m_r : x64::MOP_movfd_m_r;
            break;
        default:
            CHECK_FATAL(false, "NIY");
            break;
    }
    CHECK_FATAL(mOp != x64::MOP_begin, "should not happen");
    Insn &insn = GetCurrFunction()->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
    insn.AddOpndChain(memOpnd).AddOpndChain(phyOpnd);
    return &insn;
}

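/* Release the spill slot reserved for virtual register vrNum. */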
void X64RegInfo::FreeSpillRegMem(regno_t vrNum)
{
    X64CGFunc *x64CGFunc = static_cast<X64CGFunc *>(GetCurrFunction());
    x64CGFunc->FreeSpillRegMem(vrNum);
}

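/* Return the spill slot for virtual register vrNum, creating one of bitSize bits if needed. */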
MemOperand *X64RegInfo::GetOrCreatSpillMem(regno_t vrNum, uint32 bitSize)
{
    X64CGFunc *x64CGFunc = static_cast<X64CGFunc *>(GetCurrFunction());
    return x64CGFunc->GetOrCreatSpillMem(vrNum, bitSize);
}

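/* No offset adjustment is needed on x64: mark the operand as in range and return it unchanged. */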
MemOperand *X64RegInfo::AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, const RegNoPair &regNoPair, bool isDest,
                                                           Insn &insn, bool &isOutOfRange)
{
    isOutOfRange = false;
    return memOpnd;
}

} /* namespace maplebe */