/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
Register file implementation.
Reserve registers.
*/

#include "registers_description.h"
#include "target/aarch64/target.h"
#include "regfile.h"

namespace ark::compiler::aarch64 {
Aarch64RegisterDescription::Aarch64RegisterDescription(ArenaAllocator *allocator)
    : RegistersDescription(allocator, Arch::AARCH64), usedRegs_(allocator->Adapter())
{
}

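// Returns true if a register with the same id and type as `reg` is present in `vecReg`.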
bool Aarch64RegisterDescription::IsRegUsed(ArenaVector<Reg> vecReg, Reg reg)
{
    auto equality = [reg](Reg in) { return (reg.GetId() == in.GetId()) && (reg.GetType() == in.GetType()); };
    return (std::find_if(vecReg.begin(), vecReg.end(), equality) != vecReg.end());
}

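// Decodes the callee-saved scalar and vector CPURegLists into a single vector of Reg:
// bit i of each list corresponds to register number i (FLOAT64_TYPE for vector registers,
// INT64_TYPE for scalars).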
ArenaVector<Reg> Aarch64RegisterDescription::GetCalleeSaved()
{
    ArenaVector<Reg> out(GetAllocator()->Adapter());
    for (uint32_t i = 0; i <= MAX_NUM_REGS; ++i) {
        if ((calleeSavedv_.GetList() & (UINT64_C(1) << i)) != 0) {
            out.emplace_back(Reg(i, FLOAT64_TYPE));
        }
        if (i == MAX_NUM_REGS) {
            break;
        }
        if ((calleeSaved_.GetList() & (UINT64_C(1) << i)) != 0) {
            out.emplace_back(Reg(i, INT64_TYPE));
        }
    }
    return out;
}

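// Sets the callee-saved lists to exactly the registers present in `regs` (per register class),
// then drops the return-value register and, when it is reserved, THREAD_REG.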
void Aarch64RegisterDescription::SetCalleeSaved(const ArenaVector<Reg> &regs)
{
    calleeSaved_ = vixl::aarch64::kCalleeSaved;
    calleeSavedv_ = vixl::aarch64::kCalleeSavedV;

    for (uint32_t i = 0; i < MAX_NUM_REGS; ++i) {
        bool vectorUsed = IsRegUsed(regs, Reg(i, FLOAT64_TYPE));
        if (vectorUsed) {
            calleeSavedv_.Combine(i);
        } else {
            calleeSavedv_.Remove(i);
        }
        bool scalarUsed = IsRegUsed(regs, Reg(i, INT64_TYPE));
        if (scalarUsed) {
            calleeSaved_.Combine(i);
        } else {
            calleeSaved_.Remove(i);
        }
    }
    // Remove the return-value register (x0) from the callee-saved list
    calleeSaved_.Remove(0);

    // We can safely skip saving THREAD_REG if it is in the regmask
    // of the regdescr (i.e. regalloc cannot use it).
    if (GetRegMask().Test(GetThreadReg(Arch::AARCH64))) {
        calleeSaved_.Remove(GetThreadReg(Arch::AARCH64));
    }
}

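// Records the registers actually used by the compiled code and trims both the callee- and
// caller-saved lists down to that set. A vector register that drops out of a list is remembered
// so it can later serve as an alignment/padding slot (see GetAlignmentVreg).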
void Aarch64RegisterDescription::SetUsedRegs(const ArenaVector<Reg> &regs)
{
    usedRegs_ = regs;

    // Reset the lists so that stale data from a previous call is not reused
    calleeSaved_ = vixl::aarch64::kCalleeSaved;
    callerSaved_ = vixl::aarch64::kCallerSaved;

    // Remove the return-value register from the callee-saved list
    calleeSaved_.Remove(0);

    // We can safely skip saving THREAD_REG if it is in the regmask
    // of the regdescr (i.e. regalloc cannot use it).
    if (GetRegMask().Test(GetThreadReg(Arch::AARCH64))) {
        calleeSaved_.Remove(GetThreadReg(Arch::AARCH64));
    }

    calleeSavedv_ = vixl::aarch64::kCalleeSavedV;
    callerSavedv_ = vixl::aarch64::kCallerSavedV;

    for (uint32_t i = 0; i <= MAX_NUM_REGS; ++i) {
        // IsRegUsed checks against the just-assigned usedRegs_
        bool scalarUsed = IsRegUsed(usedRegs_, Reg(i, INT64_TYPE));
        if (!scalarUsed && ((calleeSaved_.GetList() & (UINT64_C(1) << i)) != 0)) {
            calleeSaved_.Remove(i);
        }
        if (!scalarUsed && ((callerSaved_.GetList() & (UINT64_C(1) << i)) != 0)) {
            callerSaved_.Remove(i);
        }
        bool vectorUsed = IsRegUsed(usedRegs_, Reg(i, FLOAT64_TYPE));
        if (!vectorUsed && ((calleeSavedv_.GetList() & (UINT64_C(1) << i)) != 0)) {
            calleeSavedv_.Remove(i);
            allignmentVregCallee_ = i;
        }
        if (!vectorUsed && ((callerSavedv_.GetList() & (UINT64_C(1) << i)) != 0)) {
            callerSavedv_.Remove(i);
            allignmentVregCaller_ = i;
        }
    }
}

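// Bit masks of the currently tracked caller-saved scalar and vector registers.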
RegMask Aarch64RegisterDescription::GetCallerSavedRegMask() const
{
    return RegMask(callerSaved_.GetList());
}

VRegMask Aarch64RegisterDescription::GetCallerSavedVRegMask() const
{
    return VRegMask(callerSavedv_.GetList());
}

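// A register is callee-saved if its id falls within the architecture's callee-saved
// range for its register class (scalar or floating-point).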
bool Aarch64RegisterDescription::IsCalleeRegister(Reg reg)
{
    bool isFp = reg.IsFloat();
    return reg.GetId() >= GetFirstCalleeReg(Arch::AARCH64, isFp) &&
           reg.GetId() <= GetLastCalleeReg(Arch::AARCH64, isFp);
}

Reg Aarch64RegisterDescription::GetZeroReg() const
{
    return Target(Arch::AARCH64).GetZeroReg();
}

bool Aarch64RegisterDescription::IsZeroReg(Reg reg) const
{
    return reg.IsValid() && reg.IsScalar() && reg.GetId() == GetZeroReg().GetId();
}

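// Ids of the highest-numbered scratch (temporary) scalar and vector registers
// reserved for the code generator.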
Reg::RegIDType Aarch64RegisterDescription::GetTempReg()
{
    return compiler::arch_info::arm64::TEMP_REGS.GetMaxRegister();
}

Reg::RegIDType Aarch64RegisterDescription::GetTempVReg()
{
    return compiler::arch_info::arm64::TEMP_FP_REGS.GetMaxRegister();
}

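// Registers that the register allocator must never hand out: codegen temporaries,
// the zero register, THREAD_REG, the frame pointer (x29) and the link register (lr).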
RegMask Aarch64RegisterDescription::GetDefaultRegMask() const
{
    RegMask regMask = compiler::arch_info::arm64::TEMP_REGS;
    regMask.set(Target(Arch::AARCH64).GetZeroReg().GetId());
    regMask.set(GetThreadReg(Arch::AARCH64));
    regMask.set(vixl::aarch64::x29.GetCode());
    regMask.set(vixl::aarch64::lr.GetCode());
    return regMask;
}

VRegMask Aarch64RegisterDescription::GetVRegMask()
{
    return compiler::arch_info::arm64::TEMP_FP_REGS;
}

// Check which register-to-register mappings are supported
bool Aarch64RegisterDescription::SupportMapping(uint32_t type)
{
    // The current implementation does not support vector-to-vector or float-to-float mapping
    if ((type & (RegMapping::VECTOR_VECTOR | RegMapping::FLOAT_FLOAT)) != 0U) {
        return false;
    }
    // Scalar and floating-point values reside in separate register files
    if ((type & (RegMapping::SCALAR_VECTOR | RegMapping::SCALAR_FLOAT)) != 0U) {
        return false;
    }
    return true;
}

bool Aarch64RegisterDescription::IsValid() const
{
    return true;
}

vixl::aarch64::CPURegList Aarch64RegisterDescription::GetCalleeSavedR()
{
    return calleeSaved_;
}

vixl::aarch64::CPURegList Aarch64RegisterDescription::GetCalleeSavedV()
{
    return calleeSavedv_;
}

vixl::aarch64::CPURegList Aarch64RegisterDescription::GetCallerSavedR()
{
    return callerSaved_;
}

vixl::aarch64::CPURegList Aarch64RegisterDescription::GetCallerSavedV()
{
    return callerSavedv_;
}

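// Returns a vector register that SetUsedRegs excluded from the corresponding saved list
// (i.e. one that is known to be free); it can be used as a padding slot to keep
// register save/restore areas aligned.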
uint8_t Aarch64RegisterDescription::GetAlignmentVreg(bool isCallee)
{
    auto allignmentVreg = isCallee ? allignmentVregCallee_ : allignmentVregCaller_;
    // NOTE(Ishin Pavel): fix if allignmentVreg == UNDEF_VREG
    ASSERT(allignmentVreg != UNDEF_VREG);

    return allignmentVreg;
}

}  // namespace ark::compiler::aarch64