1 /*
2 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 /*
17 Register file implementation.
18 Reserve registers.
19 */
20
21 #include "registers_description.h"
22 #include "target/aarch32/target.h"
23 #include "regfile.h"
24
25 namespace ark::compiler::aarch32 {
26 /**
27 * Default aarch32 calling convention registers
28 * Callee
29 * r4-r11,r14
30 * d8-d15
31 * Caller
32 * (r0-r3),r12
33 * d0-d7
34 */
Aarch32RegisterDescription(ArenaAllocator * allocator)35 Aarch32RegisterDescription::Aarch32RegisterDescription(ArenaAllocator *allocator)
36 : RegistersDescription(allocator, Arch::AARCH32),
37 aarch32RegList_(allocator->Adapter()),
38 usedRegs_(allocator->Adapter())
39 {
40 // Initialize Masm
41 for (uint32_t i = 0; i <= MAX_NUM_REGS; ++i) {
42 aarch32RegList_.emplace_back(Reg(i, INT32_TYPE));
43 aarch32RegList_.emplace_back(Reg(i, FLOAT32_TYPE));
44 }
45
46 for (auto i = vixl::aarch32::r4.GetCode(); i < vixl::aarch32::r8.GetCode(); ++i) {
47 callerSavedv_.set(i);
48 }
49 }
50
// Returns true once the register list has been populated (i.e. the
// description was fully constructed).
bool Aarch32RegisterDescription::IsValid() const
{
    return !aarch32RegList_.empty();
}
55
IsRegUsed(ArenaVector<Reg> vecReg,Reg reg)56 bool Aarch32RegisterDescription::IsRegUsed(ArenaVector<Reg> vecReg, Reg reg)
57 {
58 auto equality = [reg](Reg in) { return (reg.GetId() == in.GetId()) && (reg.GetType() == in.GetType()); };
59 return (std::find_if(vecReg.begin(), vecReg.end(), equality) != vecReg.end());
60 }
61
62 /* static */
IsTmp(Reg reg)63 bool Aarch32RegisterDescription::IsTmp(Reg reg)
64 {
65 if (reg.IsScalar()) {
66 for (auto it : AARCH32_TMP_REG) {
67 if (it == reg.GetId()) {
68 return true;
69 }
70 }
71 return false;
72 }
73 ASSERT(reg.IsFloat());
74 for (auto it : AARCH32_TMP_VREG) {
75 if (it == reg.GetId()) {
76 return true;
77 }
78 }
79 return false;
80 }
81
82 // Reg Mask
83 // r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15
84 // -dwr0,-dwr1,-dwr2,-dwr3,-dwr4,---dwr8,---dwr6,---dwr7 <- double word
85 // r0,r1,r2,r3,r4,r5,r6,r7,r8,r9, fp+tmp, sp+ip, lr+pc
86 // |----------------------------| <- available for regalloc
// r0, r2, r4, r6, r8 - marked to be available
GetDefaultRegMask() const88 RegMask Aarch32RegisterDescription::GetDefaultRegMask() const
89 {
90 // Set all to 1
91 RegMask regMask;
92 regMask.set();
93 for (size_t i = 0; i < AVAILABLE_DOUBLE_WORD_REGISTERS; ++i) {
94 regMask.reset(i * 2U);
95 }
96 regMask.set(GetThreadReg(Arch::AARCH32));
97 return regMask;
98 }
99
GetVRegMask()100 VRegMask Aarch32RegisterDescription::GetVRegMask()
101 {
102 VRegMask vregMask;
103 for (auto vregCode : AARCH32_TMP_VREG) {
104 vregMask.set(vregCode);
105 }
106 // Only d0-d15 available for alloc
107 // They mapped on s0-s31 same, like scalar:
108 for (size_t i = 0; i < vregMask.size() / 2U; ++i) {
109 vregMask.set(i * 2U + 1);
110 }
111 return vregMask;
112 }
113
SupportMapping(uint32_t type)114 bool Aarch32RegisterDescription::SupportMapping(uint32_t type)
115 {
116 // Current implementation does not support vreg-vreg mapping
117 if ((type & (RegMapping::VECTOR_VECTOR | RegMapping::FLOAT_FLOAT)) != 0U) {
118 return false;
119 }
120 // Scalar and float registers lay in different registers
121 if ((type & (RegMapping::SCALAR_VECTOR | RegMapping::SCALAR_FLOAT)) != 0U) {
122 return false;
123 }
124 // Supported mapping for upper half register-parts:
125 // (type & RegMapping::SCALAR_SCALAR != 0)
126 return true;
127 }
128
GetCalleeSaved()129 ArenaVector<Reg> Aarch32RegisterDescription::GetCalleeSaved()
130 {
131 ArenaVector<Reg> out(GetAllocator()->Adapter());
132 ASSERT(calleeSaved_.size() == calleeSavedv_.size());
133 for (size_t i = 0; i < calleeSaved_.size(); ++i) {
134 if (calleeSaved_.test(i)) {
135 out.emplace_back(Reg(i, INT32_TYPE));
136 }
137 if ((calleeSavedv_.test(i))) {
138 out.emplace_back(Reg(i, FLOAT32_TYPE));
139 }
140 }
141 return out;
142 }
143
// Resets both callee-saved masks to their architecture defaults.
// The `regs` argument is intentionally ignored — the masks are
// unconditionally restored, not derived from the caller's list.
void Aarch32RegisterDescription::SetCalleeSaved([[maybe_unused]] const ArenaVector<Reg> &regs)
{
    calleeSaved_ = CALLEE_SAVED;
    calleeSavedv_ = CALLEE_SAVEDV;
}
149
// Records the set of registers actually used by the compiled code and prunes
// the callee/caller-saved masks accordingly: registers that are unused (or
// are reserved temporaries) need not be preserved. Also selects the
// "alignment" registers used for stack-alignment padding.
void Aarch32RegisterDescription::SetUsedRegs(const ArenaVector<Reg> &regs)
{
    usedRegs_ = regs;

    // Scalar and vector masks are expected to have matching widths.
    ASSERT(calleeSaved_.size() == callerSaved_.size());
    ASSERT(calleeSavedv_.size() == callerSavedv_.size());

    // Default alignment register for both conventions is r10.
    allignmentRegCallee_ = vixl::aarch32::r10.GetCode();
    // NOTE (pishin) need to resolve conflict
    allignmentRegCaller_ = vixl::aarch32::r10.GetCode();
    for (size_t i = 0; i < calleeSaved_.size(); ++i) {
        // IsRegUsed use used_regs_ variable
        // Scalar usage is queried with the 64-bit type — presumably so a use
        // of either half of a register pair counts; confirm against callers.
        bool scalarUsed = IsRegUsed(usedRegs_, Reg(i, INT64_TYPE));
        bool isTmp = IsTmp(Reg(i, INT32_TYPE));
        // Unused or temporary callee-saved scalars are dropped from the mask;
        // the last register dropped becomes the callee alignment register.
        if ((!scalarUsed && ((calleeSaved_.test(i)))) || isTmp) {
            calleeSaved_.reset(i);
            allignmentRegCallee_ = i;
        }
        // Unused caller-saved scalars can serve as the caller alignment
        // register (the caller-saved mask itself is left untouched here).
        if (!scalarUsed && ((callerSaved_.test(i)))) {
            allignmentRegCaller_ = i;
        }
        bool isVtmp = IsTmp(Reg(i, FLOAT32_TYPE));

        // Vector usage is queried with the 64-bit float (d-register) type.
        bool vectorUsed = IsRegUsed(usedRegs_, Reg(i, FLOAT64_TYPE));
        if ((!vectorUsed && ((calleeSavedv_.test(i)))) || isVtmp) {
            calleeSavedv_.reset(i);
        }
        if (!vectorUsed && ((callerSavedv_.test(i)))) {
            callerSavedv_.reset(i);
        }
        // The pair handling below only applies inside the double-word range.
        if (i > (AVAILABLE_DOUBLE_WORD_REGISTERS << 1U)) {
            continue;
        }
        // Drop the odd (upper) half of an unused callee-saved pair as well.
        if (!scalarUsed && ((calleeSaved_.test(i + 1)))) {
            calleeSaved_.reset(i + 1);
        }
    }

    // pc is never treated as save/restorable.
    calleeSaved_.reset(vixl::aarch32::pc.GetCode());
    callerSaved_.reset(vixl::aarch32::pc.GetCode());
}
191
// Returns the current caller-saved (clobbered-across-calls) scalar mask.
RegMask Aarch32RegisterDescription::GetCallerSavedRegMask() const
{
    return callerSaved_;
}
196
// Returns the current caller-saved vector (floating-point) register mask.
VRegMask Aarch32RegisterDescription::GetCallerSavedVRegMask() const
{
    return callerSavedv_;
}
201
IsCalleeRegister(Reg reg)202 bool Aarch32RegisterDescription::IsCalleeRegister(Reg reg)
203 {
204 bool isFp = reg.IsFloat();
205 return reg.GetId() >= GetFirstCalleeReg(Arch::AARCH32, isFp) &&
206 reg.GetId() <= GetLastCalleeReg(Arch::AARCH32, isFp);
207 }
208
// AArch32 has no architectural zero register; an invalid register is
// returned as the sentinel.
Reg Aarch32RegisterDescription::GetZeroReg() const
{
    return INVALID_REGISTER;
}
213
// No register is a zero register on AArch32 (see GetZeroReg), so this is
// always false; the argument is deliberately unused.
bool Aarch32RegisterDescription::IsZeroReg([[maybe_unused]] Reg reg) const
{
    return false;
}
218
// No dedicated scalar temp register is exposed by this description;
// callers get the invalid-id sentinel.
Reg::RegIDType Aarch32RegisterDescription::GetTempReg()
{
    return INVALID_REG_ID;
}
223
// No dedicated vector temp register is exposed by this description;
// callers get the invalid-id sentinel.
Reg::RegIDType Aarch32RegisterDescription::GetTempVReg()
{
    return INVALID_REG_ID;
}
228
// Returns the current callee-saved scalar register mask.
RegMask Aarch32RegisterDescription::GetCalleeSavedR()
{
    return calleeSaved_;
}
// Returns the current callee-saved vector register mask.
VRegMask Aarch32RegisterDescription::GetCalleeSavedV()
{
    return calleeSavedv_;
}
// Returns the current caller-saved scalar register mask.
RegMask Aarch32RegisterDescription::GetCallerSavedR()
{
    return callerSaved_;
}
// Returns the current caller-saved vector register mask.
VRegMask Aarch32RegisterDescription::GetCallerSavedV()
{
    return callerSavedv_;
}
245
GetAligmentReg(bool isCallee)246 uint8_t Aarch32RegisterDescription::GetAligmentReg(bool isCallee)
247 {
248 auto allignmentReg = isCallee ? allignmentRegCallee_ : allignmentRegCaller_;
249 ASSERT(allignmentReg != UNDEF_REG);
250 return allignmentReg;
251 }
252
253 } // namespace ark::compiler::aarch32
254