/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "calling_convention_arm64.h"

#include <android-base/logging.h>

#include "handle_scope-inl.h"
#include "utils/arm64/managed_register_arm64.h"

namespace art {
namespace arm64 {

static_assert(kArm64PointerSize == PointerSize::k64, "Unexpected ARM64 pointer size");

// Up to eight float-like (float, double) arguments can be passed in registers
// (v0-v7, per AAPCS64); the rest must go on the stack.
constexpr size_t kMaxFloatOrDoubleRegisterArguments = 8u;
// Up to eight integer-like (pointer, object, long, int, short, bool, etc.)
// arguments can be passed in registers (x0-x7); the rest must go on the stack.
constexpr size_t kMaxIntLikeRegisterArguments = 8u;

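// Note: in AArch64, Wn and Sn are the low 32 bits of Xn and Dn respectively,
// so the W/S arrays below are just 32-bit views of the same eight argument
// registers as the X/D arrays.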
static const XRegister kXArgumentRegisters[] = {
  X0, X1, X2, X3, X4, X5, X6, X7
};

static const WRegister kWArgumentRegisters[] = {
  W0, W1, W2, W3, W4, W5, W6, W7
};

static const DRegister kDArgumentRegisters[] = {
  D0, D1, D2, D3, D4, D5, D6, D7
};

static const SRegister kSArgumentRegisters[] = {
  S0, S1, S2, S3, S4, S5, S6, S7
};

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    // Note: The native JNI function may call VM runtime functions that can
    // suspend the thread or trigger GC, in which case the JNI method frame
    // becomes the top quick frame. To satisfy the GC we must therefore save LR
    // and the callee-save registers, similar to a CalleeSaveMethod(RefOnly)
    // frame.
    // The JNI function is the native function that the Java code wants to
    // call; the JNI method is the stub compiled by the JNI compiler.
    // Call chain: managed code (Java) --> JNI method --> JNI function.
    // The thread register (X19) is saved on the stack.
    Arm64ManagedRegister::FromXRegister(X19),
    Arm64ManagedRegister::FromXRegister(X20),
    Arm64ManagedRegister::FromXRegister(X21),
    Arm64ManagedRegister::FromXRegister(X22),
    Arm64ManagedRegister::FromXRegister(X23),
    Arm64ManagedRegister::FromXRegister(X24),
    Arm64ManagedRegister::FromXRegister(X25),
    Arm64ManagedRegister::FromXRegister(X26),
    Arm64ManagedRegister::FromXRegister(X27),
    Arm64ManagedRegister::FromXRegister(X28),
    Arm64ManagedRegister::FromXRegister(X29),
    Arm64ManagedRegister::FromXRegister(LR),
    // Hard float registers.
    // Consider the chain java_method_1 --> JNI method --> JNI function -->
    // java_method_2: we may stop in java_method_2 and still need the values of
    // DEX registers in java_method_1, so all managed-code callee-saves must be
    // saved here.
    Arm64ManagedRegister::FromDRegister(D8),
    Arm64ManagedRegister::FromDRegister(D9),
    Arm64ManagedRegister::FromDRegister(D10),
    Arm64ManagedRegister::FromDRegister(D11),
    Arm64ManagedRegister::FromDRegister(D12),
    Arm64ManagedRegister::FromDRegister(D13),
    Arm64ManagedRegister::FromDRegister(D14),
    Arm64ManagedRegister::FromDRegister(D15),
};

static constexpr uint32_t CalculateCoreCalleeSpillMask() {
  uint32_t result = 0u;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsArm64().IsXRegister()) {
      result |= (1u << r.AsArm64().AsXRegister());
    }
  }
  return result;
}

static constexpr uint32_t CalculateFpCalleeSpillMask() {
  uint32_t result = 0u;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsArm64().IsDRegister()) {
      result |= (1u << r.AsArm64().AsDRegister());
    }
  }
  return result;
}

static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask();
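// Worked example, assuming the register enums number X0..X30 as 0..30 (with LR
// aliasing X30) and D0..D31 as 0..31, as in registers_arm64.h:
// kCoreCalleeSpillMask sets bits 19..30 (X19..X29 and LR), i.e. 0x7ff80000,
// and kFpCalleeSpillMask sets bits 8..15 (D8..D15), i.e. 0x0000ff00.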

// Calling convention
ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
  // X20 is safe to use as a scratch register:
  // - with Baker read barriers (in the case of a non-critical native
  //   method), it is reserved as Marking Register, and thus does not
  //   actually need to be saved/restored; it is refreshed on exit
  //   (see Arm64JNIMacroAssembler::RemoveFrame);
  // - in other cases, it is saved on entry (in
  //   Arm64JNIMacroAssembler::BuildFrame) and restored on exit (in
  //   Arm64JNIMacroAssembler::RemoveFrame). This is also expected in
  //   the case of a critical native method in the Baker read barrier
  //   configuration, where the value of MR must be preserved across
  //   the JNI call (as there is no MR refresh in that case).
  return Arm64ManagedRegister::FromXRegister(X20);
}

ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() {
  // X20 is safe to use as a scratch register for the same reasons given in
  // Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister()
  // above.
  return Arm64ManagedRegister::FromXRegister(X20);
}

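// The first character of a shorty encodes the return type. For example, a
// method double f(int, long) has shorty "DIJ" and so returns in D0, while
// int/char/boolean/object returns fall through to the final case and use W0.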
static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
  if (shorty[0] == 'F') {
    return Arm64ManagedRegister::FromSRegister(S0);
  } else if (shorty[0] == 'D') {
    return Arm64ManagedRegister::FromDRegister(D0);
  } else if (shorty[0] == 'J') {
    return Arm64ManagedRegister::FromXRegister(X0);
  } else if (shorty[0] == 'V') {
    return Arm64ManagedRegister::NoRegister();
  } else {
    return Arm64ManagedRegister::FromWRegister(W0);
  }
}

ManagedRegister Arm64ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Arm64JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Arm64JniCallingConvention::IntReturnRegister() {
  return Arm64ManagedRegister::FromWRegister(W0);
}

// Managed runtime calling convention

ManagedRegister Arm64ManagedRuntimeCallingConvention::MethodRegister() {
  return Arm64ManagedRegister::FromXRegister(X0);
}

bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything is moved to the stack on entry.
}

bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return true;
}

ManagedRegister Arm64ManagedRuntimeCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  return ManagedRegister::NoRegister();
}

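// A small worked example (for illustration only): with zero displacement, the
// first argument slot sits just past the ArtMethod* reference, at offset
// kFramePointerSize (8); each following 32-bit slot adds 4 bytes, with
// itr_slots_ advancing by two slots for longs and doubles.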
FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  FrameOffset result =
      FrameOffset(displacement_.Int32Value() +       // displacement
                  kFramePointerSize +                // Method ref
                  (itr_slots_ * sizeof(uint32_t)));  // offset into in args
  return result;
}

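// A sketch of the resulting spill list for a hypothetical instance method
// void m(int, double, long): the iterator visits (this, int, double, long) and
// records W1 (this), W2 (int), D0 (double) and X3 (long); X0 is skipped
// because it holds the ArtMethod*.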
const ManagedRegisterEntrySpills& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on ARM64 to free them up for scratch use;
  // we then assume all arguments are on the stack.
  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
    int gp_reg_index = 1;  // We start from X1/W1; X0 holds the ArtMethod*.
    int fp_reg_index = 0;  // D0/S0.

    // We need to choose the correct register width (D/S or X/W) since the
    // managed stack uses 32-bit stack slots.
    ResetIterator(FrameOffset(0));
    while (HasNext()) {
      if (IsCurrentParamAFloatOrDouble()) {  // FP regs.
        if (fp_reg_index < 8) {
          if (!IsCurrentParamADouble()) {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[fp_reg_index]));
          } else {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[fp_reg_index]));
          }
          fp_reg_index++;
        } else {  // Just increase the stack offset.
          if (!IsCurrentParamADouble()) {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
          } else {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
          }
        }
      } else {  // GP regs.
        if (gp_reg_index < 8) {
          if (IsCurrentParamALong() && (!IsCurrentParamAReference())) {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg_index]));
          } else {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg_index]));
          }
          gp_reg_index++;
        } else {  // Just increase the stack offset.
          if (IsCurrentParamALong() && (!IsCurrentParamAReference())) {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
          } else {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
          }
        }
      }
      Next();
    }
  }
  return entry_spills_;
}


// JNI calling convention
Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static,
                                                     bool is_synchronized,
                                                     bool is_critical_native,
                                                     const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kArm64PointerSize) {
}

uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
  return kCoreCalleeSpillMask;
}

uint32_t Arm64JniCallingConvention::FpSpillMask() const {
  return kFpCalleeSpillMask;
}

ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
  return ManagedRegister::NoRegister();
}

size_t Arm64JniCallingConvention::FrameSize() {
  // Method*, callee save area size, local reference segment state.
  //
  // (Unlike x86_64, do not include the return address, and the segment state
  // is a uint32_t instead of a pointer.)
  size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
  size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;

  size_t frame_data_size = method_ptr_size + callee_save_area_size;
  if (LIKELY(HasLocalReferenceSegmentState())) {
    frame_data_size += sizeof(uint32_t);
  }
  // References plus 2 words for the HandleScope header.
  size_t handle_scope_size = HandleScope::SizeOf(kArm64PointerSize, ReferenceCount());

  size_t total_size = frame_data_size;
  if (LIKELY(HasHandleScope())) {
    // The HandleScope is sometimes excluded (e.g. for @CriticalNative methods).
    total_size += handle_scope_size;
  }

  // Plus the return value spill area size.
  total_size += SizeOfReturnValue();

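  // Rough worked example (a non-critical native method, so both the segment
  // state and the handle scope are present): 8 bytes for the Method* +
  // 20 * 8 = 160 bytes of callee saves + 4 bytes of segment state = 172 bytes
  // of frame data; the handle scope and return value spill are added on top,
  // and the total is rounded up to kStackAlignment.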
  return RoundUp(total_size, kStackAlignment);
}

size_t Arm64JniCallingConvention::OutArgSize() {
  // Same as X86_64.
  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
}

ArrayRef<const ManagedRegister> Arm64JniCallingConvention::CalleeSaveRegisters() const {
  // Same as X86_64.
  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
}

bool Arm64JniCallingConvention::IsCurrentParamInRegister() {
  if (IsCurrentParamAFloatOrDouble()) {
    return (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments);
  } else {
    return ((itr_args_ - itr_float_and_doubles_) < kMaxIntLikeRegisterArguments);
  }
  // TODO: Can we just call CurrentParamRegister to figure this out?
}

bool Arm64JniCallingConvention::IsCurrentParamOnStack() {
  // Is this ever not the same for all the architectures?
  return !IsCurrentParamInRegister();
}

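// A sketch of the mapping for a hypothetical static native method
// jint f(jfloat, jint): the iterator also walks the implicit JNI arguments,
// yielding JNIEnv* in X0, the jclass in X1, the jfloat in S0 and the jint in
// W2 (gp_reg = itr_args_ - itr_float_and_doubles_ = 3 - 1 = 2).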
ManagedRegister Arm64JniCallingConvention::CurrentParamRegister() {
  CHECK(IsCurrentParamInRegister());
  if (IsCurrentParamAFloatOrDouble()) {
    CHECK_LT(itr_float_and_doubles_, kMaxFloatOrDoubleRegisterArguments);
    if (IsCurrentParamADouble()) {
      return Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[itr_float_and_doubles_]);
    } else {
      return Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[itr_float_and_doubles_]);
    }
  } else {
    int gp_reg = itr_args_ - itr_float_and_doubles_;
    CHECK_LT(static_cast<unsigned int>(gp_reg), kMaxIntLikeRegisterArguments);
    if (IsCurrentParamALong() || IsCurrentParamAReference() || IsCurrentParamJniEnv()) {
      return Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg]);
    } else {
      return Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg]);
    }
  }
}

FrameOffset Arm64JniCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  size_t args_on_stack = itr_args_
      - std::min(kMaxFloatOrDoubleRegisterArguments,
                 static_cast<size_t>(itr_float_and_doubles_))
      - std::min(kMaxIntLikeRegisterArguments,
                 static_cast<size_t>(itr_args_ - itr_float_and_doubles_));
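  // args_on_stack is the index of the current argument among the stack-passed
  // ones. For example, with no FP arguments and itr_args_ == 8 (the ninth
  // argument), args_on_stack = 8 - 0 - min(8u, 8u) = 0, i.e. the first slot.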
  size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
  CHECK_LT(offset, OutArgSize());
  return FrameOffset(offset);
  // TODO: Seems identical to the X86_64 code.
}

size_t Arm64JniCallingConvention::NumberOfOutgoingStackArgs() {
  // All arguments, including the extra JNI arguments (JNIEnv*, plus the jclass
  // for static methods).
  size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();

  DCHECK_GE(all_args, NumFloatOrDoubleArgs());

  size_t all_stack_args =
      all_args
      - std::min(kMaxFloatOrDoubleRegisterArguments,
                 static_cast<size_t>(NumFloatOrDoubleArgs()))
      - std::min(kMaxIntLikeRegisterArguments,
                 static_cast<size_t>(all_args - NumFloatOrDoubleArgs()));
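  // For example, with 19 arguments in total (including the implicit JNIEnv*
  // and, for a static method, the jclass), 9 of them floating point:
  // all_stack_args = 19 - min(8, 9) - min(8, 10) = 3.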

  // TODO: Seems similar to the X86_64 code, except that it doesn't count the
  // return PC.

  return all_stack_args;
}

}  // namespace arm64
}  // namespace art