/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "calling_convention_x86.h"

#include <android-base/logging.h>

#include "arch/instruction_set.h"
#include "handle_scope-inl.h"
#include "utils/x86/managed_register_x86.h"

namespace art {
namespace x86 {

static_assert(kX86PointerSize == PointerSize::k32, "Unexpected x86 pointer size");
static_assert(kStackAlignment >= 16u, "IA-32 cdecl requires at least 16 byte stack alignment");

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    X86ManagedRegister::FromCpuRegister(EBP),
    X86ManagedRegister::FromCpuRegister(ESI),
    X86ManagedRegister::FromCpuRegister(EDI),
    // No hard float callee saves.
};

static constexpr uint32_t CalculateCoreCalleeSpillMask() {
  // The spilled PC gets a special marker.
  uint32_t result = 1 << kNumberOfCpuRegisters;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsX86().IsCpuRegister()) {
      result |= (1 << r.AsX86().AsCpuRegister());
    }
  }
  return result;
}

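// For reference: assuming the standard x86 register numbering (EBP = 5, ESI = 6,
// EDI = 7) and kNumberOfCpuRegisters == 8, the mask below evaluates to
// (1u << 8) | (1u << 7) | (1u << 6) | (1u << 5) = 0x1e0.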
static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
static constexpr uint32_t kFpCalleeSpillMask = 0u;

// Calling convention

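// Both conventions use ECX as the interprocedural scratch register: incoming
// managed arguments are spilled to the stack on entry (see EntrySpills below),
// so no live argument value is held in ECX at the transition.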
ManagedRegister X86ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
  return X86ManagedRegister::FromCpuRegister(ECX);
}

ManagedRegister X86JniCallingConvention::InterproceduralScratchRegister() {
  return X86ManagedRegister::FromCpuRegister(ECX);
}

ManagedRegister X86JniCallingConvention::ReturnScratchRegister() const {
  return ManagedRegister::NoRegister();  // No free regs, so assembler uses push/pop
}

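// Picks the return register from the first character of the method shorty (its
// return type): 'F'/'D' return in ST0 for JNI (native cdecl) but in XMM0 for
// managed code, 'J' returns in the EAX/EDX pair, 'V' has no return register,
// and everything else (32-bit integers and references) returns in EAX.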
static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
  if (shorty[0] == 'F' || shorty[0] == 'D') {
    if (jni) {
      return X86ManagedRegister::FromX87Register(ST0);
    } else {
      return X86ManagedRegister::FromXmmRegister(XMM0);
    }
  } else if (shorty[0] == 'J') {
    return X86ManagedRegister::FromRegisterPair(EAX_EDX);
  } else if (shorty[0] == 'V') {
    return ManagedRegister::NoRegister();
  } else {
    return X86ManagedRegister::FromCpuRegister(EAX);
  }
}

ManagedRegister X86ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), false);
}

ManagedRegister X86JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), true);
}

ManagedRegister X86JniCallingConvention::IntReturnRegister() {
  return X86ManagedRegister::FromCpuRegister(EAX);
}

// Managed runtime calling convention

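// The ArtMethod* being invoked is passed in EAX.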
ManagedRegister X86ManagedRuntimeCallingConvention::MethodRegister() {
  return X86ManagedRegister::FromCpuRegister(EAX);
}

bool X86ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything is passed by stack.
}

bool X86ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  // We assume all parameters are on the stack; args arriving in registers are spilled
  // as entry_spills.
  return true;
}

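// GPR arguments are assigned ECX, EDX, EBX in order; the first four FP
// arguments go in XMM0..XMM3. Returns NoRegister once registers run out.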
ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamRegister() {
  ManagedRegister res = ManagedRegister::NoRegister();
  if (!IsCurrentParamAFloatOrDouble()) {
    switch (gpr_arg_count_) {
      case 0:
        res = X86ManagedRegister::FromCpuRegister(ECX);
        break;
      case 1:
        res = X86ManagedRegister::FromCpuRegister(EDX);
        break;
      case 2:
        // Don't split a long between the last register and the stack.
        if (IsCurrentParamALong()) {
          return ManagedRegister::NoRegister();
        }
        res = X86ManagedRegister::FromCpuRegister(EBX);
        break;
    }
  } else if (itr_float_and_doubles_ < 4) {
    // First four float parameters are passed via XMM0..XMM3.
    res = X86ManagedRegister::FromXmmRegister(
        static_cast<XmmRegister>(XMM0 + itr_float_and_doubles_));
  }
  return res;
}

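// Returns the register carrying the high 32 bits of the current long
// argument: the GPR immediately following the one holding the low half.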
ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamHighLongRegister() {
  ManagedRegister res = ManagedRegister::NoRegister();
  DCHECK(IsCurrentParamALong());
  switch (gpr_arg_count_) {
    case 0: res = X86ManagedRegister::FromCpuRegister(EDX); break;
    case 1: res = X86ManagedRegister::FromCpuRegister(EBX); break;
  }
  return res;
}

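// The current argument's home location on the stack: past the frame
// displacement and the Method* slot, indexed by 32-bit argument slots.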
FrameOffset X86ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  return FrameOffset(displacement_.Int32Value() +        // displacement
                     kFramePointerSize +                 // Method*
                     (itr_slots_ * kFramePointerSize));  // offset into in args
}

const ManagedRegisterEntrySpills& X86ManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on X86 to free them up for scratch use; we then assume
  // all arguments are on the stack.
  if (entry_spills_.size() == 0) {
    ResetIterator(FrameOffset(0));
    while (HasNext()) {
      ManagedRegister in_reg = CurrentParamRegister();
      bool is_long = IsCurrentParamALong();
      if (!in_reg.IsNoRegister()) {
        int32_t size = IsParamADouble(itr_args_) ? 8 : 4;
        int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
        ManagedRegisterSpill spill(in_reg, size, spill_offset);
        entry_spills_.push_back(spill);
        if (is_long) {
          // Special case, as we need a second register here.
          in_reg = CurrentParamHighLongRegister();
          DCHECK(!in_reg.IsNoRegister());
          // We have to spill the second half of the long.
          ManagedRegisterSpill spill2(in_reg, size, spill_offset + 4);
          entry_spills_.push_back(spill2);
        }

        // Keep track of the number of GPRs allocated.
        if (!IsCurrentParamAFloatOrDouble()) {
          if (is_long) {
            // Long was allocated in 2 registers.
            gpr_arg_count_ += 2;
          } else {
            gpr_arg_count_++;
          }
        }
      } else if (is_long) {
        // We need to skip the unused last register, which is empty.
        // If we are already out of registers, this is harmless.
        gpr_arg_count_ += 2;
      }
      Next();
    }
  }
  return entry_spills_;
}

// JNI calling convention

X86JniCallingConvention::X86JniCallingConvention(bool is_static,
                                                 bool is_synchronized,
                                                 bool is_critical_native,
                                                 const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kX86PointerSize) {
}

uint32_t X86JniCallingConvention::CoreSpillMask() const {
  return kCoreCalleeSpillMask;
}

uint32_t X86JniCallingConvention::FpSpillMask() const {
  return kFpCalleeSpillMask;
}

size_t X86JniCallingConvention::FrameSize() {
  // Method*, PC return address and callee save area size, local reference segment state.
  const size_t method_ptr_size = static_cast<size_t>(kX86PointerSize);
  const size_t pc_return_addr_size = kFramePointerSize;
  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
  size_t frame_data_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;

  if (LIKELY(HasLocalReferenceSegmentState())) {                     // local ref. segment state
    // Local reference segment state is sometimes excluded.
    frame_data_size += kFramePointerSize;
  }

  // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header.
  const size_t handle_scope_size = HandleScope::SizeOf(kX86PointerSize, ReferenceCount());

  size_t total_size = frame_data_size;
  if (LIKELY(HasHandleScope())) {
    // HandleScope is sometimes excluded.
    total_size += handle_scope_size;                                 // handle scope size
  }

  // Plus return value spill area size.
  total_size += SizeOfReturnValue();

  return RoundUp(total_size, kStackAlignment);
  // TODO: Same thing as x64 except using different pointer size. Refactor?
}

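// Total size of the outgoing stack argument area, rounded up to the required
// stack alignment.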
size_t X86JniCallingConvention::OutArgSize() {
  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
}

ArrayRef<const ManagedRegister> X86JniCallingConvention::CalleeSaveRegisters() const {
  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
}

bool X86JniCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything is passed by stack.
}

bool X86JniCallingConvention::IsCurrentParamOnStack() {
  return true;  // Everything is passed by stack.
}

ManagedRegister X86JniCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  UNREACHABLE();
}

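// Out-args live below the displacement: subtract the whole out-arg area, then
// index by the number of 32-bit slots consumed so far.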
FrameOffset X86JniCallingConvention::CurrentParamStackOffset() {
  return FrameOffset(displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kFramePointerSize));
}

size_t X86JniCallingConvention::NumberOfOutgoingStackArgs() {
  size_t static_args = HasSelfClass() ? 1 : 0;  // count jclass
  // Regular argument parameters and this.
  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
  // Count JNIEnv* and return pc (pushed after Method*).
  size_t internal_args = 1 /* return pc */ + (HasJniEnv() ? 1 : 0 /* jni env */);
  // No register args.
  size_t total_args = static_args + param_args + internal_args;
  return total_args;
}

}  // namespace x86
}  // namespace art