1 /*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "calling_convention_mips64.h"
18
19 #include "base/logging.h"
20 #include "handle_scope-inl.h"
21 #include "utils/mips64/managed_register_mips64.h"
22
23 namespace art {
24 namespace mips64 {
25
// Integer argument registers in ABI order: the first eight non-FP arguments
// go in A0-A7 (A0 carries the ArtMethod* for managed calls; see
// MethodRegister()).
static const GpuRegister kGpuArgumentRegisters[] = {
  A0, A1, A2, A3, A4, A5, A6, A7
};
29
// Floating-point argument registers in ABI order: F12-F19. The index into
// this table is shared with kGpuArgumentRegisters — GP and FP argument
// registers advance in lockstep (see EntrySpills()).
static const FpuRegister kFpuArgumentRegisters[] = {
  F12, F13, F14, F15, F16, F17, F18, F19
};
33
34 // Calling convention
InterproceduralScratchRegister()35 ManagedRegister Mips64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
36 return Mips64ManagedRegister::FromGpuRegister(T9);
37 }
38
InterproceduralScratchRegister()39 ManagedRegister Mips64JniCallingConvention::InterproceduralScratchRegister() {
40 return Mips64ManagedRegister::FromGpuRegister(T9);
41 }
42
ReturnRegisterForShorty(const char * shorty)43 static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
44 if (shorty[0] == 'F' || shorty[0] == 'D') {
45 return Mips64ManagedRegister::FromFpuRegister(F0);
46 } else if (shorty[0] == 'V') {
47 return Mips64ManagedRegister::NoRegister();
48 } else {
49 return Mips64ManagedRegister::FromGpuRegister(V0);
50 }
51 }
52
ReturnRegister()53 ManagedRegister Mips64ManagedRuntimeCallingConvention::ReturnRegister() {
54 return ReturnRegisterForShorty(GetShorty());
55 }
56
ReturnRegister()57 ManagedRegister Mips64JniCallingConvention::ReturnRegister() {
58 return ReturnRegisterForShorty(GetShorty());
59 }
60
IntReturnRegister()61 ManagedRegister Mips64JniCallingConvention::IntReturnRegister() {
62 return Mips64ManagedRegister::FromGpuRegister(V0);
63 }
64
65 // Managed runtime calling convention
66
MethodRegister()67 ManagedRegister Mips64ManagedRuntimeCallingConvention::MethodRegister() {
68 return Mips64ManagedRegister::FromGpuRegister(A0);
69 }
70
// Managed convention: arguments are spilled to the stack on method entry
// (see EntrySpills()), so no parameter is ever read from a register here.
bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything moved to stack on entry.
}
74
// Counterpart of IsCurrentParamInRegister(): every managed argument is
// located on the stack after the entry spills.
bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return true;
}
78
// Unreachable: IsCurrentParamInRegister() above always returns false for this
// convention, so callers must never ask for a parameter register.
ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  return ManagedRegister::NoRegister();
}
83
CurrentParamStackOffset()84 FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
85 CHECK(IsCurrentParamOnStack());
86 FrameOffset result =
87 FrameOffset(displacement_.Int32Value() + // displacement
88 kFramePointerSize + // Method ref
89 (itr_slots_ * sizeof(uint32_t))); // offset into in args
90 return result;
91 }
92
// Builds (lazily, on first call) the list of argument registers that must be
// spilled to the stack on entry to a managed method, together with the slot
// size (4 or 8 bytes) each spill occupies on the 32-bit-slot managed stack.
const ManagedRegisterEntrySpills& Mips64ManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on MIPS64 to free them up for scratch use,
  // we then assume all arguments are on the stack.
  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
    int reg_index = 1;  // we start from A1, A0 holds ArtMethod*.

    // We need to choose the correct register size since the managed
    // stack uses 32bit stack slots.
    ResetIterator(FrameOffset(0));
    while (HasNext()) {
      // Only the first 8 argument registers carry values; any further
      // arguments are already on the stack and need no spill.
      if (reg_index < 8) {
        if (IsCurrentParamAFloatOrDouble()) {  // FP regs.
          FpuRegister arg = kFpuArgumentRegisters[reg_index];
          Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(arg);
          // Doubles occupy two 32-bit slots, floats one.
          entry_spills_.push_back(reg, IsCurrentParamADouble() ? 8 : 4);
        } else {  // GP regs.
          GpuRegister arg = kGpuArgumentRegisters[reg_index];
          Mips64ManagedRegister reg = Mips64ManagedRegister::FromGpuRegister(arg);
          // Longs take two slots; references are compressed to one slot even
          // though they ride in a 64-bit register.
          entry_spills_.push_back(reg,
                                  (IsCurrentParamALong() && (!IsCurrentParamAReference())) ? 8 : 4);
        }
        // A single index selects from both register files, so GP and FP
        // argument registers are consumed in lockstep:
        // e.g. A1, A2, F3, A4, F5, F6, A7
        reg_index++;
      }

      Next();
    }
  }
  return entry_spills_;
}
123
124 // JNI calling convention
125
Mips64JniCallingConvention(bool is_static,bool is_synchronized,const char * shorty)126 Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized,
127 const char* shorty)
128 : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
129 callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S2));
130 callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S3));
131 callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S4));
132 callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S5));
133 callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S6));
134 callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S7));
135 callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(GP));
136 callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S8));
137 }
138
CoreSpillMask() const139 uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
140 // Compute spill mask to agree with callee saves initialized in the constructor
141 uint32_t result = 0;
142 result = 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 | 1 << S7 | 1 << GP | 1 << S8 | 1 << RA;
143 DCHECK_EQ(static_cast<size_t>(POPCOUNT(result)), callee_save_regs_.size() + 1);
144 return result;
145 }
146
ReturnScratchRegister() const147 ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const {
148 return Mips64ManagedRegister::FromGpuRegister(AT);
149 }
150
FrameSize()151 size_t Mips64JniCallingConvention::FrameSize() {
152 // ArtMethod*, RA and callee save area size, local reference segment state
153 size_t frame_data_size = kFramePointerSize +
154 (CalleeSaveRegisters().size() + 1) * kFramePointerSize + sizeof(uint32_t);
155 // References plus 2 words for HandleScope header
156 size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
157 // Plus return value spill area size
158 return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
159 }
160
OutArgSize()161 size_t Mips64JniCallingConvention::OutArgSize() {
162 return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
163 }
164
IsCurrentParamInRegister()165 bool Mips64JniCallingConvention::IsCurrentParamInRegister() {
166 return itr_args_ < 8;
167 }
168
// A JNI argument lives on the stack exactly when it is not in a register.
bool Mips64JniCallingConvention::IsCurrentParamOnStack() {
  return !IsCurrentParamInRegister();
}
172
CurrentParamRegister()173 ManagedRegister Mips64JniCallingConvention::CurrentParamRegister() {
174 CHECK(IsCurrentParamInRegister());
175 if (IsCurrentParamAFloatOrDouble()) {
176 return Mips64ManagedRegister::FromFpuRegister(kFpuArgumentRegisters[itr_args_]);
177 } else {
178 return Mips64ManagedRegister::FromGpuRegister(kGpuArgumentRegisters[itr_args_]);
179 }
180 }
181
CurrentParamStackOffset()182 FrameOffset Mips64JniCallingConvention::CurrentParamStackOffset() {
183 CHECK(IsCurrentParamOnStack());
184 size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_args_ - 8) * kFramePointerSize);
185 CHECK_LT(offset, OutArgSize());
186 return FrameOffset(offset);
187 }
188
NumberOfOutgoingStackArgs()189 size_t Mips64JniCallingConvention::NumberOfOutgoingStackArgs() {
190 // all arguments including JNI args
191 size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();
192
193 // Nothing on the stack unless there are more than 8 arguments
194 return (all_args > 8) ? all_args - 8 : 0;
195 }
196 } // namespace mips64
197 } // namespace art
198