/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "trampoline_compiler.h"

#include "base/arena_allocator.h"
#include "base/malloc_arena_pool.h"
#include "jni/jni_env_ext.h"

#ifdef ART_ENABLE_CODEGEN_arm
#include "utils/arm/assembler_arm_vixl.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "utils/arm64/assembler_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_mips
#include "utils/mips/assembler_mips.h"
#endif

#ifdef ART_ENABLE_CODEGEN_mips64
#include "utils/mips64/assembler_mips64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "utils/x86/assembler_x86.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
#include "utils/x86_64/assembler_x86_64.h"
#endif

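// Shorthand used by every per-architecture emitter below: "__" forwards to the
// local |assembler| instance, so the emission code reads like an assembly listing.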
#define __ assembler.

namespace art {

#ifdef ART_ENABLE_CODEGEN_arm
namespace arm {

#ifdef ___
#error "ARM Assembler macro already defined."
#else
#define ___ assembler.GetVIXLAssembler()->
#endif

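// Emits a fragment that loads the entrypoint at |offset| from the Thread*
// (located according to |abi|) and jumps to it by loading straight into PC.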
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset32 offset) {
  using vixl::aarch32::MemOperand;
  using vixl::aarch32::pc;
  using vixl::aarch32::r0;
  ArmVIXLAssembler assembler(allocator);

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (R0) in interpreter ABI.
      ___ Ldr(pc, MemOperand(r0, offset.Int32Value()));
      break;
    case kJniAbi: {  // Load via Thread* held in JNIEnv* in first argument (R0).
      vixl::aarch32::UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      const vixl::aarch32::Register temp_reg = temps.Acquire();

      // VIXL will use the destination as a scratch register if
      // the offset is not encodable as an immediate operand.
      ___ Ldr(temp_reg, MemOperand(r0, JNIEnvExt::SelfOffset(4).Int32Value()));
      ___ Ldr(pc, MemOperand(temp_reg, offset.Int32Value()));
      break;
    }
    case kQuickAbi:  // TR holds Thread*.
      ___ Ldr(pc, MemOperand(tr, offset.Int32Value()));
  }

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}

#undef ___

}  // namespace arm
#endif  // ART_ENABLE_CODEGEN_arm

#ifdef ART_ENABLE_CODEGEN_arm64
namespace arm64 {
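// Same contract as the ARM version above, but with 64-bit Thread offsets.
// IP0/IP1 (X16/X17) serve as scratch registers, matching their AAPCS64 role
// as intra-procedure-call temporaries.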
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset64 offset) {
  Arm64Assembler assembler(allocator);

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (X0) in interpreter ABI.
      __ JumpTo(Arm64ManagedRegister::FromXRegister(X0), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP1));

      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (X0).
      __ LoadRawPtr(Arm64ManagedRegister::FromXRegister(IP1),
                    Arm64ManagedRegister::FromXRegister(X0),
                    Offset(JNIEnvExt::SelfOffset(8).Int32Value()));

      __ JumpTo(Arm64ManagedRegister::FromXRegister(IP1), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP0));

      break;
    case kQuickAbi:  // TR holds Thread*.
      __ JumpTo(Arm64ManagedRegister::FromXRegister(TR), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP0));

      break;
  }

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace arm64
#endif  // ART_ENABLE_CODEGEN_arm64

#ifdef ART_ENABLE_CODEGEN_mips
namespace mips {
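// MIPS32: load the entrypoint into T9 and jump through it. T9 is the
// conventional register for indirect call targets in the MIPS ABI.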
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset32 offset) {
  MipsAssembler assembler(allocator);

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (A0) in interpreter ABI.
      __ LoadFromOffset(kLoadWord, T9, A0, offset.Int32Value());
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
      __ LoadFromOffset(kLoadWord, T9, A0, JNIEnvExt::SelfOffset(4).Int32Value());
      __ LoadFromOffset(kLoadWord, T9, T9, offset.Int32Value());
      break;
    case kQuickAbi:  // S1 holds Thread*.
      __ LoadFromOffset(kLoadWord, T9, S1, offset.Int32Value());
  }
  __ Jr(T9);
  __ NopIfNoReordering();
  __ Break();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace mips
#endif  // ART_ENABLE_CODEGEN_mips

#ifdef ART_ENABLE_CODEGEN_mips64
namespace mips64 {
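// MIPS64 variant: identical shape to the MIPS32 version, but with doubleword
// loads and 64-bit Thread offsets.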
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset64 offset) {
  Mips64Assembler assembler(allocator);

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (A0) in interpreter ABI.
      __ LoadFromOffset(kLoadDoubleword, T9, A0, offset.Int32Value());
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
      __ LoadFromOffset(kLoadDoubleword, T9, A0, JNIEnvExt::SelfOffset(8).Int32Value());
      __ LoadFromOffset(kLoadDoubleword, T9, T9, offset.Int32Value());
      break;
    case kQuickAbi:  // S1 holds Thread*.
      __ LoadFromOffset(kLoadDoubleword, T9, S1, offset.Int32Value());
  }
  __ Jr(T9);
  __ Nop();
  __ Break();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace mips64
#endif  // ART_ENABLE_CODEGEN_mips64

#ifdef ART_ENABLE_CODEGEN_x86
namespace x86 {
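// On x86 the Thread* is always reachable through the fs segment register, so a
// single segment-relative indirect jump suffices and no |abi| dispatch is needed.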
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator,
                                                                    ThreadOffset32 offset) {
  X86Assembler assembler(allocator);

  // All x86 trampolines call via the Thread* held in fs.
  __ fs()->jmp(Address::Absolute(offset));
  __ int3();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace x86
#endif  // ART_ENABLE_CODEGEN_x86

#ifdef ART_ENABLE_CODEGEN_x86_64
namespace x86_64 {
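// x86-64 mirrors the x86 version, with the Thread* held in gs and a 64-bit
// segment-relative address.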
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator,
                                                                    ThreadOffset64 offset) {
  x86_64::X86_64Assembler assembler(allocator);

  // All x86-64 trampolines call via the Thread* held in gs.
  __ gs()->jmp(x86_64::Address::Absolute(offset, true));
  __ int3();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace x86_64
#endif  // ART_ENABLE_CODEGEN_x86_64

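// Entry point for 64-bit ISAs: picks the backend matching |isa| and returns
// the assembled stub. A minimal usage sketch (hypothetical values; real call
// sites pass entrypoint offsets via the *_ENTRYPOINT_OFFSET macros):
//
//   std::unique_ptr<const std::vector<uint8_t>> stub =
//       CreateTrampoline64(InstructionSet::kArm64,
//                          kQuickAbi,
//                          QUICK_ENTRYPOINT_OFFSET(PointerSize::k64,
//                                                  pQuickGenericJniTrampoline));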
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet isa,
                                                               EntryPointCallingConvention abi,
                                                               ThreadOffset64 offset) {
  MallocArenaPool pool;
  ArenaAllocator allocator(&pool);
  switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm64
    case InstructionSet::kArm64:
      return arm64::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
    case InstructionSet::kMips64:
      return mips64::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case InstructionSet::kX86_64:
      return x86_64::CreateTrampoline(&allocator, offset);
#endif
    default:
      UNUSED(abi);
      UNUSED(offset);
      LOG(FATAL) << "Unexpected InstructionSet: " << isa;
      UNREACHABLE();
  }
}

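// 32-bit counterpart of CreateTrampoline64; x86 ignores |abi| because the
// fs-relative addressing covers every calling convention.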
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet isa,
                                                               EntryPointCallingConvention abi,
                                                               ThreadOffset32 offset) {
  MallocArenaPool pool;
  ArenaAllocator allocator(&pool);
  switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm
    case InstructionSet::kArm:
    case InstructionSet::kThumb2:
      return arm::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_mips
    case InstructionSet::kMips:
      return mips::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86:
      UNUSED(abi);
      return x86::CreateTrampoline(&allocator, offset);
#endif
    default:
      LOG(FATAL) << "Unexpected InstructionSet: " << isa;
      UNREACHABLE();
  }
}

}  // namespace art