/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Mterp entry point and support functions.
 */
#include "mterp.h"

#include "base/quasi_atomic.h"
#include "debugger.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "interpreter/interpreter_common.h"
#include "interpreter/interpreter_intrinsics.h"

namespace art {
namespace interpreter {
/*
 * Verify some constants used by the mterp interpreter.
 */
void CheckMterpAsmConstants() {
  /*
   * If we're using computed goto instruction transitions, make sure
   * none of the handlers overflows the 128-byte limit.  This won't tell
   * which one did, but if any one is too big the total size will
   * overflow.
   */
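  /*
   * Illustrative arithmetic (assuming kNumPackedOpcodes covers the full
   * 8-bit dex opcode space, i.e. 256 handlers): with every handler padded
   * to width == 128 bytes, the expected span is 256 * 128 == 32768 bytes,
   * so any larger total means at least one handler overflowed its slot.
   */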
  const int width = 128;
  int interp_size = (uintptr_t) artMterpAsmInstructionEnd -
                    (uintptr_t) artMterpAsmInstructionStart;
  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
    LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
               << " (did an instruction handler exceed " << width << " bytes?)";
  }
}

void InitMterpTls(Thread* self) {
  self->SetMterpDefaultIBase(artMterpAsmInstructionStart);
  self->SetMterpAltIBase(artMterpAsmAltInstructionStart);
  self->SetMterpCurrentIBase((kTraceExecutionEnabled || kTestExportPC) ?
                             artMterpAsmAltInstructionStart :
                             artMterpAsmInstructionStart);
}

/*
 * Find the matching case.  Returns the offset to the handler instructions.
 *
 * Returns 3 if we don't find a match (it's the size of the sparse-switch
 * instruction).
 */
extern "C" ssize_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal) {
  const int kInstrLen = 3;
  uint16_t size;
  const int32_t* keys;
  const int32_t* entries;

  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
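
  /*
   * Worked example (illustrative payload, little-endian code units): for
   *   0x0200, 0x0003,   // ident, size = 3
   *   0x000a, 0x0000,   // key 10
   *   0x0064, 0x0000,   // key 100
   *   0x03e8, 0x0000,   // key 1000
   *   0x0005, 0x0000,   // target +5
   *   0x0008, 0x0000,   // target +8
   *   0x000b, 0x0000,   // target +11
   * a testVal of 100 matches the middle key and returns +8, while a
   * testVal of 42 misses every key and returns kInstrLen (3), i.e. fall
   * through to the instruction after the sparse-switch.
   */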

  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));

  size = *switchData++;

  /* The keys are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  keys = reinterpret_cast<const int32_t*>(switchData);

  /* The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  entries = keys + size;

  /*
   * Binary-search through the array of keys, which are guaranteed to
   * be sorted low-to-high.
   */
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) >> 1;

    int32_t foundVal = keys[mid];
    if (testVal < foundVal) {
      hi = mid - 1;
    } else if (testVal > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  return kInstrLen;
}

extern "C" ssize_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal) {
  const int kInstrLen = 3;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
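
  /*
   * Worked example (illustrative payload, little-endian code units): for
   *   0x0100, 0x0003,   // ident, size = 3
   *   0x000a, 0x0000,   // first_key = 10
   *   0x0004, 0x0000,   // target for case 10: +4
   *   0x0007, 0x0000,   // target for case 11: +7
   *   0x000a, 0x0000,   // target for case 12: +10
   * a testVal of 11 indexes entry 1 and returns +7; any testVal outside
   * [10, 12] returns kInstrLen (3) and execution falls through.
   */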
  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));

  uint16_t size = *switchData++;

  int32_t firstKey = *switchData++;
  firstKey |= (*switchData++) << 16;

  int index = testVal - firstKey;
  if (index < 0 || index >= size) {
    return kInstrLen;
  }

  /*
   * The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
  return entries[index];
}

extern "C" size_t MterpShouldSwitchInterpreters()
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Runtime* const runtime = Runtime::Current();
  const instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
  return instrumentation->NonJitProfilingActive() ||
      Dbg::IsDebuggerActive() ||
      // An async exception has been thrown.  We need to go to the switch interpreter: mterp
      // doesn't know how to deal with these, so if we are in an infinite loop we could end up
      // never handling the exception.  Since this can be called in a tight loop and getting the
      // current thread requires a TLS read, we instead first check a short-circuit runtime flag
      // that will only be set if something tries to raise an async exception.  This makes this
      // function faster in the common case where no async exception has ever been sent.  We
      // don't need to worry about synchronization on the runtime flag since it is only set in a
      // checkpoint, which will either take place on the current thread or act as a
      // synchronization point.
      (UNLIKELY(runtime->AreAsyncExceptionsThrown()) &&
       Thread::Current()->IsAsyncExceptionPending());
}

extern "C" size_t MterpInvokeVirtual(Thread* self,
                                     ShadowFrame* shadow_frame,
                                     uint16_t* dex_pc_ptr,
                                     uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFastInvoke<kVirtual>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeSuper(Thread* self,
                                   ShadowFrame* shadow_frame,
                                   uint16_t* dex_pc_ptr,
                                   uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kSuper, false, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeInterface(Thread* self,
                                       ShadowFrame* shadow_frame,
                                       uint16_t* dex_pc_ptr,
                                       uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kInterface, false, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeDirect(Thread* self,
                                    ShadowFrame* shadow_frame,
                                    uint16_t* dex_pc_ptr,
                                    uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFastInvoke<kDirect>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeStatic(Thread* self,
                                    ShadowFrame* shadow_frame,
                                    uint16_t* dex_pc_ptr,
                                    uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFastInvoke<kStatic>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeCustom(Thread* self,
                                    ShadowFrame* shadow_frame,
                                    uint16_t* dex_pc_ptr,
                                    uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokeCustom<false /* is_range */>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokePolymorphic(Thread* self,
                                         ShadowFrame* shadow_frame,
                                         uint16_t* dex_pc_ptr,
                                         uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokePolymorphic<false /* is_range */>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeVirtualRange(Thread* self,
                                          ShadowFrame* shadow_frame,
                                          uint16_t* dex_pc_ptr,
                                          uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kVirtual, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeSuperRange(Thread* self,
                                        ShadowFrame* shadow_frame,
                                        uint16_t* dex_pc_ptr,
                                        uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kSuper, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeInterfaceRange(Thread* self,
                                            ShadowFrame* shadow_frame,
                                            uint16_t* dex_pc_ptr,
                                            uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kInterface, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeDirectRange(Thread* self,
                                         ShadowFrame* shadow_frame,
                                         uint16_t* dex_pc_ptr,
                                         uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kDirect, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeStaticRange(Thread* self,
                                         ShadowFrame* shadow_frame,
                                         uint16_t* dex_pc_ptr,
                                         uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kStatic, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeCustomRange(Thread* self,
                                         ShadowFrame* shadow_frame,
                                         uint16_t* dex_pc_ptr,
                                         uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokeCustom<true /* is_range */>(self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokePolymorphicRange(Thread* self,
                                              ShadowFrame* shadow_frame,
                                              uint16_t* dex_pc_ptr,
                                              uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokePolymorphic<true /* is_range */>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeVirtualQuick(Thread* self,
                                          ShadowFrame* shadow_frame,
                                          uint16_t* dex_pc_ptr,
                                          uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  const uint32_t vregC = inst->VRegC_35c();
  const uint32_t vtable_idx = inst->VRegB_35c();
  ObjPtr<mirror::Object> const receiver = shadow_frame->GetVRegReference(vregC);
  if (receiver != nullptr) {
    ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
        vtable_idx, kRuntimePointerSize);
    if ((called_method != nullptr) && called_method->IsIntrinsic()) {
      if (MterpHandleIntrinsic(shadow_frame, called_method, inst, inst_data, result_register)) {
        jit::Jit* jit = Runtime::Current()->GetJit();
        if (jit != nullptr) {
          jit->InvokeVirtualOrInterface(
              receiver, shadow_frame->GetMethod(), shadow_frame->GetDexPC(), called_method);
        }
        return !self->IsExceptionPending();
      }
    }
  }
  return DoInvokeVirtualQuick<false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeVirtualQuickRange(Thread* self,
                                               ShadowFrame* shadow_frame,
                                               uint16_t* dex_pc_ptr,
                                               uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokeVirtualQuick<true>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" void MterpThreadFenceForConstructor() {
  QuasiAtomic::ThreadFenceForConstructor();
}

extern "C" size_t MterpConstString(uint32_t index,
                                   uint32_t tgt_vreg,
                                   ShadowFrame* shadow_frame,
                                   Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::String> s = ResolveString(self, *shadow_frame, dex::StringIndex(index));
  if (UNLIKELY(s == nullptr)) {
    return true;
  }
  shadow_frame->SetVRegReference(tgt_vreg, s.Ptr());
  return false;
}

extern "C" size_t MterpConstClass(uint32_t index,
                                  uint32_t tgt_vreg,
                                  ShadowFrame* shadow_frame,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
                                                   shadow_frame->GetMethod(),
                                                   self,
                                                   /* can_run_clinit */ false,
                                                   /* verify_access */ false);
  if (UNLIKELY(c == nullptr)) {
    return true;
  }
  shadow_frame->SetVRegReference(tgt_vreg, c.Ptr());
  return false;
}

extern "C" size_t MterpConstMethodHandle(uint32_t index,
                                         uint32_t tgt_vreg,
                                         ShadowFrame* shadow_frame,
                                         Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::MethodHandle> mh = ResolveMethodHandle(self, index, shadow_frame->GetMethod());
  if (UNLIKELY(mh == nullptr)) {
    return true;
  }
  shadow_frame->SetVRegReference(tgt_vreg, mh.Ptr());
  return false;
}

extern "C" size_t MterpConstMethodType(uint32_t index,
                                       uint32_t tgt_vreg,
                                       ShadowFrame* shadow_frame,
                                       Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::MethodType> mt = ResolveMethodType(self, index, shadow_frame->GetMethod());
  if (UNLIKELY(mt == nullptr)) {
    return true;
  }
  shadow_frame->SetVRegReference(tgt_vreg, mt.Ptr());
  return false;
}

extern "C" size_t MterpCheckCast(uint32_t index,
                                 StackReference<mirror::Object>* vreg_addr,
                                 art::ArtMethod* method,
                                 Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
                                                   method,
                                                   self,
                                                   /* can_run_clinit */ false,
                                                   /* verify_access */ false);
  if (UNLIKELY(c == nullptr)) {
    return true;
  }
  // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
  mirror::Object* obj = vreg_addr->AsMirrorPtr();
  if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
    ThrowClassCastException(c, obj->GetClass());
    return true;
  }
  return false;
}

extern "C" size_t MterpInstanceOf(uint32_t index,
                                  StackReference<mirror::Object>* vreg_addr,
                                  art::ArtMethod* method,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
                                                   method,
                                                   self,
                                                   /* can_run_clinit */ false,
                                                   /* verify_access */ false);
  if (UNLIKELY(c == nullptr)) {
    return false;  // Caller will check for pending exception.  Return value unimportant.
  }
  // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
  mirror::Object* obj = vreg_addr->AsMirrorPtr();
  return (obj != nullptr) && obj->InstanceOf(c);
}

extern "C" size_t MterpFillArrayData(mirror::Object* obj,
                                     const Instruction::ArrayDataPayload* payload)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return FillArrayData(obj, payload);
}

extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  mirror::Object* obj = nullptr;
  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
                                                   shadow_frame->GetMethod(),
                                                   self,
                                                   /* can_run_clinit */ false,
                                                   /* verify_access */ false);
  if (LIKELY(c != nullptr)) {
    if (UNLIKELY(c->IsStringClass())) {
      gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
      obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
    } else {
      obj = AllocObjectFromCode<true>(c.Ptr(),
                                      self,
                                      Runtime::Current()->GetHeap()->GetCurrentAllocator());
    }
  }
  if (UNLIKELY(obj == nullptr)) {
    return false;
  }
  obj->GetClass()->AssertInitializedOrInitializingInThread(self);
  shadow_frame->SetVRegReference(inst->VRegA_21c(inst_data), obj);
  return true;
}

extern "C" size_t MterpSputObject(ShadowFrame* shadow_frame,
                                  uint16_t* dex_pc_ptr,
                                  uint32_t inst_data,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, false, false>
      (self, *shadow_frame, inst, inst_data);
}

extern "C" size_t MterpIputObject(ShadowFrame* shadow_frame,
                                  uint16_t* dex_pc_ptr,
                                  uint32_t inst_data,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, false, false>
      (self, *shadow_frame, inst, inst_data);
}

extern "C" size_t MterpIputObjectQuick(ShadowFrame* shadow_frame,
                                       uint16_t* dex_pc_ptr,
                                       uint32_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data);
}

extern "C" size_t MterpAputObject(ShadowFrame* shadow_frame,
                                  uint16_t* dex_pc_ptr,
                                  uint32_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  mirror::Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
  if (UNLIKELY(a == nullptr)) {
    return false;
  }
  int32_t index = shadow_frame->GetVReg(inst->VRegC_23x());
  mirror::Object* val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
  mirror::ObjectArray<mirror::Object>* array = a->AsObjectArray<mirror::Object>();
  if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
    array->SetWithoutChecks<false>(index, val);
    return true;
  }
  return false;
}

extern "C" size_t MterpFilledNewArray(ShadowFrame* shadow_frame,
                                      uint16_t* dex_pc_ptr,
                                      Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFilledNewArray<false, false, false>(inst, *shadow_frame, self,
                                               shadow_frame->GetResultRegister());
}

extern "C" size_t MterpFilledNewArrayRange(ShadowFrame* shadow_frame,
                                           uint16_t* dex_pc_ptr,
                                           Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFilledNewArray<true, false, false>(inst, *shadow_frame, self,
                                              shadow_frame->GetResultRegister());
}

extern "C" size_t MterpNewArray(ShadowFrame* shadow_frame,
                                uint16_t* dex_pc_ptr,
                                uint32_t inst_data,
                                Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
  mirror::Object* obj = AllocArrayFromCode<false, true>(
      dex::TypeIndex(inst->VRegC_22c()), length, shadow_frame->GetMethod(), self,
      Runtime::Current()->GetHeap()->GetCurrentAllocator());
  if (UNLIKELY(obj == nullptr)) {
    return false;
  }
  shadow_frame->SetVRegReference(inst->VRegA_22c(inst_data), obj);
  return true;
}

extern "C" size_t MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(self->IsExceptionPending());
  const instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  return MoveToExceptionHandler(self, *shadow_frame, instrumentation);
}

extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t inst_data = inst->Fetch16(0);
  if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
    self->AssertPendingException();
  } else {
    self->AssertNoPendingException();
  }
  if (kTraceExecutionEnabled) {
    uint32_t dex_pc = dex_pc_ptr - shadow_frame->GetDexInstructions();
    TraceExecution(*shadow_frame, inst, dex_pc);
  }
  if (kTestExportPC) {
    // Save invalid dex pc to force segfault if improperly used.
    shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(kExportPCPoison));
  }
}

extern "C" void MterpLogDivideByZeroException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "DivideByZero: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogArrayIndexException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "ArrayIndex: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNegativeArraySizeException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NegativeArraySize: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNoSuchMethodException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NoSuchMethod: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogExceptionThrownException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "ExceptionThrown: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNullObjectException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NullObject: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogFallback(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "Fallback: " << inst->Opcode(inst_data) << ", Exception Pending?: "
            << self->IsExceptionPending();
}

extern "C" void MterpLogOSR(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "OSR: " << inst->Opcode(inst_data) << ", offset = " << offset;
}

extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame, uint32_t flags)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  if (flags & kCheckpointRequest) {
    LOG(INFO) << "Checkpoint fallback: " << inst->Opcode(inst_data);
  } else if (flags & kSuspendRequest) {
    LOG(INFO) << "Suspend fallback: " << inst->Opcode(inst_data);
  } else if (flags & kEmptyCheckpointRequest) {
    LOG(INFO) << "Empty checkpoint fallback: " << inst->Opcode(inst_data);
  }
}

extern "C" size_t MterpSuspendCheck(Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  self->AllowThreadSuspension();
  return MterpShouldSwitchInterpreters();
}

extern "C" ssize_t artSet8InstanceFromMterp(uint32_t field_idx,
                                            mirror::Object* obj,
                                            uint8_t new_value,
                                            ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    Primitive::Type type = field->GetTypeAsPrimitiveType();
    if (type == Primitive::kPrimBoolean) {
      field->SetBoolean<false>(obj, new_value);
    } else {
      DCHECK_EQ(Primitive::kPrimByte, type);
      field->SetByte<false>(obj, new_value);
    }
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" ssize_t artSet16InstanceFromMterp(uint32_t field_idx,
                                             mirror::Object* obj,
                                             uint16_t new_value,
                                             ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
                                  sizeof(int16_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    Primitive::Type type = field->GetTypeAsPrimitiveType();
    if (type == Primitive::kPrimChar) {
      field->SetChar<false>(obj, new_value);
    } else {
      DCHECK_EQ(Primitive::kPrimShort, type);
      field->SetShort<false>(obj, new_value);
    }
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" ssize_t artSet32InstanceFromMterp(uint32_t field_idx,
                                             mirror::Object* obj,
                                             uint32_t new_value,
                                             ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
                                  sizeof(int32_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    field->Set32<false>(obj, new_value);
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" ssize_t artSet64InstanceFromMterp(uint32_t field_idx,
                                             mirror::Object* obj,
                                             uint64_t* new_value,
                                             ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
                                  sizeof(int64_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    field->Set64<false>(obj, *new_value);
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" ssize_t artSetObjInstanceFromMterp(uint32_t field_idx,
                                              mirror::Object* obj,
                                              mirror::Object* new_value,
                                              ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
                                  sizeof(mirror::HeapReference<mirror::Object>));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    field->SetObj<false>(obj, new_value);
    return 0;  // success
  }
  return -1;  // failure
}

template <typename return_type, Primitive::Type primitive_type>
ALWAYS_INLINE return_type MterpGetStatic(uint32_t field_idx,
                                         ArtMethod* referrer,
                                         Thread* self,
                                         return_type (ArtField::*func)(ObjPtr<mirror::Object>))
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return_type res = 0;  // On exception, the result will be ignored.
  ArtField* f =
      FindFieldFromCode<StaticPrimitiveRead, false>(field_idx,
                                                    referrer,
                                                    self,
                                                    primitive_type);
  if (LIKELY(f != nullptr)) {
    ObjPtr<mirror::Object> obj = f->GetDeclaringClass();
    res = (f->*func)(obj);
  }
  return res;
}

extern "C" int32_t MterpGetBooleanStatic(uint32_t field_idx,
                                         ArtMethod* referrer,
                                         Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpGetStatic<uint8_t, Primitive::kPrimBoolean>(field_idx,
                                                          referrer,
                                                          self,
                                                          &ArtField::GetBoolean);
}

extern "C" int32_t MterpGetByteStatic(uint32_t field_idx,
                                      ArtMethod* referrer,
                                      Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpGetStatic<int8_t, Primitive::kPrimByte>(field_idx,
                                                      referrer,
                                                      self,
                                                      &ArtField::GetByte);
}

extern "C" uint32_t MterpGetCharStatic(uint32_t field_idx,
                                       ArtMethod* referrer,
                                       Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpGetStatic<uint16_t, Primitive::kPrimChar>(field_idx,
                                                        referrer,
                                                        self,
                                                        &ArtField::GetChar);
}

extern "C" int32_t MterpGetShortStatic(uint32_t field_idx,
                                       ArtMethod* referrer,
                                       Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpGetStatic<int16_t, Primitive::kPrimShort>(field_idx,
                                                        referrer,
                                                        self,
                                                        &ArtField::GetShort);
}

extern "C" mirror::Object* MterpGetObjStatic(uint32_t field_idx,
                                             ArtMethod* referrer,
                                             Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpGetStatic<ObjPtr<mirror::Object>, Primitive::kPrimNot>(field_idx,
                                                                     referrer,
                                                                     self,
                                                                     &ArtField::GetObject).Ptr();
}

extern "C" int32_t MterpGet32Static(uint32_t field_idx,
                                    ArtMethod* referrer,
                                    Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpGetStatic<int32_t, Primitive::kPrimInt>(field_idx,
                                                      referrer,
                                                      self,
                                                      &ArtField::GetInt);
}

extern "C" int64_t MterpGet64Static(uint32_t field_idx, ArtMethod* referrer, Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpGetStatic<int64_t, Primitive::kPrimLong>(field_idx,
                                                       referrer,
                                                       self,
                                                       &ArtField::GetLong);
}

template <typename field_type, Primitive::Type primitive_type>
int MterpSetStatic(uint32_t field_idx,
                   field_type new_value,
                   ArtMethod* referrer,
                   Thread* self,
                   void (ArtField::*func)(ObjPtr<mirror::Object>, field_type val))
    REQUIRES_SHARED(Locks::mutator_lock_) {
  int res = 0;  // Assume success (following quick_field_entrypoints conventions).
  ArtField* f =
      FindFieldFromCode<StaticPrimitiveWrite, false>(field_idx, referrer, self, primitive_type);
  if (LIKELY(f != nullptr)) {
    ObjPtr<mirror::Object> obj = f->GetDeclaringClass();
    (f->*func)(obj, new_value);
  } else {
    res = -1;  // Failure.
  }
  return res;
}

extern "C" int MterpSetBooleanStatic(uint32_t field_idx,
                                     uint8_t new_value,
                                     ArtMethod* referrer,
                                     Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpSetStatic<uint8_t, Primitive::kPrimBoolean>(field_idx,
                                                          new_value,
                                                          referrer,
                                                          self,
                                                          &ArtField::SetBoolean<false>);
}

extern "C" int MterpSetByteStatic(uint32_t field_idx,
                                  int8_t new_value,
                                  ArtMethod* referrer,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpSetStatic<int8_t, Primitive::kPrimByte>(field_idx,
                                                      new_value,
                                                      referrer,
                                                      self,
                                                      &ArtField::SetByte<false>);
}

extern "C" int MterpSetCharStatic(uint32_t field_idx,
                                  uint16_t new_value,
                                  ArtMethod* referrer,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpSetStatic<uint16_t, Primitive::kPrimChar>(field_idx,
                                                        new_value,
                                                        referrer,
                                                        self,
                                                        &ArtField::SetChar<false>);
}

extern "C" int MterpSetShortStatic(uint32_t field_idx,
                                   int16_t new_value,
                                   ArtMethod* referrer,
                                   Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpSetStatic<int16_t, Primitive::kPrimShort>(field_idx,
                                                        new_value,
                                                        referrer,
                                                        self,
                                                        &ArtField::SetShort<false>);
}

extern "C" int MterpSet32Static(uint32_t field_idx,
                                int32_t new_value,
                                ArtMethod* referrer,
                                Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpSetStatic<int32_t, Primitive::kPrimInt>(field_idx,
                                                      new_value,
                                                      referrer,
                                                      self,
                                                      &ArtField::SetInt<false>);
}

extern "C" int MterpSet64Static(uint32_t field_idx,
                                int64_t* new_value,
                                ArtMethod* referrer,
                                Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpSetStatic<int64_t, Primitive::kPrimLong>(field_idx,
                                                       *new_value,
                                                       referrer,
                                                       self,
                                                       &ArtField::SetLong<false>);
}

extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr,
                                                  int32_t index)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (UNLIKELY(arr == nullptr)) {
    ThrowNullPointerExceptionFromInterpreter();
    return nullptr;
  }
  mirror::ObjectArray<mirror::Object>* array = arr->AsObjectArray<mirror::Object>();
  if (LIKELY(array->CheckIsValidIndex(index))) {
    return array->GetWithoutChecks(index);
  } else {
    return nullptr;
  }
}

extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj,
                                                  uint32_t field_offset)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (UNLIKELY(obj == nullptr)) {
    ThrowNullPointerExceptionFromInterpreter();
    return nullptr;
  }
  return obj->GetFieldObject<mirror::Object>(MemberOffset(field_offset));
}

/*
 * Create a hotness_countdown based on the current method hotness_count and profiling
 * mode.  In short, determine how many hotness events we hit before reporting back
 * to the full instrumentation via MterpAddHotnessBatch.  Called once on entry to the method,
 * and regenerated following batch updates.
 */
extern "C" ssize_t MterpSetUpHotnessCountdown(ArtMethod* method,
                                              ShadowFrame* shadow_frame,
                                              Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  uint16_t hotness_count = method->GetCounter();
  int32_t countdown_value = jit::kJitHotnessDisabled;
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    int32_t warm_threshold = jit->WarmMethodThreshold();
    int32_t hot_threshold = jit->HotMethodThreshold();
    int32_t osr_threshold = jit->OSRMethodThreshold();
    if (hotness_count < warm_threshold) {
      countdown_value = warm_threshold - hotness_count;
    } else if (hotness_count < hot_threshold) {
      countdown_value = hot_threshold - hotness_count;
    } else if (hotness_count < osr_threshold) {
      countdown_value = osr_threshold - hotness_count;
    } else {
      countdown_value = jit::kJitCheckForOSR;
    }
    if (jit::Jit::ShouldUsePriorityThreadWeight(self)) {
      int32_t priority_thread_weight = jit->PriorityThreadWeight();
      countdown_value = std::min(countdown_value, countdown_value / priority_thread_weight);
    }
  }
  /*
   * The actual hotness threshold may exceed the range of our int16_t countdown value.  This is
   * not a problem, though.  We can just break it down into smaller chunks.
   */
  countdown_value = std::min(countdown_value,
                             static_cast<int32_t>(std::numeric_limits<int16_t>::max()));
  shadow_frame->SetCachedHotnessCountdown(countdown_value);
  shadow_frame->SetHotnessCountdown(countdown_value);
  return countdown_value;
}
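
/*
 * Worked example for MterpSetUpHotnessCountdown (threshold and weight
 * values here are illustrative, not the real defaults): with warm/hot/OSR
 * thresholds of 5000/10000/60000 and a current hotness count of 4990, the
 * countdown comes out as 5000 - 4990 = 10, so mterp reports back after ten
 * more hotness events.  A priority thread weight of 5 would shrink that
 * countdown to min(10, 10 / 5) = 2.  A value above the int16_t ceiling
 * (32767) is clamped and delivered across several batches, as noted above.
 */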

/*
 * Report a batch of hotness events to the instrumentation and then return the new
 * countdown value for the next time we should report.
 */
extern "C" ssize_t MterpAddHotnessBatch(ArtMethod* method,
                                        ShadowFrame* shadow_frame,
                                        Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
    jit->AddSamples(self, method, count, /*with_backedges*/ true);
  }
  return MterpSetUpHotnessCountdown(method, shadow_frame, self);
}

extern "C" size_t MterpMaybeDoOnStackReplacement(Thread* self,
                                                 ShadowFrame* shadow_frame,
                                                 int32_t offset)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  int16_t osr_countdown = shadow_frame->GetCachedHotnessCountdown() - 1;
  bool did_osr = false;
  /*
   * To reduce the cost of polling the compiler to determine whether the requested OSR
   * compilation has completed, only check every Nth time.  NOTE: the "osr_countdown <= 0"
   * condition is satisfied either by the decrement above or by the initial setting of
   * the cached countdown field to kJitCheckForOSR, which elsewhere is asserted to be -1.
   */
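  /*
   * Cadence sketch (assuming jit::Jit::kJitRecheckOSRThreshold is a small
   * constant on the order of 100): the cached countdown starts at
   * kJitCheckForOSR (-1), so the first hot backward branch polls the
   * compiler immediately; after that, the countdown is reset below and
   * each later poll happens only once it runs out again, keeping the
   * common path to a single decrement and compare.
   */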
  if (osr_countdown <= 0) {
    ArtMethod* method = shadow_frame->GetMethod();
    JValue* result = shadow_frame->GetResultRegister();
    uint32_t dex_pc = shadow_frame->GetDexPC();
    jit::Jit* jit = Runtime::Current()->GetJit();
    osr_countdown = jit::Jit::kJitRecheckOSRThreshold;
    if (offset <= 0) {
      // Keep updating hotness in case a compilation request was dropped.  Eventually it will retry.
      jit->AddSamples(self, method, osr_countdown, /*with_backedges*/ true);
    }
    did_osr = jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
  }
  shadow_frame->SetCachedHotnessCountdown(osr_countdown);
  return did_osr;
}

}  // namespace interpreter
}  // namespace art