/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Nterp entry point and support functions.
 */
#include "nterp.h"

#include "base/quasi_atomic.h"
#include "class_linker-inl.h"
#include "dex/dex_instruction_utils.h"
#include "debugger.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "interpreter/interpreter_cache-inl.h"
#include "interpreter/interpreter_common.h"
#include "interpreter/interpreter_intrinsics.h"
#include "interpreter/shadow_frame-inl.h"
#include "mirror/string-alloc-inl.h"
#include "nterp_helpers.h"

namespace art {
namespace interpreter {
bool IsNterpSupported() {
  return !kPoisonHeapReferences && kUseReadBarrier;
}

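// Returns whether the runtime, as currently configured, can execute methods with nterp.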
bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
  Runtime* runtime = Runtime::Current();
  instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
  // If the runtime is interpreter only, we currently don't use nterp, as some
  // parts of the runtime (like instrumentation) assume that an interpreter-only
  // runtime always runs the switch-like interpreter.
  return IsNterpSupported() &&
      !instr->InterpretOnly() &&
      !runtime->IsAotCompiler() &&
      !runtime->GetInstrumentation()->NeedsSlowInterpreterForListeners() &&
      // If an async exception has been thrown, we need to go to the switch
      // interpreter: nterp doesn't know how to deal with these, so we could end
      // up never handling the exception if we are in an infinite loop.
      !runtime->AreAsyncExceptionsThrown() &&
      (runtime->GetJit() == nullptr || !runtime->GetJit()->JitAtFirstUse());
}

// The entrypoint for nterp, which ArtMethods can directly point to.
extern "C" void ExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);

const void* GetNterpEntryPoint() {
  return reinterpret_cast<const void*>(interpreter::ExecuteNterpImpl);
}

/*
 * Verify some constants used by the nterp interpreter.
 */
void CheckNterpAsmConstants() {
  /*
   * If we're using computed goto instruction transitions, make sure
   * none of the handlers overflows the byte limit. This won't tell us
   * which one did, but if any one is too big the total size will
   * overflow.
   */
  const int width = kNterpHandlerSize;
  ptrdiff_t interp_size = reinterpret_cast<uintptr_t>(artNterpAsmInstructionEnd) -
                          reinterpret_cast<uintptr_t>(artNterpAsmInstructionStart);
  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
    LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
               << " (did an instruction handler exceed " << width << " bytes?)";
  }
}

inline void UpdateHotness(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  // The hotness we will add to a method when we perform a
  // field/method/class/string lookup.
  constexpr uint16_t kNterpHotnessLookup = 0xf;
  method->UpdateCounter(kNterpHotnessLookup);
}

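// Cache the result of a runtime lookup (field, method, class, string, ...) in the
// thread-local interpreter cache, keyed by the dex pc, so that nterp's fast path can
// reuse it without calling back into the runtime.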
template<typename T>
inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T value) {
  DCHECK(kUseReadBarrier) << "Nterp only works with read barriers";
  self->GetInterpreterCache()->Set(self, dex_pc_ptr, value);
}

template<typename T>
inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T* value) {
  UpdateCache(self, dex_pc_ptr, reinterpret_cast<size_t>(value));
}

#ifdef __arm__

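// Copies floating point arguments from the VFP registers (or from `stack_args` for
// arguments that did not fit in s0-s15) into the argument array `registers`, following
// the ARM hard-float calling convention.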
extern "C" void NterpStoreArm32Fprs(const char* shorty,
                                    uint32_t* registers,
                                    uint32_t* stack_args,
                                    const uint32_t* fprs) {
  // Note: `shorty` already has the return type removed.
  ScopedAssertNoThreadSuspension sants("In nterp");
  uint32_t arg_index = 0;
  uint32_t fpr_double_index = 0;
  uint32_t fpr_index = 0;
  for (uint32_t shorty_index = 0; shorty[shorty_index] != '\0'; ++shorty_index) {
    char arg_type = shorty[shorty_index];
    switch (arg_type) {
      case 'D': {
        // Double should not overlap with float.
        fpr_double_index = std::max(fpr_double_index, RoundUp(fpr_index, 2));
        if (fpr_double_index < 16) {
          registers[arg_index] = fprs[fpr_double_index++];
          registers[arg_index + 1] = fprs[fpr_double_index++];
        } else {
          registers[arg_index] = stack_args[arg_index];
          registers[arg_index + 1] = stack_args[arg_index + 1];
        }
        arg_index += 2;
        break;
      }
      case 'F': {
        if (fpr_index % 2 == 0) {
          fpr_index = std::max(fpr_double_index, fpr_index);
        }
        if (fpr_index < 16) {
          registers[arg_index] = fprs[fpr_index++];
        } else {
          registers[arg_index] = stack_args[arg_index];
        }
        arg_index++;
        break;
      }
      case 'J': {
        arg_index += 2;
        break;
      }
      default: {
        arg_index++;
        break;
      }
    }
  }
}

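// The counterpart of NterpStoreArm32Fprs: reads floating point arguments out of the dex
// registers `registers` and places them into the VFP registers `fprs` (or into
// `stack_args` once s0-s15 are used up), following the ARM hard-float calling convention.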
extern "C" void NterpSetupArm32Fprs(const char* shorty,
                                    uint32_t dex_register,
                                    uint32_t stack_index,
                                    uint32_t* fprs,
                                    uint32_t* registers,
                                    uint32_t* stack_args) {
  // Note: `shorty` already has the return type removed.
  ScopedAssertNoThreadSuspension sants("In nterp");
  uint32_t fpr_double_index = 0;
  uint32_t fpr_index = 0;
  for (uint32_t shorty_index = 0; shorty[shorty_index] != '\0'; ++shorty_index) {
    char arg_type = shorty[shorty_index];
    switch (arg_type) {
      case 'D': {
        // Double should not overlap with float.
        fpr_double_index = std::max(fpr_double_index, RoundUp(fpr_index, 2));
        if (fpr_double_index < 16) {
          fprs[fpr_double_index++] = registers[dex_register++];
          fprs[fpr_double_index++] = registers[dex_register++];
          stack_index += 2;
        } else {
          stack_args[stack_index++] = registers[dex_register++];
          stack_args[stack_index++] = registers[dex_register++];
        }
        break;
      }
      case 'F': {
        if (fpr_index % 2 == 0) {
          fpr_index = std::max(fpr_double_index, fpr_index);
        }
        if (fpr_index < 16) {
          fprs[fpr_index++] = registers[dex_register++];
          stack_index++;
        } else {
          stack_args[stack_index++] = registers[dex_register++];
        }
        break;
      }
      case 'J': {
        stack_index += 2;
        dex_register += 2;
        break;
      }
      default: {
        stack_index++;
        dex_register++;
        break;
      }
    }
  }
}

#endif

extern "C" const dex::CodeItem* NterpGetCodeItem(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetCodeItem();
}

extern "C" const char* NterpGetShorty(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty();
}

extern "C" const char* NterpGetShortyFromMethodId(ArtMethod* caller, uint32_t method_index)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return caller->GetDexFile()->GetMethodShorty(method_index);
}

extern "C" const char* NterpGetShortyFromInvokePolymorphic(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  dex::ProtoIndex proto_idx(inst->Opcode() == Instruction::INVOKE_POLYMORPHIC
      ? inst->VRegH_45cc()
      : inst->VRegH_4rcc());
  return caller->GetDexFile()->GetShorty(proto_idx);
}

extern "C" const char* NterpGetShortyFromInvokeCustom(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t call_site_index = (inst->Opcode() == Instruction::INVOKE_CUSTOM
      ? inst->VRegB_35c()
      : inst->VRegB_3rc());
  const DexFile* dex_file = caller->GetDexFile();
  dex::ProtoIndex proto_idx = dex_file->GetProtoIndexForCallSite(call_site_index);
  return dex_file->GetShorty(proto_idx);
}

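// Resolves the method referenced by the invoke instruction at `dex_pc_ptr` and returns
// it encoded for nterp's fast path: interface invokes set low bits to distinguish
// j.l.Object/vtable calls and default methods, String.<init> calls set the low bit and
// return the StringFactory method, virtual invokes return the method's vtable index, and
// all other invokes return the ArtMethod pointer. Returns 0 with an exception pending on
// failure.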
FLATTEN
extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  InvokeType invoke_type = kStatic;
  uint16_t method_index = 0;
  switch (inst->Opcode()) {
    case Instruction::INVOKE_DIRECT: {
      method_index = inst->VRegB_35c();
      invoke_type = kDirect;
      break;
    }

    case Instruction::INVOKE_INTERFACE: {
      method_index = inst->VRegB_35c();
      invoke_type = kInterface;
      break;
    }

    case Instruction::INVOKE_STATIC: {
      method_index = inst->VRegB_35c();
      invoke_type = kStatic;
      break;
    }

    case Instruction::INVOKE_SUPER: {
      method_index = inst->VRegB_35c();
      invoke_type = kSuper;
      break;
    }

    case Instruction::INVOKE_VIRTUAL: {
      method_index = inst->VRegB_35c();
      invoke_type = kVirtual;
      break;
    }

    case Instruction::INVOKE_DIRECT_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kDirect;
      break;
    }

    case Instruction::INVOKE_INTERFACE_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kInterface;
      break;
    }

    case Instruction::INVOKE_STATIC_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kStatic;
      break;
    }

    case Instruction::INVOKE_SUPER_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kSuper;
      break;
    }

    case Instruction::INVOKE_VIRTUAL_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kVirtual;
      break;
    }

    default:
      LOG(FATAL) << "Unknown instruction " << inst->Opcode();
  }

  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtMethod* resolved_method = caller->SkipAccessChecks()
      ? class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
            self, method_index, caller, invoke_type)
      : class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
            self, method_index, caller, invoke_type);
  if (resolved_method == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }

  if (invoke_type == kSuper) {
    resolved_method = caller->SkipAccessChecks()
        ? FindSuperMethodToCall</*access_check=*/false>(method_index, resolved_method, caller, self)
        : FindSuperMethodToCall</*access_check=*/true>(method_index, resolved_method, caller, self);
    if (resolved_method == nullptr) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
  }

  if (invoke_type == kInterface) {
    size_t result = 0u;
    if (resolved_method->GetDeclaringClass()->IsObjectClass()) {
      // Set the low bit to notify the interpreter it should do a vtable call.
      DCHECK_LT(resolved_method->GetMethodIndex(), 0x10000);
      result = (resolved_method->GetMethodIndex() << 16) | 1U;
    } else {
      DCHECK(resolved_method->GetDeclaringClass()->IsInterface());
      DCHECK(!resolved_method->IsCopied());
      if (!resolved_method->IsAbstract()) {
        // Set the second bit to notify the interpreter this is a default
        // method.
        result = reinterpret_cast<size_t>(resolved_method) | 2U;
      } else {
        result = reinterpret_cast<size_t>(resolved_method);
      }
    }
    UpdateCache(self, dex_pc_ptr, result);
    return result;
  } else if (resolved_method->GetDeclaringClass()->IsStringClass()
      && !resolved_method->IsStatic()
      && resolved_method->IsConstructor()) {
    CHECK_NE(invoke_type, kSuper);
    resolved_method = WellKnownClasses::StringInitToStringFactory(resolved_method);
    // Or the result with 1 to notify nterp this is a string init method. We
    // also don't cache the result, as we don't want nterp's fast path to always
    // check for it, and we expect a lot more regular calls than string init
    // calls.
    return reinterpret_cast<size_t>(resolved_method) | 1;
  } else if (invoke_type == kVirtual) {
    UpdateCache(self, dex_pc_ptr, resolved_method->GetMethodIndex());
    return resolved_method->GetMethodIndex();
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_method);
    return reinterpret_cast<size_t>(resolved_method);
  }
}

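// Resolves the field referenced by `field_index` on behalf of `caller`. When the caller
// cannot skip access checks, this also performs the access, static/instance mismatch,
// and final-field checks, and optionally resolves the field's type.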
FLATTEN
static ArtField* ResolveFieldWithAccessChecks(Thread* self,
                                              ClassLinker* class_linker,
                                              uint16_t field_index,
                                              ArtMethod* caller,
                                              bool is_static,
                                              bool is_put,
                                              size_t resolve_field_type)  // Resolve if not zero
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (caller->SkipAccessChecks()) {
    return class_linker->ResolveField(field_index, caller, is_static);
  }

  caller = caller->GetInterfaceMethodIfProxy(kRuntimePointerSize);

  StackHandleScope<2> hs(self);
  Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(caller->GetDexCache()));
  Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(caller->GetClassLoader()));

  ArtField* resolved_field = class_linker->ResolveFieldJLS(field_index,
                                                           h_dex_cache,
                                                           h_class_loader);
  if (resolved_field == nullptr) {
    return nullptr;
  }

  ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
  if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
    ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, caller);
    return nullptr;
  }
  ObjPtr<mirror::Class> referring_class = caller->GetDeclaringClass();
  if (UNLIKELY(!referring_class->CheckResolvedFieldAccess(fields_class,
                                                          resolved_field,
                                                          caller->GetDexCache(),
                                                          field_index))) {
    return nullptr;
  }
  if (UNLIKELY(is_put && resolved_field->IsFinal() && (fields_class != referring_class))) {
    ThrowIllegalAccessErrorFinalField(caller, resolved_field);
    return nullptr;
  }
  if (resolve_field_type != 0u && resolved_field->ResolveType() == nullptr) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }
  return resolved_field;
}

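// Called for sget/sput: resolves the static field, ensures its declaring class is
// initialized (or initializing), and returns the ArtField pointer. The low bit is set
// for volatile fields, which are also not cached.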
extern "C" size_t NterpGetStaticField(Thread* self,
                                      ArtMethod* caller,
                                      uint16_t* dex_pc_ptr,
                                      size_t resolve_field_type)  // Resolve if not zero
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegB_21c();
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtField* resolved_field = ResolveFieldWithAccessChecks(
      self,
      class_linker,
      field_index,
      caller,
      /* is_static */ true,
      /* is_put */ IsInstructionSPut(inst->Opcode()),
      resolve_field_type);

  if (resolved_field == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }
  if (UNLIKELY(!resolved_field->GetDeclaringClass()->IsVisiblyInitialized())) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> h_class(hs.NewHandle(resolved_field->GetDeclaringClass()));
    if (UNLIKELY(!class_linker->EnsureInitialized(
            self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
    DCHECK(h_class->IsInitializing());
  }
  if (resolved_field->IsVolatile()) {
    // Or the result with 1 to notify nterp this is a volatile field. We
    // also don't cache the result, as we don't want nterp's fast path to always
    // check for it.
    return reinterpret_cast<size_t>(resolved_field) | 1;
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_field);
    return reinterpret_cast<size_t>(resolved_field);
  }
}

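// Called for iget/iput: resolves the instance field and returns its offset within the
// object. For volatile fields the negated offset is returned and the result is not
// cached.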
extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
                                                ArtMethod* caller,
                                                uint16_t* dex_pc_ptr,
                                                size_t resolve_field_type)  // Resolve if not zero
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegC_22c();
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtField* resolved_field = ResolveFieldWithAccessChecks(
      self,
      class_linker,
      field_index,
      caller,
      /* is_static */ false,
      /* is_put */ IsInstructionIPut(inst->Opcode()),
      resolve_field_type);
  if (resolved_field == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }
  if (resolved_field->IsVolatile()) {
    // Don't cache for a volatile field, and return a negative offset as marker
    // of volatile.
    return -resolved_field->GetOffset().Uint32Value();
  }
  UpdateCache(self, dex_pc_ptr, resolved_field->GetOffset().Uint32Value());
  return resolved_field->GetOffset().Uint32Value();
}

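// Handles new-instance, check-cast, instance-of, const-class and new-array: resolves the
// class referenced by the instruction. For new-instance the object is allocated here as
// well, with a special path for String, whose allocation cannot go through the cache.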
extern "C" mirror::Object* NterpGetClassOrAllocateObject(Thread* self,
                                                         ArtMethod* caller,
                                                         uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  dex::TypeIndex index;
  switch (inst->Opcode()) {
    case Instruction::NEW_INSTANCE:
      index = dex::TypeIndex(inst->VRegB_21c());
      break;
    case Instruction::CHECK_CAST:
      index = dex::TypeIndex(inst->VRegB_21c());
      break;
    case Instruction::INSTANCE_OF:
      index = dex::TypeIndex(inst->VRegC_22c());
      break;
    case Instruction::CONST_CLASS:
      index = dex::TypeIndex(inst->VRegB_21c());
      break;
    case Instruction::NEW_ARRAY:
      index = dex::TypeIndex(inst->VRegC_22c());
      break;
    default:
      LOG(FATAL) << "Unreachable";
  }
  ObjPtr<mirror::Class> c =
      ResolveVerifyAndClinit(index,
                             caller,
                             self,
                             /* can_run_clinit= */ false,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (c == nullptr) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }

  if (inst->Opcode() == Instruction::NEW_INSTANCE) {
    gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
    if (UNLIKELY(c->IsStringClass())) {
      // We don't cache the class for strings as we need to special case their
      // allocation.
      return mirror::String::AllocEmptyString(self, allocator_type).Ptr();
    } else {
      if (!c->IsFinalizable() && c->IsInstantiable()) {
        // Cache non-finalizable classes for next calls.
        UpdateCache(self, dex_pc_ptr, c.Ptr());
      }
      return AllocObjectFromCode(c, self, allocator_type).Ptr();
    }
  } else {
    // For all other cases, cache the class.
    UpdateCache(self, dex_pc_ptr, c.Ptr());
  }
  return c.Ptr();
}

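// Handles const-string, const-string/jumbo, const-method-handle and const-method-type:
// resolves and returns the referenced object. Only strings are cached, as the other
// constants are not expected to be performance sensitive.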
extern "C" mirror::Object* NterpLoadObject(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  switch (inst->Opcode()) {
    case Instruction::CONST_STRING:
    case Instruction::CONST_STRING_JUMBO: {
      UpdateHotness(caller);
      dex::StringIndex string_index(
          (inst->Opcode() == Instruction::CONST_STRING)
              ? inst->VRegB_21c()
              : inst->VRegB_31c());
      ObjPtr<mirror::String> str = class_linker->ResolveString(string_index, caller);
      if (str == nullptr) {
        DCHECK(self->IsExceptionPending());
        return nullptr;
      }
      UpdateCache(self, dex_pc_ptr, str.Ptr());
      return str.Ptr();
    }
    case Instruction::CONST_METHOD_HANDLE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance sensitive entry.
      return class_linker->ResolveMethodHandle(self, inst->VRegB_21c(), caller).Ptr();
    }
    case Instruction::CONST_METHOD_TYPE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance sensitive entry.
      return class_linker->ResolveMethodType(
          self, dex::ProtoIndex(inst->VRegB_21c()), caller).Ptr();
    }
    default:
      LOG(FATAL) << "Unreachable";
  }
  return nullptr;
}

extern "C" void NterpUnimplemented() {
  LOG(FATAL) << "Unimplemented";
}

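// Shared implementation of filled-new-array and filled-new-array/range: resolves the
// array class, allocates the array, and fills it from the argument registers. Only
// arrays of ints and of references are supported here.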
static mirror::Object* DoFilledNewArray(Thread* self,
                                        ArtMethod* caller,
                                        uint16_t* dex_pc_ptr,
                                        uint32_t* regs,
                                        bool is_range)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  if (kIsDebugBuild) {
    if (is_range) {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
    } else {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY);
    }
  }
  const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
  DCHECK_GE(length, 0);
  if (!is_range) {
    // Check that FILLED_NEW_ARRAY's length does not exceed 5 arguments.
    DCHECK_LE(length, 5);
  }
  uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
  ObjPtr<mirror::Class> array_class =
      ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
                             caller,
                             self,
                             /* can_run_clinit= */ true,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(array_class == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }
  DCHECK(array_class->IsArrayClass());
  ObjPtr<mirror::Class> component_class = array_class->GetComponentType();
  const bool is_primitive_int_component = component_class->IsPrimitiveInt();
  if (UNLIKELY(component_class->IsPrimitive() && !is_primitive_int_component)) {
    if (component_class->IsPrimitiveLong() || component_class->IsPrimitiveDouble()) {
      ThrowRuntimeException("Bad filled array request for type %s",
                            component_class->PrettyDescriptor().c_str());
    } else {
      self->ThrowNewExceptionF(
          "Ljava/lang/InternalError;",
          "Found type %s; filled-new-array not implemented for anything but 'int'",
          component_class->PrettyDescriptor().c_str());
    }
    return nullptr;
  }
  ObjPtr<mirror::Object> new_array = mirror::Array::Alloc(
      self,
      array_class,
      length,
      array_class->GetComponentSizeShift(),
      Runtime::Current()->GetHeap()->GetCurrentAllocator());
  if (UNLIKELY(new_array == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  uint32_t arg[Instruction::kMaxVarArgRegs];  // Only used in filled-new-array.
  uint32_t vregC = 0;  // Only used in filled-new-array-range.
  if (is_range) {
    vregC = inst->VRegC_3rc();
  } else {
    inst->GetVarArgs(arg);
  }
  for (int32_t i = 0; i < length; ++i) {
    size_t src_reg = is_range ? vregC + i : arg[i];
    if (is_primitive_int_component) {
      new_array->AsIntArray()->SetWithoutChecks</* kTransactionActive= */ false>(i, regs[src_reg]);
    } else {
      new_array->AsObjectArray<mirror::Object>()->SetWithoutChecks</* kTransactionActive= */ false>(
          i, reinterpret_cast<mirror::Object*>(regs[src_reg]));
    }
  }
  return new_array.Ptr();
}

extern "C" mirror::Object* NterpFilledNewArray(Thread* self,
                                               ArtMethod* caller,
                                               uint32_t* registers,
                                               uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ false);
}

extern "C" mirror::Object* NterpFilledNewArrayRange(Thread* self,
                                                    ArtMethod* caller,
                                                    uint32_t* registers,
                                                    uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ true);
}

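// Called by nterp when a method's hotness counter indicates the method is hot: resets
// the counter, tries to prepare OSR data when called from a loop back edge
// (`dex_pc_ptr` != null), and may enqueue the method for JIT compilation.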
extern "C" jit::OsrData* NterpHotMethod(ArtMethod* method, uint16_t* dex_pc_ptr, uint32_t* vregs)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // It is important this method is not suspended, because it can be called on
  // method entry, and async deoptimization does not expect runtime methods other
  // than the suspend entrypoint before executing the first instruction of a Java
  // method.
  ScopedAssertNoThreadSuspension sants("In nterp");
  Runtime* runtime = Runtime::Current();
  if (method->IsMemorySharedMethod()) {
    DCHECK_EQ(Thread::Current()->GetSharedMethodHotness(), 0u);
    Thread::Current()->ResetSharedMethodHotness();
  } else {
    method->ResetCounter(runtime->GetJITOptions()->GetWarmupThreshold());
  }
  jit::Jit* jit = runtime->GetJit();
  if (jit != nullptr && jit->UseJitCompilation()) {
    // Nterp passes null on entry where we don't want to OSR.
    if (dex_pc_ptr != nullptr) {
      // This could be a loop back edge, check if we can OSR.
      CodeItemInstructionAccessor accessor(method->DexInstructions());
      uint32_t dex_pc = dex_pc_ptr - accessor.Insns();
      jit::OsrData* osr_data = jit->PrepareForOsr(
          method->GetInterfaceMethodIfProxy(kRuntimePointerSize), dex_pc, vregs);
      if (osr_data != nullptr) {
        return osr_data;
      }
    }
    jit->MaybeEnqueueCompilation(method, Thread::Current());
  }
  return nullptr;
}

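/*
 * Find the matching case in a packed-switch payload. Returns the offset to the handler
 * instructions, or 3 (the size of the packed-switch instruction) if the value is not in
 * the table.
 */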
extern "C" ssize_t NterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const int kInstrLen = 3;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));

  uint16_t size = *switchData++;

  int32_t firstKey = *switchData++;
  firstKey |= (*switchData++) << 16;

  int index = testVal - firstKey;
  if (index < 0 || index >= size) {
    return kInstrLen;
  }

  /*
   * The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
  return entries[index];
}

/*
 * Find the matching case. Returns the offset to the handler instructions.
 *
 * Returns 3 if we don't find a match (it's the size of the sparse-switch
 * instruction).
 */
extern "C" ssize_t NterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const int kInstrLen = 3;
  uint16_t size;
  const int32_t* keys;
  const int32_t* entries;

  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */

  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));

  size = *switchData++;

  /* The keys are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  keys = reinterpret_cast<const int32_t*>(switchData);

  /* The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  entries = keys + size;

  /*
   * Binary-search through the array of keys, which are guaranteed to
   * be sorted low-to-high.
   */
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) >> 1;

    int32_t foundVal = keys[mid];
    if (testVal < foundVal) {
      hi = mid - 1;
    } else if (testVal > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  return kInstrLen;
}

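// Frees a malloc'ed buffer on behalf of the nterp assembly code.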
extern "C" void NterpFree(void* val) {
  free(val);
}

}  // namespace interpreter
}  // namespace art
821