/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Nterp entry point and support functions.
 */
#include "nterp.h"

#include "arch/instruction_set.h"
#include "base/quasi_atomic.h"
#include "class_linker-inl.h"
#include "dex/dex_instruction_utils.h"
#include "debugger.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "interpreter/interpreter_cache-inl.h"
#include "interpreter/interpreter_common.h"
#include "interpreter/shadow_frame-inl.h"
#include "mirror/string-alloc-inl.h"
#include "nterp_helpers.h"

namespace art HIDDEN {
namespace interpreter {

bool IsNterpSupported() {
#ifdef ART_USE_RESTRICTED_MODE
  // TODO(Simulator): Support Nterp.
  // Nterp uses the native stack and quick stack frame layout; this will be a complication
  // for the simulator mode. We should use only the switch interpreter for now.
  return false;
#else
  switch (kRuntimeQuickCodeISA) {
    case InstructionSet::kArm:
    case InstructionSet::kThumb2:
    case InstructionSet::kArm64:
      return kReserveMarkingRegister && !kUseTableLookupReadBarrier;
    case InstructionSet::kRiscv64:
      return true;
    case InstructionSet::kX86:
    case InstructionSet::kX86_64:
      return !kUseTableLookupReadBarrier;
    default:
      return false;
  }
#endif  // #ifdef ART_USE_RESTRICTED_MODE
}

bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
  Runtime* runtime = Runtime::Current();
  instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
  // If the runtime is interpreter only, we currently don't use nterp, as some
  // parts of the runtime (like instrumentation) assume that an interpreter-only
  // runtime is always running a switch-like interpreter.
  return IsNterpSupported() && !runtime->IsJavaDebuggable() && !instr->EntryExitStubsInstalled() &&
         !instr->InterpretOnly() && !runtime->IsAotCompiler() &&
         !instr->NeedsSlowInterpreterForListeners() &&
         // If an async exception has been thrown, we need to go to the switch
         // interpreter: nterp doesn't know how to deal with these, so we could
         // end up never handling one if we are stuck in an infinite loop.
         !runtime->AreAsyncExceptionsThrown() &&
         (runtime->GetJit() == nullptr || !runtime->GetJit()->JitAtFirstUse());
}

// The entrypoint for nterp, which ArtMethods can directly point to.
extern "C" void ExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);
extern "C" void EndExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);

const void* GetNterpEntryPoint() {
  return reinterpret_cast<const void*>(interpreter::ExecuteNterpImpl);
}

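// Returns the machine code of the nterp interpreter as a byte range, from the
// ExecuteNterpImpl entry symbol to the EndExecuteNterpImpl end symbol.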
ArrayRef<const uint8_t> NterpImpl() {
  const uint8_t* entry_point = reinterpret_cast<const uint8_t*>(ExecuteNterpImpl);
  size_t size = reinterpret_cast<const uint8_t*>(EndExecuteNterpImpl) - entry_point;
  const uint8_t* code = reinterpret_cast<const uint8_t*>(EntryPointToCodePointer(entry_point));
  return ArrayRef<const uint8_t>(code, size);
}

// Another entrypoint, which does a clinit check at entry.
extern "C" void ExecuteNterpWithClinitImpl() REQUIRES_SHARED(Locks::mutator_lock_);
extern "C" void EndExecuteNterpWithClinitImpl() REQUIRES_SHARED(Locks::mutator_lock_);

const void* GetNterpWithClinitEntryPoint() {
  return reinterpret_cast<const void*>(interpreter::ExecuteNterpWithClinitImpl);
}

ArrayRef<const uint8_t> NterpWithClinitImpl() {
  const uint8_t* entry_point = reinterpret_cast<const uint8_t*>(ExecuteNterpWithClinitImpl);
  size_t size = reinterpret_cast<const uint8_t*>(EndExecuteNterpWithClinitImpl) - entry_point;
  const uint8_t* code = reinterpret_cast<const uint8_t*>(EntryPointToCodePointer(entry_point));
  return ArrayRef<const uint8_t>(code, size);
}

/*
 * Verify some constants used by the nterp interpreter.
 */
void CheckNterpAsmConstants() {
  /*
   * If we're using computed goto instruction transitions, make sure
   * none of the handlers overflows the byte limit.  This won't tell
   * us which one did, but if any one is too big the total size will
   * overflow.
   */
  constexpr size_t width = kNterpHandlerSize;
  ptrdiff_t interp_size = reinterpret_cast<uintptr_t>(artNterpAsmInstructionEnd) -
                          reinterpret_cast<uintptr_t>(artNterpAsmInstructionStart);
  static_assert(kNumPackedOpcodes * width != 0);
  if (interp_size != kNumPackedOpcodes * width) {
    LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
               << " (did an instruction handler exceed " << width << " bytes?)";
  }
}

inline void UpdateHotness(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  // This is the amount of hotness we add to a method when we perform a
  // field/method/class/string lookup.
  method->UpdateCounter(0xf);
}

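// Stores a resolved value in the thread-local interpreter cache, keyed by the
// dex PC of the instruction, so that nterp's fast path can reuse it the next
// time that instruction executes.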
template<typename T>
inline void UpdateCache(Thread* self, const uint16_t* dex_pc_ptr, T value) {
  self->GetInterpreterCache()->Set(self, dex_pc_ptr, value);
}

template<typename T>
inline void UpdateCache(Thread* self, const uint16_t* dex_pc_ptr, T* value) {
  UpdateCache(self, dex_pc_ptr, reinterpret_cast<size_t>(value));
}

#ifdef __arm__

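// Copies the float and double arguments described by `shorty` from the ARM32
// FP registers s0-s15 (or from `stack_args` once the FP registers are
// exhausted) into the `registers` argument array.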
extern "C" void NterpStoreArm32Fprs(const char* shorty,
                                    uint32_t* registers,
                                    uint32_t* stack_args,
                                    const uint32_t* fprs) {
  // Note: `shorty` already has the return type removed.
  ScopedAssertNoThreadSuspension sants("In nterp");
  uint32_t arg_index = 0;
  uint32_t fpr_double_index = 0;
  uint32_t fpr_index = 0;
  for (uint32_t shorty_index = 0; shorty[shorty_index] != '\0'; ++shorty_index) {
    char arg_type = shorty[shorty_index];
    switch (arg_type) {
      case 'D': {
        // Double should not overlap with float.
        fpr_double_index = std::max(fpr_double_index, RoundUp(fpr_index, 2));
        if (fpr_double_index < 16) {
          registers[arg_index] = fprs[fpr_double_index++];
          registers[arg_index + 1] = fprs[fpr_double_index++];
        } else {
          registers[arg_index] = stack_args[arg_index];
          registers[arg_index + 1] = stack_args[arg_index + 1];
        }
        arg_index += 2;
        break;
      }
      case 'F': {
        if (fpr_index % 2 == 0) {
          fpr_index = std::max(fpr_double_index, fpr_index);
        }
        if (fpr_index < 16) {
          registers[arg_index] = fprs[fpr_index++];
        } else {
          registers[arg_index] = stack_args[arg_index];
        }
        arg_index++;
        break;
      }
      case 'J': {
        arg_index += 2;
        break;
      }
      default: {
        arg_index++;
        break;
      }
    }
  }
}

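// The inverse of NterpStoreArm32Fprs: copies the float and double arguments
// described by `shorty` from the `registers` array into the ARM32 FP registers
// s0-s15, spilling to `stack_args` once the FP registers are exhausted.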
extern "C" void NterpSetupArm32Fprs(const char* shorty,
                                    uint32_t dex_register,
                                    uint32_t stack_index,
                                    uint32_t* fprs,
                                    uint32_t* registers,
                                    uint32_t* stack_args) {
  // Note: `shorty` already has the return type removed.
  ScopedAssertNoThreadSuspension sants("In nterp");
  uint32_t fpr_double_index = 0;
  uint32_t fpr_index = 0;
  for (uint32_t shorty_index = 0; shorty[shorty_index] != '\0'; ++shorty_index) {
    char arg_type = shorty[shorty_index];
    switch (arg_type) {
      case 'D': {
        // Double should not overlap with float.
        fpr_double_index = std::max(fpr_double_index, RoundUp(fpr_index, 2));
        if (fpr_double_index < 16) {
          fprs[fpr_double_index++] = registers[dex_register++];
          fprs[fpr_double_index++] = registers[dex_register++];
          stack_index += 2;
        } else {
          stack_args[stack_index++] = registers[dex_register++];
          stack_args[stack_index++] = registers[dex_register++];
        }
        break;
      }
      case 'F': {
        if (fpr_index % 2 == 0) {
          fpr_index = std::max(fpr_double_index, fpr_index);
        }
        if (fpr_index < 16) {
          fprs[fpr_index++] = registers[dex_register++];
          stack_index++;
        } else {
          stack_args[stack_index++] = registers[dex_register++];
        }
        break;
      }
      case 'J': {
        stack_index += 2;
        dex_register += 2;
        break;
      }
      default: {
        stack_index++;
        dex_register++;
        break;
      }
    }
  }
}

#endif

extern "C" const dex::CodeItem* NterpGetCodeItem(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetCodeItem();
}

extern "C" const char* NterpGetShorty(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty();
}

extern "C" const char* NterpGetShortyFromMethodId(ArtMethod* caller, uint32_t method_index)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return caller->GetDexFile()->GetMethodShorty(method_index);
}

extern "C" const char* NterpGetShortyFromInvokePolymorphic(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  dex::ProtoIndex proto_idx(inst->Opcode() == Instruction::INVOKE_POLYMORPHIC
      ? inst->VRegH_45cc()
      : inst->VRegH_4rcc());
  return caller->GetDexFile()->GetShorty(proto_idx);
}

extern "C" const char* NterpGetShortyFromInvokeCustom(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t call_site_index = (inst->Opcode() == Instruction::INVOKE_CUSTOM
      ? inst->VRegB_35c()
      : inst->VRegB_3rc());
  const DexFile* dex_file = caller->GetDexFile();
  dex::ProtoIndex proto_idx = dex_file->GetProtoIndexForCallSite(call_site_index);
  return dex_file->GetShorty(proto_idx);
}

static constexpr uint8_t kInvalidInvokeType = 255u;
static_assert(static_cast<uint8_t>(kMaxInvokeType) < kInvalidInvokeType);

static constexpr uint8_t GetOpcodeInvokeType(uint8_t opcode) {
  switch (opcode) {
    case Instruction::INVOKE_DIRECT:
    case Instruction::INVOKE_DIRECT_RANGE:
      return static_cast<uint8_t>(kDirect);
    case Instruction::INVOKE_INTERFACE:
    case Instruction::INVOKE_INTERFACE_RANGE:
      return static_cast<uint8_t>(kInterface);
    case Instruction::INVOKE_STATIC:
    case Instruction::INVOKE_STATIC_RANGE:
      return static_cast<uint8_t>(kStatic);
    case Instruction::INVOKE_SUPER:
    case Instruction::INVOKE_SUPER_RANGE:
      return static_cast<uint8_t>(kSuper);
    case Instruction::INVOKE_VIRTUAL:
    case Instruction::INVOKE_VIRTUAL_RANGE:
      return static_cast<uint8_t>(kVirtual);

    default:
      return kInvalidInvokeType;
  }
}

static constexpr std::array<uint8_t, 256u> GenerateOpcodeInvokeTypes() {
  std::array<uint8_t, 256u> opcode_invoke_types{};
  for (size_t opcode = 0u; opcode != opcode_invoke_types.size(); ++opcode) {
    opcode_invoke_types[opcode] = GetOpcodeInvokeType(opcode);
  }
  return opcode_invoke_types;
}

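// Compile-time table mapping each opcode byte to its invoke type (or
// kInvalidInvokeType), so NterpGetMethod can find the invoke type with a
// single array load.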
static constexpr std::array<uint8_t, 256u> kOpcodeInvokeTypes = GenerateOpcodeInvokeTypes();

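// Resolves the method invoked at `dex_pc_ptr` and returns it in a form nterp's
// fast path understands: either an ArtMethod pointer or a vtable index, with
// the low bits used as markers (see the comments in each branch below).
// Returns 0 if resolution throws an exception.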
LIBART_PROTECTED FLATTEN
extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, const uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  Instruction::Code opcode = inst->Opcode();
  DCHECK(IsUint<8>(static_cast<std::underlying_type_t<Instruction::Code>>(opcode)));
  uint8_t raw_invoke_type = kOpcodeInvokeTypes[opcode];
  DCHECK_LE(raw_invoke_type, kMaxInvokeType);
  InvokeType invoke_type = static_cast<InvokeType>(raw_invoke_type);

  // In release mode, this is just a simple load.
  // In debug mode, this checks that we're using the correct instruction format.
  uint16_t method_index =
      (opcode >= Instruction::INVOKE_VIRTUAL_RANGE) ? inst->VRegB_3rc() : inst->VRegB_35c();

  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtMethod* resolved_method = caller->SkipAccessChecks()
      ? class_linker->ResolveMethodId(method_index, caller)
      : class_linker->ResolveMethodWithChecks(method_index, caller, invoke_type);
  if (resolved_method == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }

  if (invoke_type == kSuper) {
    resolved_method = caller->SkipAccessChecks()
        ? FindSuperMethodToCall</*access_check=*/false>(method_index, resolved_method, caller, self)
        : FindSuperMethodToCall</*access_check=*/true>(method_index, resolved_method, caller, self);
    if (resolved_method == nullptr) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
  }

  if (invoke_type == kInterface) {
    size_t result = 0u;
    if (resolved_method->GetDeclaringClass()->IsObjectClass()) {
      // Set the low bit to notify the interpreter it should do a vtable call.
      DCHECK_LT(resolved_method->GetMethodIndex(), 0x10000);
      result = (resolved_method->GetMethodIndex() << 16) | 1U;
    } else {
      DCHECK(resolved_method->GetDeclaringClass()->IsInterface());
      DCHECK(!resolved_method->IsCopied());
      if (!resolved_method->IsAbstract()) {
        // Set the second bit to notify the interpreter this is a default
        // method.
        result = reinterpret_cast<size_t>(resolved_method) | 2U;
      } else {
        result = reinterpret_cast<size_t>(resolved_method);
      }
    }
    UpdateCache(self, dex_pc_ptr, result);
    return result;
  } else if (resolved_method->IsStringConstructor()) {
    CHECK_NE(invoke_type, kSuper);
    resolved_method = WellKnownClasses::StringInitToStringFactory(resolved_method);
    // Or the result with 1 to notify nterp that this is a string init method.
    // We also don't cache the result, as we don't want nterp's fast path to
    // always check for it, and we expect a lot more regular calls than string
    // init calls.
    return reinterpret_cast<size_t>(resolved_method) | 1;
  } else if (invoke_type == kVirtual) {
    UpdateCache(self, dex_pc_ptr, resolved_method->GetMethodIndex());
    return resolved_method->GetMethodIndex();
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_method);
    return reinterpret_cast<size_t>(resolved_method);
  }
}

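// Fast-path field lookup that only handles fields declared in the caller's own
// class, where no access checks are needed. Returns nullptr if the caller is
// obsolete or the field is declared elsewhere, so callers fall back to
// FindFieldSlow.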
ALWAYS_INLINE FLATTEN
static ArtField* FindFieldFast(ArtMethod* caller, uint16_t field_index)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (caller->IsObsolete()) {
    return nullptr;
  }

  ObjPtr<mirror::Class> cls = caller->GetDeclaringClass();
  const dex::FieldId& field_id = cls->GetDexFile().GetFieldId(field_index);
  if (cls->GetDexTypeIndex() == field_id.class_idx_) {
    // Field is in the same class as the caller, no need to do access checks.
    return cls->FindDeclaredField(field_index);
  }

  return nullptr;
}

NO_INLINE
static ArtField* FindFieldSlow(Thread* self,
                               ArtMethod* caller,
                               uint16_t field_index,
                               bool is_static,
                               bool is_put)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return ResolveFieldWithAccessChecks(
      self,
      Runtime::Current()->GetClassLinker(),
      field_index,
      caller,
      is_static,
      /*is_put=*/ is_put,
      /*resolve_field_type=*/ 0);
}

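// Resolves the static field accessed at `dex_pc_ptr` and makes sure its
// declaring class is at least initializing. Returns the ArtField pointer, with
// the low bit set for volatile fields, or 0 if an exception is pending.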
LIBART_PROTECTED
extern "C" size_t NterpGetStaticField(Thread* self,
                                      ArtMethod* caller,
                                      const uint16_t* dex_pc_ptr,
                                      size_t resolve_field_type)  // Resolve if not zero
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegB_21c();
  Instruction::Code opcode = inst->Opcode();

  ArtField* resolved_field = FindFieldFast(caller, field_index);
  if (resolved_field == nullptr || !resolved_field->IsStatic()) {
    resolved_field = FindFieldSlow(
        self, caller, field_index, /*is_static=*/ true, IsInstructionSPut(opcode));
    if (resolved_field == nullptr) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
    // Only update hotness for slow lookups.
    UpdateHotness(caller);
  }

  if (UNLIKELY(!resolved_field->GetDeclaringClass()->IsVisiblyInitialized())) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> h_class(hs.NewHandle(resolved_field->GetDeclaringClass()));
    ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
    if (UNLIKELY(!class_linker->EnsureInitialized(
                      self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
    DCHECK(h_class->IsInitializing());
  }

  // For sput-object, try to resolve the field type even if we were not requested to.
  // Only if the field type is successfully resolved can we update the cache. If we
  // fail to resolve the type, we clear the exception to keep interpreter
  // semantics of not throwing when null is stored.
  bool update_cache = true;
  if (opcode == Instruction::SPUT_OBJECT &&
      caller->GetDeclaringClass()->HasTypeChecksFailure() &&
      resolved_field->ResolveType() == nullptr) {
    DCHECK(self->IsExceptionPending());
    if (resolve_field_type) {
      return 0;
    }
    self->ClearException();
    update_cache = false;
  }

  if (resolved_field->IsVolatile()) {
    // Or the result with 1 to notify nterp that this is a volatile field. We
    // also don't cache the result, as we don't want nterp's fast path to always
    // check for it.
    return reinterpret_cast<size_t>(resolved_field) | 1;
  }

  if (update_cache) {
    UpdateCache(self, dex_pc_ptr, resolved_field);
  }
  return reinterpret_cast<size_t>(resolved_field);
}

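// Resolves the instance field accessed at `dex_pc_ptr` and returns its byte
// offset. Volatile fields are reported as a negated offset and are not cached.
// Returns 0 if an exception is pending.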
LIBART_PROTECTED
extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
                                                ArtMethod* caller,
                                                const uint16_t* dex_pc_ptr,
                                                size_t resolve_field_type)  // Resolve if not zero
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegC_22c();
  Instruction::Code opcode = inst->Opcode();

  ArtField* resolved_field = FindFieldFast(caller, field_index);
  if (resolved_field == nullptr || resolved_field->IsStatic()) {
    resolved_field = FindFieldSlow(
        self, caller, field_index, /*is_static=*/ false, IsInstructionIPut(opcode));
    if (resolved_field == nullptr) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
    // Only update hotness for slow lookups.
    UpdateHotness(caller);
  }

  // For iput-object, try to resolve the field type even if we were not requested to.
  // Only if the field type is successfully resolved can we update the cache. If we
  // fail to resolve the type, we clear the exception to keep interpreter
  // semantics of not throwing when null is stored.
  bool update_cache = true;
  if (opcode == Instruction::IPUT_OBJECT &&
      caller->GetDeclaringClass()->HasTypeChecksFailure() &&
      resolved_field->ResolveType() == nullptr) {
    DCHECK(self->IsExceptionPending());
    if (resolve_field_type != 0u) {
      return 0;
    }
    self->ClearException();
    update_cache = false;
  }

  if (resolved_field->IsVolatile()) {
    // Don't cache volatile fields; return a negated offset to mark the field
    // as volatile.
    return -resolved_field->GetOffset().Uint32Value();
  }
  if (update_cache) {
    UpdateCache(self, dex_pc_ptr, resolved_field->GetOffset().Uint32Value());
  }
  return resolved_field->GetOffset().Uint32Value();
}

extern "C" mirror::Object* NterpGetClass(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  Instruction::Code opcode = inst->Opcode();
  DCHECK(opcode == Instruction::CHECK_CAST ||
         opcode == Instruction::INSTANCE_OF ||
         opcode == Instruction::CONST_CLASS ||
         opcode == Instruction::NEW_ARRAY);

  // In release mode, this is just a simple load.
  // In debug mode, this checks that we're using the correct instruction format.
  dex::TypeIndex index = dex::TypeIndex(
      (opcode == Instruction::CHECK_CAST || opcode == Instruction::CONST_CLASS)
          ? inst->VRegB_21c()
          : inst->VRegC_22c());

  ObjPtr<mirror::Class> c =
      ResolveVerifyAndClinit(index,
                             caller,
                             self,
                             /* can_run_clinit= */ false,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(c == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }

  UpdateCache(self, dex_pc_ptr, c.Ptr());
  return c.Ptr();
}

extern "C" mirror::Object* NterpAllocateObject(Thread* self,
                                               ArtMethod* caller,
                                               uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  DCHECK_EQ(inst->Opcode(), Instruction::NEW_INSTANCE);
  dex::TypeIndex index = dex::TypeIndex(inst->VRegB_21c());
  ObjPtr<mirror::Class> c =
      ResolveVerifyAndClinit(index,
                             caller,
                             self,
                             /* can_run_clinit= */ false,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(c == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }

  gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
  if (UNLIKELY(c->IsStringClass())) {
    // We don't cache the class for strings as we need to special case their
    // allocation.
    return mirror::String::AllocEmptyString(self, allocator_type).Ptr();
  } else {
    if (!c->IsFinalizable() && c->IsInstantiable()) {
      // Cache non-finalizable classes for next calls.
      UpdateCache(self, dex_pc_ptr, c.Ptr());
    }
    return AllocObjectFromCode(c, self, allocator_type).Ptr();
  }
}

extern "C" mirror::Object* NterpLoadObject(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  switch (inst->Opcode()) {
    case Instruction::CONST_STRING:
    case Instruction::CONST_STRING_JUMBO: {
      UpdateHotness(caller);
      dex::StringIndex string_index(
          (inst->Opcode() == Instruction::CONST_STRING)
              ? inst->VRegB_21c()
              : inst->VRegB_31c());
      ObjPtr<mirror::String> str = class_linker->ResolveString(string_index, caller);
      if (str == nullptr) {
        DCHECK(self->IsExceptionPending());
        return nullptr;
      }
      UpdateCache(self, dex_pc_ptr, str.Ptr());
      return str.Ptr();
    }
    case Instruction::CONST_METHOD_HANDLE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance sensitive entry.
      return class_linker->ResolveMethodHandle(self, inst->VRegB_21c(), caller).Ptr();
    }
    case Instruction::CONST_METHOD_TYPE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance sensitive entry.
      return class_linker->ResolveMethodType(
          self, dex::ProtoIndex(inst->VRegB_21c()), caller).Ptr();
    }
    default:
      LOG(FATAL) << "Unreachable";
  }
  return nullptr;
}

extern "C" void NterpUnimplemented() {
  LOG(FATAL) << "Unimplemented";
}

static mirror::Object* DoFilledNewArray(Thread* self,
                                        ArtMethod* caller,
                                        uint16_t* dex_pc_ptr,
                                        uint32_t* regs,
                                        bool is_range)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  if (kIsDebugBuild) {
    if (is_range) {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
    } else {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY);
    }
  }
  const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
  DCHECK_GE(length, 0);
  if (!is_range) {
    // Check that FILLED_NEW_ARRAY's length does not exceed 5 arguments.
    DCHECK_LE(length, 5);
  }
  uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
  ObjPtr<mirror::Class> array_class =
      ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
                             caller,
                             self,
                             /* can_run_clinit= */ true,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(array_class == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }
  DCHECK(array_class->IsArrayClass());
  ObjPtr<mirror::Class> component_class = array_class->GetComponentType();
  const bool is_primitive_int_component = component_class->IsPrimitiveInt();
  if (UNLIKELY(component_class->IsPrimitive() && !is_primitive_int_component)) {
    if (component_class->IsPrimitiveLong() || component_class->IsPrimitiveDouble()) {
      ThrowRuntimeException("Bad filled array request for type %s",
                            component_class->PrettyDescriptor().c_str());
    } else {
      self->ThrowNewExceptionF(
          "Ljava/lang/InternalError;",
          "Found type %s; filled-new-array not implemented for anything but 'int'",
          component_class->PrettyDescriptor().c_str());
    }
    return nullptr;
  }
  ObjPtr<mirror::Object> new_array = mirror::Array::Alloc(
      self,
      array_class,
      length,
      array_class->GetComponentSizeShift(),
      Runtime::Current()->GetHeap()->GetCurrentAllocator());
  if (UNLIKELY(new_array == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  uint32_t arg[Instruction::kMaxVarArgRegs];  // only used in filled-new-array.
  uint32_t vregC = 0;   // only used in filled-new-array-range.
  if (is_range) {
    vregC = inst->VRegC_3rc();
  } else {
    inst->GetVarArgs(arg);
  }
  for (int32_t i = 0; i < length; ++i) {
    size_t src_reg = is_range ? vregC + i : arg[i];
    if (is_primitive_int_component) {
      new_array->AsIntArray()->SetWithoutChecks</* kTransactionActive= */ false>(i, regs[src_reg]);
    } else {
      new_array->AsObjectArray<mirror::Object>()->SetWithoutChecks</* kTransactionActive= */ false>(
          i, reinterpret_cast<mirror::Object*>(regs[src_reg]));
    }
  }
  return new_array.Ptr();
}

extern "C" mirror::Object* NterpFilledNewArray(Thread* self,
                                               ArtMethod* caller,
                                               uint32_t* registers,
                                               uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ false);
}

extern "C" mirror::Object* NterpFilledNewArrayRange(Thread* self,
                                                    ArtMethod* caller,
                                                    uint32_t* registers,
                                                    uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ true);
}

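// Called by nterp when a method's hotness counter overflows, either on entry
// or on a back edge. Resets the counter and, when the JIT is active, requests
// compilation; on a back edge (dex_pc_ptr != nullptr) it also tries to prepare
// OSR data so nterp can jump into compiled code.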
extern "C" jit::OsrData* NterpHotMethod(ArtMethod* method, uint16_t* dex_pc_ptr, uint32_t* vregs)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // It is important that this method does not suspend, because it can be
  // called on method entry, and async deoptimization does not expect runtime
  // methods other than the suspend entrypoint before executing the first
  // instruction of a Java method.
  ScopedAssertNoThreadSuspension sants("In nterp");
  Runtime* runtime = Runtime::Current();
  if (method->IsMemorySharedMethod()) {
    if (!method->IsIntrinsic()) {
      // Intrinsics are special and will be considered hot from the first call.
      DCHECK_EQ(Thread::Current()->GetSharedMethodHotness(), 0u);
      Thread::Current()->ResetSharedMethodHotness();
    }
  } else {
    // Move the counter to the initial threshold in case we have to re-JIT it.
    method->ResetCounter(runtime->GetJITOptions()->GetWarmupThreshold());
    // Mark the method as warm for the profile saver.
    method->SetPreviouslyWarm();
  }
  jit::Jit* jit = runtime->GetJit();
  if (jit != nullptr && jit->UseJitCompilation()) {
    // Nterp passes null on entry where we don't want to OSR.
    if (dex_pc_ptr != nullptr) {
      // This could be a loop back edge, check if we can OSR.
      CodeItemInstructionAccessor accessor(method->DexInstructions());
      uint32_t dex_pc = dex_pc_ptr - accessor.Insns();
      jit::OsrData* osr_data = jit->PrepareForOsr(
          method->GetInterfaceMethodIfProxy(kRuntimePointerSize), dex_pc, vregs);
      if (osr_data != nullptr) {
        return osr_data;
      }
    }
    jit->MaybeEnqueueCompilation(method, Thread::Current());
  }
  return nullptr;
}

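/*
 * Find the target of a packed-switch.  Returns the offset to the handler
 * instructions, or 3 (the size of the packed-switch instruction) if testVal
 * falls outside the table range.
 */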
extern "C" ssize_t NterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const int kInstrLen = 3;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));

  uint16_t size = *switchData++;

  int32_t firstKey = *switchData++;
  firstKey |= (*switchData++) << 16;

  int index = testVal - firstKey;
  if (index < 0 || index >= size) {
    return kInstrLen;
  }

  /*
   * The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
  return entries[index];
}

/*
 * Find the matching case.  Returns the offset to the handler instructions.
 *
 * Returns 3 if we don't find a match (it's the size of the sparse-switch
 * instruction).
 */
extern "C" ssize_t NterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const int kInstrLen = 3;
  uint16_t size;
  const int32_t* keys;
  const int32_t* entries;

  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */

  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));

  size = *switchData++;

  /* The keys are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  keys = reinterpret_cast<const int32_t*>(switchData);

  /* The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  entries = keys + size;

  /*
   * Binary-search through the array of keys, which are guaranteed to
   * be sorted low-to-high.
   */
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) >> 1;

    int32_t foundVal = keys[mid];
    if (testVal < foundVal) {
      hi = mid - 1;
    } else if (testVal > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  return kInstrLen;
}

extern "C" void NterpFree(void* val) {
  free(val);
}

}  // namespace interpreter
}  // namespace art