/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Mterp entry point and support functions.
 */
#include "nterp.h"

#include "base/quasi_atomic.h"
#include "class_linker-inl.h"
#include "dex/dex_instruction_utils.h"
#include "debugger.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "interpreter/interpreter_cache-inl.h"
#include "interpreter/interpreter_common.h"
#include "interpreter/shadow_frame-inl.h"
#include "mirror/string-alloc-inl.h"
#include "nterp_helpers.h"

namespace art {
namespace interpreter {

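// Adds a fixed amount of hotness to `method` each time nterp falls back to a
// runtime lookup, so that frequently interpreted methods eventually reach the
// warmup threshold and get considered for JIT compilation.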
inline void UpdateHotness(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  // The hotness we will add to a method when we perform a
  // field/method/class/string lookup.
  constexpr uint16_t kNterpHotnessLookup = 0xf;
  method->UpdateCounter(kNterpHotnessLookup);
}

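// Stores `value` in the calling thread's interpreter cache, keyed by `dex_pc_ptr`,
// so that subsequent executions of the same dex instruction can skip this slow path.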
template<typename T>
inline void UpdateCache(Thread* self, const uint16_t* dex_pc_ptr, T value) {
  self->GetInterpreterCache()->Set(self, dex_pc_ptr, value);
}

template<typename T>
inline void UpdateCache(Thread* self, const uint16_t* dex_pc_ptr, T* value) {
  UpdateCache(self, dex_pc_ptr, reinterpret_cast<size_t>(value));
}

#ifdef __arm__

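// Copies the floating point arguments, which the ARM32 hard-float convention
// passed in FP registers (or on the stack), into the core `registers` array
// that nterp uses for dex registers. `shorty` drives which argument slots hold
// floats, doubles, longs or other types.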
extern "C" void NterpStoreArm32Fprs(const char* shorty,
                                    uint32_t* registers,
                                    uint32_t* stack_args,
                                    const uint32_t* fprs) {
  // Note: `shorty` already has the return type removed.
  ScopedAssertNoThreadSuspension sants("In nterp");
  uint32_t arg_index = 0;
  uint32_t fpr_double_index = 0;
  uint32_t fpr_index = 0;
  for (uint32_t shorty_index = 0; shorty[shorty_index] != '\0'; ++shorty_index) {
    char arg_type = shorty[shorty_index];
    switch (arg_type) {
      case 'D': {
        // Double should not overlap with float.
        fpr_double_index = std::max(fpr_double_index, RoundUp(fpr_index, 2));
        if (fpr_double_index < 16) {
          registers[arg_index] = fprs[fpr_double_index++];
          registers[arg_index + 1] = fprs[fpr_double_index++];
        } else {
          registers[arg_index] = stack_args[arg_index];
          registers[arg_index + 1] = stack_args[arg_index + 1];
        }
        arg_index += 2;
        break;
      }
      case 'F': {
        if (fpr_index % 2 == 0) {
          fpr_index = std::max(fpr_double_index, fpr_index);
        }
        if (fpr_index < 16) {
          registers[arg_index] = fprs[fpr_index++];
        } else {
          registers[arg_index] = stack_args[arg_index];
        }
        arg_index++;
        break;
      }
      case 'J': {
        arg_index += 2;
        break;
      }
      default: {
        arg_index++;
        break;
      }
    }
  }
}

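// Inverse of NterpStoreArm32Fprs: scatters float and double arguments from the
// dex `registers` into the FP registers (or outgoing stack slots) expected by
// the ARM32 hard-float calling convention before making a call.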
extern "C" void NterpSetupArm32Fprs(const char* shorty,
                                    uint32_t dex_register,
                                    uint32_t stack_index,
                                    uint32_t* fprs,
                                    uint32_t* registers,
                                    uint32_t* stack_args) {
  // Note: `shorty` already has the return type removed.
  ScopedAssertNoThreadSuspension sants("In nterp");
  uint32_t fpr_double_index = 0;
  uint32_t fpr_index = 0;
  for (uint32_t shorty_index = 0; shorty[shorty_index] != '\0'; ++shorty_index) {
    char arg_type = shorty[shorty_index];
    switch (arg_type) {
      case 'D': {
        // Double should not overlap with float.
        fpr_double_index = std::max(fpr_double_index, RoundUp(fpr_index, 2));
        if (fpr_double_index < 16) {
          fprs[fpr_double_index++] = registers[dex_register++];
          fprs[fpr_double_index++] = registers[dex_register++];
          stack_index += 2;
        } else {
          stack_args[stack_index++] = registers[dex_register++];
          stack_args[stack_index++] = registers[dex_register++];
        }
        break;
      }
      case 'F': {
        if (fpr_index % 2 == 0) {
          fpr_index = std::max(fpr_double_index, fpr_index);
        }
        if (fpr_index < 16) {
          fprs[fpr_index++] = registers[dex_register++];
          stack_index++;
        } else {
          stack_args[stack_index++] = registers[dex_register++];
        }
        break;
      }
      case 'J': {
        stack_index += 2;
        dex_register += 2;
        break;
      }
      default: {
        stack_index++;
        dex_register++;
        break;
      }
    }
  }
}

#endif

extern "C" const dex::CodeItem* NterpGetCodeItem(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetCodeItem();
}

extern "C" const char* NterpGetShorty(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty();
}

extern "C" const char* NterpGetShortyFromMethodId(ArtMethod* caller, uint32_t method_index)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return caller->GetDexFile()->GetMethodShorty(method_index);
}

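// Returns the shorty of the proto referenced by the invoke-polymorphic[/range]
// instruction at `dex_pc_ptr`.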
extern "C" const char* NterpGetShortyFromInvokePolymorphic(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  dex::ProtoIndex proto_idx(inst->Opcode() == Instruction::INVOKE_POLYMORPHIC
      ? inst->VRegH_45cc()
      : inst->VRegH_4rcc());
  return caller->GetDexFile()->GetShorty(proto_idx);
}

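// Returns the shorty of the call site referenced by the invoke-custom[/range]
// instruction at `dex_pc_ptr`.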
extern "C" const char* NterpGetShortyFromInvokeCustom(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t call_site_index = (inst->Opcode() == Instruction::INVOKE_CUSTOM
      ? inst->VRegB_35c()
      : inst->VRegB_3rc());
  const DexFile* dex_file = caller->GetDexFile();
  dex::ProtoIndex proto_idx = dex_file->GetProtoIndexForCallSite(call_site_index);
  return dex_file->GetShorty(proto_idx);
}

static constexpr uint8_t kInvalidInvokeType = 255u;
static_assert(static_cast<uint8_t>(kMaxInvokeType) < kInvalidInvokeType);

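// Maps an invoke opcode to its InvokeType, or kInvalidInvokeType for any other opcode.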
static constexpr uint8_t GetOpcodeInvokeType(uint8_t opcode) {
  switch (opcode) {
    case Instruction::INVOKE_DIRECT:
    case Instruction::INVOKE_DIRECT_RANGE:
      return static_cast<uint8_t>(kDirect);
    case Instruction::INVOKE_INTERFACE:
    case Instruction::INVOKE_INTERFACE_RANGE:
      return static_cast<uint8_t>(kInterface);
    case Instruction::INVOKE_STATIC:
    case Instruction::INVOKE_STATIC_RANGE:
      return static_cast<uint8_t>(kStatic);
    case Instruction::INVOKE_SUPER:
    case Instruction::INVOKE_SUPER_RANGE:
      return static_cast<uint8_t>(kSuper);
    case Instruction::INVOKE_VIRTUAL:
    case Instruction::INVOKE_VIRTUAL_RANGE:
      return static_cast<uint8_t>(kVirtual);

    default:
      return kInvalidInvokeType;
  }
}

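// Builds a 256-entry table so that NterpGetMethod can turn any opcode byte into
// an invoke type with a single array load.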
static constexpr std::array<uint8_t, 256u> GenerateOpcodeInvokeTypes() {
  std::array<uint8_t, 256u> opcode_invoke_types{};
  for (size_t opcode = 0u; opcode != opcode_invoke_types.size(); ++opcode) {
    opcode_invoke_types[opcode] = GetOpcodeInvokeType(opcode);
  }
  return opcode_invoke_types;
}

static constexpr std::array<uint8_t, 256u> kOpcodeInvokeTypes = GenerateOpcodeInvokeTypes();

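// Resolves the method referenced by the invoke at `dex_pc_ptr` and returns it in
// the encoding nterp expects: interface invokes on Object methods return the
// vtable index with the low bit set, default interface methods return the
// ArtMethod pointer with bit 1 set, string constructors return the String
// factory with the low bit set (and are not cached), virtual invokes return the
// vtable index, and everything else returns the ArtMethod pointer. Returns 0
// with a pending exception on failure.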
FLATTEN
extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, const uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  Instruction::Code opcode = inst->Opcode();
  DCHECK(IsUint<8>(static_cast<std::underlying_type_t<Instruction::Code>>(opcode)));
  uint8_t raw_invoke_type = kOpcodeInvokeTypes[opcode];
  DCHECK_LE(raw_invoke_type, kMaxInvokeType);
  InvokeType invoke_type = static_cast<InvokeType>(raw_invoke_type);

  // In release mode, this is just a simple load.
  // In debug mode, this checks that we're using the correct instruction format.
  uint16_t method_index =
      (opcode >= Instruction::INVOKE_VIRTUAL_RANGE) ? inst->VRegB_3rc() : inst->VRegB_35c();

  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtMethod* resolved_method = caller->SkipAccessChecks()
      ? class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
            self, method_index, caller, invoke_type)
      : class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
            self, method_index, caller, invoke_type);
  if (resolved_method == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }

  if (invoke_type == kSuper) {
    resolved_method = caller->SkipAccessChecks()
        ? FindSuperMethodToCall</*access_check=*/false>(method_index, resolved_method, caller, self)
        : FindSuperMethodToCall</*access_check=*/true>(method_index, resolved_method, caller, self);
    if (resolved_method == nullptr) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
  }

  if (invoke_type == kInterface) {
    size_t result = 0u;
    if (resolved_method->GetDeclaringClass()->IsObjectClass()) {
      // Set the low bit to notify the interpreter it should do a vtable call.
      DCHECK_LT(resolved_method->GetMethodIndex(), 0x10000);
      result = (resolved_method->GetMethodIndex() << 16) | 1U;
    } else {
      DCHECK(resolved_method->GetDeclaringClass()->IsInterface());
      DCHECK(!resolved_method->IsCopied());
      if (!resolved_method->IsAbstract()) {
        // Set the second bit to notify the interpreter this is a default
        // method.
        result = reinterpret_cast<size_t>(resolved_method) | 2U;
      } else {
        result = reinterpret_cast<size_t>(resolved_method);
      }
    }
    UpdateCache(self, dex_pc_ptr, result);
    return result;
  } else if (resolved_method->IsStringConstructor()) {
    CHECK_NE(invoke_type, kSuper);
    resolved_method = WellKnownClasses::StringInitToStringFactory(resolved_method);
    // OR the result with 1 to notify nterp that this is a string init method. We
    // don't cache the result, as we don't want nterp's fast path to always check
    // for it, and we expect far more regular calls than string init calls.
    return reinterpret_cast<size_t>(resolved_method) | 1;
  } else if (invoke_type == kVirtual) {
    UpdateCache(self, dex_pc_ptr, resolved_method->GetMethodIndex());
    return resolved_method->GetMethodIndex();
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_method);
    return reinterpret_cast<size_t>(resolved_method);
  }
}

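// Resolves the static field referenced by the sget/sput at `dex_pc_ptr` and makes
// sure its declaring class is at least initializing. Returns the ArtField pointer,
// with the low bit set for volatile fields, or 0 with a pending exception.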
extern "C" size_t NterpGetStaticField(Thread* self,
                                      ArtMethod* caller,
                                      const uint16_t* dex_pc_ptr,
                                      size_t resolve_field_type)  // Resolve if not zero.
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegB_21c();
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  Instruction::Code opcode = inst->Opcode();
  ArtField* resolved_field = ResolveFieldWithAccessChecks(
      self,
      class_linker,
      field_index,
      caller,
      /*is_static=*/ true,
      /*is_put=*/ IsInstructionSPut(opcode),
      resolve_field_type);

  if (resolved_field == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }
  if (UNLIKELY(!resolved_field->GetDeclaringClass()->IsVisiblyInitialized())) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> h_class(hs.NewHandle(resolved_field->GetDeclaringClass()));
    if (UNLIKELY(!class_linker->EnsureInitialized(
            self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
    DCHECK(h_class->IsInitializing());
  }
  if (resolved_field->IsVolatile()) {
    // OR the result with 1 to notify nterp that this is a volatile field. We
    // don't cache the result, as we don't want nterp's fast path to always
    // check for it.
    return reinterpret_cast<size_t>(resolved_field) | 1;
  } else {
    // For sput-object, try to resolve the field type even if we were not requested to.
    // Only if the field type is successfully resolved can we update the cache. If we
    // fail to resolve the type, we clear the exception to keep interpreter
    // semantics of not throwing when null is stored.
    if (opcode == Instruction::SPUT_OBJECT &&
        resolve_field_type == 0 &&
        resolved_field->ResolveType() == nullptr) {
      DCHECK(self->IsExceptionPending());
      self->ClearException();
    } else {
      UpdateCache(self, dex_pc_ptr, resolved_field);
    }
    return reinterpret_cast<size_t>(resolved_field);
  }
}

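// Resolves the instance field referenced by the iget/iput at `dex_pc_ptr` and
// returns its offset; volatile fields are reported as a negated offset and are
// not cached. Returns 0 with a pending exception on failure.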
extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
                                                ArtMethod* caller,
                                                const uint16_t* dex_pc_ptr,
                                                size_t resolve_field_type)  // Resolve if not zero.
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegC_22c();
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  Instruction::Code opcode = inst->Opcode();
  ArtField* resolved_field = ResolveFieldWithAccessChecks(
      self,
      class_linker,
      field_index,
      caller,
      /*is_static=*/ false,
      /*is_put=*/ IsInstructionIPut(opcode),
      resolve_field_type);
  if (resolved_field == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }
  if (resolved_field->IsVolatile()) {
    // Don't cache volatile fields, and return a negated offset to mark the
    // field as volatile.
    return -resolved_field->GetOffset().Uint32Value();
  }
  // For iput-object, try to resolve the field type even if we were not requested to.
  // Only if the field type is successfully resolved can we update the cache. If we
  // fail to resolve the type, we clear the exception to keep interpreter
  // semantics of not throwing when null is stored.
  if (opcode == Instruction::IPUT_OBJECT &&
      resolve_field_type == 0 &&
      resolved_field->ResolveType() == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_field->GetOffset().Uint32Value());
  }
  return resolved_field->GetOffset().Uint32Value();
}

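// Resolves the class referenced by a check-cast, instance-of, const-class or
// new-array instruction, without running its class initializer.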
extern "C" mirror::Object* NterpGetClass(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  Instruction::Code opcode = inst->Opcode();
  DCHECK(opcode == Instruction::CHECK_CAST ||
         opcode == Instruction::INSTANCE_OF ||
         opcode == Instruction::CONST_CLASS ||
         opcode == Instruction::NEW_ARRAY);

  // In release mode, this is just a simple load.
  // In debug mode, this checks that we're using the correct instruction format.
  dex::TypeIndex index = dex::TypeIndex(
      (opcode == Instruction::CHECK_CAST || opcode == Instruction::CONST_CLASS)
          ? inst->VRegB_21c()
          : inst->VRegC_22c());

  ObjPtr<mirror::Class> c =
      ResolveVerifyAndClinit(index,
                             caller,
                             self,
                             /* can_run_clinit= */ false,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(c == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }

  UpdateCache(self, dex_pc_ptr, c.Ptr());
  return c.Ptr();
}

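// Slow path for new-instance: resolves the class and allocates the object.
// String allocations are special-cased and never cached.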
extern "C" mirror::Object* NterpAllocateObject(Thread* self,
                                               ArtMethod* caller,
                                               uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  DCHECK_EQ(inst->Opcode(), Instruction::NEW_INSTANCE);
  dex::TypeIndex index = dex::TypeIndex(inst->VRegB_21c());
  ObjPtr<mirror::Class> c =
      ResolveVerifyAndClinit(index,
                             caller,
                             self,
                             /* can_run_clinit= */ false,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(c == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }

  gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
  if (UNLIKELY(c->IsStringClass())) {
    // We don't cache the class for strings as we need to special case their
    // allocation.
    return mirror::String::AllocEmptyString(self, allocator_type).Ptr();
  } else {
    if (!c->IsFinalizable() && c->IsInstantiable()) {
      // Cache non-finalizable classes for next calls.
      UpdateCache(self, dex_pc_ptr, c.Ptr());
    }
    return AllocObjectFromCode(c, self, allocator_type).Ptr();
  }
}

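// Slow path for const-string[/jumbo], const-method-handle and const-method-type.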
extern "C" mirror::Object* NterpLoadObject(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  switch (inst->Opcode()) {
    case Instruction::CONST_STRING:
    case Instruction::CONST_STRING_JUMBO: {
      UpdateHotness(caller);
      dex::StringIndex string_index(
          (inst->Opcode() == Instruction::CONST_STRING)
              ? inst->VRegB_21c()
              : inst->VRegB_31c());
      ObjPtr<mirror::String> str = class_linker->ResolveString(string_index, caller);
      if (str == nullptr) {
        DCHECK(self->IsExceptionPending());
        return nullptr;
      }
      UpdateCache(self, dex_pc_ptr, str.Ptr());
      return str.Ptr();
    }
    case Instruction::CONST_METHOD_HANDLE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance sensitive entry.
      return class_linker->ResolveMethodHandle(self, inst->VRegB_21c(), caller).Ptr();
    }
    case Instruction::CONST_METHOD_TYPE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance sensitive entry.
      return class_linker->ResolveMethodType(
          self, dex::ProtoIndex(inst->VRegB_21c()), caller).Ptr();
    }
    default:
      LOG(FATAL) << "Unreachable";
  }
  return nullptr;
}

extern "C" void NterpUnimplemented() {
  LOG(FATAL) << "Unimplemented";
}

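// Shared implementation of filled-new-array[/range]: resolves the array class,
// allocates the array, and fills it from the argument registers. Only 'int' and
// reference component types are supported.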
static mirror::Object* DoFilledNewArray(Thread* self,
                                        ArtMethod* caller,
                                        uint16_t* dex_pc_ptr,
                                        uint32_t* regs,
                                        bool is_range)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  if (kIsDebugBuild) {
    if (is_range) {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
    } else {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY);
    }
  }
  const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
  DCHECK_GE(length, 0);
  if (!is_range) {
    // Check that FILLED_NEW_ARRAY's length does not exceed 5 arguments.
    DCHECK_LE(length, 5);
  }
  uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
  ObjPtr<mirror::Class> array_class =
      ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
                             caller,
                             self,
                             /* can_run_clinit= */ true,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(array_class == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }
  DCHECK(array_class->IsArrayClass());
  ObjPtr<mirror::Class> component_class = array_class->GetComponentType();
  const bool is_primitive_int_component = component_class->IsPrimitiveInt();
  if (UNLIKELY(component_class->IsPrimitive() && !is_primitive_int_component)) {
    if (component_class->IsPrimitiveLong() || component_class->IsPrimitiveDouble()) {
      ThrowRuntimeException("Bad filled array request for type %s",
                            component_class->PrettyDescriptor().c_str());
    } else {
      self->ThrowNewExceptionF(
          "Ljava/lang/InternalError;",
          "Found type %s; filled-new-array not implemented for anything but 'int'",
          component_class->PrettyDescriptor().c_str());
    }
    return nullptr;
  }
  ObjPtr<mirror::Object> new_array = mirror::Array::Alloc(
      self,
      array_class,
      length,
      array_class->GetComponentSizeShift(),
      Runtime::Current()->GetHeap()->GetCurrentAllocator());
  if (UNLIKELY(new_array == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  uint32_t arg[Instruction::kMaxVarArgRegs];  // Only used in filled-new-array.
  uint32_t vregC = 0;  // Only used in filled-new-array-range.
  if (is_range) {
    vregC = inst->VRegC_3rc();
  } else {
    inst->GetVarArgs(arg);
  }
  for (int32_t i = 0; i < length; ++i) {
    size_t src_reg = is_range ? vregC + i : arg[i];
    if (is_primitive_int_component) {
      new_array->AsIntArray()->SetWithoutChecks</* kTransactionActive= */ false>(i, regs[src_reg]);
    } else {
      new_array->AsObjectArray<mirror::Object>()->SetWithoutChecks</* kTransactionActive= */ false>(
          i, reinterpret_cast<mirror::Object*>(regs[src_reg]));
    }
  }
  return new_array.Ptr();
}

extern "C" mirror::Object* NterpFilledNewArray(Thread* self,
                                               ArtMethod* caller,
                                               uint32_t* registers,
                                               uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ false);
}

extern "C" mirror::Object* NterpFilledNewArrayRange(Thread* self,
                                                    ArtMethod* caller,
                                                    uint32_t* registers,
                                                    uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ true);
}

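// Called when a method's hotness counter overflows in nterp. Resets the counter,
// tries to prepare OSR data when called from a loop back edge, and otherwise asks
// the JIT to consider compiling the method.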
extern "C" jit::OsrData* NterpHotMethod(ArtMethod* method, uint16_t* dex_pc_ptr, uint32_t* vregs)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // It is important that this method does not suspend: it can be called on
  // method entry, and async deoptimization does not expect runtime methods
  // other than the suspend entrypoint before executing the first instruction
  // of a Java method.
  ScopedAssertNoThreadSuspension sants("In nterp");
  Runtime* runtime = Runtime::Current();
  if (method->IsMemorySharedMethod()) {
    DCHECK_EQ(Thread::Current()->GetSharedMethodHotness(), 0u);
    Thread::Current()->ResetSharedMethodHotness();
  } else {
    method->ResetCounter(runtime->GetJITOptions()->GetWarmupThreshold());
  }
  jit::Jit* jit = runtime->GetJit();
  if (jit != nullptr && jit->UseJitCompilation()) {
    // Nterp passes null on entry where we don't want to OSR.
    if (dex_pc_ptr != nullptr) {
      // This could be a loop back edge; check if we can OSR.
      CodeItemInstructionAccessor accessor(method->DexInstructions());
      uint32_t dex_pc = dex_pc_ptr - accessor.Insns();
      jit::OsrData* osr_data = jit->PrepareForOsr(
          method->GetInterfaceMethodIfProxy(kRuntimePointerSize), dex_pc, vregs);
      if (osr_data != nullptr) {
        return osr_data;
      }
    }
    jit->MaybeEnqueueCompilation(method, Thread::Current());
  }
  return nullptr;
}

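// Looks up `testVal` in a packed-switch payload and returns the branch offset,
// or the size of the switch instruction (3 code units) when the value is out of range.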
extern "C" ssize_t NterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const int kInstrLen = 3;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));

  uint16_t size = *switchData++;

  int32_t firstKey = *switchData++;
  firstKey |= (*switchData++) << 16;

  int index = testVal - firstKey;
  if (index < 0 || index >= size) {
    return kInstrLen;
  }

  /*
   * The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
  return entries[index];
}

/*
 * Find the matching case. Returns the offset to the handler instructions.
 *
 * Returns 3 if we don't find a match (it's the size of the sparse-switch
 * instruction).
 */
extern "C" ssize_t NterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const int kInstrLen = 3;
  uint16_t size;
  const int32_t* keys;
  const int32_t* entries;

  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */

  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));

  size = *switchData++;

  /* The keys are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  keys = reinterpret_cast<const int32_t*>(switchData);

  /* The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  entries = keys + size;

  /*
   * Binary-search through the array of keys, which are guaranteed to
   * be sorted low-to-high.
   */
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) >> 1;

    int32_t foundVal = keys[mid];
    if (testVal < foundVal) {
      hi = mid - 1;
    } else if (testVal > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  return kInstrLen;
}

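// Releases memory handed back to the nterp assembly by a slow-path helper
// (presumably the OsrData returned by NterpHotMethod above).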
extern "C" void NterpFree(void* val) {
  free(val);
}

}  // namespace interpreter
}  // namespace art