/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
#define ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_

#include "android-base/macros.h"
#include "instrumentation.h"
#include "interpreter.h"
#include "interpreter_intrinsics.h"
#include "transaction.h"

#include <math.h>

#include <atomic>
#include <iostream>
#include <sstream>

#include <android-base/logging.h>
#include <android-base/stringprintf.h>

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/locks.h"
#include "base/logging.h"
#include "base/macros.h"
#include "class_linker-inl.h"
#include "class_root-inl.h"
#include "common_dex_operations.h"
#include "common_throws.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "handle_scope-inl.h"
#include "interpreter_mterp_impl.h"
#include "interpreter_switch_impl.h"
#include "jit/jit-inl.h"
#include "mirror/call_site.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/method.h"
#include "mirror/method_handles_lookup.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mterp/mterp.h"
#include "obj_ptr.h"
#include "stack.h"
#include "thread.h"
#include "unstarted_runtime.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"

namespace art {
namespace interpreter {

void ThrowNullPointerExceptionFromInterpreter()
    REQUIRES_SHARED(Locks::mutator_lock_);

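// Performs a monitor-enter on the given object on behalf of the interpreted frame. If an
// exception becomes pending while the lock is being acquired, the monitor is released again
// before returning. With kMonitorCounting enabled, the acquired monitor is also recorded in
// the frame's lock-count data.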
template <bool kMonitorCounting>
static inline void DoMonitorEnter(Thread* self, ShadowFrame* frame, ObjPtr<mirror::Object> ref)
    NO_THREAD_SAFETY_ANALYSIS
    REQUIRES(!Roles::uninterruptible_) {
  DCHECK(!ref.IsNull());
  StackHandleScope<1> hs(self);
  Handle<mirror::Object> h_ref(hs.NewHandle(ref));
  h_ref->MonitorEnter(self);
  DCHECK(self->HoldsLock(h_ref.Get()));
  if (UNLIKELY(self->IsExceptionPending())) {
    bool unlocked = h_ref->MonitorExit(self);
    DCHECK(unlocked);
    return;
  }
  if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
    frame->GetLockCountData().AddMonitor(self, h_ref.Get());
  }
}

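// Performs a monitor-exit on the given object on behalf of the interpreted frame. With
// kMonitorCounting enabled, the monitor is also removed from the frame's lock-count data,
// throwing if it was not recorded as held.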
template <bool kMonitorCounting>
static inline void DoMonitorExit(Thread* self, ShadowFrame* frame, ObjPtr<mirror::Object> ref)
    NO_THREAD_SAFETY_ANALYSIS
    REQUIRES(!Roles::uninterruptible_) {
  StackHandleScope<1> hs(self);
  Handle<mirror::Object> h_ref(hs.NewHandle(ref));
  h_ref->MonitorExit(self);
  if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
    frame->GetLockCountData().RemoveMonitorOrThrow(self, h_ref.Get());
  }
}

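// Verifies on method exit that all monitors recorded in the frame's lock-count data have been
// released. Returns true on success, otherwise throws an exception and returns false.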
template <bool kMonitorCounting>
static inline bool DoMonitorCheckOnExit(Thread* self, ShadowFrame* frame)
    NO_THREAD_SAFETY_ANALYSIS
    REQUIRES(!Roles::uninterruptible_) {
  if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
    return frame->GetLockCountData().CheckAllMonitorsReleasedOrThrow(self);
  }
  return true;
}

void AbortTransactionF(Thread* self, const char* fmt, ...)
    __attribute__((__format__(__printf__, 2, 3)))
    REQUIRES_SHARED(Locks::mutator_lock_);

void AbortTransactionV(Thread* self, const char* fmt, va_list args)
    REQUIRES_SHARED(Locks::mutator_lock_);

void RecordArrayElementsInTransaction(ObjPtr<mirror::Array> array, int32_t count)
    REQUIRES_SHARED(Locks::mutator_lock_);

// Invokes the given method. This is part of the invocation support and is used by the DoInvoke,
// DoFastInvoke and DoInvokeVirtualQuick functions.
// Returns true on success, otherwise throws an exception and returns false.
template<bool is_range, bool do_assignability_check>
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
            const Instruction* inst, uint16_t inst_data, JValue* result);

bool UseFastInterpreterToInterpreterInvoke(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_);

// Throws an exception if we are getting close to the end of the stack.
NO_INLINE bool CheckStackOverflow(Thread* self, size_t frame_size)
    REQUIRES_SHARED(Locks::mutator_lock_);

// Sends the normal method exit event.
// Returns true if the events succeeded and false if there is a pending exception.
template <typename T> bool SendMethodExitEvents(
    Thread* self,
    const instrumentation::Instrumentation* instrumentation,
    ShadowFrame& frame,
    ObjPtr<mirror::Object> thiz,
    ArtMethod* method,
    uint32_t dex_pc,
    T& result) REQUIRES_SHARED(Locks::mutator_lock_);

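// Returns true if a method-exit or watched-frame-pop event would need to be sent, i.e. if any
// listener for either event is currently installed.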
static inline ALWAYS_INLINE WARN_UNUSED bool
NeedsMethodExitEvent(const instrumentation::Instrumentation* ins)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return ins->HasMethodExitListeners() || ins->HasWatchedFramePopListeners();
}

// NO_INLINE so we won't bloat the interpreter with this very cold lock-release code.
template <bool kMonitorCounting>
static NO_INLINE void UnlockHeldMonitors(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(shadow_frame->GetForcePopFrame() ||
         Runtime::Current()->IsTransactionAborted());
  // Unlock all monitors.
  if (kMonitorCounting && shadow_frame->GetMethod()->MustCountLocks()) {
    // Get the monitors from the shadow-frame monitor-count data.
    shadow_frame->GetLockCountData().VisitMonitors(
      [&](mirror::Object** obj) REQUIRES_SHARED(Locks::mutator_lock_) {
        // Since we don't use the 'obj' pointer after the DoMonitorExit call, everything should
        // be fine with respect to suspension.
        DoMonitorExit<kMonitorCounting>(self, shadow_frame, *obj);
      });
  } else {
    std::vector<verifier::MethodVerifier::DexLockInfo> locks;
    verifier::MethodVerifier::FindLocksAtDexPc(shadow_frame->GetMethod(),
                                                shadow_frame->GetDexPC(),
                                                &locks,
                                                Runtime::Current()->GetTargetSdkVersion());
    for (const auto& reg : locks) {
      if (UNLIKELY(reg.dex_registers.empty())) {
        LOG(ERROR) << "Unable to determine reference locked by "
                    << shadow_frame->GetMethod()->PrettyMethod() << " at pc "
                    << shadow_frame->GetDexPC();
      } else {
        DoMonitorExit<kMonitorCounting>(
            self, shadow_frame, shadow_frame->GetVRegReference(*reg.dex_registers.begin()));
      }
    }
  }
}

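// Describes what PerformNonStandardReturn below should assume about the monitors held by the
// current frame.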
enum class MonitorState {
  kNoMonitorsLocked,
  kCountingMonitors,
  kNormalMonitors,
};

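// Performs a forced early return from a method: suppresses any pending exception, releases the
// monitors held by the frame (according to kMonitorState), clears the result, and reports the
// method-exit event to the instrumentation if anyone is listening.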
template<MonitorState kMonitorState>
static inline ALWAYS_INLINE void PerformNonStandardReturn(
      Thread* self,
      ShadowFrame& frame,
      JValue& result,
      const instrumentation::Instrumentation* instrumentation,
      uint16_t num_dex_inst,
      uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
  static constexpr bool kMonitorCounting = (kMonitorState == MonitorState::kCountingMonitors);
  ObjPtr<mirror::Object> thiz(frame.GetThisObject(num_dex_inst));
  StackHandleScope<1u> hs(self);
  Handle<mirror::Object> h_thiz(hs.NewHandle(thiz));
  if (UNLIKELY(self->IsExceptionPending())) {
    LOG(WARNING) << "Suppressing exception for non-standard method exit: "
                 << self->GetException()->Dump();
    self->ClearException();
  }
  if (kMonitorState != MonitorState::kNoMonitorsLocked) {
    UnlockHeldMonitors<kMonitorCounting>(self, &frame);
  }
  DoMonitorCheckOnExit<kMonitorCounting>(self, &frame);
  result = JValue();
  if (UNLIKELY(NeedsMethodExitEvent(instrumentation))) {
    SendMethodExitEvents(
        self, instrumentation, frame, h_thiz.Get(), frame.GetMethod(), dex_pc, result);
  }
}

// Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
// Returns true on success, otherwise throws an exception and returns false.
template<InvokeType type, bool is_range, bool do_access_check, bool is_mterp>
static ALWAYS_INLINE bool DoInvoke(Thread* self,
                                   ShadowFrame& shadow_frame,
                                   const Instruction* inst,
                                   uint16_t inst_data,
                                   JValue* result)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Make sure to check for async exceptions before anything else.
  if (is_mterp && self->UseMterp()) {
    DCHECK(!self->ObserveAsyncException());
  } else if (UNLIKELY(self->ObserveAsyncException())) {
    return false;
  }
  const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
  const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
  ArtMethod* sf_method = shadow_frame.GetMethod();

  // Try to find the method in the small thread-local cache first (the cache is only used when
  // nterp is not in use, as mterp and nterp use it in incompatible ways).
  InterpreterCache* tls_cache = self->GetInterpreterCache();
  size_t tls_value;
  ArtMethod* resolved_method;
  if (!IsNterpSupported() && LIKELY(tls_cache->Get(inst, &tls_value))) {
    resolved_method = reinterpret_cast<ArtMethod*>(tls_value);
  } else {
    ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
    constexpr ClassLinker::ResolveMode resolve_mode =
        do_access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
                        : ClassLinker::ResolveMode::kNoChecks;
    resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, sf_method, type);
    if (UNLIKELY(resolved_method == nullptr)) {
      CHECK(self->IsExceptionPending());
      result->SetJ(0);
      return false;
    }
    if (!IsNterpSupported()) {
      tls_cache->Set(inst, reinterpret_cast<size_t>(resolved_method));
    }
  }

  // Null pointer check and virtual method resolution.
  ObjPtr<mirror::Object> receiver =
      (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
  ArtMethod* called_method;
  called_method = FindMethodToCall<type, do_access_check>(
      method_idx, resolved_method, &receiver, sf_method, self);
  if (UNLIKELY(called_method == nullptr)) {
    CHECK(self->IsExceptionPending());
    result->SetJ(0);
    return false;
  }
  if (UNLIKELY(!called_method->IsInvokable())) {
    called_method->ThrowInvocationTimeError();
    result->SetJ(0);
    return false;
  }

  jit::Jit* jit = Runtime::Current()->GetJit();
  if (is_mterp && !is_range && called_method->IsIntrinsic()) {
    if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
                             shadow_frame.GetResultRegister())) {
      if (jit != nullptr && sf_method != nullptr) {
        jit->NotifyInterpreterToCompiledCodeTransition(self, sf_method);
      }
      return !self->IsExceptionPending();
    }
  }

  // Check whether we can use the fast path. The result is cached in the ArtMethod.
  // If the bit is not set, we explicitly recheck all the conditions.
  // If any of the conditions get falsified, it is important to clear the bit.
  bool use_fast_path = false;
  if (is_mterp && self->UseMterp()) {
    use_fast_path = called_method->UseFastInterpreterToInterpreterInvoke();
    if (!use_fast_path) {
      use_fast_path = UseFastInterpreterToInterpreterInvoke(called_method);
      if (use_fast_path) {
        called_method->SetFastInterpreterToInterpreterInvokeFlag();
      }
    }
  }

  if (use_fast_path) {
    DCHECK(Runtime::Current()->IsStarted());
    DCHECK(!Runtime::Current()->IsActiveTransaction());
    DCHECK(called_method->SkipAccessChecks());
    DCHECK(!called_method->IsNative());
    DCHECK(!called_method->IsProxyMethod());
    DCHECK(!called_method->IsIntrinsic());
    DCHECK(!(called_method->GetDeclaringClass()->IsStringClass() &&
        called_method->IsConstructor()));
    DCHECK(type != kStatic || called_method->GetDeclaringClass()->IsVisiblyInitialized());

    const uint16_t number_of_inputs =
        (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
    CodeItemDataAccessor accessor(called_method->DexInstructionData());
    uint32_t num_regs = accessor.RegistersSize();
    DCHECK_EQ(number_of_inputs, accessor.InsSize());
    DCHECK_GE(num_regs, number_of_inputs);
    size_t first_dest_reg = num_regs - number_of_inputs;

    if (UNLIKELY(!CheckStackOverflow(self, ShadowFrame::ComputeSize(num_regs)))) {
      return false;
    }

    if (jit != nullptr) {
      jit->AddSamples(self, called_method, 1, /* with_backedges */false);
    }

    // Create shadow frame on the stack.
    const char* old_cause = self->StartAssertNoThreadSuspension("DoFastInvoke");
    ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
        CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, /* dex pc */ 0);
    ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
    if (is_range) {
      size_t src = vregC;
      for (size_t i = 0, dst = first_dest_reg; i < number_of_inputs; ++i, ++dst, ++src) {
        *new_shadow_frame->GetVRegAddr(dst) = *shadow_frame.GetVRegAddr(src);
        *new_shadow_frame->GetShadowRefAddr(dst) = *shadow_frame.GetShadowRefAddr(src);
      }
    } else {
      uint32_t arg[Instruction::kMaxVarArgRegs];
      inst->GetVarArgs(arg, inst_data);
      for (size_t i = 0, dst = first_dest_reg; i < number_of_inputs; ++i, ++dst) {
        *new_shadow_frame->GetVRegAddr(dst) = *shadow_frame.GetVRegAddr(arg[i]);
        *new_shadow_frame->GetShadowRefAddr(dst) = *shadow_frame.GetShadowRefAddr(arg[i]);
      }
    }
    self->PushShadowFrame(new_shadow_frame);
    self->EndAssertNoThreadSuspension(old_cause);

    VLOG(interpreter) << "Interpreting " << called_method->PrettyMethod();

    DCheckStaticState(self, called_method);
    while (true) {
      // Mterp does not support all instrumentation/debugging.
      if (!self->UseMterp()) {
        *result =
            ExecuteSwitchImpl<false, false>(self, accessor, *new_shadow_frame, *result, false);
        break;
      }
      if (ExecuteMterpImpl(self, accessor.Insns(), new_shadow_frame, result)) {
        break;
      } else {
        // Mterp didn't like that instruction.  Single-step it with the reference interpreter.
        *result = ExecuteSwitchImpl<false, false>(self, accessor, *new_shadow_frame, *result, true);
        if (new_shadow_frame->GetDexPC() == dex::kDexNoIndex) {
          break;  // Single-stepped a return or an exception not handled locally.
        }
      }
    }
    self->PopShadowFrame();

    return !self->IsExceptionPending();
  }

  return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
                                           result);
}

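// Resolves the method handle at the given index in the referrer's dex file via the class linker.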
static inline ObjPtr<mirror::MethodHandle> ResolveMethodHandle(Thread* self,
                                                               uint32_t method_handle_index,
                                                               ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  return class_linker->ResolveMethodHandle(self, method_handle_index, referrer);
}

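// Resolves the method type at the given proto index in the referrer's dex file via the class
// linker.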
static inline ObjPtr<mirror::MethodType> ResolveMethodType(Thread* self,
                                                           dex::ProtoIndex method_type_index,
                                                           ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  return class_linker->ResolveMethodType(self, method_type_index, referrer);
}

#define DECLARE_SIGNATURE_POLYMORPHIC_HANDLER(Name, ...)              \
bool Do ## Name(Thread* self,                                         \
                ShadowFrame& shadow_frame,                            \
                const Instruction* inst,                              \
                uint16_t inst_data,                                   \
                JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
#include "intrinsics_list.h"
INTRINSICS_LIST(DECLARE_SIGNATURE_POLYMORPHIC_HANDLER)
#undef INTRINSICS_LIST
#undef DECLARE_SIGNATURE_POLYMORPHIC_HANDLER

// Performs an invoke-polymorphic or invoke-polymorphic-range.
template<bool is_range>
bool DoInvokePolymorphic(Thread* self,
                         ShadowFrame& shadow_frame,
                         const Instruction* inst,
                         uint16_t inst_data,
                         JValue* result)
    REQUIRES_SHARED(Locks::mutator_lock_);

bool DoInvokeCustom(Thread* self,
                    ShadowFrame& shadow_frame,
                    uint32_t call_site_idx,
                    const InstructionOperands* operands,
                    JValue* result)
    REQUIRES_SHARED(Locks::mutator_lock_);

// Performs a custom invoke (invoke-custom/invoke-custom-range).
template<bool is_range>
bool DoInvokeCustom(Thread* self,
                    ShadowFrame& shadow_frame,
                    const Instruction* inst,
                    uint16_t inst_data,
                    JValue* result)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const uint32_t call_site_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
  if (is_range) {
    RangeInstructionOperands operands(inst->VRegC_3rc(), inst->VRegA_3rc());
    return DoInvokeCustom(self, shadow_frame, call_site_idx, &operands, result);
  } else {
    uint32_t args[Instruction::kMaxVarArgRegs];
    inst->GetVarArgs(args, inst_data);
    VarArgsInstructionOperands operands(args, inst->VRegA_35c());
    return DoInvokeCustom(self, shadow_frame, call_site_idx, &operands, result);
  }
}

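// Reads vreg (and vreg+1 for wide values) out of the shadow frame as the given primitive or
// reference type and packages the value into a JValue.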
template<Primitive::Type field_type>
ALWAYS_INLINE static JValue GetFieldValue(const ShadowFrame& shadow_frame, uint32_t vreg)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue field_value;
  switch (field_type) {
    case Primitive::kPrimBoolean:
      field_value.SetZ(static_cast<uint8_t>(shadow_frame.GetVReg(vreg)));
      break;
    case Primitive::kPrimByte:
      field_value.SetB(static_cast<int8_t>(shadow_frame.GetVReg(vreg)));
      break;
    case Primitive::kPrimChar:
      field_value.SetC(static_cast<uint16_t>(shadow_frame.GetVReg(vreg)));
      break;
    case Primitive::kPrimShort:
      field_value.SetS(static_cast<int16_t>(shadow_frame.GetVReg(vreg)));
      break;
    case Primitive::kPrimInt:
      field_value.SetI(shadow_frame.GetVReg(vreg));
      break;
    case Primitive::kPrimLong:
      field_value.SetJ(shadow_frame.GetVRegLong(vreg));
      break;
    case Primitive::kPrimNot:
      field_value.SetL(shadow_frame.GetVRegReference(vreg));
      break;
    default:
      LOG(FATAL) << "Unreachable: " << field_type;
      UNREACHABLE();
  }
  return field_value;
}

// Handles iget-XXX and sget-XXX instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
         bool transaction_active = false>
ALWAYS_INLINE bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
                              uint16_t inst_data) REQUIRES_SHARED(Locks::mutator_lock_) {
  const bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
  const uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
  ArtField* f =
      FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
                                                    Primitive::ComponentSize(field_type));
  if (UNLIKELY(f == nullptr)) {
    CHECK(self->IsExceptionPending());
    return false;
  }
  ObjPtr<mirror::Object> obj;
  if (is_static) {
    obj = f->GetDeclaringClass();
    if (transaction_active) {
      if (Runtime::Current()->GetTransaction()->ReadConstraint(self, obj)) {
        Runtime::Current()->AbortTransactionAndThrowAbortError(self, "Can't read static fields of "
            + obj->PrettyTypeOf() + " since it does not belong to clinit's class.");
        return false;
      }
    }
  } else {
    obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
    if (UNLIKELY(obj == nullptr)) {
      ThrowNullPointerExceptionForFieldAccess(f, true);
      return false;
    }
  }

  JValue result;
  if (UNLIKELY(!DoFieldGetCommon<field_type>(self, shadow_frame, obj, f, &result))) {
    // Instrumentation threw an error!
    CHECK(self->IsExceptionPending());
    return false;
  }
  uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
  switch (field_type) {
    case Primitive::kPrimBoolean:
      shadow_frame.SetVReg(vregA, result.GetZ());
      break;
    case Primitive::kPrimByte:
      shadow_frame.SetVReg(vregA, result.GetB());
      break;
    case Primitive::kPrimChar:
      shadow_frame.SetVReg(vregA, result.GetC());
      break;
    case Primitive::kPrimShort:
      shadow_frame.SetVReg(vregA, result.GetS());
      break;
    case Primitive::kPrimInt:
      shadow_frame.SetVReg(vregA, result.GetI());
      break;
    case Primitive::kPrimLong:
      shadow_frame.SetVRegLong(vregA, result.GetJ());
      break;
    case Primitive::kPrimNot:
      shadow_frame.SetVRegReference(vregA, result.GetL());
      break;
    default:
      LOG(FATAL) << "Unreachable: " << field_type;
      UNREACHABLE();
  }
  return true;
}

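// Checks the active transaction's write constraint for the given object. On a violation, aborts
// the transaction with a descriptive error and returns false.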
static inline bool CheckWriteConstraint(Thread* self, ObjPtr<mirror::Object> obj)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  Runtime* runtime = Runtime::Current();
  if (runtime->GetTransaction()->WriteConstraint(self, obj)) {
    DCHECK(runtime->GetHeap()->ObjectIsInBootImageSpace(obj) || obj->IsClass());
    const char* base_msg = runtime->GetHeap()->ObjectIsInBootImageSpace(obj)
        ? "Can't set fields of boot image "
        : "Can't set fields of ";
    runtime->AbortTransactionAndThrowAbortError(self, base_msg + obj->PrettyTypeOf());
    return false;
  }
  return true;
}

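// Checks the active transaction's constraint on storing a reference to the given value. On a
// violation, aborts the transaction with a descriptive error and returns false.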
static inline bool CheckWriteValueConstraint(Thread* self, ObjPtr<mirror::Object> value)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  Runtime* runtime = Runtime::Current();
  if (runtime->GetTransaction()->WriteValueConstraint(self, value)) {
    DCHECK(value != nullptr);
    std::string msg = value->IsClass()
        ? "Can't store reference to class " + value->AsClass()->PrettyDescriptor()
        : "Can't store reference to instance of " + value->GetClass()->PrettyDescriptor();
    runtime->AbortTransactionAndThrowAbortError(self, msg);
    return false;
  }
  return true;
}

// Handles iput-XXX and sput-XXX instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
         bool transaction_active>
ALWAYS_INLINE bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
                              const Instruction* inst, uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const bool do_assignability_check = do_access_check;
  bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
  uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
  ArtField* f =
      FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
                                                    Primitive::ComponentSize(field_type));
  if (UNLIKELY(f == nullptr)) {
    CHECK(self->IsExceptionPending());
    return false;
  }
  ObjPtr<mirror::Object> obj;
  if (is_static) {
    obj = f->GetDeclaringClass();
  } else {
    obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
    if (UNLIKELY(obj == nullptr)) {
      ThrowNullPointerExceptionForFieldAccess(f, false);
      return false;
    }
  }
  if (transaction_active && !CheckWriteConstraint(self, obj)) {
    return false;
  }

  uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
  JValue value = GetFieldValue<field_type>(shadow_frame, vregA);

  if (transaction_active &&
      field_type == Primitive::kPrimNot &&
      !CheckWriteValueConstraint(self, value.GetL())) {
    return false;
  }

  return DoFieldPutCommon<field_type, do_assignability_check, transaction_active>(self,
                                                                                  shadow_frame,
                                                                                  obj,
                                                                                  f,
                                                                                  value);
}

// Handles string resolution for const-string and const-string-jumbo instructions. Also ensures the
// java.lang.String class is initialized.
static inline ObjPtr<mirror::String> ResolveString(Thread* self,
                                                   ShadowFrame& shadow_frame,
                                                   dex::StringIndex string_idx)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> java_lang_string_class = GetClassRoot<mirror::String>();
  if (UNLIKELY(!java_lang_string_class->IsVisiblyInitialized())) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> h_class(hs.NewHandle(java_lang_string_class));
    if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
                      self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
      DCHECK(self->IsExceptionPending());
      return nullptr;
    }
    DCHECK(h_class->IsInitializing());
  }
  ArtMethod* method = shadow_frame.GetMethod();
  ObjPtr<mirror::String> string_ptr =
      Runtime::Current()->GetClassLinker()->ResolveString(string_idx, method);
  return string_ptr;
}

// Handles div-int, div-int/2addr, div-int/lit16 and div-int/lit8 instructions.
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
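// Note that, as required by Java semantics, kMinInt / -1 must yield kMinInt instead of trapping
// on overflow (a plain native division could raise SIGFPE for this operand pair on some
// architectures), so that case is special-cased below.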
static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
                               int32_t dividend, int32_t divisor)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
  if (UNLIKELY(divisor == 0)) {
    ThrowArithmeticExceptionDivideByZero();
    return false;
  }
  if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
    shadow_frame.SetVReg(result_reg, kMinInt);
  } else {
    shadow_frame.SetVReg(result_reg, dividend / divisor);
  }
  return true;
}

// Handles rem-int, rem-int/2addr, rem-int/lit16 and rem-int/lit8 instructions.
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
                                  int32_t dividend, int32_t divisor)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
  if (UNLIKELY(divisor == 0)) {
    ThrowArithmeticExceptionDivideByZero();
    return false;
  }
  if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
    shadow_frame.SetVReg(result_reg, 0);
  } else {
    shadow_frame.SetVReg(result_reg, dividend % divisor);
  }
  return true;
}

// Handles div-long and div-long-2addr instructions.
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoLongDivide(ShadowFrame& shadow_frame,
                                size_t result_reg,
                                int64_t dividend,
                                int64_t divisor)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const int64_t kMinLong = std::numeric_limits<int64_t>::min();
  if (UNLIKELY(divisor == 0)) {
    ThrowArithmeticExceptionDivideByZero();
    return false;
  }
  if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
    shadow_frame.SetVRegLong(result_reg, kMinLong);
  } else {
    shadow_frame.SetVRegLong(result_reg, dividend / divisor);
  }
  return true;
}

// Handles rem-long and rem-long-2addr instructions.
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoLongRemainder(ShadowFrame& shadow_frame,
                                   size_t result_reg,
                                   int64_t dividend,
                                   int64_t divisor)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const int64_t kMinLong = std::numeric_limits<int64_t>::min();
  if (UNLIKELY(divisor == 0)) {
    ThrowArithmeticExceptionDivideByZero();
    return false;
  }
  if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
    shadow_frame.SetVRegLong(result_reg, 0);
  } else {
    shadow_frame.SetVRegLong(result_reg, dividend % divisor);
  }
  return true;
}

// Handles filled-new-array and filled-new-array-range instructions.
// Returns true on success, otherwise throws an exception and returns false.
template <bool is_range, bool do_access_check, bool transaction_active>
bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
                      Thread* self, JValue* result);

// Handles packed-switch instruction.
// Returns the branch offset to the next instruction to execute.
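// The packed-switch payload referenced by VRegB consists of the kPackedSwitchSignature ident
// (1 ushort), the entry count (1 ushort), the first key (1 int), and then `size` relative
// branch targets (ints), which the code below indexes directly.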
static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
                                     uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(inst->Opcode() == Instruction::PACKED_SWITCH);
  const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
  int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
  DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
  uint16_t size = switch_data[1];
  if (size == 0) {
    // Empty packed switch, move forward by 3 (size of PACKED_SWITCH).
    return 3;
  }
  const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
  DCHECK_ALIGNED(keys, 4);
  int32_t first_key = keys[0];
  const int32_t* targets = reinterpret_cast<const int32_t*>(&switch_data[4]);
  DCHECK_ALIGNED(targets, 4);
  int32_t index = test_val - first_key;
  if (index >= 0 && index < size) {
    return targets[index];
  } else {
    // No corresponding value: move forward by 3 (size of PACKED_SWITCH).
    return 3;
  }
}

// Handles sparse-switch instruction.
// Returns the branch offset to the next instruction to execute.
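// The sparse-switch payload referenced by VRegB consists of the kSparseSwitchSignature ident
// (1 ushort), the entry count (1 ushort), `size` sorted keys (ints), and then `size` matching
// relative branch targets (ints); the keys are binary-searched below.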
static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
                                     uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH);
  const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
  int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
  DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
  uint16_t size = switch_data[1];
  // Return length of SPARSE_SWITCH if size is 0.
  if (size == 0) {
    return 3;
  }
  const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
  DCHECK_ALIGNED(keys, 4);
  const int32_t* entries = keys + size;
  DCHECK_ALIGNED(entries, 4);
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) / 2;
    int32_t foundVal = keys[mid];
    if (test_val < foundVal) {
      hi = mid - 1;
    } else if (test_val > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  // No corresponding value: move forward by 3 (size of SPARSE_SWITCH).
  return 3;
}

// We execute any instrumentation events triggered by throwing and/or handling the pending
// exception and change the shadow_frame's dex_pc to the appropriate exception handler if the
// current method has one. If the exception has been handled and the shadow_frame is now pointing
// to a catch clause we return true. If the current method is unable to handle the exception we
// return false.
// This function accepts a null Instrumentation* as a way to cause instrumentation events not to
// be reported.
// TODO We might wish to reconsider how we cause some events to be ignored.
bool MoveToExceptionHandler(Thread* self,
                            ShadowFrame& shadow_frame,
                            const instrumentation::Instrumentation* instrumentation)
    REQUIRES_SHARED(Locks::mutator_lock_);

NO_RETURN void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame)
  __attribute__((cold))
  REQUIRES_SHARED(Locks::mutator_lock_);

// Set to true if you want a TraceExecution invocation before each bytecode execution.
constexpr bool kTraceExecutionEnabled = false;

static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst,
                                  const uint32_t dex_pc)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (kTraceExecutionEnabled) {
#define TRACE_LOG std::cerr
    std::ostringstream oss;
    oss << shadow_frame.GetMethod()->PrettyMethod()
        << android::base::StringPrintf("\n0x%x: ", dex_pc)
        << inst->DumpString(shadow_frame.GetMethod()->GetDexFile()) << "\n";
    for (uint32_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) {
      uint32_t raw_value = shadow_frame.GetVReg(i);
      ObjPtr<mirror::Object> ref_value = shadow_frame.GetVRegReference(i);
      oss << android::base::StringPrintf(" vreg%u=0x%08X", i, raw_value);
      if (ref_value != nullptr) {
        if (ref_value->GetClass()->IsStringClass() &&
            !ref_value->AsString()->IsValueNull()) {
          oss << "/java.lang.String \"" << ref_value->AsString()->ToModifiedUtf8() << "\"";
        } else {
          oss << "/" << ref_value->PrettyTypeOf();
        }
      }
    }
    TRACE_LOG << oss.str() << "\n";
#undef TRACE_LOG
  }
}

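// A branch offset of zero or less jumps backwards (or to the same instruction), i.e. it is a
// potential loop back-edge.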
static inline bool IsBackwardBranch(int32_t branch_offset) {
  return branch_offset <= 0;
}

// The arg_offset is the offset to the first input register in the frame.
void ArtInterpreterToCompiledCodeBridge(Thread* self,
                                        ArtMethod* caller,
                                        ShadowFrame* shadow_frame,
                                        uint16_t arg_offset,
                                        JValue* result);

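// Returns whether the method at the given index in the dex file is a java.lang.String
// constructor. The lookup is purely symbolic, so it cannot suspend or trigger GC.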
static inline bool IsStringInit(const DexFile* dex_file, uint32_t method_idx)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const dex::MethodId& method_id = dex_file->GetMethodId(method_idx);
  const char* class_name = dex_file->StringByTypeIdx(method_id.class_idx_);
  const char* method_name = dex_file->GetMethodName(method_id);
  // Instead of calling ResolveMethod(), which has a suspend point and can trigger GC, look up
  // the method symbolically.
  // Compare the method's class name and method name against the string init.
  // It's ok since it's not allowed to create your own java/lang/String.
  // TODO: verify that assumption.
  if ((strcmp(class_name, "Ljava/lang/String;") == 0) &&
      (strcmp(method_name, "<init>") == 0)) {
    return true;
  }
  return false;
}

static inline bool IsStringInit(const Instruction* instr, ArtMethod* caller)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (instr->Opcode() == Instruction::INVOKE_DIRECT ||
      instr->Opcode() == Instruction::INVOKE_DIRECT_RANGE) {
    uint16_t callee_method_idx = (instr->Opcode() == Instruction::INVOKE_DIRECT_RANGE) ?
        instr->VRegB_3rc() : instr->VRegB_35c();
    return IsStringInit(caller->GetDexFile(), callee_method_idx);
  }
  return false;
}

// Set the string value created from StringFactory.newStringFromXXX() into all aliases of
// StringFactory.newEmptyString().
void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
                                    uint16_t this_obj_vreg,
                                    JValue result);

}  // namespace interpreter
}  // namespace art

#endif  // ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_