/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_PROFILING_INFO_H_
#define ART_RUNTIME_JIT_PROFILING_INFO_H_

#include <cstdint>
#include <cstring>
#include <limits>
#include <vector>

#include "base/macros.h"
#include "gc_root.h"
namespace art {

class ArtMethod;
class ProfilingInfo;
class Thread;

namespace jit {
class JitCodeCache;
}  // namespace jit

namespace mirror {
class Class;
}  // namespace mirror

// Structure to store the classes seen at runtime for a specific instruction.
// Once the classes_ array is full, we consider the INVOKE to be megamorphic.
class InlineCache {
 public:
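  // The cache holds exactly one class: slot 0 is filled and slot 1 is empty.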
  bool IsMonomorphic() const {
    DCHECK_GE(kIndividualCacheSize, 2);
    return !classes_[0].IsNull() && classes_[1].IsNull();
  }

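  // Every slot is filled: too many receiver classes were seen to track them
  // individually.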
  bool IsMegamorphic() const {
    for (size_t i = 0; i < kIndividualCacheSize; ++i) {
      if (classes_[i].IsNull()) {
        return false;
      }
    }
    return true;
  }

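  // Return the class in the first slot; callers expect the cache to be
  // monomorphic, but see the note below about concurrent updates.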
  mirror::Class* GetMonomorphicType() const SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note that we cannot ensure the inline cache is actually monomorphic
    // at this point, as other threads may have updated it.
    DCHECK(!classes_[0].IsNull());
    return classes_[0].Read();
  }

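  // No class has been recorded yet.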
  bool IsUninitialized() const {
    return classes_[0].IsNull();
  }

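  // At least two classes recorded, but the last slot is still empty, so the
  // cache is not (yet) megamorphic.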
  bool IsPolymorphic() const {
    DCHECK_GE(kIndividualCacheSize, 3);
    return !classes_[1].IsNull() && classes_[kIndividualCacheSize - 1].IsNull();
  }

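  // Return the class recorded in slot 'i', or null if that slot is empty.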
  mirror::Class* GetTypeAt(size_t i) const SHARED_REQUIRES(Locks::mutator_lock_) {
    return classes_[i].Read();
  }

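  // Maximum number of classes recorded per call site before the cache is
  // considered megamorphic.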
  static constexpr uint16_t kIndividualCacheSize = 5;

 private:
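  // Dex pc of the INVOKE instruction this cache profiles.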
  uint32_t dex_pc_;
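  // Classes seen at this call site; null entries mark unused slots.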
  GcRoot<mirror::Class> classes_[kIndividualCacheSize];

  friend class ProfilingInfo;

  DISALLOW_COPY_AND_ASSIGN(InlineCache);
};
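
// A minimal sketch of how a compiler pass might consume an InlineCache when
// deciding whether to devirtualize a call (hypothetical caller code; 'info'
// and 'dex_pc' are assumed to be in scope, not part of this header):
//
//   InlineCache* cache = info->GetInlineCache(dex_pc);
//   if (cache->IsMonomorphic()) {
//     mirror::Class* cls = cache->GetMonomorphicType();
//     // Inline the resolved target, guarded by a check that the receiver's
//     // class is 'cls'.
//   } else if (cache->IsMegamorphic()) {
//     // Too many receiver classes seen; keep the virtual dispatch.
//   }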

/**
 * Profiling info for a method, created and filled by the interpreter once the
 * method is warm, and used by the compiler to drive optimizations.
 */
class ProfilingInfo {
 public:
  // Create a ProfilingInfo for 'method'. Returns true if creation succeeded,
  // or if no info is needed because the method has no virtual/interface
  // invocations.
  static bool Create(Thread* self, ArtMethod* method, bool retry_allocation)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Add information from an executed INVOKE instruction to the profile.
  void AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls)
      // Method should not be interruptible, as it manipulates the ProfilingInfo
      // which can be concurrently collected.
      REQUIRES(Roles::uninterruptible_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
  template<typename RootVisitorType>
  void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS {
    for (size_t i = 0; i < number_of_inline_caches_; ++i) {
      InlineCache* cache = &cache_[i];
      for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
        visitor.VisitRootIfNonNull(cache->classes_[j].AddressWithoutBarrier());
      }
    }
  }

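  // A minimal sketch of a compatible visitor (hypothetical type, not part of
  // the runtime). The only requirement VisitRoots places on the callback is a
  // VisitRootIfNonNull member that accepts the root's address:
  //
  //   struct CountingRootVisitor {
  //     size_t count = 0;
  //     template <typename RootPtr>
  //     void VisitRootIfNonNull(RootPtr root) {
  //       if (!root->IsNull()) {
  //         ++count;  // The slot holds a class.
  //       }
  //     }
  //   };
  //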
  ArtMethod* GetMethod() const {
    return method_;
  }

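  // Return the InlineCache profiling the INVOKE instruction at 'dex_pc'.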
  InlineCache* GetInlineCache(uint32_t dex_pc);

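  // Whether a compilation of the method is in progress: a regular one if
  // 'osr' is false, an on-stack-replacement one if 'osr' is true.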
  bool IsMethodBeingCompiled(bool osr) const {
    return osr
        ? is_osr_method_being_compiled_
        : is_method_being_compiled_;
  }

  void SetIsMethodBeingCompiled(bool value, bool osr) {
    if (osr) {
      is_osr_method_being_compiled_ = value;
    } else {
      is_method_being_compiled_ = value;
    }
  }

  void SetSavedEntryPoint(const void* entry_point) {
    saved_entry_point_ = entry_point;
  }

  const void* GetSavedEntryPoint() const {
    return saved_entry_point_;
  }

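  // Null out every class recorded in the inline caches, so the GC no longer
  // keeps those classes alive through this ProfilingInfo.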
  void ClearGcRootsInInlineCaches() {
    for (size_t i = 0; i < number_of_inline_caches_; ++i) {
      InlineCache* cache = &cache_[i];
      memset(&cache->classes_[0],
             0,
             InlineCache::kIndividualCacheSize * sizeof(GcRoot<mirror::Class>));
    }
  }

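  // Note that a compilation is inlining through this info, which prevents the
  // GC from clearing the inline caches (see current_inline_uses_ below).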
  void IncrementInlineUse() {
    DCHECK_NE(current_inline_uses_, std::numeric_limits<uint16_t>::max());
    current_inline_uses_++;
  }

  void DecrementInlineUse() {
    DCHECK_GT(current_inline_uses_, 0);
    current_inline_uses_--;
  }

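  // Whether the compiler currently depends on this info, either because the
  // method is being compiled or because its inline caches are being inlined.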
  bool IsInUseByCompiler() const {
    return IsMethodBeingCompiled(/*osr*/ true) || IsMethodBeingCompiled(/*osr*/ false) ||
        (current_inline_uses_ > 0);
  }

 private:
  ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
      : number_of_inline_caches_(entries.size()),
        method_(method),
        is_method_being_compiled_(false),
        is_osr_method_being_compiled_(false),
        current_inline_uses_(0),
        saved_entry_point_(nullptr) {
    memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
    for (size_t i = 0; i < number_of_inline_caches_; ++i) {
      cache_[i].dex_pc_ = entries[i];
    }
  }

  // Number of instructions we are profiling in the ArtMethod.
  const uint32_t number_of_inline_caches_;

  // Method this profiling info is for.
  ArtMethod* const method_;

  // Whether the ArtMethod is currently being compiled. This flag
  // is implicitly guarded by the JIT code cache lock.
  // TODO: Make the JIT code cache lock global.
  bool is_method_being_compiled_;
  bool is_osr_method_being_compiled_;

  // When the compiler inlines the method associated with this ProfilingInfo,
  // it updates this counter so that the GC does not try to clear the inline caches.
  uint16_t current_inline_uses_;

  // Entry point of the corresponding ArtMethod, saved while the JIT code cache
  // checks the liveness of the compiled code.
  const void* saved_entry_point_;

  // Dynamically allocated array of size `number_of_inline_caches_`.
  InlineCache cache_[0];

  friend class jit::JitCodeCache;

  DISALLOW_COPY_AND_ASSIGN(ProfilingInfo);
};
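
// A minimal sketch of the intended flow (hypothetical caller code; 'self',
// 'method', 'receiver_class', 'dex_pc' and 'info' are assumptions, not part
// of this header). The interpreter records receiver classes once the method
// is warm; the JIT compiler later reads them back:
//
//   ProfilingInfo::Create(self, method, /* retry_allocation */ true);
//   ...
//   // Interpreter, on each virtual/interface INVOKE:
//   info->AddInvokeInfo(dex_pc, receiver_class);
//   ...
//   // Compiler, while optimizing that call site:
//   InlineCache* cache = info->GetInlineCache(dex_pc);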

}  // namespace art

#endif  // ART_RUNTIME_JIT_PROFILING_INFO_H_