/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include "instrumentation.h"

#include "atomic.h"
#include "base/arena_containers.h"
#include "base/histogram-inl.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc/accounting/bitmap.h"
#include "gc_root.h"
#include "jni.h"
#include "method_reference.h"
#include "oat_file.h"
#include "profile_compilation_info.h"
#include "safe_map.h"
#include "thread_pool.h"

namespace art {

class ArtMethod;
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class OatQuickMethodHeader;
class ProfilingInfo;

namespace jit {

class JitInstrumentationCache;

// Alignment in bytes that will suit all architectures.
static constexpr int kJitCodeAlignment = 16;
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Use a very low initial capacity for debug builds to stress code cache
  // collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching 256KB.
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;

  // Create the code cache with an initial code + data capacity of `initial_capacity`,
  // growing up to `max_capacity`. An error message is passed in the out arg `error_msg`.
  static JitCodeCache* Create(size_t initial_capacity,
                              size_t max_capacity,
                              bool generate_debug_info,
                              std::string* error_msg);
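  // Illustrative usage sketch (not part of this header; the capacity values are
  // hypothetical, only the Create() declaration above is assumed):
  //   std::string error_msg;
  //   JitCodeCache* cache = JitCodeCache::Create(JitCodeCache::kInitialCapacity,
  //                                              JitCodeCache::kMaxCapacity,
  //                                              /* generate_debug_info */ false,
  //                                              &error_msg);
  //   if (cache == nullptr) {
  //     LOG(ERROR) << "Failed to create JIT code cache: " << error_msg;
  //   }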

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!lock_);

  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Notify the code cache that the compiler wants to use the
  // profiling info of `method` to drive optimizations,
  // and therefore ensure the returned profiling info object is not
  // collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);
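  // Hedged sketch of the expected NotifyCompilerUse()/DoneCompilerUse() pairing
  // (the surrounding compiler code is hypothetical; only the declarations above
  // are assumed):
  //   ProfilingInfo* info = code_cache->NotifyCompilerUse(method, self);
  //   if (info != nullptr) {
  //     // ... read inline caches from `info` while it is kept from collection ...
  //     code_cache->DoneCompilerUse(method, self);
  //   }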

  // Allocate and write code and its metadata to the code cache.
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  uint8_t* CommitCode(Thread* self,
                      ArtMethod* method,
                      uint8_t* stack_map,
                      uint8_t* method_info,
                      uint8_t* roots_data,
                      size_t frame_size_in_bytes,
                      size_t core_spill_mask,
                      size_t fp_spill_mask,
                      const uint8_t* code,
                      size_t code_size,
                      size_t data_size,
                      bool osr,
                      Handle<mirror::ObjectArray<mirror::Object>> roots,
                      bool has_should_deoptimize_flag,
                      const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Return true if the code cache contains this pc.
  bool ContainsPc(const void* pc) const;

  // Return true if the code cache contains this method.
  bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);

  // Allocate a region of data that contains `stack_map_size` + `method_info_size`
  // bytes, and potentially space for storing `number_of_roots` roots.
  // The out pointers are set to null if there is no more room.
  // Return the number of bytes allocated.
  size_t ReserveData(Thread* self,
                     size_t stack_map_size,
                     size_t method_info_size,
                     size_t number_of_roots,
                     ArtMethod* method,
                     uint8_t** stack_map_data,
                     uint8_t** method_info_data,
                     uint8_t** roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Clear data from the data portion of the code cache.
  void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);
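  // Hedged sketch of the reserve/commit/clear flow implied by the declarations
  // above (variable names and sizes are hypothetical; error handling elided):
  //   uint8_t* stack_map_data = nullptr;
  //   uint8_t* method_info_data = nullptr;
  //   uint8_t* roots_data = nullptr;
  //   code_cache->ReserveData(self, stack_map_size, method_info_size, number_of_roots,
  //                           method, &stack_map_data, &method_info_data, &roots_data);
  //   if (roots_data == nullptr) {
  //     return;  // No more room in the data cache.
  //   }
  //   // ... emit stack maps into `stack_map_data`, then:
  //   uint8_t* code = code_cache->CommitCode(self, method, stack_map_data, method_info_data,
  //                                          roots_data, frame_size, core_spills, fp_spills,
  //                                          code_buffer, code_size, data_size, /* osr */ false,
  //                                          roots, has_should_deoptimize_flag, cha_methods);
  //   if (code == nullptr) {
  //     code_cache->ClearData(self, stack_map_data, roots_data);
  //   }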

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given the 'pc', try to find the JIT compiled code associated with it.
  // Return null if 'pc' is not in the code cache. 'method' is passed for a
  // sanity check.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
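  // Hedged sketch of a lookup during stack walking (`pc` and `method` are
  // hypothetical values from the walker; only ContainsPc() and the declaration
  // above are assumed):
  //   if (code_cache->ContainsPc(reinterpret_cast<const void*>(pc))) {
  //     OatQuickMethodHeader* header = code_cache->LookupMethodHeader(pc, method);
  //     // `header` is null if `pc` is not in the code cache.
  //   }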

  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes the method from the cache for testing purposes. The caller
  // must ensure that all threads are suspended and that the method is not
  // on any thread's stack.
  bool RemoveMethod(ArtMethod* method, bool release_memory)
      REQUIRES(!lock_)
      REQUIRES(Locks::mutator_lock_);
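  // Hedged test-only sketch of satisfying the preconditions above; the use of
  // ScopedSuspendAll is an assumption about the caller, not something this
  // header mandates:
  //   {
  //     ScopedSuspendAll ssa(__FUNCTION__);
  //     bool removed = code_cache->RemoveMethod(method, /* release_memory */ true);
  //   }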

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true,
  // this method will run a collection and retry if the first allocation is unsuccessful.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries,
                                  bool retry_allocation)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
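  // Hedged sketch (treating `entries` as the dex pcs of the call sites to
  // profile is an assumption; only the declaration above is taken from this
  // header):
  //   std::vector<uint32_t> entries = { /* dex pcs of interesting call sites */ };
  //   ProfilingInfo* info =
  //       code_cache->AddProfilingInfo(self, method, entries, /* retry_allocation */ true);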

  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return mspace == code_mspace_ || mspace == data_mspace_;
  }

  void* MoreCore(const void* mspace, intptr_t increment);

  // Adds to `methods` all profiled methods which are part of any of the given dex locations.
  void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                          std::vector<ProfileMethodInfo>& methods)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
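  // Hedged sketch of gathering profile data for a set of dex files (the
  // location string is a hypothetical placeholder; only the declaration above
  // is assumed):
  //   std::set<std::string> dex_base_locations = { "/data/app/example/base.apk" };
  //   std::vector<ProfileMethodInfo> methods;
  //   code_cache->GetProfiledMethods(dex_base_locations, methods);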

  uint64_t GetLastUpdateTimeNs() const;

  size_t GetCurrentCapacity() REQUIRES(!lock_) {
    MutexLock lock(Thread::Current(), lock_);
    return current_capacity_;
  }

  size_t GetMemorySizeOfCodePointer(const void* ptr) REQUIRES(!lock_);

  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!lock_);

  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);

  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The GC needs to disallow the reading of inline caches while it processes
  // them, to avoid a class being used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!lock_);
  void DisallowInlineCacheAccess() REQUIRES(!lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);
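  // Hedged sketch of how a GC pause might bracket inline cache processing with
  // the methods above (whether Broadcast is issued separately or from
  // AllowInlineCacheAccess() is an implementation detail not visible here):
  //   code_cache->DisallowInlineCacheAccess();
  //   // ... sweep or update classes referenced from inline caches ...
  //   code_cache->AllowInlineCacheAccess();
  //   code_cache->BroadcastForInlineCacheAccess();  // Wake threads waiting for access.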

  // Notify the code cache that the method at the pointer 'old_method' is being moved to the pointer
  // 'new_method' since it is being made obsolete.
  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code. Should only be used
  // by tests.
  void SetGarbageCollectCode(bool value) {
    garbage_collect_code_ = value;
  }

 private:
  // Take ownership of maps.
  JitCodeCache(MemMap* code_map,
               MemMap* data_map,
               size_t initial_code_capacity,
               size_t initial_data_capacity,
               size_t max_capacity,
               bool garbage_collect_code);

  // Internal version of 'CommitCode' that will not retry if the
  // allocation fails. Return null if the allocation fails.
  uint8_t* CommitCodeInternal(Thread* self,
                              ArtMethod* method,
                              uint8_t* stack_map,
                              uint8_t* method_info,
                              uint8_t* roots_data,
                              size_t frame_size_in_bytes,
                              size_t core_spill_mask,
                              size_t fp_spill_mask,
                              const uint8_t* code,
                              size_t code_size,
                              size_t data_size,
                              bool osr,
                              Handle<mirror::ObjectArray<mirror::Object>> roots,
                              bool has_should_deoptimize_flag,
                              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES(!lock_)
      REQUIRES(!Locks::cha_lock_);

  // Free the mspace allocations for `code_ptr`.
  void FreeCode(const void* code_ptr) REQUIRES(lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(lock_);

  // Try to increase the current capacity of the code cache. Return whether we
  // succeeded at doing so.
  bool IncreaseCodeCacheCapacity() REQUIRES(lock_);

  // Set the footprint limit of the code cache.
  void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);

  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool CheckLiveCompiledCodeHasProfilingInfo()
      REQUIRES(lock_);

  void FreeCode(uint8_t* code) REQUIRES(lock_);
  uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
  void FreeData(uint8_t* data) REQUIRES(lock_);
  uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);

  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Lock for guarding allocations, collections, and the method_code_map_.
  Mutex lock_;
  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(lock_);
  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(lock_);
  // Mem map which holds code.
  std::unique_ptr<MemMap> code_map_;
  // Mem map which holds data (stack maps and profiling info).
  std::unique_ptr<MemMap> data_map_;
  // The opaque mspace for allocating code.
  void* code_mspace_ GUARDED_BY(lock_);
  // The opaque mspace for allocating data.
  void* data_mspace_ GUARDED_BY(lock_);
  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
  // Holds compiled code associated with the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
  // Holds OSR compiled code associated with the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
  // ProfilingInfo objects we have allocated.
  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);

  // The maximum capacity in bytes this code cache can go to.
  size_t max_capacity_ GUARDED_BY(lock_);

  // The current capacity in bytes of the code cache.
  size_t current_capacity_ GUARDED_BY(lock_);

  // The current footprint in bytes of the code portion of the code cache.
  size_t code_end_ GUARDED_BY(lock_);

  // The current footprint in bytes of the data portion of the code cache.
  size_t data_end_ GUARDED_BY(lock_);

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);

  // Last time the code cache was updated.
  // It is atomic to avoid locking when reading it.
  Atomic<uint64_t> last_update_time_ns_;

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_;

  // The size in bytes of used memory for the data portion of the code cache.
  size_t used_memory_for_data_ GUARDED_BY(lock_);

  // The size in bytes of used memory for the code portion of the code cache.
  size_t used_memory_for_code_ GUARDED_BY(lock_);

  // Number of compilations done throughout the lifetime of the JIT.
  size_t number_of_compilations_ GUARDED_BY(lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(lock_);

  // Histograms for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);

  // Histograms for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);

  // Histograms for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);

  // Whether the GC allows accessing weaks in inline caches. Note that this
  // is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_