/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include "instrumentation.h"

#include "base/arena_containers.h"
#include "base/atomic.h"
#include "base/histogram-inl.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/safe_map.h"
#include "dex/method_reference.h"
#include "gc_root.h"

namespace art {

class ArtMethod;
template<class T> class Handle;
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class JitJniStubTestHelper;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;
class Thread;

namespace gc {
namespace accounting {
template<size_t kAlignment> class MemoryRangeBitmap;
}  // namespace accounting
}  // namespace gc

namespace mirror {
class Class;
class Object;
template<class T> class ObjectArray;
}  // namespace mirror

namespace jit {

class JitInstrumentationCache;
class ScopedCodeCacheWrite;

// Alignment in bytes that will suit all architectures.
static constexpr int kJitCodeAlignment = 16;
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Set the default to a very low value in debug builds to stress the code cache
  // collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching 256KB.
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;
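  // (For reference: 4 * 64 KB = 256 KB in release builds, 4 * 8 KB = 32 KB in debug builds.)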

  // Create the code cache with a code + data capacity of `initial_capacity`, growable up to
  // `max_capacity`. The error message is passed in the out arg `error_msg`.
  static JitCodeCache* Create(size_t initial_capacity,
                              size_t max_capacity,
                              bool generate_debug_info,
                              bool used_only_for_profile_data,
                              std::string* error_msg);
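  // Illustrative usage (a minimal sketch, not part of the original file; it assumes the usual
  // ART convention that Create() returns null and fills `error_msg` on failure):
  //
  //   std::string error_msg;
  //   JitCodeCache* cache = JitCodeCache::Create(JitCodeCache::kInitialCapacity,
  //                                              JitCodeCache::kMaxCapacity,
  //                                              /* generate_debug_info */ false,
  //                                              /* used_only_for_profile_data */ false,
  //                                              &error_msg);
  //   if (cache == nullptr) {
  //     LOG(ERROR) << "Unable to create JIT code cache: " << error_msg;
  //   }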
  ~JitCodeCache();

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!lock_);

  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Notify the code cache that the compiler wants to use the
  // profiling info of `method` to drive optimizations,
  // and therefore ensure the returned profiling info object is not
  // collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Allocate and write code and its metadata to the code cache.
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  uint8_t* CommitCode(Thread* self,
                      ArtMethod* method,
                      uint8_t* stack_map,
                      uint8_t* method_info,
                      uint8_t* roots_data,
                      size_t frame_size_in_bytes,
                      size_t core_spill_mask,
                      size_t fp_spill_mask,
                      const uint8_t* code,
                      size_t code_size,
                      size_t data_size,
                      bool osr,
                      Handle<mirror::ObjectArray<mirror::Object>> roots,
                      bool has_should_deoptimize_flag,
                      const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Return true if the code cache contains this pc.
  bool ContainsPc(const void* pc) const;

  // Return true if the code cache contains this method.
  bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);

  // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!lock_);

  // Allocate a region of data that contains `size` bytes, and potentially space
  // for storing `number_of_roots` roots. Returns null if there is no more room.
  // Return the number of bytes allocated.
  size_t ReserveData(Thread* self,
                     size_t stack_map_size,
                     size_t method_info_size,
                     size_t number_of_roots,
                     ArtMethod* method,
                     uint8_t** stack_map_data,
                     uint8_t** method_info_data,
                     uint8_t** roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Clear data from the data portion of the code cache.
  void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);
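
  // Illustrative compiler-side flow (a minimal sketch, not part of the original file; it assumes
  // the usual caller-side conventions of the JIT compiler): reserve the data region, write the
  // stack maps, method info and roots into it, then commit the generated code. If the
  // compilation is abandoned, the reserved data is released again with ClearData().
  //
  //   uint8_t* stack_map_data = nullptr;
  //   uint8_t* method_info_data = nullptr;
  //   uint8_t* roots_data = nullptr;
  //   code_cache->ReserveData(self, stack_map_size, method_info_size, number_of_roots,
  //                           method, &stack_map_data, &method_info_data, &roots_data);
  //   ...  // Emit machine code and fill in the reserved data region.
  //   uint8_t* entry = code_cache->CommitCode(self, method, stack_map_data, method_info_data,
  //                                           roots_data, frame_size_in_bytes, core_spill_mask,
  //                                           fp_spill_mask, code, code_size, data_size,
  //                                           /* osr */ false, roots,
  //                                           has_should_deoptimize_flag,
  //                                           cha_single_implementation_list);
  //   if (entry == nullptr) {
  //     code_cache->ClearData(self, stack_map_data, roots_data);
  //   }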

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given the 'pc', try to find the JIT compiled code associated with it.
  // Return null if 'pc' is not in the code cache. 'method' is passed for
  // sanity check.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes the method from the cache for testing purposes. The caller
  // must ensure that all threads are suspended and that the method is not
  // on any thread's stack.
  bool RemoveMethod(ArtMethod* method, bool release_memory)
      REQUIRES(!lock_)
      REQUIRES(Locks::mutator_lock_);

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true,
  // will collect and retry if the first allocation is unsuccessful.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries,
                                  bool retry_allocation)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
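
  // Illustrative usage (a sketch, not part of the original file): request a ProfilingInfo and
  // let the cache collect and retry if the first allocation fails.
  //
  //   ProfilingInfo* info =
  //       code_cache->AddProfilingInfo(self, method, entries, /* retry_allocation */ true);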

  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return mspace == code_mspace_ || mspace == data_mspace_;
  }

  void* MoreCore(const void* mspace, intptr_t increment);

  // Adds to `methods` all profiled methods which are part of any of the given dex locations.
  void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                          std::vector<ProfileMethodInfo>& methods)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
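
  // Illustrative usage (a sketch, not part of the original file; the APK path is only an
  // example): collect the methods JITted from a given dex location, e.g. for the profile saver.
  //
  //   std::vector<ProfileMethodInfo> methods;
  //   code_cache->GetProfiledMethods({"/data/app/com.example/base.apk"}, methods);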

  uint64_t GetLastUpdateTimeNs() const;

  size_t GetMemorySizeOfCodePointer(const void* ptr) REQUIRES(!lock_);

  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!lock_);

  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);

  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The GC needs to disallow the reading of inline caches when it processes them,
  // to avoid a class being used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!lock_);
  void DisallowInlineCacheAccess() REQUIRES(!lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);

  // Notify the code cache that the method at the pointer 'old_method' is being moved to the pointer
  // 'new_method' since it is being made obsolete.
  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code. Should only be used
  // by tests.
  void SetGarbageCollectCode(bool value) {
    garbage_collect_code_ = value;
  }

 private:
  // Take ownership of maps.
  JitCodeCache(MemMap* code_map,
               MemMap* data_map,
               size_t initial_code_capacity,
               size_t initial_data_capacity,
               size_t max_capacity,
               bool garbage_collect_code,
               int memmap_flags_prot_code);

  // Internal version of 'CommitCode' that does not retry if the allocation fails.
  // Returns null on allocation failure.
  uint8_t* CommitCodeInternal(Thread* self,
                              ArtMethod* method,
                              uint8_t* stack_map,
                              uint8_t* method_info,
                              uint8_t* roots_data,
                              size_t frame_size_in_bytes,
                              size_t core_spill_mask,
                              size_t fp_spill_mask,
                              const uint8_t* code,
                              size_t code_size,
                              size_t data_size,
                              bool osr,
                              Handle<mirror::ObjectArray<mirror::Object>> roots,
                              bool has_should_deoptimize_flag,
                              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES(!lock_)
      REQUIRES(!Locks::cha_lock_);

  // Removes the method from the cache. The caller must ensure that all threads
  // are suspended and that the method is not on any thread's stack.
  bool RemoveMethodLocked(ArtMethod* method, bool release_memory)
      REQUIRES(lock_)
      REQUIRES(Locks::mutator_lock_);

  // Free the mspace allocations for `code_ptr`.
  void FreeCode(const void* code_ptr) REQUIRES(lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(lock_);

  // Try to increase the current capacity of the code cache. Return whether we
  // succeeded at doing so.
  bool IncreaseCodeCacheCapacity() REQUIRES(lock_);

  // Set the footprint limit of the code cache.
  void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool CheckLiveCompiledCodeHasProfilingInfo()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void FreeCode(uint8_t* code) REQUIRES(lock_);
  uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
  void FreeData(uint8_t* data) REQUIRES(lock_);
  uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);

  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  class JniStubKey;
  class JniStubData;

  // Lock for guarding allocations, collections, and the method_code_map_.
  Mutex lock_;
  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(lock_);
  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(lock_);
  // Mem map which holds code.
  std::unique_ptr<MemMap> code_map_;
  // Mem map which holds data (stack maps and profiling info).
  std::unique_ptr<MemMap> data_map_;
  // The opaque mspace for allocating code.
  void* code_mspace_ GUARDED_BY(lock_);
  // The opaque mspace for allocating data.
  void* data_mspace_ GUARDED_BY(lock_);
  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
  // Holds compiled code associated with the shorty for a JNI stub.
  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(lock_);
  // Holds compiled code associated with the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
  // Holds OSR compiled code associated with the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
  // ProfilingInfo objects we have allocated.
  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);

  // The maximum capacity in bytes this code cache can go to.
  size_t max_capacity_ GUARDED_BY(lock_);

  // The current capacity in bytes of the code cache.
  size_t current_capacity_ GUARDED_BY(lock_);

  // The current footprint in bytes of the code portion of the code cache.
  size_t code_end_ GUARDED_BY(lock_);

  // The current footprint in bytes of the data portion of the code cache.
  size_t data_end_ GUARDED_BY(lock_);

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);

  // Last time the code cache was updated.
  // It is atomic to avoid locking when reading it.
  Atomic<uint64_t> last_update_time_ns_;

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_;

  // The size in bytes of used memory for the data portion of the code cache.
  size_t used_memory_for_data_ GUARDED_BY(lock_);

  // The size in bytes of used memory for the code portion of the code cache.
  size_t used_memory_for_code_ GUARDED_BY(lock_);

  // Number of compilations done throughout the lifetime of the JIT.
  size_t number_of_compilations_ GUARDED_BY(lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(lock_);

  // Histogram for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);

  // Whether the GC allows accessing weaks in inline caches. Note that this
  // is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);

  // Mapping flags for the code section.
  const int memmap_flags_prot_code_;

  friend class art::JitJniStubTestHelper;
  friend class ScopedCodeCacheWrite;

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_