/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include <iosfwd>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include "base/arena_containers.h"
#include "base/array_ref.h"
#include "base/atomic.h"
#include "base/histogram.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "base/mutex.h"
#include "base/safe_map.h"
#include "compilation_kind.h"
#include "jit_memory_region.h"
#include "profiling_info.h"

namespace art {

class ArtMethod;
template<class T> class Handle;
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class JitJniStubTestHelper;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;
class Thread;

namespace gc {
namespace accounting {
template<size_t kAlignment> class MemoryRangeBitmap;
}  // namespace accounting
}  // namespace gc

namespace mirror {
class Class;
class Object;
template<class T> class ObjectArray;
}  // namespace mirror

namespace jit {

class MarkCodeClosure;

// Type of bitmap used for tracking live functions in the JIT code cache for the purposes
// of garbage collecting code.
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAccountingBytes>;

// The state of profile-based compilation in the zygote.
// - kInProgress:      JIT compilation is happening
// - kDone:            JIT compilation is finished, and the zygote is preparing to notify
//                     the other processes.
// - kNotifiedOk:      the zygote has notified the other processes, which can start
//                     sharing the boot image method mappings.
// - kNotifiedFailure: the zygote has notified the other processes, but they
//                     cannot share the boot image method mappings due to
//                     unexpected errors.
enum class ZygoteCompilationState : uint8_t {
  kInProgress = 0,
  kDone = 1,
  kNotifiedOk = 2,
  kNotifiedFailure = 3,
};
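
// Illustrative sketch (not from the original header): the expected progression is
// kInProgress -> kDone -> kNotifiedOk or kNotifiedFailure. Assuming `zygote_map` points
// to the ZygoteMap declared below, the zygote could record the transitions roughly as:
//
//   zygote_map->SetCompilationState(ZygoteCompilationState::kDone);
//   // ... notify the forked processes ...
//   zygote_map->SetCompilationState(ok ? ZygoteCompilationState::kNotifiedOk
//                                      : ZygoteCompilationState::kNotifiedFailure);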

// Class abstraction over a map of ArtMethod -> compiled code, where the
// ArtMethods are compiled by the zygote, and the map acts as a communication
// channel between the zygote and the other processes.
// For the zygote process, this map is the only place it stores compiled
// code; its JitCodeCache::method_code_map_ is empty.
//
// This map is writable only by the zygote, and readable by all children.
class ZygoteMap {
 public:
  struct Entry {
    ArtMethod* method;
    // Note we currently only allocate code in the low 4GiB, so we could just reserve 4 bytes
    // for the code pointer. For simplicity, and in case we move to 64-bit addresses
    // for code, keep it void* for now.
    const void* code_ptr;
  };

  explicit ZygoteMap(JitMemoryRegion* region)
      : map_(), region_(region), compilation_state_(nullptr) {}

  // Initialize the data structure so it can hold `number_of_methods` mappings.
  // Note that the map is fixed-size and never grows.
  void Initialize(uint32_t number_of_methods) REQUIRES(!Locks::jit_lock_);

  // Add the mapping method -> code.
  void Put(const void* code, ArtMethod* method) REQUIRES(Locks::jit_lock_);

  // Return the code pointer for the given method. If `pc` is not zero, also check that
  // `pc` falls into that code's range, and return null otherwise.
  const void* GetCodeFor(ArtMethod* method, uintptr_t pc = 0) const;

  // Return whether the map has associated code for the given method.
  bool ContainsMethod(ArtMethod* method) const {
    return GetCodeFor(method) != nullptr;
  }

  void SetCompilationState(ZygoteCompilationState state) {
    region_->WriteData(compilation_state_, state);
  }

  bool IsCompilationDoneButNotNotified() const {
    return compilation_state_ != nullptr && *compilation_state_ == ZygoteCompilationState::kDone;
  }

  bool IsCompilationNotified() const {
    return compilation_state_ != nullptr && *compilation_state_ > ZygoteCompilationState::kDone;
  }

  bool CanMapBootImageMethods() const {
    return compilation_state_ != nullptr &&
           *compilation_state_ == ZygoteCompilationState::kNotifiedOk;
  }

  ArrayRef<const Entry>::const_iterator cbegin() const {
    return map_.cbegin();
  }
  ArrayRef<const Entry>::iterator begin() {
    return map_.begin();
  }
  ArrayRef<const Entry>::const_iterator cend() const {
    return map_.cend();
  }
  ArrayRef<const Entry>::iterator end() {
    return map_.end();
  }

 private:
  // The map allocated with `region_`.
  ArrayRef<const Entry> map_;

  // The region in which the map is allocated.
  JitMemoryRegion* const region_;

  // The current state of compilation in the zygote. Starts with kInProgress,
  // and should end with kNotifiedOk or kNotifiedFailure.
  const ZygoteCompilationState* compilation_state_;

  DISALLOW_COPY_AND_ASSIGN(ZygoteMap);
};

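// Illustrative sketch (not from the original header): how a process forked from the
// zygote might consult the map. `code_cache` and `method` are assumptions for the
// example (a JitCodeCache* and a boot-image ArtMethod* obtained elsewhere):
//
//   ZygoteMap* map = code_cache->GetZygoteMap();
//   if (map->CanMapBootImageMethods() && map->ContainsMethod(method)) {
//     const void* entry_point = map->GetCodeFor(method);
//     // `entry_point` refers to zygote-compiled code shared with this process.
//   }
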
class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Set the default to a very low amount for debug builds to stress the code cache
  // collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching four times the initial capacity (256KB on release builds).
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;

  // Create the code cache. On failure, returns null and passes an error message
  // in the out arg `error_msg`.
  static JitCodeCache* Create(bool used_only_for_profile_data,
                              bool rwx_memory_allowed,
                              bool is_zygote,
                              std::string* error_msg);
  ~JitCodeCache();

  bool NotifyCompilationOf(ArtMethod* method,
                           Thread* self,
                           CompilationKind compilation_kind,
                           bool prejit)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Notify the code cache that the compiler wants to use the
  // profiling info of `method` to drive optimizations,
  // and therefore ensure the returned profiling info object is not
  // collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  void DoneCompiling(ArtMethod* method, Thread* self, CompilationKind compilation_kind)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  void DoneCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Return true if the code cache contains this pc.
  bool ContainsPc(const void* pc) const;

  // Return true if the code cache contains this pc in the private region (i.e. not from zygote).
  bool PrivateRegionContainsPc(const void* pc) const;

  // Return true if the code cache contains this method.
  bool ContainsMethod(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

  // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
  // Allocate a region for both code and data in the JIT code cache.
  // The reserved memory is left completely uninitialized.
  bool Reserve(Thread* self,
               JitMemoryRegion* region,
               size_t code_size,
               size_t stack_map_size,
               size_t number_of_roots,
               ArtMethod* method,
               /*out*/ArrayRef<const uint8_t>* reserved_code,
               /*out*/ArrayRef<const uint8_t>* reserved_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Initialize code and data of previously allocated memory.
  //
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  bool Commit(Thread* self,
              JitMemoryRegion* region,
              ArtMethod* method,
              ArrayRef<const uint8_t> reserved_code,  // Uninitialized destination.
              ArrayRef<const uint8_t> code,           // Compiler output (source).
              ArrayRef<const uint8_t> reserved_data,  // Uninitialized destination.
              const std::vector<Handle<mirror::Object>>& roots,
              ArrayRef<const uint8_t> stack_map,      // Compiler output (source).
              const std::vector<uint8_t>& debug_info,
              bool is_full_debug_info,
              CompilationKind compilation_kind,
              bool has_should_deoptimize_flag,
              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);
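
  // Illustrative sketch (not from the original header): a compiler thread is expected to
  // pair Reserve() with either a successful Commit() or a Free() of the reservation.
  // The sizes, roots, and compiler outputs below are assumptions for the example:
  //
  //   ArrayRef<const uint8_t> reserved_code;
  //   ArrayRef<const uint8_t> reserved_data;
  //   if (code_cache->Reserve(self, region, code_size, stack_map_size, number_of_roots,
  //                           method, &reserved_code, &reserved_data)) {
  //     if (!code_cache->Commit(self, region, method, reserved_code, code, reserved_data,
  //                             roots, stack_map, debug_info, is_full_debug_info,
  //                             compilation_kind, has_should_deoptimize_flag,
  //                             cha_single_implementation_list)) {
  //       code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
  //     }
  //   }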

  // Free the previously allocated memory regions.
  void Free(Thread* self, JitMemoryRegion* region, const uint8_t* code, const uint8_t* data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);
  void FreeLocked(JitMemoryRegion* region, const uint8_t* code, const uint8_t* data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::jit_lock_);

  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given the 'pc', try to find the JIT compiled code associated with it.  'method' may be null
  // when LookupMethodHeader is called from MarkCodeClosure::Run() in debug builds.  Return null
  // if 'pc' is not in the code cache.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes a method from the cache for testing purposes. The caller
  // must ensure that all threads are suspended and that the method is
  // not on any thread's stack.
  bool RemoveMethod(ArtMethod* method, bool release_memory)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES(Locks::mutator_lock_);

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic,
                           /*out*/StackHandleScope<InlineCache::kIndividualCacheSize>* classes)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return private_region_.OwnsSpace(mspace) || shared_region_.OwnsSpace(mspace);
  }

  void* MoreCore(const void* mspace, intptr_t increment);

  // Adds to `methods` all profiled methods which are part of any of the given dex locations.
  void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                          std::vector<ProfileMethodInfo>& methods)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void InvalidateAllCompiledCode()
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!Locks::jit_lock_);

  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The GC needs to disallow the reading of inline caches when it processes them,
  // to avoid a class being used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
  void DisallowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!Locks::jit_lock_);

  // Notify the code cache that the method pointed to by 'old_method' is being moved to
  // 'new_method' because it is being made obsolete.
  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code.
  void SetGarbageCollectCode(bool value) REQUIRES(!Locks::jit_lock_);

  bool GetGarbageCollectCode() REQUIRES(!Locks::jit_lock_);

  // Unsafe variant for debug checks.
  bool GetGarbageCollectCodeUnsafe() const NO_THREAD_SAFETY_ANALYSIS {
    return garbage_collect_code_;
  }
  ZygoteMap* GetZygoteMap() {
    return &zygote_map_;
  }

  // Fetch the code of a method that was JITted, but whose entrypoint the JIT could not
  // update due to the resolution trampoline.
  const void* GetSavedEntryPointOfPreCompiledMethod(ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void PostForkChildAction(bool is_system_server, bool is_zygote);

  // Clear the entrypoints of JIT compiled methods that belong to the zygote space.
  // This is used to remove non-debuggable JIT code at the point we realize the runtime
  // is debuggable. Also clear the Precompiled flag from all methods so the non-debuggable code
  // doesn't come back.
  void TransitionToDebuggable() REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);

  JitMemoryRegion* GetCurrentRegion();
  bool IsSharedRegion(const JitMemoryRegion& region) const { return &region == &shared_region_; }
  bool CanAllocateProfilingInfo() {
    // If we don't have a private region, we cannot allocate a profiling info.
    // A shared region does not, in general, support GC objects, which a profiling info
    // can reference.
    JitMemoryRegion* region = GetCurrentRegion();
    return region->IsValid() && !IsSharedRegion(*region);
  }
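
  // Illustrative sketch (not from the original header): profiling info is only allocated
  // when the current region is a valid private region. `self`, `method`, and `entries`
  // are assumptions for the example:
  //
  //   if (code_cache->CanAllocateProfilingInfo()) {
  //     ProfilingInfo* info = code_cache->AddProfilingInfo(self, method, entries);
  //     // `info` may still be null if the allocation itself failed.
  //   }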

  // Return whether the given `ptr` is in the zygote executable memory space.
  bool IsInZygoteExecSpace(const void* ptr) const {
    return shared_region_.IsInExecSpace(ptr);
  }

 private:
  JitCodeCache();

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
  // The non-mutator-lock version should be used if possible. This method will release and then
  // re-acquire the mutator lock.
  void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
      REQUIRES(Locks::jit_lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(Locks::jit_lock_) REQUIRES(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::jit_lock_)
      REQUIRES(!Locks::cha_lock_);

  // Removes a method from the cache. The caller must ensure that all threads
  // are suspended and that the method is not on any thread's stack.
  bool RemoveMethodLocked(ArtMethod* method, bool release_memory)
      REQUIRES(Locks::jit_lock_)
      REQUIRES(Locks::mutator_lock_);

  // Call the given callback for every compiled method in the code cache.
  void VisitAllMethods(const std::function<void(const void*, ArtMethod*)>& cb)
      REQUIRES(Locks::jit_lock_);

  // Free code and data allocations for `code_ptr`.
  void FreeCodeAndData(const void* code_ptr)
      REQUIRES(Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!Locks::jit_lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!Locks::jit_lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(Locks::jit_lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(Locks::jit_lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);

  // Return whether the code cache's capacity is at its maximum.
  bool IsAtMaxCapacity() const REQUIRES(Locks::jit_lock_);

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  bool IsInZygoteDataSpace(const void* ptr) const {
    return shared_region_.IsInDataSpace(ptr);
  }

  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Record that `method` is being compiled with the given compilation kind.
  void AddMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
      REQUIRES(Locks::jit_lock_);

  // Remove `method` from the list of methods being compiled with the given compilation kind.
  void RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
      REQUIRES(Locks::jit_lock_);

  // Return whether `method` is being compiled with the given compilation kind.
  bool IsMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
      REQUIRES(Locks::jit_lock_);

  // Return whether `method` is being compiled with any compilation kind.
  bool IsMethodBeingCompiled(ArtMethod* method) REQUIRES(Locks::jit_lock_);

  class JniStubKey;
  class JniStubData;

  // Whether the GC allows accessing weaks in inline caches. Note that this
  // is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(Locks::jit_lock_);

  // -------------- JIT memory regions ------------------------------------- //

  // Shared region, inherited from the zygote.
  JitMemoryRegion shared_region_;

  // Process's own region.
  JitMemoryRegion private_region_;

  // -------------- Global JIT maps --------------------------------------- //

  // Holds compiled code associated with the shorty for a JNI stub.
  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(Locks::jit_lock_);

  // Holds compiled code associated with the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(Locks::jit_lock_);

  // Holds compiled code associated with the ArtMethod. Used when pre-jitting
  // methods whose entrypoints have the resolution stub.
  SafeMap<ArtMethod*, const void*> saved_compiled_methods_map_ GUARDED_BY(Locks::jit_lock_);

  // Holds OSR-compiled code associated with the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(Locks::jit_lock_);

  // ProfilingInfo objects we have allocated.
  SafeMap<ArtMethod*, ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);

  // Methods we are currently compiling, one set for each kind of compilation.
  std::set<ArtMethod*> current_optimized_compilations_ GUARDED_BY(Locks::jit_lock_);
  std::set<ArtMethod*> current_osr_compilations_ GUARDED_BY(Locks::jit_lock_);
  std::set<ArtMethod*> current_baseline_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Methods that the zygote has compiled and which can be shared across processes
  // forked from the zygote.
  ZygoteMap zygote_map_;

  // -------------- JIT GC related data structures ----------------------- //

  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(Locks::jit_lock_);

  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(Locks::jit_lock_);

  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(Locks::jit_lock_);

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_ GUARDED_BY(Locks::jit_lock_);

  // ---------------- JIT statistics -------------------------------------- //

  // Number of baseline compilations done throughout the lifetime of the JIT.
  size_t number_of_baseline_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Number of optimized compilations done throughout the lifetime of the JIT.
  size_t number_of_optimized_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(Locks::jit_lock_);

  // Histogram for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(Locks::jit_lock_);

  // Histogram for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(Locks::jit_lock_);

  // Histogram for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(Locks::jit_lock_);

  friend class art::JitJniStubTestHelper;
  friend class ScopedCodeCacheWrite;
  friend class MarkCodeClosure;

  DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_