/*
 * Copyright 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_GC_VISITED_ARENA_POOL_H_
#define ART_RUNTIME_BASE_GC_VISITED_ARENA_POOL_H_

#include "base/arena_allocator.h"
#include "base/casts.h"
#include "base/locks.h"
#include "base/mem_map.h"

#include <memory>
#include <mutex>
#include <set>
#include <vector>

namespace art {

// GcVisitedArenaPool can be used for tracking allocations so that they can
// be visited during GC to update the GC-roots inside them.

// An Arena which tracks its allocations.
class TrackedArena final : public Arena {
 public:
  // Used for searching in maps. Only the arena's starting address is relevant.
  explicit TrackedArena(uint8_t* addr) : pre_zygote_fork_(false) { memory_ = addr; }
  TrackedArena(uint8_t* start, size_t size, bool pre_zygote_fork);
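  // For illustration, a sketch of how the address-only constructor above can
  // serve as a search key (hypothetical snippet; assumes an ordered set such
  // as the pool's allocated_arenas_ below):
  //
  //   std::set<TrackedArena, LessByArenaAddr> arenas;
  //   auto it = arenas.find(TrackedArena(addr));  // key carries only 'addr'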
38 
39   template <typename PageVisitor>
VisitRoots(PageVisitor & visitor)40   void VisitRoots(PageVisitor& visitor) const REQUIRES_SHARED(Locks::mutator_lock_) {
41     DCHECK_ALIGNED(Size(), kPageSize);
42     DCHECK_ALIGNED(Begin(), kPageSize);
43     int nr_pages = Size() / kPageSize;
44     uint8_t* page_begin = Begin();
45     for (int i = 0; i < nr_pages && first_obj_array_[i] != nullptr; i++, page_begin += kPageSize) {
46       visitor(page_begin, first_obj_array_[i]);
47     }
48   }
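
  // A minimal sketch of the visitor shape VisitRoots() expects: any callable
  // taking the page's begin address and the first object overlapping that
  // page (names here are hypothetical):
  //
  //   auto visitor = [](uint8_t* page_begin, uint8_t* first_obj) {
  //     // Update the GC-roots of objects on this page, starting at first_obj.
  //   };
  //   arena.VisitRoots(visitor);  // mutator_lock_ must be held for reading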

  // Return the page addr of the first page with first_obj set to nullptr.
  uint8_t* GetLastUsedByte() const REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK_ALIGNED(Begin(), kPageSize);
    DCHECK_ALIGNED(End(), kPageSize);
    // Jump past the bytes already allocated for arenas which are not currently
    // being used by the arena-allocator. This reduces the loop iterations below.
    uint8_t* last_byte = AlignUp(Begin() + GetBytesAllocated(), kPageSize);
    DCHECK_LE(last_byte, End());
    for (size_t i = (last_byte - Begin()) / kPageSize;
         last_byte < End() && first_obj_array_[i] != nullptr;
         last_byte += kPageSize, i++) {
      // No body.
    }
    return last_byte;
  }
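
  // Worked example with hypothetical numbers: for an arena with Begin() =
  // 0x1000 and kPageSize = 0x1000, where pages 0 and 1 have first_obj set and
  // page 2 does not, GetLastUsedByte() returns 0x3000, i.e. page 2's begin.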

  uint8_t* GetFirstObject(uint8_t* addr) const REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK_LE(Begin(), addr);
    DCHECK_GT(End(), addr);
    return first_obj_array_[(addr - Begin()) / kPageSize];
  }

  // Store 'obj_begin' in every first_obj_array_ element for which it is the
  // first object.
  void SetFirstObject(uint8_t* obj_begin, uint8_t* obj_end);
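  // For instance (hypothetical layout): an object starting mid-way through
  // page 2 and ending mid-way through page 4 overlaps the beginnings of pages
  // 3 and 4, so first_obj_array_[3] and first_obj_array_[4] both get set to
  // its obj_begin.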

  void Release() override;
  bool IsPreZygoteForkArena() const { return pre_zygote_fork_; }

 private:
  // first_obj_array_[i] is the object that overlaps with the ith page's
  // beginning, i.e. first_obj_array_[i] <= ith page_begin.
  std::unique_ptr<uint8_t*[]> first_obj_array_;
  const bool pre_zygote_fork_;
};

// An arena-pool wherein allocations can be tracked so that the GC can visit
// all the GC roots. All the arenas are allocated in one sufficiently large
// memory range to avoid multiple mremap/mprotect syscalls.
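//
// A hypothetical end-to-end sketch (flag values and the 128 KB size are
// assumptions for illustration, not prescribed by this API):
//
//   GcVisitedArenaPool pool(/*low_4gb=*/false, /*is_zygote=*/false);
//   Arena* arena = pool.AllocArena(128 * KB);  // tracked, GC-visitable
//   ...                                        // hand it to an ArenaAllocator
//   pool.FreeArenaChain(arena);                // return it to the pool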
class GcVisitedArenaPool final : public ArenaPool {
 public:
#if defined(__LP64__)
  // Use a size which is a multiple of 1GB, as that can utilize the optimized
  // mremap page-table move.
  static constexpr size_t kLinearAllocPoolSize = 1 * GB;
  static constexpr size_t kLow4GBLinearAllocPoolSize = 32 * MB;
#else
  static constexpr size_t kLinearAllocPoolSize = 32 * MB;
#endif

  explicit GcVisitedArenaPool(bool low_4gb = false,
                              bool is_zygote = false,
                              const char* name = "LinearAlloc");
  virtual ~GcVisitedArenaPool();
  Arena* AllocArena(size_t size) override;
  void FreeArenaChain(Arena* first) override;
  size_t GetBytesAllocated() const override;
  void ReclaimMemory() override {}
  void LockReclaimMemory() override {}
  void TrimMaps() override {}

  bool Contains(void* ptr) {
    std::lock_guard<std::mutex> lock(lock_);
    for (auto& map : maps_) {
      if (map.HasAddress(ptr)) {
        return true;
      }
    }
    return false;
  }

  template <typename PageVisitor>
  void VisitRoots(PageVisitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
    std::lock_guard<std::mutex> lock(lock_);
    for (auto& arena : allocated_arenas_) {
      arena.VisitRoots(visitor);
    }
  }

  template <typename Callback>
  void ForEachAllocatedArena(Callback cb) REQUIRES_SHARED(Locks::mutator_lock_) {
    std::lock_guard<std::mutex> lock(lock_);
    for (auto& arena : allocated_arenas_) {
      cb(arena);
    }
  }

  // Called in Heap::PreZygoteFork(). All allocations after this are done in
  // the arena pool, which is visited by userfaultfd.
  void SetupPostZygoteMode() {
    std::lock_guard<std::mutex> lock(lock_);
    DCHECK(pre_zygote_fork_);
    pre_zygote_fork_ = false;
  }

  // Lets the userfaultfd GC acquire the lock so that arenas are not released
  // concurrently while it is visiting them.
  std::mutex& GetLock() { return lock_; }

  // Find the given arena in allocated_arenas_. Called with lock_ already
  // acquired.
  bool FindAllocatedArena(const TrackedArena* arena) const NO_THREAD_SAFETY_ANALYSIS {
    for (auto& allocated_arena : allocated_arenas_) {
      if (arena == &allocated_arena) {
        return true;
      }
    }
    return false;
  }

  void ClearArenasFreed() {
    std::lock_guard<std::mutex> lock(lock_);
    arenas_freed_ = false;
  }

  // Called with lock_ already acquired.
  bool AreArenasFreed() const NO_THREAD_SAFETY_ANALYSIS { return arenas_freed_; }

 private:
  void FreeRangeLocked(uint8_t* range_begin, size_t range_size) REQUIRES(lock_);
  // Add a map of at least min_size (to be visited by userfaultfd) to the pool
  // and return its address.
  uint8_t* AddMap(size_t min_size) REQUIRES(lock_);
  // Add a private anonymous map prior to zygote fork to the pool and return
  // its address.
  uint8_t* AddPreZygoteForkMap(size_t size) REQUIRES(lock_);

  class Chunk {
   public:
    Chunk(uint8_t* addr, size_t size) : addr_(addr), size_(size) {}
    uint8_t* addr_;
    size_t size_;
  };

  class LessByChunkAddr {
   public:
    bool operator()(const Chunk* a, const Chunk* b) const {
      return std::less<uint8_t*>{}(a->addr_, b->addr_);
    }
  };

  class LessByChunkSize {
   public:
    // Since two chunks could have the same size, use addr when that happens.
    bool operator()(const Chunk* a, const Chunk* b) const {
      return a->size_ < b->size_ ||
             (a->size_ == b->size_ && std::less<uint8_t*>{}(a->addr_, b->addr_));
    }
  };
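
  // A hypothetical best-fit lookup built on this comparator and the
  // best_fit_allocs_ set declared below: probing lower_bound() with a dummy
  // chunk of the requested size yields the smallest free chunk that is at
  // least that large.
  //
  //   Chunk temp_chunk(nullptr, requested_size);
  //   auto best_fit = best_fit_allocs_.lower_bound(&temp_chunk);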

  class LessByArenaAddr {
   public:
    bool operator()(const TrackedArena& a, const TrackedArena& b) const {
      return std::less<uint8_t*>{}(a.Begin(), b.Begin());
    }
  };

  // Use a std::mutex here as Arenas are second-from-the-bottom when using MemMaps, and MemMap
  // itself uses std::mutex scoped to within an allocate/free only.
  mutable std::mutex lock_;
  std::vector<MemMap> maps_ GUARDED_BY(lock_);
  std::set<Chunk*, LessByChunkSize> best_fit_allocs_ GUARDED_BY(lock_);
  std::set<Chunk*, LessByChunkAddr> free_chunks_ GUARDED_BY(lock_);
  // Set of allocated arenas. It's required to be able to find the arena
  // corresponding to a given address.
  // TODO: consider using HashSet, which is more memory efficient.
  std::set<TrackedArena, LessByArenaAddr> allocated_arenas_ GUARDED_BY(lock_);
  // Number of bytes allocated so far.
  size_t bytes_allocated_ GUARDED_BY(lock_);
  const char* name_;
  // Flag to indicate that some arenas have been freed. This flag is used as an
  // optimization by GC to know if it needs to find if the arena being visited
  // has been freed or not. The flag is cleared in the compaction pause and read
  // when the linear-alloc space is concurrently visited to update GC roots.
  bool arenas_freed_ GUARDED_BY(lock_);
  const bool low_4gb_;
  // Set to true in the zygote process so that all linear-alloc allocations are
  // in private-anonymous mappings and not on userfaultfd visited pages. At the
  // first zygote fork, it's set to false, after which all allocations are done
  // in userfaultfd visited space.
  bool pre_zygote_fork_ GUARDED_BY(lock_);

  DISALLOW_COPY_AND_ASSIGN(GcVisitedArenaPool);
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_GC_VISITED_ARENA_POOL_H_