/*
 * Copyright 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/gc_visited_arena_pool.h"

#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

#include "base/arena_allocator-inl.h"
#include "base/memfd.h"
#include "base/utils.h"
#include "gc/collector/mark_compact-inl.h"

namespace art {

TrackedArena::TrackedArena(uint8_t* start, size_t size, bool pre_zygote_fork)
    : Arena(), first_obj_array_(nullptr), pre_zygote_fork_(pre_zygote_fork) {
  static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,
                "Arena should not need stronger alignment than kPageSize.");
  DCHECK_ALIGNED(size, kPageSize);
  DCHECK_ALIGNED(start, kPageSize);
  memory_ = start;
  size_ = size;
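  // One slot per page of the arena: SetFirstObject() records in slot i the
  // object covering the beginning of the i-th page, which tells the GC where
  // to start visiting that page.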
  size_t arr_size = size / kPageSize;
  first_obj_array_.reset(new uint8_t*[arr_size]);
  std::fill_n(first_obj_array_.get(), arr_size, nullptr);
}

void TrackedArena::Release() {
  if (bytes_allocated_ > 0) {
    // Userfaultfd GC uses MAP_SHARED mappings for linear-alloc, so
    // MADV_DONTNEED will not free the pages from the page cache. Use
    // MADV_REMOVE instead, which is meant for this purpose.
    // Arenas allocated pre-zygote fork are private anonymous and hence must be
    // released using MADV_DONTNEED.
    if (!gUseUserfaultfd || pre_zygote_fork_ ||
        (madvise(Begin(), Size(), MADV_REMOVE) == -1 && errno == EINVAL)) {
      // MADV_REMOVE fails if invoked on an anonymous mapping, which can happen
      // if the arena is released before the userfaultfd GC starts using memfd.
      // So fall back to MADV_DONTNEED.
      ZeroAndReleasePages(Begin(), Size());
    }
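    // The arena is going back to the pool; drop the stale first-object
    // entries along with the allocation count.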
    std::fill_n(first_obj_array_.get(), Size() / kPageSize, nullptr);
    bytes_allocated_ = 0;
  }
}

void TrackedArena::SetFirstObject(uint8_t* obj_begin, uint8_t* obj_end) {
  DCHECK_LE(static_cast<void*>(Begin()), static_cast<void*>(obj_begin));
  DCHECK_LT(static_cast<void*>(obj_begin), static_cast<void*>(obj_end));
  size_t idx = static_cast<size_t>(obj_begin - Begin()) / kPageSize;
  size_t last_byte_idx = static_cast<size_t>(obj_end - 1 - Begin()) / kPageSize;
  // If the addr is at the beginning of a page, then we set it for that page too.
  if (IsAligned<kPageSize>(obj_begin)) {
    first_obj_array_[idx] = obj_begin;
  }
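  // Every subsequent page that the object spills into gets obj_begin as its
  // first (covering) object.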
  while (idx < last_byte_idx) {
    first_obj_array_[++idx] = obj_begin;
  }
}

uint8_t* GcVisitedArenaPool::AddMap(size_t min_size) {
  size_t size = std::max(min_size, kLinearAllocPoolSize);
#if defined(__LP64__)
  // This is true only when we are running a 64-bit dex2oat to compile a 32-bit image.
  if (low_4gb_) {
    size = std::max(min_size, kLow4GBLinearAllocPoolSize);
  }
#endif
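  // Pick the most favorable page-table-level alignment for a mapping of this
  // size (see BestPageTableAlignment()).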
  size_t alignment = BestPageTableAlignment(size);
  DCHECK_GE(size, kPMDSize);
  std::string err_msg;
  maps_.emplace_back(MemMap::MapAnonymousAligned(
      name_, size, PROT_READ | PROT_WRITE, low_4gb_, alignment, &err_msg));
  MemMap& map = maps_.back();
  if (!map.IsValid()) {
    LOG(FATAL) << "Failed to allocate " << name_ << ": " << err_msg;
    UNREACHABLE();
  }

  if (gUseUserfaultfd) {
    // Create a shadow-map for the map being added for userfaultfd GC.
    gc::collector::MarkCompact* mark_compact =
        Runtime::Current()->GetHeap()->MarkCompactCollector();
    DCHECK_NE(mark_compact, nullptr);
    mark_compact->AddLinearAllocSpaceData(map.Begin(), map.Size());
  }
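  // Publish the whole mapping as one free chunk. Both sets share the same
  // Chunk object: best_fit_allocs_ orders chunks by size for allocation,
  // free_chunks_ by address for coalescing in FreeRangeLocked().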
  Chunk* chunk = new Chunk(map.Begin(), map.Size());
  best_fit_allocs_.insert(chunk);
  free_chunks_.insert(chunk);
  return map.Begin();
}

GcVisitedArenaPool::GcVisitedArenaPool(bool low_4gb, bool is_zygote, const char* name)
    : bytes_allocated_(0), name_(name), low_4gb_(low_4gb), pre_zygote_fork_(is_zygote) {}

GcVisitedArenaPool::~GcVisitedArenaPool() {
  for (Chunk* chunk : free_chunks_) {
    delete chunk;
  }
  // Must not delete chunks from best_fit_allocs_ as they are shared with
  // free_chunks_.
}

size_t GcVisitedArenaPool::GetBytesAllocated() const {
  std::lock_guard<std::mutex> lock(lock_);
  return bytes_allocated_;
}

uint8_t* GcVisitedArenaPool::AddPreZygoteForkMap(size_t size) {
  DCHECK(pre_zygote_fork_);
  DCHECK(Runtime::Current()->IsZygote());
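  // Use a plain private-anonymous mapping here (unlike AddMap()) so that
  // pages stay copy-on-write shared with children across the zygote fork.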
  std::string pre_fork_name = "Pre-zygote-";
  pre_fork_name += name_;
  std::string err_msg;
  maps_.emplace_back(MemMap::MapAnonymous(
      pre_fork_name.c_str(), size, PROT_READ | PROT_WRITE, low_4gb_, &err_msg));
  MemMap& map = maps_.back();
  if (!map.IsValid()) {
    LOG(FATAL) << "Failed to allocate " << pre_fork_name << ": " << err_msg;
    UNREACHABLE();
  }
  return map.Begin();
}

Arena* GcVisitedArenaPool::AllocArena(size_t size) {
  // Return only page aligned sizes so that madvise can be leveraged.
  size = RoundUp(size, kPageSize);
  std::lock_guard<std::mutex> lock(lock_);

  if (pre_zygote_fork_) {
    // The first fork out of zygote hasn't happened yet. Allocate arena in a
    // private-anonymous mapping to retain clean pages across fork.
    DCHECK(Runtime::Current()->IsZygote());
    uint8_t* addr = AddPreZygoteForkMap(size);
    auto emplace_result = allocated_arenas_.emplace(addr, size, /*pre_zygote_fork=*/true);
    return const_cast<TrackedArena*>(&(*emplace_result.first));
  }

  Chunk temp_chunk(nullptr, size);
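  // Best-fit search: temp_chunk is the key for the size-ordered set, so
  // lower_bound() yields the smallest free chunk that can hold the request.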
  auto best_fit_iter = best_fit_allocs_.lower_bound(&temp_chunk);
  if (UNLIKELY(best_fit_iter == best_fit_allocs_.end())) {
    AddMap(size);
    best_fit_iter = best_fit_allocs_.lower_bound(&temp_chunk);
    CHECK(best_fit_iter != best_fit_allocs_.end());
  }
  auto free_chunks_iter = free_chunks_.find(*best_fit_iter);
  DCHECK(free_chunks_iter != free_chunks_.end());
  Chunk* chunk = *best_fit_iter;
  DCHECK_EQ(chunk, *free_chunks_iter);
  // If the best-fit chunk is smaller than twice the requested size, give away
  // the whole chunk rather than leave behind a remainder smaller than the
  // request.
  if (chunk->size_ < 2 * size) {
    DCHECK_GE(chunk->size_, size);
    auto emplace_result = allocated_arenas_.emplace(chunk->addr_,
                                                    chunk->size_,
                                                    /*pre_zygote_fork=*/false);
    DCHECK(emplace_result.second);
    free_chunks_.erase(free_chunks_iter);
    best_fit_allocs_.erase(best_fit_iter);
    delete chunk;
    return const_cast<TrackedArena*>(&(*emplace_result.first));
  } else {
    auto emplace_result = allocated_arenas_.emplace(chunk->addr_,
                                                    size,
                                                    /*pre_zygote_fork=*/false);
    DCHECK(emplace_result.second);
    // Compute next iterators for faster insert later.
    auto next_best_fit_iter = best_fit_iter;
    next_best_fit_iter++;
    auto next_free_chunks_iter = free_chunks_iter;
    next_free_chunks_iter++;
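    // Carve the allocated prefix off the chunk in place: extract the nodes,
    // advance addr_ and shrink size_ on the shared Chunk, then re-insert
    // using the saved iterators as hints.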
    auto best_fit_nh = best_fit_allocs_.extract(best_fit_iter);
    auto free_chunks_nh = free_chunks_.extract(free_chunks_iter);
    best_fit_nh.value()->addr_ += size;
    best_fit_nh.value()->size_ -= size;
    DCHECK_EQ(free_chunks_nh.value()->addr_, chunk->addr_);
    best_fit_allocs_.insert(next_best_fit_iter, std::move(best_fit_nh));
    free_chunks_.insert(next_free_chunks_iter, std::move(free_chunks_nh));
    return const_cast<TrackedArena*>(&(*emplace_result.first));
  }
}

void GcVisitedArenaPool::FreeRangeLocked(uint8_t* range_begin, size_t range_size) {
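  // Return [range_begin, range_begin + range_size) to the free pool, merging
  // it with the adjacent free chunks when they are contiguous.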
  Chunk temp_chunk(range_begin, range_size);
  bool merge_with_next = false;
  bool merge_with_prev = false;
  auto next_iter = free_chunks_.lower_bound(&temp_chunk);
  auto iter_for_extract = free_chunks_.end();
  // Can we merge with the previous chunk?
  if (next_iter != free_chunks_.begin()) {
    auto prev_iter = next_iter;
    prev_iter--;
    merge_with_prev = (*prev_iter)->addr_ + (*prev_iter)->size_ == range_begin;
    if (merge_with_prev) {
      range_begin = (*prev_iter)->addr_;
      range_size += (*prev_iter)->size_;
      // Hold on to the iterator for faster extract later.
      iter_for_extract = prev_iter;
    }
  }
  // Can we merge with the next chunk?
  if (next_iter != free_chunks_.end()) {
    merge_with_next = range_begin + range_size == (*next_iter)->addr_;
    if (merge_with_next) {
      range_size += (*next_iter)->size_;
      if (merge_with_prev) {
        auto iter = next_iter;
        next_iter++;
        // Keep only one of the two chunks to be expanded.
        Chunk* chunk = *iter;
        size_t erase_res = best_fit_allocs_.erase(chunk);
        DCHECK_EQ(erase_res, 1u);
        free_chunks_.erase(iter);
        delete chunk;
      } else {
        iter_for_extract = next_iter;
        next_iter++;
      }
    }
  }

  // Extract-insert avoids 2/4 destroys and 2/2 creations
  // as compared to erase-insert, so use that when merging.
  if (merge_with_prev || merge_with_next) {
    auto free_chunks_nh = free_chunks_.extract(iter_for_extract);
    // iter_for_extract is invalidated by the extract above; use the Chunk*
    // held in the node handle to extract the same chunk from the other set.
    auto best_fit_allocs_nh = best_fit_allocs_.extract(free_chunks_nh.value());

    free_chunks_nh.value()->addr_ = range_begin;
    DCHECK_EQ(best_fit_allocs_nh.value()->addr_, range_begin);
    free_chunks_nh.value()->size_ = range_size;
    DCHECK_EQ(best_fit_allocs_nh.value()->size_, range_size);

    free_chunks_.insert(next_iter, std::move(free_chunks_nh));
    // Since the chunk's size has expanded, the hint won't be useful
    // for the best-fit set.
    best_fit_allocs_.insert(std::move(best_fit_allocs_nh));
  } else {
    DCHECK(iter_for_extract == free_chunks_.end());
    Chunk* chunk = new Chunk(range_begin, range_size);
    free_chunks_.insert(next_iter, chunk);
    best_fit_allocs_.insert(chunk);
  }
}

void GcVisitedArenaPool::FreeArenaChain(Arena* first) {
  if (kRunningOnMemoryTool) {
    for (Arena* arena = first; arena != nullptr; arena = arena->Next()) {
      MEMORY_TOOL_MAKE_UNDEFINED(arena->Begin(), arena->GetBytesAllocated());
    }
  }

  // TODO: Handle the case when arena_allocator::kArenaAllocatorPreciseTracking
  // is true. See MemMapArenaPool::FreeArenaChain() for example.
  CHECK(!arena_allocator::kArenaAllocatorPreciseTracking);

  // madvise the arenas before acquiring lock for scalability.
  for (Arena* temp = first; temp != nullptr; temp = temp->Next()) {
    temp->Release();
  }

  std::lock_guard<std::mutex> lock(lock_);
  arenas_freed_ = true;
  while (first != nullptr) {
    FreeRangeLocked(first->Begin(), first->Size());
    // In other implementations of ArenaPool this is calculated when asked for,
    // thanks to the list of free arenas that is kept around. But in this case,
    // we release the freed arena back to the pool and therefore need to
    // calculate here.
    bytes_allocated_ += first->GetBytesAllocated();
    TrackedArena* temp = down_cast<TrackedArena*>(first);
    // TODO: Add logic to unmap the maps corresponding to pre-zygote-fork
    // arenas, which are expected to be released only during shutdown.
    first = first->Next();
    size_t erase_count = allocated_arenas_.erase(*temp);
    DCHECK_EQ(erase_count, 1u);
  }
}

}  // namespace art