// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/free-list.h"

#include <algorithm>

#include "include/cppgc/internal/logging.h"
#include "src/base/bits.h"
#include "src/base/sanitizer/asan.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"

namespace cppgc {
namespace internal {

namespace {
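// Maps |size| to its free-list bucket: the index is floor(log2(size)), so
// bucket i holds entries of at least 2^i (and less than 2^(i+1)) bytes.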
uint32_t BucketIndexForSize(uint32_t size) {
  return v8::base::bits::WhichPowerOfTwo(
      v8::base::bits::RoundDownToPowerOfTwo32(size));
}
} // namespace

class FreeList::Entry : public HeapObjectHeader {
 public:
  static Entry& CreateAt(void* memory, size_t size) {
    // Make sure the freelist header is writable. SET_MEMORY_ACCESSIBLE is not
    // needed as we write the whole payload of Entry.
    ASAN_UNPOISON_MEMORY_REGION(memory, sizeof(Entry));
    return *new (memory) Entry(size);
  }

  Entry* Next() const { return next_; }
  void SetNext(Entry* next) { next_ = next; }

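  // Link/Unlink maintain the intrusive singly-linked bucket list through
  // |previous_next|, the slot that points to this entry (either the bucket
  // head or the previous entry's |next_|).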
  void Link(Entry** previous_next) {
    next_ = *previous_next;
    *previous_next = this;
  }
  void Unlink(Entry** previous_next) {
    *previous_next = next_;
    next_ = nullptr;
  }

 private:
  explicit Entry(size_t size) : HeapObjectHeader(size, kFreeListGCInfoIndex) {
    static_assert(sizeof(Entry) == kFreeListEntrySize, "Sizes must match");
  }

  Entry* next_ = nullptr;
};

FreeList::FreeList() { Clear(); }

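// Move operations leave |other| in an empty but usable state.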
FreeList::FreeList(FreeList&& other) V8_NOEXCEPT
    : free_list_heads_(std::move(other.free_list_heads_)),
      free_list_tails_(std::move(other.free_list_tails_)),
      biggest_free_list_index_(std::move(other.biggest_free_list_index_)) {
  other.Clear();
}

FreeList& FreeList::operator=(FreeList&& other) V8_NOEXCEPT {
  Clear();
  Append(std::move(other));
  DCHECK(other.IsEmpty());
  return *this;
}

void FreeList::Add(FreeList::Block block) { AddReturningUnusedBounds(block); }

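// Adds |block| to the freelist and returns the [begin, end) bounds of the
// block's payload that is not used for free-list bookkeeping. The range is
// empty for blocks too small to hold an Entry.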
std::pair<Address, Address> FreeList::AddReturningUnusedBounds(Block block) {
  const size_t size = block.size;
  DCHECK_GT(kPageSize, size);
  DCHECK_LE(sizeof(HeapObjectHeader), size);

  if (size < sizeof(Entry)) {
    // Create wasted entry. This can happen when an almost emptied linear
    // allocation buffer is returned to the freelist.
    // This could be SET_MEMORY_ACCESSIBLE. Since there's no payload, the next
    // operation overwrites the memory completely, and we can thus avoid
    // zeroing it out.
    auto& filler = Filler::CreateAt(block.address, size);
    USE(filler);
    DCHECK_EQ(reinterpret_cast<Address>(block.address) + size,
              filler.ObjectEnd());
    DCHECK_EQ(reinterpret_cast<Address>(&filler + 1), filler.ObjectEnd());
    return {reinterpret_cast<Address>(&filler + 1),
            reinterpret_cast<Address>(&filler + 1)};
  }

  Entry& entry = Entry::CreateAt(block.address, size);
  const size_t index = BucketIndexForSize(static_cast<uint32_t>(size));
  entry.Link(&free_list_heads_[index]);
  biggest_free_list_index_ = std::max(biggest_free_list_index_, index);
  if (!entry.Next()) {
    free_list_tails_[index] = &entry;
  }
  DCHECK_EQ(entry.ObjectEnd(), reinterpret_cast<Address>(&entry) + size);
  return {reinterpret_cast<Address>(&entry + 1),
          reinterpret_cast<Address>(&entry) + size};
}

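// Splices |other|'s buckets in front of this list's buckets, leaving |other|
// empty.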
void FreeList::Append(FreeList&& other) {
#if DEBUG
  const size_t expected_size = Size() + other.Size();
#endif
  // Newly created entries get added to the head.
  for (size_t index = 0; index < free_list_tails_.size(); ++index) {
    Entry* other_tail = other.free_list_tails_[index];
    Entry*& this_head = this->free_list_heads_[index];
    if (other_tail) {
      other_tail->SetNext(this_head);
      if (!this_head) {
        this->free_list_tails_[index] = other_tail;
      }
      this_head = other.free_list_heads_[index];
      other.free_list_heads_[index] = nullptr;
      other.free_list_tails_[index] = nullptr;
    }
  }

  biggest_free_list_index_ =
      std::max(biggest_free_list_index_, other.biggest_free_list_index_);
  other.biggest_free_list_index_ = 0;
#if DEBUG
  DCHECK_EQ(expected_size, Size());
#endif
  DCHECK(other.IsEmpty());
}

FreeList::Block FreeList::Allocate(size_t allocation_size) {
  // Try reusing a block from the largest bin. The underlying reasoning is
  // that we want to amortize this slow allocation call by carving off as
  // large a free block as possible in one go; a block that will service
  // this allocation and let subsequent allocations be serviced quickly
  // by bump allocation.
  // |bucket_size| is the minimal size of entries in the current bucket.
  size_t bucket_size = static_cast<size_t>(1) << biggest_free_list_index_;
  size_t index = biggest_free_list_index_;
  for (; index > 0; --index, bucket_size >>= 1) {
    DCHECK(IsConsistent(index));
    Entry* entry = free_list_heads_[index];
    if (allocation_size > bucket_size) {
      // Final bucket candidate; check initial entry if it is able
      // to service this allocation. Do not perform a linear scan,
      // as it is considered too costly.
      if (!entry || entry->AllocatedSize() < allocation_size) break;
    }
    if (entry) {
      if (!entry->Next()) {
        DCHECK_EQ(entry, free_list_tails_[index]);
        free_list_tails_[index] = nullptr;
      }
      entry->Unlink(&free_list_heads_[index]);
      biggest_free_list_index_ = index;
      return {entry, entry->AllocatedSize()};
    }
  }
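  // No entry could service the request: every bucket above |index| was empty,
  // so cache |index| to narrow the next scan.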
  biggest_free_list_index_ = index;
  return {nullptr, 0u};
}

void FreeList::Clear() {
  std::fill(free_list_heads_.begin(), free_list_heads_.end(), nullptr);
  std::fill(free_list_tails_.begin(), free_list_tails_.end(), nullptr);
  biggest_free_list_index_ = 0;
}

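// Walks every bucket and sums the allocated sizes of all entries.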
size_t FreeList::Size() const {
  size_t size = 0;
  for (auto* entry : free_list_heads_) {
    while (entry) {
      size += entry->AllocatedSize();
      entry = entry->Next();
    }
  }
  return size;
}

bool FreeList::IsEmpty() const {
  return std::all_of(free_list_heads_.cbegin(), free_list_heads_.cend(),
                     [](const auto* entry) { return !entry; });
}

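// Returns true if |block| lies entirely within some entry of the freelist.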
bool FreeList::ContainsForTesting(Block block) const {
  for (Entry* list : free_list_heads_) {
    for (Entry* entry = list; entry; entry = entry->Next()) {
      if (entry <= block.address &&
          (reinterpret_cast<Address>(block.address) + block.size <=
           reinterpret_cast<Address>(entry) + entry->AllocatedSize()))
        return true;
    }
  }
  return false;
}

bool FreeList::IsConsistent(size_t index) const {
  // Check that freelist head and tail pointers are consistent, i.e.
  // - either both are null (no entries in the bucket);
  // - or both are non-null and the tail points to the last entry.
  return (!free_list_heads_[index] && !free_list_tails_[index]) ||
         (free_list_heads_[index] && free_list_tails_[index] &&
          !free_list_tails_[index]->Next());
}

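// Records, for each bucket, its minimal entry size together with the number
// of entries and total free bytes currently in it.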
void FreeList::CollectStatistics(
    HeapStatistics::FreeListStatistics& free_list_stats) {
  std::vector<size_t>& bucket_size = free_list_stats.bucket_size;
  std::vector<size_t>& free_count = free_list_stats.free_count;
  std::vector<size_t>& free_size = free_list_stats.free_size;
  DCHECK(bucket_size.empty());
  DCHECK(free_count.empty());
  DCHECK(free_size.empty());
  for (size_t i = 0; i < kPageSizeLog2; ++i) {
    size_t entry_count = 0;
    size_t entry_size = 0;
    for (Entry* entry = free_list_heads_[i]; entry; entry = entry->Next()) {
      ++entry_count;
      entry_size += entry->AllocatedSize();
    }
    bucket_size.push_back(static_cast<size_t>(1) << i);
    free_count.push_back(entry_count);
    free_size.push_back(entry_size);
  }
}

} // namespace internal
} // namespace cppgc