// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/gc-info-table.h"

#include <algorithm>
#include <limits>
#include <memory>

#include "include/cppgc/internal/gc-info.h"
#include "include/cppgc/platform.h"
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/page-allocator.h"

namespace cppgc {
namespace internal {

namespace {

// GCInfoTable::table_, the table which holds GCInfos, is maintained as a
// contiguous array reserved upfront. Subparts of the array are (re-)committed
// as read/write or read-only in OS pages, whose size is a power of 2. To avoid
// having GCInfos that cross the boundaries between these subparts we force the
// size of GCInfo to be a power of 2 as well.
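// For example, with 4 KiB OS pages and a (hypothetical) 32-byte GCInfo, each
// page holds exactly 128 whole entries, so changing the protection of a page
// never splits a single entry across differently-protected memory.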
constexpr size_t kEntrySize = sizeof(GCInfo);
static_assert(v8::base::bits::IsPowerOfTwo(kEntrySize),
              "GCInfoTable entries size must be power of two");

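// Returns the explicitly passed allocator if one was provided; otherwise falls
// back to a process-global, intentionally leaked v8::base::PageAllocator.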
PageAllocator* GetAllocator(PageAllocator* page_allocator) {
  if (!page_allocator) {
    static v8::base::LeakyObject<v8::base::PageAllocator>
        default_page_allocator;
    page_allocator = default_page_allocator.get();
  }
  // No need to introduce LSAN support for PageAllocator, as `GCInfoTable` is
  // already a leaky object and the table payload (`GCInfoTable::table_`)
  // should not refer to dynamically allocated objects.
  return page_allocator;
}

}  // namespace

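// Storage for the process-global table pointer, plus out-of-line definitions
// of the constexpr index limits (kept for ODR-use under pre-C++17 rules).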
GCInfoTable* GlobalGCInfoTable::global_table_ = nullptr;
constexpr GCInfoIndex GCInfoTable::kMaxIndex;
constexpr GCInfoIndex GCInfoTable::kMinIndex;
constexpr GCInfoIndex GCInfoTable::kInitialWantedLimit;

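// Creates the process-global table on first use and intentionally leaks it;
// subsequent calls only verify that the same allocator is used again.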
// static
void GlobalGCInfoTable::Initialize(PageAllocator* page_allocator) {
  static v8::base::LeakyObject<GCInfoTable> table(GetAllocator(page_allocator));
  if (!global_table_) {
    global_table_ = table.get();
  } else {
    CHECK_EQ(page_allocator, global_table_->allocator());
  }
}

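// Reserves the maximum table size as inaccessible memory up front and commits
// the first chunk of entries via Resize().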
GCInfoTable::GCInfoTable(PageAllocator* page_allocator)
    : page_allocator_(page_allocator),
      table_(static_cast<decltype(table_)>(page_allocator_->AllocatePages(
          nullptr, MaxTableSize(), page_allocator_->AllocatePageSize(),
          PageAllocator::kNoAccess))),
      read_only_table_end_(reinterpret_cast<uint8_t*>(table_)) {
  CHECK(table_);
  Resize();
}

GCInfoTable::~GCInfoTable() {
  page_allocator_->ReleasePages(const_cast<GCInfo*>(table_), MaxTableSize(), 0);
}

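// Size of the full reservation: the maximum number of entries rounded up to a
// whole number of allocator pages.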
size_t GCInfoTable::MaxTableSize() const {
  return RoundUp(GCInfoTable::kMaxIndex * kEntrySize,
                 page_allocator_->AllocatePageSize());
}

GCInfoIndex GCInfoTable::InitialTableLimit() const {
  // OS page sizes differ, so round the wanted memory up to a whole number of
  // allocator pages and clamp the resulting number of entries to kMaxIndex.
  constexpr size_t memory_wanted = kInitialWantedLimit * kEntrySize;
  const size_t initial_limit =
      RoundUp(memory_wanted, page_allocator_->AllocatePageSize()) / kEntrySize;
  CHECK_GT(std::numeric_limits<GCInfoIndex>::max(), initial_limit);
  return static_cast<GCInfoIndex>(
      std::min(static_cast<size_t>(kMaxIndex), initial_limit));
}

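// Grows the committed part of the table: doubles the current limit (or starts
// at the initial limit), commits the newly needed pages as read/write, and
// re-protects the previously committed, fully populated pages as read-only.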
void GCInfoTable::Resize() {
  const GCInfoIndex new_limit = (limit_) ? 2 * limit_ : InitialTableLimit();
  CHECK_GT(new_limit, limit_);
  const size_t old_committed_size = limit_ * kEntrySize;
  const size_t new_committed_size = new_limit * kEntrySize;
  CHECK(table_);
  CHECK_EQ(0u, new_committed_size % page_allocator_->AllocatePageSize());
  CHECK_GE(MaxTableSize(), new_committed_size);
  // Recommit new area as read/write.
  uint8_t* current_table_end =
      reinterpret_cast<uint8_t*>(table_) + old_committed_size;
  const size_t table_size_delta = new_committed_size - old_committed_size;
  CHECK(page_allocator_->SetPermissions(current_table_end, table_size_delta,
                                        PageAllocator::kReadWrite));
  // Recommit old area as read-only.
  if (read_only_table_end_ != current_table_end) {
    DCHECK_GT(current_table_end, read_only_table_end_);
    const size_t read_only_delta = current_table_end - read_only_table_end_;
    CHECK(page_allocator_->SetPermissions(read_only_table_end_,
                                          read_only_delta,
                                          PageAllocator::kRead));
    read_only_table_end_ += read_only_delta;
  }

  // Check that newly-committed memory is zero-initialized.
  CheckMemoryIsZeroed(reinterpret_cast<uintptr_t*>(current_table_end),
                      table_size_delta / sizeof(uintptr_t));

  limit_ = new_limit;
}

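// Debug-only verification that freshly committed pages are zero-filled;
// compiled out of non-DEBUG builds.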
void GCInfoTable::CheckMemoryIsZeroed(uintptr_t* base, size_t len) {
#if DEBUG
  for (size_t i = 0; i < len; ++i) {
    DCHECK(!base[i]);
  }
#endif  // DEBUG
}

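// Registers `info` under a fresh index, or returns the already registered
// index if another thread won the race. Registration is serialized by
// table_mutex_; the release store below is presumably paired with acquire
// loads on the callers' lock-free fast path.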
GCInfoIndex GCInfoTable::RegisterNewGCInfo(
    std::atomic<GCInfoIndex>& registered_index, const GCInfo& info) {
  // Ensuring a new index involves current index adjustment as well as
  // potentially resizing the table. For simplicity we use a lock.
  v8::base::MutexGuard guard(&table_mutex_);

  // Check the registered index again after taking the lock as some other
  // thread may have registered the info at the same time.
  GCInfoIndex index = registered_index.load(std::memory_order_relaxed);
  if (index) {
    return index;
  }

  if (current_index_ == limit_) {
    Resize();
  }

  GCInfoIndex new_index = current_index_++;
  CHECK_LT(new_index, GCInfoTable::kMaxIndex);
  table_[new_index] = info;
  registered_index.store(new_index, std::memory_order_release);
  return new_index;
}

}  // namespace internal
}  // namespace cppgc