// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/zone/accounting-allocator.h"

#include <cstdlib>

#if V8_LIBC_BIONIC
#include <malloc.h>  // NOLINT
#endif

#include "src/allocation.h"

namespace v8 {
namespace internal {

AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
  static const size_t kDefaultBucketMaxSize = 5;

  memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
  std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets,
            nullptr);
  std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0);
  std::fill(unused_segments_max_sizes_,
            unused_segments_max_sizes_ + kNumberBuckets, kDefaultBucketMaxSize);
}

AccountingAllocator::~AccountingAllocator() { ClearPool(); }

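// Records the new pressure level; any level above kNone also drops the whole
// segment pool immediately so the pooled memory is released via free().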
void AccountingAllocator::MemoryPressureNotification(
    MemoryPressureLevel level) {
  memory_pressure_level_.SetValue(level);

  if (level != MemoryPressureLevel::kNone) {
    ClearPool();
  }
}

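// Distributes the pool budget 'max_pool_size' across the per-size-class
// buckets; see the strategy comment inside the function.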
void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
  // The sum of the bytes of one segment of each size.
  static const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) -
                                  (size_t(1) << kMinSegmentSizePower);
  size_t fits_fully = max_pool_size / full_size;

  base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

  // We assume that few zones (fewer than 'fits_fully') are active at the same
  // time. When zones grow regularly, they keep requesting segments of
  // increasing size. Therefore we try to hold as many complete sets, i.e. an
  // equal number of segments of each size, as possible.
  // The remaining space is used to make room for one incomplete set of
  // segments, starting with the smallest sizes.
  // This works best if max_pool_size is a multiple of full_size. If
  // max_pool_size cannot be expressed as a sum of segment sizes, the actual
  // pool size may be smaller than max_pool_size; no actual memory is wasted,
  // though.
  // TODO(heimbuef): Determine a better strategy for generating a segment size
  // distribution that is closer to real/benchmark use cases and uses the
  // given max_pool_size more efficiently.
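  // Worked example, assuming the header defines kMinSegmentSizePower = 13 and
  // kMaxSegmentSizePower = 18 (so kNumberBuckets == 6): full_size is
  // 2^19 - 2^13 = 516096 bytes. For max_pool_size = 8 MB (8388608 bytes) this
  // gives fits_fully = 16 with 131072 bytes left over, enough for one extra
  // 8 KB, 16 KB, 32 KB and 64 KB segment (122880 bytes) but not for an extra
  // 128 KB one, so the bucket max sizes become {17, 17, 17, 17, 16, 16}.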
  size_t total_size = fits_fully * full_size;

  for (size_t power = 0; power < kNumberBuckets; ++power) {
    if (total_size + (size_t(1) << (power + kMinSegmentSizePower)) <=
        max_pool_size) {
      unused_segments_max_sizes_[power] = fits_fully + 1;
      total_size += size_t(1) << (power + kMinSegmentSizePower);
    } else {
      unused_segments_max_sizes_[power] = fits_fully;
    }
  }
}

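// Serves the request from the segment pool when possible and falls back to a
// fresh allocation on a pool miss; only freshly allocated segments need to be
// initialized here.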
Segment* AccountingAllocator::GetSegment(size_t bytes) {
  Segment* result = GetSegmentFromPool(bytes);
  if (result == nullptr) {
    result = AllocateSegment(bytes);
    if (result != nullptr) {
      result->Initialize(bytes);
    }
  }

  return result;
}

Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
  void* memory = AllocWithRetry(bytes);
  if (memory != nullptr) {
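    // Account for the new allocation, then ratchet the high-water mark: the
    // relaxed CAS loop retries until max_memory_usage_ is at least 'current'.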
    base::AtomicWord current =
        base::Relaxed_AtomicIncrement(&current_memory_usage_, bytes);
    base::AtomicWord max = base::Relaxed_Load(&max_memory_usage_);
    while (current > max) {
      max = base::Relaxed_CompareAndSwap(&max_memory_usage_, max, current);
    }
  }
  return reinterpret_cast<Segment*>(memory);
}

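// Zaps the segment's payload and either recycles the segment into the pool
// or, under memory pressure or when its bucket is already full, frees it.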
void AccountingAllocator::ReturnSegment(Segment* segment) {
  segment->ZapContents();

  if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
    FreeSegment(segment);
  } else if (!AddSegmentToPool(segment)) {
    FreeSegment(segment);
  }
}

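// Subtracts the segment from the usage accounting, zaps its header to make
// use-after-free bugs fail fast, and releases the memory.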
void AccountingAllocator::FreeSegment(Segment* memory) {
  base::Relaxed_AtomicIncrement(&current_memory_usage_,
                                -static_cast<base::AtomicWord>(memory->size()));
  memory->ZapHeader();
  free(memory);
}

size_t AccountingAllocator::GetCurrentMemoryUsage() const {
  return base::Relaxed_Load(&current_memory_usage_);
}

size_t AccountingAllocator::GetMaxMemoryUsage() const {
  return base::Relaxed_Load(&max_memory_usage_);
}

size_t AccountingAllocator::GetCurrentPoolSize() const {
  return base::Relaxed_Load(&current_pool_size_);
}

Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
  if (requested_size > (1 << kMaxSegmentSizePower)) {
    return nullptr;
  }

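  // Round the request up to the next power of two to find its bucket; any
  // segment stored in that bucket is at least requested_size bytes large.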
  size_t power = kMinSegmentSizePower;
  while (requested_size > (static_cast<size_t>(1) << power)) power++;

  DCHECK_GE(power, kMinSegmentSizePower + 0);
  power -= kMinSegmentSizePower;

  Segment* segment;
  {
    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

    segment = unused_segments_heads_[power];

    if (segment != nullptr) {
      unused_segments_heads_[power] = segment->next();
      segment->set_next(nullptr);

      unused_segments_sizes_[power]--;
      base::Relaxed_AtomicIncrement(
          &current_pool_size_, -static_cast<base::AtomicWord>(segment->size()));
    }
  }

  if (segment) {
    DCHECK_GE(segment->size(), requested_size);
  }
  return segment;
}

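// Tries to recycle the segment into its size-class bucket; returns false, and
// leaves the segment untouched, if the size is out of range or the bucket is
// already full.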
bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
  size_t size = segment->size();

  if (size >= (1 << (kMaxSegmentSizePower + 1))) return false;

  if (size < (1 << kMinSegmentSizePower)) return false;

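  // Round the size down to a power of two, so every segment stored in bucket
  // 'power' can serve any request that rounds up to that bucket.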
  size_t power = kMaxSegmentSizePower;

  while (size < (static_cast<size_t>(1) << power)) power--;

  DCHECK_GE(power, kMinSegmentSizePower + 0);
  power -= kMinSegmentSizePower;

  {
    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

    if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
      return false;
    }

    segment->set_next(unused_segments_heads_[power]);
    unused_segments_heads_[power] = segment;
    base::Relaxed_AtomicIncrement(&current_pool_size_, size);
    unused_segments_sizes_[power]++;
  }

  return true;
}

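// Frees every pooled segment; called from the destructor and on memory
// pressure notifications.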
void AccountingAllocator::ClearPool() {
  base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

  for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
       power++) {
    Segment* current = unused_segments_heads_[power];
    while (current) {
      Segment* next = current->next();
      FreeSegment(current);
      current = next;
    }
    unused_segments_heads_[power] = nullptr;
  }
}

}  // namespace internal
}  // namespace v8