// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/zone/accounting-allocator.h"

#include <cstdlib>

#if V8_LIBC_BIONIC
#include <malloc.h>  // NOLINT
#endif

namespace v8 {
namespace internal {
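
// AccountingAllocator hands out malloc-backed Segments and keeps a small
// pool of unused ones, bucketed by power-of-two size, so that zones can
// recycle segments instead of paying for malloc/free on every request.
// All pool state is guarded by unused_segments_mutex_, and the pool is
// dropped entirely under memory pressure.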
AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
  static const size_t kDefaultBucketMaxSize = 5;

  memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
  std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets,
            nullptr);
  std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0);
  std::fill(unused_segments_max_sizes_,
            unused_segments_max_sizes_ + kNumberBuckets, kDefaultBucketMaxSize);
}

AccountingAllocator::~AccountingAllocator() { ClearPool(); }

void AccountingAllocator::MemoryPressureNotification(
    MemoryPressureLevel level) {
  memory_pressure_level_.SetValue(level);

  if (level != MemoryPressureLevel::kNone) {
    ClearPool();
  }
}

void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
  // The sum of the bytes of one segment of each size.
  static const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) -
                                  (size_t(1) << kMinSegmentSizePower);
  size_t fits_fully = max_pool_size / full_size;

  base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

  // We assume that few zones (fewer than 'fits_fully') are active at the same
  // time. When zones grow steadily, they request segments of increasing size,
  // so we aim for an equal number of pooled segments of each size. The
  // remaining budget is spent on an 'incomplete set' of segments, starting
  // with the smallest sizes. This works best when max_pool_size is a multiple
  // of full_size; if max_pool_size is not a sum of segment sizes, the actual
  // pool size may be smaller than max_pool_size. No memory is wasted either
  // way, since the budget only caps how many segments are retained.
  // TODO(heimbuef): Determine a better strategy for generating a segment size
  // distribution that is closer to real/benchmark use cases and uses the
  // given max_pool_size more efficiently.
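  // Worked example (illustrative values, not necessarily the real constants):
  // with kMinSegmentSizePower == 13 and kMaxSegmentSizePower == 18 (segments
  // of 8KB..256KB), full_size = 2^19 - 2^13 = 516096 bytes. For
  // max_pool_size == 2MB, fits_fully == 4, leaving 32768 bytes of budget.
  // The loop below then grants a fifth slot to the 8KB and 16KB buckets
  // (8192 + 16384 <= 32768) and caps the remaining buckets at four segments.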
  size_t total_size = fits_fully * full_size;

  for (size_t power = 0; power < kNumberBuckets; ++power) {
    if (total_size + (size_t(1) << (power + kMinSegmentSizePower)) <=
        max_pool_size) {
      unused_segments_max_sizes_[power] = fits_fully + 1;
      // Reserve the full segment size so the budget check above stays
      // accurate for the larger buckets.
      total_size += size_t(1) << (power + kMinSegmentSizePower);
    } else {
      unused_segments_max_sizes_[power] = fits_fully;
    }
  }
}

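// Returns a segment of at least 'bytes' bytes. The pool is tried first; only
// on a miss do we fall back to a fresh allocation.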
Segment* AccountingAllocator::GetSegment(size_t bytes) {
  Segment* result = GetSegmentFromPool(bytes);
  if (result == nullptr) {
    result = AllocateSegment(bytes);
    if (result != nullptr) {
      result->Initialize(bytes);
    }
  }

  return result;
}

Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
  void* memory = malloc(bytes);
  if (memory) {
    base::AtomicWord current =
        base::NoBarrier_AtomicIncrement(&current_memory_usage_, bytes);
    base::AtomicWord max = base::NoBarrier_Load(&max_memory_usage_);
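    // Lock-free high-water-mark update: CompareAndSwap returns the value it
    // observed, so 'max' picks up any competing value and the loop retries
    // until it sees max_memory_usage_ >= current.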
    while (current > max) {
      max = base::NoBarrier_CompareAndSwap(&max_memory_usage_, max, current);
    }
  }
  return reinterpret_cast<Segment*>(memory);
}

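// Returned segments are zapped and recycled through the pool; under memory
// pressure, or when the segment's bucket is already full, they are freed
// immediately instead.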
void AccountingAllocator::ReturnSegment(Segment* segment) {
  segment->ZapContents();

  if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
    FreeSegment(segment);
  } else if (!AddSegmentToPool(segment)) {
    FreeSegment(segment);
  }
}

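// Unaccounts and releases a segment. The negative delta is well-defined
// because base::AtomicWord is a signed integer type.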
void AccountingAllocator::FreeSegment(Segment* memory) {
  base::NoBarrier_AtomicIncrement(
      &current_memory_usage_, -static_cast<base::AtomicWord>(memory->size()));
  memory->ZapHeader();
  free(memory);
}

size_t AccountingAllocator::GetCurrentMemoryUsage() const {
  return base::NoBarrier_Load(&current_memory_usage_);
}

size_t AccountingAllocator::GetMaxMemoryUsage() const {
  return base::NoBarrier_Load(&max_memory_usage_);
}

size_t AccountingAllocator::GetCurrentPoolSize() const {
  return base::NoBarrier_Load(&current_pool_size_);
}

Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
  if (requested_size > (1 << kMaxSegmentSizePower)) {
    return nullptr;
  }

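  // Round the request up to the next power of two; every segment in that
  // bucket is then large enough to serve it.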
  size_t power = kMinSegmentSizePower;
  while (requested_size > (static_cast<size_t>(1) << power)) power++;

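  // The '+ 0' yields an rvalue, presumably to avoid odr-using the static
  // constant (which would otherwise need an out-of-class definition).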
  DCHECK_GE(power, kMinSegmentSizePower + 0);
  power -= kMinSegmentSizePower;

  Segment* segment;
  {
    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

    segment = unused_segments_heads_[power];

    if (segment != nullptr) {
      unused_segments_heads_[power] = segment->next();
      segment->set_next(nullptr);

      unused_segments_sizes_[power]--;
      base::NoBarrier_AtomicIncrement(
          &current_pool_size_, -static_cast<base::AtomicWord>(segment->size()));
    }
  }

  if (segment) {
    DCHECK_GE(segment->size(), requested_size);
  }
  return segment;
}

bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
  size_t size = segment->size();

  if (size >= (1 << (kMaxSegmentSizePower + 1))) return false;

  if (size < (1 << kMinSegmentSizePower)) return false;

  size_t power = kMaxSegmentSizePower;

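  // Round the size down to a power of two, so the segment is at least as
  // large as any request served from its bucket (see GetSegmentFromPool).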
  while (size < (static_cast<size_t>(1) << power)) power--;

  DCHECK_GE(power, kMinSegmentSizePower + 0);
  power -= kMinSegmentSizePower;

  {
    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

    if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
      return false;
    }

    segment->set_next(unused_segments_heads_[power]);
    unused_segments_heads_[power] = segment;
    base::NoBarrier_AtomicIncrement(&current_pool_size_, size);
    unused_segments_sizes_[power]++;
  }

  return true;
}

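// Frees every pooled segment. Called from the destructor and whenever a
// memory pressure notification arrives.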
void AccountingAllocator::ClearPool() {
  base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

  for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
       power++) {
    Segment* current = unused_segments_heads_[power];
    while (current) {
      Segment* next = current->next();
      FreeSegment(current);
      current = next;
    }
    unused_segments_heads_[power] = nullptr;
  }
}

}  // namespace internal
}  // namespace v8