// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <google/protobuf/arena.h>

#include <algorithm>
#include <atomic>
#include <limits>

#include <google/protobuf/stubs/mutex.h>

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif  // ADDRESS_SANITIZER

#include <google/protobuf/port_def.inc>

static const size_t kMinCleanupListElements = 8;
static const size_t kMaxCleanupListElements = 64;  // 1kB on 64-bit.
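// The cleanup list grows geometrically: chunk capacities double from
// kMinCleanupListElements up to kMaxCleanupListElements (8 -> 16 -> 32 -> 64)
// and then stay at 64. Each CleanupNode holds an object pointer plus a
// cleanup function pointer, so a full 64-node chunk is roughly
// 64 * 16 bytes = 1kB on a 64-bit build, as noted above.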

namespace google {
namespace protobuf {
namespace internal {

std::atomic<LifecycleId> ArenaImpl::lifecycle_id_generator_;
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
ArenaImpl::ThreadCache& ArenaImpl::thread_cache() {
  static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
      new internal::ThreadLocalStorage<ThreadCache>();
  return *thread_cache_->Get();
}
#elif defined(PROTOBUF_USE_DLLS)
ArenaImpl::ThreadCache& ArenaImpl::thread_cache() {
  static PROTOBUF_THREAD_LOCAL ThreadCache thread_cache_ = {-1, NULL};
  return thread_cache_;
}
#else
PROTOBUF_THREAD_LOCAL ArenaImpl::ThreadCache ArenaImpl::thread_cache_ = {-1,
                                                                         NULL};
#endif
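// A ThreadCache initialized to {-1, NULL} has not yet observed any arena:
// no lifecycle id has been seen and no SerialArena is cached, so the first
// allocation made on a thread always goes through the fallback path.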

void ArenaImpl::Init() {
  lifecycle_id_ =
      lifecycle_id_generator_.fetch_add(1, std::memory_order_relaxed);
  hint_.store(nullptr, std::memory_order_relaxed);
  threads_.store(nullptr, std::memory_order_relaxed);

  if (initial_block_) {
    // The thread that calls Init() owns the first block. This allows the
    // single-threaded case to allocate on the first block without having to
    // perform atomic operations.
    new (initial_block_) Block(options_.initial_block_size, NULL);
    SerialArena* serial =
        SerialArena::New(initial_block_, &thread_cache(), this);
    serial->set_next(NULL);
    threads_.store(serial, std::memory_order_relaxed);
    space_allocated_.store(options_.initial_block_size,
                           std::memory_order_relaxed);
    CacheSerialArena(serial);
  } else {
    space_allocated_.store(0, std::memory_order_relaxed);
  }
}
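// Illustrative sketch of how the initial-block path above is reached through
// the public API (assuming the Arena/ArenaOptions interface declared in
// arena.h):
//
//   char buffer[4096];
//   ArenaOptions options;
//   options.initial_block = buffer;
//   options.initial_block_size = sizeof(buffer);
//   Arena arena(options);  // early allocations are served from |buffer|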

ArenaImpl::~ArenaImpl() {
  // Have to do this in a first pass, because some of the destructors might
  // refer to memory in other blocks.
  CleanupList();
  FreeBlocks();
}

uint64 ArenaImpl::Reset() {
  // Have to do this in a first pass, because some of the destructors might
  // refer to memory in other blocks.
  CleanupList();
  uint64 space_allocated = FreeBlocks();
  Init();

  return space_allocated;
}

ArenaImpl::Block* ArenaImpl::NewBlock(Block* last_block, size_t min_bytes) {
  size_t size;
  if (last_block) {
    // Double the current block size, up to a limit.
    size = std::min(2 * last_block->size(), options_.max_block_size);
  } else {
    size = options_.start_block_size;
  }
  // Verify that min_bytes + kBlockHeaderSize won't overflow.
  GOOGLE_CHECK_LE(min_bytes, std::numeric_limits<size_t>::max() - kBlockHeaderSize);
  size = std::max(size, kBlockHeaderSize + min_bytes);

  void* mem = options_.block_alloc(size);
  Block* b = new (mem) Block(size, last_block);
  space_allocated_.fetch_add(size, std::memory_order_relaxed);
  return b;
}
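// For example, assuming the usual ArenaOptions defaults (a 256-byte start
// block and an 8kB maximum block), successive blocks grow
// 256 -> 512 -> 1024 -> ... -> 8192 bytes and then stay at 8192, except that
// a request too large for the cap gets a block sized to
// kBlockHeaderSize + min_bytes.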

ArenaImpl::Block::Block(size_t size, Block* next)
    : next_(next), pos_(kBlockHeaderSize), size_(size) {}

PROTOBUF_NOINLINE
void ArenaImpl::SerialArena::AddCleanupFallback(void* elem,
                                                void (*cleanup)(void*)) {
  size_t size = cleanup_ ? cleanup_->size * 2 : kMinCleanupListElements;
  size = std::min(size, kMaxCleanupListElements);
  size_t bytes = internal::AlignUpTo8(CleanupChunk::SizeOf(size));
  CleanupChunk* list = reinterpret_cast<CleanupChunk*>(AllocateAligned(bytes));
  list->next = cleanup_;
  list->size = size;

  cleanup_ = list;
  cleanup_ptr_ = &list->nodes[0];
  cleanup_limit_ = &list->nodes[size];

  AddCleanup(elem, cleanup);
}
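// The new chunk is itself allocated from the arena and pushed onto a
// newest-first singly linked list of chunks; cleanup_ptr_ and cleanup_limit_
// then point into the fresh chunk, so subsequent AddCleanup() calls take the
// fast inline path until the chunk fills up again.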

void* ArenaImpl::AllocateAlignedAndAddCleanup(size_t n,
                                              void (*cleanup)(void*)) {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    return arena->AllocateAlignedAndAddCleanup(n, cleanup);
  } else {
    return AllocateAlignedAndAddCleanupFallback(n, cleanup);
  }
}

void ArenaImpl::AddCleanup(void* elem, void (*cleanup)(void*)) {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    arena->AddCleanup(elem, cleanup);
  } else {
    return AddCleanupFallback(elem, cleanup);
  }
}

PROTOBUF_NOINLINE
void* ArenaImpl::AllocateAlignedFallback(size_t n) {
  return GetSerialArena()->AllocateAligned(n);
}

PROTOBUF_NOINLINE
void* ArenaImpl::AllocateAlignedAndAddCleanupFallback(size_t n,
                                                      void (*cleanup)(void*)) {
  return GetSerialArena()->AllocateAlignedAndAddCleanup(n, cleanup);
}

PROTOBUF_NOINLINE
void ArenaImpl::AddCleanupFallback(void* elem, void (*cleanup)(void*)) {
  GetSerialArena()->AddCleanup(elem, cleanup);
}

ArenaImpl::SerialArena* ArenaImpl::GetSerialArena() {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    return arena;
  } else {
    return GetSerialArenaFallback(&thread_cache());
  }
}

PROTOBUF_NOINLINE
void* ArenaImpl::SerialArena::AllocateAlignedFallback(size_t n) {
  // Sync the current position back into the head block before replacing it.
  head_->set_pos(head_->size() - (limit_ - ptr_));

  head_ = arena_->NewBlock(head_, n);
  ptr_ = head_->Pointer(head_->pos());
  limit_ = head_->Pointer(head_->size());

#ifdef ADDRESS_SANITIZER
  ASAN_POISON_MEMORY_REGION(ptr_, limit_ - ptr_);
#endif  // ADDRESS_SANITIZER

  return AllocateAligned(n);
}
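// AllocateAlignedFallback() poisons the entire usable range of the new block;
// AllocateAligned() (inline in the header) is expected to unpoison exactly
// the bytes it returns, letting ASan flag stray accesses into the arena's
// unused tail.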

uint64 ArenaImpl::SpaceAllocated() const {
  return space_allocated_.load(std::memory_order_relaxed);
}

uint64 ArenaImpl::SpaceUsed() const {
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  uint64 space_used = 0;
  for (; serial; serial = serial->next()) {
    space_used += serial->SpaceUsed();
  }
  return space_used;
}

uint64 ArenaImpl::SerialArena::SpaceUsed() const {
  // Get the current block's usage from ptr_ (since we can't trust
  // head_->pos()).
  uint64 space_used = ptr_ - head_->Pointer(kBlockHeaderSize);
  // Get the usage of subsequent blocks from b->pos().
  for (Block* b = head_->next(); b; b = b->next()) {
    space_used += (b->pos() - kBlockHeaderSize);
  }
  // Remove the overhead of the SerialArena itself.
  space_used -= kSerialArenaSize;
  return space_used;
}
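// Note that the SerialArena object itself lives just past the header of the
// first block allocated for its thread (see SerialArena::New() below), which
// is why kSerialArenaSize is subtracted from the total above.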

uint64 ArenaImpl::FreeBlocks() {
  uint64 space_allocated = 0;
  // By omitting an Acquire barrier we ensure that any user code that doesn't
  // properly synchronize Reset() or the destructor will trigger a TSAN
  // warning.
  SerialArena* serial = threads_.load(std::memory_order_relaxed);

  while (serial) {
    // This is inside a block we are freeing, so we need to read it now.
    SerialArena* next = serial->next();
    space_allocated += ArenaImpl::SerialArena::Free(serial, initial_block_,
                                                    options_.block_dealloc);
    // serial is dead now.
    serial = next;
  }

  return space_allocated;
}

uint64 ArenaImpl::SerialArena::Free(ArenaImpl::SerialArena* serial,
                                    Block* initial_block,
                                    void (*block_dealloc)(void*, size_t)) {
  uint64 space_allocated = 0;

  // We have to be careful in this function, since we will be freeing the Block
  // that contains this SerialArena. Be careful about accessing |serial|.

  for (Block* b = serial->head_; b;) {
    // This is inside the block we are freeing, so we need to read it now.
    Block* next_block = b->next();
    space_allocated += (b->size());

#ifdef ADDRESS_SANITIZER
    // This memory was provided by the underlying allocator as unpoisoned, so
    // return it in an unpoisoned state.
    ASAN_UNPOISON_MEMORY_REGION(b->Pointer(0), b->size());
#endif  // ADDRESS_SANITIZER

    if (b != initial_block) {
      block_dealloc(b, b->size());
    }

    b = next_block;
  }

  return space_allocated;
}
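// Blocks are released through the block_dealloc callback, except for the
// user-provided initial block (if any), which the arena does not own.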

void ArenaImpl::CleanupList() {
  // By omitting an Acquire barrier we ensure that any user code that doesn't
  // properly synchronize Reset() or the destructor will trigger a TSAN
  // warning.
  SerialArena* serial = threads_.load(std::memory_order_relaxed);

  for (; serial; serial = serial->next()) {
    serial->CleanupList();
  }
}

void ArenaImpl::SerialArena::CleanupList() {
  if (cleanup_ != NULL) {
    CleanupListFallback();
  }
}

void ArenaImpl::SerialArena::CleanupListFallback() {
  // The first chunk might be only partially full, so calculate its size
  // from cleanup_ptr_. Subsequent chunks are always full, so use list->size.
  size_t n = cleanup_ptr_ - &cleanup_->nodes[0];
  CleanupChunk* list = cleanup_;
  while (true) {
    CleanupNode* node = &list->nodes[0];
    // Clean up newest elements first (allocated last).
    for (size_t i = n; i > 0; i--) {
      node[i - 1].cleanup(node[i - 1].elem);
    }
    list = list->next;
    if (list == nullptr) {
      break;
    }
    // All but the first chunk are always full.
    n = list->size;
  }
}
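// Because chunks are walked newest-first and each chunk is traversed from its
// last node backwards, cleanups run in reverse registration order: if objects
// A, B and C registered cleanups in that order, they are destroyed as
// C, B, A.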

ArenaImpl::SerialArena* ArenaImpl::SerialArena::New(Block* b, void* owner,
                                                    ArenaImpl* arena) {
  GOOGLE_DCHECK_EQ(b->pos(), kBlockHeaderSize);  // Should be a fresh block
  GOOGLE_DCHECK_LE(kBlockHeaderSize + kSerialArenaSize, b->size());
  SerialArena* serial =
      reinterpret_cast<SerialArena*>(b->Pointer(kBlockHeaderSize));
  b->set_pos(kBlockHeaderSize + kSerialArenaSize);
  serial->arena_ = arena;
  serial->owner_ = owner;
  serial->head_ = b;
  serial->ptr_ = b->Pointer(b->pos());
  serial->limit_ = b->Pointer(b->size());
  serial->cleanup_ = NULL;
  serial->cleanup_ptr_ = NULL;
  serial->cleanup_limit_ = NULL;
  return serial;
}
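// A SerialArena is carved out of the block it manages, immediately after the
// block header, so it needs no separate allocation and is reclaimed together
// with its blocks in Free().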

PROTOBUF_NOINLINE
ArenaImpl::SerialArena* ArenaImpl::GetSerialArenaFallback(void* me) {
  // Look for this SerialArena in our linked list.
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  for (; serial; serial = serial->next()) {
    if (serial->owner() == me) {
      break;
    }
  }

  if (!serial) {
    // This thread doesn't have any SerialArena, which also means it doesn't
    // have any blocks yet. So we'll allocate its first block now.
    Block* b = NewBlock(NULL, kSerialArenaSize);
    serial = SerialArena::New(b, me, this);

    SerialArena* head = threads_.load(std::memory_order_relaxed);
    do {
      serial->set_next(head);
    } while (!threads_.compare_exchange_weak(
        head, serial, std::memory_order_release, std::memory_order_relaxed));
  }

  CacheSerialArena(serial);
  return serial;
}
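// The compare-exchange loop above is a lock-free push onto the head of the
// threads_ list: on failure |head| is refreshed with the current head and the
// new SerialArena's next pointer is updated before retrying. The release
// ordering on success pairs with the acquire loads in SpaceUsed() and at the
// top of this function, so readers always observe a fully initialized
// SerialArena.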

}  // namespace internal

PROTOBUF_FUNC_ALIGN(32)
void* Arena::AllocateAlignedNoHook(size_t n) {
  return impl_.AllocateAligned(n);
}

void Arena::CallDestructorHooks() {
  uint64 space_allocated = impl_.SpaceAllocated();
  // Call the reset hook
  if (on_arena_reset_ != NULL) {
    on_arena_reset_(this, hooks_cookie_, space_allocated);
  }

  // Call the destruction hook
  if (on_arena_destruction_ != NULL) {
    on_arena_destruction_(this, hooks_cookie_, space_allocated);
  }
}

void Arena::OnArenaAllocation(const std::type_info* allocated_type,
                              size_t n) const {
  if (on_arena_allocation_ != NULL) {
    on_arena_allocation_(allocated_type, n, hooks_cookie_);
  }
}
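// These are the arena instrumentation hooks; the function pointers are
// expected to be installed through the matching ArenaOptions callbacks
// (on_arena_reset, on_arena_destruction, on_arena_allocation) and are no-ops
// when unset.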

}  // namespace protobuf
}  // namespace google