// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <google/protobuf/arena.h>

#include <algorithm>
#include <atomic>
#include <limits>

#include <google/protobuf/stubs/mutex.h>

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif  // ADDRESS_SANITIZER

#include <google/protobuf/port_def.inc>

static const size_t kMinCleanupListElements = 8;
static const size_t kMaxCleanupListElements = 64;  // 1kB on 64-bit.

namespace google {
namespace protobuf {
namespace internal {

std::atomic<LifecycleId> ArenaImpl::lifecycle_id_generator_;
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
ArenaImpl::ThreadCache& ArenaImpl::thread_cache() {
  static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
      new internal::ThreadLocalStorage<ThreadCache>();
  return *thread_cache_->Get();
}
#elif defined(PROTOBUF_USE_DLLS)
ArenaImpl::ThreadCache& ArenaImpl::thread_cache() {
  static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_ = {-1, NULL};
  return thread_cache_;
}
#else
GOOGLE_THREAD_LOCAL ArenaImpl::ThreadCache ArenaImpl::thread_cache_ = {-1,
                                                                       NULL};
#endif
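
// Note on the three thread_cache() strategies above: platforms without
// native TLS support (GOOGLE_PROTOBUF_NO_THREADLOCAL) get a heap-backed
// ThreadLocalStorage shim; DLL builds (PROTOBUF_USE_DLLS) use a
// function-local thread-local, since exporting a thread-local variable
// across a DLL boundary is problematic on Windows; everywhere else a plain
// static thread-local member is used, which is the cheapest to access.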

void ArenaImpl::Init() {
  lifecycle_id_ =
      lifecycle_id_generator_.fetch_add(1, std::memory_order_relaxed);
  hint_.store(nullptr, std::memory_order_relaxed);
  threads_.store(nullptr, std::memory_order_relaxed);

  if (initial_block_) {
    // The thread that calls Init() owns the first block. This allows the
    // single-threaded case to allocate on the first block without having to
    // perform atomic operations.
    new (initial_block_) Block(options_.initial_block_size, NULL);
    SerialArena* serial =
        SerialArena::New(initial_block_, &thread_cache(), this);
    serial->set_next(NULL);
    threads_.store(serial, std::memory_order_relaxed);
    space_allocated_.store(options_.initial_block_size,
                           std::memory_order_relaxed);
    CacheSerialArena(serial);
  } else {
    space_allocated_.store(0, std::memory_order_relaxed);
  }
}
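
// Usage sketch: because Init() places the first SerialArena inside
// initial_block_, an arena seeded with a caller-owned buffer serves its
// first allocations without heap traffic or atomics. Assuming the public
// ArenaOptions fields initial_block / initial_block_size, that looks like:
//
//   char buffer[4096];
//   ArenaOptions options;
//   options.initial_block = buffer;
//   options.initial_block_size = sizeof(buffer);
//   Arena arena(options);  // First allocations come out of |buffer|.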

ArenaImpl::~ArenaImpl() {
  // Have to do this in a first pass, because some of the destructors might
  // refer to memory in other blocks.
  CleanupList();
  FreeBlocks();
}

uint64 ArenaImpl::Reset() {
  // Have to do this in a first pass, because some of the destructors might
  // refer to memory in other blocks.
  CleanupList();
  uint64 space_allocated = FreeBlocks();
  Init();

  return space_allocated;
}

ArenaImpl::Block* ArenaImpl::NewBlock(Block* last_block, size_t min_bytes) {
  size_t size;
  if (last_block) {
    // Double the current block size, up to a limit.
    size = std::min(2 * last_block->size(), options_.max_block_size);
  } else {
    size = options_.start_block_size;
  }
  // Verify that min_bytes + kBlockHeaderSize won't overflow.
  GOOGLE_CHECK_LE(min_bytes,
                  std::numeric_limits<size_t>::max() - kBlockHeaderSize);
  size = std::max(size, kBlockHeaderSize + min_bytes);

  void* mem = options_.block_alloc(size);
  Block* b = new (mem) Block(size, last_block);
  space_allocated_.fetch_add(size, std::memory_order_relaxed);
  return b;
}
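
// Growth sketch: block sizes double until they reach max_block_size. With,
// say, start_block_size = 256 and max_block_size = 8192, successive blocks
// are 256, 512, 1024, 2048, 4096, 8192, 8192, ... bytes; a single request
// with min_bytes larger than that cap instead forces a block of
// kBlockHeaderSize + min_bytes.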

ArenaImpl::Block::Block(size_t size, Block* next)
    : next_(next), pos_(kBlockHeaderSize), size_(size) {}

PROTOBUF_NOINLINE
void ArenaImpl::SerialArena::AddCleanupFallback(void* elem,
                                                void (*cleanup)(void*)) {
  size_t size = cleanup_ ? cleanup_->size * 2 : kMinCleanupListElements;
  size = std::min(size, kMaxCleanupListElements);
  size_t bytes = internal::AlignUpTo8(CleanupChunk::SizeOf(size));
  CleanupChunk* list = reinterpret_cast<CleanupChunk*>(AllocateAligned(bytes));
  list->next = cleanup_;
  list->size = size;

  cleanup_ = list;
  cleanup_ptr_ = &list->nodes[0];
  cleanup_limit_ = &list->nodes[size];

  AddCleanup(elem, cleanup);
}
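
// Growth sketch: cleanup chunks double in capacity, from
// kMinCleanupListElements (8) up to the kMaxCleanupListElements (64) cap,
// so the first 8 + 16 + 32 + 64 = 120 registered cleanups fit in four
// chunks, and every further 64 cleanups cost one more chunk allocated from
// the arena itself.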

PROTOBUF_FUNC_ALIGN(32)
void* ArenaImpl::AllocateAligned(size_t n) {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    return arena->AllocateAligned(n);
  } else {
    return AllocateAlignedFallback(n);
  }
}
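
// Design note: the allocation entry points here share one shape: a compact
// fast path that only consults thread-local state via GetSerialArenaFast(),
// and a PROTOBUF_NOINLINE fallback that does the expensive SerialArena
// lookup or block allocation. Keeping the fallback out of line keeps the
// fast path small enough to inline at call sites; PROTOBUF_FUNC_ALIGN(32)
// additionally gives AllocateAligned(), presumably the hottest symbol here,
// stable cache-friendly code placement.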

void* ArenaImpl::AllocateAlignedAndAddCleanup(size_t n,
                                              void (*cleanup)(void*)) {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    return arena->AllocateAlignedAndAddCleanup(n, cleanup);
  } else {
    return AllocateAlignedAndAddCleanupFallback(n, cleanup);
  }
}

void ArenaImpl::AddCleanup(void* elem, void (*cleanup)(void*)) {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    arena->AddCleanup(elem, cleanup);
  } else {
    AddCleanupFallback(elem, cleanup);
  }
}

PROTOBUF_NOINLINE
void* ArenaImpl::AllocateAlignedFallback(size_t n) {
  return GetSerialArena()->AllocateAligned(n);
}

PROTOBUF_NOINLINE
void* ArenaImpl::AllocateAlignedAndAddCleanupFallback(size_t n,
                                                      void (*cleanup)(void*)) {
  return GetSerialArena()->AllocateAlignedAndAddCleanup(n, cleanup);
}

PROTOBUF_NOINLINE
void ArenaImpl::AddCleanupFallback(void* elem, void (*cleanup)(void*)) {
  GetSerialArena()->AddCleanup(elem, cleanup);
}

inline PROTOBUF_ALWAYS_INLINE bool ArenaImpl::GetSerialArenaFast(
    ArenaImpl::SerialArena** arena) {
  // If this thread already owns a block in this arena then try to use that.
  // This fast path optimizes the case where multiple threads allocate from
  // the same arena.
  ThreadCache* tc = &thread_cache();
  if (PROTOBUF_PREDICT_TRUE(tc->last_lifecycle_id_seen == lifecycle_id_)) {
    *arena = tc->last_serial_arena;
    return true;
  }

  // Check whether we own the last accessed SerialArena on this arena. This
  // fast path optimizes the case where a single thread uses multiple arenas.
  SerialArena* serial = hint_.load(std::memory_order_acquire);
  if (PROTOBUF_PREDICT_TRUE(serial != NULL && serial->owner() == tc)) {
    *arena = serial;
    return true;
  }

  return false;
}

ArenaImpl::SerialArena* ArenaImpl::GetSerialArena() {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    return arena;
  } else {
    return GetSerialArenaFallback(&thread_cache());
  }
}

PROTOBUF_NOINLINE
void* ArenaImpl::SerialArena::AllocateAlignedFallback(size_t n) {
  // Sync back to the current block's pos.
  head_->set_pos(head_->size() - (limit_ - ptr_));

  head_ = arena_->NewBlock(head_, n);
  ptr_ = head_->Pointer(head_->pos());
  limit_ = head_->Pointer(head_->size());

#ifdef ADDRESS_SANITIZER
  ASAN_POISON_MEMORY_REGION(ptr_, limit_ - ptr_);
#endif  // ADDRESS_SANITIZER

  return AllocateAligned(n);
}

uint64 ArenaImpl::SpaceAllocated() const {
  return space_allocated_.load(std::memory_order_relaxed);
}

uint64 ArenaImpl::SpaceUsed() const {
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  uint64 space_used = 0;
  for (; serial; serial = serial->next()) {
    space_used += serial->SpaceUsed();
  }
  return space_used;
}
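
// Note the asymmetry between the two accounting functions: SpaceAllocated()
// is a single relaxed load of a counter maintained at block-allocation
// time, while SpaceUsed() walks every SerialArena and every block, so it is
// O(number of blocks). An arena holding one partially filled 4096-byte
// block reports SpaceAllocated() == 4096 but a strictly smaller SpaceUsed().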

uint64 ArenaImpl::SerialArena::SpaceUsed() const {
  // Get the current block's used size from ptr_ (since we can't trust
  // head_->pos()).
  uint64 space_used = ptr_ - head_->Pointer(kBlockHeaderSize);
  // Get subsequent blocks' used sizes from b->pos().
  for (Block* b = head_->next(); b; b = b->next()) {
    space_used += (b->pos() - kBlockHeaderSize);
  }
  // Remove the overhead of the SerialArena itself.
  space_used -= kSerialArenaSize;
  return space_used;
}

uint64 ArenaImpl::FreeBlocks() {
  uint64 space_allocated = 0;
  // By omitting an Acquire barrier we ensure that any user code that doesn't
  // properly synchronize Reset() or the destructor will throw a TSAN warning.
  SerialArena* serial = threads_.load(std::memory_order_relaxed);

  while (serial) {
    // This is inside a block we are freeing, so we need to read it now.
    SerialArena* next = serial->next();
    space_allocated += ArenaImpl::SerialArena::Free(serial, initial_block_,
                                                    options_.block_dealloc);
    // serial is dead now.
    serial = next;
  }

  return space_allocated;
}

uint64 ArenaImpl::SerialArena::Free(ArenaImpl::SerialArena* serial,
                                    Block* initial_block,
                                    void (*block_dealloc)(void*, size_t)) {
  uint64 space_allocated = 0;

  // We have to be careful in this function, since we will be freeing the
  // Block that contains this SerialArena. Be careful about accessing
  // |serial|.

  for (Block* b = serial->head_; b;) {
    // This is inside the block we are freeing, so we need to read it now.
    Block* next_block = b->next();
    space_allocated += (b->size());

#ifdef ADDRESS_SANITIZER
    // This memory was provided by the underlying allocator as unpoisoned, so
    // return it in an unpoisoned state.
    ASAN_UNPOISON_MEMORY_REGION(b->Pointer(0), b->size());
#endif  // ADDRESS_SANITIZER

    // The initial block is caller-provided memory, so the caller frees it.
    if (b != initial_block) {
      block_dealloc(b, b->size());
    }

    b = next_block;
  }

  return space_allocated;
}

void ArenaImpl::CleanupList() {
  // By omitting an Acquire barrier we ensure that any user code that doesn't
  // properly synchronize Reset() or the destructor will throw a TSAN warning.
  SerialArena* serial = threads_.load(std::memory_order_relaxed);

  for (; serial; serial = serial->next()) {
    serial->CleanupList();
  }
}

void ArenaImpl::SerialArena::CleanupList() {
  if (cleanup_ != NULL) {
    CleanupListFallback();
  }
}

void ArenaImpl::SerialArena::CleanupListFallback() {
  // The first chunk might be only partially full, so calculate its size
  // from cleanup_ptr_. Subsequent chunks are always full, so use list->size.
  size_t n = cleanup_ptr_ - &cleanup_->nodes[0];
  CleanupChunk* list = cleanup_;
  while (true) {
    CleanupNode* node = &list->nodes[0];
    // Clean up the newest elements first (they were allocated last).
    for (size_t i = n; i > 0; i--) {
      node[i - 1].cleanup(node[i - 1].elem);
    }
    list = list->next;
    if (list == nullptr) {
      break;
    }
    // All but the first chunk are always full.
    n = list->size;
  }
}
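
// Ordering sketch: chunks are walked newest-first and nodes within a chunk
// from the back, so cleanups run in reverse registration order. If objects
// A and then B were allocated on the arena, B's destructor runs before A's,
// which is what lets a later object safely hold a pointer to an earlier one.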

ArenaImpl::SerialArena* ArenaImpl::SerialArena::New(Block* b, void* owner,
                                                    ArenaImpl* arena) {
  GOOGLE_DCHECK_EQ(b->pos(), kBlockHeaderSize);  // Should be a fresh block.
  GOOGLE_DCHECK_LE(kBlockHeaderSize + kSerialArenaSize, b->size());
  SerialArena* serial =
      reinterpret_cast<SerialArena*>(b->Pointer(kBlockHeaderSize));
  b->set_pos(kBlockHeaderSize + kSerialArenaSize);
  serial->arena_ = arena;
  serial->owner_ = owner;
  serial->head_ = b;
  serial->ptr_ = b->Pointer(b->pos());
  serial->limit_ = b->Pointer(b->size());
  serial->cleanup_ = NULL;
  serial->cleanup_ptr_ = NULL;
  serial->cleanup_limit_ = NULL;
  return serial;
}

PROTOBUF_NOINLINE
ArenaImpl::SerialArena* ArenaImpl::GetSerialArenaFallback(void* me) {
  // Look for this SerialArena in our linked list.
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  for (; serial; serial = serial->next()) {
    if (serial->owner() == me) {
      break;
    }
  }

  if (!serial) {
    // This thread doesn't have any SerialArena, which also means it doesn't
    // have any blocks yet. So we'll allocate its first block now.
    Block* b = NewBlock(NULL, kSerialArenaSize);
    serial = SerialArena::New(b, me, this);

    SerialArena* head = threads_.load(std::memory_order_relaxed);
    do {
      serial->set_next(head);
    } while (!threads_.compare_exchange_weak(
        head, serial, std::memory_order_release, std::memory_order_relaxed));
  }

  CacheSerialArena(serial);
  return serial;
}
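
// The compare_exchange_weak loop above is a lock-free stack push: the new
// SerialArena is linked in at the head of threads_, and on CAS failure
// |head| is refreshed and the link retried. The release ordering on success
// pairs with the acquire loads of threads_ (e.g. in SpaceUsed()) so other
// threads observe a fully initialized SerialArena.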

}  // namespace internal

void Arena::CallDestructorHooks() {
  uint64 space_allocated = impl_.SpaceAllocated();
  // Call the reset hook.
  if (on_arena_reset_ != NULL) {
    on_arena_reset_(this, hooks_cookie_, space_allocated);
  }

  // Call the destruction hook.
  if (on_arena_destruction_ != NULL) {
    on_arena_destruction_(this, hooks_cookie_, space_allocated);
  }
}

void Arena::OnArenaAllocation(const std::type_info* allocated_type,
                              size_t n) const {
  if (on_arena_allocation_ != NULL) {
    on_arena_allocation_(allocated_type, n, hooks_cookie_);
  }
}

}  // namespace protobuf
}  // namespace google