// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <google/protobuf/arena.h>

#include <algorithm>
#include <atomic>
#include <limits>

#include <google/protobuf/stubs/mutex.h>

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif  // ADDRESS_SANITIZER

#include <google/protobuf/port_def.inc>

static const size_t kMinCleanupListElements = 8;
static const size_t kMaxCleanupListElements = 64;  // 1kB on 64-bit.

namespace google {
namespace protobuf {

PROTOBUF_EXPORT /*static*/ void* (*const ArenaOptions::kDefaultBlockAlloc)(
    size_t) = &::operator new;

namespace internal {


ArenaImpl::CacheAlignedLifecycleIdGenerator ArenaImpl::lifecycle_id_generator_;
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
ArenaImpl::ThreadCache& ArenaImpl::thread_cache() {
  static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
      new internal::ThreadLocalStorage<ThreadCache>();
  return *thread_cache_->Get();
}
#elif defined(PROTOBUF_USE_DLLS)
ArenaImpl::ThreadCache& ArenaImpl::thread_cache() {
  static PROTOBUF_THREAD_LOCAL ThreadCache thread_cache_ = {
      0, static_cast<LifecycleIdAtomic>(-1), nullptr};
  return thread_cache_;
}
#else
PROTOBUF_THREAD_LOCAL ArenaImpl::ThreadCache ArenaImpl::thread_cache_ = {
    0, static_cast<LifecycleIdAtomic>(-1), nullptr};
#endif
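
// Note on the three variants above (explanatory comment, not in the original
// source): platforms without thread_local support fall back to an explicit
// ThreadLocalStorage object; DLL builds route access through a function so
// the thread-local variable lives in a single translation unit; everywhere
// else a plain PROTOBUF_THREAD_LOCAL variable gives the cheapest access.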

void ArenaFree(void* object, size_t size) {
#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)
  ::operator delete(object, size);
#else
  (void)size;
  ::operator delete(object);
#endif
}

ArenaImpl::ArenaImpl(const ArenaOptions& options) {
  ArenaMetricsCollector* collector = nullptr;
  bool record_allocs = false;
  if (options.make_metrics_collector != nullptr) {
    collector = (*options.make_metrics_collector)();
    record_allocs = (collector && collector->RecordAllocs());
  }

  // Get memory where we can store non-default options if needed.
  // Use supplied initial_block if it is large enough.
  size_t min_block_size = kOptionsSize + kBlockHeaderSize + kSerialArenaSize;
  char* mem = options.initial_block;
  size_t mem_size = options.initial_block_size;
  GOOGLE_DCHECK_EQ(reinterpret_cast<uintptr_t>(mem) & 7, 0);
  if (mem == nullptr || mem_size < min_block_size) {
    // Supplied initial block is not big enough.
    mem_size = std::max(min_block_size, options.start_block_size);
    mem = reinterpret_cast<char*>((*options.block_alloc)(mem_size));
  }

  // Create the special block.
  const bool special = true;
  const bool user_owned = (mem == options.initial_block);
  auto block =
      new (mem) SerialArena::Block(mem_size, nullptr, special, user_owned);

  // Options occupy the beginning of the initial block.
  options_ = new (block->Pointer(block->pos())) Options;
#ifdef ADDRESS_SANITIZER
  ASAN_UNPOISON_MEMORY_REGION(options_, kOptionsSize);
#endif  // ADDRESS_SANITIZER
  options_->start_block_size = options.start_block_size;
  options_->max_block_size = options.max_block_size;
  options_->block_alloc = options.block_alloc;
  options_->block_dealloc = options.block_dealloc;
  options_->metrics_collector = collector;
  block->set_pos(block->pos() + kOptionsSize);

  Init(record_allocs);
  SetInitialBlock(block);
}
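
// Illustrative usage sketch (explanatory comment, not in the original
// source; the buffer name and size are hypothetical): callers can seed the
// arena with their own block so early allocations never touch the heap:
//
//   alignas(8) char buffer[4096];  // the ctor DCHECKs 8-byte alignment
//   ArenaOptions options;
//   options.initial_block = buffer;
//   options.initial_block_size = sizeof(buffer);
//   Arena arena(options);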

void ArenaImpl::Init(bool record_allocs) {
  ThreadCache& tc = thread_cache();
  auto id = tc.next_lifecycle_id;
  constexpr uint64 kInc = ThreadCache::kPerThreadIds * 2;
  if (PROTOBUF_PREDICT_FALSE((id & (kInc - 1)) == 0)) {
    if (sizeof(lifecycle_id_generator_.id) == 4) {
      // 2^32 is dangerously low to guarantee uniqueness. If we start doling
      // out unique ids in ranges of kInc it's unacceptably low, so in this
      // case we increment by 1 instead. The additional range of kPerThreadIds
      // used per thread effectively pushes the overflow time from weeks to
      // years of continuous running.
      id = lifecycle_id_generator_.id.fetch_add(1, std::memory_order_relaxed) *
           kInc;
    } else {
      id =
          lifecycle_id_generator_.id.fetch_add(kInc, std::memory_order_relaxed);
    }
  }
  tc.next_lifecycle_id = id + 2;
  // We store "record_allocs" in the low bit of lifecycle_id_.
  lifecycle_id_ = id | (record_allocs ? 1 : 0);
  hint_.store(nullptr, std::memory_order_relaxed);
  threads_.store(nullptr, std::memory_order_relaxed);
  space_allocated_.store(0, std::memory_order_relaxed);
}
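
// Id arithmetic recap (explanatory comment, not in the original source):
// ids advance by 2 so the low bit stays free for the record_allocs flag,
// and a thread claims a fresh range of kPerThreadIds ids from the shared
// generator only when its private range is exhausted, keeping the atomic
// fetch_add off the common path.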

void ArenaImpl::SetInitialBlock(SerialArena::Block* block) {
  // Calling thread owns the first block. This allows the single-threaded case
  // to allocate on the first block without having to perform atomic operations.
  SerialArena* serial = SerialArena::New(block, &thread_cache(), this);
  serial->set_next(NULL);
  threads_.store(serial, std::memory_order_relaxed);
  space_allocated_.store(block->size(), std::memory_order_relaxed);
  CacheSerialArena(serial);
}

ArenaImpl::~ArenaImpl() {
  // Have to do this in a first pass, because some of the destructors might
  // refer to memory in other blocks.
  CleanupList();

  ArenaMetricsCollector* collector = nullptr;
  auto deallocator = &ArenaFree;
  if (options_) {
    collector = options_->metrics_collector;
    deallocator = options_->block_dealloc;
  }

  PerBlock([deallocator](SerialArena::Block* b) {
#ifdef ADDRESS_SANITIZER
    // This memory was provided by the underlying allocator as unpoisoned, so
    // return it in an unpoisoned state.
    ASAN_UNPOISON_MEMORY_REGION(b->Pointer(0), b->size());
#endif  // ADDRESS_SANITIZER
    if (!b->user_owned()) {
      (*deallocator)(b, b->size());
    }
  });

  if (collector) {
    collector->OnDestroy(SpaceAllocated());
  }
}

uint64 ArenaImpl::Reset() {
  if (options_ && options_->metrics_collector) {
    options_->metrics_collector->OnReset(SpaceAllocated());
  }

  // Have to do this in a first pass, because some of the destructors might
  // refer to memory in other blocks.
  CleanupList();

  // Discard all blocks except the special block (if present).
  uint64 space_allocated = 0;
  SerialArena::Block* special_block = nullptr;
  auto deallocator = (options_ ? options_->block_dealloc : &ArenaFree);
  PerBlock(
      [&space_allocated, &special_block, deallocator](SerialArena::Block* b) {
        space_allocated += b->size();
#ifdef ADDRESS_SANITIZER
        // This memory was provided by the underlying allocator as unpoisoned,
        // so return it in an unpoisoned state.
        ASAN_UNPOISON_MEMORY_REGION(b->Pointer(0), b->size());
#endif  // ADDRESS_SANITIZER
        if (!b->special()) {
          (*deallocator)(b, b->size());
        } else {
          // Prepare special block for reuse.
          // Note: if options_ is present, it occupies the beginning of the
          // block and therefore pos is advanced past it.
          GOOGLE_DCHECK(special_block == nullptr);
          special_block = b;
        }
      });

  Init(record_allocs());
  if (special_block != nullptr) {
    // next() should still be nullptr since we are using a stack discipline, but
    // clear it anyway to reduce fragility.
    GOOGLE_DCHECK_EQ(special_block->next(), nullptr);
    special_block->clear_next();
    special_block->set_pos(kBlockHeaderSize + (options_ ? kOptionsSize : 0));
    SetInitialBlock(special_block);
  }
  return space_allocated;
}
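
// Reuse sketch (explanatory comment, not in the original source; MyMessage
// and MoreRequests() are hypothetical): Reset() lets one arena serve many
// request cycles while keeping the user-provided special block warm:
//
//   Arena arena(options);
//   while (MoreRequests()) {
//     MyMessage* msg = Arena::CreateMessage<MyMessage>(&arena);
//     ...                // use msg for one request
//     arena.Reset();     // run cleanups, free heap blocks, keep the
//   }                    // special block ready for the next cycle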

std::pair<void*, size_t> ArenaImpl::NewBuffer(size_t last_size,
                                              size_t min_bytes) {
  size_t size;
  if (last_size != -1) {
    // Double the current block size, up to a limit.
    auto max_size = options_ ? options_->max_block_size : kDefaultMaxBlockSize;
    size = std::min(2 * last_size, max_size);
  } else {
    size = options_ ? options_->start_block_size : kDefaultStartBlockSize;
  }
  // Verify that min_bytes + kBlockHeaderSize won't overflow.
  GOOGLE_CHECK_LE(min_bytes, std::numeric_limits<size_t>::max() - kBlockHeaderSize);
  size = std::max(size, kBlockHeaderSize + min_bytes);

  void* mem = options_ ? (*options_->block_alloc)(size) : ::operator new(size);
  space_allocated_.fetch_add(size, std::memory_order_relaxed);
  return {mem, size};
}
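
// Sizing example (explanatory comment, not in the original source; assumes
// the stock ArenaOptions defaults of a 256-byte start block and an 8K cap):
// successive blocks grow 256 -> 512 -> 1024 -> ... -> 8192 and then plateau,
// except that a single oversized request still gets kBlockHeaderSize +
// min_bytes.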

SerialArena::Block* SerialArena::NewBlock(SerialArena::Block* last_block,
                                          size_t min_bytes, ArenaImpl* arena) {
  void* mem;
  size_t size;
  std::tie(mem, size) =
      arena->NewBuffer(last_block ? last_block->size() : -1, min_bytes);
  Block* b = new (mem) Block(size, last_block, false, false);
  return b;
}

PROTOBUF_NOINLINE
void SerialArena::AddCleanupFallback(void* elem, void (*cleanup)(void*)) {
  size_t size = cleanup_ ? cleanup_->size * 2 : kMinCleanupListElements;
  size = std::min(size, kMaxCleanupListElements);
  size_t bytes = internal::AlignUpTo8(CleanupChunk::SizeOf(size));
  CleanupChunk* list = reinterpret_cast<CleanupChunk*>(AllocateAligned(bytes));
  list->next = cleanup_;
  list->size = size;

  cleanup_ = list;
  cleanup_ptr_ = &list->nodes[0];
  cleanup_limit_ = &list->nodes[size];

  AddCleanup(elem, cleanup);
}
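
// Growth note (explanatory comment, not in the original source): with
// kMinCleanupListElements = 8 and kMaxCleanupListElements = 64, successive
// chunks hold 8, 16, 32, then 64 nodes apiece, so long cleanup lists settle
// into the 1kB chunks mentioned at the top of this file.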

void* ArenaImpl::AllocateAlignedAndAddCleanup(size_t n,
                                              void (*cleanup)(void*)) {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    return arena->AllocateAlignedAndAddCleanup(n, cleanup);
  } else {
    return AllocateAlignedAndAddCleanupFallback(n, cleanup);
  }
}

void ArenaImpl::AddCleanup(void* elem, void (*cleanup)(void*)) {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    arena->AddCleanup(elem, cleanup);
  } else {
    return AddCleanupFallback(elem, cleanup);
  }
}

PROTOBUF_NOINLINE
void* ArenaImpl::AllocateAlignedFallback(size_t n) {
  return GetSerialArenaFallback(&thread_cache())->AllocateAligned(n);
}

PROTOBUF_NOINLINE
void* ArenaImpl::AllocateAlignedAndAddCleanupFallback(size_t n,
                                                      void (*cleanup)(void*)) {
  return GetSerialArenaFallback(
      &thread_cache())->AllocateAlignedAndAddCleanup(n, cleanup);
}

PROTOBUF_NOINLINE
void ArenaImpl::AddCleanupFallback(void* elem, void (*cleanup)(void*)) {
  GetSerialArenaFallback(&thread_cache())->AddCleanup(elem, cleanup);
}

PROTOBUF_NOINLINE
void* SerialArena::AllocateAlignedFallback(size_t n) {
  // Sync back to the current block's pos.
  head_->set_pos(head_->size() - (limit_ - ptr_));

  head_ = NewBlock(head_, n, arena_);
  ptr_ = head_->Pointer(head_->pos());
  limit_ = head_->Pointer(head_->size());

#ifdef ADDRESS_SANITIZER
  ASAN_POISON_MEMORY_REGION(ptr_, limit_ - ptr_);
#endif  // ADDRESS_SANITIZER

  return AllocateAligned(n);
}
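
// Fast-path recap (explanatory comment, not in the original source):
// AllocateAligned is just a bump of ptr_ toward limit_; this fallback runs
// only when the current block is exhausted, and the fresh block is poisoned
// so ASAN flags any touch of not-yet-allocated arena memory.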

uint64 ArenaImpl::SpaceAllocated() const {
  return space_allocated_.load(std::memory_order_relaxed);
}

uint64 ArenaImpl::SpaceUsed() const {
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  uint64 space_used = 0;
  for (; serial; serial = serial->next()) {
    space_used += serial->SpaceUsed();
  }
  // Remove the overhead of Options structure, if any.
  if (options_) {
    space_used -= kOptionsSize;
  }
  return space_used;
}

uint64 SerialArena::SpaceUsed() const {
  // Get the current block's size from ptr_ (since we can't trust
  // head_->pos()).
  uint64 space_used = ptr_ - head_->Pointer(kBlockHeaderSize);
  // Get subsequent block sizes from b->pos().
  for (Block* b = head_->next(); b; b = b->next()) {
    space_used += (b->pos() - kBlockHeaderSize);
  }
  // Remove the overhead of the SerialArena itself.
  space_used -= ArenaImpl::kSerialArenaSize;
  return space_used;
}
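
// Accounting note (explanatory comment, not in the original source): the
// head block is measured through ptr_ because pos() is only synced back when
// a block rolls over; older blocks are final, so their pos() is
// authoritative. The SerialArena header itself is carved out of its first
// block, hence the kSerialArenaSize correction.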

void ArenaImpl::CleanupList() {
  // By omitting an acquire barrier we ensure that any user code that doesn't
  // properly synchronize Reset() or the destructor will trigger a TSAN
  // warning.
  SerialArena* serial = threads_.load(std::memory_order_relaxed);

  for (; serial; serial = serial->next()) {
    serial->CleanupList();
  }
}

void SerialArena::CleanupList() {
  if (cleanup_ != NULL) {
    CleanupListFallback();
  }
}

void SerialArena::CleanupListFallback() {
  // The first chunk might be only partially full, so calculate its size
  // from cleanup_ptr_. Subsequent chunks are always full, so use list->size.
  size_t n = cleanup_ptr_ - &cleanup_->nodes[0];
  CleanupChunk* list = cleanup_;
  while (true) {
    CleanupNode* node = &list->nodes[0];
    // Clean up the newest elements first (allocated last).
    for (size_t i = n; i > 0; i--) {
      node[i - 1].cleanup(node[i - 1].elem);
    }
    list = list->next;
    if (list == nullptr) {
      break;
    }
    // All but the first chunk are always full.
    n = list->size;
  }
}
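
// Ordering note (explanatory comment, not in the original source): nodes
// within a chunk run newest-to-oldest, and chunks are walked starting from
// the most recently added one, so destructors fire in strict reverse order
// of registration, mirroring stack unwinding.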

SerialArena* SerialArena::New(Block* b, void* owner, ArenaImpl* arena) {
  auto pos = b->pos();
  GOOGLE_DCHECK_LE(pos + ArenaImpl::kSerialArenaSize, b->size());
  SerialArena* serial = reinterpret_cast<SerialArena*>(b->Pointer(pos));
  b->set_pos(pos + ArenaImpl::kSerialArenaSize);
  serial->arena_ = arena;
  serial->owner_ = owner;
  serial->head_ = b;
  serial->ptr_ = b->Pointer(b->pos());
  serial->limit_ = b->Pointer(b->size());
  serial->cleanup_ = NULL;
  serial->cleanup_ptr_ = NULL;
  serial->cleanup_limit_ = NULL;
  return serial;
}

PROTOBUF_NOINLINE
SerialArena* ArenaImpl::GetSerialArenaFallback(void* me) {
  // Look for this SerialArena in our linked list.
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  for (; serial; serial = serial->next()) {
    if (serial->owner() == me) {
      break;
    }
  }

  if (!serial) {
    // This thread doesn't have any SerialArena, which also means it doesn't
    // have any blocks yet.  So we'll allocate its first block now.
    SerialArena::Block* b = SerialArena::NewBlock(NULL, kSerialArenaSize, this);
    serial = SerialArena::New(b, me, this);

    SerialArena* head = threads_.load(std::memory_order_relaxed);
    do {
      serial->set_next(head);
    } while (!threads_.compare_exchange_weak(
        head, serial, std::memory_order_release, std::memory_order_relaxed));
  }

  CacheSerialArena(serial);
  return serial;
}
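
// Concurrency note (explanatory comment, not in the original source): the
// loop above is the classic lock-free list prepend: point the new node at
// the observed head and CAS the head to the new node; on failure,
// compare_exchange_weak reloads `head` and the loop simply retries.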

ArenaMetricsCollector::~ArenaMetricsCollector() {}

}  // namespace internal

PROTOBUF_FUNC_ALIGN(32)
void* Arena::AllocateAlignedNoHook(size_t n) {
  return impl_.AllocateAligned(n);
}

}  // namespace protobuf
}  // namespace google