// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <google/protobuf/arena.h>

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif

namespace google {
namespace protobuf {

google::protobuf::internal::SequenceNumber Arena::lifecycle_id_generator_;
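// Per-thread cache of the most recently used block in each arena. Three
// storage strategies are used depending on the build configuration: a
// ThreadLocalStorage object for platforms without compiler-level thread-local
// support, a function-local static thread-local for DLL builds (which keeps
// the thread-local inside this translation unit instead of an exported data
// member), or a plain thread-local static member otherwise.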
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
Arena::ThreadCache& Arena::thread_cache() {
  static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
      new internal::ThreadLocalStorage<ThreadCache>();
  return *thread_cache_->Get();
}
#elif defined(PROTOBUF_USE_DLLS)
Arena::ThreadCache& Arena::thread_cache() {
  static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_ = { -1, NULL };
  return thread_cache_;
}
#else
GOOGLE_THREAD_LOCAL Arena::ThreadCache Arena::thread_cache_ = { -1, NULL };
#endif

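// Sets up a freshly-constructed arena: assigns a new lifecycle id, clears the
// block and cleanup lists, and, if the caller supplied an initial block via
// ArenaOptions, adds that block to the list without taking ownership of it.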
void Arena::Init() {
  lifecycle_id_ = lifecycle_id_generator_.GetNext();
  blocks_ = 0;
  hint_ = 0;
  owns_first_block_ = true;
  cleanup_list_ = 0;

  if (options_.initial_block != NULL && options_.initial_block_size > 0) {
    GOOGLE_CHECK_GE(options_.initial_block_size, sizeof(Block))
        << ": Initial block size too small for header.";

    // Add first unowned block to list.
    Block* first_block = reinterpret_cast<Block*>(options_.initial_block);
    first_block->size = options_.initial_block_size;
    first_block->pos = kHeaderSize;
    first_block->next = NULL;
    // Thread which calls Init() owns the first block. This allows the
    // single-threaded case to allocate on the first block without taking any
    // locks.
    first_block->owner = &thread_cache();
    SetThreadCacheBlock(first_block);
    AddBlockInternal(first_block);
    owns_first_block_ = false;
  }

  // Call the initialization hook
  if (options_.on_arena_init != NULL) {
    hooks_cookie_ = options_.on_arena_init(this);
  } else {
    hooks_cookie_ = NULL;
  }
}

Arena::~Arena() {
  uint64 space_allocated = ResetInternal();

  // Call the destruction hook
  if (options_.on_arena_destruction != NULL) {
    options_.on_arena_destruction(this, hooks_cookie_, space_allocated);
  }
}

uint64 Arena::Reset() {
  // Invalidate any ThreadCaches pointing to any blocks we just destroyed.
  lifecycle_id_ = lifecycle_id_generator_.GetNext();
  return ResetInternal();
}

uint64 Arena::ResetInternal() {
  CleanupList();
  uint64 space_allocated = FreeBlocks();

  // Call the reset hook
  if (options_.on_arena_reset != NULL) {
    options_.on_arena_reset(this, hooks_cookie_, space_allocated);
  }

  return space_allocated;
}

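// Allocates a new block with room for at least n bytes of payload. The block
// size doubles relative to the caller's previous block, capped at
// max_block_size (or starts at start_block_size for the caller's first
// block). A block that is completely filled by this request is left unowned
// so that no thread tries to reuse it.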
Arena::Block* Arena::NewBlock(void* me, Block* my_last_block, size_t n,
                              size_t start_block_size, size_t max_block_size) {
  size_t size;
  if (my_last_block != NULL) {
    // Double the current block size, up to a limit.
    size = 2 * (my_last_block->size);
    if (size > max_block_size) size = max_block_size;
  } else {
    size = start_block_size;
  }
  if (n > size - kHeaderSize) {
    // TODO(sanjay): Check if n + kHeaderSize would overflow
    size = kHeaderSize + n;
  }

  Block* b = reinterpret_cast<Block*>(options_.block_alloc(size));
  b->pos = kHeaderSize + n;
  b->size = size;
  if (b->avail() == 0) {
    // Do not attempt to reuse this block.
    b->owner = NULL;
  } else {
    b->owner = me;
  }
#ifdef ADDRESS_SANITIZER
  // Poison the rest of the block for ASAN. It was unpoisoned by the underlying
  // malloc but it's not yet usable until we return it as part of an allocation.
  ASAN_POISON_MEMORY_REGION(
      reinterpret_cast<char*>(b) + b->pos, b->size - b->pos);
#endif
  return b;
}

void Arena::AddBlock(Block* b) {
  MutexLock l(&blocks_lock_);
  AddBlockInternal(b);
}

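// Prepends b to the arena's block list and publishes it with a release store.
// If the block still has space available, it is also published as the
// allocation hint. AddBlock() calls this under blocks_lock_; Init() and
// FreeBlocks() call it directly.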
void Arena::AddBlockInternal(Block* b) {
  b->next = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  google::protobuf::internal::Release_Store(&blocks_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
  if (b->avail() != 0) {
    // Direct future allocations to this block.
    google::protobuf::internal::Release_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
  }
}

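// Registers a cleanup callback for elem. The node itself is allocated on the
// arena and pushed onto the head of the cleanup list with an atomic exchange,
// so the list ends up in reverse order of registration.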
void Arena::AddListNode(void* elem, void (*cleanup)(void*)) {
  Node* node = reinterpret_cast<Node*>(AllocateAligned(sizeof(Node)));
  node->elem = elem;
  node->cleanup = cleanup;
  node->next = reinterpret_cast<Node*>(
      google::protobuf::internal::NoBarrier_AtomicExchange(&cleanup_list_,
            reinterpret_cast<google::protobuf::internal::AtomicWord>(node)));
}

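// Allocates n bytes, rounded up to an 8-byte boundary. The fast paths use the
// calling thread's cached block or the arena's hint block; everything else
// falls through to SlowAlloc().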
void* Arena::AllocateAligned(const std::type_info* allocated, size_t n) {
  // Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.)
  n = (n + 7) & -8;

  // Monitor allocation if needed.
  if (GOOGLE_PREDICT_FALSE(hooks_cookie_ != NULL) &&
      options_.on_arena_allocation != NULL) {
    options_.on_arena_allocation(allocated, n, hooks_cookie_);
  }

  // If this thread already owns a block in this arena then try to use that.
  // This fast path optimizes the case where multiple threads allocate from the
  // same arena.
  if (thread_cache().last_lifecycle_id_seen == lifecycle_id_ &&
      thread_cache().last_block_used_ != NULL) {
    if (thread_cache().last_block_used_->avail() < n) {
      return SlowAlloc(n);
    }
    return AllocFromBlock(thread_cache().last_block_used_, n);
  }

  // Check whether we own the last accessed block on this arena.
  // This fast path optimizes the case where a single thread uses multiple
  // arenas.
  void* me = &thread_cache();
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&hint_));
  if (!b || b->owner != me || b->avail() < n) {
    return SlowAlloc(n);
  }
  return AllocFromBlock(b, n);
}

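// Carves n bytes off the front of block b's free space and returns a pointer
// to them. The caller is responsible for ensuring that b->avail() >= n.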
void* Arena::AllocFromBlock(Block* b, size_t n) {
  size_t p = b->pos;
  b->pos = p + n;
#ifdef ADDRESS_SANITIZER
  ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b) + p, n);
#endif
  return reinterpret_cast<char*>(b) + p;
}

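// Slow path: the thread's cached block was missing or full. Looks for another
// block already owned by this thread; failing that, allocates a new block and
// adds it to the arena.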
void* Arena::SlowAlloc(size_t n) {
  void* me = &thread_cache();
  Block* b = FindBlock(me);  // Find block owned by me.
  // See if allocation fits in my latest block.
  if (b != NULL && b->avail() >= n) {
    SetThreadCacheBlock(b);
    google::protobuf::internal::NoBarrier_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
    return AllocFromBlock(b, n);
  }
  b = NewBlock(me, b, n, options_.start_block_size, options_.max_block_size);
  AddBlock(b);
  if (b->owner == me) {  // If this block can be reused (see NewBlock()).
    SetThreadCacheBlock(b);
  }
  return reinterpret_cast<char*>(b) + kHeaderSize;
}

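// Returns the total number of bytes allocated for blocks, including headers
// and unused tail space. The result may not reflect allocations still in
// flight on other threads.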
uint64 Arena::SpaceAllocated() const {
  uint64 space_allocated = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    space_allocated += (b->size);
    b = b->next;
  }
  return space_allocated;
}

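// Returns the number of bytes actually handed out to callers: each block's
// position minus its header, with unused block tails not counted.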
uint64 Arena::SpaceUsed() const {
  uint64 space_used = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    space_used += (b->pos - kHeaderSize);
    b = b->next;
  }
  return space_used;
}

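// Computes both of the above totals in a single walk of the block list.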
pair<uint64, uint64> Arena::SpaceAllocatedAndUsed() const {
  uint64 allocated = 0;
  uint64 used = 0;

  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    allocated += b->size;
    used += (b->pos - kHeaderSize);
    b = b->next;
  }
  return std::make_pair(allocated, used);
}

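// Deallocates every block the arena owns and returns the total bytes that had
// been allocated. A user-supplied initial block is not freed; it is reset and
// re-added so the arena can keep using it.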
uint64 Arena::FreeBlocks() {
  uint64 space_allocated = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  Block* first_block = NULL;
  while (b != NULL) {
    space_allocated += (b->size);
    Block* next = b->next;
    if (next != NULL) {
      options_.block_dealloc(b, b->size);
    } else {
      if (owns_first_block_) {
        options_.block_dealloc(b, b->size);
      } else {
        // User passed in the first block, skip free'ing the memory.
        first_block = b;
      }
    }
    b = next;
  }
  blocks_ = 0;
  hint_ = 0;
  if (!owns_first_block_) {
    // Make the first block that was passed in through ArenaOptions
    // available for reuse.
    first_block->pos = kHeaderSize;
    // Thread which calls Reset() owns the first block. This allows the
    // single-threaded case to allocate on the first block without taking any
    // locks.
    first_block->owner = &thread_cache();
    SetThreadCacheBlock(first_block);
    AddBlockInternal(first_block);
  }
  return space_allocated;
}

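// Runs every registered cleanup callback. Because AddListNode() pushes at the
// head of the list, callbacks run in reverse order of registration.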
void Arena::CleanupList() {
  Node* head =
      reinterpret_cast<Node*>(google::protobuf::internal::NoBarrier_Load(&cleanup_list_));
  while (head != NULL) {
    head->cleanup(head->elem);
    head = head->next;
  }
  cleanup_list_ = 0;
}

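// Linear scan of the block list for the most recently added block owned by
// the given thread, or NULL if there is none.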
Arena::Block* Arena::FindBlock(void* me) {
  // TODO(sanjay): We might want to keep a separate list with one
  // entry per thread.
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&blocks_));
  while (b != NULL && b->owner != me) {
    b = b->next;
  }
  return b;
}

}  // namespace protobuf
}  // namespace google