// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <google/protobuf/arena.h>


#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif

namespace google {
namespace protobuf {

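// A minimal usage sketch of the arena implemented in this file (MyMessage is
// a hypothetical arena-enabled generated message type; see arena.h for the
// public interface):
//
//   google::protobuf::Arena arena;
//   MyMessage* msg =
//       google::protobuf::Arena::CreateMessage<MyMessage>(&arena);
//   // Use msg normally; its storage is released when `arena` is reset or
//   // destroyed, with non-trivial destructors run via the cleanup list.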
google::protobuf::internal::SequenceNumber Arena::lifecycle_id_generator_;
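// The per-thread cache of the most recently used block is defined below in
// one of three forms: platforms without real thread-local support use the
// emulated ThreadLocalStorage wrapper, DLL builds hide the thread-local
// behind an accessor function (presumably so the variable itself need not be
// exported), and everything else uses a plain GOOGLE_THREAD_LOCAL variable.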
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
Arena::ThreadCache& Arena::thread_cache() {
  static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
      new internal::ThreadLocalStorage<ThreadCache>();
  return *thread_cache_->Get();
}
#elif defined(PROTOBUF_USE_DLLS)
Arena::ThreadCache& Arena::thread_cache() {
  static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_ = { -1, NULL };
  return thread_cache_;
}
#else
GOOGLE_THREAD_LOCAL Arena::ThreadCache Arena::thread_cache_ = { -1, NULL };
#endif

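// Sets up a freshly-constructed arena: resets block and cleanup bookkeeping,
// registers the caller-supplied initial block (if any) as an unowned block,
// and invokes the on_arena_init hook.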
void Arena::Init() {
  lifecycle_id_ = lifecycle_id_generator_.GetNext();
  blocks_ = 0;
  hint_ = 0;
  owns_first_block_ = true;
  cleanup_list_ = 0;

  if (options_.initial_block != NULL && options_.initial_block_size > 0) {
    GOOGLE_CHECK_GE(options_.initial_block_size, sizeof(Block))
        << ": Initial block size too small for header.";

    // Add first unowned block to list.
    Block* first_block = reinterpret_cast<Block*>(options_.initial_block);
    first_block->size = options_.initial_block_size;
    first_block->pos = kHeaderSize;
    first_block->next = NULL;
    // Thread which calls Init() owns the first block. This allows the
    // single-threaded case to allocate on the first block without taking any
    // locks.
    first_block->owner = &thread_cache();
    SetThreadCacheBlock(first_block);
    AddBlockInternal(first_block);
    owns_first_block_ = false;
  }

  // Call the initialization hook
  if (options_.on_arena_init != NULL) {
    hooks_cookie_ = options_.on_arena_init(this);
  } else {
    hooks_cookie_ = NULL;
  }
}

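// Runs cleanups, frees all owned blocks, and fires the on_arena_destruction
// hook with the total number of bytes that had been allocated.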
Arena::~Arena() {
  uint64 space_allocated = ResetInternal();

  // Call the destruction hook
  if (options_.on_arena_destruction != NULL) {
    options_.on_arena_destruction(this, hooks_cookie_, space_allocated);
  }
}

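// Discards everything allocated on the arena and makes it ready for reuse.
// Bumping the lifecycle id ensures that stale ThreadCache entries from the
// previous lifecycle can never be matched again.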
uint64 Arena::Reset() {
  // Invalidate any ThreadCaches pointing to any blocks we just destroyed.
  lifecycle_id_ = lifecycle_id_generator_.GetNext();
  return ResetInternal();
}

uint64 Arena::ResetInternal() {
  CleanupList();
  uint64 space_allocated = FreeBlocks();

  // Call the reset hook
  if (options_.on_arena_reset != NULL) {
    options_.on_arena_reset(this, hooks_cookie_, space_allocated);
  }

  return space_allocated;
}

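// Allocates a new block for `me`. The size doubles the caller's previous
// block (capped at max_block_size), or starts at start_block_size for the
// caller's first block, and is always grown enough to fit the pending
// request of n bytes plus the block header.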
Arena::Block* Arena::NewBlock(void* me, Block* my_last_block, size_t n,
                              size_t start_block_size, size_t max_block_size) {
  size_t size;
  if (my_last_block != NULL) {
    // Double the current block size, up to a limit.
    size = 2 * (my_last_block->size);
    if (size > max_block_size) size = max_block_size;
  } else {
    size = start_block_size;
  }
  if (n > size - kHeaderSize) {
    // TODO(sanjay): Check if n + kHeaderSize would overflow
    size = kHeaderSize + n;
  }

  Block* b = reinterpret_cast<Block*>(options_.block_alloc(size));
  b->pos = kHeaderSize + n;
  b->size = size;
  b->owner = me;
#ifdef ADDRESS_SANITIZER
  // Poison the rest of the block for ASAN. It was unpoisoned by the underlying
  // malloc but it's not yet usable until we return it as part of an allocation.
  ASAN_POISON_MEMORY_REGION(
      reinterpret_cast<char*>(b) + b->pos, b->size - b->pos);
#endif
  return b;
}

void Arena::AddBlock(Block* b) {
  MutexLock l(&blocks_lock_);
  AddBlockInternal(b);
}

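// Prepends a block to the blocks_ list with a release store so that
// concurrent readers see a fully initialized block. Callers must either hold
// blocks_lock_ (as in AddBlock()) or know that no other thread can touch the
// list (as in Init() and FreeBlocks()). A block with space left also becomes
// the hint for future allocations.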
void Arena::AddBlockInternal(Block* b) {
  b->next = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  google::protobuf::internal::Release_Store(&blocks_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
  if (b->avail() != 0) {
    // Direct future allocations to this block.
    google::protobuf::internal::Release_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
  }
}

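// Registers a cleanup function (typically a destructor call) for `elem`. The
// node is pushed onto the front of the lock-free cleanup_list_, so
// CleanupList() later runs cleanups in reverse (LIFO) order of registration.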
void Arena::AddListNode(void* elem, void (*cleanup)(void*)) {
  Node* node = reinterpret_cast<Node*>(AllocateAligned(sizeof(Node)));
  node->elem = elem;
  node->cleanup = cleanup;
  node->next = reinterpret_cast<Node*>(
      google::protobuf::internal::NoBarrier_AtomicExchange(&cleanup_list_,
          reinterpret_cast<google::protobuf::internal::AtomicWord>(node)));
}

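// Main allocation entry point: rounds the request up to 8-byte alignment,
// reports it to the allocation hook if one is installed, and then tries two
// lock-free fast paths before falling back to SlowAlloc().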
void* Arena::AllocateAligned(const std::type_info* allocated, size_t n) {
  // Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.)
  n = (n + 7) & -8;
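  // e.g. n = 13 becomes (13 + 7) & -8 = 16; sizes that are already a
  // multiple of 8, such as n = 16, are unchanged.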

  // Monitor allocation if needed.
  if (GOOGLE_PREDICT_FALSE(hooks_cookie_ != NULL) &&
      options_.on_arena_allocation != NULL) {
    options_.on_arena_allocation(allocated, n, hooks_cookie_);
  }

  // If this thread already owns a block in this arena then try to use that.
  // This fast path optimizes the case where multiple threads allocate from the
  // same arena.
  if (thread_cache().last_lifecycle_id_seen == lifecycle_id_ &&
      thread_cache().last_block_used_ != NULL) {
    if (thread_cache().last_block_used_->avail() < n) {
      return SlowAlloc(n);
    }
    return AllocFromBlock(thread_cache().last_block_used_, n);
  }

  // Check whether we own the last accessed block on this arena.
  // This fast path optimizes the case where a single thread uses multiple
  // arenas.
  void* me = &thread_cache();
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&hint_));
  if (!b || b->owner != me || b->avail() < n) {
    return SlowAlloc(n);
  }
  return AllocFromBlock(b, n);
}

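// Bump-pointer allocation from a block the caller already owns; the caller
// must have checked that b->avail() >= n.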
void* Arena::AllocFromBlock(Block* b, size_t n) {
  size_t p = b->pos;
  b->pos = p + n;
#ifdef ADDRESS_SANITIZER
  ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b) + p, n);
#endif
  return reinterpret_cast<char*>(b) + p;
}

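// Slow path: look for an existing block owned by this thread and, if it
// cannot satisfy the request, allocate and register a new block.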
void* Arena::SlowAlloc(size_t n) {
  void* me = &thread_cache();
  Block* b = FindBlock(me);  // Find block owned by me.
  // See if allocation fits in my latest block.
  if (b != NULL && b->avail() >= n) {
    SetThreadCacheBlock(b);
    google::protobuf::internal::NoBarrier_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
    return AllocFromBlock(b, n);
  }
  b = NewBlock(me, b, n, options_.start_block_size, options_.max_block_size);
  AddBlock(b);
  SetThreadCacheBlock(b);
  return reinterpret_cast<char*>(b) + kHeaderSize;
}

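// The accounting methods below walk the block list without taking
// blocks_lock_, so their results are best-effort snapshots if other threads
// are allocating concurrently.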
uint64 Arena::SpaceAllocated() const {
  uint64 space_allocated = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    space_allocated += (b->size);
    b = b->next;
  }
  return space_allocated;
}

uint64 Arena::SpaceUsed() const {
  uint64 space_used = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    space_used += (b->pos - kHeaderSize);
    b = b->next;
  }
  return space_used;
}

std::pair<uint64, uint64> Arena::SpaceAllocatedAndUsed() const {
  uint64 allocated = 0;
  uint64 used = 0;

  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    allocated += b->size;
    used += (b->pos - kHeaderSize);
    b = b->next;
  }
  return std::make_pair(allocated, used);
}

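// Frees every block the arena owns and returns the total bytes that had been
// allocated. A user-provided initial block is not freed; it is reset and
// re-linked so the arena can keep using it.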
uint64 Arena::FreeBlocks() {
  uint64 space_allocated = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  Block* first_block = NULL;
  while (b != NULL) {
    space_allocated += (b->size);
    Block* next = b->next;
    if (next != NULL) {
      options_.block_dealloc(b, b->size);
    } else {
      if (owns_first_block_) {
        options_.block_dealloc(b, b->size);
      } else {
        // User passed in the first block, so skip freeing the memory.
        first_block = b;
      }
    }
    b = next;
  }
  blocks_ = 0;
  hint_ = 0;
  if (!owns_first_block_) {
    // Make the first block that was passed in through ArenaOptions
    // available for reuse.
    first_block->pos = kHeaderSize;
    // Thread which calls Reset() owns the first block. This allows the
    // single-threaded case to allocate on the first block without taking any
    // locks.
    first_block->owner = &thread_cache();
    SetThreadCacheBlock(first_block);
    AddBlockInternal(first_block);
  }
  return space_allocated;
}

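// Runs all registered cleanup functions (object destructors) and empties the
// list. Called from ResetInternal() before the blocks are freed.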
void Arena::CleanupList() {
  Node* head =
      reinterpret_cast<Node*>(google::protobuf::internal::NoBarrier_Load(&cleanup_list_));
  while (head != NULL) {
    head->cleanup(head->elem);
    head = head->next;
  }
  cleanup_list_ = 0;
}

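// Linear scan of the block list for the most recently added block owned by
// `me`, or NULL if this thread has no block in this arena yet.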
Arena::Block* Arena::FindBlock(void* me) {
  // TODO(sanjay): We might want to keep a separate list with one
  // entry per thread.
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&blocks_));
  while (b != NULL && b->owner != me) {
    b = b->next;
  }
  return b;
}

}  // namespace protobuf
}  // namespace google