//
//
// Copyright 2017 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//

#include <grpc/support/port_platform.h>

#include "src/core/lib/resource_quota/arena.h"

#include <atomic>
#include <new>
#include <utility>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

#include "src/core/lib/gpr/alloc.h"

namespace {

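// Compute and allocate the backing storage for an arena: the Arena header
// (rounded up to alignment) followed by `initial_size` bytes for the initial
// zone. The block is cacheline-aligned when GPR_CACHELINE_SIZE is a larger
// multiple of GPR_MAX_ALIGNMENT, and GPR_MAX_ALIGNMENT-aligned otherwise.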
void* ArenaStorage(size_t initial_size) {
  static constexpr size_t base_size =
      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_core::Arena));
  initial_size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
  size_t alloc_size = base_size + initial_size;
  static constexpr size_t alignment =
      (GPR_CACHELINE_SIZE > GPR_MAX_ALIGNMENT &&
       GPR_CACHELINE_SIZE % GPR_MAX_ALIGNMENT == 0)
          ? GPR_CACHELINE_SIZE
          : GPR_MAX_ALIGNMENT;
  return gpr_malloc_aligned(alloc_size, alignment);
}

}  // namespace

namespace grpc_core {

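// The destructor walks the overflow zones from newest to oldest, destructing
// and freeing each one. The arena's own block (header + initial zone) is not
// freed here; Destroy() releases it after running this destructor.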
Arena::~Arena() {
  Zone* z = last_zone_;
  while (z) {
    Zone* prev_z = z->prev;
    Destruct(z);
    gpr_free_aligned(z);
    z = prev_z;
  }
#ifdef GRPC_ARENA_TRACE_POOLED_ALLOCATIONS
  gpr_log(GPR_ERROR, "DESTRUCT_ARENA %p", this);
#endif
}

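// Create() placement-constructs an Arena inside storage sized for the
// requested initial zone. Illustrative usage sketch (not part of this file;
// the caller-side names are hypothetical):
//
//   Arena* arena = Arena::Create(/*initial_size=*/1024, &memory_allocator);
//   void* buf = arena->Alloc(64);
//   // ... use buf for the lifetime of the arena ...
//   arena->Destroy();  // tear down with Destroy(), never delete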
Arena* Arena::Create(size_t initial_size, MemoryAllocator* memory_allocator) {
  return new (ArenaStorage(initial_size))
      Arena(initial_size, 0, memory_allocator);
}

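// CreateWithAlloc() behaves like Create(), but also hands back a pointer to
// the first alloc_size bytes of the initial zone (the memory immediately
// after the Arena header), saving the caller a separate Alloc() call.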
std::pair<Arena*, void*> Arena::CreateWithAlloc(
    size_t initial_size, size_t alloc_size,
    MemoryAllocator* memory_allocator) {
  static constexpr size_t base_size =
      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Arena));
  auto* new_arena = new (ArenaStorage(initial_size))
      Arena(initial_size, alloc_size, memory_allocator);
  void* first_alloc = reinterpret_cast<char*>(new_arena) + base_size;
  return std::make_pair(new_arena, first_alloc);
}

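// Destruct every object registered on the arena's managed-new list. The head
// is re-exchanged until it stays empty, since a destructor may itself add new
// managed objects to the arena while it runs.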
void Arena::DestroyManagedNewObjects() {
  ManagedNewObject* p;
  // Outer loop: clear the managed new object list.
  // We do this repeatedly in case a destructor ends up allocating something.
  while ((p = managed_new_head_.exchange(nullptr, std::memory_order_relaxed)) !=
         nullptr) {
    // Inner loop: destruct a batch of objects.
    while (p != nullptr) {
      Destruct(std::exchange(p, p->next));
    }
  }
}

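// Full teardown: run managed-object destructors, return the bytes accounted
// against the memory allocator, then destruct the arena and free its backing
// storage.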
void Arena::Destroy() {
  DestroyManagedNewObjects();
  memory_allocator_->Release(total_allocated_.load(std::memory_order_relaxed));
  this->~Arena();
  gpr_free_aligned(this);
}

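// Overflow path for arena allocation: the requested bytes do not fit in the
// initial zone, so a dedicated zone is heap-allocated and pushed onto the
// lock-free last_zone_ list; concurrent callers are handled by the CAS loop.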
void* Arena::AllocZone(size_t size) {
  // If the allocation does not fit in the remaining space of the initial
  // zone, create a new zone for it; any unused space in the initial zone is
  // wasted. This overflow (and the waste it causes) is uncommon because of
  // our arena sizing hysteresis: most calls should get a large enough initial
  // zone and will never need to grow the arena.
  static constexpr size_t zone_base_size =
      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Zone));
  size_t alloc_size = zone_base_size + size;
  memory_allocator_->Reserve(alloc_size);
  total_allocated_.fetch_add(alloc_size, std::memory_order_relaxed);
  Zone* z = new (gpr_malloc_aligned(alloc_size, GPR_MAX_ALIGNMENT)) Zone();
  auto* prev = last_zone_.load(std::memory_order_relaxed);
  do {
    z->prev = prev;
  } while (!last_zone_.compare_exchange_weak(prev, z, std::memory_order_relaxed,
                                             std::memory_order_relaxed));
  return reinterpret_cast<char*>(z) + zone_base_size;
}

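// Push a managed-new object onto the front of the arena's intrusive list with
// a lock-free compare-and-swap loop; DestroyManagedNewObjects() later drains
// the list and runs the destructors.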
void Arena::ManagedNewObject::Link(std::atomic<ManagedNewObject*>* head) {
  next = head->load(std::memory_order_relaxed);
  while (!head->compare_exchange_weak(next, this, std::memory_order_acq_rel,
                                      std::memory_order_relaxed)) {
  }
}

#ifndef GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
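// Pop one node from a pooled size class's free list, falling back to a fresh
// arena allocation when the list is empty. obj_size is used only for tracing
// here; alloc_size is what is actually requested from the arena on the
// fallback path.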
void* Arena::AllocPooled(size_t obj_size, size_t alloc_size,
                         std::atomic<FreePoolNode*>* head) {
  // ABA mitigation:
  // AllocPooled may be called by multiple threads, and to remove a node from
  // the free list we need to manipulate the next pointer, which a naive
  // implementation would let each thread do differently.
  // The literature contains various ways of dealing with this. Here we expect
  // to be mostly single threaded - arenas are owned by calls, and calls don't
  // do much concurrent work with the pooled allocator. The one place they do
  // is allocating metadata batches for decoding HPACK headers in chttp2.
  // So we adopt an approach that is simple and fast for the single threaded
  // case, and also correct in the multi threaded case.

  // First, take ownership of the entire free list. At this point we know that
  // no other thread can see free nodes and will be forced to allocate.
  // We think we're mostly single threaded and so that's ok.
  FreePoolNode* p = head->exchange(nullptr, std::memory_order_acquire);
  // If there are no nodes in the free list, then go ahead and allocate from
  // the arena.
  if (p == nullptr) {
    void* r = Alloc(alloc_size);
    TracePoolAlloc(obj_size, r);
    return r;
  }
  // We had a non-empty free list... but we own the *entire* free list.
  // We only want one node, so if there are extras we'd better give them back.
  if (p->next != nullptr) {
    // We perform an exchange to do so, but if there were concurrent frees
    // during this allocation then there'll be another free list that needs to
    // be merged with ours.
    FreePoolNode* extra = head->exchange(p->next, std::memory_order_acq_rel);
    // If a free list was concurrently created, merge it into the overall free
    // list here by simply freeing each node in turn. This is O(n), but only
    // in the number of nodes that were freed concurrently, and again: we
    // expect real world use cases to be mostly single threaded.
    while (extra != nullptr) {
      FreePoolNode* next = extra->next;
      FreePooled(extra, head);
      extra = next;
    }
  }
  TracePoolAlloc(obj_size, p);
  return p;
}

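// Return a pooled object to its size class's free list with a lock-free push;
// the object's memory stays owned by the arena and is only reused by later
// AllocPooled() calls against the same free list.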
void Arena::FreePooled(void* p, std::atomic<FreePoolNode*>* head) {
  // May spuriously trace a free of an already freed object - see AllocPooled
  // ABA mitigation.
  TracePoolFree(p);
  FreePoolNode* node = static_cast<FreePoolNode*>(p);
  node->next = head->load(std::memory_order_acquire);
  while (!head->compare_exchange_weak(
      node->next, node, std::memory_order_acq_rel, std::memory_order_relaxed)) {
  }
}
#endif

}  // namespace grpc_core