//
//
// Copyright 2017 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//

// \file Arena-based allocator.
// Allows very fast allocation of memory, but that memory cannot be freed
// until the arena as a whole is freed.
// Tracks the total memory allocated against it, so that future arenas can
// pre-allocate the right amount of memory.

#ifndef GRPC_SRC_CORE_LIB_RESOURCE_QUOTA_ARENA_H
#define GRPC_SRC_CORE_LIB_RESOURCE_QUOTA_ARENA_H

#include <grpc/support/port_platform.h>

#include <stddef.h>

#include <atomic>
#include <iosfwd>
#include <memory>
#include <utility>

#include <grpc/event_engine/memory_allocator.h>

#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gprpp/construct_destruct.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/resource_quota/memory_quota.h"

#define GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
// #define GRPC_ARENA_TRACE_POOLED_ALLOCATIONS

namespace grpc_core {

namespace arena_detail {

#ifndef GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
struct PoolAndSize {
  size_t alloc_size;
  size_t pool_index;
};

template <typename Void, size_t kIndex, size_t kObjectSize,
          size_t... kBucketSize>
struct PoolIndexForSize;

template <size_t kObjectSize, size_t kIndex, size_t kSmallestRemainingBucket,
          size_t... kBucketSizes>
struct PoolIndexForSize<
    absl::enable_if_t<kObjectSize <= kSmallestRemainingBucket>, kIndex,
    kObjectSize, kSmallestRemainingBucket, kBucketSizes...> {
  static constexpr size_t kPool = kIndex;
  static constexpr size_t kSize = kSmallestRemainingBucket;
};

template <size_t kObjectSize, size_t kIndex, size_t kSmallestRemainingBucket,
          size_t... kBucketSizes>
struct PoolIndexForSize<
    absl::enable_if_t<(kObjectSize > kSmallestRemainingBucket)>, kIndex,
    kObjectSize, kSmallestRemainingBucket, kBucketSizes...>
    : public PoolIndexForSize<void, kIndex + 1, kObjectSize, kBucketSizes...> {
};

template <size_t kObjectSize, size_t... kBucketSizes>
constexpr size_t PoolFromObjectSize(
    absl::integer_sequence<size_t, kBucketSizes...>) {
  return PoolIndexForSize<void, 0, kObjectSize, kBucketSizes...>::kPool;
}

template <size_t kObjectSize, size_t... kBucketSizes>
constexpr size_t AllocationSizeFromObjectSize(
    absl::integer_sequence<size_t, kBucketSizes...>) {
  return PoolIndexForSize<void, 0, kObjectSize, kBucketSizes...>::kSize;
}

template <size_t kIndex, size_t... kBucketSizes>
struct ChoosePoolForAllocationSizeImpl;

template <size_t kIndex, size_t kBucketSize, size_t... kBucketSizes>
struct ChoosePoolForAllocationSizeImpl<kIndex, kBucketSize, kBucketSizes...> {
  static PoolAndSize Fn(size_t n) {
    if (n <= kBucketSize) return {kBucketSize, kIndex};
    return ChoosePoolForAllocationSizeImpl<kIndex + 1, kBucketSizes...>::Fn(n);
  }
};

template <size_t kIndex>
struct ChoosePoolForAllocationSizeImpl<kIndex> {
  static PoolAndSize Fn(size_t n) {
    return PoolAndSize{n, std::numeric_limits<size_t>::max()};
  }
};

template <size_t... kBucketSizes>
PoolAndSize ChoosePoolForAllocationSize(
    size_t n, absl::integer_sequence<size_t, kBucketSizes...>) {
  return ChoosePoolForAllocationSizeImpl<0, kBucketSizes...>::Fn(n);
}
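
// Illustrative sketch (not part of the build): with the bucket sizes
// <80, 304, 528, 1024> used by Arena::PoolSizes below,
//   PoolFromObjectSize<100>(PoolSizes())           == 1    // smallest bucket >= 100
//   AllocationSizeFromObjectSize<100>(PoolSizes()) == 304  // that bucket's size
//   ChoosePoolForAllocationSize(2000, PoolSizes())
//       returns {2000, std::numeric_limits<size_t>::max()} // larger than every bucket
// Oversized requests therefore bypass the free lists entirely.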
#else
template <typename T, typename A, typename B>
struct IfArray {
  using Result = A;
};

template <typename T, typename A, typename B>
struct IfArray<T[], A, B> {
  using Result = B;
};
#endif

}  // namespace arena_detail

class Arena {
#ifndef GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
  // Selected pool sizes.
  // How to tune: see tools/codegen/core/optimize_arena_pool_sizes.py
  using PoolSizes = absl::integer_sequence<size_t, 80, 304, 528, 1024>;
  struct FreePoolNode {
    FreePoolNode* next;
  };
#endif

 public:
  // Create an arena, with \a initial_size bytes in the first allocated buffer.
  static Arena* Create(size_t initial_size, MemoryAllocator* memory_allocator);
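
  // Example usage (illustrative sketch; `allocator` stands in for a
  // MemoryAllocator obtained from a MemoryQuota elsewhere):
  //   Arena* arena = Arena::Create(1024, &allocator);
  //   void* p = arena->Alloc(64);
  //   ...
  //   arena->Destroy();  // releases every allocation made from the arena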

  // Create an arena, with \a initial_size bytes in the first allocated buffer,
  // and return both a pointer to the created arena and a void* to the first
  // allocation.
  static std::pair<Arena*, void*> CreateWithAlloc(
      size_t initial_size, size_t alloc_size,
      MemoryAllocator* memory_allocator);
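
  // Example (illustrative sketch; `CallData` and `allocator` are placeholders):
  //   auto arena_and_ptr =
  //       Arena::CreateWithAlloc(1024, sizeof(CallData), &allocator);
  //   Arena* arena = arena_and_ptr.first;
  //   CallData* cd = new (arena_and_ptr.second) CallData();
  // This folds the first allocation into arena creation, skipping one atomic
  // fetch-add on the hot path.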

  // Destroy all `ManagedNew` allocated objects.
  // Allows safe destruction of these objects even if they need context held by
  // the arena.
  // Idempotent.
  // TODO(ctiller): eliminate ManagedNew.
  void DestroyManagedNewObjects();

  // Destroy an arena.
  void Destroy();

  // Return the total amount of memory allocated by this arena.
  size_t TotalUsedBytes() const {
    return total_used_.load(std::memory_order_relaxed);
  }

  // Allocate \a size bytes from the arena.
  void* Alloc(size_t size) {
    static constexpr size_t base_size =
        GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Arena));
    size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(size);
    size_t begin = total_used_.fetch_add(size, std::memory_order_relaxed);
    if (begin + size <= initial_zone_size_) {
      return reinterpret_cast<char*>(this) + base_size + begin;
    } else {
      return AllocZone(size);
    }
  }
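
  // Example (illustrative sketch; `MyStruct` is a placeholder type): raw
  // allocation whose memory is reclaimed only when the arena is destroyed:
  //   void* p = arena->Alloc(sizeof(MyStruct));
  //   MyStruct* s = new (p) MyStruct();
  // Requests that fit in the initial zone are served by a single relaxed
  // fetch-add; anything beyond that falls through to AllocZone().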

  // Allocates T from the arena.
  // The caller is responsible for calling p->~T(), but should NOT delete.
  // TODO(roth): We currently assume that all callers need alignment of 16
  // bytes, which may be wrong in some cases. When we have time, we should
  // change this to instead use the alignment of the type being allocated by
  // this method.
  template <typename T, typename... Args>
  T* New(Args&&... args) {
    T* t = static_cast<T*>(Alloc(sizeof(T)));
    new (t) T(std::forward<Args>(args)...);
    return t;
  }
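
  // Example (illustrative sketch; `Foo` is a placeholder type):
  //   Foo* foo = arena->New<Foo>(arg1, arg2);
  //   ...
  //   foo->~Foo();  // run the destructor manually; do NOT delete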

  // Like New, but has the arena call p->~T() at arena destruction time.
  // The caller should NOT delete.
  template <typename T, typename... Args>
  T* ManagedNew(Args&&... args) {
    auto* p = New<ManagedNewImpl<T>>(std::forward<Args>(args)...);
    p->Link(&managed_new_head_);
    return &p->t;
  }
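
  // Example (illustrative sketch; `Watcher` is a placeholder type):
  //   Watcher* w = arena->ManagedNew<Watcher>(args...);
  //   // No manual cleanup: ~Watcher() runs in DestroyManagedNewObjects(),
  //   // before the arena's memory is released.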

#ifndef GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
  class PooledDeleter {
   public:
    explicit PooledDeleter(std::atomic<FreePoolNode*>* free_list)
        : free_list_(free_list) {}
    PooledDeleter() = default;
    template <typename T>
    void operator()(T* p) {
      // TODO(ctiller): promise based filter hijacks ownership of some pointers
      // to make them appear as PoolPtr without really transferring ownership,
      // by setting the arena to nullptr.
      // This is a transitional hack and should be removed once promise based
      // filter is removed.
      if (free_list_ != nullptr) {
        p->~T();
        FreePooled(p, free_list_);
      }
    }

    bool has_freelist() const { return free_list_ != nullptr; }

   private:
    std::atomic<FreePoolNode*>* free_list_;
  };

  template <typename T>
  using PoolPtr = std::unique_ptr<T, PooledDeleter>;

  // Make a unique_ptr to T that is allocated from the arena.
  // When the pointer is released, the memory may be reused for other
  // MakePooled(.*) calls.
  // CAUTION: The amount of memory allocated is rounded up to the nearest
  //          value in Arena::PoolSizes, and so this may pessimize total
  //          arena size.
  template <typename T, typename... Args>
  PoolPtr<T> MakePooled(Args&&... args) {
    auto* free_list =
        &pools_[arena_detail::PoolFromObjectSize<sizeof(T)>(PoolSizes())];
    return PoolPtr<T>(
        new (AllocPooled(
            sizeof(T),
            arena_detail::AllocationSizeFromObjectSize<sizeof(T)>(PoolSizes()),
            free_list)) T(std::forward<Args>(args)...),
        PooledDeleter(free_list));
  }
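
  // Example (illustrative sketch; `Message` is a placeholder type):
  //   Arena::PoolPtr<Message> msg = arena->MakePooled<Message>(args...);
  //   msg.reset();  // runs ~Message() and pushes the block onto its size pool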

  // Make a unique_ptr to an array of T that is allocated from the arena.
  // When the pointer is released, the memory may be reused for other
  // MakePooled(.*) calls.
  // One can use MakePooledArray<char> to allocate a buffer of bytes.
  // CAUTION: The amount of memory allocated is rounded up to the nearest
  //          value in Arena::PoolSizes, and so this may pessimize total
  //          arena size.
  template <typename T>
  PoolPtr<T[]> MakePooledArray(size_t n) {
    auto where =
        arena_detail::ChoosePoolForAllocationSize(n * sizeof(T), PoolSizes());
    if (where.pool_index == std::numeric_limits<size_t>::max()) {
      return PoolPtr<T[]>(new (Alloc(where.alloc_size)) T[n],
                          PooledDeleter(nullptr));
    } else {
      return PoolPtr<T[]>(new (AllocPooled(where.alloc_size, where.alloc_size,
                                           &pools_[where.pool_index])) T[n],
                          PooledDeleter(&pools_[where.pool_index]));
    }
  }
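
  // Example (illustrative sketch): a pooled byte buffer.
  //   Arena::PoolPtr<char[]> buf = arena->MakePooledArray<char>(256);
  // 256 bytes rounds up to the 304-byte pool; a request larger than the
  // biggest pool (1024) falls back to Alloc() and is never returned to a pool.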

  // Like MakePooled, but with manual memory management.
  // The caller is responsible for calling DeletePooled() on the returned
  // pointer, and is expected to call it with the same type T as was passed to
  // this function (else the free list returned to the arena will be corrupted).
  template <typename T, typename... Args>
  T* NewPooled(Args&&... args) {
    auto* free_list =
        &pools_[arena_detail::PoolFromObjectSize<sizeof(T)>(PoolSizes())];
    return new (AllocPooled(
        sizeof(T),
        arena_detail::AllocationSizeFromObjectSize<sizeof(T)>(PoolSizes()),
        free_list)) T(std::forward<Args>(args)...);
  }

  template <typename T>
  void DeletePooled(T* p) {
    auto* free_list =
        &pools_[arena_detail::PoolFromObjectSize<sizeof(T)>(PoolSizes())];
    p->~T();
    FreePooled(p, free_list);
  }
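
  // Example (illustrative sketch; `State` is a placeholder type):
  //   State* s = arena->NewPooled<State>();
  //   ...
  //   arena->DeletePooled(s);  // must use the same T: the pool is chosen
  //                            // from sizeof(T) on both sides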
#else
  class PooledDeleter {
   public:
    PooledDeleter() = default;
    explicit PooledDeleter(std::nullptr_t) : delete_(false) {}
    template <typename T>
    void operator()(T* p) {
      // TODO(ctiller): promise based filter hijacks ownership of some pointers
      // to make them appear as PoolPtr without really transferring ownership,
      // by setting the arena to nullptr.
      // This is a transitional hack and should be removed once promise based
      // filter is removed.
      if (delete_) delete p;
    }

    bool has_freelist() const { return delete_; }

   private:
    bool delete_ = true;
  };

  class ArrayPooledDeleter {
   public:
    ArrayPooledDeleter() = default;
    explicit ArrayPooledDeleter(std::nullptr_t) : delete_(false) {}
    template <typename T>
    void operator()(T* p) {
      // TODO(ctiller): promise based filter hijacks ownership of some pointers
      // to make them appear as PoolPtr without really transferring ownership,
      // by setting the arena to nullptr.
      // This is a transitional hack and should be removed once promise based
      // filter is removed.
      if (delete_) delete[] p;
    }

    bool has_freelist() const { return delete_; }

   private:
    bool delete_ = true;
  };

  template <typename T>
  using PoolPtr =
      std::unique_ptr<T, typename arena_detail::IfArray<
                             T, PooledDeleter, ArrayPooledDeleter>::Result>;

  // Make a unique_ptr to T that is allocated from the arena.
  // When the pointer is released, the memory may be reused for other
  // MakePooled(.*) calls.
  // CAUTION: The amount of memory allocated is rounded up to the nearest
  //          value in Arena::PoolSizes, and so this may pessimize total
  //          arena size.
  template <typename T, typename... Args>
  static PoolPtr<T> MakePooled(Args&&... args) {
    return PoolPtr<T>(new T(std::forward<Args>(args)...), PooledDeleter());
  }

  // Make a unique_ptr to an array of T that is allocated from the arena.
  // When the pointer is released, the memory may be reused for other
  // MakePooled(.*) calls.
  // One can use MakePooledArray<char> to allocate a buffer of bytes.
  // CAUTION: The amount of memory allocated is rounded up to the nearest
  //          value in Arena::PoolSizes, and so this may pessimize total
  //          arena size.
  template <typename T>
  PoolPtr<T[]> MakePooledArray(size_t n) {
    return PoolPtr<T[]>(new T[n], ArrayPooledDeleter());
  }

  // Like MakePooled, but with manual memory management.
  // The caller is responsible for calling DeletePooled() on the returned
  // pointer, and is expected to call it with the same type T as was passed to
  // this function (else the free list returned to the arena will be corrupted).
  template <typename T, typename... Args>
  T* NewPooled(Args&&... args) {
    return new T(std::forward<Args>(args)...);
  }

  template <typename T>
  void DeletePooled(T* p) {
    delete p;
  }
#endif

 private:
  struct Zone {
    Zone* prev;
  };

  struct ManagedNewObject {
    ManagedNewObject* next = nullptr;
    void Link(std::atomic<ManagedNewObject*>* head);
    virtual ~ManagedNewObject() = default;
  };

  template <typename T>
  struct ManagedNewImpl : public ManagedNewObject {
    T t;
    template <typename... Args>
    explicit ManagedNewImpl(Args&&... args) : t(std::forward<Args>(args)...) {}
  };

  // Initialize an arena.
  // Parameters:
  //   initial_size: The initial size of the whole arena in bytes. These bytes
  //   are contained within 'zone 0'. If the arena user ends up requiring more
  //   memory than the arena contains in zone 0, subsequent zones are allocated
  //   on demand and maintained in a tail-linked list.
  //
  //   initial_alloc: Optionally, construct the arena as though a call to
  //   Alloc() had already been made for initial_alloc bytes. This provides a
  //   quick optimization (avoiding an atomic fetch-add) for the common case
  //   where we wish to create an arena and then perform an immediate
  //   allocation.
  explicit Arena(size_t initial_size, size_t initial_alloc,
                 MemoryAllocator* memory_allocator)
      : total_used_(GPR_ROUND_UP_TO_ALIGNMENT_SIZE(initial_alloc)),
        initial_zone_size_(initial_size),
        memory_allocator_(memory_allocator) {}

  ~Arena();

  void* AllocZone(size_t size);

#ifndef GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
  void* AllocPooled(size_t obj_size, size_t alloc_size,
                    std::atomic<FreePoolNode*>* head);
  static void FreePooled(void* p, std::atomic<FreePoolNode*>* head);
#endif

  void TracePoolAlloc(size_t size, void* ptr) {
    (void)size;
    (void)ptr;
#ifdef GRPC_ARENA_TRACE_POOLED_ALLOCATIONS
    gpr_log(GPR_ERROR, "ARENA %p ALLOC %" PRIdPTR " @ %p", this, size, ptr);
#endif
  }
  static void TracePoolFree(void* ptr) {
    (void)ptr;
#ifdef GRPC_ARENA_TRACE_POOLED_ALLOCATIONS
    gpr_log(GPR_ERROR, "FREE %p", ptr);
#endif
  }

  // Keep track of the total used size. We use this in our call sizing
  // hysteresis.
  std::atomic<size_t> total_used_{0};
  std::atomic<size_t> total_allocated_{0};
  const size_t initial_zone_size_;
  // If the initial arena allocation wasn't enough, we allocate additional zones
  // in a reverse linked list. Each additional zone consists of (1) a pointer to
  // the zone added before this zone (null if this is the first additional zone)
  // and (2) the allocated memory. The arena itself maintains a pointer to the
  // last zone; the zone list is reverse-walked during arena destruction only.
  std::atomic<Zone*> last_zone_{nullptr};
  std::atomic<ManagedNewObject*> managed_new_head_{nullptr};
#ifndef GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
  std::atomic<FreePoolNode*> pools_[PoolSizes::size()]{};
#endif
  // The backing memory allocator, drawn from the memory quota.
  MemoryAllocator* const memory_allocator_;
};

// Smart pointer for arenas when the final size is not required.
struct ScopedArenaDeleter {
  void operator()(Arena* arena) { arena->Destroy(); }
};
using ScopedArenaPtr = std::unique_ptr<Arena, ScopedArenaDeleter>;
inline ScopedArenaPtr MakeScopedArena(size_t initial_size,
                                      MemoryAllocator* memory_allocator) {
  return ScopedArenaPtr(Arena::Create(initial_size, memory_allocator));
}
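
// Example usage (illustrative sketch; `allocator` stands in for a
// MemoryAllocator owned elsewhere, `SomeType` is a placeholder type):
//   ScopedArenaPtr arena = MakeScopedArena(1024, &allocator);
//   auto* obj = arena->New<SomeType>();
//   // arena->Destroy() runs automatically when `arena` leaves scope.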

// Arenas form a context for activities
template <>
struct ContextType<Arena> {};
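
// Example (illustrative sketch): promise-based code can fetch the ambient
// arena through the promise context machinery from
// "src/core/lib/promise/context.h" (included above):
//   Arena* arena = GetContext<Arena>();
//   auto* obj = arena->New<SomeState>();  // `SomeState` is a placeholder type
// This only works inside an activity that has an Arena context installed.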

}  // namespace grpc_core

#endif  // GRPC_SRC_CORE_LIB_RESOURCE_QUOTA_ARENA_H