/*
** Copyright 2022, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

#pragma once

#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <functional>
#include <iomanip>
#include <limits>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <string_view>
#include <type_traits>
#include <unordered_map>

#include <binder/IMemory.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <log/log_main.h>
#include <utils/StrongPointer.h>

namespace std {
template <typename T>
struct hash<::android::wp<T>> {
    size_t operator()(const ::android::wp<T>& x) const {
        return std::hash<const T*>()(x.unsafe_get());
    }
};
}  // namespace std

namespace android::mediautils {

// Allocations represent owning handles to a region of shared memory (and
// thus should not be copied, in keeping with RAII).
// To share ownership between multiple objects, use a ref-counting wrapper
// such as sp<> or std::shared_ptr, so that the destructor runs exactly once
// for a particular block of memory.

using AllocationType = ::android::sp<IMemory>;
using WeakAllocationType = ::android::wp<IMemory>;

namespace shared_allocator_impl {
constexpr inline size_t roundup(size_t size, size_t pageSize) {
    LOG_ALWAYS_FATAL_IF(pageSize == 0 || (pageSize & (pageSize - 1)) != 0,
                        "Page size not a power of 2");
    return ((size + pageSize - 1) & ~(pageSize - 1));
}
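
// For example (illustrative values): roundup(1000, 4096) == 4096 and
// roundup(8192, 4096) == 8192; a size is rounded up to the next multiple of
// pageSize, which must be a power of two for the mask trick above to work.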

constexpr inline bool isHeapValid(const sp<IMemoryHeap>& heap) {
    return (heap && heap->getBase() &&
            heap->getBase() != MAP_FAILED);  // TODO if not mapped locally
}

// Detection idiom: each variable template below is true iff T provides the
// corresponding member function with the expected signature.
template <typename, typename = void>
static constexpr bool has_deallocate_all = false;

template <typename T>
static constexpr bool has_deallocate_all<
        T, std::enable_if_t<std::is_same_v<decltype(std::declval<T>().deallocate_all()), void>,
                            void>> = true;

template <typename, typename = void>
static constexpr bool has_owns = false;

template <typename T>
static constexpr bool
        has_owns<T, std::enable_if_t<std::is_same_v<decltype(std::declval<T>().owns(
                                                            std::declval<const AllocationType>())),
                                                    bool>,
                                     void>> = true;

template <typename, typename = void>
static constexpr bool has_dump = false;

template <typename T>
static constexpr bool has_dump<
        T,
        std::enable_if_t<std::is_same_v<decltype(std::declval<T>().dump()), std::string>, void>> =
        true;

}  // namespace shared_allocator_impl

struct BasicAllocRequest {
    size_t size;
};
struct NamedAllocRequest : public BasicAllocRequest {
    std::string_view name;
};

// We must add a layer of indirection to hold a handle to the actual block,
// because an sp<> cannot be re-created from an object once its ref-count has
// dropped to zero. So, we have to hold onto an extra reference here. We
// effectively want to know when the refCount of the object drops to one,
// since we need to hold on to a reference to pass the object to interfaces
// requiring an sp<>.
// TODO is there some way to avoid paying this cost?
template <typename Allocator>
class ScopedAllocator;

class ScopedAllocation : public BnMemory {
  public:
    template <typename T>
    friend class ScopedAllocator;
    template <typename Deallocator>
    ScopedAllocation(const AllocationType& allocation, Deallocator&& deallocator)
        : mAllocation(allocation), mDeallocator(std::forward<Deallocator>(deallocator)) {}

    // Defer the implementation to the underlying mAllocation

    virtual sp<IMemoryHeap> getMemory(ssize_t* offset = nullptr,
                                      size_t* size = nullptr) const override {
        return mAllocation->getMemory(offset, size);
    }

  private:
    ~ScopedAllocation() override { mDeallocator(mAllocation); }

    const AllocationType mAllocation;
    const std::function<void(const AllocationType&)> mDeallocator;
};

// Allocations are only deallocated when going out of scope.
// This should almost always be the outermost allocator.
template <typename Allocator>
class ScopedAllocator {
  public:
    static constexpr size_t alignment() { return Allocator::alignment(); }

    explicit ScopedAllocator(const std::shared_ptr<Allocator>& allocator) : mAllocator(allocator) {}

    ScopedAllocator() : mAllocator(std::make_shared<Allocator>()) {}

    template <typename T>
    auto allocate(T&& request) {
        std::lock_guard l{*mLock};
        const auto allocation = mAllocator->allocate(std::forward<T>(request));
        if (!allocation) {
            return sp<ScopedAllocation>{};
        }
        return sp<ScopedAllocation>::make(allocation,
                [allocator = mAllocator, lock = mLock] (const AllocationType& allocation) {
                    std::lock_guard l{*lock};
                    allocator->deallocate(allocation);
                });
    }

    // deallocate() and deallocate_all() are implicitly unsafe to expose here,
    // since they would permit double deallocation when the ScopedAllocation
    // is later destroyed. We could protect against this efficiently with a
    // gencount (for deallocate_all) or inefficiently (for deallocate), but we
    // choose not to.
    //
    // owns() is only safe to pseudo-implement (taking a ScopedAllocation
    // rather than a raw AllocationType) due to static cast requirements.
    template <typename Enable = bool>
    auto owns(const sp<ScopedAllocation>& allocation) const
            -> std::enable_if_t<shared_allocator_impl::has_owns<Allocator>, Enable> {
        std::lock_guard l{*mLock};
        return mAllocator->owns(allocation->mAllocation);
    }

    template <typename Enable = std::string>
    auto dump() const -> std::enable_if_t<shared_allocator_impl::has_dump<Allocator>, Enable> {
        std::lock_guard l{*mLock};
        return mAllocator->dump();
    }

  private:
    // We store a shared_ptr to ensure the allocator outlives its allocations,
    // which call back into it when they are destroyed.
    const std::shared_ptr<Allocator> mAllocator;
    const std::shared_ptr<std::mutex> mLock = std::make_shared<std::mutex>();
};
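
// A minimal usage sketch (illustrative; the allocator choice and size are
// assumptions for the example, not part of this API):
//
//   ScopedAllocator<MemoryHeapBaseAllocator> allocator;
//   {
//       const sp<ScopedAllocation> allocation =
//               allocator.allocate(BasicAllocRequest{1024});
//       // Use the allocation as an IMemory. The underlying block is
//       // deallocated when the last sp<ScopedAllocation> reference drops,
//       // even if the ScopedAllocator itself has already been destroyed.
//   }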

// A simple policy for PolicyAllocator which enforces a pool size and an
// allocation size range.
template <size_t PoolSize, size_t MinAllocSize = 0,
          size_t MaxAllocSize = std::numeric_limits<size_t>::max()>
class SizePolicy {
    static_assert(PoolSize > 0);

  public:
    template <typename T>
    bool isValid(T&& request) const {
        static_assert(std::is_base_of_v<BasicAllocRequest, std::decay_t<T>>);
        return !(request.size > kMaxAllocSize || request.size < kMinAllocSize ||
                 mPoolSize + request.size > kPoolSize);
    }

    void allocated(const AllocationType& alloc) { mPoolSize += alloc->size(); }

    void deallocated(const AllocationType& alloc) { mPoolSize -= alloc->size(); }

    void deallocated_all() { mPoolSize = 0; }

    static constexpr size_t kPoolSize = PoolSize;
    static constexpr size_t kMinAllocSize = MinAllocSize;
    static constexpr size_t kMaxAllocSize = MaxAllocSize;

  private:
    size_t mPoolSize = 0;
};

// An allocator which accepts or rejects allocation requests by a parametrized
// policy (which can carry state).
template <typename Allocator, typename Policy>
class PolicyAllocator {
  public:
    static constexpr size_t alignment() { return Allocator::alignment(); }

    PolicyAllocator(Allocator allocator, Policy policy)
        : mAllocator(allocator), mPolicy(std::move(policy)) {}

    // Default initialize the allocator and policy
    PolicyAllocator() = default;

    template <typename T>
    AllocationType allocate(T&& request) {
        static_assert(std::is_base_of_v<android::mediautils::BasicAllocRequest, std::decay_t<T>>);
        request.size = shared_allocator_impl::roundup(request.size, alignment());
        if (!mPolicy.isValid(request)) {
            return {};
        }
        AllocationType val = mAllocator.allocate(std::forward<T>(request));
        if (val == nullptr) return val;
        mPolicy.allocated(val);
        return val;
    }

    void deallocate(const AllocationType& allocation) {
        if (!allocation) return;
        mPolicy.deallocated(allocation);
        mAllocator.deallocate(allocation);
    }

    template <typename Enable = void>
    auto deallocate_all()
            -> std::enable_if_t<shared_allocator_impl::has_deallocate_all<Allocator>, Enable> {
        mAllocator.deallocate_all();
        mPolicy.deallocated_all();
    }

    template <typename Enable = bool>
    auto owns(const AllocationType& allocation) const
            -> std::enable_if_t<shared_allocator_impl::has_owns<Allocator>, Enable> {
        return mAllocator.owns(allocation);
    }

    template <typename Enable = std::string>
    auto dump() const -> std::enable_if_t<shared_allocator_impl::has_dump<Allocator>, Enable> {
        return mAllocator.dump();
    }

  private:
    [[no_unique_address]] Allocator mAllocator;
    [[no_unique_address]] Policy mPolicy;
};
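
// A minimal usage sketch (illustrative sizes): a pool capped at 16 pages in
// which no single allocation may exceed one page.
//
//   PolicyAllocator<MemoryHeapBaseAllocator, SizePolicy<16 * 4096, 0, 4096>>
//           allocator;
//   AllocationType mem = allocator.allocate(BasicAllocRequest{100});
//   // The request size is rounded up to 4096 before the policy sees it.
//   allocator.deallocate(mem);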

// An allocator which keeps track of outstanding allocations for logging and
// querying ownership.
template <class Allocator>
class SnoopingAllocator {
  public:
    struct AllocationData {
        std::string name;
        size_t allocation_number;
    };
    static constexpr size_t alignment() { return Allocator::alignment(); }

    SnoopingAllocator(Allocator allocator, std::string_view name)
        : mName(name), mAllocator(std::move(allocator)) {}

    explicit SnoopingAllocator(std::string_view name) : mName(name), mAllocator(Allocator{}) {}

    explicit SnoopingAllocator(Allocator allocator) : mAllocator(std::move(allocator)) {}

    // Default construct allocator and name
    SnoopingAllocator() = default;

    template <typename T>
    AllocationType allocate(T&& request) {
        static_assert(std::is_base_of_v<NamedAllocRequest, std::decay_t<T>>);
        AllocationType allocation = mAllocator.allocate(request);
        if (allocation)
            mAllocations.insert({WeakAllocationType{allocation},
                                 {std::string{request.name}, mAllocationNumber++}});
        return allocation;
    }

    void deallocate(const AllocationType& allocation) {
        if (!allocation) return;
        mAllocations.erase(WeakAllocationType{allocation});
        mAllocator.deallocate(allocation);
    }

    void deallocate_all() {
        if constexpr (shared_allocator_impl::has_deallocate_all<Allocator>) {
            mAllocator.deallocate_all();
        } else {
            for (auto& [mem, value] : mAllocations) {
                // A wp<> does not convert to sp<>, so promote; the result may
                // be null if the allocation already died, which deallocate
                // handles.
                mAllocator.deallocate(mem.promote());
            }
        }
        mAllocations.clear();
    }

    bool owns(const AllocationType& allocation) const {
        return (mAllocations.count(WeakAllocationType{allocation}) > 0);
    }

    std::string dump() const {
        std::ostringstream dump;
        dump << mName << " Allocator Dump:\n";
        dump << std::setw(8) << "HeapID" << std::setw(8) << "Size" << std::setw(8) << "Offset"
             << std::setw(8) << "Order"
             << "   Name\n";
        for (auto& [mem, value] : mAllocations) {
            // TODO Imem size and offset
            const AllocationType handle = mem.promote();
            if (!handle) {
                dump << "Invalid memory lifetime!\n";
                continue;
            }
            const auto heap = handle->getMemory();
            dump << std::setw(8) << heap->getHeapID() << std::setw(8) << heap->getSize()
                 << std::setw(8) << heap->getOffset() << std::setw(8) << value.allocation_number
                 << "   " << value.name << "\n";
        }
        return dump.str();
    }

    const std::unordered_map<WeakAllocationType, AllocationData>& getAllocations() {
        return mAllocations;
    }

  private:
    const std::string mName;
    [[no_unique_address]] Allocator mAllocator;
    // We don't take copies of the underlying information in an allocation;
    // rather, the allocation information lives on the heap and is referenced
    // via a ref-counted handle. So, the address of the allocation information
    // is appropriate to hash. For this block to be freed, the underlying
    // allocation must be referenced by no one (thus deallocated).
    std::unordered_map<WeakAllocationType, AllocationData> mAllocations;
    // For debugging purposes, monotonic
    size_t mAllocationNumber = 0;
};
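
// A minimal usage sketch (illustrative name and size). SnoopingAllocator
// requires a NamedAllocRequest so it can label each outstanding allocation:
//
//   SnoopingAllocator<MemoryHeapBaseAllocator> allocator{"ExampleHeap"};
//   const AllocationType mem =
//           allocator.allocate(NamedAllocRequest{{4096}, "track buffer"});
//   ALOGD("%s", allocator.dump().c_str());  // lists the allocation by name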

// An allocator which passes a failed allocation request to a backup allocator.
template <class PrimaryAllocator, class SecondaryAllocator>
class FallbackAllocator {
  public:
    static_assert(PrimaryAllocator::alignment() == SecondaryAllocator::alignment());
    static_assert(shared_allocator_impl::has_owns<PrimaryAllocator>);

    static constexpr size_t alignment() { return PrimaryAllocator::alignment(); }

    FallbackAllocator(const PrimaryAllocator& primary, const SecondaryAllocator& secondary)
        : mPrimary(primary), mSecondary(secondary) {}

    // Default construct primary and secondary allocator
    FallbackAllocator() = default;

    template <typename T>
    AllocationType allocate(T&& request) {
        AllocationType allocation = mPrimary.allocate(std::forward<T>(request));
        if (!allocation) allocation = mSecondary.allocate(std::forward<T>(request));
        return allocation;
    }

    void deallocate(const AllocationType& allocation) {
        if (!allocation) return;
        if (mPrimary.owns(allocation)) {
            mPrimary.deallocate(allocation);
        } else {
            mSecondary.deallocate(allocation);
        }
    }

    template <typename Enable = void>
    auto deallocate_all() -> std::enable_if_t<
            shared_allocator_impl::has_deallocate_all<PrimaryAllocator> &&
                    shared_allocator_impl::has_deallocate_all<SecondaryAllocator>,
            Enable> {
        mPrimary.deallocate_all();
        mSecondary.deallocate_all();
    }

    template <typename Enable = bool>
    auto owns(const AllocationType& allocation) const
            -> std::enable_if_t<shared_allocator_impl::has_owns<SecondaryAllocator>, Enable> {
        return mPrimary.owns(allocation) || mSecondary.owns(allocation);
    }

    template <typename Enable = std::string>
    auto dump() const
            -> std::enable_if_t<shared_allocator_impl::has_dump<PrimaryAllocator> &&
                                        shared_allocator_impl::has_dump<SecondaryAllocator>,
                                Enable> {
        return std::string("Primary: \n") + mPrimary.dump() + std::string("Secondary: \n") +
               mSecondary.dump();
    }

  private:
    [[no_unique_address]] PrimaryAllocator mPrimary;
    [[no_unique_address]] SecondaryAllocator mSecondary;
};
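
// A minimal usage sketch (illustrative composition). FallbackAllocator
// requires owns() on the primary (and on the secondary, for its own owns()
// to be available), so both sides are wrapped in a SnoopingAllocator here:
//
//   using Tracked = SnoopingAllocator<MemoryHeapBaseAllocator>;
//   using Limited = PolicyAllocator<Tracked, SizePolicy<16 * 4096>>;
//   FallbackAllocator<Limited, Tracked> allocator;
//   // Served from the limited pool while it has room, otherwise from the
//   // unlimited tracked allocator.
//   AllocationType mem = allocator.allocate(NamedAllocRequest{{4096}, "mixed"});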

// An allocator which is backed by a shared_ptr to an allocator, so multiple
// allocators can share the same backing allocator (and thus the same state).
template <typename Allocator>
class IndirectAllocator {
  public:
    static constexpr size_t alignment() { return Allocator::alignment(); }

    explicit IndirectAllocator(const std::shared_ptr<Allocator>& allocator)
        : mAllocator(allocator) {}

    template <typename T>
    AllocationType allocate(T&& request) {
        return mAllocator->allocate(std::forward<T>(request));
    }

    void deallocate(const AllocationType& allocation) {
        if (!allocation) return;
        mAllocator->deallocate(allocation);
    }

    // We can't implement deallocate_all/dump/owns, since we may not be the
    // only allocator with access to the underlying allocator (making those
    // operations ill-defined). If these methods are necessary, wrap this
    // allocator in a SnoopingAllocator.
  private:
    const std::shared_ptr<Allocator> mAllocator;
};
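
// A minimal usage sketch (illustrative): two IndirectAllocators sharing one
// backing pool, so the policy state (pool usage) is common to both.
//
//   const auto backing = std::make_shared<
//           PolicyAllocator<MemoryHeapBaseAllocator, SizePolicy<16 * 4096>>>();
//   IndirectAllocator allocator1{backing};
//   IndirectAllocator allocator2{backing};  // draws from the same pool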

// Stateless. This allocator allocates full page-aligned MemoryHeapBases
// (backed by a shared-memory-mapped anonymous file) as allocations.
class MemoryHeapBaseAllocator {
  public:
    static constexpr size_t alignment() { return 4096; /* PAGE_SIZE */ }
    static constexpr unsigned FLAGS = 0;  // default flags

    template <typename T>
    AllocationType allocate(T&& request) {
        static_assert(std::is_base_of_v<BasicAllocRequest, std::decay_t<T>>);
        auto heap =
                sp<MemoryHeapBase>::make(shared_allocator_impl::roundup(request.size, alignment()));
        if (!shared_allocator_impl::isHeapValid(heap)) {
            return {};
        }
        return sp<MemoryBase>::make(heap, 0, heap->getSize());
    }

    // Passing a block not allocated by a MemoryHeapBaseAllocator is undefined.
    void deallocate(const AllocationType& allocation) {
        if (!allocation) return;
        const auto heap = allocation->getMemory();
        if (!heap) return;
        // This causes future mapped accesses (even across process boundaries)
        // to receive SIGBUS.
        ftruncate(heap->getHeapID(), 0);
        // This static cast is safe, since as long as the block was originally
        // allocated by us, the underlying IMemoryHeap was a MemoryHeapBase
        static_cast<MemoryHeapBase&>(*heap).dispose();
    }
};
}  // namespace android::mediautils