/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkAutoMalloc_DEFINED
#define SkAutoMalloc_DEFINED

#include "SkMacros.h"
#include "SkMalloc.h"
#include "SkNoncopyable.h"
#include "SkTypes.h"

#include <memory>

/**
 * Manage an allocated block of heap memory. This object is the sole manager of
 * the lifetime of the block, so the caller must not call sk_free() or delete
 * on the block, unless release() was called.
 */
class SkAutoMalloc : SkNoncopyable {
public:
    explicit SkAutoMalloc(size_t size = 0)
        : fPtr(size ? sk_malloc_throw(size) : nullptr), fSize(size) {}

    /**
     * Passed to reset to specify what happens if the requested size is smaller
     * than the current size (and the current block was dynamically allocated).
     */
    enum OnShrink {
        /**
         * If the requested size is smaller than the current size, and the
         * current block is dynamically allocated, free the old block and
         * malloc a new block of the smaller size.
         */
        kAlloc_OnShrink,

        /**
         * If the requested size is smaller than the current size, and the
         * current block is dynamically allocated, just return the old
         * block.
         */
        kReuse_OnShrink
    };

    /**
     * Reallocates the block to a new size. The ptr may or may not change.
     */
    void* reset(size_t size = 0, OnShrink shrink = kAlloc_OnShrink) {
        if (size != fSize && (size > fSize || kReuse_OnShrink != shrink)) {
            fPtr.reset(size ? sk_malloc_throw(size) : nullptr);
            fSize = size;
        }
        return fPtr.get();
    }

    /**
     * Return the allocated block.
     */
    void* get() { return fPtr.get(); }
    const void* get() const { return fPtr.get(); }

    /** Transfer ownership of the current ptr to the caller, setting the
        internal reference to null. Note the caller is responsible for calling
        sk_free on the returned address.
     */
    void* release() {
        fSize = 0;
        return fPtr.release();
    }

private:
    struct WrapFree {
        void operator()(void* p) { sk_free(p); }
    };
    std::unique_ptr<void, WrapFree> fPtr;
    size_t fSize;  // can be larger than the requested size (see kReuse)
};
#define SkAutoMalloc(...) SK_REQUIRE_LOCAL_VAR(SkAutoMalloc)
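
/*
 * A minimal usage sketch (illustration only; byteCount and largerCount are hypothetical
 * values, not part of this API): the block is freed automatically when the SkAutoMalloc
 * goes out of scope, unless ownership is transferred with release().
 *
 *     SkAutoMalloc storage(byteCount);   // heap block, freed at end of scope
 *     void* buffer = storage.get();
 *     // ... fill buffer with up to byteCount bytes ...
 *     storage.reset(largerCount);        // fresh allocation; previous contents are not preserved
 *     void* owned = storage.release();   // caller now owns the block ...
 *     sk_free(owned);                    // ... and must sk_free() it
 */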

/**
 * Manage an allocated block of memory. If the requested size is <= kSizeRequested (or slightly
 * more), then the allocation will come from the stack rather than the heap. This object is the
 * sole manager of the lifetime of the block, so the caller must not call sk_free() or delete on
 * the block.
 */
template <size_t kSizeRequested> class SkAutoSMalloc : SkNoncopyable {
public:
    /**
     * Creates initially empty storage. get() returns a ptr, but it is to a zero-byte allocation.
     * Must call reset(size) to return an allocated block.
     */
    SkAutoSMalloc() {
        fPtr = fStorage;
        fSize = kSize;
    }

    /**
     * Allocate a block of the specified size. If size <= kSizeRequested (or slightly more), then
     * the allocation will come from the stack, otherwise it will be dynamically allocated.
     */
    explicit SkAutoSMalloc(size_t size) {
        fPtr = fStorage;
        fSize = kSize;
        this->reset(size);
    }

    /**
     * Free the allocated block (if any). If the block was small enough to have been allocated on
     * the stack, then this does nothing.
     */
    ~SkAutoSMalloc() {
        if (fPtr != (void*)fStorage) {
            sk_free(fPtr);
        }
    }

    /**
     * Return the allocated block. May return non-null even if the block is of zero size. Since
     * this may be on the stack or dynamically allocated, the caller must not call sk_free() on it,
     * but must rely on SkAutoSMalloc to manage it.
     */
    void* get() const { return fPtr; }

    /**
     * Return a new block of the requested size, freeing (as necessary) any previously allocated
     * block. As with the constructor, if size <= kSizeRequested (or slightly more) then the return
     * block may be allocated locally, rather than from the heap.
     */
    void* reset(size_t size,
                SkAutoMalloc::OnShrink shrink = SkAutoMalloc::kAlloc_OnShrink,
                bool* didChangeAlloc = nullptr) {
        size = (size < kSize) ? kSize : size;
        bool alloc = size != fSize && (SkAutoMalloc::kAlloc_OnShrink == shrink || size > fSize);
        if (didChangeAlloc) {
            *didChangeAlloc = alloc;
        }
        if (alloc) {
            if (fPtr != (void*)fStorage) {
                sk_free(fPtr);
            }

            if (size == kSize) {
                SkASSERT(fPtr != fStorage);  // otherwise we lied when setting didChangeAlloc.
                fPtr = fStorage;
            } else {
                fPtr = sk_malloc_throw(size);
            }

            fSize = size;
        }
        SkASSERT(fSize >= size && fSize >= kSize);
        SkASSERT((fPtr == fStorage) || fSize > kSize);
        return fPtr;
    }

private:
    // Align up to 32 bits.
    static const size_t kSizeAlign4 = SkAlign4(kSizeRequested);
#if defined(SK_BUILD_FOR_GOOGLE3)
    // Stack frame size is limited for SK_BUILD_FOR_GOOGLE3. 4k is less than the actual max,
    // but some functions have multiple large stack allocations.
    static const size_t kMaxBytes = 4 * 1024;
    static const size_t kSize = kSizeRequested > kMaxBytes ? kMaxBytes : kSizeAlign4;
#else
    static const size_t kSize = kSizeAlign4;
#endif

    void*    fPtr;
    size_t   fSize;  // can be larger than the requested size (see kReuse)
    uint32_t fStorage[kSize >> 2];
};
// Can't guard the constructor because it's a template class.

#endif
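
/*
 * A minimal usage sketch for SkAutoSMalloc (illustration only; rowBytes and the 1024-byte
 * reservation are hypothetical): the template parameter reserves stack storage, and reset()
 * only touches the heap when the request outgrows that reservation.
 *
 *     SkAutoSMalloc<1024> rowStorage(rowBytes);  // stack-backed when rowBytes <= 1024
 *     void* row = rowStorage.get();
 *     // ... use row ...
 *     rowStorage.reset(2048);                    // exceeds the reservation: heap-allocated
 *     // the destructor frees the heap block, or does nothing if the stack storage was in use
 */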