/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "common/rs_common_def.h"

#include <cstdlib>
#include <mutex>
#include <new>
#include <unordered_map>
#include <vector>

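/*
 * MemAllocater is a small, size-bucketed pooling allocator that backs the
 * MemObject::operator new/delete overrides defined below: instances of
 * classes derived from MemObject are served from this pool instead of the
 * default heap. Allocations are grouped by requested size; each size keeps
 * its own free list (Cache) that is refilled in slabs of sizeStep_ slots,
 * and each slot is prefixed with a BlockHead recording the size so Free()
 * can route the pointer back to the matching list.
 */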
namespace OHOS {
namespace Rosen {
namespace {
class MemAllocater final {
    // Header stored in front of every slot; ptr[] marks the start of the
    // user-visible payload (zero-length array, a GNU extension).
    struct BlockHead {
        int size;
        char ptr[0];
    };
    // Free list of ready-to-use slots for a single allocation size.
    using Cache = std::vector<char*>;

public:
    static MemAllocater& GetInstance();
    MemAllocater() = default;
    ~MemAllocater();

    void* Alloc(size_t size);
    void Free(void* ptr);

private:
    MemAllocater(const MemAllocater&) = delete;
    MemAllocater& operator=(const MemAllocater&) = delete;

    std::mutex mutex_;
    // Per-size free lists, keyed by the requested allocation size.
    std::unordered_map<size_t, Cache> memCaches_;
    // Raw slabs owned by the allocater; released in the destructor.
    std::vector<char*> blocks_;
    // Number of slots carved out of each newly allocated slab.
    static constexpr unsigned sizeStep_ = 64;
};
static MemAllocater allocater;
} // namespace

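// Returns the file-local allocater instance shared by all MemObject allocations.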
MemAllocater& MemAllocater::GetInstance()
{
    return allocater;
}

MemAllocater::~MemAllocater()
{
    std::lock_guard<std::mutex> lock(mutex_);
    // All cached slots point into the slabs held in blocks_, so freeing the
    // slabs releases every allocation at once.
    for (void* ptr : blocks_) {
        if (ptr != nullptr) {
            free(ptr);
        }
    }
    blocks_.clear();
    memCaches_.clear();
}

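// Hands out one slot of the requested size, refilling the per-size free list
// with a fresh slab when it is empty. Returns nullptr if malloc fails.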
void* MemAllocater::Alloc(size_t size)
{
    std::lock_guard<std::mutex> lock(mutex_);
    // Look up the free list for this size, creating it on first use; emplace
    // returns the existing entry otherwise, so no second lookup is needed.
    auto result = memCaches_.emplace(size, Cache());
    Cache* cachePtr = &(result.first->second);
    if (result.second) {
        cachePtr->reserve(sizeStep_);
    }

    // Every slot holds a BlockHead immediately followed by the user payload.
    size_t memSize = size + sizeof(BlockHead);
    if (cachePtr->empty()) {
        // Refill: carve one malloc'ed slab into sizeStep_ equal slots.
        char* block = static_cast<char*>(malloc(memSize * sizeStep_));
        if (block == nullptr) {
            return nullptr;
        }
        blocks_.push_back(block);
        for (unsigned i = 0; i < sizeStep_; ++i) {
            cachePtr->push_back(block + (i * memSize));
        }
    }

    // Pop a slot, record the requested size in its header, and hand back the
    // payload that follows the header.
    char* mem = cachePtr->back();
    cachePtr->pop_back();
    BlockHead* head = reinterpret_cast<BlockHead*>(mem);
    head->size = static_cast<int>(size);
    return head->ptr;
}

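// Returns a pointer previously obtained from Alloc() to the free list that
// matches the size recorded in its BlockHead.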
void MemAllocater::Free(void* ptr)
{
    if (ptr == nullptr) {
        return;
    }
    std::lock_guard<std::mutex> lock(mutex_);
    // Step back over the BlockHead written by Alloc() to recover the slot
    // start and the size key.
    char* p = static_cast<char*>(ptr) - sizeof(BlockHead);
    BlockHead* head = reinterpret_cast<BlockHead*>(p);
    auto itr = memCaches_.find(head->size);
    if (itr == memCaches_.end()) {
        // No cache for this size: fall back to releasing the block directly.
        free(p);
    } else {
        itr->second.push_back(p);
    }
}

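// MemObject routes both the default and the nothrow forms of operator
// new/delete through the pooling allocater above.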
void* MemObject::operator new(size_t size)
{
    return MemAllocater::GetInstance().Alloc(size);
}

void MemObject::operator delete(void* ptr)
{
    MemAllocater::GetInstance().Free(ptr);
}

void* MemObject::operator new(size_t size, const std::nothrow_t&) noexcept
{
    return MemAllocater::GetInstance().Alloc(size);
}

void MemObject::operator delete(void* ptr, const std::nothrow_t&) noexcept
{
    MemAllocater::GetInstance().Free(ptr);
}
} // namespace Rosen
} // namespace OHOS