/*
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMMON_COMPONENTS_HEAP_ALLOCATOR_ROSALLOC_DEQUE_H
#define COMMON_COMPONENTS_HEAP_ALLOCATOR_ROSALLOC_DEQUE_H

#include <cstddef>
#include <cstdint>

#include "common_components/heap/allocator/memory_map.h"
#include "common_components/log/log.h"

#define DEBUG_DEQUE false
#if DEBUG_DEQUE
#define DEQUE_ASSERT(cond, msg) ASSERT_LOGF(cond, msg)
#else
#define DEQUE_ASSERT(cond, msg) (void(0))
#endif

namespace common {
// Designed for single-use, single-purpose operations
// Supports both queue-like and stack-like operations
// Memory is efficiently cleared in O(1) time by simply resetting pointers
// No need to explicitly free memory, allowing for quick reuse
// Optimized for scenarios where content is discarded after use
template<class ValType>
class SingleUseDeque {
public:
    static constexpr size_t VAL_SIZE = sizeof(ValType);
    void Init(size_t mapSize)
    {
        static_assert(VAL_SIZE == sizeof(void*), "invalid val type");
        MemoryMap::Option opt = MemoryMap::DEFAULT_OPTIONS;
        opt.tag = "arkcommon_alloc_ros_sud";
        opt.reqBase = nullptr;
        memMap_ = MemoryMap::MapMemory(mapSize, mapSize, opt);
#ifdef _WIN64
        MemoryMap::CommitMemory(memMap_->GetBaseAddr(), mapSize);
#endif
        beginAddr_ = reinterpret_cast<HeapAddress>(memMap_->GetBaseAddr());
        endAddr_ = reinterpret_cast<HeapAddress>(memMap_->GetCurrEnd());
        Clear();
    }
    void Init(MemoryMap& other)
    {
        // Init from another SingleUseDeque's map, i.e. the two deques share the same memory map
        static_assert(VAL_SIZE == sizeof(void*), "invalid val type");
        memMap_ = &other;
        beginAddr_ = reinterpret_cast<HeapAddress>(memMap_->GetBaseAddr());
        endAddr_ = reinterpret_cast<HeapAddress>(memMap_->GetCurrEnd());
        Clear();
    }
    void Fini() noexcept { MemoryMap::DestroyMemoryMap(memMap_); }
    MemoryMap& GetMemMap() { return *memMap_; }
    bool Empty() const { return topAddr_ < frontAddr_; }
    void Push(ValType v)
    {
        topAddr_ += VAL_SIZE;
        DEQUE_ASSERT(topAddr_ < endAddr_, "not enough memory");
        *reinterpret_cast<ValType*>(topAddr_) = v;
    }
    ValType Top()
    {
        DEQUE_ASSERT(topAddr_ >= frontAddr_, "read empty queue");
        return *reinterpret_cast<ValType*>(topAddr_);
    }
    void Pop()
    {
        DEQUE_ASSERT(topAddr_ >= frontAddr_, "pop empty queue");
        topAddr_ -= VAL_SIZE;
    }
    ValType Front()
    {
        DEQUE_ASSERT(frontAddr_ <= topAddr_, "front reach end");
        return *reinterpret_cast<ValType*>(frontAddr_);
    }
    void PopFront()
    {
        DEQUE_ASSERT(frontAddr_ <= topAddr_, "pop front empty queue");
        frontAddr_ += VAL_SIZE;
    }
    void Clear()
    {
        frontAddr_ = beginAddr_;
        topAddr_ = beginAddr_ - VAL_SIZE;
    }

private:
    MemoryMap* memMap_ = nullptr;
    HeapAddress beginAddr_ = 0;
    HeapAddress frontAddr_ = 0;
    HeapAddress topAddr_ = 0;
    HeapAddress endAddr_ = 0;
};
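
// Usage sketch (illustrative only, not part of the interface): a SingleUseDeque
// backed by its own mapping, used first stack-like and then queue-like. The
// 4 KiB map size is an arbitrary assumption, not a value mandated by this header.
//
//     SingleUseDeque<void*> sud;
//     sud.Init(4096);              // maps a dedicated region for the deque
//     int dummy = 0;
//     sud.Push(&dummy);            // stack-like use: Push / Top / Pop
//     void* back = sud.Top();
//     sud.Pop();
//     sud.Push(&dummy);            // queue-like use: Push / Front / PopFront
//     void* first = sud.Front();
//     sud.PopFront();
//     sud.Clear();                 // O(1): only the front/top pointers are reset
//     sud.Fini();                  // releases the backing memory map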

// This stack-based deque optimizes memory usage by avoiding heap allocations,
// which prevents unnecessary RAM access and eliminates the risk of memory leaks
// from non-releasable memory. Its on-stack capacity is limited by the stack size;
// pushes beyond LOCAL_LENGTH spill into the backing SingleUseDeque.
template<class Type>
class LocalDeque {
public:
    static_assert(sizeof(Type) == sizeof(void*), "invalid val type");
    static constexpr int LOCAL_LENGTH = ALLOC_UTIL_PAGE_SIZE / sizeof(Type);
    explicit LocalDeque(SingleUseDeque<Type>& singleUseDeque) : sud_(&singleUseDeque) {}
    ~LocalDeque() = default;
    bool Empty() const { return (top_ < front_) || (front_ == LOCAL_LENGTH && sud_->Empty()); }
    void Push(Type v)
    {
        if (LIKELY_CC(top_ < LOCAL_LENGTH - 1)) {
            array_[++top_] = v;
            return;
        } else if (top_ == LOCAL_LENGTH - 1) {
            ++top_;
            sud_->Clear();
        }
        sud_->Push(v);
    }
    Type Top()
    {
        if (LIKELY_CC(top_ < LOCAL_LENGTH)) {
            DEQUE_ASSERT(top_ >= front_, "read empty queue");
            return array_[top_];
        }
        return sud_->Top();
    }
    void Pop()
    {
        if (LIKELY_CC(top_ < LOCAL_LENGTH)) {
            DEQUE_ASSERT(top_ >= front_, "pop empty queue");
            --top_;
            return;
        }
        DEQUE_ASSERT(top_ == LOCAL_LENGTH, "pop error");
        sud_->Pop();
        if (sud_->Empty()) {
            // if the local array is empty too, reuse the local array
            if (front_ == LOCAL_LENGTH) {
                front_ = 0;
                top_ = -1;
            } else if (front_ < LOCAL_LENGTH) {
                --top_;
                return;
            }
        }
    }
    Type Front()
    {
        if (LIKELY_CC(front_ < LOCAL_LENGTH)) {
            DEQUE_ASSERT(front_ <= top_, "read empty queue front");
            return array_[front_];
        }
        DEQUE_ASSERT(top_ == LOCAL_LENGTH, "queue front error");
        return sud_->Front();
    }
    void PopFront()
    {
        if (LIKELY_CC(front_ < LOCAL_LENGTH)) {
            DEQUE_ASSERT(front_ <= top_, "pop front empty queue");
            ++front_;
            return;
        }
        DEQUE_ASSERT(front_ == LOCAL_LENGTH, "pop front error");
        sud_->PopFront();
    }

private:
    int front_ = 0;
    int top_ = -1;
    SingleUseDeque<Type>* sud_;
    Type array_[LOCAL_LENGTH] = { 0 };
};
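
// Usage sketch (illustrative only, not part of the interface): a LocalDeque keeps
// up to LOCAL_LENGTH elements in its on-stack array and spills further pushes into
// the backing SingleUseDeque. The 4 KiB map size is an arbitrary assumption.
//
//     SingleUseDeque<void*> backing;
//     backing.Init(4096);
//     {
//         LocalDeque<void*> local(backing);   // lives on the caller's stack
//         int dummy = 0;
//         local.Push(&dummy);
//         while (!local.Empty()) {
//             void* v = local.Front();        // FIFO traversal
//             (void)v;
//             local.PopFront();
//         }
//     }
//     backing.Fini();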

// This allocator serves fixed-size blocks for a certain native data structure.
// It is very lightweight but does not recycle pages: freed blocks are kept on a
// free list for reuse, and the underlying mapping is only released by Fini().
template<size_t allocSize, size_t align>
class RTAllocatorT {
    struct Slot {
        Slot* next = nullptr;
    };

public:
    void Init(size_t mapSize)
    {
        static_assert(allocSize >= sizeof(Slot), "invalid alloc size");
        static_assert(align >= alignof(Slot), "invalid align");
        static_assert(allocSize % align == 0, "size not aligned");

        MemoryMap::Option opt = MemoryMap::DEFAULT_OPTIONS;
        opt.tag = "arkcommon_alloc";
        opt.reqBase = nullptr;
        memMap_ = MemoryMap::MapMemory(mapSize, mapSize, opt);
#ifdef _WIN64
        MemoryMap::CommitMemory(memMap_->GetBaseAddr(), mapSize);
#endif
        currAddr_ = reinterpret_cast<HeapAddress>(memMap_->GetBaseAddr());
        endAddr_ = reinterpret_cast<HeapAddress>(memMap_->GetCurrEnd());
        head_ = nullptr;
    }

    void Fini() noexcept { MemoryMap::DestroyMemoryMap(memMap_); }

    void* Allocate()
    {
        void* result = nullptr;
        if (UNLIKELY_CC(this->head_ == nullptr)) {
            DEQUE_ASSERT(this->currAddr_ + allocSize <= this->endAddr_, "not enough memory");
            result = reinterpret_cast<void*>(this->currAddr_);
            this->currAddr_ += allocSize;
        } else {
            result = reinterpret_cast<void*>(this->head_);
            this->head_ = this->head_->next;
        }
        // no zeroing
        return result;
    }

    void Deallocate(void* addr)
    {
        reinterpret_cast<Slot*>(addr)->next = this->head_;
        this->head_ = reinterpret_cast<Slot*>(addr);
    }

private:
    Slot* head_ = nullptr;
    HeapAddress currAddr_ = 0;
    HeapAddress endAddr_ = 0;
    MemoryMap* memMap_ = nullptr;
};
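
// Usage sketch (illustrative only, not part of the interface): an RTAllocatorT
// that hands out blocks sized for a hypothetical Node type and recycles freed
// blocks through its free list. Node and the 4 KiB map size are assumptions made
// up for this sketch.
//
//     struct Node { Node* next; void* payload; };
//     RTAllocatorT<sizeof(Node), alignof(Node)> allocator;
//     allocator.Init(4096);             // one mapping for the allocator's lifetime
//     void* a = allocator.Allocate();   // note: returned memory is not zeroed
//     void* b = allocator.Allocate();
//     allocator.Deallocate(a);          // a's slot goes onto the free list...
//     void* c = allocator.Allocate();   // ...and is handed out again (c == a)
//     allocator.Fini();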
} // namespace common

#endif // COMMON_COMPONENTS_HEAP_ALLOCATOR_ROSALLOC_DEQUE_H