/*
 * Copyright (C) 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef CHRE_UTIL_SEGMENTED_QUEUE_IMPL_H
#define CHRE_UTIL_SEGMENTED_QUEUE_IMPL_H

#include <type_traits>
#include <utility>

#include "chre/util/segmented_queue.h"

#include "chre/util/container_support.h"

namespace chre {

template <typename ElementType, size_t kBlockSize>
SegmentedQueue<ElementType, kBlockSize>::SegmentedQueue(size_t maxBlockCount,
                                                        size_t staticBlockCount)
    : kMaxBlockCount(maxBlockCount), kStaticBlockCount(staticBlockCount) {
  CHRE_ASSERT(kMaxBlockCount >= kStaticBlockCount);
  CHRE_ASSERT(kStaticBlockCount > 0);
  CHRE_ASSERT(kMaxBlockCount * kBlockSize < SIZE_MAX);
  mRawStoragePtrs.reserve(kMaxBlockCount);
  for (size_t i = 0; i < kStaticBlockCount; i++) {
    pushOneBlock();
  }
}

template <typename ElementType, size_t kBlockSize>
SegmentedQueue<ElementType, kBlockSize>::~SegmentedQueue() {
  clear();
}

template <typename ElementType, size_t kBlockSize>
bool SegmentedQueue<ElementType, kBlockSize>::push_back(
    const ElementType &element) {
  if (!prepareForPush()) {
    return false;
  }
  new (&locateDataAddress(mTail)) ElementType(element);
  mSize++;

  return true;
}

template <typename ElementType, size_t kBlockSize>
bool SegmentedQueue<ElementType, kBlockSize>::push_back(ElementType &&element) {
  if (!prepareForPush()) {
    return false;
  }
  new (&locateDataAddress(mTail)) ElementType(std::move(element));
  mSize++;

  return true;
}

template <typename ElementType, size_t kBlockSize>
bool SegmentedQueue<ElementType, kBlockSize>::push(const ElementType &element) {
  return push_back(element);
}

template <typename ElementType, size_t kBlockSize>
bool SegmentedQueue<ElementType, kBlockSize>::push(ElementType &&element) {
  return push_back(std::move(element));
}

template <typename ElementType, size_t kBlockSize>
template <typename... Args>
bool SegmentedQueue<ElementType, kBlockSize>::emplace_back(Args &&...args) {
  if (!prepareForPush()) {
    return false;
  }
  new (&locateDataAddress(mTail)) ElementType(std::forward<Args>(args)...);
  mSize++;

  return true;
}

template <typename ElementType, size_t kBlockSize>
ElementType &SegmentedQueue<ElementType, kBlockSize>::operator[](size_t index) {
  CHRE_ASSERT(index < mSize);

  return locateDataAddress(relativeIndexToAbsolute(index));
}

template <typename ElementType, size_t kBlockSize>
const ElementType &SegmentedQueue<ElementType, kBlockSize>::operator[](
    size_t index) const {
  CHRE_ASSERT(index < mSize);

  return locateDataAddress(relativeIndexToAbsolute(index));
}

template <typename ElementType, size_t kBlockSize>
ElementType &SegmentedQueue<ElementType, kBlockSize>::back() {
  CHRE_ASSERT(!empty());

  return locateDataAddress(mTail);
}

template <typename ElementType, size_t kBlockSize>
const ElementType &SegmentedQueue<ElementType, kBlockSize>::back() const {
  CHRE_ASSERT(!empty());

  return locateDataAddress(mTail);
}

template <typename ElementType, size_t kBlockSize>
ElementType &SegmentedQueue<ElementType, kBlockSize>::front() {
  CHRE_ASSERT(!empty());

  return locateDataAddress(mHead);
}

template <typename ElementType, size_t kBlockSize>
const ElementType &SegmentedQueue<ElementType, kBlockSize>::front() const {
  CHRE_ASSERT(!empty());

  return locateDataAddress(mHead);
}

template <typename ElementType, size_t kBlockSize>
void SegmentedQueue<ElementType, kBlockSize>::pop_front() {
  CHRE_ASSERT(!empty());

  doRemove(mHead);

  if (mSize == 0) {
    // TODO(b/258860898): Define a more proactive way to deallocate unused
    // blocks.
    resetEmptyQueue();
  } else {
    mHead = advanceOrWrapAround(mHead);
  }
}

template <typename ElementType, size_t kBlockSize>
void SegmentedQueue<ElementType, kBlockSize>::pop() {
  pop_front();
}

template <typename ElementType, size_t kBlockSize>
bool SegmentedQueue<ElementType, kBlockSize>::remove(size_t index) {
  if (index >= mSize) {
    return false;
  }
  size_t absoluteIndex = relativeIndexToAbsolute(index);
  doRemove(absoluteIndex);
  if (mSize == 0) {
    resetEmptyQueue();
  } else {
    // TODO(b/258557394): Optimize by adding a check to see whether pulling
    // elements from the head toward the tail would be more efficient.
    moveElements(advanceOrWrapAround(absoluteIndex), absoluteIndex,
                 absoluteIndexToRelative(mTail) - index);
    mTail = subtractOrWrapAround(mTail, /* steps= */ 1);
  }
  return true;
}

template <typename ElementType, size_t kBlockSize>
size_t SegmentedQueue<ElementType, kBlockSize>::searchMatches(
    MatchingFunction *matchFunc, size_t foundIndicesLen,
    size_t foundIndices[]) {
  size_t foundCount = 0;
  size_t searchIndex = advanceOrWrapAround(mTail);
  bool firstRound = true;

  if (size() == 0) {
    return 0;
  }

  // firstRound needs to be checked because when the queue is full, the index
  // after mTail is mHead, which would otherwise cause the loop to terminate
  // prematurely on the first iteration.
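  // Note: the search starts at mTail and walks backward toward mHead, so the
  // indices stored in foundIndices are ordered from the element closest to the
  // tail to the element closest to the head.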
  while ((searchIndex != mHead || firstRound) &&
         foundCount != foundIndicesLen) {
    searchIndex = subtractOrWrapAround(searchIndex, 1 /* steps */);
    firstRound = false;
    if (matchFunc(locateDataAddress(searchIndex))) {
      foundIndices[foundCount] = searchIndex;
      ++foundCount;
    }
  }
  return foundCount;
}

template <typename ElementType, size_t kBlockSize>
void SegmentedQueue<ElementType, kBlockSize>::fillGaps(
    size_t gapCount, const size_t gapIndices[]) {
  if (gapCount == 0) {
    return;
  }

  // TODO(b/264326627): Check that gapIndices is in reverse order.
  // TODO(b/264326627): Give a detailed explanation (example).
  // Move the elements between gap indices section by section, starting with
  // the section closest to the head. The destination index is the gap index
  // minus the number of gaps that have already been filled.
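  // For example, for a queue holding [A, B, C, D, E, F] where B and D have
  // been removed (gapIndices = {index of D, index of B}), the loop moves C
  // back by one slot to fill B's gap, and the final moveElements() below moves
  // E and F back by two slots to fill D's gap, leaving [A, C, E, F] with mTail
  // moved back by two.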
  for (size_t i = gapCount - 1; i > 0; --i) {
    moveElements(advanceOrWrapAround(gapIndices[i]),
                 subtractOrWrapAround(gapIndices[i], gapCount - 1 - i),
                 absoluteIndexToRelative(gapIndices[i - 1]) -
                     absoluteIndexToRelative(gapIndices[i]) - 1);
  }

  // Since mTail is not guaranteed to be a gap, moving the last section needs
  // to be handled as a special case.
  moveElements(
      advanceOrWrapAround(gapIndices[0]),
      subtractOrWrapAround(gapIndices[0], gapCount - 1),
      absoluteIndexToRelative(mTail) - absoluteIndexToRelative(gapIndices[0]));
  mTail = subtractOrWrapAround(mTail, gapCount);
}

template <typename ElementType, size_t kBlockSize>
size_t SegmentedQueue<ElementType, kBlockSize>::removeMatchedFromBack(
    MatchingFunction *matchFunc, size_t maxNumOfElementsRemoved,
    FreeFunction *freeFunction, void *extraDataForFreeFunction) {
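  // Search backward from the tail for up to maxNumOfElementsRemoved matching
  // elements, release each one (via its destructor, or via freeFunction when
  // one is provided), and then compact the queue by filling the gaps left
  // behind.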
  size_t removeIndex[maxNumOfElementsRemoved];
  size_t removedItemCount =
      searchMatches(matchFunc, maxNumOfElementsRemoved, removeIndex);

  if (removedItemCount != 0) {
    for (size_t i = 0; i < removedItemCount; ++i) {
      if (freeFunction == nullptr) {
        doRemove(removeIndex[i]);
      } else {
        --mSize;
        freeFunction(locateDataAddress(removeIndex[i]),
                     extraDataForFreeFunction);
      }
    }

    if (mSize == 0) {
      resetEmptyQueue();
    } else {
      fillGaps(removedItemCount, removeIndex);
    }
  }

  return removedItemCount;
}

template <typename ElementType, size_t kBlockSize>
bool SegmentedQueue<ElementType, kBlockSize>::pushOneBlock() {
  return insertBlock(mRawStoragePtrs.size());
}

template <typename ElementType, size_t kBlockSize>
bool SegmentedQueue<ElementType, kBlockSize>::insertBlock(size_t blockIndex) {
  // Inserting at an arbitrary index is supported because this data structure
  // started out as a std::deque and we would like to support push_front() in
  // the future. This function should not be needed once b/258771255 is
  // implemented.
  CHRE_ASSERT(mRawStoragePtrs.size() != kMaxBlockCount);
  bool success = false;

  Block *newBlockPtr = static_cast<Block *>(memoryAlloc(sizeof(Block)));
  if (newBlockPtr != nullptr) {
    success = mRawStoragePtrs.insert(blockIndex, UniquePtr(newBlockPtr));
    if (success) {
      if (!empty() && mHead >= blockIndex * kBlockSize) {
        // If we are inserting a block before the current mHead, mHead must be
        // shifted by one block, since the element it refers to has moved one
        // block further into the underlying storage.
        mHead += kBlockSize;
      }

      // If mTail is at or after the new block, move the elements so that the
      // queue stays contiguous.
      if (mTail >= blockIndex * kBlockSize) {
        moveElements((blockIndex + 1) * kBlockSize, blockIndex * kBlockSize,
                     (mTail + 1) % kBlockSize);
      }
    }
  }
  if (!success) {
    LOG_OOM();
  }
  return success;
}

template <typename ElementType, size_t kBlockSize>
void SegmentedQueue<ElementType, kBlockSize>::moveElements(size_t srcIndex,
                                                           size_t destIndex,
                                                           size_t count) {
  // TODO(b/259281024): Make sure SegmentedQueue::moveElements() does not
  // incorrectly overwrite elements.
  while (count--) {
    doMove(srcIndex, destIndex,
           typename std::is_move_constructible<ElementType>::type());
    srcIndex = advanceOrWrapAround(srcIndex);
    destIndex = advanceOrWrapAround(destIndex);
  }
}

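// doMove() uses tag dispatch on std::is_move_constructible: the std::true_type
// overload move-constructs the element into its new slot, while the
// std::false_type overload falls back to copy construction.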
template <typename ElementType, size_t kBlockSize>
void SegmentedQueue<ElementType, kBlockSize>::doMove(size_t srcIndex,
                                                     size_t destIndex,
                                                     std::true_type) {
  new (&locateDataAddress(destIndex))
      ElementType(std::move(locateDataAddress(srcIndex)));
}

template <typename ElementType, size_t kBlockSize>
void SegmentedQueue<ElementType, kBlockSize>::doMove(size_t srcIndex,
                                                     size_t destIndex,
                                                     std::false_type) {
  new (&locateDataAddress(destIndex)) ElementType(locateDataAddress(srcIndex));
}

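// relativeIndexToAbsolute() and absoluteIndexToRelative() convert between
// indices relative to the front of the queue (0 == front element) and absolute
// indices into the underlying block storage, wrapping around at capacity().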
template <typename ElementType, size_t kBlockSize>
size_t SegmentedQueue<ElementType, kBlockSize>::relativeIndexToAbsolute(
    size_t index) {
  size_t absoluteIndex = mHead + index;
  if (absoluteIndex >= capacity()) {
    absoluteIndex -= capacity();
  }
  return absoluteIndex;
}

template <typename ElementType, size_t kBlockSize>
size_t SegmentedQueue<ElementType, kBlockSize>::absoluteIndexToRelative(
    size_t index) {
  if (mHead > index) {
    index += capacity();
  }
  return index - mHead;
}

template <typename ElementType, size_t kBlockSize>
bool SegmentedQueue<ElementType, kBlockSize>::prepareForPush() {
  bool success = false;
  if (full()) {
    LOG_OOM();
  } else {
    if (mSize == capacity()) {
      // TODO(b/258771255): The index-based insertBlock() should go away once
      // we have an ArrayQueue-based container.
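      // All allocated blocks are full, so grow by inserting a new block at the
      // position where the element after mTail will land.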
      size_t insertBlockIndex = (mTail + 1) / kBlockSize;
      success = insertBlock(insertBlockIndex);
    } else {
      success = true;
    }
    if (success) {
      mTail = advanceOrWrapAround(mTail);
    }
  }
  return success;
}

template <typename ElementType, size_t kBlockSize>
void SegmentedQueue<ElementType, kBlockSize>::clear() {
  if (!std::is_trivially_destructible<ElementType>::value) {
    while (!empty()) {
      pop_front();
    }
  } else {
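    // The elements are trivially destructible, so simply reset the bookkeeping
    // without destroying them individually.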
    mSize = 0;
    mHead = 0;
    mTail = capacity() - 1;
  }
}

template <typename ElementType, size_t kBlockSize>
ElementType &SegmentedQueue<ElementType, kBlockSize>::locateDataAddress(
    size_t index) {
  return mRawStoragePtrs[index / kBlockSize].get()->data()[index % kBlockSize];
}

template <typename ElementType, size_t kBlockSize>
size_t SegmentedQueue<ElementType, kBlockSize>::advanceOrWrapAround(
    size_t index) {
  return index >= capacity() - 1 ? 0 : index + 1;
}

template <typename ElementType, size_t kBlockSize>
size_t SegmentedQueue<ElementType, kBlockSize>::subtractOrWrapAround(
    size_t index, size_t steps) {
  return index < steps ? capacity() + index - steps : index - steps;
}

template <typename ElementType, size_t kBlockSize>
void SegmentedQueue<ElementType, kBlockSize>::doRemove(size_t index) {
  mSize--;
  locateDataAddress(index).~ElementType();
}

template <typename ElementType, size_t kBlockSize>
void SegmentedQueue<ElementType, kBlockSize>::resetEmptyQueue() {
  CHRE_ASSERT(empty());

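  // Release any blocks that were allocated beyond the static minimum and reset
  // the head and tail indices.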
  while (mRawStoragePtrs.size() != kStaticBlockCount) {
    mRawStoragePtrs.pop_back();
  }
  mHead = 0;
  mTail = capacity() - 1;
}

}  // namespace chre

#endif  // CHRE_UTIL_SEGMENTED_QUEUE_IMPL_H