/**
 * Copyright (c) 2023-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_PLUGINS_ETS_RUNTIME_INTEROP_JS_ITEM_POOL_H_
#define PANDA_PLUGINS_ETS_RUNTIME_INTEROP_JS_ITEM_POOL_H_

#include <array>
#include <atomic>
#include <cstdint>
#include <new>

#include "libpandabase/macros.h"
#include "libpandabase/mem/mem.h"
#include "libpandabase/utils/math_helpers.h"

namespace ark::ets::interop::js::ets_proxy {

namespace testing {
class ItemsPoolTest;
}  // namespace testing

// Fixed-capacity pool of `Item` slots backed by caller-provided memory.
// Slot 0 is reserved for the counter of allocated items; freed slots are linked
// into a free list and reused by subsequent allocations.
template <typename Item, uint32_t NR_INDEX_BITS>
class ItemsPool {
    union PaddedItem {
        Item item;
        PaddedItem *next;
        std::atomic<size_t> size;  // Used only by the reserved slot at index 0
        std::array<uint8_t, helpers::math::GetPowerOfTwoValue32(sizeof(Item))> aligned;

        PaddedItem()  // NOLINT(cppcoreguidelines-pro-type-member-init)
        {
            new (&item) Item();
        }

        ~PaddedItem()
        {
            item.~Item();
        }

        NO_COPY_SEMANTIC(PaddedItem);
        NO_MOVE_SEMANTIC(PaddedItem);
    };

    static constexpr size_t MAX_INDEX = 1ULL << NR_INDEX_BITS;
    static constexpr size_t PADDED_ITEM_SIZE = sizeof(PaddedItem);

    static PaddedItem *GetPaddedItem(Item *item)
    {
        ASSERT(ToUintPtr(item) % PADDED_ITEM_SIZE == 0);
        return reinterpret_cast<PaddedItem *>(item);
    }

public:
    static constexpr size_t MAX_POOL_SIZE = (static_cast<size_t>(1U) << NR_INDEX_BITS) * PADDED_ITEM_SIZE;

    ItemsPool(void *data, size_t size)
        : data_(reinterpret_cast<PaddedItem *>(data)),
          dataEnd_(reinterpret_cast<PaddedItem *>(ToUintPtr(data_) + size)),
          currentPos_(reinterpret_cast<PaddedItem *>(data))
    {
        ASSERT(data != nullptr);
        ASSERT(size > 0U);
        ASSERT(size % PADDED_ITEM_SIZE == 0);
        ASSERT(MaxSize() <= MAX_POOL_SIZE);
        // Index 0 is always reserved as internal space for the count of allocated items
        ++currentPos_;  // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        ASSERT(Capacity() == 1);
        // Atomic with relaxed order reason: data race with size loading with no synchronization or ordering
        // constraints
        data_->size.store(0U, std::memory_order_relaxed);
    }

    ~ItemsPool() = default;

    // Allocates one item: reuses a slot from the free list if available, otherwise
    // bumps currentPos_. Returns nullptr when the pool is exhausted.
    Item *AllocItem()
    {
        if (freeList_ != nullptr) {
            PaddedItem *newItem = freeList_;
            freeList_ = freeList_->next;
            // Atomic with relaxed order reason: data race with size loading with no synchronization or ordering
            // constraints
            data_->size.fetch_add(1U, std::memory_order_relaxed);
            return &(new (newItem) PaddedItem())->item;
        }

        if (UNLIKELY(currentPos_ >= dataEnd_)) {
            // Out of memory
            return nullptr;
        }

        PaddedItem *newItem = currentPos_;
        ++currentPos_;  // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        // Atomic with relaxed order reason: data race with size loading with no synchronization or ordering
        // constraints
        data_->size.fetch_add(1U, std::memory_order_relaxed);
        return &(new (newItem) PaddedItem())->item;
    }

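    // Returns an item previously obtained from AllocItem() to the pool: the item is
    // destroyed, its slot is pushed onto the free list for reuse, and the allocation
    // counter stored in the reserved slot 0 is decremented.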
    void FreeItem(Item *item)
    {
        ASSERT(item != nullptr);
        PaddedItem *paddedItem = GetPaddedItem(item);
        ASSERT_PRINT(paddedItem != data_, "0 index is reserved by the item pool, but an attempt was made to free it");
        paddedItem->~PaddedItem();
        paddedItem->next = freeList_;
        freeList_ = paddedItem;
        // Atomic with relaxed order reason: data race with size loading with no synchronization or ordering
        // constraints
        data_->size.fetch_sub(1U, std::memory_order_relaxed);
    }

    // Checks only that the item lies within the allocated interval;
    // it does not check whether the item is currently allocated.
    bool IsValidItem(const Item *item) const
    {
        if (UNLIKELY(!IsAligned<alignof(Item)>(ToUintPtr(item)))) {
            return false;
        }
        auto addr = ToUintPtr(item);
        return ToUintPtr(data_) < addr && addr < ToUintPtr(currentPos_);
    }

    inline uint32_t GetIndexByItem(Item *item)
    {
        ASSERT(IsValidItem(item));
        ASSERT(ToUintPtr(item) % PADDED_ITEM_SIZE == 0);

        PaddedItem *paddedItem = GetPaddedItem(item);
        return paddedItem - data_;
    }

    ALWAYS_INLINE size_t Size() const
    {
        // Atomic with relaxed order reason: data race with size with no synchronization or ordering constraints
        return data_->size.load(std::memory_order_relaxed);
    }

    ALWAYS_INLINE size_t Capacity() const
    {
        return currentPos_ - data_;
    }

    ALWAYS_INLINE size_t MaxSize() const
    {
        // 0 index is reserved by the item pool
        return (dataEnd_ - data_) - 1U;
    }

    ALWAYS_INLINE Item *GetItemByIndex(uint32_t idx) const
    {
        ASSERT(idx > 0U);
        ASSERT_PRINT(idx < Capacity(), "index: " << idx << ", capacity: " << Capacity());
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic,cppcoreguidelines-pro-type-union-access)
        return &data_[idx].item;
    }

    NO_COPY_SEMANTIC(ItemsPool);
    NO_MOVE_SEMANTIC(ItemsPool);

private:
    PaddedItem *const data_ {};
    PaddedItem *const dataEnd_ {};
    PaddedItem *currentPos_ {};
    PaddedItem *freeList_ {};

    friend testing::ItemsPoolTest;
};

}  // namespace ark::ets::interop::js::ets_proxy

#endif  // !PANDA_PLUGINS_ETS_RUNTIME_INTEROP_JS_ITEM_POOL_H_
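
// Minimal usage sketch (illustrative only, not part of the runtime sources).
// It assumes a hypothetical 16-byte item type, a sufficiently aligned caller-owned
// buffer, and that helpers::math::GetPowerOfTwoValue32 rounds sizeof(Item) up to a
// power of two (so PADDED_ITEM_SIZE == 16 here). All names below (MyRef, MyPool,
// Example, g_storage) are examples, not identifiers defined by this header.
//
//     #include <cstdint>
//
//     struct MyRef {
//         uint64_t payload[2];  // sizeof(MyRef) == 16
//     };
//
//     using MyPool = ark::ets::interop::js::ets_proxy::ItemsPool<MyRef, 8U>;
//
//     // Buffer aligned to the padded item size; MAX_POOL_SIZE == (1 << 8) * 16 bytes.
//     alignas(16) static uint8_t g_storage[MyPool::MAX_POOL_SIZE];
//
//     void Example()
//     {
//         MyPool pool(g_storage, sizeof(g_storage));
//
//         MyRef *ref = pool.AllocItem();                // nullptr when the pool is exhausted
//         if (ref != nullptr) {
//             uint32_t idx = pool.GetIndexByItem(ref);  // stable index, never 0
//             MyRef *same = pool.GetItemByIndex(idx);   // maps the index back to the slot
//             (void)same;
//             pool.FreeItem(ref);                       // slot is recycled via the free list
//         }
//     }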