• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #ifndef PANDA_RUNTIME_MEM_REGION_SPACE_INL_H
16 #define PANDA_RUNTIME_MEM_REGION_SPACE_INL_H
17 
18 #include "runtime/mem/region_space.h"
19 #include "libpandabase/utils/asan_interface.h"
20 
21 namespace panda::mem {
22 
23 class RegionAllocCheck {
24 public:
RegionAllocCheck(Region * region)25     explicit RegionAllocCheck(Region *region) : region_(region)
26     {
27         ASSERT(region_->SetAllocating(true));
28     }
~RegionAllocCheck()29     ~RegionAllocCheck()
30     {
31         ASSERT(region_->SetAllocating(false));
32     }
33     NO_COPY_SEMANTIC(RegionAllocCheck);
34     NO_MOVE_SEMANTIC(RegionAllocCheck);
35 
36 private:
37     Region *region_ FIELD_UNUSED;
38 };
39 
40 class RegionIterateCheck {
41 public:
RegionIterateCheck(Region * region)42     explicit RegionIterateCheck(Region *region) : region_(region)
43     {
44         ASSERT(region_->SetIterating(true));
45     }
~RegionIterateCheck()46     ~RegionIterateCheck()
47     {
48         ASSERT(region_->SetIterating(false));
49     }
50     NO_COPY_SEMANTIC(RegionIterateCheck);
51     NO_MOVE_SEMANTIC(RegionIterateCheck);
52 
53 private:
54     Region *region_ FIELD_UNUSED;
55 };
56 
57 template <bool ATOMIC>
Alloc(size_t alignedSize)58 void *Region::Alloc(size_t alignedSize)
59 {
60     RegionAllocCheck alloc(this);
61     ASSERT(AlignUp(alignedSize, DEFAULT_ALIGNMENT_IN_BYTES) == alignedSize);
62     ASSERT(!IsTLAB());
63     uintptr_t oldTop;
64     uintptr_t newTop;
65     if (ATOMIC) {
66         auto atomicTop = reinterpret_cast<std::atomic<uintptr_t> *>(&top_);
67         // Atomic with relaxed order reason: data race with top_ with no synchronization or ordering constraints imposed
68         // on other reads or writes
69         oldTop = atomicTop->load(std::memory_order_relaxed);
70         do {
71             newTop = oldTop + alignedSize;
72             if (UNLIKELY(newTop > end_)) {
73                 return nullptr;
74             }
75         } while (!atomicTop->compare_exchange_weak(oldTop, newTop, std::memory_order_relaxed));
76         ASAN_UNPOISON_MEMORY_REGION(ToVoidPtr(oldTop), alignedSize);
77         return ToVoidPtr(oldTop);
78     }
79     newTop = top_ + alignedSize;
80     if (UNLIKELY(newTop > end_)) {
81         return nullptr;
82     }
83     oldTop = top_;
84     top_ = newTop;
85 
86     ASAN_UNPOISON_MEMORY_REGION(ToVoidPtr(oldTop), alignedSize);
87     return ToVoidPtr(oldTop);
88 }
89 
90 template <typename ObjectVisitor>
IterateOverObjects(const ObjectVisitor & visitor)91 void Region::IterateOverObjects(const ObjectVisitor &visitor)
92 {
93     // This method doesn't work for nonmovable regions
94     ASSERT(!HasFlag(RegionFlag::IS_NONMOVABLE));
95     // currently just for gc stw phase, so check it is not in allocating state
96     RegionIterateCheck iterate(this);
97     if (!IsTLAB()) {
98         auto curPtr = Begin();
99         auto endPtr = Top();
100         while (curPtr < endPtr) {
101             auto objectHeader = reinterpret_cast<ObjectHeader *>(curPtr);
102             size_t objectSize = GetObjectSize(objectHeader);
103             visitor(objectHeader);
104             curPtr = AlignUp(curPtr + objectSize, DEFAULT_ALIGNMENT_IN_BYTES);
105         }
106     } else {
107         for (auto i : *tlabVector_) {
108             i->IterateOverObjects(visitor);
109         }
110     }
111 }
112 
113 template <OSPagesPolicy OS_PAGES_POLICY>
FreeRegion(Region * region)114 void RegionPool::FreeRegion(Region *region)
115 {
116     bool releasePages = OS_PAGES_POLICY == OSPagesPolicy::IMMEDIATE_RETURN;
117     if (block_.IsAddrInRange(region)) {
118         region->IsYoung() ? spaces_->ReduceYoungOccupiedInSharedPool(region->Size())
119                           : spaces_->ReduceTenuredOccupiedInSharedPool(region->Size());
120         block_.FreeRegion(region, releasePages);
121     } else {
122         region->IsYoung() ? spaces_->FreeYoungPool(region, region->Size(), releasePages)
123                           : spaces_->FreeTenuredPool(region, region->Size(), releasePages);
124     }
125 }
126 
// Remove a region from this space and either cache it for reuse or return it
// to the pool, depending on REGIONS_RELEASE_POLICY.
// Order matters here: poison -> unlink -> counter update -> Destroy -> cache/free.
template <RegionSpace::ReleaseRegionsPolicy REGIONS_RELEASE_POLICY, OSPagesPolicy OS_PAGES_POLICY>
void RegionSpace::FreeRegion(Region *region)
{
    ASSERT(region->GetSpace() == this);
    // Poison the whole payload so stale accesses are caught by ASan.
    ASAN_POISON_MEMORY_REGION(ToVoidPtr(region->Begin()), region->End() - region->Begin());
    // Unlink from the live-regions list before the region is destroyed or reused.
    regions_.erase(region->AsListNode());
    if (region->IsYoung()) {
        // Atomic with relaxed order reason: data race with no synchronization or ordering constraints imposed
        // on other reads or writes
        [[maybe_unused]] auto previousRegionsInUse = youngRegionsInUse_.fetch_sub(1, std::memory_order_relaxed);
        ASSERT(previousRegionsInUse > 0);
    }
    region->Destroy();
    // NOLINTNEXTLINE(readability-braces-around-statements)
    if constexpr (REGIONS_RELEASE_POLICY == ReleaseRegionsPolicy::NoRelease) {
        // Keep the emptied region cached instead of handing it back to the pool.
        if (region->IsYoung()) {
            // unlimited
            emptyYoungRegions_.push_back(region->AsListNode());
            return;
        }
        // Tenured regions are cached only up to emptyTenuredRegionsMaxCount_.
        if (region->HasFlag(RegionFlag::IS_OLD) && (emptyTenuredRegions_.size() < emptyTenuredRegionsMaxCount_)) {
            emptyTenuredRegions_.push_back(region->AsListNode());
            return;
        }
    }
    // Not cached: release the region's memory through the pool.
    regionPool_->FreeRegion<OS_PAGES_POLICY>(region);
}
154 
155 template <RegionFlag REGION_TYPE, OSPagesPolicy OS_PAGES_POLICY>
ReleaseEmptyRegions()156 void RegionSpace::ReleaseEmptyRegions()
157 {
158     auto visitor = [this](Region *region) { regionPool_->FreeRegion<OS_PAGES_POLICY>(region); };
159     if (IsYoungRegionFlag(REGION_TYPE)) {
160         IterateRegionsList(emptyYoungRegions_, visitor);
161         emptyYoungRegions_.clear();
162     } else {
163         IterateRegionsList(emptyTenuredRegions_, visitor);
164         emptyTenuredRegions_.clear();
165     }
166 }
167 
// Apply visitor to every region currently linked into this space
// (thin wrapper over IterateRegionsList on the live-regions list).
template <typename RegionVisitor>
void RegionSpace::IterateRegions(RegionVisitor visitor)
{
    IterateRegionsList(regions_, visitor);
}
173 
174 template <typename RegionVisitor>
IterateRegionsList(DList & regionsList,RegionVisitor visitor)175 void RegionSpace::IterateRegionsList(DList &regionsList, RegionVisitor visitor)
176 {
177     auto it = regionsList.begin();
178     while (it != regionsList.end()) {
179         auto *region = Region::AsRegion(&(*it));
180         ++it;  // increase before visitor which may remove region
181         visitor(region);
182     }
183 }
184 
// Returns true when object lies inside a region of this space.
// CROSS_REGION is forwarded to GetRegion; see its definition for the exact
// lookup semantics.
template <bool CROSS_REGION>
bool RegionSpace::ContainObject(const ObjectHeader *object) const
{
    return GetRegion<CROSS_REGION>(object) != nullptr;
}
190 
191 template <bool CROSS_REGION>
IsLive(const ObjectHeader * object)192 bool RegionSpace::IsLive(const ObjectHeader *object) const
193 {
194     auto *region = GetRegion<CROSS_REGION>(object);
195 
196     // check if the object is live in the range
197     return region != nullptr && region->IsInAllocRange(object);
198 }
199 
200 }  // namespace panda::mem
201 
202 #endif  // PANDA_RUNTIME_MEM_REGION_SPACE_INL_H
203