/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef RUNTIME_MEM_GC_CARD_TABLE_INL_H
#define RUNTIME_MEM_GC_CARD_TABLE_INL_H

#include "runtime/mem/gc/card_table.h"
#include "runtime/include/mem/panda_containers.h"

#include <algorithm>
#include <array>
#include <atomic>
#include <cstring>

namespace panda::mem {

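// Appends to *ranges the memory ranges covered by the marked cards in [start_card, end_card].
// Short intervals are scanned card by card; longer ones are split recursively, and a memcmp
// against a zeroed buffer lets whole sub-intervals with no marked cards be skipped in one comparison.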
inline void CardTable::FillRanges(PandaVector<MemRange> *ranges, const Card *start_card, const Card *end_card)
{
    constexpr size_t MIN_RANGE = 32;
    constexpr size_t MAX_CARDS_COUNT = 1000;  // How many cards we can process at once
    static std::array<char, MAX_CARDS_COUNT> zero_array {};

    if (static_cast<size_t>(end_card - start_card) < MIN_RANGE) {
        for (auto card_ptr = start_card; card_ptr <= end_card; card_ptr++) {
            if (card_ptr->IsMarked()) {
                ranges->emplace_back(min_address_ + (card_ptr - cards_) * CARD_SIZE,
                                     min_address_ + (card_ptr - cards_ + 1) * CARD_SIZE - 1);
            }
        }
    } else {
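        // Large interval: split it and recurse only into sub-intervals that contain a non-zero (marked) card.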
        size_t diff = end_card - start_card + 1;
        size_t split_size = std::min(diff / 2, MAX_CARDS_COUNT);  // halve the interval (capped at MAX_CARDS_COUNT)
        if (memcmp(start_card, &zero_array, split_size) != 0) {
            FillRanges(ranges, start_card, ToNativePtr<Card>(ToUintPtr(start_card) + split_size - 1));
        }
        // NOLINTNEXTLINE(bugprone-branch-clone)
        if (diff - split_size > MAX_CARDS_COUNT) {
            FillRanges(ranges, ToNativePtr<Card>(ToUintPtr(start_card) + split_size), end_card);
        } else if (memcmp(ToNativePtr<Card>(ToUintPtr(start_card) + split_size), &zero_array, diff - split_size) != 0) {
            FillRanges(ranges, ToNativePtr<Card>(ToUintPtr(start_card) + split_size), end_card);
        }
    }
}

// Make sure we can treat size_t as a lock-free atomic
static_assert(std::atomic_size_t::is_always_lock_free);
static_assert(sizeof(std::atomic_size_t) == sizeof(size_t));

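// Calls card_visitor on the memory range of every card selected by processed_flag. The table is
// scanned in word-sized chunks with relaxed atomic loads, so a run of clear cards is skipped with
// a single compare against zero; only non-zero chunks are examined card by card.
//
// Illustrative use only (the visitor body is a hypothetical sketch):
//   card_table->VisitMarked([](const MemRange &range) { /* re-scan objects in this range */ },
//                           CardTableProcessedFlag::VISIT_MARKED | CardTableProcessedFlag::SET_PROCESSED);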
template <typename CardVisitor>
void CardTable::VisitMarked(CardVisitor card_visitor, uint32_t processed_flag)
{
    bool visit_marked = processed_flag & CardTableProcessedFlag::VISIT_MARKED;
    bool visit_processed = processed_flag & CardTableProcessedFlag::VISIT_PROCESSED;
    bool set_processed = processed_flag & CardTableProcessedFlag::SET_PROCESSED;
    static_assert(sizeof(std::atomic_size_t) % sizeof(Card) == 0);
    constexpr size_t chunk_card_num = sizeof(std::atomic_size_t) / sizeof(Card);
    auto *card = cards_;
    auto *card_end = cards_ + (cards_count_ / chunk_card_num) * chunk_card_num;
    while (card < card_end) {
        // NB! In general, a wide load and a narrow store to overlapping memory at different addresses are
        // allowed to be reordered. This optimization is currently allowed because an additional VisitMarked
        // is called after concurrent mark with the global Mutator lock held, so all writes made by managed
        // threads before that point are visible to the GC thread.
        // Atomic with relaxed order reason: data race with card with no synchronization or ordering constraints
        // imposed on other reads or writes
        if (LIKELY((reinterpret_cast<std::atomic_size_t *>(card))->load(std::memory_order_relaxed) == 0)) {
            // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
            card += chunk_card_num;
            continue;
        }
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        auto *chunk_end = card + chunk_card_num;
        while (card < chunk_end) {
            if (!(visit_marked && card->IsMarked()) && !(visit_processed && card->IsProcessed())) {
                // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                ++card;
                continue;
            }

            if (set_processed) {
                card->SetProcessed();
            }
            card_visitor(GetMemoryRange(card));
            // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
            ++card;
        }
    }
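    // Handle the tail: the remaining cards_count_ % chunk_card_num cards, which do not fill a whole chunk.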
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    for (; card < cards_ + cards_count_; ++card) {
        if ((visit_marked && card->IsMarked()) || (visit_processed && card->IsProcessed())) {
            if (set_processed) {
                card->SetProcessed();
            }
            card_visitor(GetMemoryRange(card));
        }
    }
}

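// Visits marked cards in batches: FillRanges gathers the memory ranges of up to MAX_CARDS_COUNT
// cards at a time, and every collected range is then passed to card_visitor. Scanning stops early
// once the next card's start address lies beyond the highest object address of the pool.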
template <typename CardVisitor>
void CardTable::VisitMarkedCompact(CardVisitor card_visitor)
{
    constexpr size_t MAX_CARDS_COUNT = 1000;
    size_t cur_pos = 0;
    size_t end_pos = 0;
    PandaVector<MemRange> mem_ranges;

    ASSERT(cards_count_ > 0);
    auto max_pool_address = PoolManager::GetMmapMemPool()->GetMaxObjectAddress();
    while (cur_pos < cards_count_) {
        end_pos = std::min(cur_pos + MAX_CARDS_COUNT - 1, cards_count_ - 1);
        FillRanges(&mem_ranges, &cards_[cur_pos], &cards_[end_pos]);
        cur_pos = end_pos + 1;
        if (GetCardStartAddress(&cards_[cur_pos]) > max_pool_address) {
            break;
        }
    }
    for (const auto &mem_range : mem_ranges) {
        card_visitor(mem_range);
    }
}

} // namespace panda::mem

#endif // RUNTIME_MEM_GC_CARD_TABLE_INL_H