1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #ifndef ART_RUNTIME_GC_ACCOUNTING_SPACE_BITMAP_INL_H_
18 #define ART_RUNTIME_GC_ACCOUNTING_SPACE_BITMAP_INL_H_
19
20 #include "space_bitmap.h"
21
22 #include <memory>
23
24 #include <android-base/logging.h>
25
26 #include "base/atomic.h"
27 #include "base/bit_utils.h"
28
29 namespace art HIDDEN {
30 namespace gc {
31 namespace accounting {
32
33 template<size_t kAlignment>
AtomicTestAndSet(const mirror::Object * obj)34 inline bool SpaceBitmap<kAlignment>::AtomicTestAndSet(const mirror::Object* obj) {
35 DCHECK(obj != nullptr);
36 uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
37 DCHECK_GE(addr, heap_begin_);
38 const uintptr_t offset = addr - heap_begin_;
39 const size_t index = OffsetToIndex(offset);
40 const uintptr_t mask = OffsetToMask(offset);
41 Atomic<uintptr_t>* atomic_entry = &bitmap_begin_[index];
42 DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_;
43 uintptr_t old_word;
44 do {
45 old_word = atomic_entry->load(std::memory_order_relaxed);
46 // Fast path: The bit is already set.
47 if ((old_word & mask) != 0) {
48 DCHECK(Test(obj));
49 return true;
50 }
51 } while (!atomic_entry->CompareAndSetWeakRelaxed(old_word, old_word | mask));
52 DCHECK(Test(obj));
53 return false;
54 }
55
56 template<size_t kAlignment>
Test(const mirror::Object * obj)57 inline bool SpaceBitmap<kAlignment>::Test(const mirror::Object* obj) const {
58 uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
59 DCHECK(HasAddress(obj)) << obj;
60 DCHECK(bitmap_begin_ != nullptr);
61 DCHECK_GE(addr, heap_begin_);
62 const uintptr_t offset = addr - heap_begin_;
63 size_t index = OffsetToIndex(offset);
64 return (bitmap_begin_[index].load(std::memory_order_relaxed) & OffsetToMask(offset)) != 0;
65 }
66
// Returns the highest-addressed marked object whose start address lies in
// [visit_end, visit_begin] (inclusive on both ends; the scan walks backwards
// from visit_begin down to visit_end). Returns nullptr if no bit in the range
// is set.
template<size_t kAlignment>
inline mirror::Object* SpaceBitmap<kAlignment>::FindPrecedingObject(uintptr_t visit_begin,
                                                                    uintptr_t visit_end) const {
  // Covers [visit_end, visit_begin].
  visit_end = std::max(heap_begin_, visit_end);
  DCHECK_LE(visit_end, visit_begin);
  DCHECK_LT(visit_begin, HeapLimit());

  const uintptr_t offset_start = visit_begin - heap_begin_;
  const uintptr_t offset_end = visit_end - heap_begin_;
  uintptr_t index_start = OffsetToIndex(offset_start);
  const uintptr_t index_end = OffsetToIndex(offset_end);

  // Start with the right edge
  uintptr_t word = bitmap_begin_[index_start].load(std::memory_order_relaxed);
  // visit_begin could be the first word of the object we are looking for.
  // Keep only bits at or below visit_begin's bit position within this word.
  const uintptr_t right_edge_mask = OffsetToMask(offset_start);
  word &= right_edge_mask | (right_edge_mask - 1);
  // Walk words from index_start downwards (exclusive of index_end), returning
  // the object for the highest set bit found in the first non-zero word.
  while (index_start > index_end) {
    if (word != 0) {
      const uintptr_t ptr_base = IndexToOffset(index_start) + heap_begin_;
      // Highest set bit == object with the largest start address in this word.
      size_t pos_leading_set_bit = kBitsPerIntPtrT - CLZ(word) - 1;
      return reinterpret_cast<mirror::Object*>(ptr_base + pos_leading_set_bit * kAlignment);
    }
    word = bitmap_begin_[--index_start].load(std::memory_order_relaxed);
  }

  // Final (lowest) word: discard bits strictly below visit_end's bit position,
  // keeping the bit at visit_end itself since the range is inclusive.
  word &= ~(OffsetToMask(offset_end) - 1);
  if (word != 0) {
    const uintptr_t ptr_base = IndexToOffset(index_end) + heap_begin_;
    size_t pos_leading_set_bit = kBitsPerIntPtrT - CLZ(word) - 1;
    return reinterpret_cast<mirror::Object*>(ptr_base + pos_leading_set_bit * kAlignment);
  } else {
    return nullptr;
  }
}
103
// Invokes `visitor` on every marked object whose start address lies in
// [visit_begin, visit_end), scanning from lowest to highest address.
// If kVisitOnce is true, returns after visiting the first marked object.
template<size_t kAlignment>
template<bool kVisitOnce, typename Visitor>
inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin,
                                                      uintptr_t visit_end,
                                                      Visitor&& visitor) const {
  DCHECK_LE(visit_begin, visit_end);
#if 0
  // Disabled reference implementation: test every aligned address in the range
  // individually. Kept to document the intended semantics of the word-at-a-time
  // version below.
  for (uintptr_t i = visit_begin; i < visit_end; i += kAlignment) {
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(i);
    if (Test(obj)) {
      visitor(obj);
    }
  }
#else
  DCHECK_LE(heap_begin_, visit_begin);
  DCHECK_LT(visit_begin, HeapLimit());
  DCHECK_LE(visit_end, HeapLimit());

  const uintptr_t offset_start = visit_begin - heap_begin_;
  const uintptr_t offset_end = visit_end - heap_begin_;

  const uintptr_t index_start = OffsetToIndex(offset_start);
  const uintptr_t index_end = OffsetToIndex(offset_end);

  // Bit positions of visit_begin/visit_end within their respective words.
  const size_t bit_start = (offset_start / kAlignment) % kBitsPerIntPtrT;
  const size_t bit_end = (offset_end / kAlignment) % kBitsPerIntPtrT;

  // Index(begin) ... Index(end)
  // [xxxxx???][........][????yyyy]
  // ^ ^
  // | #---- Bit of visit_end
  // #---- Bit of visit_begin
  //

  // Left edge.
  // NOTE(review): this is an implicit (default memory order) atomic load,
  // unlike the explicit relaxed loads elsewhere in this file — presumably
  // unintentional; confirm before changing.
  uintptr_t left_edge = bitmap_begin_[index_start];
  // Mark of lower bits that are not in range.
  left_edge &= ~((static_cast<uintptr_t>(1) << bit_start) - 1);

  // Right edge. Either unique, or left_edge.
  uintptr_t right_edge;

  if (index_start < index_end) {
    // Left edge != right edge.

    // Traverse left edge.
    if (left_edge != 0) {
      const uintptr_t ptr_base = IndexToOffset(index_start) + heap_begin_;
      // Iterate on the bits set in the left-edge word, least to most
      // significant, clearing each visited bit.
      do {
        const size_t shift = CTZ(left_edge);
        mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
        visitor(obj);
        if (kVisitOnce) {
          return;
        }
        left_edge ^= (static_cast<uintptr_t>(1)) << shift;
      } while (left_edge != 0);
    }

    // Traverse the middle, full part.
    for (size_t i = index_start + 1; i < index_end; ++i) {
      uintptr_t w = bitmap_begin_[i].load(std::memory_order_relaxed);
      if (w != 0) {
        const uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
        // Iterate on the bits set in word `w`, from the least to the most significant bit.
        do {
          const size_t shift = CTZ(w);
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
          visitor(obj);
          if (kVisitOnce) {
            return;
          }
          w ^= (static_cast<uintptr_t>(1)) << shift;
        } while (w != 0);
      }
    }

    // Right edge is unique.
    // But maybe we don't have anything to do: visit_end starts in a new word...
    if (bit_end == 0) {
      // Do not read memory, as it could be after the end of the bitmap.
      right_edge = 0;
    } else {
      // NOTE(review): implicit atomic load, same as the left edge above.
      right_edge = bitmap_begin_[index_end];
    }
  } else {
    // Right edge = left edge.
    right_edge = left_edge;
  }

  // Right edge handling: drop bits at or above visit_end (exclusive bound).
  right_edge &= ((static_cast<uintptr_t>(1) << bit_end) - 1);
  if (right_edge != 0) {
    const uintptr_t ptr_base = IndexToOffset(index_end) + heap_begin_;
    // Iterate on the bits set in word `right_edge`, from the least to the most significant bit.
    do {
      const size_t shift = CTZ(right_edge);
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
      visitor(obj);
      if (kVisitOnce) {
        return;
      }
      right_edge ^= (static_cast<uintptr_t>(1)) << shift;
    } while (right_edge != 0);
  }
#endif
}
211
212 template<size_t kAlignment>
213 template<typename Visitor>
Walk(Visitor && visitor)214 void SpaceBitmap<kAlignment>::Walk(Visitor&& visitor) {
215 CHECK(bitmap_begin_ != nullptr);
216
217 uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
218 Atomic<uintptr_t>* bitmap_begin = bitmap_begin_;
219 for (uintptr_t i = 0; i <= end; ++i) {
220 uintptr_t w = bitmap_begin[i].load(std::memory_order_relaxed);
221 if (w != 0) {
222 uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
223 do {
224 const size_t shift = CTZ(w);
225 mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
226 visitor(obj);
227 w ^= (static_cast<uintptr_t>(1)) << shift;
228 } while (w != 0);
229 }
230 }
231 }
232
233 template<size_t kAlignment>
234 template<bool kSetBit>
Modify(const mirror::Object * obj)235 inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) {
236 DCHECK(obj != nullptr);
237 uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
238 DCHECK_GE(addr, heap_begin_);
239 DCHECK(HasAddress(obj)) << obj;
240 const uintptr_t offset = addr - heap_begin_;
241 const size_t index = OffsetToIndex(offset);
242 const uintptr_t mask = OffsetToMask(offset);
243 DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_;
244 Atomic<uintptr_t>* atomic_entry = &bitmap_begin_[index];
245 uintptr_t old_word = atomic_entry->load(std::memory_order_relaxed);
246 if (kSetBit) {
247 // Check the bit before setting the word incase we are trying to mark a read only bitmap
248 // like an image space bitmap. This bitmap is mapped as read only and will fault if we
249 // attempt to change any words. Since all of the objects are marked, this will never
250 // occur if we check before setting the bit. This also prevents dirty pages that would
251 // occur if the bitmap was read write and we did not check the bit.
252 if ((old_word & mask) == 0) {
253 atomic_entry->store(old_word | mask, std::memory_order_relaxed);
254 }
255 } else {
256 atomic_entry->store(old_word & ~mask, std::memory_order_relaxed);
257 }
258 DCHECK_EQ(Test(obj), kSetBit);
259 return (old_word & mask) != 0;
260 }
261
262 template<size_t kAlignment>
263 inline std::ostream& operator << (std::ostream& stream, const SpaceBitmap<kAlignment>& bitmap) {
264 return stream
265 << bitmap.GetName() << "["
266 << "begin=" << reinterpret_cast<const void*>(bitmap.HeapBegin())
267 << ",end=" << reinterpret_cast<const void*>(bitmap.HeapLimit())
268 << "]";
269 }
270
271 } // namespace accounting
272 } // namespace gc
273 } // namespace art
274
275 #endif // ART_RUNTIME_GC_ACCOUNTING_SPACE_BITMAP_INL_H_
276