/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_

#include "region_space.h"

#include "base/mutex-inl.h"
#include "mirror/object-inl.h"
#include "region_space.h"
#include "thread-current-inl.h"

namespace art {
namespace gc {
namespace space {

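// Thread-safe allocation entry point: round the request up to kAlignment and
// delegate to the non-virtual fast path (the thread argument is unused here).
// Call shape (sketch; the concrete call sites live in the heap/collector code):
//   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
//   mirror::Object* obj = region_space->Alloc(
//       self, size, &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated);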
inline mirror::Object* RegionSpace::Alloc(Thread* self ATTRIBUTE_UNUSED,
                                          size_t num_bytes,
                                          /* out */ size_t* bytes_allocated,
                                          /* out */ size_t* usable_size,
                                          /* out */ size_t* bytes_tl_bulk_allocated) {
  num_bytes = RoundUp(num_bytes, kAlignment);
  return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
}

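// Variant intended for use while the world is stopped: it only asserts exclusive
// ownership of the mutator lock before forwarding to Alloc().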
inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self,
                                                      size_t num_bytes,
                                                      /* out */ size_t* bytes_allocated,
                                                      /* out */ size_t* usable_size,
                                                      /* out */ size_t* bytes_tl_bulk_allocated) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}

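// Allocation fast path: try the current (or evacuation) region without taking
// region_lock_, retry under the lock in case another thread installed a new
// region, and finally allocate a fresh region. Requests larger than kRegionSize
// go through AllocLarge instead.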
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes,
                                                    /* out */ size_t* bytes_allocated,
                                                    /* out */ size_t* usable_size,
                                                    /* out */ size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  mirror::Object* obj;
  if (LIKELY(num_bytes <= kRegionSize)) {
    // Non-large object.
    obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
                                                             bytes_allocated,
                                                             usable_size,
                                                             bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    MutexLock mu(Thread::Current(), region_lock_);
    // Retry with current region since another thread may have updated
    // current_region_ or evac_region_. TODO: fix race.
    obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
                                                             bytes_allocated,
                                                             usable_size,
                                                             bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    Region* r = AllocateRegion(kForEvac);
    if (LIKELY(r != nullptr)) {
      obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
      CHECK(obj != nullptr);
      // Do our allocation before setting the region, this makes sure no threads race ahead
      // and fill in the region before we allocate the object. b/63153464
      if (kForEvac) {
        evac_region_ = r;
      } else {
        current_region_ = r;
      }
      return obj;
    }
  } else {
    // Large object.
    obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
  }
  return nullptr;
}

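// Bump-pointer allocation within a single region, implemented as a relaxed
// compare-and-swap loop on top_ so that multiple threads can allocate from the
// same region concurrently.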
inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes,
                                                  /* out */ size_t* bytes_allocated,
                                                  /* out */ size_t* usable_size,
                                                  /* out */ size_t* bytes_tl_bulk_allocated) {
  DCHECK(IsAllocated() && IsInToSpace());
  DCHECK_ALIGNED(num_bytes, kAlignment);
  uint8_t* old_top;
  uint8_t* new_top;
  do {
    old_top = top_.load(std::memory_order_relaxed);
    new_top = old_top + num_bytes;
    if (UNLIKELY(new_top > end_)) {
      return nullptr;
    }
  } while (!top_.CompareAndSetWeakRelaxed(old_top, new_top));
  objects_allocated_.fetch_add(1, std::memory_order_relaxed);
  DCHECK_LE(Top(), end_);
  DCHECK_LT(old_top, end_);
  DCHECK_LE(new_top, end_);
  *bytes_allocated = num_bytes;
  if (usable_size != nullptr) {
    *usable_size = num_bytes;
  }
  *bytes_tl_bulk_allocated = num_bytes;
  return reinterpret_cast<mirror::Object*>(old_top);
}

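// Sums BytesAllocated() over every non-free region that matches kRegionType,
// holding region_lock_ so the region table does not change during the walk.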
template<RegionSpace::RegionType kRegionType>
inline uint64_t RegionSpace::GetBytesAllocatedInternal() {
  uint64_t bytes = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        bytes += r->BytesAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type : " << kRegionType;
    }
  }
  return bytes;
}

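// Same traversal as GetBytesAllocatedInternal(), but accumulating per-region
// object counts rather than byte counts.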
template<RegionSpace::RegionType kRegionType>
inline uint64_t RegionSpace::GetObjectsAllocatedInternal() {
  uint64_t objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        objects += r->ObjectsAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type : " << kRegionType;
    }
  }
  return objects;
}

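// Visits, via the given bitmap, the marked objects in unevacuated from-space
// regions. With the table-lookup read barrier every region is considered;
// otherwise the scan stops at non_free_region_index_limit_, beyond which
// regions are expected to be free.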
template <typename Visitor>
inline void RegionSpace::ScanUnevacFromSpace(accounting::ContinuousSpaceBitmap* bitmap,
                                             Visitor&& visitor) {
  const size_t iter_limit = kUseTableLookupReadBarrier
      ? num_regions_ : std::min(num_regions_, non_free_region_index_limit_);
  // Instead of a region-wise scan, find contiguous blocks of un-evac regions and then
  // visit them. Everything before visit_block_begin has been processed, while
  // [visit_block_begin, visit_block_end) still needs to be visited.
  uint8_t* visit_block_begin = nullptr;
  uint8_t* visit_block_end = nullptr;
  for (size_t i = 0; i < iter_limit; ++i) {
    Region* r = &regions_[i];
    if (r->IsInUnevacFromSpace()) {
      // visit_block_begin set to nullptr means a new visit block needs to be started.
      if (visit_block_begin == nullptr) {
        visit_block_begin = r->Begin();
      }
      visit_block_end = r->End();
    } else if (visit_block_begin != nullptr) {
      // Visit the block range as r is not adjacent to the current visit block.
      bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(visit_block_begin),
                               reinterpret_cast<uintptr_t>(visit_block_end),
                               visitor);
      visit_block_begin = nullptr;
    }
  }
  // Visit the last block, if not processed yet.
  if (visit_block_begin != nullptr) {
    bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(visit_block_begin),
                             reinterpret_cast<uintptr_t>(visit_block_end),
                             visitor);
  }
}

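// Visits every object in the space (or only to-space objects when kToSpaceOnly):
// a large region yields a single object, large tails are skipped, and ordinary
// regions are handed to WalkNonLargeRegion.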
template<bool kToSpaceOnly, typename Visitor>
inline void RegionSpace::WalkInternal(Visitor&& visitor) {
  // TODO: MutexLock on region_lock_ won't work due to lock order
  // issues (the classloader classes lock and the monitor lock). We
  // call this with threads suspended.
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree() || (kToSpaceOnly && !r->IsInToSpace())) {
      continue;
    }
    if (r->IsLarge()) {
      // We may visit a large object with live_bytes = 0 here. However, it is
      // safe as it cannot contain dangling pointers because corresponding regions
      // (and regions corresponding to dead referents) cannot be allocated for new
      // allocations without first clearing regions' live_bytes and state.
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
      DCHECK(obj->GetClass() != nullptr);
      visitor(obj);
    } else if (r->IsLargeTail()) {
      // Do nothing.
    } else {
      WalkNonLargeRegion(visitor, r);
    }
  }
}

template<typename Visitor>
inline void RegionSpace::WalkNonLargeRegion(Visitor&& visitor, const Region* r) {
  DCHECK(!r->IsLarge() && !r->IsLargeTail());
  // For newly allocated and evacuated regions, live bytes will be -1.
  uint8_t* pos = r->Begin();
  uint8_t* top = r->Top();
  // We need the region space bitmap to iterate over a region's objects
  // if
  // - its live bytes count is invalid (i.e. -1); or
  // - its live bytes count is lower than the allocated bytes count.
  //
  // In both of the previous cases, we do not have the guarantee that
  // all allocated objects are "alive" (i.e. valid), so we depend on
  // the region space bitmap to identify which ones to visit.
  //
  // On the other hand, when all allocated bytes are known to be alive,
  // we know that they form a range of consecutive objects (modulo
  // object alignment constraints) that can be visited iteratively: we
  // can compute the next object's location by using the current
  // object's address and size (and object alignment constraints).
  const bool need_bitmap =
      r->LiveBytes() != static_cast<size_t>(-1) &&
      r->LiveBytes() != static_cast<size_t>(top - pos);
  if (need_bitmap) {
    GetLiveBitmap()->VisitMarkedRange(
        reinterpret_cast<uintptr_t>(pos),
        reinterpret_cast<uintptr_t>(top),
        visitor);
  } else {
    while (pos < top) {
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
      if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
        visitor(obj);
        pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
      } else {
        break;
      }
    }
  }
}

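// Thin wrappers that select whether the walk covers all regions or only to-space.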
template <typename Visitor>
inline void RegionSpace::Walk(Visitor&& visitor) {
  WalkInternal</* kToSpaceOnly= */ false>(visitor);
}

template <typename Visitor>
inline void RegionSpace::WalkToSpace(Visitor&& visitor) {
  WalkInternal</* kToSpaceOnly= */ true>(visitor);
}

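// Returns the address just past `obj`, rounded up to kAlignment; this is where
// the next object in a bump-pointer region would start.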
inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

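// Allocates an object spanning one or more whole regions. With cyclic region
// allocation enabled, the search starts at cyclic_alloc_region_index_ and wraps
// around, rotating where consecutive large allocations land in the space.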
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
                                               /* out */ size_t* bytes_allocated,
                                               /* out */ size_t* usable_size,
                                               /* out */ size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  DCHECK_GT(num_bytes, kRegionSize);
  size_t num_regs_in_large_region = RoundUp(num_bytes, kRegionSize) / kRegionSize;
  DCHECK_GT(num_regs_in_large_region, 0U);
  DCHECK_LT((num_regs_in_large_region - 1) * kRegionSize, num_bytes);
  DCHECK_LE(num_bytes, num_regs_in_large_region * kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  if (!kForEvac) {
    // Retain sufficient free regions for full evacuation.
    if ((num_non_free_regions_ + num_regs_in_large_region) * 2 > num_regions_) {
      return nullptr;
    }
  }

  mirror::Object* region = nullptr;
  // Find a large enough set of contiguous free regions.
  if (kCyclicRegionAllocation) {
    size_t next_region = -1;
    // Try to find a range of free regions within [cyclic_alloc_region_index_, num_regions_).
    region = AllocLargeInRange<kForEvac>(cyclic_alloc_region_index_,
                                         num_regions_,
                                         num_regs_in_large_region,
                                         bytes_allocated,
                                         usable_size,
                                         bytes_tl_bulk_allocated,
                                         &next_region);

    if (region == nullptr) {
      DCHECK_EQ(next_region, static_cast<size_t>(-1));
      // If the previous attempt failed, try to find a range of free regions within
      // [0, min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_)).
      region = AllocLargeInRange<kForEvac>(
          0,
          std::min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_),
          num_regs_in_large_region,
          bytes_allocated,
          usable_size,
          bytes_tl_bulk_allocated,
          &next_region);
    }

    if (region != nullptr) {
      DCHECK_LT(0u, next_region);
      DCHECK_LE(next_region, num_regions_);
      // Move the cyclic allocation region marker to the region
      // following the large region that was just allocated.
      cyclic_alloc_region_index_ = next_region % num_regions_;
    }
  } else {
    // Try to find a range of free regions within [0, num_regions_).
    region = AllocLargeInRange<kForEvac>(0,
                                         num_regions_,
                                         num_regs_in_large_region,
                                         bytes_allocated,
                                         usable_size,
                                         bytes_tl_bulk_allocated);
  }
  if (kForEvac && region != nullptr) {
    TraceHeapSize();
  }
  return region;
}

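// Scans [begin, end) for num_regs_in_large_region contiguous free regions. On
// success, the first region is marked as a large region, the rest as large
// tails, and the index just past the block is reported through next_region
// (when non-null).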
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocLargeInRange(size_t begin,
                                                      size_t end,
                                                      size_t num_regs_in_large_region,
                                                      /* out */ size_t* bytes_allocated,
                                                      /* out */ size_t* usable_size,
                                                      /* out */ size_t* bytes_tl_bulk_allocated,
                                                      /* out */ size_t* next_region) {
  DCHECK_LE(0u, begin);
  DCHECK_LT(begin, end);
  DCHECK_LE(end, num_regions_);
  size_t left = begin;
  while (left + num_regs_in_large_region - 1 < end) {
    bool found = true;
    size_t right = left;
    DCHECK_LT(right, left + num_regs_in_large_region)
        << "The inner loop should iterate at least once";
    while (right < left + num_regs_in_large_region) {
      if (regions_[right].IsFree()) {
        ++right;
        // Ensure `right` is not going beyond the past-the-end index of the region space.
        DCHECK_LE(right, num_regions_);
      } else {
        found = false;
        break;
      }
    }
    if (found) {
      // `right` points to the one region past the last free region.
      DCHECK_EQ(left + num_regs_in_large_region, right);
      Region* first_reg = &regions_[left];
      DCHECK(first_reg->IsFree());
      first_reg->UnfreeLarge(this, time_);
      if (kForEvac) {
        ++num_evac_regions_;
      } else {
        ++num_non_free_regions_;
      }
      size_t allocated = num_regs_in_large_region * kRegionSize;
      // We make 'top' all usable bytes, as the caller of this
      // allocation may use all of 'usable_size' (see mirror::Array::Alloc).
      first_reg->SetTop(first_reg->Begin() + allocated);
      if (!kForEvac) {
        // Evac doesn't count as newly allocated.
        first_reg->SetNewlyAllocated();
      }
      for (size_t p = left + 1; p < right; ++p) {
        DCHECK_LT(p, num_regions_);
        DCHECK(regions_[p].IsFree());
        regions_[p].UnfreeLargeTail(this, time_);
        if (kForEvac) {
          ++num_evac_regions_;
        } else {
          ++num_non_free_regions_;
        }
        if (!kForEvac) {
          // Evac doesn't count as newly allocated.
          regions_[p].SetNewlyAllocated();
        }
      }
      *bytes_allocated = allocated;
      if (usable_size != nullptr) {
        *usable_size = allocated;
      }
      *bytes_tl_bulk_allocated = allocated;
      mirror::Object* large_region = reinterpret_cast<mirror::Object*>(first_reg->Begin());
      DCHECK(large_region != nullptr);
      if (next_region != nullptr) {
        // Return the index to the region next to the allocated large region via `next_region`.
        *next_region = right;
      }
      return large_region;
    } else {
      // `right` points to the non-free region. Start with the one after it.
      left = right + 1;
    }
  }
  return nullptr;
}

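// Releases every region backing a large object: the head region and its tails
// are cleared, and the evacuation / non-free region counters are decremented
// accordingly.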
template<bool kForEvac>
inline void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
  DCHECK(Contains(large_obj));
  DCHECK_ALIGNED(large_obj, kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
  uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
  CHECK_LT(begin_addr, end_addr);
  for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
    Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
    if (addr == begin_addr) {
      DCHECK(reg->IsLarge());
    } else {
      DCHECK(reg->IsLargeTail());
    }
    reg->Clear(/*zero_and_release_pages=*/true);
    if (kForEvac) {
      --num_evac_regions_;
    } else {
      --num_non_free_regions_;
    }
  }
  if (kIsDebugBuild && end_addr < Limit()) {
    // If we aren't at the end of the space, check that the next region is not a large tail.
    Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
    DCHECK(!following_reg->IsLargeTail());
  }
}

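// A large region reports all bytes up to Top(), a large tail reports zero, and
// an ordinary allocated region reports its bump-pointer usage (or the TLAB end
// when the region is serving as a thread-local allocation buffer).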
inline size_t RegionSpace::Region::BytesAllocated() const {
  if (IsLarge()) {
    DCHECK_LT(begin_ + kRegionSize, Top());
    return static_cast<size_t>(Top() - begin_);
  } else if (IsLargeTail()) {
    DCHECK_EQ(begin_, Top());
    return 0;
  } else {
    DCHECK(IsAllocated()) << "state=" << state_;
    DCHECK_LE(begin_, Top());
    size_t bytes;
    if (is_a_tlab_) {
      bytes = thread_->GetTlabEnd() - begin_;
    } else {
      bytes = static_cast<size_t>(Top() - begin_);
    }
    DCHECK_LE(bytes, kRegionSize);
    return bytes;
  }
}

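// A large region counts as a single object and a large tail as zero; otherwise
// the counter maintained by Region::Alloc is returned.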
inline size_t RegionSpace::Region::ObjectsAllocated() const {
  if (IsLarge()) {
    DCHECK_LT(begin_ + kRegionSize, Top());
    DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U);
    return 1;
  } else if (IsLargeTail()) {
    DCHECK_EQ(begin_, Top());
    DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U);
    return 0;
  } else {
    DCHECK(IsAllocated()) << "state=" << state_;
    return objects_allocated_;
  }
}

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_