/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "base/time_utils.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/collector/semi_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space-inl.h"
#include "gc/space/rosalloc_space-inl.h"
#include "runtime.h"
#include "handle_scope-inl.h"
#include "thread-inl.h"
#include "utils.h"
#include "verify_object-inl.h"

namespace art {
namespace gc {

template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
                                                      mirror::Class* klass,
                                                      size_t byte_count,
                                                      AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  if (kIsDebugBuild) {
    CheckPreconditionsForAllocObject(klass, byte_count);
    // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
    // done in the runnable state where suspension is expected.
    CHECK_EQ(self->GetState(), kRunnable);
    self->AssertThreadSuspensionIsAllowable();
  }
  // Need to check that we aren't the large object allocator since the large object allocation
  // code path calls this function. If we didn't check, we would have an infinite loop.
  mirror::Object* obj;
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
                                                           pre_fence_visitor);
    if (obj != nullptr) {
      return obj;
    } else {
      // There should be an OOM exception; since we are retrying the allocation, clear it.
      self->ClearException();
    }
    // If the large object allocation failed, try to use the normal spaces (main space,
    // non moving space). This can happen if there is significant virtual address space
    // fragmentation.
  }
  // Bytes allocated for the (individual) object.
  size_t bytes_allocated;
  size_t usable_size;
  size_t new_num_bytes_allocated = 0;
  if (allocator == kAllocatorTypeTLAB || allocator == kAllocatorTypeRegionTLAB) {
    byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
  }
  // If we have a thread local allocation we don't need to update bytes allocated.
  if ((allocator == kAllocatorTypeTLAB || allocator == kAllocatorTypeRegionTLAB) &&
      byte_count <= self->TlabSize()) {
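    // Fast path: the request fits in the current thread-local allocation buffer, so bump the TLAB
    // cursor without touching shared heap state. The TLAB's bytes were already charged against
    // num_bytes_allocated_ in bulk when the buffer itself was acquired.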
    obj = self->AllocTlab(byte_count);
    DCHECK(obj != nullptr) << "AllocTlab can't fail";
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    bytes_allocated = byte_count;
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else if (!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
             (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) &&
             LIKELY(obj != nullptr)) {
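    // Fast path for non-instrumented RosAlloc: the object comes from a thread-local run, so, as
    // with the TLAB path above, num_bytes_allocated_ does not need to be updated here.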
    DCHECK(!is_running_on_memory_tool_);
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else {
    // Bytes allocated that takes bulk thread-local buffer allocations into account.
    size_t bytes_tl_bulk_allocated = 0;
    obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                              &usable_size, &bytes_tl_bulk_allocated);
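    // Slow path: the initial attempt failed, so let AllocateInternalWithGc run garbage collection
    // (and possibly grow the heap) before retrying.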
    if (UNLIKELY(obj == nullptr)) {
      // AllocateInternalWithGc can cause thread suspension; if someone instruments the entrypoints
      // or changes the allocator at a suspension point here, we need to retry the allocation.
      obj = AllocateInternalWithGc(self,
                                   allocator,
                                   kInstrumented,
                                   byte_count,
                                   &bytes_allocated,
                                   &usable_size,
                                   &bytes_tl_bulk_allocated, &klass);
      if (obj == nullptr) {
        // The only way that we can get a null return if there is no pending exception is if the
        // allocator or instrumentation changed.
        if (!self->IsExceptionPending()) {
          // AllocObject will pick up the new allocator type, and instrumented as true is the safe
          // default.
          return AllocObject</*kInstrumented*/true>(self,
                                                    klass,
                                                    byte_count,
                                                    pre_fence_visitor);
        }
        return nullptr;
      }
    }
    DCHECK_GT(bytes_allocated, 0u);
    DCHECK_GT(usable_size, 0u);
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
      // (Note this if statement will be constant folded away for the
      // fast-path quick entry points.) Because SetClass() has no write
      // barrier, a non-moving space allocation needs an explicit write
      // barrier here: the class pointer may point into the bump pointer
      // space (where the class pointer is an "old-to-young" reference,
      // though rare) under the GSS collector with the remembered set
      // enabled. We don't need this for the kAllocatorTypeRosAlloc/DlMalloc
      // cases because we don't directly allocate into the main alloc
      // space (besides promotions) under the SS/GSS collector.
      WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
    }
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
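    // FetchAndAddRelaxed returns the value before the addition, so add bytes_tl_bulk_allocated
    // again to get the new running total used by the concurrent GC trigger check below.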
    new_num_bytes_allocated = static_cast<size_t>(
        num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated)) + bytes_tl_bulk_allocated;
  }
  if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
    CHECK_LE(obj->SizeOf(), usable_size);
  }
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (kInstrumented) {
    if (IsAllocTrackingEnabled()) {
      // allocation_records_ is not null since it never becomes null after allocation tracking is
      // enabled.
      DCHECK(allocation_records_ != nullptr);
      allocation_records_->RecordAllocation(self, &obj, bytes_allocated);
    }
  } else {
    DCHECK(!IsAllocTrackingEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, &obj);
  }
  if (kInstrumented) {
    if (gc_stress_mode_) {
      CheckGcStressMode(self, &obj);
    }
  } else {
    DCHECK(!gc_stress_mode_);
  }
  // IsGcConcurrent() isn't known at compile time, so we can optimize by not checking it for
  // the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
  // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
  // the allocator_type should be constant propagated.
  if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj;
}

// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
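  // Record the newly allocated object on the allocation stack (objects allocated since the last
  // GC). If the thread-local or shared stack is full, the slow paths may perform an internal GC
  // to make room.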
  if (kUseThreadLocalAllocationStack) {
    if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(*obj))) {
      PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
    }
  } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(*obj))) {
    PushOnAllocationStackWithInternalGC(self, obj);
  }
}

template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self,
                                              mirror::Class** klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  // Save and restore the class in case it moves.
  StackHandleScope<1> hs(self);
  auto klass_wrapper = hs.NewHandleWrapper(klass);
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self,
                                           AllocatorType allocator_type,
                                           size_t alloc_size,
                                           size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  if (allocator_type != kAllocatorTypeTLAB &&
      allocator_type != kAllocatorTypeRegionTLAB &&
      allocator_type != kAllocatorTypeRosAlloc &&
      UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
    return nullptr;
  }
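  // The TLAB, region TLAB and RosAlloc cases below perform their own out-of-memory checks, sized
  // to the bulk buffer they may actually reserve rather than to alloc_size alone.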
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
        *bytes_tl_bulk_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        // If running on valgrind or asan, we should be using the instrumented path.
        size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
                                                      max_bytes_tl_bulk_allocated))) {
          return nullptr;
        }
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!is_running_on_memory_tool_);
        size_t max_bytes_tl_bulk_allocated =
            rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
                                                      max_bytes_tl_bulk_allocated))) {
          return nullptr;
        }
        if (!kInstrumented) {
          DCHECK(!rosalloc_space_->CanAllocThreadLocal(self, alloc_size));
        }
        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!is_running_on_memory_tool_);
        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                       bytes_tl_bulk_allocated);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeTLAB: {
      DCHECK_ALIGNED(alloc_size, space::BumpPointerSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
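        // The new TLAB must hold the current request plus kDefaultTLABSize of headroom for
        // subsequent thread-local allocations.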
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, new_tlab_size))) {
          return nullptr;
        }
        // Try allocating a new thread local buffer; if the allocation fails the space must be
        // full, so return null.
        if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
          return nullptr;
        }
        *bytes_tl_bulk_allocated = new_tlab_size;
      } else {
        *bytes_tl_bulk_allocated = 0;
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      *usable_size = alloc_size;
      break;
    }
    case kAllocatorTypeRegion: {
      DCHECK(region_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::RegionSpace::kAlignment);
      ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                  bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeRegionTLAB: {
      DCHECK(region_space_ != nullptr);
      DCHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        if (space::RegionSpace::kRegionSize >= alloc_size) {
          // Non-large. Check OOME for a tlab.
          if (LIKELY(!IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
                                                       space::RegionSpace::kRegionSize))) {
            // Try to allocate a tlab.
            if (!region_space_->AllocNewTlab(self)) {
              // Failed to allocate a tlab. Try non-tlab.
              ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                          bytes_tl_bulk_allocated);
              return ret;
            }
            *bytes_tl_bulk_allocated = space::RegionSpace::kRegionSize;
            // Fall-through.
          } else {
            // Check OOME for a non-tlab allocation.
            if (!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size)) {
              ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                          bytes_tl_bulk_allocated);
              return ret;
            } else {
              // Neither tlab nor non-tlab works. Give up.
              return nullptr;
            }
          }
        } else {
          // Large. Check OOME.
          if (LIKELY(!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
            ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                        bytes_tl_bulk_allocated);
            return ret;
          } else {
            return nullptr;
          }
        }
      } else {
        *bytes_tl_bulk_allocated = 0;  // Allocated in an existing buffer.
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      *usable_size = alloc_size;
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive objects since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}

template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
  size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
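  // Compare the prospective footprint against the soft limit (max_allowed_footprint_) and the
  // hard limit (growth_limit_); only exceeding the hard limit unconditionally reports OOM.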
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
      if (!kGrow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
                 << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size)
                 << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

inline void Heap::CheckConcurrentGC(Thread* self,
                                    size_t new_num_bytes_allocated,
                                    mirror::Object** obj) {
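  // Request a concurrent GC once the running allocation total crosses concurrent_start_bytes_.
  // The object pointer is passed through so it stays valid if the thread suspends while making
  // the request.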
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    RequestConcurrentGCAndSaveObject(self, false, obj);
  }
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_