/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "debugger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/collector/semi_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "runtime.h"
#include "handle_scope-inl.h"
#include "thread.h"
#include "thread-inl.h"
#include "verify_object-inl.h"

namespace art {
namespace gc {

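// Entry point for all non-large allocations. kInstrumented selects the instrumented path
// (allocation tracking, runtime stats, valgrind); kCheckLargeObject routes large primitive arrays
// to the large object space. The pre_fence_visitor runs on the freshly allocated object before it
// is published, so its initializing writes are visible to any thread that later observes the
// object.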
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
                                                      size_t byte_count, AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  if (kIsDebugBuild) {
    CheckPreconditionsForAllocObject(klass, byte_count);
    // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
    // done in the runnable state where suspension is expected.
    CHECK_EQ(self->GetState(), kRunnable);
    self->AssertThreadSuspensionIsAllowable();
  }
  // Need to check that we aren't the large object allocator since the large object allocation
  // code path calls this function. If we didn't check, we would have an infinite loop.
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    return AllocLargeObject<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
                                                            pre_fence_visitor);
  }
  mirror::Object* obj;
  AllocationTimer alloc_timer(this, &obj);
  size_t bytes_allocated;
  size_t usable_size;
  size_t new_num_bytes_allocated = 0;
  if (allocator == kAllocatorTypeTLAB) {
    byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
  }
  // If we have a thread local allocation we don't need to update bytes allocated.
  if (allocator == kAllocatorTypeTLAB && byte_count <= self->TlabSize()) {
    obj = self->AllocTlab(byte_count);
    DCHECK(obj != nullptr) << "AllocTlab can't fail";
    obj->SetClass(klass);
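    // Under the Brooks read barrier every object carries a forwarding pointer, initialized here
    // to point at the object itself; the assert below validates the read barrier pointer in both
    // the Baker and Brooks configurations.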
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    bytes_allocated = byte_count;
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else {
    obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                              &usable_size);
    if (UNLIKELY(obj == nullptr)) {
      bool is_current_allocator = allocator == GetCurrentAllocator();
      obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
                                   &klass);
      if (obj == nullptr) {
        bool after_is_current_allocator = allocator == GetCurrentAllocator();
        // If there is a pending exception, fail the allocation right away since the next one
        // could cause OOM and abort the runtime.
        if (!self->IsExceptionPending() && is_current_allocator && !after_is_current_allocator) {
          // If the allocator changed, we need to restart the allocation.
          return AllocObject<kInstrumented>(self, klass, byte_count, pre_fence_visitor);
        }
        return nullptr;
      }
    }
    DCHECK_GT(bytes_allocated, 0u);
    DCHECK_GT(usable_size, 0u);
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
      // (Note this if statement will be constant folded away for the fast-path quick entry
      // points.) Because SetClass() has no write barrier, a non-moving space allocation needs an
      // explicit write barrier: the class pointer may point into the bump pointer space (a rare
      // "old-to-young" reference) under the GSS collector with the remembered set enabled. We
      // don't need this for the kAllocatorTypeRosAlloc/DlMalloc cases because we don't directly
      // allocate into the main alloc space (besides promotions) under the SS/GSS collector.
      WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
    }
    pre_fence_visitor(obj, usable_size);
    new_num_bytes_allocated =
        static_cast<size_t>(num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated))
        + bytes_allocated;
  }
  if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
    CHECK_LE(obj->SizeOf(), usable_size);
  }
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, &obj);
  }
  if (kInstrumented) {
    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(klass, bytes_allocated);
    }
  } else {
    DCHECK(!Dbg::IsAllocTrackingEnabled());
  }
  // IsGcConcurrent() isn't known at compile time, so we can optimize by not checking it for
  // the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
  // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
  // the allocator_type should be constant propagated.
  if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj;
}

// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

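// Record a newly allocated object on the allocation stack so the GC can locate it when marking.
// The object is passed by pointer-to-pointer because an overflowing push triggers an internal GC,
// which may move the object.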
inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
  if (kUseThreadLocalAllocationStack) {
    if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(*obj))) {
      PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
    }
  } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(*obj))) {
    PushOnAllocationStackWithInternalGC(self, obj);
  }
}

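// Allocate directly in the large object space. kCheckLargeObject is passed as false so that the
// re-entry into AllocObjectWithAllocator skips the large object check, avoiding the infinite loop
// noted above.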
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class* klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

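// Attempt a single allocation from the given allocator without triggering a GC. Returns nullptr
// if the space is exhausted or the allocation would exceed the allowed footprint; the caller then
// falls back to AllocateInternalWithGc. On success, *bytes_allocated and *usable_size are set.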
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
                                           size_t alloc_size, size_t* bytes_allocated,
                                           size_t* usable_size) {
  if (allocator_type != kAllocatorTypeTLAB &&
      UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
    return nullptr;
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeTLAB: {
      DCHECK_ALIGNED(alloc_size, space::BumpPointerSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, new_tlab_size))) {
          return nullptr;
        }
        // Try allocating a new thread local buffer; if the allocation fails, the space must be
        // full, so return nullptr.
        if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
          return nullptr;
        }
        *bytes_allocated = new_tlab_size;
      } else {
        *bytes_allocated = 0;
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *usable_size = alloc_size;
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

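// RAII timer: when kMeasureAllocationTime is enabled, the destructor adds the elapsed time of a
// successful allocation to Heap::total_allocation_time_. It reads *allocated_obj_ptr_ at scope
// exit to tell whether the allocation succeeded.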
inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
    : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr) {
  if (kMeasureAllocationTime) {
    allocation_start_time_ = NanoTime() / kTimeAdjust;
  }
}

inline Heap::AllocationTimer::~AllocationTimer() {
  if (kMeasureAllocationTime) {
    mirror::Object* allocated_obj = *allocated_obj_ptr_;
    // Only record the time if the allocation succeeded.
    if (allocated_obj != nullptr) {
      uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
      heap_->total_allocation_time_.FetchAndAddSequentiallyConsistent(allocation_end_time -
                                                                      allocation_start_time_);
    }
  }
}

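// Route an allocation to the large object space only for primitive arrays of at least
// large_object_threshold_ bytes; see the comment below for why other types stay in the normal
// spaces.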
inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive objects since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= large_object_threshold_ && c->IsPrimitiveArray();
}

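// Returns true if allocating alloc_size bytes would push the heap past its allowed footprint.
// For concurrent-GC allocators the check is deferred to the concurrent GC; otherwise the heap
// either grows (when kGrow is set, up to growth_limit_) or the allocation fails.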
template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
  size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
      if (!kGrow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
                 << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size)
                 << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

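// Kick off a concurrent GC once the running total of allocated bytes crosses
// concurrent_start_bytes_. The object is saved first because the request may suspend the thread,
// during which a moving GC could relocate the object.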
inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                    mirror::Object** obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    RequestConcurrentGCAndSaveObject(self, obj);
  }
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_