/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dlmalloc_space-inl.h"

#include "base/time_utils.h"
#include "gc/accounting/card_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "memory_tool_malloc_space-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"

namespace art {
namespace gc {
namespace space {

static constexpr bool kPrefetchDuringDlMallocFreeList = true;
39

DlMallocSpace::DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
                             void* mspace, uint8_t* begin, uint8_t* end, uint8_t* limit,
                             size_t growth_limit, bool can_move_objects, size_t starting_size)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
                  starting_size, initial_size),
      mspace_(mspace) {
  CHECK(mspace != nullptr);
}

DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                               size_t starting_size, size_t initial_size,
                                               size_t growth_limit, size_t capacity,
                                               bool can_move_objects) {
  DCHECK(mem_map != nullptr);
  void* mspace = CreateMspace(mem_map->Begin(), starting_size, initial_size);
  if (mspace == nullptr) {
    LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
    return nullptr;
  }

  // Protect memory beyond the starting size. MoreCore() will add r/w permissions
  // when necessary.
  uint8_t* end = mem_map->Begin() + starting_size;
  if (capacity - starting_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
  }
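  // Illustration (hypothetical sizes): with starting_size = 4 KiB and capacity = 64 MiB,
  // only the first page is readable and writable at this point; the remaining pages stay
  // PROT_NONE until MoreCore() grows the space and restores r/w access to them.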

  // Everything is set up, so record the state in an immutable structure and return it.
  uint8_t* begin = mem_map->Begin();
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
        mem_map, initial_size, name, mspace, begin, end, begin + capacity, growth_limit,
        can_move_objects, starting_size);
  } else {
    return new DlMallocSpace(mem_map, initial_size, name, mspace, begin, end, begin + capacity,
                             growth_limit, can_move_objects, starting_size);
  }
}

DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
                                     size_t growth_limit, size_t capacity,
                                     uint8_t* requested_begin, bool can_move_objects) {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    LOG(INFO) << "DlMallocSpace::Create entering " << name
              << " initial_size=" << PrettySize(initial_size)
              << " growth_limit=" << PrettySize(growth_limit)
              << " capacity=" << PrettySize(capacity)
              << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Memory we promise to dlmalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
  // size of the large allocation) will be greater than the footprint limit.
  size_t starting_size = kPageSize;
  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
                                 requested_begin);
  if (mem_map == nullptr) {
    LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return nullptr;
  }
  DlMallocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
                                          growth_limit, capacity, can_move_objects);
  // We start out with only the initial size possibly containing objects.
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "DlMallocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
              << " ) " << *space;
  }
  return space;
}
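
// Example call (a sketch; the name and sizes are illustrative, not prescriptive):
//   DlMallocSpace* space = DlMallocSpace::Create(
//       "main dlmalloc space", 16 * MB, 64 * MB, 256 * MB,
//       /*requested_begin=*/nullptr, /*can_move_objects=*/false);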

void* DlMallocSpace::CreateMspace(void* begin, size_t morecore_start, size_t initial_size) {
  // Clear errno to allow PLOG on error.
  errno = 0;
  // Create an mspace using our backing storage starting at begin and with a footprint of
  // morecore_start. Don't use an internal dlmalloc lock (as we already hold the heap lock).
  // When the morecore_start bytes of memory are exhausted, morecore will be called.
  void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
  if (msp != nullptr) {
    // Do not allow morecore requests to succeed beyond the initial size of the heap.
    mspace_set_footprint_limit(msp, initial_size);
  } else {
    PLOG(ERROR) << "create_mspace_with_base failed";
  }
  return msp;
}
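
// Sketch of the resulting behavior (illustrative numbers): with morecore_start = 4 KiB
// and initial_size = 16 MiB, dlmalloc manages the first page directly; allocations that
// need more memory trigger morecore, but requests that would push the footprint past
// 16 MiB fail until mspace_set_footprint_limit() is called again with a larger cap.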

mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
                                               size_t* bytes_allocated, size_t* usable_size,
                                               size_t* bytes_tl_bulk_allocated) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    mspace_set_footprint_limit(mspace_, max_allowed);
    // Try the allocation.
    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
                                      bytes_tl_bulk_allocated);
    // Shrink back down as small as possible.
    size_t footprint = mspace_footprint(mspace_);
    mspace_set_footprint_limit(mspace_, footprint);
  }
  if (result != nullptr) {
    // Zero freshly allocated memory, done while not holding the space's lock.
    memset(result, 0, num_bytes);
    // Check that the result is contained in the space.
    CHECK(!kDebugSpaces || Contains(result));
  }
  return result;
}
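
// Note on the pattern above: raising the footprint limit to Capacity() lets this one
// allocation trigger morecore as needed; resetting the limit to the footprint actually
// reached then blocks further growth until the next AllocWithGrowth() call.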

MallocSpace* DlMallocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
                                           void* allocator, uint8_t* begin, uint8_t* end,
                                           uint8_t* limit, size_t growth_limit,
                                           bool can_move_objects) {
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
        mem_map, initial_size_, name, allocator, begin, end, limit, growth_limit,
        can_move_objects, starting_size_);
  } else {
    return new DlMallocSpace(mem_map, initial_size_, name, allocator, begin, end, limit,
                             growth_limit, can_move_objects, starting_size_);
  }
}

size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  if (kDebugSpaces) {
    CHECK(ptr != nullptr);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr);
  if (kRecentFreeCount > 0) {
    RegisterRecentFree(ptr);
  }
  mspace_free(mspace_, ptr);
  return bytes_freed;
}

size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != nullptr);

  // Don't need the lock to calculate the size of the freed pointers.
  size_t bytes_freed = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    mirror::Object* ptr = ptrs[i];
    const size_t look_ahead = 8;
    if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) {
      // The chunk header for the allocation sits sizeof(size_t) bytes before the
      // allocation itself; prefetch it ahead of the size lookup and the later free.
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]) - sizeof(size_t));
    }
    bytes_freed += AllocationSizeNonvirtual(ptr, nullptr);
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = mspace_usable_size(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  {
    MutexLock mu(self, lock_);
    mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
    return bytes_freed;
  }
}
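
// The actual release above is a single mspace_bulk_free() under lock_; the sizes (and,
// in debug builds, the 0xEF poisoning) are computed beforehand so the locked section
// stays short.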

size_t DlMallocSpace::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  // Trim to release memory at the end of the space.
  mspace_trim(mspace_, 0);
  // Visit the space looking for page-sized holes and advise the kernel that we don't need them.
  size_t reclaimed = 0;
  mspace_inspect_all(mspace_, DlmallocMadviseCallback, &reclaimed);
  return reclaimed;
}

void DlMallocSpace::Walk(void (*callback)(void* start, void* end, size_t num_bytes,
                                          void* callback_arg),
                         void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  mspace_inspect_all(mspace_, callback, arg);
  callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
}
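
// Example callback (a sketch; CountChunks is a hypothetical helper, not part of this file):
//   static void CountChunks(void* start, void* end ATTRIBUTE_UNUSED,
//                           size_t used_bytes ATTRIBUTE_UNUSED, void* arg) {
//     if (start == nullptr) {
//       return;  // A null start marks the end-of-space sentinel emitted by Walk().
//     }
//     ++*reinterpret_cast<size_t*>(arg);
//   }
//   size_t chunks = 0;
//   space->Walk(CountChunks, &chunks);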

size_t DlMallocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint(mspace_);
}

size_t DlMallocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint_limit(mspace_);
}

void DlMallocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "DlMallocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = mspace_footprint(mspace_);
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  mspace_set_footprint_limit(mspace_, new_size);
}

uint64_t DlMallocSpace::GetBytesAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

uint64_t DlMallocSpace::GetObjectsAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t objects_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
  return objects_allocated;
}

void DlMallocSpace::Clear() {
  size_t footprint_limit = GetFootprintLimit();
  madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
  live_bitmap_->Clear();
  mark_bitmap_->Clear();
  SetEnd(Begin() + starting_size_);
  mspace_ = CreateMspace(mem_map_->Begin(), starting_size_, initial_size_);
  SetFootprintLimit(footprint_limit);
}
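
// After Clear() the mspace is rebuilt from scratch on the same backing pages, so any
// pointer into the old mspace bookkeeping is invalid; only the footprint limit is
// carried over, via the save/restore around the rebuild.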

#ifndef NDEBUG
void DlMallocSpace::CheckMoreCoreForPrecondition() {
  lock_.AssertHeld(Thread::Current());
}
#endif

static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os,
                                                 size_t failed_alloc_bytes ATTRIBUTE_UNUSED) {
  Thread* const self = Thread::Current();
  size_t max_contiguous_allocation = 0;
  // To allow Walk/InspectAll() to exclusively lock the mutator lock, temporarily release
  // our shared access to it by transitioning to the suspended state.
  Locks::mutator_lock_->AssertSharedHeld(self);
  ScopedThreadSuspension sts(self, kSuspended);
  Walk(MSpaceChunkCallback, &max_contiguous_allocation);
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
}

}  // namespace space

namespace allocator {

// Implement the dlmalloc morecore callback.
void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) REQUIRES_SHARED(Locks::mutator_lock_) {
  Runtime* runtime = Runtime::Current();
  Heap* heap = runtime->GetHeap();
  ::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
  // Support for multiple mspaces (the JIT code cache and any secondary DlMallocSpaces)
  // is provided by a slow path.
  if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
    if (LIKELY(runtime->GetJit() != nullptr)) {
      jit::JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
      if (code_cache->OwnsSpace(mspace)) {
        return code_cache->MoreCore(mspace, increment);
      }
    }
    dlmalloc_space = nullptr;
    for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        ::art::gc::space::DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace();
        if (cur_dlmalloc_space->GetMspace() == mspace) {
          dlmalloc_space = cur_dlmalloc_space;
          break;
        }
      }
    }
    CHECK(dlmalloc_space != nullptr) << "Couldn't find DlMallocSpace with mspace=" << mspace;
  }
  return dlmalloc_space->MoreCore(increment);
}
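
// Note: this is the morecore callback named in the comment above; under the assumption
// that ART's dlmalloc build routes MORECORE here, every growth request for an ART-owned
// mspace (main space, JIT code cache, or a secondary space) funnels through this function.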

}  // namespace allocator

}  // namespace gc
}  // namespace art