/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <limits>
#include <memory>
#include <unwind.h>  // For GC verification.
#include <vector>

#include "art_field-inl.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_compact.h"
#include "gc/collector/mark_sweep.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "gc/task_processor.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "heap-inl.h"
#include "image.h"
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "os.h"
#include "reflection.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {

namespace gc {

static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// often relative to partial/full GC. This may be desirable since sticky GCs interfere less with
// mutator threads (lower pauses, use less memory bandwidth).
static constexpr double kStickyGcThroughputAdjustment = 1.0;
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
// How many reserve entries are at the end of the allocation stack; these are only needed if the
// allocation stack overflows.
static constexpr size_t kAllocationStackReserveSize = 1024;
// Default mark stack size in bytes.
static const size_t kDefaultMarkStackSize = 64 * KB;
// Define space names.
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
static const char* kNonMovingSpaceName = "non moving space";
static const char* kZygoteSpaceName = "zygote space";
static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
static constexpr bool kGCALotMode = false;
// GC-a-lot mode uses a small allocation stack to stress test a lot of GCs.
static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
// Verify-object mode uses a small allocation stack since searching the allocation stack is slow.
static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
    sizeof(mirror::HeapReference<mirror::Object>);
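// Worked example (comment added for exposition; assumes the usual 4-byte compressed
// mirror::HeapReference): the default allocation stack holds 8 MB / 4 = 2M entries,
// while verify-object mode holds only 16 KB / 4 = 4K entries so that linear searches
// of the stack stay cheap.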
// System.runFinalization can deadlock with native allocations; to deal with this, we have a
// timeout on how long we wait for finalizers to run. b/21544853
static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);

// For deterministic compilation, we need the heap to be at a well-known address.
static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;

static constexpr size_t kNativeAllocationHistogramBuckets = 16;

static inline bool CareAboutPauseTimes() {
  return Runtime::Current()->InJankPerceptibleProcessState();
}
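// Illustrative use (not from the original source): pause-sensitive policy
// decisions branch on this, e.g.
//
//   if (!CareAboutPauseTimes()) {
//     // Background process: trade longer pauses for throughput.
//   }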

Heap::Heap(size_t initial_size,
           size_t growth_limit,
           size_t min_free,
           size_t max_free,
           double target_utilization,
           double foreground_heap_growth_multiplier,
           size_t capacity,
           size_t non_moving_space_capacity,
           const std::string& image_file_name,
           const InstructionSet image_instruction_set,
           CollectorType foreground_collector_type,
           CollectorType background_collector_type,
           space::LargeObjectSpaceType large_object_space_type,
           size_t large_object_threshold,
           size_t parallel_gc_threads,
           size_t conc_gc_threads,
           bool low_memory_mode,
           size_t long_pause_log_threshold,
           size_t long_gc_log_threshold,
           bool ignore_max_footprint,
           bool use_tlab,
           bool verify_pre_gc_heap,
           bool verify_pre_sweeping_heap,
           bool verify_post_gc_heap,
           bool verify_pre_gc_rosalloc,
           bool verify_pre_sweeping_rosalloc,
           bool verify_post_gc_rosalloc,
           bool gc_stress_mode,
           bool use_homogeneous_space_compaction_for_oom,
           uint64_t min_interval_homogeneous_space_compaction_by_oom)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      collector_type_(kCollectorTypeNone),
      foreground_collector_type_(foreground_collector_type),
      background_collector_type_(background_collector_type),
      desired_collector_type_(foreground_collector_type_),
      pending_task_lock_(nullptr),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
      zygote_space_(nullptr),
      large_object_threshold_(large_object_threshold),
      disable_thread_flip_count_(0),
      thread_flip_running_(false),
      collector_type_running_(kCollectorTypeNone),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_need_to_run_finalization_(false),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      native_histogram_lock_("Native allocation lock"),
      native_allocation_histogram_("Native allocation sizes",
                                   1U,
                                   kNativeAllocationHistogramBuckets),
      native_free_histogram_("Native free sizes", 1U, kNativeAllocationHistogramBuckets),
      num_bytes_freed_revoke_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      gc_stress_mode_(gc_stress_mode),
      /* For GC-a-lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
          kDefaultAllocationStackSize),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      region_space_(nullptr),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
      total_wait_time_(0),
      verify_object_mode_(kVerifyObjectModeDisabled),
      disable_moving_gc_count_(0),
      is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
      use_tlab_(use_tlab),
      main_space_backup_(nullptr),
      min_interval_homogeneous_space_compaction_by_oom_(
          min_interval_homogeneous_space_compaction_by_oom),
      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
      pending_collector_transition_(nullptr),
      pending_heap_trim_(nullptr),
      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
      running_collection_is_blocking_(false),
      blocking_gc_count_(0U),
      blocking_gc_time_(0U),
      last_update_time_gc_count_rate_histograms_(  // Round down by the window duration.
          (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
      gc_count_last_window_(0U),
      blocking_gc_count_last_window_(0U),
      gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
      blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
                                        kGcCountRateMaxBucketCount),
      alloc_tracking_enabled_(false),
      backtrace_lock_(nullptr),
      seen_backtrace_count_(0u),
      unique_backtrace_count_(0u),
      gc_disabled_for_shutdown_(false) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  ScopedTrace trace(__FUNCTION__);
  Runtime* const runtime = Runtime::Current();
  // If we aren't the zygote, switch to the default non-zygote allocator. This may update the
  // entrypoints.
  const bool is_zygote = runtime->IsZygote();
  if (!is_zygote) {
    // Background compaction is currently not supported for command line runs.
    if (background_collector_type_ != foreground_collector_type_) {
      VLOG(heap) << "Disabling background compaction for non zygote";
      background_collector_type_ = foreground_collector_type_;
    }
  }
  ChangeCollector(desired_collector_type_);
  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files.
  uint8_t* requested_alloc_space_begin = nullptr;
  if (foreground_collector_type_ == kCollectorTypeCC) {
    // Need to use a low address so that we can allocate a contiguous
    // 2 * Xmx space when there's no image (dex2oat for target).
    CHECK_GE(300 * MB, non_moving_space_capacity);
    requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity;
  }
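  // Numeric sketch (assumed capacity, for illustration only): with a 64 MB
  // non-moving space, requested_alloc_space_begin == 300 MB - 64 MB = 236 MB,
  // so the non-moving space ends exactly at the 300 MB mark where the region
  // space will later be requested.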

  // Load image space(s).
  if (!image_file_name.empty()) {
    // For code reuse, handle this like a work queue.
    std::vector<std::string> image_file_names;
    image_file_names.push_back(image_file_name);
    // The loaded spaces. Secondary images may fail to load, in which case we need to remove
    // already added spaces.
    std::vector<space::Space*> added_image_spaces;
    uint8_t* const original_requested_alloc_space_begin = requested_alloc_space_begin;
    for (size_t index = 0; index < image_file_names.size(); ++index) {
      std::string& image_name = image_file_names[index];
      std::string error_msg;
      space::ImageSpace* boot_image_space = space::ImageSpace::CreateBootImage(
          image_name.c_str(),
          image_instruction_set,
          index > 0,
          &error_msg);
      if (boot_image_space != nullptr) {
        AddSpace(boot_image_space);
        added_image_spaces.push_back(boot_image_space);
        // Oat files referenced by image files immediately follow them in memory; ensure the alloc
        // space isn't going to end up in the middle.
        uint8_t* oat_file_end_addr = boot_image_space->GetImageHeader().GetOatFileEnd();
        CHECK_GT(oat_file_end_addr, boot_image_space->End());
        requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
        boot_image_spaces_.push_back(boot_image_space);

        if (index == 0) {
          // If this was the first space, check whether there are more images to load.
          const OatFile* boot_oat_file = boot_image_space->GetOatFile();
          if (boot_oat_file == nullptr) {
            continue;
          }

          const OatHeader& boot_oat_header = boot_oat_file->GetOatHeader();
          const char* boot_classpath =
              boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
          if (boot_classpath == nullptr) {
            continue;
          }

          space::ImageSpace::ExtractMultiImageLocations(image_file_name,
                                                        boot_classpath,
                                                        &image_file_names);
        }
      } else {
        LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
                   << "Attempting to fall back to imageless running. Error was: " << error_msg
                   << "\nAttempted image: " << image_name;
        // Remove already loaded spaces.
        for (space::Space* loaded_space : added_image_spaces) {
          RemoveSpace(loaded_space);
          delete loaded_space;
        }
        boot_image_spaces_.clear();
        requested_alloc_space_begin = original_requested_alloc_space_begin;
        break;
      }
    }
  }
  /*
  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-  nonmoving space (non_moving_space_capacity)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space / bump space 1 (capacity_) +-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space2 / bump space 2 (capacity_)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
  */
  // We don't have hspace compaction enabled with GSS or CC.
  if (foreground_collector_type_ == kCollectorTypeGSS ||
      foreground_collector_type_ == kCollectorTypeCC) {
    use_homogeneous_space_compaction_for_oom_ = false;
  }
  bool support_homogeneous_space_compaction =
      background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
      use_homogeneous_space_compaction_for_oom_;
  // We may use the same space as the main space for the non-moving space if we don't need to
  // compact from the main space.
  // This is not the case if we support homogeneous compaction or have a moving background
  // collector type.
  bool separate_non_moving_space = is_zygote ||
      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
      IsMovingGc(background_collector_type_);
  if (foreground_collector_type_ == kCollectorTypeGSS) {
    separate_non_moving_space = false;
  }
  std::unique_ptr<MemMap> main_mem_map_1;
  std::unique_ptr<MemMap> main_mem_map_2;

  // Gross hack to make dex2oat deterministic.
  if (foreground_collector_type_ == kCollectorTypeMS &&
      requested_alloc_space_begin == nullptr &&
      Runtime::Current()->IsAotCompiler()) {
    // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
    // b/26849108
    requested_alloc_space_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
  }
  uint8_t* request_begin = requested_alloc_space_begin;
  if (request_begin != nullptr && separate_non_moving_space) {
    request_begin += non_moving_space_capacity;
  }
  std::string error_str;
  std::unique_ptr<MemMap> non_moving_space_mem_map;
  if (separate_non_moving_space) {
    ScopedTrace trace2("Create separate non moving space");
    // If we are the zygote, the non-moving space becomes the zygote space when we run
    // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
    // rename the mem map later.
    const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
    // Reserve the non-moving mem map before the other two since it needs to be at a specific
    // address.
    non_moving_space_mem_map.reset(
        MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
                             non_moving_space_capacity, PROT_READ | PROT_WRITE, true, false,
                             &error_str));
    CHECK(non_moving_space_mem_map != nullptr) << error_str;
    // Try to reserve virtual memory at a lower address if we have a separate non-moving space.
    request_begin = reinterpret_cast<uint8_t*>(300 * MB);
  }
  // Attempt to create 2 mem maps at or after the requested begin.
  if (foreground_collector_type_ != kCollectorTypeCC) {
    ScopedTrace trace2("Create main mem map");
    if (separate_non_moving_space || !is_zygote) {
      main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0],
                                                        request_begin,
                                                        capacity_,
                                                        &error_str));
    } else {
      // If there is no separate non-moving space and we are the zygote, the main space must come
      // right after the image space to avoid a gap. This is required since we want the zygote
      // space to be adjacent to the image space.
      main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
                                                PROT_READ | PROT_WRITE, true, false,
                                                &error_str));
    }
    CHECK(main_mem_map_1.get() != nullptr) << error_str;
  }
  if (support_homogeneous_space_compaction ||
      background_collector_type_ == kCollectorTypeSS ||
      foreground_collector_type_ == kCollectorTypeSS) {
    ScopedTrace trace2("Create main mem map 2");
    main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
                                                      capacity_, &error_str));
    CHECK(main_mem_map_2.get() != nullptr) << error_str;
  }

  // Create the non-moving space first so that bitmaps don't take up the address range.
  if (separate_non_moving_space) {
    ScopedTrace trace2("Add non moving space");
    // The non-moving space is always dlmalloc since we currently don't have support for multiple
    // active rosalloc spaces.
    const size_t size = non_moving_space_mem_map->Size();
    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
        non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
        initial_size, size, size, false);
    CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
        << requested_alloc_space_begin;
    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
    AddSpace(non_moving_space_);
  }
  // Create other spaces based on whether or not we have a moving GC.
  if (foreground_collector_type_ == kCollectorTypeCC) {
    region_space_ = space::RegionSpace::Create("Region space", capacity_ * 2, request_begin);
    AddSpace(region_space_);
  } else if (IsMovingGc(foreground_collector_type_) &&
             foreground_collector_type_ != kCollectorTypeGSS) {
    // Create bump pointer spaces.
    // We only need to create the bump pointer spaces if the foreground collector is a
    // compacting GC.
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
                                                                    main_mem_map_1.release());
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
                                                            main_mem_map_2.release());
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    CHECK(separate_non_moving_space);
  } else {
    CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
    CHECK(main_space_ != nullptr);
    AddSpace(main_space_);
    if (!separate_non_moving_space) {
      non_moving_space_ = main_space_;
      CHECK(!non_moving_space_->CanMoveObjects());
    }
    if (foreground_collector_type_ == kCollectorTypeGSS) {
      CHECK_EQ(foreground_collector_type_, background_collector_type_);
      // Create bump pointer spaces instead of a backup space.
      main_mem_map_2.release();
      bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
                                                            kGSSBumpPointerSpaceCapacity, nullptr);
      CHECK(bump_pointer_space_ != nullptr);
      AddSpace(bump_pointer_space_);
      temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
                                                    kGSSBumpPointerSpaceCapacity, nullptr);
      CHECK(temp_space_ != nullptr);
      AddSpace(temp_space_);
    } else if (main_mem_map_2.get() != nullptr) {
      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
      main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
                                                           growth_limit_, capacity_, name, true));
      CHECK(main_space_backup_.get() != nullptr);
      // Add the space so it's accounted for in the heap_begin and heap_end.
      AddSpace(main_space_backup_.get());
    }
  }
  CHECK(non_moving_space_ != nullptr);
  CHECK(!non_moving_space_->CanMoveObjects());
  // Allocate the large object space.
  if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
    large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
                                                       capacity_);
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
    large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else {
    // Disable the large object space by making the cutoff excessively large.
    large_object_threshold_ = std::numeric_limits<size_t>::max();
    large_object_space_ = nullptr;
  }
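  // Note (added for exposition): with the threshold raised to SIZE_MAX, no
  // allocation size can reach it, so the large object path is never taken and
  // every allocation is served by the continuous spaces.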
  if (large_object_space_ != nullptr) {
    AddSpace(large_object_space_);
  }
  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  uint8_t* heap_begin = continuous_spaces_.front()->Begin();
  uint8_t* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;
  // Remove the main backup space since it slows down the GC to have unused extra spaces.
  // TODO: Avoid needing to do this.
  if (main_space_backup_.get() != nullptr) {
    RemoveSpace(main_space_backup_.get());
  }
  // Allocate the card table.
  // We currently don't support dynamically resizing the card table.
  // Since we don't know where in the low_4gb the app image will be located, make the card table
  // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
  UNUSED(heap_capacity);
  // Start at 4 KB; we can be sure there are no spaces mapped this low since the low address range
  // is reserved by the kernel.
  static constexpr size_t kMinHeapAddress = 4 * KB;
  card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
                                                  4 * GB - kMinHeapAddress));
  CHECK(card_table_.get() != nullptr) << "Failed to create card table";
  if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
    rb_table_.reset(new accounting::ReadBarrierTable());
    DCHECK(rb_table_->IsAllCleared());
  }
  if (HasBootImageSpace()) {
    // Don't add the image mod union table if we are running without an image; this can crash if
    // we use the CardCache implementation.
    for (space::ImageSpace* image_space : GetBootImageSpaces()) {
      accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
          "Image mod-union table", this, image_space);
      CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
      AddModUnionTable(mod_union_table);
    }
  }
  if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
    accounting::RememberedSet* non_moving_space_rem_set =
        new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
    CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
    AddRememberedSet(non_moving_space_rem_set);
  }
  // TODO: Count objects in the image space here?
  num_bytes_allocated_.StoreRelaxed(0);
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
                                                    kDefaultMarkStackSize));
  const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
  allocation_stack_.reset(accounting::ObjectStack::Create(
      "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
  live_stack_.reset(accounting::ObjectStack::Create(
      "live stack", max_allocation_stack_size_, alloc_stack_capacity));
  // It's still too early to take a lock because there are no threads yet, but we can create the
  // locks now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  thread_flip_lock_ = new Mutex("GC thread flip lock");
  thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
                                                *thread_flip_lock_));
  task_processor_.reset(new TaskProcessor());
  reference_processor_.reset(new ReferenceProcessor());
  pending_task_lock_ = new Mutex("Pending task lock");
  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);
  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
        (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
      garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
    }
  }
  if (kMovingCollector) {
    if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
        MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
        use_homogeneous_space_compaction_for_oom_) {
      // TODO: Clean this up.
      const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
      semi_space_collector_ = new collector::SemiSpace(this, generational,
                                                       generational ? "generational" : "");
      garbage_collectors_.push_back(semi_space_collector_);
    }
    if (MayUseCollector(kCollectorTypeCC)) {
      concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
      garbage_collectors_.push_back(concurrent_copying_collector_);
    }
    if (MayUseCollector(kCollectorTypeMC)) {
      mark_compact_collector_ = new collector::MarkCompact(this);
      garbage_collectors_.push_back(mark_compact_collector_);
    }
  }
  if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
      (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
    // Check that there's no gap between the image space and the non-moving space so that the
    // immune region won't break (e.g. due to a large object allocated in the gap). This is only
    // required when we're the zygote or using GSS.
    // Find the space with the smallest Begin().
    space::ImageSpace* first_space = nullptr;
    for (space::ImageSpace* space : boot_image_spaces_) {
      if (first_space == nullptr || space->Begin() < first_space->Begin()) {
        first_space = space;
      }
    }
    bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
    if (!no_gap) {
      PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
      MemMap::DumpMaps(LOG(ERROR), true);
      LOG(FATAL) << "There's a gap between the image space and the non-moving space";
    }
  }
  instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
  if (gc_stress_mode_) {
    backtrace_lock_ = new Mutex("backtrace lock");
  }
  if (is_running_on_memory_tool_ || gc_stress_mode_) {
    instrumentation->InstrumentQuickAllocEntryPoints();
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

MemMap* Heap::MapAnonymousPreferredAddress(const char* name,
                                           uint8_t* request_begin,
                                           size_t capacity,
                                           std::string* out_error_str) {
  while (true) {
    MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
                                       PROT_READ | PROT_WRITE, true, false, out_error_str);
    if (map != nullptr || request_begin == nullptr) {
      return map;
    }
    // Retry a second time with no specified request begin.
    request_begin = nullptr;
  }
}
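
// Hypothetical call (name, address, and size invented for illustration):
//
//   std::string error;
//   std::unique_ptr<MemMap> map(MapAnonymousPreferredAddress(
//       "example space", reinterpret_cast<uint8_t*>(0x30000000), 16 * MB, &error));
//   // |map| may start at 0x30000000, or wherever the kernel placed it on the
//   // second, unconstrained attempt.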

bool Heap::MayUseCollector(CollectorType type) const {
  return foreground_collector_type_ == type || background_collector_type_ == type;
}

space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
                                                      size_t initial_size,
                                                      size_t growth_limit,
                                                      size_t capacity,
                                                      const char* name,
                                                      bool can_move_objects) {
  space::MallocSpace* malloc_space = nullptr;
  if (kUseRosAlloc) {
    // Create rosalloc space.
    malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
                                                          initial_size, growth_limit, capacity,
                                                          low_memory_mode_, can_move_objects);
  } else {
    malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
                                                          initial_size, growth_limit, capacity,
                                                          can_move_objects);
  }
  if (collector::SemiSpace::kUseRememberedSet) {
    accounting::RememberedSet* rem_set =
        new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
    CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
    AddRememberedSet(rem_set);
  }
  CHECK(malloc_space != nullptr) << "Failed to create " << name;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  return malloc_space;
}

void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                                 size_t capacity) {
  // Is background compaction enabled?
  bool can_move_objects = IsMovingGc(background_collector_type_) !=
      IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
  // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
  // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
  // from the main space to the zygote space. If background compaction is enabled, always pass in
  // that we can move objects.
  if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
    // After the zygote we want this to be false if we don't have background compaction enabled so
    // that getting primitive array elements is faster.
    // We never have homogeneous compaction with GSS and don't need a space with movable objects.
    can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
  }
  if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
    RemoveRememberedSet(main_space_);
  }
  const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
  main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
                                            can_move_objects);
  SetSpaceAsDefault(main_space_);
  VLOG(heap) << "Created main space " << main_space_;
}

void Heap::ChangeAllocator(AllocatorType allocator) {
  if (current_allocator_ != allocator) {
    // These two allocators are only used internally and don't have any entrypoints.
    CHECK_NE(allocator, kAllocatorTypeLOS);
    CHECK_NE(allocator, kAllocatorTypeNonMoving);
    current_allocator_ = allocator;
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}
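
// Sketch of the effect (simplified; the real dispatch lives in the quick
// entrypoints): after the switch, the allocation fast path used by compiled
// code targets the new allocator, conceptually going from "allocate via
// RosAlloc runs" to "allocate via bump pointer / TLAB", which is why the
// entrypoints must be reset under the runtime shutdown lock.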

void Heap::DisableMovingGc() {
  if (IsMovingGc(foreground_collector_type_)) {
    foreground_collector_type_ = kCollectorTypeCMS;
  }
  if (IsMovingGc(background_collector_type_)) {
    background_collector_type_ = foreground_collector_type_;
  }
  TransitionCollector(foreground_collector_type_);
  Thread* const self = Thread::Current();
  ScopedThreadStateChange tsc(self, kSuspended);
  ScopedSuspendAll ssa(__FUNCTION__);
  // Something may have caused the transition to fail.
  if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
    CHECK(main_space_ != nullptr);
    // The allocation stack may have non-movable objects in it. We need to flush it since the GC
    // can only handle marking allocation stack objects of one non-moving space and one main
    // space.
    {
      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
      FlushAllocStack();
    }
    main_space_->DisableMovingObjects();
    non_moving_space_ = main_space_;
    CHECK(!non_moving_space_->CanMoveObjects());
  }
}

std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
  if (!IsValidContinuousSpaceObjectAddress(klass)) {
    return StringPrintf("<non heap address klass %p>", klass);
  }
  mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
  if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
    std::string result("[");
    result += SafeGetClassDescriptor(component_type);
    return result;
  } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
    return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
  } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
    return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
  } else {
    mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
    if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
      return StringPrintf("<non heap address dex_cache %p>", dex_cache);
    }
    const DexFile* dex_file = dex_cache->GetDexFile();
    uint16_t class_def_idx = klass->GetDexClassDefIndex();
    if (class_def_idx == DexFile::kDexNoIndex16) {
      return "<class def not found>";
    }
    const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
    const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
    return dex_file->GetTypeDescriptor(type_id);
  }
}

std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
  if (obj == nullptr) {
    return "null";
  }
  mirror::Class* klass = obj->GetClass<kVerifyNone>();
  if (klass == nullptr) {
    return "(class=null)";
  }
  std::string result(SafeGetClassDescriptor(klass));
  if (obj->IsClass()) {
    result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
  }
  return result;
}

void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
  if (obj == nullptr) {
    stream << "(obj=null)";
    return;
  }
  if (IsAligned<kObjectAlignment>(obj)) {
    space::Space* space = nullptr;
    // Don't use find space since it only finds spaces which actually contain objects instead of
    // spaces which may contain objects (e.g. cleared bump pointer spaces).
    for (const auto& cur_space : continuous_spaces_) {
      if (cur_space->HasAddress(obj)) {
        space = cur_space;
        break;
      }
    }
    // Unprotect all the spaces.
    for (const auto& con_space : continuous_spaces_) {
      mprotect(con_space->Begin(), con_space->Capacity(), PROT_READ | PROT_WRITE);
    }
    stream << "Object " << obj;
    if (space != nullptr) {
      stream << " in space " << *space;
    }
    mirror::Class* klass = obj->GetClass<kVerifyNone>();
    stream << "\nclass=" << klass;
    if (klass != nullptr) {
      stream << " type= " << SafePrettyTypeOf(obj);
    }
    // Re-protect the address we faulted on.
    mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
  }
}

bool Heap::IsCompilingBoot() const {
  if (!Runtime::Current()->IsAotCompiler()) {
    return false;
  }
  ScopedObjectAccess soa(Thread::Current());
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace() || space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  if (IsMovingGc(collector_type_running_)) {
    WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
  }
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GT(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}

void Heap::IncrementDisableThreadFlip(Thread* self) {
  // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
  CHECK(kUseReadBarrier);
  bool is_nested = self->GetDisableThreadFlipCount() > 0;
  self->IncrementDisableThreadFlipCount();
  if (is_nested) {
    // If this is a nested JNI critical section enter, we don't need to wait or increment the
    // global counter. The global counter is incremented only once per thread, for the outermost
    // enter.
    return;
  }
  ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
  MutexLock mu(self, *thread_flip_lock_);
  bool has_waited = false;
  uint64_t wait_start = NanoTime();
  while (thread_flip_running_) {
    has_waited = true;
    thread_flip_cond_->Wait(self);
  }
  ++disable_thread_flip_count_;
  if (has_waited) {
    uint64_t wait_time = NanoTime() - wait_start;
    total_wait_time_ += wait_time;
    if (wait_time > long_pause_log_threshold_) {
      LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
    }
  }
}

void Heap::DecrementDisableThreadFlip(Thread* self) {
  // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake
  // up the GC waiting before doing a thread flip.
  CHECK(kUseReadBarrier);
  self->DecrementDisableThreadFlipCount();
  bool is_outermost = self->GetDisableThreadFlipCount() == 0;
  if (!is_outermost) {
    // If this is not an outermost JNI critical exit, we don't need to decrement the global
    // counter. The global counter is decremented only once per thread, for the outermost exit.
    return;
  }
  MutexLock mu(self, *thread_flip_lock_);
  CHECK_GT(disable_thread_flip_count_, 0U);
  --disable_thread_flip_count_;
  if (disable_thread_flip_count_ == 0) {
    // Potentially notify the GC thread blocking to begin a thread flip.
    thread_flip_cond_->Broadcast(self);
  }
}

void Heap::ThreadFlipBegin(Thread* self) {
  // Supposed to be called by GC. Set thread_flip_running_ to be true. If
  // disable_thread_flip_count_ > 0, block. Otherwise, go ahead.
  CHECK(kUseReadBarrier);
  ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
  MutexLock mu(self, *thread_flip_lock_);
  bool has_waited = false;
  uint64_t wait_start = NanoTime();
  CHECK(!thread_flip_running_);
  // Set this to true before waiting so that frequent JNI critical enter/exits won't starve
  // GC. This is like writer preference in a reader-writer lock.
  thread_flip_running_ = true;
  while (disable_thread_flip_count_ > 0) {
    has_waited = true;
    thread_flip_cond_->Wait(self);
  }
  if (has_waited) {
    uint64_t wait_time = NanoTime() - wait_start;
    total_wait_time_ += wait_time;
    if (wait_time > long_pause_log_threshold_) {
      LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
    }
  }
}

void Heap::ThreadFlipEnd(Thread* self) {
  // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up
  // mutators waiting before doing a JNI critical.
  CHECK(kUseReadBarrier);
  MutexLock mu(self, *thread_flip_lock_);
  CHECK(thread_flip_running_);
  thread_flip_running_ = false;
  // Potentially notify mutator threads blocking to enter a JNI critical section.
  thread_flip_cond_->Broadcast(self);
}
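
// Illustrative pairing of the four calls above (not from the original source).
// A mutator in a JNI critical section and the concurrent copying GC exclude
// each other roughly like this:
//
//   // Mutator, e.g. around a GetPrimitiveArrayCritical region:
//   heap->IncrementDisableThreadFlip(self);  // Blocks while a flip runs.
//   ... access raw object memory ...
//   heap->DecrementDisableThreadFlip(self);  // May wake a waiting flip.
//
//   // GC thread, around the flip of thread roots:
//   heap->ThreadFlipBegin(self);  // Blocks while critical sections are open.
//   ... flip ...
//   heap->ThreadFlipEnd(self);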

void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
  if (old_process_state != new_process_state) {
    const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
    for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
      // Start at index 1 to avoid "is always false" warning.
      // Have iteration 1 always transition the collector.
      TransitionCollector((((i & 1) == 1) == jank_perceptible)
          ? foreground_collector_type_
          : background_collector_type_);
      usleep(kCollectorTransitionStressWait);
    }
    if (jank_perceptible) {
      // Transition back to foreground right away to prevent jank.
      RequestCollectorTransition(foreground_collector_type_, 0);
    } else {
      // Don't delay for debug builds since we may want to stress test the GC.
      // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
      // special handling which does a homogeneous space compaction once but then doesn't
      // transition the collector.
      RequestCollectorTransition(background_collector_type_,
                                 kIsDebugBuild ? 0 : kCollectorTransitionWait);
    }
  }
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

// Visit objects when threads aren't suspended. If concurrent moving
// GC, disable moving GC and suspend threads and then visit objects.
void Heap::VisitObjects(ObjectCallback callback, void* arg) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertSharedHeld(self);
  DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
  if (IsGcConcurrentAndMoving()) {
    // Concurrent moving GC. Just suspending threads isn't sufficient
    // because a collection isn't one big pause and we could suspend
    // threads in the middle (between phases) of a concurrent moving
    // collection where it's not easily known which objects are alive
    // (both the region space and the non-moving space) or which
    // copies of objects to visit, and the to-space invariant could be
    // easily broken. Visit objects while GC isn't running by using
    // IncrementDisableMovingGC() and threads are suspended.
    IncrementDisableMovingGC(self);
    {
      ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
      ScopedSuspendAll ssa(__FUNCTION__);
      VisitObjectsInternalRegionSpace(callback, arg);
      VisitObjectsInternal(callback, arg);
    }
    DecrementDisableMovingGC(self);
  } else {
    // GCs can move objects, so don't allow this.
    ScopedAssertNoThreadSuspension ants(self, "Visiting objects");
    DCHECK(region_space_ == nullptr);
    VisitObjectsInternal(callback, arg);
  }
}
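
// Example caller shape (hypothetical callback, for illustration only):
//
//   static void CountObject(mirror::Object* obj, void* arg) {
//     if (obj != nullptr) {
//       ++*reinterpret_cast<size_t*>(arg);
//     }
//   }
//   ...
//   size_t count = 0;
//   heap->VisitObjects(CountObject, &count);  // Shared mutator lock held.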

// Visit objects when threads are already suspended.
void Heap::VisitObjectsPaused(ObjectCallback callback, void* arg) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  VisitObjectsInternalRegionSpace(callback, arg);
  VisitObjectsInternal(callback, arg);
}

// Visit objects in the region spaces.
void Heap::VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (region_space_ != nullptr) {
    DCHECK(IsGcConcurrentAndMoving());
    if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
      // Exclude the pre-zygote fork time where the semi-space collector
      // calls VerifyHeapReferences() as part of the zygote compaction
      // which then would call here without the moving GC disabled,
      // which is fine.
      DCHECK(IsMovingGCDisabled(self));
    }
    region_space_->Walk(callback, arg);
  }
}

// Visit objects in the other spaces.
void Heap::VisitObjectsInternal(ObjectCallback callback, void* arg) {
  if (bump_pointer_space_ != nullptr) {
    // Visit objects in bump pointer space.
    bump_pointer_space_->Walk(callback, arg);
  }
  // TODO: Switch to standard begin and end to use a range-based loop.
  for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
    mirror::Object* const obj = it->AsMirrorPtr();
    if (obj != nullptr && obj->GetClass() != nullptr) {
      // Avoid the race condition caused by the object not yet being written into the allocation
      // stack or the class not yet being written in the object. Or, if
      // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
      callback(obj, arg);
    }
  }
  {
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    GetLiveBitmap()->Walk(callback, arg);
  }
}

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
  space::ContinuousSpace* space2 = non_moving_space_;
  // TODO: Generalize this to n bitmaps?
  CHECK(space1 != nullptr);
  CHECK(space2 != nullptr);
  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
                 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
                 stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

void Heap::AddSpace(space::Space* space) {
  CHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      CHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }
    continuous_spaces_.push_back(continuous_space);
    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
                return a->Begin() < b->Begin();
              });
  } else {
    CHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (continuous_space->IsDlMallocSpace()) {
    dlmalloc_space_ = continuous_space->AsDlMallocSpace();
  } else if (continuous_space->IsRosAllocSpace()) {
    rosalloc_space_ = continuous_space->AsRosAllocSpace();
  }
}

void Heap::RemoveSpace(space::Space* space) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
    }
    auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
    DCHECK(it != continuous_spaces_.end());
    continuous_spaces_.erase(it);
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
                        discontinuous_space);
    DCHECK(it != discontinuous_spaces_.end());
    discontinuous_spaces_.erase(it);
  }
  if (space->IsAllocSpace()) {
    auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
    DCHECK(it != alloc_spaces_.end());
    alloc_spaces_.erase(it);
  }
}
1137
DumpGcPerformanceInfo(std::ostream & os)1138 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
1139 // Dump cumulative timings.
1140 os << "Dumping cumulative Gc timings\n";
1141 uint64_t total_duration = 0;
1142 // Dump cumulative loggers for each GC type.
1143 uint64_t total_paused_time = 0;
1144 for (auto& collector : garbage_collectors_) {
1145 total_duration += collector->GetCumulativeTimings().GetTotalNs();
1146 total_paused_time += collector->GetTotalPausedTimeNs();
1147 collector->DumpPerformanceInfo(os);
1148 }
1149 if (total_duration != 0) {
1150 const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
1151 os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
1152 os << "Mean GC size throughput: "
1153 << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
1154 os << "Mean GC object throughput: "
1155 << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
1156 }
1157 uint64_t total_objects_allocated = GetObjectsAllocatedEver();
1158 os << "Total number of allocations " << total_objects_allocated << "\n";
1159 os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
1160 os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
1161 os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
1162 os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
1163 os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
1164 os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
1165 os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
1166 if (HasZygoteSpace()) {
1167 os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
1168 }
1169 os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
1170 os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
1171 os << "Total GC count: " << GetGcCount() << "\n";
1172 os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
1173 os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
1174 os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
1175
1176 {
1177 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1178 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1179 os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1180 gc_count_rate_histogram_.DumpBins(os);
1181 os << "\n";
1182 }
1183 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1184 os << "Histogram of blocking GC count per "
1185 << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1186 blocking_gc_count_rate_histogram_.DumpBins(os);
1187 os << "\n";
1188 }
1189 }
1190
1191 if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
1192 rosalloc_space_->DumpStats(os);
1193 }
1194
1195 {
1196 MutexLock mu(Thread::Current(), native_histogram_lock_);
1197 if (native_allocation_histogram_.SampleSize() > 0u) {
1198 os << "Histogram of native allocation ";
1199 native_allocation_histogram_.DumpBins(os);
1200 os << " bucket size " << native_allocation_histogram_.BucketWidth() << "\n";
1201 }
1202 if (native_free_histogram_.SampleSize() > 0u) {
1203 os << "Histogram of native free ";
1204 native_free_histogram_.DumpBins(os);
1205 os << " bucket size " << native_free_histogram_.BucketWidth() << "\n";
1206 }
1207 }
1208
1209 BaseMutex::DumpAll(os);
1210 }
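// Illustrative arithmetic for the throughput lines above (hypothetical numbers,
// not taken from any device): with total_duration = 2e9 ns, total_seconds is
// (2e9 / 1000) / 1000000.0 = 2.0, so if GetBytesFreedEver() were 64 MB the
// reported mean GC size throughput would be 64 MB / 2.0 s = 32 MB/s.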
1211
1212 void Heap::ResetGcPerformanceInfo() {
1213 for (auto& collector : garbage_collectors_) {
1214 collector->ResetMeasurements();
1215 }
1216 total_bytes_freed_ever_ = 0;
1217 total_objects_freed_ever_ = 0;
1218 total_wait_time_ = 0;
1219 blocking_gc_count_ = 0;
1220 blocking_gc_time_ = 0;
1221 gc_count_last_window_ = 0;
1222 blocking_gc_count_last_window_ = 0;
1223 last_update_time_gc_count_rate_histograms_ = // Round down by the window duration.
1224 (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
1225 {
1226 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1227 gc_count_rate_histogram_.Reset();
1228 blocking_gc_count_rate_histogram_.Reset();
1229 }
1230 }
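// Rounding sketch for last_update_time_gc_count_rate_histograms_ above: integer
// division truncates, so (NanoTime() / window) * window snaps the timestamp to
// the start of the current window. For example, if the window were 10e9 ns and
// NanoTime() returned 23.7e9 ns, the result would be 2 * 10e9 = 20e9 ns.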
1231
1232 uint64_t Heap::GetGcCount() const {
1233 uint64_t gc_count = 0U;
1234 for (auto& collector : garbage_collectors_) {
1235 gc_count += collector->GetCumulativeTimings().GetIterations();
1236 }
1237 return gc_count;
1238 }
1239
1240 uint64_t Heap::GetGcTime() const {
1241 uint64_t gc_time = 0U;
1242 for (auto& collector : garbage_collectors_) {
1243 gc_time += collector->GetCumulativeTimings().GetTotalNs();
1244 }
1245 return gc_time;
1246 }
1247
1248 uint64_t Heap::GetBlockingGcCount() const {
1249 return blocking_gc_count_;
1250 }
1251
1252 uint64_t Heap::GetBlockingGcTime() const {
1253 return blocking_gc_time_;
1254 }
1255
1256 void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
1257 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1258 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1259 gc_count_rate_histogram_.DumpBins(os);
1260 }
1261 }
1262
1263 void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
1264 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1265 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1266 blocking_gc_count_rate_histogram_.DumpBins(os);
1267 }
1268 }
1269
1270 Heap::~Heap() {
1271 VLOG(heap) << "Starting ~Heap()";
1272 STLDeleteElements(&garbage_collectors_);
1273 // If we don't reset then the mark stack complains in its destructor.
1274 allocation_stack_->Reset();
1275 allocation_records_.reset();
1276 live_stack_->Reset();
1277 STLDeleteValues(&mod_union_tables_);
1278 STLDeleteValues(&remembered_sets_);
1279 STLDeleteElements(&continuous_spaces_);
1280 STLDeleteElements(&discontinuous_spaces_);
1281 delete gc_complete_lock_;
1282 delete thread_flip_lock_;
1283 delete pending_task_lock_;
1284 delete backtrace_lock_;
1285 if (unique_backtrace_count_.LoadRelaxed() != 0 || seen_backtrace_count_.LoadRelaxed() != 0) {
1286 LOG(INFO) << "gc stress unique=" << unique_backtrace_count_.LoadRelaxed()
1287 << " total=" << seen_backtrace_count_.LoadRelaxed() +
1288 unique_backtrace_count_.LoadRelaxed();
1289 }
1290 VLOG(heap) << "Finished ~Heap()";
1291 }
1292
1293 space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
1294 bool fail_ok) const {
1295 for (const auto& space : continuous_spaces_) {
1296 if (space->Contains(obj)) {
1297 return space;
1298 }
1299 }
1300 if (!fail_ok) {
1301 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
1302 }
1303 return nullptr;
1304 }
1305
1306 space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
1307 bool fail_ok) const {
1308 for (const auto& space : discontinuous_spaces_) {
1309 if (space->Contains(obj)) {
1310 return space;
1311 }
1312 }
1313 if (!fail_ok) {
1314 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
1315 }
1316 return nullptr;
1317 }
1318
1319 space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
1320 space::Space* result = FindContinuousSpaceFromObject(obj, true);
1321 if (result != nullptr) {
1322 return result;
1323 }
1324 return FindDiscontinuousSpaceFromObject(obj, fail_ok);
1325 }
1326
1327 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
1328 // If we're in a stack overflow, do not create a new exception. It would require running the
1329 // constructor, which will of course still be in a stack overflow.
1330 if (self->IsHandlingStackOverflow()) {
1331 self->SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1332 return;
1333 }
1334
1335 std::ostringstream oss;
1336 size_t total_bytes_free = GetFreeMemory();
1337 oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
1338 << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM";
1339 // If the allocation failed due to fragmentation, print out the largest continuous allocation.
1340 if (total_bytes_free >= byte_count) {
1341 space::AllocSpace* space = nullptr;
1342 if (allocator_type == kAllocatorTypeNonMoving) {
1343 space = non_moving_space_;
1344 } else if (allocator_type == kAllocatorTypeRosAlloc ||
1345 allocator_type == kAllocatorTypeDlMalloc) {
1346 space = main_space_;
1347 } else if (allocator_type == kAllocatorTypeBumpPointer ||
1348 allocator_type == kAllocatorTypeTLAB) {
1349 space = bump_pointer_space_;
1350 } else if (allocator_type == kAllocatorTypeRegion ||
1351 allocator_type == kAllocatorTypeRegionTLAB) {
1352 space = region_space_;
1353 }
1354 if (space != nullptr) {
1355 space->LogFragmentationAllocFailure(oss, byte_count);
1356 }
1357 }
1358 self->ThrowOutOfMemoryError(oss.str().c_str());
1359 }
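// Fragmentation sketch for the check above (hypothetical numbers): with
// total_bytes_free = 1 MB but a largest contiguous free run of only 64 KB, a
// 128 KB request fails even though total_bytes_free >= byte_count, which is
// exactly when LogFragmentationAllocFailure is asked to report the largest run.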
1360
1361 void Heap::DoPendingCollectorTransition() {
1362 CollectorType desired_collector_type = desired_collector_type_;
1363 // Launch homogeneous space compaction if it is desired.
1364 if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
1365 if (!CareAboutPauseTimes()) {
1366 PerformHomogeneousSpaceCompact();
1367 } else {
1368 VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
1369 }
1370 } else {
1371 TransitionCollector(desired_collector_type);
1372 }
1373 }
1374
1375 void Heap::Trim(Thread* self) {
1376 Runtime* const runtime = Runtime::Current();
1377 if (!CareAboutPauseTimes()) {
1378 // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
1379 // about pauses.
1380 ScopedTrace trace("Deflating monitors");
1381 ScopedSuspendAll ssa(__FUNCTION__);
1382 uint64_t start_time = NanoTime();
1383 size_t count = runtime->GetMonitorList()->DeflateMonitors();
1384 VLOG(heap) << "Deflating " << count << " monitors took "
1385 << PrettyDuration(NanoTime() - start_time);
1386 }
1387 TrimIndirectReferenceTables(self);
1388 TrimSpaces(self);
1389 // Trim arenas that may have been used by JIT or verifier.
1390 runtime->GetArenaPool()->TrimMaps();
1391 }
1392
1393 class TrimIndirectReferenceTableClosure : public Closure {
1394 public:
1395 explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1396 }
1397 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1398 thread->GetJniEnv()->locals.Trim();
1399 // If thread is a running mutator, then act on behalf of the trim thread.
1400 // See the code in ThreadList::RunCheckpoint.
1401 barrier_->Pass(Thread::Current());
1402 }
1403
1404 private:
1405 Barrier* const barrier_;
1406 };
1407
1408 void Heap::TrimIndirectReferenceTables(Thread* self) {
1409 ScopedObjectAccess soa(self);
1410 ScopedTrace trace(__PRETTY_FUNCTION__);
1411 JavaVMExt* vm = soa.Vm();
1412 // Trim globals indirect reference table.
1413 vm->TrimGlobals();
1414 // Trim locals indirect reference tables.
1415 Barrier barrier(0);
1416 TrimIndirectReferenceTableClosure closure(&barrier);
1417 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1418 size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1419 if (barrier_count != 0) {
1420 barrier.Increment(self, barrier_count);
1421 }
1422 }
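// Checkpoint/barrier sketch for the code above: RunCheckpoint returns the
// number of threads that will run the closure asynchronously; each closure
// calls barrier_->Pass(), and barrier.Increment(self, barrier_count) blocks
// until that many passes have occurred, so every thread's local indirect
// reference table has been trimmed before this function returns.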
1423
1424 void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
1425 MutexLock mu(self, *gc_complete_lock_);
1426 // Ensure there is only one GC at a time.
1427 WaitForGcToCompleteLocked(cause, self);
1428 collector_type_running_ = collector_type;
1429 }
1430
1431 void Heap::TrimSpaces(Thread* self) {
1432 {
1433 // Need to do this before acquiring the locks since we don't want to get suspended while
1434 // holding any locks.
1435 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1436 // Pretend we are doing a GC to prevent background compaction from deleting the space we are
1437 // trimming.
1438 StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1439 }
1440 ScopedTrace trace(__PRETTY_FUNCTION__);
1441 const uint64_t start_ns = NanoTime();
1442 // Trim the managed spaces.
1443 uint64_t total_alloc_space_allocated = 0;
1444 uint64_t total_alloc_space_size = 0;
1445 uint64_t managed_reclaimed = 0;
1446 {
1447 ScopedObjectAccess soa(self);
1448 for (const auto& space : continuous_spaces_) {
1449 if (space->IsMallocSpace()) {
1450 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1451 if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1452 // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1453 // for a long period of time.
1454 managed_reclaimed += malloc_space->Trim();
1455 }
1456 total_alloc_space_size += malloc_space->Size();
1457 }
1458 }
1459 }
1460 total_alloc_space_allocated = GetBytesAllocated();
1461 if (large_object_space_ != nullptr) {
1462 total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1463 }
1464 if (bump_pointer_space_ != nullptr) {
1465 total_alloc_space_allocated -= bump_pointer_space_->Size();
1466 }
1467 if (region_space_ != nullptr) {
1468 total_alloc_space_allocated -= region_space_->GetBytesAllocated();
1469 }
1470 const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1471 static_cast<float>(total_alloc_space_size);
1472 uint64_t gc_heap_end_ns = NanoTime();
1473 // We never move things in the native heap, so we can finish the GC at this point.
1474 FinishGC(self, collector::kGcTypeNone);
1475
1476 VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1477 << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of "
1478 << static_cast<int>(100 * managed_utilization) << "%.";
1479 }
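// Utilization sketch for the log line above (hypothetical numbers): if the
// malloc spaces sum to total_alloc_space_size = 64 MB and, after subtracting
// large object / bump pointer / region bytes, total_alloc_space_allocated =
// 48 MB, the reported managed heap utilization is 48 / 64 = 75%.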
1480
1481 bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
1482 // Note: we deliberately don't take the lock here, and mustn't test anything that would require
1483 // taking the lock.
1484 if (obj == nullptr) {
1485 return true;
1486 }
1487 return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
1488 }
1489
1490 bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
1491 return FindContinuousSpaceFromObject(obj, true) != nullptr;
1492 }
1493
1494 bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
1495 if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
1496 return false;
1497 }
1498 for (const auto& space : continuous_spaces_) {
1499 if (space->HasAddress(obj)) {
1500 return true;
1501 }
1502 }
1503 return false;
1504 }
1505
1506 bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
1507 bool search_live_stack, bool sorted) {
1508 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
1509 return false;
1510 }
1511 if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
1512 mirror::Class* klass = obj->GetClass<kVerifyNone>();
1513 if (obj == klass) {
1514 // This case happens for java.lang.Class.
1515 return true;
1516 }
1517 return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1518 } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
1519 // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1520 // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
1521 return temp_space_->Contains(obj);
1522 }
1523 if (region_space_ != nullptr && region_space_->HasAddress(obj)) {
1524 return true;
1525 }
1526 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
1527 space::DiscontinuousSpace* d_space = nullptr;
1528 if (c_space != nullptr) {
1529 if (c_space->GetLiveBitmap()->Test(obj)) {
1530 return true;
1531 }
1532 } else {
1533 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1534 if (d_space != nullptr) {
1535 if (d_space->GetLiveBitmap()->Test(obj)) {
1536 return true;
1537 }
1538 }
1539 }
1540 // This is covering the allocation/live stack swapping that is done without mutators suspended.
1541 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1542 if (i > 0) {
1543 NanoSleep(MsToNs(10));
1544 }
1545 if (search_allocation_stack) {
1546 if (sorted) {
1547 if (allocation_stack_->ContainsSorted(obj)) {
1548 return true;
1549 }
1550 } else if (allocation_stack_->Contains(obj)) {
1551 return true;
1552 }
1553 }
1554
1555 if (search_live_stack) {
1556 if (sorted) {
1557 if (live_stack_->ContainsSorted(obj)) {
1558 return true;
1559 }
1560 } else if (live_stack_->Contains(obj)) {
1561 return true;
1562 }
1563 }
1564 }
1565 // We need to check the bitmaps again since there is a race where we mark something as live and
1566 // then clear the stack containing it.
1567 if (c_space != nullptr) {
1568 if (c_space->GetLiveBitmap()->Test(obj)) {
1569 return true;
1570 }
1571 } else {
1572 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1573 if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
1574 return true;
1575 }
1576 }
1577 return false;
1578 }
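// Race sketch for the retry loop above: a concurrent collection can swap the
// allocation and live stacks between the two Contains() checks, so a freshly
// allocated object may briefly appear in neither stack. Retrying up to 5 times
// with 10 ms sleeps (in the unsorted case) makes a false "dead" answer
// unlikely, and the bitmaps are re-tested afterwards for the same reason.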
1579
1580 std::string Heap::DumpSpaces() const {
1581 std::ostringstream oss;
1582 DumpSpaces(oss);
1583 return oss.str();
1584 }
1585
1586 void Heap::DumpSpaces(std::ostream& stream) const {
1587 for (const auto& space : continuous_spaces_) {
1588 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1589 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1590 stream << space << " " << *space << "\n";
1591 if (live_bitmap != nullptr) {
1592 stream << live_bitmap << " " << *live_bitmap << "\n";
1593 }
1594 if (mark_bitmap != nullptr) {
1595 stream << mark_bitmap << " " << *mark_bitmap << "\n";
1596 }
1597 }
1598 for (const auto& space : discontinuous_spaces_) {
1599 stream << space << " " << *space << "\n";
1600 }
1601 }
1602
1603 void Heap::VerifyObjectBody(mirror::Object* obj) {
1604 if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1605 return;
1606 }
1607
1608 // Ignore early dawn of the universe verifications.
1609 if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
1610 return;
1611 }
1612 CHECK_ALIGNED(obj, kObjectAlignment) << "Object isn't aligned";
1613 mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1614 CHECK(c != nullptr) << "Null class in object " << obj;
1615 CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
1616 CHECK(VerifyClassClass(c));
1617
1618 if (verify_object_mode_ > kVerifyObjectModeFast) {
1619 // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1620 CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1621 }
1622 }
1623
1624 void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
1625 reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
1626 }
1627
1628 void Heap::VerifyHeap() {
1629 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1630 GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
1631 }
1632
1633 void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
1634 // Use signed comparison since freed bytes can be negative when background compaction/foreground
1635 // transitions occur. This is caused by moving objects from a bump pointer space to a
1636 // free-list backed space, which typically increases memory footprint due to padding and binning.
1637 DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
1638 // Note: This relies on 2's complement arithmetic for handling negative freed_bytes.
1639 num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
1640 if (Runtime::Current()->HasStatsEnabled()) {
1641 RuntimeStats* thread_stats = Thread::Current()->GetStats();
1642 thread_stats->freed_objects += freed_objects;
1643 thread_stats->freed_bytes += freed_bytes;
1644 // TODO: Do this concurrently.
1645 RuntimeStats* global_stats = Runtime::Current()->GetStats();
1646 global_stats->freed_objects += freed_objects;
1647 global_stats->freed_bytes += freed_bytes;
1648 }
1649 }
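// Negative freed_bytes sketch (hypothetical sizes, see the comment above):
// compacting ten 12-byte objects out of a densely packed bump pointer region
// (120 bytes) into 16-byte free-list slots occupies 160 bytes, so the
// transition would record freed_bytes = 120 - 160 = -40 and
// num_bytes_allocated_ grows by 40 rather than shrinking.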
1650
1651 void Heap::RecordFreeRevoke() {
1652 // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
1653 // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
1654 // If there's a concurrent revoke, it is OK not to reset num_bytes_freed_revoke_
1655 // all the way to zero, since the remainder will be subtracted at the next GC.
1656 size_t bytes_freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent();
1657 CHECK_GE(num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(bytes_freed),
1658 bytes_freed) << "num_bytes_freed_revoke_ underflow";
1659 CHECK_GE(num_bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes_freed),
1660 bytes_freed) << "num_bytes_allocated_ underflow";
1661 GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1662 }
1663
1664 space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1665 if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) {
1666 return rosalloc_space_;
1667 }
1668 for (const auto& space : continuous_spaces_) {
1669 if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1670 if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1671 return space->AsContinuousSpace()->AsRosAllocSpace();
1672 }
1673 }
1674 }
1675 return nullptr;
1676 }
1677
1678 static inline bool EntrypointsInstrumented() SHARED_REQUIRES(Locks::mutator_lock_) {
1679 instrumentation::Instrumentation* const instrumentation =
1680 Runtime::Current()->GetInstrumentation();
1681 return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
1682 }
1683
1684 mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
1685 AllocatorType allocator,
1686 bool instrumented,
1687 size_t alloc_size,
1688 size_t* bytes_allocated,
1689 size_t* usable_size,
1690 size_t* bytes_tl_bulk_allocated,
1691 mirror::Class** klass) {
1692 bool was_default_allocator = allocator == GetCurrentAllocator();
1693 // Make sure there is no pending exception since we may need to throw an OOME.
1694 self->AssertNoPendingException();
1695 DCHECK(klass != nullptr);
1696 StackHandleScope<1> hs(self);
1697 HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
1698 klass = nullptr; // Invalidate for safety.
1699 // The allocation failed. If the GC is running, block until it completes, and then retry the
1700 // allocation.
1701 collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
1702 // If we were the default allocator but the allocator changed while we were suspended,
1703 // abort the allocation.
1704 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1705 (!instrumented && EntrypointsInstrumented())) {
1706 return nullptr;
1707 }
1708 if (last_gc != collector::kGcTypeNone) {
1709 // A GC was in progress and we blocked, retry allocation now that memory has been freed.
1710 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1711 usable_size, bytes_tl_bulk_allocated);
1712 if (ptr != nullptr) {
1713 return ptr;
1714 }
1715 }
1716
1717 collector::GcType tried_type = next_gc_type_;
1718 const bool gc_ran =
1719 CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1720 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1721 (!instrumented && EntrypointsInstrumented())) {
1722 return nullptr;
1723 }
1724 if (gc_ran) {
1725 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1726 usable_size, bytes_tl_bulk_allocated);
1727 if (ptr != nullptr) {
1728 return ptr;
1729 }
1730 }
1731
1732 // Loop through our different GC types and try to GC until we get enough free memory.
1733 for (collector::GcType gc_type : gc_plan_) {
1734 if (gc_type == tried_type) {
1735 continue;
1736 }
1737 // Attempt to run the collector, if we succeed, re-try the allocation.
1738 const bool plan_gc_ran =
1739 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1740 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1741 (!instrumented && EntrypointsInstrumented())) {
1742 return nullptr;
1743 }
1744 if (plan_gc_ran) {
1745 // Did we free sufficient memory for the allocation to succeed?
1746 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1747 usable_size, bytes_tl_bulk_allocated);
1748 if (ptr != nullptr) {
1749 return ptr;
1750 }
1751 }
1752 }
1753 // Allocations have failed after GCs; this is an exceptional state.
1754 // Try harder, growing the heap if necessary.
1755 mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1756 usable_size, bytes_tl_bulk_allocated);
1757 if (ptr != nullptr) {
1758 return ptr;
1759 }
1760 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1761 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1762 // VM spec requires that all SoftReferences have been collected and cleared before throwing
1763 // OOME.
1764 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1765 << " allocation";
1766 // TODO: Run finalization, but this may cause more allocations to occur.
1767 // We don't need a WaitForGcToComplete here either.
1768 DCHECK(!gc_plan_.empty());
1769 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1770 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1771 (!instrumented && EntrypointsInstrumented())) {
1772 return nullptr;
1773 }
1774 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
1775 bytes_tl_bulk_allocated);
1776 if (ptr == nullptr) {
1777 const uint64_t current_time = NanoTime();
1778 switch (allocator) {
1779 case kAllocatorTypeRosAlloc:
1780 // Fall-through.
1781 case kAllocatorTypeDlMalloc: {
1782 if (use_homogeneous_space_compaction_for_oom_ &&
1783 current_time - last_time_homogeneous_space_compaction_by_oom_ >
1784 min_interval_homogeneous_space_compaction_by_oom_) {
1785 last_time_homogeneous_space_compaction_by_oom_ = current_time;
1786 HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
1787 // Thread suspension could have occurred.
1788 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1789 (!instrumented && EntrypointsInstrumented())) {
1790 return nullptr;
1791 }
1792 switch (result) {
1793 case HomogeneousSpaceCompactResult::kSuccess:
1794 // If the allocation succeeded, we delayed an oom.
1795 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1796 usable_size, bytes_tl_bulk_allocated);
1797 if (ptr != nullptr) {
1798 count_delayed_oom_++;
1799 }
1800 break;
1801 case HomogeneousSpaceCompactResult::kErrorReject:
1802 // Reject due to disabled moving GC.
1803 break;
1804 case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1805 // Throw OOM by default.
1806 break;
1807 default: {
1808 UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1809 << static_cast<size_t>(result);
1810 UNREACHABLE();
1811 }
1812 }
1813 // Always print that we ran homogeneous space compaction since this can cause jank.
1814 VLOG(heap) << "Ran heap homogeneous space compaction, "
1815 << " requested defragmentation "
1816 << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1817 << " performed defragmentation "
1818 << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1819 << " ignored homogeneous space compaction "
1820 << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1821 << " delayed count = "
1822 << count_delayed_oom_.LoadSequentiallyConsistent();
1823 }
1824 break;
1825 }
1826 case kAllocatorTypeNonMoving: {
1827 // Try to transition the heap if the allocation failure was due to the space being full.
1828 if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
1829 // If we aren't out of memory then the OOM was probably from the non moving space being
1830 // full. Attempt to disable compaction and turn the main space into a non moving space.
1831 DisableMovingGc();
1832 // Thread suspension could have occurred.
1833 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1834 (!instrumented && EntrypointsInstrumented())) {
1835 return nullptr;
1836 }
1837 // If we are still a moving GC then something must have caused the transition to fail.
1838 if (IsMovingGc(collector_type_)) {
1839 MutexLock mu(self, *gc_complete_lock_);
1840 // If we couldn't disable moving GC, just throw OOME and return null.
1841 LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1842 << disable_moving_gc_count_;
1843 } else {
1844 LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1845 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1846 usable_size, bytes_tl_bulk_allocated);
1847 }
1848 }
1849 break;
1850 }
1851 default: {
1852 // Do nothing for other allocators.
1853 }
1854 }
1855 }
1856 // If the allocation hasn't succeeded by this point, throw an OOM error.
1857 if (ptr == nullptr) {
1858 ThrowOutOfMemoryError(self, alloc_size, allocator);
1859 }
1860 return ptr;
1861 }
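// Summary of the allocation retry ladder implemented above, in order:
// 1. Wait for any in-progress GC, then retry the allocation.
// 2. Run the next planned GC type and retry.
// 3. Run each remaining GC type in gc_plan_, retrying after each.
// 4. Retry once more with heap growth allowed.
// 5. Run the most thorough GC, clearing SoftReferences, and retry with growth.
// 6. Fall back to allocator-specific measures (homogeneous space compaction
//    for rosalloc/dlmalloc, disabling moving GC for the non-moving space).
// If all of that fails, ThrowOutOfMemoryError is called.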
1862
1863 void Heap::SetTargetHeapUtilization(float target) {
1864 DCHECK_GT(target, 0.0f); // asserted in Java code
1865 DCHECK_LT(target, 1.0f);
1866 target_utilization_ = target;
1867 }
1868
1869 size_t Heap::GetObjectsAllocated() const {
1870 Thread* const self = Thread::Current();
1871 ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
1872 // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
1873 ScopedSuspendAll ssa(__FUNCTION__);
1874 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1875 size_t total = 0;
1876 for (space::AllocSpace* space : alloc_spaces_) {
1877 total += space->GetObjectsAllocated();
1878 }
1879 return total;
1880 }
1881
1882 uint64_t Heap::GetObjectsAllocatedEver() const {
1883 uint64_t total = GetObjectsFreedEver();
1884 // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
1885 if (Thread::Current() != nullptr) {
1886 total += GetObjectsAllocated();
1887 }
1888 return total;
1889 }
1890
1891 uint64_t Heap::GetBytesAllocatedEver() const {
1892 return GetBytesFreedEver() + GetBytesAllocated();
1893 }
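// Identity sketch for the "ever" counters: bytes allocated ever equals bytes
// freed ever plus bytes currently allocated. For example (hypothetical), a
// process that has freed 300 MB over its lifetime and has 40 MB currently
// live reports GetBytesAllocatedEver() = 340 MB.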
1894
1895 class InstanceCounter {
1896 public:
1897 InstanceCounter(const std::vector<mirror::Class*>& classes,
1898 bool use_is_assignable_from,
1899 uint64_t* counts)
1900 SHARED_REQUIRES(Locks::mutator_lock_)
1901 : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {}
1902
1903 static void Callback(mirror::Object* obj, void* arg)
1904 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1905 InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
1906 mirror::Class* instance_class = obj->GetClass();
1907 CHECK(instance_class != nullptr);
1908 for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
1909 mirror::Class* klass = instance_counter->classes_[i];
1910 if (instance_counter->use_is_assignable_from_) {
1911 if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
1912 ++instance_counter->counts_[i];
1913 }
1914 } else if (instance_class == klass) {
1915 ++instance_counter->counts_[i];
1916 }
1917 }
1918 }
1919
1920 private:
1921 const std::vector<mirror::Class*>& classes_;
1922 bool use_is_assignable_from_;
1923 uint64_t* const counts_;
1924 DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
1925 };
1926
CountInstances(const std::vector<mirror::Class * > & classes,bool use_is_assignable_from,uint64_t * counts)1927 void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
1928 uint64_t* counts) {
1929 InstanceCounter counter(classes, use_is_assignable_from, counts);
1930 VisitObjects(InstanceCounter::Callback, &counter);
1931 }
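// Usage sketch for CountInstances (illustrative only; string_class is a
// hypothetical mirror::Class* already in hand, and the caller must satisfy
// the locking requirements of VisitObjects):
//   std::vector<mirror::Class*> classes = { string_class };
//   uint64_t counts[1] = { 0u };
//   heap->CountInstances(classes, /* use_is_assignable_from */ false, counts);
//   // counts[0] now holds the number of exact instances of string_class.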
1932
1933 class InstanceCollector {
1934 public:
1935 InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
1936 SHARED_REQUIRES(Locks::mutator_lock_)
1937 : class_(c), max_count_(max_count), instances_(instances) {
1938 }
1939 static void Callback(mirror::Object* obj, void* arg)
1940 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1941 DCHECK(arg != nullptr);
1942 InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
1943 if (obj->GetClass() == instance_collector->class_) {
1944 if (instance_collector->max_count_ == 0 ||
1945 instance_collector->instances_.size() < instance_collector->max_count_) {
1946 instance_collector->instances_.push_back(obj);
1947 }
1948 }
1949 }
1950
1951 private:
1952 const mirror::Class* const class_;
1953 const uint32_t max_count_;
1954 std::vector<mirror::Object*>& instances_;
1955 DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1956 };
1957
1958 void Heap::GetInstances(mirror::Class* c,
1959 int32_t max_count,
1960 std::vector<mirror::Object*>& instances) {
1961 InstanceCollector collector(c, max_count, instances);
1962 VisitObjects(&InstanceCollector::Callback, &collector);
1963 }
1964
1965 class ReferringObjectsFinder {
1966 public:
1967 ReferringObjectsFinder(mirror::Object* object,
1968 int32_t max_count,
1969 std::vector<mirror::Object*>& referring_objects)
1970 SHARED_REQUIRES(Locks::mutator_lock_)
1971 : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1972 }
1973
1974 static void Callback(mirror::Object* obj, void* arg)
1975 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1976 reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
1977 }
1978
1979 // For bitmap Visit.
1980 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1981 // annotalysis on visitors.
1982 void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
1983 o->VisitReferences(*this, VoidFunctor());
1984 }
1985
1986 // For Object::VisitReferences.
1987 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
1988 SHARED_REQUIRES(Locks::mutator_lock_) {
1989 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
1990 if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1991 referring_objects_.push_back(obj);
1992 }
1993 }
1994
1995 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
1996 const {}
1997 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
1998
1999 private:
2000 const mirror::Object* const object_;
2001 const uint32_t max_count_;
2002 std::vector<mirror::Object*>& referring_objects_;
2003 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
2004 };
2005
2006 void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
2007 std::vector<mirror::Object*>& referring_objects) {
2008 ReferringObjectsFinder finder(o, max_count, referring_objects);
2009 VisitObjects(&ReferringObjectsFinder::Callback, &finder);
2010 }
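// Usage sketch for GetReferringObjects (illustrative only; "target" is a
// hypothetical mirror::Object*, and the caller must satisfy the locking
// requirements of VisitObjects):
//   std::vector<mirror::Object*> referrers;
//   heap->GetReferringObjects(target, /* max_count */ 16, referrers);
//   // referrers now holds up to 16 objects with a field referencing target.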
2011
2012 void Heap::CollectGarbage(bool clear_soft_references) {
2013 // Even if we waited for a GC we still need to do another GC since weaks allocated during the
2014 // last GC will not necessarily have been cleared.
2015 CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
2016 }
2017
2018 bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
2019 return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
2020 foreground_collector_type_ == kCollectorTypeCMS;
2021 }
2022
2023 HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
2024 Thread* self = Thread::Current();
2025 // Increment the requested homogeneous space compaction count.
2026 count_requested_homogeneous_space_compaction_++;
2027 // Store performed homogeneous space compaction at a new request arrival.
2028 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2029 Locks::mutator_lock_->AssertNotHeld(self);
2030 {
2031 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2032 MutexLock mu(self, *gc_complete_lock_);
2033 // Ensure there is only one GC at a time.
2034 WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
2035 // Homogeneous space compaction is a copying transition; we can't run it if the moving GC
2036 // disable count is non-zero.
2037 // If the collector type changed to something which doesn't benefit from homogeneous space
2038 // compaction, exit.
2039 if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
2040 !main_space_->CanMoveObjects()) {
2041 return kErrorReject;
2042 }
2043 if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
2044 return kErrorUnsupported;
2045 }
2046 collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
2047 }
2048 if (Runtime::Current()->IsShuttingDown(self)) {
2049 // Don't allow heap transitions to happen if the runtime is shutting down since these can
2050 // cause objects to get finalized.
2051 FinishGC(self, collector::kGcTypeNone);
2052 return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
2053 }
2054 collector::GarbageCollector* collector;
2055 {
2056 ScopedSuspendAll ssa(__FUNCTION__);
2057 uint64_t start_time = NanoTime();
2058 // Launch compaction.
2059 space::MallocSpace* to_space = main_space_backup_.release();
2060 space::MallocSpace* from_space = main_space_;
2061 to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2062 const uint64_t space_size_before_compaction = from_space->Size();
2063 AddSpace(to_space);
2064 // Make sure that we will have enough room to copy.
2065 CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
2066 collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
2067 const uint64_t space_size_after_compaction = to_space->Size();
2068 main_space_ = to_space;
2069 main_space_backup_.reset(from_space);
2070 RemoveSpace(from_space);
2071 SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
2072 // Update performed homogeneous space compaction count.
2073 count_performed_homogeneous_space_compaction_++;
2074 // Print the statistics log and resume all threads.
2075 uint64_t duration = NanoTime() - start_time;
2076 VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
2077 << PrettySize(space_size_before_compaction) << " -> "
2078 << PrettySize(space_size_after_compaction) << " compact-ratio: "
2079 << std::fixed << static_cast<double>(space_size_after_compaction) /
2080 static_cast<double>(space_size_before_compaction);
2081 }
2082 // Finish GC.
2083 reference_processor_->EnqueueClearedReferences(self);
2084 GrowForUtilization(semi_space_collector_);
2085 LogGC(kGcCauseHomogeneousSpaceCompact, collector);
2086 FinishGC(self, collector::kGcTypeFull);
2087 {
2088 ScopedObjectAccess soa(self);
2089 soa.Vm()->UnloadNativeLibraries();
2090 }
2091 return HomogeneousSpaceCompactResult::kSuccess;
2092 }
2093
2094 void Heap::TransitionCollector(CollectorType collector_type) {
2095 if (collector_type == collector_type_) {
2096 return;
2097 }
2098 VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
2099 << " -> " << static_cast<int>(collector_type);
2100 uint64_t start_time = NanoTime();
2101 uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
2102 Runtime* const runtime = Runtime::Current();
2103 Thread* const self = Thread::Current();
2104 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2105 Locks::mutator_lock_->AssertNotHeld(self);
2106 // Busy wait until we can GC (StartGC can fail if we have a non-zero
2107 // compacting_gc_disable_count_; this should rarely occur).
2108 for (;;) {
2109 {
2110 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2111 MutexLock mu(self, *gc_complete_lock_);
2112 // Ensure there is only one GC at a time.
2113 WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
2114 // Currently we only need a heap transition if we switch from a moving collector to a
2115 // non-moving one, or vice versa.
2116 const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
2117 // If someone else beat us to it and changed the collector before we could, exit.
2118 // This is safe to do before the suspend all since we set the collector_type_running_ before
2119 // we exit the loop. If another thread attempts to do the heap transition before we exit,
2120 // then it would get blocked on WaitForGcToCompleteLocked.
2121 if (collector_type == collector_type_) {
2122 return;
2123 }
2124 // GC can be disabled if someone has used GetPrimitiveArrayCritical but not yet released it.
2125 if (!copying_transition || disable_moving_gc_count_ == 0) {
2126 // TODO: Not hard code in semi-space collector?
2127 collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
2128 break;
2129 }
2130 }
2131 usleep(1000);
2132 }
2133 if (runtime->IsShuttingDown(self)) {
2134 // Don't allow heap transitions to happen if the runtime is shutting down since these can
2135 // cause objects to get finalized.
2136 FinishGC(self, collector::kGcTypeNone);
2137 return;
2138 }
2139 collector::GarbageCollector* collector = nullptr;
2140 {
2141 ScopedSuspendAll ssa(__FUNCTION__);
2142 switch (collector_type) {
2143 case kCollectorTypeSS: {
2144 if (!IsMovingGc(collector_type_)) {
2145 // Create the bump pointer space from the backup space.
2146 CHECK(main_space_backup_ != nullptr);
2147 std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
2148 // We are transitioning from non-moving GC -> moving GC; since we copied from the bump
2149 // pointer space during the last transition, it will be protected.
2150 CHECK(mem_map != nullptr);
2151 mem_map->Protect(PROT_READ | PROT_WRITE);
2152 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
2153 mem_map.release());
2154 AddSpace(bump_pointer_space_);
2155 collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
2156 // Use the now empty main space mem map for the bump pointer temp space.
2157 mem_map.reset(main_space_->ReleaseMemMap());
2158 // Unset the pointers just in case.
2159 if (dlmalloc_space_ == main_space_) {
2160 dlmalloc_space_ = nullptr;
2161 } else if (rosalloc_space_ == main_space_) {
2162 rosalloc_space_ = nullptr;
2163 }
2164 // Remove the main space so that we don't try to trim it; trimming doesn't work for debug
2165 // builds since RosAlloc attempts to read the magic number from a protected page.
2166 RemoveSpace(main_space_);
2167 RemoveRememberedSet(main_space_);
2168 delete main_space_; // Delete the space since it has been removed.
2169 main_space_ = nullptr;
2170 RemoveRememberedSet(main_space_backup_.get());
2171 main_space_backup_.reset(nullptr); // Deletes the space.
2172 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
2173 mem_map.release());
2174 AddSpace(temp_space_);
2175 }
2176 break;
2177 }
2178 case kCollectorTypeMS:
2179 // Fall through.
2180 case kCollectorTypeCMS: {
2181 if (IsMovingGc(collector_type_)) {
2182 CHECK(temp_space_ != nullptr);
2183 std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
2184 RemoveSpace(temp_space_);
2185 temp_space_ = nullptr;
2186 mem_map->Protect(PROT_READ | PROT_WRITE);
2187 CreateMainMallocSpace(mem_map.get(),
2188 kDefaultInitialSize,
2189 std::min(mem_map->Size(), growth_limit_),
2190 mem_map->Size());
2191 mem_map.release();
2192 // Compact to the main space from the bump pointer space, don't need to swap semispaces.
2193 AddSpace(main_space_);
2194 collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
2195 mem_map.reset(bump_pointer_space_->ReleaseMemMap());
2196 RemoveSpace(bump_pointer_space_);
2197 bump_pointer_space_ = nullptr;
2198 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
2199 // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
2200 if (kIsDebugBuild && kUseRosAlloc) {
2201 mem_map->Protect(PROT_READ | PROT_WRITE);
2202 }
2203 main_space_backup_.reset(CreateMallocSpaceFromMemMap(
2204 mem_map.get(),
2205 kDefaultInitialSize,
2206 std::min(mem_map->Size(), growth_limit_),
2207 mem_map->Size(),
2208 name,
2209 true));
2210 if (kIsDebugBuild && kUseRosAlloc) {
2211 mem_map->Protect(PROT_NONE);
2212 }
2213 mem_map.release();
2214 }
2215 break;
2216 }
2217 default: {
2218 LOG(FATAL) << "Attempted to transition to invalid collector type "
2219 << static_cast<size_t>(collector_type);
2220 break;
2221 }
2222 }
2223 ChangeCollector(collector_type);
2224 }
2225 // Can't call into java code with all threads suspended.
2226 reference_processor_->EnqueueClearedReferences(self);
2227 uint64_t duration = NanoTime() - start_time;
2228 GrowForUtilization(semi_space_collector_);
2229 DCHECK(collector != nullptr);
2230 LogGC(kGcCauseCollectorTransition, collector);
2231 FinishGC(self, collector::kGcTypeFull);
2232 {
2233 ScopedObjectAccess soa(self);
2234 soa.Vm()->UnloadNativeLibraries();
2235 }
2236 int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
2237 int32_t delta_allocated = before_allocated - after_allocated;
2238 std::string saved_str;
2239 if (delta_allocated >= 0) {
2240 saved_str = " saved at least " + PrettySize(delta_allocated);
2241 } else {
2242 saved_str = " expanded " + PrettySize(-delta_allocated);
2243 }
2244 VLOG(heap) << "Collector transition to " << collector_type << " took "
2245 << PrettyDuration(duration) << saved_str;
2246 }
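// Delta sketch for the log line above (hypothetical numbers): with
// before_allocated = 80 MB and after_allocated = 64 MB, delta_allocated is
// +16 MB and the transition logs "saved at least 16MB"; a negative delta
// instead logs how far the heap expanded.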
2247
2248 void Heap::ChangeCollector(CollectorType collector_type) {
2249 // TODO: Only do this with all mutators suspended to avoid races.
2250 if (collector_type != collector_type_) {
2251 if (collector_type == kCollectorTypeMC) {
2252 // Don't allow mark compact unless support is compiled in.
2253 CHECK(kMarkCompactSupport);
2254 }
2255 collector_type_ = collector_type;
2256 gc_plan_.clear();
2257 switch (collector_type_) {
2258 case kCollectorTypeCC: {
2259 gc_plan_.push_back(collector::kGcTypeFull);
2260 if (use_tlab_) {
2261 ChangeAllocator(kAllocatorTypeRegionTLAB);
2262 } else {
2263 ChangeAllocator(kAllocatorTypeRegion);
2264 }
2265 break;
2266 }
2267 case kCollectorTypeMC: // Fall-through.
2268 case kCollectorTypeSS: // Fall-through.
2269 case kCollectorTypeGSS: {
2270 gc_plan_.push_back(collector::kGcTypeFull);
2271 if (use_tlab_) {
2272 ChangeAllocator(kAllocatorTypeTLAB);
2273 } else {
2274 ChangeAllocator(kAllocatorTypeBumpPointer);
2275 }
2276 break;
2277 }
2278 case kCollectorTypeMS: {
2279 gc_plan_.push_back(collector::kGcTypeSticky);
2280 gc_plan_.push_back(collector::kGcTypePartial);
2281 gc_plan_.push_back(collector::kGcTypeFull);
2282 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2283 break;
2284 }
2285 case kCollectorTypeCMS: {
2286 gc_plan_.push_back(collector::kGcTypeSticky);
2287 gc_plan_.push_back(collector::kGcTypePartial);
2288 gc_plan_.push_back(collector::kGcTypeFull);
2289 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2290 break;
2291 }
2292 default: {
2293 UNIMPLEMENTED(FATAL);
2294 UNREACHABLE();
2295 }
2296 }
2297 if (IsGcConcurrent()) {
2298 concurrent_start_bytes_ =
2299 std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
2300 } else {
2301 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2302 }
2303 }
2304 }
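// Concurrent start sketch for the computation above (hypothetical footprint):
// with max_allowed_footprint_ = 64 MB, concurrent_start_bytes_ becomes
// 64 MB - kMinConcurrentRemainingBytes, so a concurrent GC is kicked off once
// allocation crosses that watermark, leaving that minimum headroom for
// mutators to allocate into while the collection runs.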
2305
2306 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
2307 class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
2308 public:
2309 ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
2310 : SemiSpace(heap, false, "zygote collector"),
2311 bin_live_bitmap_(nullptr),
2312 bin_mark_bitmap_(nullptr),
2313 is_running_on_memory_tool_(is_running_on_memory_tool) {}
2314
2315 void BuildBins(space::ContinuousSpace* space) {
2316 bin_live_bitmap_ = space->GetLiveBitmap();
2317 bin_mark_bitmap_ = space->GetMarkBitmap();
2318 BinContext context;
2319 context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
2320 context.collector_ = this;
2321 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2322 // Note: This requires traversing the space in increasing order of object addresses.
2323 bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
2324 // Add the last bin which spans after the last object to the end of the space.
2325 AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
2326 }
2327
2328 private:
2329 struct BinContext {
2330 uintptr_t prev_; // The end of the previous object.
2331 ZygoteCompactingCollector* collector_;
2332 };
2333 // Maps from bin sizes to locations.
2334 std::multimap<size_t, uintptr_t> bins_;
2335 // Live bitmap of the space which contains the bins.
2336 accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
2337 // Mark bitmap of the space which contains the bins.
2338 accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
2339 const bool is_running_on_memory_tool_;
2340
2341 static void Callback(mirror::Object* obj, void* arg)
2342 SHARED_REQUIRES(Locks::mutator_lock_) {
2343 DCHECK(arg != nullptr);
2344 BinContext* context = reinterpret_cast<BinContext*>(arg);
2345 ZygoteCompactingCollector* collector = context->collector_;
2346 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
2347 size_t bin_size = object_addr - context->prev_;
2348 // Add the bin spanning from the end of the previous object to the start of the current object.
2349 collector->AddBin(bin_size, context->prev_);
2350 context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
2351 }
2352
2353 void AddBin(size_t size, uintptr_t position) {
2354 if (is_running_on_memory_tool_) {
2355 MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
2356 }
2357 if (size != 0) {
2358 bins_.insert(std::make_pair(size, position));
2359 }
2360 }
2361
2362 virtual bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const {
2363 // Don't sweep any spaces since we probably blasted the internal accounting of the free list
2364 // allocator.
2365 return false;
2366 }
2367
2368 virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
2369 REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
2370 size_t obj_size = obj->SizeOf();
2371 size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
2372 mirror::Object* forward_address;
2373 // Find the smallest bin into which we can move obj.
2374 auto it = bins_.lower_bound(alloc_size);
2375 if (it == bins_.end()) {
2376 // No available space in the bins, place it in the target space instead (grows the zygote
2377 // space).
2378 size_t bytes_allocated, dummy;
2379 forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy);
2380 if (to_space_live_bitmap_ != nullptr) {
2381 to_space_live_bitmap_->Set(forward_address);
2382 } else {
2383 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2384 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
2385 }
2386 } else {
2387 size_t size = it->first;
2388 uintptr_t pos = it->second;
2389 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
2390 forward_address = reinterpret_cast<mirror::Object*>(pos);
2391 // Set the live and mark bits so that sweeping system weaks works properly.
2392 bin_live_bitmap_->Set(forward_address);
2393 bin_mark_bitmap_->Set(forward_address);
2394 DCHECK_GE(size, alloc_size);
2395 // Add a new bin with the remaining space.
2396 AddBin(size - alloc_size, pos + alloc_size);
2397 }
2398 // Copy the object over to its new location. Don't use alloc_size to avoid valgrind error.
2399 memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
2400 if (kUseBakerOrBrooksReadBarrier) {
2401 obj->AssertReadBarrierPointer();
2402 if (kUseBrooksReadBarrier) {
2403 DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
2404 forward_address->SetReadBarrierPointer(forward_address);
2405 }
2406 forward_address->AssertReadBarrierPointer();
2407 }
2408 return forward_address;
2409 }
2410 };
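// Best-fit sketch for MarkNonForwardedObject above (hypothetical bins): with
// bins_ = { {32, a}, {64, b}, {128, c} } and alloc_size = 40, lower_bound(40)
// selects the 64-byte bin at position b; the object is forwarded to b and a
// new 24-byte bin is re-inserted at b + 40 via AddBin.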
2411
2412 void Heap::UnBindBitmaps() {
2413 TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
2414 for (const auto& space : GetContinuousSpaces()) {
2415 if (space->IsContinuousMemMapAllocSpace()) {
2416 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2417 if (alloc_space->HasBoundBitmaps()) {
2418 alloc_space->UnBindBitmaps();
2419 }
2420 }
2421 }
2422 }
2423
2424 void Heap::PreZygoteFork() {
2425 if (!HasZygoteSpace()) {
2426 // We still want to GC in case there are some unreachable non-moving objects that could cause a
2427 // suboptimal bin packing when we compact the zygote space.
2428 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
2429 // Trim the pages at the end of the non moving space. Trim while not holding zygote lock since
2430 // the trim process may require locking the mutator lock.
2431 non_moving_space_->Trim();
2432 }
2433 Thread* self = Thread::Current();
2434 MutexLock mu(self, zygote_creation_lock_);
2435 // Try to see if we have any Zygote spaces.
2436 if (HasZygoteSpace()) {
2437 return;
2438 }
2439 Runtime::Current()->GetInternTable()->AddNewTable();
2440 Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
2441 VLOG(heap) << "Starting PreZygoteFork";
2442 // The end of the non-moving space may be protected, unprotect it so that we can copy the zygote
2443 // there.
2444 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2445 const bool same_space = non_moving_space_ == main_space_;
2446 if (kCompactZygote) {
2447 // Temporarily disable rosalloc verification because the zygote
2448 // compaction will mess up the rosalloc internal metadata.
2449 ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
2450 ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
2451 zygote_collector.BuildBins(non_moving_space_);
2452 // Create a new bump pointer space which we will compact into.
2453 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2454 non_moving_space_->Limit());
2455 // Compact the bump pointer space to a new zygote bump pointer space.
2456 bool reset_main_space = false;
2457 if (IsMovingGc(collector_type_)) {
2458 if (collector_type_ == kCollectorTypeCC) {
2459 zygote_collector.SetFromSpace(region_space_);
2460 } else {
2461 zygote_collector.SetFromSpace(bump_pointer_space_);
2462 }
2463 } else {
2464 CHECK(main_space_ != nullptr);
2465 CHECK_NE(main_space_, non_moving_space_)
2466 << "Does not make sense to compact within the same space";
2467 // Copy from the main space.
2468 zygote_collector.SetFromSpace(main_space_);
2469 reset_main_space = true;
2470 }
2471 zygote_collector.SetToSpace(&target_space);
2472 zygote_collector.SetSwapSemiSpaces(false);
2473 zygote_collector.Run(kGcCauseCollectorTransition, false);
2474 if (reset_main_space) {
2475 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2476 madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
2477 MemMap* mem_map = main_space_->ReleaseMemMap();
2478 RemoveSpace(main_space_);
2479 space::Space* old_main_space = main_space_;
2480 CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
2481 mem_map->Size());
2482 delete old_main_space;
2483 AddSpace(main_space_);
2484 } else {
2485 if (collector_type_ == kCollectorTypeCC) {
2486 region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2487 } else {
2488 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2489 }
2490 }
2491 if (temp_space_ != nullptr) {
2492 CHECK(temp_space_->IsEmpty());
2493 }
2494 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2495 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2496 // Update the end and write out image.
2497 non_moving_space_->SetEnd(target_space.End());
2498 non_moving_space_->SetLimit(target_space.Limit());
2499 VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
2500 }
2501 // Change the collector to the post zygote one.
2502 ChangeCollector(foreground_collector_type_);
2503 // Save the old space so that we can remove it after we complete creating the zygote space.
2504 space::MallocSpace* old_alloc_space = non_moving_space_;
2505 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
2506 // the remaining available space.
2507 // Remove the old space before creating the zygote space since creating the zygote space sets
2508 // the old alloc space's bitmaps to null.
2509 RemoveSpace(old_alloc_space);
2510 if (collector::SemiSpace::kUseRememberedSet) {
2511 // Sanity bound check.
2512 FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2513 // Remove the remembered set for the now zygote space (the old
2514 // non-moving space). Note that, since we have compacted objects into
2515 // the zygote space, the data in the remembered set is no longer
2516 // needed. The zygote space will instead have a mod-union table
2517 // from this point on.
2518 RemoveRememberedSet(old_alloc_space);
2519 }
2520 // The remaining space becomes the new non-moving space.
2521 zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
2522 &non_moving_space_);
2523 CHECK(!non_moving_space_->CanMoveObjects());
2524 if (same_space) {
2525 main_space_ = non_moving_space_;
2526 SetSpaceAsDefault(main_space_);
2527 }
2528 delete old_alloc_space;
2529 CHECK(HasZygoteSpace()) << "Failed creating zygote space";
2530 AddSpace(zygote_space_);
2531 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2532 AddSpace(non_moving_space_);
2533 // Create the zygote space mod union table.
2534 accounting::ModUnionTable* mod_union_table =
2535 new accounting::ModUnionTableCardCache("zygote space mod-union table", this,
2536 zygote_space_);
2537 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
2538 // Set all the cards in the mod-union table since we don't know which objects contain references
2539 // to large objects.
2540 mod_union_table->SetCards();
2541 AddModUnionTable(mod_union_table);
2542 large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
2543 if (collector::SemiSpace::kUseRememberedSet) {
2544 // Add a new remembered set for the post-zygote non-moving space.
2545 accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2546 new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2547 non_moving_space_);
2548 CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2549 << "Failed to create post-zygote non-moving space remembered set";
2550 AddRememberedSet(post_zygote_non_moving_space_rem_set);
2551 }
2552 }
2553
2554 void Heap::FlushAllocStack() {
2555 MarkAllocStackAsLive(allocation_stack_.get());
2556 allocation_stack_->Reset();
2557 }
2558
2559 void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2560 accounting::ContinuousSpaceBitmap* bitmap2,
2561 accounting::LargeObjectBitmap* large_objects,
2562 accounting::ObjectStack* stack) {
2563 DCHECK(bitmap1 != nullptr);
2564 DCHECK(bitmap2 != nullptr);
2565 const auto* limit = stack->End();
2566 for (auto* it = stack->Begin(); it != limit; ++it) {
2567 const mirror::Object* obj = it->AsMirrorPtr();
2568 if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2569 if (bitmap1->HasAddress(obj)) {
2570 bitmap1->Set(obj);
2571 } else if (bitmap2->HasAddress(obj)) {
2572 bitmap2->Set(obj);
2573 } else {
2574 DCHECK(large_objects != nullptr);
2575 large_objects->Set(obj);
2576 }
2577 }
2578 }
2579 }
2580
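// Swapping only exchanges the two space pointers: after a semi-space collection the former
// to-space becomes the space that new objects are bump-allocated into, while the drained
// from-space becomes the temp space for the next collection.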
2581 void Heap::SwapSemiSpaces() {
2582 CHECK(bump_pointer_space_ != nullptr);
2583 CHECK(temp_space_ != nullptr);
2584 std::swap(bump_pointer_space_, temp_space_);
2585 }
2586
2587 collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2588 space::ContinuousMemMapAllocSpace* source_space,
2589 GcCause gc_cause) {
2590 CHECK(kMovingCollector);
2591 if (target_space != source_space) {
2592 // Don't swap spaces since this isn't a typical semi space collection.
2593 semi_space_collector_->SetSwapSemiSpaces(false);
2594 semi_space_collector_->SetFromSpace(source_space);
2595 semi_space_collector_->SetToSpace(target_space);
2596 semi_space_collector_->Run(gc_cause, false);
2597 return semi_space_collector_;
2598 } else {
2599 CHECK(target_space->IsBumpPointerSpace())
2600 << "In-place compaction is only supported for bump pointer spaces";
2601 mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
2602 mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
2603 return mark_compact_collector_;
2604 }
2605 }
2606
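// Runs a single collection. The overall flow is: wait for any in-progress GC to finish, pick a
// collector matching collector_type_ and the requested gc_type, run it, then grow the heap for
// the target utilization and log the results. Returns the GC type that actually ran, or
// kGcTypeNone if the collection could not run (e.g. a partial GC with no zygote space, or a
// moving GC while moving GCs are disabled).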
2607 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
2608 GcCause gc_cause,
2609 bool clear_soft_references) {
2610 Thread* self = Thread::Current();
2611 Runtime* runtime = Runtime::Current();
2612 // If the heap can't run the GC, silently fail and return that no GC was run.
2613 switch (gc_type) {
2614 case collector::kGcTypePartial: {
2615 if (!HasZygoteSpace()) {
2616 return collector::kGcTypeNone;
2617 }
2618 break;
2619 }
2620 default: {
2621 // Other GC types don't have any special cases that make them unrunnable. The main case
2622 // here is full GC.
2623 }
2624 }
2625 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2626 Locks::mutator_lock_->AssertNotHeld(self);
2627 if (self->IsHandlingStackOverflow()) {
2628 // If we are throwing a stack overflow error we probably don't have enough remaining stack
2629 // space to run the GC.
2630 return collector::kGcTypeNone;
2631 }
2632 bool compacting_gc;
2633 {
2634 gc_complete_lock_->AssertNotHeld(self);
2635 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2636 MutexLock mu(self, *gc_complete_lock_);
2637 // Ensure there is only one GC at a time.
2638 WaitForGcToCompleteLocked(gc_cause, self);
2639 compacting_gc = IsMovingGc(collector_type_);
2640 // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2641 if (compacting_gc && disable_moving_gc_count_ != 0) {
2642 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2643 return collector::kGcTypeNone;
2644 }
2645 if (gc_disabled_for_shutdown_) {
2646 return collector::kGcTypeNone;
2647 }
2648 collector_type_running_ = collector_type_;
2649 }
2650 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2651 ++runtime->GetStats()->gc_for_alloc_count;
2652 ++self->GetStats()->gc_for_alloc_count;
2653 }
2654 const uint64_t bytes_allocated_before_gc = GetBytesAllocated();
2655 // Approximate heap size.
2656 ATRACE_INT("Heap size (KB)", bytes_allocated_before_gc / KB);
2657
2658 DCHECK_LT(gc_type, collector::kGcTypeMax);
2659 DCHECK_NE(gc_type, collector::kGcTypeNone);
2660
2661 collector::GarbageCollector* collector = nullptr;
2662 // TODO: Clean this up.
2663 if (compacting_gc) {
2664 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2665 current_allocator_ == kAllocatorTypeTLAB ||
2666 current_allocator_ == kAllocatorTypeRegion ||
2667 current_allocator_ == kAllocatorTypeRegionTLAB);
2668 switch (collector_type_) {
2669 case kCollectorTypeSS:
2670 // Fall-through.
2671 case kCollectorTypeGSS:
2672 semi_space_collector_->SetFromSpace(bump_pointer_space_);
2673 semi_space_collector_->SetToSpace(temp_space_);
2674 semi_space_collector_->SetSwapSemiSpaces(true);
2675 collector = semi_space_collector_;
2676 break;
2677 case kCollectorTypeCC:
2678 concurrent_copying_collector_->SetRegionSpace(region_space_);
2679 collector = concurrent_copying_collector_;
2680 break;
2681 case kCollectorTypeMC:
2682 mark_compact_collector_->SetSpace(bump_pointer_space_);
2683 collector = mark_compact_collector_;
2684 break;
2685 default:
2686 LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2687 }
2688 if (collector != mark_compact_collector_ && collector != concurrent_copying_collector_) {
2689 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2690 if (kIsDebugBuild) {
2691 // Try to read each page of the memory map in case mprotect didn't work properly b/19894268.
2692 temp_space_->GetMemMap()->TryReadable();
2693 }
2694 CHECK(temp_space_->IsEmpty());
2695 }
2696 gc_type = collector::kGcTypeFull; // TODO: Not hard code this in.
2697 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2698 current_allocator_ == kAllocatorTypeDlMalloc) {
2699 collector = FindCollectorByGcType(gc_type);
2700 } else {
2701 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2702 }
2703 if (IsGcConcurrent()) {
2704 // Disable concurrent GC check so that we don't have spammy JNI requests.
2705 // This gets recalculated in GrowForUtilization. It is important that it is disabled /
2706 // calculated in the same thread so that there aren't any races that can cause it to become
2707 // permanently disabled. b/17942071
2708 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2709 }
2710
2711 // It's time to clear all inline caches, in case some classes can be unloaded.
2712 if (((gc_type == collector::kGcTypeFull) || (gc_type == collector::kGcTypePartial)) &&
2713 (runtime->GetJit() != nullptr)) {
2714 runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self);
2715 }
2716
2717 CHECK(collector != nullptr)
2718 << "Could not find garbage collector with collector_type="
2719 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
2720 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2721 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2722 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2723 RequestTrim(self);
2724 // Enqueue cleared references.
2725 reference_processor_->EnqueueClearedReferences(self);
2726 // Grow the heap so that we know when to perform the next GC.
2727 GrowForUtilization(collector, bytes_allocated_before_gc);
2728 LogGC(gc_cause, collector);
2729 FinishGC(self, gc_type);
2730 // Inform DDMS that a GC completed.
2731 Dbg::GcDidFinish();
2732 // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
2733 // deadlocks in case the JNI_OnUnload function does allocations.
2734 {
2735 ScopedObjectAccess soa(self);
2736 soa.Vm()->UnloadNativeLibraries();
2737 }
2738 return gc_type;
2739 }
2740
2741 void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
2742 const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2743 const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2744 // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2745 // (mutator time blocked >= long_pause_log_threshold_).
2746 bool log_gc = gc_cause == kGcCauseExplicit;
2747 if (!log_gc && CareAboutPauseTimes()) {
2748 // GC for alloc pauses the allocating thread, so consider it as a pause.
2749 log_gc = duration > long_gc_log_threshold_ ||
2750 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2751 for (uint64_t pause : pause_times) {
2752 log_gc = log_gc || pause >= long_pause_log_threshold_;
2753 }
2754 }
2755 if (log_gc) {
2756 const size_t percent_free = GetPercentFree();
2757 const size_t current_heap_size = GetBytesAllocated();
2758 const size_t total_memory = GetTotalMemory();
2759 std::ostringstream pause_string;
2760 for (size_t i = 0; i < pause_times.size(); ++i) {
2761 pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2762 << ((i != pause_times.size() - 1) ? "," : "");
2763 }
2764 LOG(INFO) << gc_cause << " " << collector->GetName()
2765 << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
2766 << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2767 << current_gc_iteration_.GetFreedLargeObjects() << "("
2768 << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2769 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2770 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2771 << " total " << PrettyDuration((duration / 1000) * 1000);
2772 VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2773 }
2774 }
2775
2776 void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2777 MutexLock mu(self, *gc_complete_lock_);
2778 collector_type_running_ = kCollectorTypeNone;
2779 if (gc_type != collector::kGcTypeNone) {
2780 last_gc_type_ = gc_type;
2781
2782 // Update stats.
2783 ++gc_count_last_window_;
2784 if (running_collection_is_blocking_) {
2785 // If the currently running collection was a blocking one,
2786 // increment the counters and reset the flag.
2787 ++blocking_gc_count_;
2788 blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
2789 ++blocking_gc_count_last_window_;
2790 }
2791 // Update the gc count rate histograms if due.
2792 UpdateGcCountRateHistograms();
2793 }
2794 // Reset.
2795 running_collection_is_blocking_ = false;
2796 // Wake anyone who may have been waiting for the GC to complete.
2797 gc_complete_cond_->Broadcast(self);
2798 }
2799
2800 void Heap::UpdateGcCountRateHistograms() {
2801 // Invariant: if the time since the last update spans more than
2802 // one window, all the GC runs (if > 0) must have happened in the first
2803 // window, because otherwise the update would have already taken place
2804 // at an earlier GC run. So, we report the non-first windows with
2805 // zero counts to the histograms.
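// Worked example (illustrative numbers, not from the source): with a window duration of 10s,
// if 35s have elapsed since the last update then num_of_windows is 3; the GCs counted so far
// are attributed to the first window and the remaining two windows are recorded as zeros.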
2806 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2807 uint64_t now = NanoTime();
2808 DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
2809 uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
2810 uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
2811 if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
2812 // Record the first window.
2813 gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1); // Exclude the current run.
2814 blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
2815 blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
2816 // Record the other windows (with zero counts).
2817 for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
2818 gc_count_rate_histogram_.AddValue(0);
2819 blocking_gc_count_rate_histogram_.AddValue(0);
2820 }
2821 // Update the last update time and reset the counters.
2822 last_update_time_gc_count_rate_histograms_ =
2823 (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
2824 gc_count_last_window_ = 1; // Include the current run.
2825 blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
2826 }
2827 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2828 }
2829
2830 class RootMatchesObjectVisitor : public SingleRootVisitor {
2831 public:
2832 explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
2833
2834 void VisitRoot(mirror::Object* root, const RootInfo& info)
2835 OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
2836 if (root == obj_) {
2837 LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
2838 }
2839 }
2840
2841 private:
2842 const mirror::Object* const obj_;
2843 };
2844
2845
2846 class ScanVisitor {
2847 public:
2848 void operator()(const mirror::Object* obj) const {
2849 LOG(ERROR) << "Would have rescanned object " << obj;
2850 }
2851 };
2852
2853 // Verify a reference from an object.
2854 class VerifyReferenceVisitor : public SingleRootVisitor {
2855 public:
2856 VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2857 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
2858 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
2859
2860 size_t GetFailureCount() const {
2861 return fail_count_->LoadSequentiallyConsistent();
2862 }
2863
2864 void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
2865 SHARED_REQUIRES(Locks::mutator_lock_) {
2866 if (verify_referent_) {
2867 VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
2868 }
2869 }
2870
2871 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
2872 SHARED_REQUIRES(Locks::mutator_lock_) {
2873 VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
2874 }
2875
2876 bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
2877 return heap_->IsLiveObjectLocked(obj, true, false, true);
2878 }
2879
2880 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
2881 SHARED_REQUIRES(Locks::mutator_lock_) {
2882 if (!root->IsNull()) {
2883 VisitRoot(root);
2884 }
2885 }
2886 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
2887 SHARED_REQUIRES(Locks::mutator_lock_) {
2888 const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
2889 root->AsMirrorPtr(), RootInfo(kRootVMInternal));
2890 }
2891
2892 virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
2893 SHARED_REQUIRES(Locks::mutator_lock_) {
2894 if (root == nullptr) {
2895 LOG(ERROR) << "Root is null with info " << root_info.GetType();
2896 } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
2897 LOG(ERROR) << "Root " << root << " is dead with type " << PrettyTypeOf(root)
2898 << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
2899 }
2900 }
2901
2902 private:
2903 // TODO: Fix the no thread safety analysis.
2904 // Returns false on failure.
2905 bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
2906 NO_THREAD_SAFETY_ANALYSIS {
2907 if (ref == nullptr || IsLive(ref)) {
2908 // Verify that the reference is live.
2909 return true;
2910 }
2911 if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
2912 // Print the message only on the first failure to prevent spam.
2913 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
2914 }
2915 if (obj != nullptr) {
2916 // Only do this part for non roots.
2917 accounting::CardTable* card_table = heap_->GetCardTable();
2918 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2919 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2920 uint8_t* card_addr = card_table->CardFromAddr(obj);
2921 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2922 << offset << "\n card value = " << static_cast<int>(*card_addr);
2923 if (heap_->IsValidObjectAddress(obj->GetClass())) {
2924 LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
2925 } else {
2926 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
2927 }
2928
2929 // Attempt to find the class inside of the recently freed objects.
2930 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2931 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2932 space::MallocSpace* space = ref_space->AsMallocSpace();
2933 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2934 if (ref_class != nullptr) {
2935 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2936 << PrettyClass(ref_class);
2937 } else {
2938 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
2939 }
2940 }
2941
2942 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2943 ref->GetClass()->IsClass()) {
2944 LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
2945 } else {
2946 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2947 << ") is not a valid heap address";
2948 }
2949
2950 card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
2951 void* cover_begin = card_table->AddrFromCard(card_addr);
2952 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2953 accounting::CardTable::kCardSize);
2954 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2955 << "-" << cover_end;
2956 accounting::ContinuousSpaceBitmap* bitmap =
2957 heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
2958
2959 if (bitmap == nullptr) {
2960 LOG(ERROR) << "Object " << obj << " has no bitmap";
2961 if (!VerifyClassClass(obj->GetClass())) {
2962 LOG(ERROR) << "Object " << obj << " failed class verification!";
2963 }
2964 } else {
2965 // Print out how the object is live.
2966 if (bitmap->Test(obj)) {
2967 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2968 }
2969 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
2970 LOG(ERROR) << "Object " << obj << " found in allocation stack";
2971 }
2972 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
2973 LOG(ERROR) << "Object " << obj << " found in live stack";
2974 }
2975 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2976 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2977 }
2978 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2979 LOG(ERROR) << "Ref " << ref << " found in live stack";
2980 }
2981 // Attempt to see if the card table missed the reference.
2982 ScanVisitor scan_visitor;
2983 uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
2984 card_table->Scan<false>(bitmap, byte_cover_begin,
2985 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
2986 }
2987
2988 // Search to see if any of the roots reference our object.
2989 RootMatchesObjectVisitor visitor1(obj);
2990 Runtime::Current()->VisitRoots(&visitor1);
2991 // Search to see if any of the roots reference our reference.
2992 RootMatchesObjectVisitor visitor2(ref);
2993 Runtime::Current()->VisitRoots(&visitor2);
2994 }
2995 return false;
2996 }
2997
2998 Heap* const heap_;
2999 Atomic<size_t>* const fail_count_;
3000 const bool verify_referent_;
3001 };
3002
3003 // Verify all references within an object, for use with HeapBitmap::Visit.
3004 class VerifyObjectVisitor {
3005 public:
3006 VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
3007 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
3008
3009 void operator()(mirror::Object* obj)
3010 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3011 // Note: we are verifying the references in obj but not obj itself. obj must already be
3012 // live, or else we could not have found it in the live bitmap.
3013 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
3014 // The class doesn't count as a reference but we should verify it anyway.
3015 obj->VisitReferences(visitor, visitor);
3016 }
3017
3018 static void VisitCallback(mirror::Object* obj, void* arg)
3019 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3020 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
3021 visitor->operator()(obj);
3022 }
3023
3024 void VerifyRoots() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
3025 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
3026 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
3027 Runtime::Current()->VisitRoots(&visitor);
3028 }
3029
3030 size_t GetFailureCount() const {
3031 return fail_count_->LoadSequentiallyConsistent();
3032 }
3033
3034 private:
3035 Heap* const heap_;
3036 Atomic<size_t>* const fail_count_;
3037 const bool verify_referent_;
3038 };
3039
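// Both slow paths below are taken when pushing onto the (full) allocation stack fails. They
// run a sticky GC to drain the stack and retry; the object is wrapped in a handle first, since
// a moving collector may relocate it during the collection.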
3040 void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
3041 // Slow path, the allocation stack push back must have already failed.
3042 DCHECK(!allocation_stack_->AtomicPushBack(*obj));
3043 do {
3044 // TODO: Add handle VerifyObject.
3045 StackHandleScope<1> hs(self);
3046 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3047 // Push our object into the reserve region of the allocation stack. This is only required due
3048 // to heap verification requiring that roots are live (either in the live bitmap or in the
3049 // allocation stack).
3050 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
3051 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
3052 } while (!allocation_stack_->AtomicPushBack(*obj));
3053 }
3054
3055 void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
3056 // Slow path, the allocation stack push back must have already failed.
3057 DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
3058 StackReference<mirror::Object>* start_address;
3059 StackReference<mirror::Object>* end_address;
3060 while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
3061 &end_address)) {
3062 // TODO: Add handle VerifyObject.
3063 StackHandleScope<1> hs(self);
3064 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3065 // Push our object into the reserve region of the allocation stack. This is only required due
3066 // to heap verification requiring that roots are live (either in the live bitmap or in the
3067 // allocation stack).
3068 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
3069 // Push into the reserve allocation stack.
3070 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
3071 }
3072 self->SetThreadLocalAllocationStack(start_address, end_address);
3073 // Retry on the new thread-local allocation stack.
3074 CHECK(self->PushOnThreadLocalAllocationStack(*obj)); // Must succeed.
3075 }
3076
3077 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
3078 size_t Heap::VerifyHeapReferences(bool verify_referents) {
3079 Thread* self = Thread::Current();
3080 Locks::mutator_lock_->AssertExclusiveHeld(self);
3081 // Let's sort our allocation stacks so that we can efficiently binary search them.
3082 allocation_stack_->Sort();
3083 live_stack_->Sort();
3084 // Since we sorted the allocation stack content, we need to revoke all
3085 // thread-local allocation stacks.
3086 RevokeAllThreadLocalAllocationStacks(self);
3087 Atomic<size_t> fail_count_(0);
3088 VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
3089 // Verify objects in the allocation stack since these will be objects which were:
3090 // 1. Allocated prior to the GC (pre GC verification).
3091 // 2. Allocated during the GC (pre sweep GC verification).
3092 // We don't want to verify the objects in the live stack since they themselves may be
3093 // pointing to dead objects if they are not reachable.
3094 VisitObjectsPaused(VerifyObjectVisitor::VisitCallback, &visitor);
3095 // Verify the roots:
3096 visitor.VerifyRoots();
3097 if (visitor.GetFailureCount() > 0) {
3098 // Dump mod-union tables.
3099 for (const auto& table_pair : mod_union_tables_) {
3100 accounting::ModUnionTable* mod_union_table = table_pair.second;
3101 mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
3102 }
3103 // Dump remembered sets.
3104 for (const auto& table_pair : remembered_sets_) {
3105 accounting::RememberedSet* remembered_set = table_pair.second;
3106 remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
3107 }
3108 DumpSpaces(LOG(ERROR));
3109 }
3110 return visitor.GetFailureCount();
3111 }
3112
3113 class VerifyReferenceCardVisitor {
3114 public:
3115 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
3116 SHARED_REQUIRES(Locks::mutator_lock_,
3117 Locks::heap_bitmap_lock_)
3118 : heap_(heap), failed_(failed) {
3119 }
3120
3121 // There are no card marks for native roots on a class.
3122 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
3123 const {}
3124 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
3125
3126 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
3127 // annotalysis on visitors.
3128 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
3129 NO_THREAD_SAFETY_ANALYSIS {
3130 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
3131 // Filter out class references since changing an object's class does not mark the card as dirty.
3132 // Also handles large objects, since the only reference they hold is a class reference.
3133 if (ref != nullptr && !ref->IsClass()) {
3134 accounting::CardTable* card_table = heap_->GetCardTable();
3135 // If the object is not dirty and it is referencing something in the live stack other than
3136 // a class, then it must be on a dirty card.
3137 if (!card_table->AddrIsInCardTable(obj)) {
3138 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
3139 *failed_ = true;
3140 } else if (!card_table->IsDirty(obj)) {
3141 // TODO: Check mod-union tables.
3142 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
3143 // kCardDirty - 1 if it didn't get touched since we aged it.
3144 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
3145 if (live_stack->ContainsSorted(ref)) {
3146 if (live_stack->ContainsSorted(obj)) {
3147 LOG(ERROR) << "Object " << obj << " found in live stack";
3148 }
3149 if (heap_->GetLiveBitmap()->Test(obj)) {
3150 LOG(ERROR) << "Object " << obj << " found in live bitmap";
3151 }
3152 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
3153 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
3154
3155 // Print which field of the object is dead.
3156 if (!obj->IsObjectArray()) {
3157 mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
3158 CHECK(klass != nullptr);
3159 for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
3160 if (field.GetOffset().Int32Value() == offset.Int32Value()) {
3161 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
3162 << PrettyField(&field);
3163 break;
3164 }
3165 }
3166 } else {
3167 mirror::ObjectArray<mirror::Object>* object_array =
3168 obj->AsObjectArray<mirror::Object>();
3169 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
3170 if (object_array->Get(i) == ref) {
3171 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
3172 }
3173 }
3174 }
3175
3176 *failed_ = true;
3177 }
3178 }
3179 }
3180 }
3181
3182 private:
3183 Heap* const heap_;
3184 bool* const failed_;
3185 };
3186
3187 class VerifyLiveStackReferences {
3188 public:
3189 explicit VerifyLiveStackReferences(Heap* heap)
3190 : heap_(heap),
3191 failed_(false) {}
3192
3193 void operator()(mirror::Object* obj) const
3194 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3195 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
3196 obj->VisitReferences(visitor, VoidFunctor());
3197 }
3198
3199 bool Failed() const {
3200 return failed_;
3201 }
3202
3203 private:
3204 Heap* const heap_;
3205 bool failed_;
3206 };
3207
3208 bool Heap::VerifyMissingCardMarks() {
3209 Thread* self = Thread::Current();
3210 Locks::mutator_lock_->AssertExclusiveHeld(self);
3211 // We need to sort the live stack since we binary search it.
3212 live_stack_->Sort();
3213 // Since we sorted the live stack content, we need to revoke all
3214 // thread-local allocation stacks.
3215 RevokeAllThreadLocalAllocationStacks(self);
3216 VerifyLiveStackReferences visitor(this);
3217 GetLiveBitmap()->Visit(visitor);
3218 // We can verify objects in the live stack since none of these should reference dead objects.
3219 for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
3220 if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
3221 visitor(it->AsMirrorPtr());
3222 }
3223 }
3224 return !visitor.Failed();
3225 }
3226
3227 void Heap::SwapStacks() {
3228 if (kUseThreadLocalAllocationStack) {
3229 live_stack_->AssertAllZero();
3230 }
3231 allocation_stack_.swap(live_stack_);
3232 }
3233
3234 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
3235 // This must be called only during the pause.
3236 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
3237 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
3238 MutexLock mu2(self, *Locks::thread_list_lock_);
3239 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
3240 for (Thread* t : thread_list) {
3241 t->RevokeThreadLocalAllocationStack();
3242 }
3243 }
3244
3245 void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
3246 if (kIsDebugBuild) {
3247 if (rosalloc_space_ != nullptr) {
3248 rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
3249 }
3250 if (bump_pointer_space_ != nullptr) {
3251 bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
3252 }
3253 }
3254 }
3255
3256 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
3257 if (kIsDebugBuild) {
3258 if (bump_pointer_space_ != nullptr) {
3259 bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
3260 }
3261 }
3262 }
3263
3264 accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
3265 auto it = mod_union_tables_.find(space);
3266 if (it == mod_union_tables_.end()) {
3267 return nullptr;
3268 }
3269 return it->second;
3270 }
3271
3272 accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3273 auto it = remembered_sets_.find(space);
3274 if (it == remembered_sets_.end()) {
3275 return nullptr;
3276 }
3277 return it->second;
3278 }
3279
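// Card handling before a GC proceeds per space, in order of preference: spaces with a
// mod-union table (image and zygote spaces) have their cards cleared into the table; GSS alloc
// spaces with a remembered set clear their cards into that set; remaining alloc spaces either
// have their cards cleared outright or aged so the GC can tell which cards were dirtied before
// it started.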
3280 void Heap::ProcessCards(TimingLogger* timings,
3281 bool use_rem_sets,
3282 bool process_alloc_space_cards,
3283 bool clear_alloc_space_cards) {
3284 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3285 // Clear cards and keep track of cards cleared in the mod-union table.
3286 for (const auto& space : continuous_spaces_) {
3287 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
3288 accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
3289 if (table != nullptr) {
3290 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3291 "ImageModUnionClearCards";
3292 TimingLogger::ScopedTiming t2(name, timings);
3293 table->ClearCards();
3294 } else if (use_rem_sets && rem_set != nullptr) {
3295 DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
3296 << static_cast<int>(collector_type_);
3297 TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
3298 rem_set->ClearCards();
3299 } else if (process_alloc_space_cards) {
3300 TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
3301 if (clear_alloc_space_cards) {
3302 uint8_t* end = space->End();
3303 if (space->IsImageSpace()) {
3304 // Image space end is the end of the mirror objects; it is not necessarily page or card
3305 // aligned. Align up so that the check in ClearCardRange does not fail.
3306 end = AlignUp(end, accounting::CardTable::kCardSize);
3307 }
3308 card_table_->ClearCardRange(space->Begin(), end);
3309 } else {
3310 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3311 // cards were dirty before the GC started.
3312 // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3313 // -> clean(cleaning thread).
3314 // The race means we end up with either an aged or an unaged card. Since we run the
3315 // checkpoint on the roots and then scan / update the mod-union tables afterwards, we will
3316 // always scan one or the other. If we end up with the unaged card, we scan it in the pause.
3317 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3318 VoidFunctor());
3319 }
3320 }
3321 }
3322 }
3323
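// A no-op mark visitor: it returns objects unchanged, so it can be used to update and verify
// the mod-union tables (see PreGcVerificationPaused below) without actually marking anything.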
3324 struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
3325 virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
3326 return obj;
3327 }
3328 virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*) OVERRIDE {
3329 }
3330 };
3331
3332 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3333 Thread* const self = Thread::Current();
3334 TimingLogger* const timings = current_gc_iteration_.GetTimings();
3335 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3336 if (verify_pre_gc_heap_) {
3337 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
3338 size_t failures = VerifyHeapReferences();
3339 if (failures > 0) {
3340 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3341 << " failures";
3342 }
3343 }
3344 // Check that all objects which reference things in the live stack are on dirty cards.
3345 if (verify_missing_card_marks_) {
3346 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
3347 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
3348 SwapStacks();
3349 // Sort the live stack so that we can quickly binary search it later.
3350 CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3351 << " missing card mark verification failed\n" << DumpSpaces();
3352 SwapStacks();
3353 }
3354 if (verify_mod_union_table_) {
3355 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
3356 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
3357 for (const auto& table_pair : mod_union_tables_) {
3358 accounting::ModUnionTable* mod_union_table = table_pair.second;
3359 IdentityMarkHeapReferenceVisitor visitor;
3360 mod_union_table->UpdateAndMarkReferences(&visitor);
3361 mod_union_table->Verify();
3362 }
3363 }
3364 }
3365
3366 void Heap::PreGcVerification(collector::GarbageCollector* gc) {
3367 if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
3368 collector::GarbageCollector::ScopedPause pause(gc);
3369 PreGcVerificationPaused(gc);
3370 }
3371 }
3372
3373 void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) {
3374 // TODO: Add a new runtime option for this?
3375 if (verify_pre_gc_rosalloc_) {
3376 RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
3377 }
3378 }
3379
3380 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
3381 Thread* const self = Thread::Current();
3382 TimingLogger* const timings = current_gc_iteration_.GetTimings();
3383 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3384 // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3385 // reachable objects.
3386 if (verify_pre_sweeping_heap_) {
3387 TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
3388 CHECK_NE(self->GetState(), kRunnable);
3389 {
3390 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3391 // Swapping bound bitmaps does nothing.
3392 gc->SwapBitmaps();
3393 }
3394 // Pass in false since concurrent reference processing can mean that the reference referents
3395 // may point to dead objects at the point at which PreSweepingGcVerification is called.
3396 size_t failures = VerifyHeapReferences(false);
3397 if (failures > 0) {
3398 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3399 << " failures";
3400 }
3401 {
3402 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3403 gc->SwapBitmaps();
3404 }
3405 }
3406 if (verify_pre_sweeping_rosalloc_) {
3407 RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3408 }
3409 }
3410
3411 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3412 // Only pause if we have to do some verification.
3413 Thread* const self = Thread::Current();
3414 TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
3415 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3416 if (verify_system_weaks_) {
3417 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3418 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3419 mark_sweep->VerifySystemWeaks();
3420 }
3421 if (verify_post_gc_rosalloc_) {
3422 RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
3423 }
3424 if (verify_post_gc_heap_) {
3425 TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
3426 size_t failures = VerifyHeapReferences();
3427 if (failures > 0) {
3428 LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
3429 << " failures";
3430 }
3431 }
3432 }
3433
3434 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
3435 if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
3436 collector::GarbageCollector::ScopedPause pause(gc);
3437 PostGcVerificationPaused(gc);
3438 }
3439 }
3440
3441 void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
3442 TimingLogger::ScopedTiming t(name, timings);
3443 for (const auto& space : continuous_spaces_) {
3444 if (space->IsRosAllocSpace()) {
3445 VLOG(heap) << name << " : " << space->GetName();
3446 space->AsRosAllocSpace()->Verify();
3447 }
3448 }
3449 }
3450
3451 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
3452 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
3453 MutexLock mu(self, *gc_complete_lock_);
3454 return WaitForGcToCompleteLocked(cause, self);
3455 }
3456
3457 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
3458 collector::GcType last_gc_type = collector::kGcTypeNone;
3459 uint64_t wait_start = NanoTime();
3460 while (collector_type_running_ != kCollectorTypeNone) {
3461 if (self != task_processor_->GetRunningThread()) {
3462 // The current thread is about to wait for a currently running
3463 // collection to finish. If the waiting thread is not the heap
3464 // task daemon thread, the currently running collection is
3465 // considered as a blocking GC.
3466 running_collection_is_blocking_ = true;
3467 VLOG(gc) << "Waiting for a blocking GC " << cause;
3468 }
3469 ScopedTrace trace("GC: Wait For Completion");
3470 // We must wait, change the thread state, then sleep on gc_complete_cond_.
3471 gc_complete_cond_->Wait(self);
3472 last_gc_type = last_gc_type_;
3473 }
3474 uint64_t wait_time = NanoTime() - wait_start;
3475 total_wait_time_ += wait_time;
3476 if (wait_time > long_pause_log_threshold_) {
3477 LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
3478 << " for cause " << cause;
3479 }
3480 if (self != task_processor_->GetRunningThread()) {
3481 // The current thread is about to run a collection. If the thread
3482 // is not the heap task daemon thread, it's considered as a
3483 // blocking GC (i.e., blocking itself).
3484 running_collection_is_blocking_ = true;
3485 VLOG(gc) << "Starting a blocking GC " << cause;
3486 }
3487 return last_gc_type;
3488 }
3489
3490 void Heap::DumpForSigQuit(std::ostream& os) {
3491 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
3492 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
3493 DumpGcPerformanceInfo(os);
3494 }
3495
3496 size_t Heap::GetPercentFree() {
3497 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
3498 }
3499
3500 void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
3501 if (max_allowed_footprint > GetMaxMemory()) {
3502 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
3503 << PrettySize(GetMaxMemory());
3504 max_allowed_footprint = GetMaxMemory();
3505 }
3506 max_allowed_footprint_ = max_allowed_footprint;
3507 }
3508
3509 bool Heap::IsMovableObject(const mirror::Object* obj) const {
3510 if (kMovingCollector) {
3511 space::Space* space = FindContinuousSpaceFromObject(obj, true);
3512 if (space != nullptr) {
3513 // TODO: Check large object?
3514 return space->CanMoveObjects();
3515 }
3516 }
3517 return false;
3518 }
3519
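// The native watermark tracks the utilization target, clamped to [native + min_free,
// native + max_free] and bounded by the growth limit. Illustrative numbers (not from the
// source): with a target utilization of 0.5, min_free of 512KB and max_free of 2MB, 3MB of
// native allocations gives a raw target of 6MB, clamped down to native + max_free = 5MB.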
3520 void Heap::UpdateMaxNativeFootprint() {
3521 size_t native_size = native_bytes_allocated_.LoadRelaxed();
3522 // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
3523 size_t target_size = native_size / GetTargetHeapUtilization();
3524 if (target_size > native_size + max_free_) {
3525 target_size = native_size + max_free_;
3526 } else if (target_size < native_size + min_free_) {
3527 target_size = native_size + min_free_;
3528 }
3529 native_footprint_gc_watermark_ = std::min(growth_limit_, target_size);
3530 }
3531
3532 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
3533 for (const auto& collector : garbage_collectors_) {
3534 if (collector->GetCollectorType() == collector_type_ &&
3535 collector->GetGcType() == gc_type) {
3536 return collector;
3537 }
3538 }
3539 return nullptr;
3540 }
3541
3542 double Heap::HeapGrowthMultiplier() const {
3543 // Return 1.0 (no extra growth) when we are in the background or in low memory mode.
3544 if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
3545 return 1.0;
3546 }
3547 return foreground_heap_growth_multiplier_;
3548 }
3549
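// For a non-sticky GC, the new target is bytes_allocated + (bytes_allocated / utilization -
// bytes_allocated) * multiplier, clamped to [bytes_allocated + adjusted_min_free,
// bytes_allocated + adjusted_max_free]. Illustrative numbers (not from the source): with 40MB
// allocated, utilization 0.75 and a foreground multiplier of 2.0, the raw target is
// 40MB + 13.3MB * 2 ~= 66.7MB before clamping.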
3550 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
3551 uint64_t bytes_allocated_before_gc) {
3552 // We know what our utilization is at this moment.
3553 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
3554 const uint64_t bytes_allocated = GetBytesAllocated();
3555 uint64_t target_size;
3556 collector::GcType gc_type = collector_ran->GetGcType();
3557 const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
3558 // foreground.
3559 const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
3560 const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
3561 if (gc_type != collector::kGcTypeSticky) {
3562 // Grow the heap for non sticky GC.
3563 ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
3564 CHECK_GE(delta, 0);
3565 target_size = bytes_allocated + delta * multiplier;
3566 target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
3567 target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
3568 native_need_to_run_finalization_ = true;
3569 next_gc_type_ = collector::kGcTypeSticky;
3570 } else {
3571 collector::GcType non_sticky_gc_type =
3572 HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
3573 // Find what the next non sticky collector will be.
3574 collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
3575 // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3576 // do another sticky collection next.
3577 // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
3578 // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
3579 // if the sticky GC throughput always remained >= the full/partial throughput.
3580 if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
3581 non_sticky_collector->GetEstimatedMeanThroughput() &&
3582 non_sticky_collector->NumberOfIterations() > 0 &&
3583 bytes_allocated <= max_allowed_footprint_) {
3584 next_gc_type_ = collector::kGcTypeSticky;
3585 } else {
3586 next_gc_type_ = non_sticky_gc_type;
3587 }
3588 // If we have freed enough memory, shrink the heap back down.
3589 if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
3590 target_size = bytes_allocated + adjusted_max_free;
3591 } else {
3592 target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
3593 }
3594 }
3595 if (!ignore_max_footprint_) {
3596 SetIdealFootprint(target_size);
3597 if (IsGcConcurrent()) {
3598 const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
3599 current_gc_iteration_.GetFreedLargeObjectBytes() +
3600 current_gc_iteration_.GetFreedRevokeBytes();
3601 // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3602 // how many bytes were allocated during the GC we need to add freed_bytes back on.
3603 CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
3604 const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
3605 bytes_allocated_before_gc;
3606 // Calculate when to perform the next ConcurrentGC.
3607 // Calculate the estimated GC duration.
3608 const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
3609 // Estimate how many remaining bytes we will have when we need to start the next GC.
3610 size_t remaining_bytes = bytes_allocated_during_gc * gc_duration_seconds;
3611 remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
3612 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
3613 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
3614 // This should never happen: it would mean that, at the estimated allocation rate, we
3615 // will exceed the application's entire footprint. Schedule another GC nearly straight
3616 // away.
3617 remaining_bytes = kMinConcurrentRemainingBytes;
3618 }
3619 DCHECK_LE(remaining_bytes, max_allowed_footprint_);
3620 DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
3621 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3622 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
3623 // right away.
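// Illustrative numbers (not from the source): with a footprint of 64MB and an estimated
// remaining_bytes of 512KB, a background GC is requested once bytes_allocated crosses
// ~63.5MB, leaving headroom for allocation while the concurrent GC runs.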
3624 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
3625 static_cast<size_t>(bytes_allocated));
3626 }
3627 }
3628 }
3629
3630 void Heap::ClampGrowthLimit() {
3631 // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
3632 ScopedObjectAccess soa(Thread::Current());
3633 WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
3634 capacity_ = growth_limit_;
3635 for (const auto& space : continuous_spaces_) {
3636 if (space->IsMallocSpace()) {
3637 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3638 malloc_space->ClampGrowthLimit();
3639 }
3640 }
3641 // The main space backup isn't added to the heap's space list for performance reasons, so clamp it here.
3642 if (main_space_backup_.get() != nullptr) {
3643 main_space_backup_->ClampGrowthLimit();
3644 }
3645 }
3646
3647 void Heap::ClearGrowthLimit() {
3648 growth_limit_ = capacity_;
3649 ScopedObjectAccess soa(Thread::Current());
3650 for (const auto& space : continuous_spaces_) {
3651 if (space->IsMallocSpace()) {
3652 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3653 malloc_space->ClearGrowthLimit();
3654 malloc_space->SetFootprintLimit(malloc_space->Capacity());
3655 }
3656 }
3657 // The main space backup isn't added to the heap's space list for performance reasons, so clear it here.
3658 if (main_space_backup_.get() != nullptr) {
3659 main_space_backup_->ClearGrowthLimit();
3660 main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3661 }
3662 }
3663
3664 void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
3665 ScopedObjectAccess soa(self);
3666 ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
3667 jvalue args[1];
3668 args[0].l = arg.get();
3669 InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
3670 // Restore object in case it gets moved.
3671 *object = soa.Decode<mirror::Object*>(arg.get());
3672 }
3673
3674 void Heap::RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj) {
3675 StackHandleScope<1> hs(self);
3676 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3677 RequestConcurrentGC(self, force_full);
3678 }
3679
3680 class Heap::ConcurrentGCTask : public HeapTask {
3681 public:
3682 ConcurrentGCTask(uint64_t target_time, bool force_full)
3683 : HeapTask(target_time), force_full_(force_full) { }
3684 virtual void Run(Thread* self) OVERRIDE {
3685 gc::Heap* heap = Runtime::Current()->GetHeap();
3686 heap->ConcurrentGC(self, force_full_);
3687 heap->ClearConcurrentGCRequest();
3688 }
3689
3690 private:
3691 const bool force_full_; // If true, force full (or partial) collection.
3692 };
3693
3694 static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
3695 Runtime* runtime = Runtime::Current();
3696 return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3697 !self->IsHandlingStackOverflow();
3698 }
3699
3700 void Heap::ClearConcurrentGCRequest() {
3701 concurrent_gc_pending_.StoreRelaxed(false);
3702 }
3703
3704 void Heap::RequestConcurrentGC(Thread* self, bool force_full) {
3705 if (CanAddHeapTask(self) &&
3706 concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
3707 task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away.
3708 force_full));
3709 }
3710 }
3711
3712 void Heap::ConcurrentGC(Thread* self, bool force_full) {
3713 if (!Runtime::Current()->IsShuttingDown(self)) {
3714 // Wait for any GCs currently running to finish.
3715 if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
3716 // If we can't run the GC type we wanted to run, find the next appropriate one and try that
3717 // instead. E.g. can't do partial, so do full instead.
3718 collector::GcType next_gc_type = next_gc_type_;
3719 // If forcing full and next gc type is sticky, override with a non-sticky type.
3720 if (force_full && next_gc_type == collector::kGcTypeSticky) {
3721 next_gc_type = HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
3722 }
3723 if (CollectGarbageInternal(next_gc_type, kGcCauseBackground, false) ==
3724 collector::kGcTypeNone) {
3725 for (collector::GcType gc_type : gc_plan_) {
3726 // Attempt to run the collector, if we succeed, we are done.
3727 if (gc_type > next_gc_type &&
3728 CollectGarbageInternal(gc_type, kGcCauseBackground, false) !=
3729 collector::kGcTypeNone) {
3730 break;
3731 }
3732 }
3733 }
3734 }
3735 }
3736 }
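// Sketch (hypothetical names) of the escalation policy above: run the
// preferred GC type first, and if the collector reports that it could not
// run, walk an ordered plan of increasingly aggressive types until one does.
#if 0
#include <vector>

enum class SketchGcType { kSticky, kPartial, kFull, kNone };

// Stand-in for CollectGarbageInternal(); returns kNone when the requested
// collection could not be run.
SketchGcType RunCollector(SketchGcType type);

void CollectWithFallback(SketchGcType preferred,
                         const std::vector<SketchGcType>& plan) {
  if (RunCollector(preferred) != SketchGcType::kNone) {
    return;  // The preferred type ran; done.
  }
  for (SketchGcType type : plan) {
    // Only escalate past the type that failed, as the loop above does.
    if (type > preferred && RunCollector(type) != SketchGcType::kNone) {
      break;
    }
  }
}
#endif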
3737
3738 class Heap::CollectorTransitionTask : public HeapTask {
3739 public:
3740   explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
3741
3742   virtual void Run(Thread* self) OVERRIDE {
3743 gc::Heap* heap = Runtime::Current()->GetHeap();
3744 heap->DoPendingCollectorTransition();
3745 heap->ClearPendingCollectorTransition(self);
3746 }
3747 };
3748
3749 void Heap::ClearPendingCollectorTransition(Thread* self) {
3750 MutexLock mu(self, *pending_task_lock_);
3751 pending_collector_transition_ = nullptr;
3752 }
3753
3754 void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
3755 Thread* self = Thread::Current();
3756 desired_collector_type_ = desired_collector_type;
3757 if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
3758 return;
3759 }
3760 CollectorTransitionTask* added_task = nullptr;
3761 const uint64_t target_time = NanoTime() + delta_time;
3762 {
3763 MutexLock mu(self, *pending_task_lock_);
3764     // If a collector transition is already pending, update its target time to the new target.
3765 if (pending_collector_transition_ != nullptr) {
3766 task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
3767 return;
3768 }
3769 added_task = new CollectorTransitionTask(target_time);
3770 pending_collector_transition_ = added_task;
3771 }
3772 task_processor_->AddTask(self, added_task);
3773 }
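// The pattern above in miniature (hypothetical types): under the pending-task
// lock, either retarget the task that is already queued or create exactly one
// new task, then hand it to the processor outside the lock.
#if 0
#include <cstdint>
#include <mutex>

struct SketchTask { uint64_t target_time; };
void Schedule(SketchTask* task);  // Stand-in for task_processor_->AddTask(...).

std::mutex pending_lock;
SketchTask* pending_task = nullptr;

void RequestAt(uint64_t target_time) {
  SketchTask* added = nullptr;
  {
    std::lock_guard<std::mutex> lock(pending_lock);
    if (pending_task != nullptr) {
      pending_task->target_time = target_time;  // Retarget; don't double-queue.
      return;
    }
    added = new SketchTask{target_time};
    pending_task = added;
  }
  Schedule(added);  // AddTask happens outside the lock, as above.
}
#endif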
3774
3775 class Heap::HeapTrimTask : public HeapTask {
3776 public:
3777   explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
3778   virtual void Run(Thread* self) OVERRIDE {
3779 gc::Heap* heap = Runtime::Current()->GetHeap();
3780 heap->Trim(self);
3781 heap->ClearPendingTrim(self);
3782 }
3783 };
3784
3785 void Heap::ClearPendingTrim(Thread* self) {
3786 MutexLock mu(self, *pending_task_lock_);
3787 pending_heap_trim_ = nullptr;
3788 }
3789
3790 void Heap::RequestTrim(Thread* self) {
3791 if (!CanAddHeapTask(self)) {
3792 return;
3793 }
3794 // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3795 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3796 // a space it will hold its lock and can become a cause of jank.
3797   // Note, the large object space self-trims, and the zygote space was trimmed at fork time
3798   // and is unchanging afterwards.
3799
3800 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3801 // because that only marks object heads, so a large array looks like lots of empty space. We
3802   // don't just ask dlmalloc to trim every time, because the cost of an _attempted_ trim is
3803   // proportional to utilization (which is probably inversely proportional to how much benefit
3804   // we can expect). We could try mincore(2), but that only measures how many pages we haven't
3805   // given away, not how much use we're making of those pages.
3806 HeapTrimTask* added_task = nullptr;
3807 {
3808 MutexLock mu(self, *pending_task_lock_);
3809 if (pending_heap_trim_ != nullptr) {
3810 // Already have a heap trim request in task processor, ignore this request.
3811 return;
3812 }
3813 added_task = new HeapTrimTask(kHeapTrimWait);
3814 pending_heap_trim_ = added_task;
3815 }
3816 task_processor_->AddTask(self, added_task);
3817 }
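// For reference, the "trimming of the libc heap" mentioned above corresponds
// to the allocator's trim entry point. A sketch assuming a glibc-style
// allocator (bionic and dlmalloc expose their own equivalents):
#if 0
#include <malloc.h>

void TrimLibcHeap() {
  // Ask the allocator to return free pages at the top of the heap to the
  // kernel; the argument is the slack (in bytes) to keep around.
  malloc_trim(0);
}
#endif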
3818
3819 void Heap::RevokeThreadLocalBuffers(Thread* thread) {
3820 if (rosalloc_space_ != nullptr) {
3821 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3822 if (freed_bytes_revoke > 0U) {
3823 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3824 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3825 }
3826 }
3827 if (bump_pointer_space_ != nullptr) {
3828 CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
3829 }
3830 if (region_space_ != nullptr) {
3831 CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
3832 }
3833 }
3834
3835 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3836 if (rosalloc_space_ != nullptr) {
3837 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3838 if (freed_bytes_revoke > 0U) {
3839 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3840 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3841 }
3842 }
3843 }
3844
3845 void Heap::RevokeAllThreadLocalBuffers() {
3846 if (rosalloc_space_ != nullptr) {
3847 size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
3848 if (freed_bytes_revoke > 0U) {
3849 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3850 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3851 }
3852 }
3853 if (bump_pointer_space_ != nullptr) {
3854 CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
3855 }
3856 if (region_space_ != nullptr) {
3857 CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
3858 }
3859 }
3860
3861 bool Heap::IsGCRequestPending() const {
3862 return concurrent_gc_pending_.LoadRelaxed();
3863 }
3864
3865 void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
3866 env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
3867 WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
3868 static_cast<jlong>(timeout));
3869 }
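// RunFinalization() above calls through cached jclass/jmethodID values from
// WellKnownClasses. For illustration, the equivalent uncached JNI call; the
// class and method come from the call above, the "(J)V" signature is inferred
// from the jlong argument.
#if 0
#include <jni.h>

void CallRunFinalization(JNIEnv* env, jlong timeout) {
  jclass klass = env->FindClass("dalvik/system/VMRuntime");
  if (klass == nullptr) {
    return;  // Lookup failed; a pending exception is set.
  }
  jmethodID mid = env->GetStaticMethodID(klass, "runFinalization", "(J)V");
  if (mid != nullptr) {
    env->CallStaticVoidMethod(klass, mid, timeout);
  }
}
#endif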
3870
3871 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
3872 Thread* self = ThreadForEnv(env);
3873 {
3874 MutexLock mu(self, native_histogram_lock_);
3875 native_allocation_histogram_.AddValue(bytes);
3876 }
3877 if (native_need_to_run_finalization_) {
3878 RunFinalization(env, kNativeAllocationFinalizeTimeout);
3879 UpdateMaxNativeFootprint();
3880 native_need_to_run_finalization_ = false;
3881 }
3882   // FetchAndAdd returns the old total, so add bytes again to compute the new total of native bytes allocated.
3883 size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
3884 new_native_bytes_allocated += bytes;
3885 if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
3886 collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
3887 collector::kGcTypeFull;
3888
3889     // The second watermark is higher than the GC watermark. If you hit this it means you are
3890     // allocating native objects faster than the GC can keep up with.
3891 if (new_native_bytes_allocated > growth_limit_) {
3892 if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
3893 // Just finished a GC, attempt to run finalizers.
3894 RunFinalization(env, kNativeAllocationFinalizeTimeout);
3895 CHECK(!env->ExceptionCheck());
3896 // Native bytes allocated may be updated by finalization, refresh it.
3897 new_native_bytes_allocated = native_bytes_allocated_.LoadRelaxed();
3898 }
3899 // If we still are over the watermark, attempt a GC for alloc and run finalizers.
3900 if (new_native_bytes_allocated > growth_limit_) {
3901 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
3902 RunFinalization(env, kNativeAllocationFinalizeTimeout);
3903 native_need_to_run_finalization_ = false;
3904 CHECK(!env->ExceptionCheck());
3905 }
3906       // We have just run finalizers, update the native watermark since it is very likely that
3907       // finalizers released native allocations held by managed objects.
3908 UpdateMaxNativeFootprint();
3909 } else if (!IsGCRequestPending()) {
3910 if (IsGcConcurrent()) {
3911 RequestConcurrentGC(self, true); // Request non-sticky type.
3912 } else {
3913 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
3914 }
3915 }
3916 }
3917 }
3918
3919 void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
3920 size_t expected_size;
3921 {
3922 MutexLock mu(Thread::Current(), native_histogram_lock_);
3923 native_free_histogram_.AddValue(bytes);
3924 }
3925 do {
3926 expected_size = native_bytes_allocated_.LoadRelaxed();
3927 if (UNLIKELY(bytes > expected_size)) {
3928 ScopedObjectAccess soa(env);
3929 env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
3930                     StringPrintf("Attempted to free %zu native bytes with only %zu native bytes "
3931                                  "registered as allocated", bytes, expected_size).c_str());
3932 break;
3933 }
3934 } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size,
3935 expected_size - bytes));
3936 }
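// Minimal std::atomic sketch of the CAS loop above: retry the subtraction
// until no other thread has raced the counter, refusing to underflow.
#if 0
#include <atomic>
#include <cstddef>

std::atomic<size_t> allocated{0};

bool Unregister(size_t bytes) {
  size_t expected = allocated.load(std::memory_order_relaxed);
  do {
    if (bytes > expected) {
      return false;  // More bytes freed than were ever registered.
    }
    // On failure compare_exchange_weak reloads `expected`, so the underflow
    // check always runs against the freshest value.
  } while (!allocated.compare_exchange_weak(expected, expected - bytes,
                                            std::memory_order_relaxed));
  return true;
}
#endif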
3937
3938 size_t Heap::GetTotalMemory() const {
3939 return std::max(max_allowed_footprint_, GetBytesAllocated());
3940 }
3941
3942 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
3943 DCHECK(mod_union_table != nullptr);
3944 mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
3945 }
3946
3947 void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
3948 CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
3949 (c->IsVariableSize() || c->GetObjectSize() == byte_count)) << c->GetClassFlags();
3950 CHECK_GE(byte_count, sizeof(mirror::Object));
3951 }
3952
3953 void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
3954 CHECK(remembered_set != nullptr);
3955 space::Space* space = remembered_set->GetSpace();
3956 CHECK(space != nullptr);
3957 CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
3958 remembered_sets_.Put(space, remembered_set);
3959 CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
3960 }
3961
3962 void Heap::RemoveRememberedSet(space::Space* space) {
3963 CHECK(space != nullptr);
3964 auto it = remembered_sets_.find(space);
3965 CHECK(it != remembered_sets_.end());
3966 delete it->second;
3967 remembered_sets_.erase(it);
3968 CHECK(remembered_sets_.find(space) == remembered_sets_.end());
3969 }
3970
3971 void Heap::ClearMarkedObjects() {
3972 // Clear all of the spaces' mark bitmaps.
3973 for (const auto& space : GetContinuousSpaces()) {
3974 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
3975 if (space->GetLiveBitmap() != mark_bitmap) {
3976 mark_bitmap->Clear();
3977 }
3978 }
3979   // Clear the marked objects in the discontinuous space object sets.
3980 for (const auto& space : GetDiscontinuousSpaces()) {
3981 space->GetMarkBitmap()->Clear();
3982 }
3983 }
3984
3985 void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
3986 allocation_records_.reset(records);
3987 }
3988
3989 void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
3990 if (IsAllocTrackingEnabled()) {
3991 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3992 if (IsAllocTrackingEnabled()) {
3993 GetAllocationRecords()->VisitRoots(visitor);
3994 }
3995 }
3996 }
3997
3998 void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
3999 if (IsAllocTrackingEnabled()) {
4000 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4001 if (IsAllocTrackingEnabled()) {
4002 GetAllocationRecords()->SweepAllocationRecords(visitor);
4003 }
4004 }
4005 }
4006
4007 void Heap::AllowNewAllocationRecords() const {
4008 CHECK(!kUseReadBarrier);
4009 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4010 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4011 if (allocation_records != nullptr) {
4012 allocation_records->AllowNewAllocationRecords();
4013 }
4014 }
4015
4016 void Heap::DisallowNewAllocationRecords() const {
4017 CHECK(!kUseReadBarrier);
4018 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4019 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4020 if (allocation_records != nullptr) {
4021 allocation_records->DisallowNewAllocationRecords();
4022 }
4023 }
4024
4025 void Heap::BroadcastForNewAllocationRecords() const {
4026 CHECK(kUseReadBarrier);
4027 // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
4028 // be set to false while some threads are waiting for system weak access in
4029 // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
4030 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4031 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4032 if (allocation_records != nullptr) {
4033 allocation_records->BroadcastForNewAllocationRecords();
4034 }
4035 }
4036
4037 // Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
4038 class StackCrawlState {
4039 public:
4040   StackCrawlState(uintptr_t* frames, size_t max_depth, size_t skip_count)
4041 : frames_(frames), frame_count_(0), max_depth_(max_depth), skip_count_(skip_count) {
4042 }
4043   size_t GetFrameCount() const {
4044 return frame_count_;
4045 }
4046   static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
4047 auto* const state = reinterpret_cast<StackCrawlState*>(arg);
4048 const uintptr_t ip = _Unwind_GetIP(context);
4049     // Skip the first skip_count_ frames; e.g. the first frame is get_backtrace itself.
4050 if (ip != 0 && state->skip_count_ > 0) {
4051 --state->skip_count_;
4052 return _URC_NO_REASON;
4053 }
4054 // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
4055 state->frames_[state->frame_count_] = ip;
4056 state->frame_count_++;
4057 return state->frame_count_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
4058 }
4059
4060 private:
4061 uintptr_t* const frames_;
4062 size_t frame_count_;
4063 const size_t max_depth_;
4064 size_t skip_count_;
4065 };
4066
4067 static size_t get_backtrace(uintptr_t* frames, size_t max_depth) {
4068 StackCrawlState state(frames, max_depth, 0u);
4069 _Unwind_Backtrace(&StackCrawlState::Callback, &state);
4070 return state.GetFrameCount();
4071 }
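// Example caller for get_backtrace() (hypothetical): capture and print up to
// 16 raw return addresses for the current thread.
#if 0
#include <cinttypes>
#include <cstdio>

void DumpCallers() {
  constexpr size_t kFrames = 16;
  uintptr_t frames[kFrames];
  const size_t count = get_backtrace(frames, kFrames);
  for (size_t i = 0; i < count; ++i) {
    // Raw instruction pointers only; symbolization (e.g. via dladdr(3)) is
    // out of scope here.
    printf("  #%zu pc 0x%" PRIxPTR "\n", i, frames[i]);
  }
}
#endif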
4072
4073 void Heap::CheckGcStressMode(Thread* self, mirror::Object** obj) {
4074 auto* const runtime = Runtime::Current();
4075 if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
4076 !runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
4077 // Check if we should GC.
4078 bool new_backtrace = false;
4079 {
4080 static constexpr size_t kMaxFrames = 16u;
4081 uintptr_t backtrace[kMaxFrames];
4082 const size_t frames = get_backtrace(backtrace, kMaxFrames);
4083 uint64_t hash = 0;
4084 for (size_t i = 0; i < frames; ++i) {
4085 hash = hash * 2654435761 + backtrace[i];
4086 hash += (hash >> 13) ^ (hash << 6);
4087 }
4088 MutexLock mu(self, *backtrace_lock_);
4089 new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
4090 if (new_backtrace) {
4091 seen_backtraces_.insert(hash);
4092 }
4093 }
4094 if (new_backtrace) {
4095 StackHandleScope<1> hs(self);
4096 auto h = hs.NewHandleWrapper(obj);
4097 CollectGarbage(false);
4098 unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
4099 } else {
4100 seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
4101 }
4102 }
4103 }
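// The de-duplication above, reduced to a standalone sketch: hash the frame
// addresses (2654435761 is Knuth's multiplicative hash constant) and remember
// which hashes have been seen. Names here are illustrative.
#if 0
#include <cstdint>
#include <unordered_set>

std::unordered_set<uint64_t> seen;

bool IsNewBacktrace(const uintptr_t* frames, size_t count) {
  uint64_t hash = 0;
  for (size_t i = 0; i < count; ++i) {
    hash = hash * 2654435761u + frames[i];
    hash += (hash >> 13) ^ (hash << 6);  // Extra bit mixing, as above.
  }
  // insert() returns {iterator, bool}; the bool is true on first sighting.
  return seen.insert(hash).second;
}
#endif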
4104
4105 void Heap::DisableGCForShutdown() {
4106 Thread* const self = Thread::Current();
4107 CHECK(Runtime::Current()->IsShuttingDown(self));
4108 MutexLock mu(self, *gc_complete_lock_);
4109 gc_disabled_for_shutdown_ = true;
4110 }
4111
4112 bool Heap::ObjectIsInBootImageSpace(mirror::Object* obj) const {
4113 for (gc::space::ImageSpace* space : boot_image_spaces_) {
4114 if (space->HasAddress(obj)) {
4115 return true;
4116 }
4117 }
4118 return false;
4119 }
4120
4121 bool Heap::IsInBootImageOatFile(const void* p) const {
4122 for (gc::space::ImageSpace* space : boot_image_spaces_) {
4123 if (space->GetOatFile()->Contains(p)) {
4124 return true;
4125 }
4126 }
4127 return false;
4128 }
4129
4130 void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
4131 uint32_t* boot_image_end,
4132 uint32_t* boot_oat_begin,
4133 uint32_t* boot_oat_end) {
4134 DCHECK(boot_image_begin != nullptr);
4135 DCHECK(boot_image_end != nullptr);
4136 DCHECK(boot_oat_begin != nullptr);
4137 DCHECK(boot_oat_end != nullptr);
4138 *boot_image_begin = 0u;
4139 *boot_image_end = 0u;
4140 *boot_oat_begin = 0u;
4141 *boot_oat_end = 0u;
4142 for (gc::space::ImageSpace* space_ : GetBootImageSpaces()) {
4143 const uint32_t image_begin = PointerToLowMemUInt32(space_->Begin());
4144 const uint32_t image_size = space_->GetImageHeader().GetImageSize();
4145 if (*boot_image_begin == 0 || image_begin < *boot_image_begin) {
4146 *boot_image_begin = image_begin;
4147 }
4148 *boot_image_end = std::max(*boot_image_end, image_begin + image_size);
4149 const OatFile* boot_oat_file = space_->GetOatFile();
4150 const uint32_t oat_begin = PointerToLowMemUInt32(boot_oat_file->Begin());
4151 const uint32_t oat_size = boot_oat_file->Size();
4152 if (*boot_oat_begin == 0 || oat_begin < *boot_oat_begin) {
4153 *boot_oat_begin = oat_begin;
4154 }
4155 *boot_oat_end = std::max(*boot_oat_end, oat_begin + oat_size);
4156 }
4157 }
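// GetBootImagesSize() computes the union of the image and oat [begin, end)
// extents. The same computation over generic (begin, size) pairs, for
// illustration:
#if 0
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// Returns the (lowest begin, highest end) covering all ranges; 0 means
// "unset", matching the sentinel convention above.
std::pair<uint32_t, uint32_t> UnionExtent(
    const std::vector<std::pair<uint32_t, uint32_t>>& ranges) {
  uint32_t lo = 0;
  uint32_t hi = 0;
  for (const auto& r : ranges) {
    if (lo == 0 || r.first < lo) {
      lo = r.first;  // Track the lowest begin.
    }
    hi = std::max(hi, r.first + r.second);  // Track the highest end.
  }
  return {lo, hi};
}
#endif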
4158
4159 } // namespace gc
4160 } // namespace art
4161