1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "heap.h"
18
19 #include <limits>
20 #include <memory>
21 #include <vector>
22
23 #include "android-base/stringprintf.h"
24
25 #include "allocation_listener.h"
26 #include "art_field-inl.h"
27 #include "backtrace_helper.h"
28 #include "base/allocator.h"
29 #include "base/arena_allocator.h"
30 #include "base/dumpable.h"
31 #include "base/file_utils.h"
32 #include "base/histogram-inl.h"
33 #include "base/logging.h" // For VLOG.
34 #include "base/memory_tool.h"
35 #include "base/os.h"
36 #include "base/stl_util.h"
37 #include "base/systrace.h"
38 #include "base/time_utils.h"
39 #include "common_throws.h"
40 #include "cutils/sched_policy.h"
41 #include "debugger.h"
42 #include "dex/dex_file-inl.h"
43 #include "entrypoints/quick/quick_alloc_entrypoints.h"
44 #include "gc/accounting/card_table-inl.h"
45 #include "gc/accounting/heap_bitmap-inl.h"
46 #include "gc/accounting/mod_union_table-inl.h"
47 #include "gc/accounting/read_barrier_table.h"
48 #include "gc/accounting/remembered_set.h"
49 #include "gc/accounting/space_bitmap-inl.h"
50 #include "gc/collector/concurrent_copying.h"
51 #include "gc/collector/mark_compact.h"
52 #include "gc/collector/mark_sweep.h"
53 #include "gc/collector/partial_mark_sweep.h"
54 #include "gc/collector/semi_space.h"
55 #include "gc/collector/sticky_mark_sweep.h"
56 #include "gc/reference_processor.h"
57 #include "gc/scoped_gc_critical_section.h"
58 #include "gc/space/bump_pointer_space.h"
59 #include "gc/space/dlmalloc_space-inl.h"
60 #include "gc/space/image_space.h"
61 #include "gc/space/large_object_space.h"
62 #include "gc/space/region_space.h"
63 #include "gc/space/rosalloc_space-inl.h"
64 #include "gc/space/space-inl.h"
65 #include "gc/space/zygote_space.h"
66 #include "gc/task_processor.h"
67 #include "gc/verification.h"
68 #include "gc_pause_listener.h"
69 #include "gc_root.h"
70 #include "handle_scope-inl.h"
71 #include "heap-inl.h"
72 #include "heap-visit-objects-inl.h"
73 #include "image.h"
74 #include "intern_table.h"
75 #include "java_vm_ext.h"
76 #include "jit/jit.h"
77 #include "jit/jit_code_cache.h"
78 #include "mirror/class-inl.h"
79 #include "mirror/object-inl.h"
80 #include "mirror/object-refvisitor-inl.h"
81 #include "mirror/object_array-inl.h"
82 #include "mirror/reference-inl.h"
83 #include "nativehelper/scoped_local_ref.h"
84 #include "obj_ptr-inl.h"
85 #include "reflection.h"
86 #include "runtime.h"
87 #include "scoped_thread_state_change-inl.h"
88 #include "thread_list.h"
89 #include "verify_object-inl.h"
90 #include "well_known_classes.h"
91
92 namespace art {
93
94 namespace gc {
95
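// Note: the two constants below appear to drive an optional stress test in UpdateProcessState():
// when the iteration count is non-zero, each process-state change forces that many collector
// transitions, sleeping for the wait period between them; a count of zero disables the test.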
96 static constexpr size_t kCollectorTransitionStressIterations = 0;
97 static constexpr size_t kCollectorTransitionStressWait = 10 * 1000; // Microseconds
98
99 DEFINE_RUNTIME_DEBUG_FLAG(Heap, kStressCollectorTransition);
100
101 // Minimum amount of remaining bytes before a concurrent GC is triggered.
102 static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
103 static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
104 // Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
105 // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
106 // threads (lower pauses, use less memory bandwidth).
107 static constexpr double kStickyGcThroughputAdjustment = 1.0;
108 // Whether or not we compact the zygote in PreZygoteFork.
109 static constexpr bool kCompactZygote = kMovingCollector;
110 // How many reserve entries are at the end of the allocation stack; these are only needed if the
111 // allocation stack overflows.
112 static constexpr size_t kAllocationStackReserveSize = 1024;
113 // Default mark stack size in bytes.
114 static const size_t kDefaultMarkStackSize = 64 * KB;
115 // Define space names.
116 static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
117 static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
118 static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
119 static const char* kNonMovingSpaceName = "non moving space";
120 static const char* kZygoteSpaceName = "zygote space";
121 static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
122 static constexpr bool kGCALotMode = false;
123 // GC alot mode uses a small allocation stack to stress test a lot of GC.
124 static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
125 sizeof(mirror::HeapReference<mirror::Object>);
126 // Verify object mode uses a small allocation stack size since searching the allocation stack is slow.
127 static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
128 sizeof(mirror::HeapReference<mirror::Object>);
129 static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
130 sizeof(mirror::HeapReference<mirror::Object>);
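// Note: the stack sizes above are in entries, i.e. bytes divided by the size of a
// mirror::HeapReference (a compressed reference, typically 4 bytes), so the default works out to
// roughly 8 MB / 4 B = 2M entries.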
131
132 // For deterministic compilation, we need the heap to be at a well-known address.
133 static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
134 // Dump the rosalloc stats on SIGQUIT.
135 static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
136
137 static const char* kRegionSpaceName = "main space (region space)";
138
139 // If true, we log all GCs in both the foreground and background. Used for debugging.
140 static constexpr bool kLogAllGCs = false;
141
142 // How much we grow the TLAB if we can do it.
143 static constexpr size_t kPartialTlabSize = 16 * KB;
144 static constexpr bool kUsePartialTlabs = true;
145
146 #if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
147 // 300 MB (0x12c00000) - (default non-moving space capacity).
148 uint8_t* const Heap::kPreferredAllocSpaceBegin =
149 reinterpret_cast<uint8_t*>(300 * MB - kDefaultNonMovingSpaceCapacity);
150 #else
151 #ifdef __ANDROID__
152 // For 32-bit Android, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
153 uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
154 #else
155 // For 32-bit host, use 0x40000000 because asan uses most of the space below this.
156 uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
157 #endif
158 #endif
159
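// Returns true when GC pauses matter, i.e. the process is in a state (such as foreground) where
// the user could perceive jank from a long pause.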
160 static inline bool CareAboutPauseTimes() {
161 return Runtime::Current()->InJankPerceptibleProcessState();
162 }
163
164 Heap::Heap(size_t initial_size,
165 size_t growth_limit,
166 size_t min_free,
167 size_t max_free,
168 double target_utilization,
169 double foreground_heap_growth_multiplier,
170 size_t capacity,
171 size_t non_moving_space_capacity,
172 const std::string& image_file_name,
173 const InstructionSet image_instruction_set,
174 CollectorType foreground_collector_type,
175 CollectorType background_collector_type,
176 space::LargeObjectSpaceType large_object_space_type,
177 size_t large_object_threshold,
178 size_t parallel_gc_threads,
179 size_t conc_gc_threads,
180 bool low_memory_mode,
181 size_t long_pause_log_threshold,
182 size_t long_gc_log_threshold,
183 bool ignore_max_footprint,
184 bool use_tlab,
185 bool verify_pre_gc_heap,
186 bool verify_pre_sweeping_heap,
187 bool verify_post_gc_heap,
188 bool verify_pre_gc_rosalloc,
189 bool verify_pre_sweeping_rosalloc,
190 bool verify_post_gc_rosalloc,
191 bool gc_stress_mode,
192 bool measure_gc_performance,
193 bool use_homogeneous_space_compaction_for_oom,
194 uint64_t min_interval_homogeneous_space_compaction_by_oom)
195 : non_moving_space_(nullptr),
196 rosalloc_space_(nullptr),
197 dlmalloc_space_(nullptr),
198 main_space_(nullptr),
199 collector_type_(kCollectorTypeNone),
200 foreground_collector_type_(foreground_collector_type),
201 background_collector_type_(background_collector_type),
202 desired_collector_type_(foreground_collector_type_),
203 pending_task_lock_(nullptr),
204 parallel_gc_threads_(parallel_gc_threads),
205 conc_gc_threads_(conc_gc_threads),
206 low_memory_mode_(low_memory_mode),
207 long_pause_log_threshold_(long_pause_log_threshold),
208 long_gc_log_threshold_(long_gc_log_threshold),
209 ignore_max_footprint_(ignore_max_footprint),
210 zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
211 zygote_space_(nullptr),
212 large_object_threshold_(large_object_threshold),
213 disable_thread_flip_count_(0),
214 thread_flip_running_(false),
215 collector_type_running_(kCollectorTypeNone),
216 last_gc_cause_(kGcCauseNone),
217 thread_running_gc_(nullptr),
218 last_gc_type_(collector::kGcTypeNone),
219 next_gc_type_(collector::kGcTypePartial),
220 capacity_(capacity),
221 growth_limit_(growth_limit),
222 max_allowed_footprint_(initial_size),
223 concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
224 total_bytes_freed_ever_(0),
225 total_objects_freed_ever_(0),
226 num_bytes_allocated_(0),
227 new_native_bytes_allocated_(0),
228 old_native_bytes_allocated_(0),
229 num_bytes_freed_revoke_(0),
230 verify_missing_card_marks_(false),
231 verify_system_weaks_(false),
232 verify_pre_gc_heap_(verify_pre_gc_heap),
233 verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
234 verify_post_gc_heap_(verify_post_gc_heap),
235 verify_mod_union_table_(false),
236 verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
237 verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
238 verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
239 gc_stress_mode_(gc_stress_mode),
240 /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
241 * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
242 * verification is enabled, we limit the size of allocation stacks to speed up their
243 * searching.
244 */
245 max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
246 : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
247 kDefaultAllocationStackSize),
248 current_allocator_(kAllocatorTypeDlMalloc),
249 current_non_moving_allocator_(kAllocatorTypeNonMoving),
250 bump_pointer_space_(nullptr),
251 temp_space_(nullptr),
252 region_space_(nullptr),
253 min_free_(min_free),
254 max_free_(max_free),
255 target_utilization_(target_utilization),
256 foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
257 total_wait_time_(0),
258 verify_object_mode_(kVerifyObjectModeDisabled),
259 disable_moving_gc_count_(0),
260 semi_space_collector_(nullptr),
261 mark_compact_collector_(nullptr),
262 concurrent_copying_collector_(nullptr),
263 is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
264 use_tlab_(use_tlab),
265 main_space_backup_(nullptr),
266 min_interval_homogeneous_space_compaction_by_oom_(
267 min_interval_homogeneous_space_compaction_by_oom),
268 last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
269 pending_collector_transition_(nullptr),
270 pending_heap_trim_(nullptr),
271 use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
272 running_collection_is_blocking_(false),
273 blocking_gc_count_(0U),
274 blocking_gc_time_(0U),
275 last_update_time_gc_count_rate_histograms_( // Round down by the window duration.
276 (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
277 gc_count_last_window_(0U),
278 blocking_gc_count_last_window_(0U),
279 gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
280 blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
281 kGcCountRateMaxBucketCount),
282 alloc_tracking_enabled_(false),
283 backtrace_lock_(nullptr),
284 seen_backtrace_count_(0u),
285 unique_backtrace_count_(0u),
286 gc_disabled_for_shutdown_(false) {
287 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
288 LOG(INFO) << "Heap() entering";
289 }
290 if (kUseReadBarrier) {
291 CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
292 CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
293 }
294 verification_.reset(new Verification(this));
295 CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
296 ScopedTrace trace(__FUNCTION__);
297 Runtime* const runtime = Runtime::Current();
298 // If we aren't the zygote, switch to the default non zygote allocator. This may update the
299 // entrypoints.
300 const bool is_zygote = runtime->IsZygote();
301 if (!is_zygote) {
302 // Background compaction is currently not supported for command line runs.
303 if (background_collector_type_ != foreground_collector_type_) {
304 VLOG(heap) << "Disabling background compaction for non zygote";
305 background_collector_type_ = foreground_collector_type_;
306 }
307 }
308 ChangeCollector(desired_collector_type_);
309 live_bitmap_.reset(new accounting::HeapBitmap(this));
310 mark_bitmap_.reset(new accounting::HeapBitmap(this));
311 // Requested begin for the alloc space, to follow the mapped image and oat files
312 uint8_t* requested_alloc_space_begin = nullptr;
313 if (foreground_collector_type_ == kCollectorTypeCC) {
314 // Need to use a low address so that we can allocate a contiguous 2 * Xmx space when there's no
315 // image (dex2oat for target).
316 requested_alloc_space_begin = kPreferredAllocSpaceBegin;
317 }
318
319 // Load image space(s).
320 if (space::ImageSpace::LoadBootImage(image_file_name,
321 image_instruction_set,
322 &boot_image_spaces_,
323 &requested_alloc_space_begin)) {
324 for (auto space : boot_image_spaces_) {
325 AddSpace(space);
326 }
327 }
328
329 /*
330 requested_alloc_space_begin -> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
331 +- nonmoving space (non_moving_space_capacity)+-
332 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
333 +-????????????????????????????????????????????+-
334 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
335 +-main alloc space / bump space 1 (capacity_) +-
336 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
337 +-????????????????????????????????????????????+-
338 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
339 +-main alloc space2 / bump space 2 (capacity_)+-
340 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
341 */
342 // We don't have hspace compaction enabled with GSS or CC.
343 if (foreground_collector_type_ == kCollectorTypeGSS ||
344 foreground_collector_type_ == kCollectorTypeCC) {
345 use_homogeneous_space_compaction_for_oom_ = false;
346 }
347 bool support_homogeneous_space_compaction =
348 background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
349 use_homogeneous_space_compaction_for_oom_;
350 // We may use the same space (the main space) for the non moving space if we don't need to
351 // compact from the main space.
352 // This is not the case if we support homogeneous compaction or have a moving background
353 // collector type.
354 bool separate_non_moving_space = is_zygote ||
355 support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
356 IsMovingGc(background_collector_type_);
357 if (foreground_collector_type_ == kCollectorTypeGSS) {
358 separate_non_moving_space = false;
359 }
360 std::unique_ptr<MemMap> main_mem_map_1;
361 std::unique_ptr<MemMap> main_mem_map_2;
362
363 // Gross hack to make dex2oat deterministic.
364 if (foreground_collector_type_ == kCollectorTypeMS &&
365 requested_alloc_space_begin == nullptr &&
366 Runtime::Current()->IsAotCompiler()) {
367 // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
368 // b/26849108
369 requested_alloc_space_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
370 }
371 uint8_t* request_begin = requested_alloc_space_begin;
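// When a separate non moving space is used, it is reserved at requested_alloc_space_begin below,
// so bump the request for the main/region space past it rather than overlapping the two mappings.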
372 if (request_begin != nullptr && separate_non_moving_space) {
373 request_begin += non_moving_space_capacity;
374 }
375 std::string error_str;
376 std::unique_ptr<MemMap> non_moving_space_mem_map;
377 if (separate_non_moving_space) {
378 ScopedTrace trace2("Create separate non moving space");
379 // If we are the zygote, the non moving space becomes the zygote space when we run
380 // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
381 // rename the mem map later.
382 const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
383 // Reserve the non moving mem map before the other two since it needs to be at a specific
384 // address.
385 non_moving_space_mem_map.reset(MapAnonymousPreferredAddress(space_name,
386 requested_alloc_space_begin,
387 non_moving_space_capacity,
388 &error_str));
389 CHECK(non_moving_space_mem_map != nullptr) << error_str;
390 // Try to reserve virtual memory at a lower address if we have a separate non moving space.
391 request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
392 }
393 // Attempt to create 2 mem maps at or after the requested begin.
394 if (foreground_collector_type_ != kCollectorTypeCC) {
395 ScopedTrace trace2("Create main mem map");
396 if (separate_non_moving_space || !is_zygote) {
397 main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0],
398 request_begin,
399 capacity_,
400 &error_str));
401 } else {
402 // If no separate non-moving space and we are the zygote, the main space must come right
403 // after the image space to avoid a gap. This is required since we want the zygote space to
404 // be adjacent to the image space.
405 main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
406 PROT_READ | PROT_WRITE, true, false,
407 &error_str));
408 }
409 CHECK(main_mem_map_1.get() != nullptr) << error_str;
410 }
411 if (support_homogeneous_space_compaction ||
412 background_collector_type_ == kCollectorTypeSS ||
413 foreground_collector_type_ == kCollectorTypeSS) {
414 ScopedTrace trace2("Create main mem map 2");
415 main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
416 capacity_, &error_str));
417 CHECK(main_mem_map_2.get() != nullptr) << error_str;
418 }
419
420 // Create the non moving space first so that bitmaps don't take up the address range.
421 if (separate_non_moving_space) {
422 ScopedTrace trace2("Add non moving space");
423 // Non moving space is always dlmalloc since we currently don't have support for multiple
424 // active rosalloc spaces.
425 const size_t size = non_moving_space_mem_map->Size();
426 non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
427 non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
428 initial_size, size, size, false);
429 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
430 CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
431 << requested_alloc_space_begin;
432 AddSpace(non_moving_space_);
433 }
434 // Create other spaces based on whether or not we have a moving GC.
435 if (foreground_collector_type_ == kCollectorTypeCC) {
436 CHECK(separate_non_moving_space);
437 // Reserve twice the capacity, to allow evacuating every region for explicit GCs.
438 MemMap* region_space_mem_map = space::RegionSpace::CreateMemMap(kRegionSpaceName,
439 capacity_ * 2,
440 request_begin);
441 CHECK(region_space_mem_map != nullptr) << "No region space mem map";
442 region_space_ = space::RegionSpace::Create(kRegionSpaceName, region_space_mem_map);
443 AddSpace(region_space_);
444 } else if (IsMovingGc(foreground_collector_type_) &&
445 foreground_collector_type_ != kCollectorTypeGSS) {
446 // Create bump pointer spaces.
447 // We only need to create the bump pointer spaces if the foreground collector is a compacting GC.
448 // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
449 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
450 main_mem_map_1.release());
451 CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
452 AddSpace(bump_pointer_space_);
453 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
454 main_mem_map_2.release());
455 CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
456 AddSpace(temp_space_);
457 CHECK(separate_non_moving_space);
458 } else {
459 CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
460 CHECK(main_space_ != nullptr);
461 AddSpace(main_space_);
462 if (!separate_non_moving_space) {
463 non_moving_space_ = main_space_;
464 CHECK(!non_moving_space_->CanMoveObjects());
465 }
466 if (foreground_collector_type_ == kCollectorTypeGSS) {
467 CHECK_EQ(foreground_collector_type_, background_collector_type_);
468 // Create bump pointer spaces instead of a backup space.
469 main_mem_map_2.release();
470 bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
471 kGSSBumpPointerSpaceCapacity, nullptr);
472 CHECK(bump_pointer_space_ != nullptr);
473 AddSpace(bump_pointer_space_);
474 temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
475 kGSSBumpPointerSpaceCapacity, nullptr);
476 CHECK(temp_space_ != nullptr);
477 AddSpace(temp_space_);
478 } else if (main_mem_map_2.get() != nullptr) {
479 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
480 main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
481 growth_limit_, capacity_, name, true));
482 CHECK(main_space_backup_.get() != nullptr);
483 // Add the space so it's accounted for in the heap_begin and heap_end.
484 AddSpace(main_space_backup_.get());
485 }
486 }
487 CHECK(non_moving_space_ != nullptr);
488 CHECK(!non_moving_space_->CanMoveObjects());
489 // Allocate the large object space.
490 if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
491 large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
492 capacity_);
493 CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
494 } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
495 large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
496 CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
497 } else {
498 // Disable the large object space by making the cutoff excessively large.
499 large_object_threshold_ = std::numeric_limits<size_t>::max();
500 large_object_space_ = nullptr;
501 }
502 if (large_object_space_ != nullptr) {
503 AddSpace(large_object_space_);
504 }
505 // Compute heap capacity. Continuous spaces are sorted in order of Begin().
506 CHECK(!continuous_spaces_.empty());
507 // Relies on the spaces being sorted.
508 uint8_t* heap_begin = continuous_spaces_.front()->Begin();
509 uint8_t* heap_end = continuous_spaces_.back()->Limit();
510 size_t heap_capacity = heap_end - heap_begin;
511 // Remove the main backup space since it slows down the GC to have unused extra spaces.
512 // TODO: Avoid needing to do this.
513 if (main_space_backup_.get() != nullptr) {
514 RemoveSpace(main_space_backup_.get());
515 }
516 // Allocate the card table.
517 // We currently don't support dynamically resizing the card table.
518 // Since we don't know where in the low_4gb the app image will be located, make the card table
519 // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
520 UNUSED(heap_capacity);
521 // Start at 4 KB, we can be sure there are no spaces mapped this low since the address range is
522 // reserved by the kernel.
523 static constexpr size_t kMinHeapAddress = 4 * KB;
524 card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
525 4 * GB - kMinHeapAddress));
526 CHECK(card_table_.get() != nullptr) << "Failed to create card table";
527 if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
528 rb_table_.reset(new accounting::ReadBarrierTable());
529 DCHECK(rb_table_->IsAllCleared());
530 }
531 if (HasBootImageSpace()) {
532 // Don't add the image mod union table if we are running without an image; this can crash if
533 // we use the CardCache implementation.
534 for (space::ImageSpace* image_space : GetBootImageSpaces()) {
535 accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
536 "Image mod-union table", this, image_space);
537 CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
538 AddModUnionTable(mod_union_table);
539 }
540 }
541 if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
542 accounting::RememberedSet* non_moving_space_rem_set =
543 new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
544 CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
545 AddRememberedSet(non_moving_space_rem_set);
546 }
547 // TODO: Count objects in the image space here?
548 num_bytes_allocated_.StoreRelaxed(0);
549 mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
550 kDefaultMarkStackSize));
551 const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
552 allocation_stack_.reset(accounting::ObjectStack::Create(
553 "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
554 live_stack_.reset(accounting::ObjectStack::Create(
555 "live stack", max_allocation_stack_size_, alloc_stack_capacity));
556 // It's still too early to take a lock because there are no threads yet, but we can create locks
557 // now. We don't create them earlier to make it clear that you can't use locks during heap
558 // initialization.
559 gc_complete_lock_ = new Mutex("GC complete lock");
560 gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
561 *gc_complete_lock_));
562
563 thread_flip_lock_ = new Mutex("GC thread flip lock");
564 thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
565 *thread_flip_lock_));
566 task_processor_.reset(new TaskProcessor());
567 reference_processor_.reset(new ReferenceProcessor());
568 pending_task_lock_ = new Mutex("Pending task lock");
569 if (ignore_max_footprint_) {
570 SetIdealFootprint(std::numeric_limits<size_t>::max());
571 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
572 }
573 CHECK_NE(max_allowed_footprint_, 0U);
574 // Create our garbage collectors.
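// The loop below instantiates both the non-concurrent (i == 0) and concurrent (i == 1) variants
// of the mark-sweep family, but only for the collector types the runtime may actually use.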
575 for (size_t i = 0; i < 2; ++i) {
576 const bool concurrent = i != 0;
577 if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
578 (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
579 garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
580 garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
581 garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
582 }
583 }
584 if (kMovingCollector) {
585 if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
586 MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
587 use_homogeneous_space_compaction_for_oom_) {
588 // TODO: Clean this up.
589 const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
590 semi_space_collector_ = new collector::SemiSpace(this, generational,
591 generational ? "generational" : "");
592 garbage_collectors_.push_back(semi_space_collector_);
593 }
594 if (MayUseCollector(kCollectorTypeCC)) {
595 concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
596 "",
597 measure_gc_performance);
598 DCHECK(region_space_ != nullptr);
599 concurrent_copying_collector_->SetRegionSpace(region_space_);
600 garbage_collectors_.push_back(concurrent_copying_collector_);
601 }
602 if (MayUseCollector(kCollectorTypeMC)) {
603 mark_compact_collector_ = new collector::MarkCompact(this);
604 garbage_collectors_.push_back(mark_compact_collector_);
605 }
606 }
607 if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
608 (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
609 // Check that there's no gap between the image space and the non moving space so that the
610 // immune region won't break (e.g. due to a large object allocated in the gap). This is only
611 // required when we're the zygote or using GSS.
612 // Space with smallest Begin().
613 space::ImageSpace* first_space = nullptr;
614 for (space::ImageSpace* space : boot_image_spaces_) {
615 if (first_space == nullptr || space->Begin() < first_space->Begin()) {
616 first_space = space;
617 }
618 }
619 bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
620 if (!no_gap) {
621 PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
622 MemMap::DumpMaps(LOG_STREAM(ERROR), true);
623 LOG(FATAL) << "There's a gap between the image space and the non-moving space";
624 }
625 }
626 instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
627 if (gc_stress_mode_) {
628 backtrace_lock_ = new Mutex("GC complete lock");
629 }
630 if (is_running_on_memory_tool_ || gc_stress_mode_) {
631 instrumentation->InstrumentQuickAllocEntryPoints();
632 }
633 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
634 LOG(INFO) << "Heap() exiting";
635 }
636 }
637
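// Maps an anonymous region, preferring (but not requiring) the given address: if mapping at the
// requested begin fails, retry once with no address hint; returns nullptr on failure with
// *out_error_str populated.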
638 MemMap* Heap::MapAnonymousPreferredAddress(const char* name,
639 uint8_t* request_begin,
640 size_t capacity,
641 std::string* out_error_str) {
642 while (true) {
643 MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
644 PROT_READ | PROT_WRITE, true, false, out_error_str);
645 if (map != nullptr || request_begin == nullptr) {
646 return map;
647 }
648 // Retry a second time with no specified request begin.
649 request_begin = nullptr;
650 }
651 }
652
653 bool Heap::MayUseCollector(CollectorType type) const {
654 return foreground_collector_type_ == type || background_collector_type_ == type;
655 }
656
657 space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
658 size_t initial_size,
659 size_t growth_limit,
660 size_t capacity,
661 const char* name,
662 bool can_move_objects) {
663 space::MallocSpace* malloc_space = nullptr;
664 if (kUseRosAlloc) {
665 // Create rosalloc space.
666 malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
667 initial_size, growth_limit, capacity,
668 low_memory_mode_, can_move_objects);
669 } else {
670 malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
671 initial_size, growth_limit, capacity,
672 can_move_objects);
673 }
674 if (collector::SemiSpace::kUseRememberedSet) {
675 accounting::RememberedSet* rem_set =
676 new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
677 CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
678 AddRememberedSet(rem_set);
679 }
680 CHECK(malloc_space != nullptr) << "Failed to create " << name;
681 malloc_space->SetFootprintLimit(malloc_space->Capacity());
682 return malloc_space;
683 }
684
685 void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
686 size_t capacity) {
687 // Is background compaction enabled?
688 bool can_move_objects = IsMovingGc(background_collector_type_) !=
689 IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
690 // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
691 // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
692 // from the main space to the zygote space. If background compaction is enabled, always pass in
693 // that we can move objects.
694 if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
695 // After the zygote we want this to be false if we don't have background compaction enabled so
696 // that getting primitive array elements is faster.
697 // We never have homogeneous compaction with GSS and don't need a space with movable objects.
698 can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
699 }
700 if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
701 RemoveRememberedSet(main_space_);
702 }
703 const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
704 main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
705 can_move_objects);
706 SetSpaceAsDefault(main_space_);
707 VLOG(heap) << "Created main space " << main_space_;
708 }
709
710 void Heap::ChangeAllocator(AllocatorType allocator) {
711 if (current_allocator_ != allocator) {
712 // These two allocators are only used internally and don't have any entrypoints.
713 CHECK_NE(allocator, kAllocatorTypeLOS);
714 CHECK_NE(allocator, kAllocatorTypeNonMoving);
715 current_allocator_ = allocator;
716 MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
717 SetQuickAllocEntryPointsAllocator(current_allocator_);
718 Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
719 }
720 }
721
722 void Heap::DisableMovingGc() {
723 CHECK(!kUseReadBarrier);
724 if (IsMovingGc(foreground_collector_type_)) {
725 foreground_collector_type_ = kCollectorTypeCMS;
726 }
727 if (IsMovingGc(background_collector_type_)) {
728 background_collector_type_ = foreground_collector_type_;
729 }
730 TransitionCollector(foreground_collector_type_);
731 Thread* const self = Thread::Current();
732 ScopedThreadStateChange tsc(self, kSuspended);
733 ScopedSuspendAll ssa(__FUNCTION__);
734 // Something may have caused the transition to fail.
735 if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
736 CHECK(main_space_ != nullptr);
737 // The allocation stack may have non movable objects in it. We need to flush it since the GC
738 // can't only handle marking allocation stack objects of one non moving space and one main
739 // space.
740 {
741 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
742 FlushAllocStack();
743 }
744 main_space_->DisableMovingObjects();
745 non_moving_space_ = main_space_;
746 CHECK(!non_moving_space_->CanMoveObjects());
747 }
748 }
749
750 bool Heap::IsCompilingBoot() const {
751 if (!Runtime::Current()->IsAotCompiler()) {
752 return false;
753 }
754 ScopedObjectAccess soa(Thread::Current());
755 for (const auto& space : continuous_spaces_) {
756 if (space->IsImageSpace() || space->IsZygoteSpace()) {
757 return false;
758 }
759 }
760 return true;
761 }
762
763 void Heap::IncrementDisableMovingGC(Thread* self) {
764 // Need to do this holding the lock to prevent races where the GC is about to run / running when
765 // we attempt to disable it.
766 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
767 MutexLock mu(self, *gc_complete_lock_);
768 ++disable_moving_gc_count_;
769 if (IsMovingGc(collector_type_running_)) {
770 WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
771 }
772 }
773
774 void Heap::DecrementDisableMovingGC(Thread* self) {
775 MutexLock mu(self, *gc_complete_lock_);
776 CHECK_GT(disable_moving_gc_count_, 0U);
777 --disable_moving_gc_count_;
778 }
779
780 void Heap::IncrementDisableThreadFlip(Thread* self) {
781 // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
782 CHECK(kUseReadBarrier);
783 bool is_nested = self->GetDisableThreadFlipCount() > 0;
784 self->IncrementDisableThreadFlipCount();
785 if (is_nested) {
786 // If this is a nested JNI critical section enter, we don't need to wait or increment the global
787 // counter. The global counter is incremented only once for a thread for the outermost enter.
788 return;
789 }
790 ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
791 MutexLock mu(self, *thread_flip_lock_);
792 bool has_waited = false;
793 uint64_t wait_start = NanoTime();
794 if (thread_flip_running_) {
795 ScopedTrace trace("IncrementDisableThreadFlip");
796 while (thread_flip_running_) {
797 has_waited = true;
798 thread_flip_cond_->Wait(self);
799 }
800 }
801 ++disable_thread_flip_count_;
802 if (has_waited) {
803 uint64_t wait_time = NanoTime() - wait_start;
804 total_wait_time_ += wait_time;
805 if (wait_time > long_pause_log_threshold_) {
806 LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
807 }
808 }
809 }
810
811 void Heap::DecrementDisableThreadFlip(Thread* self) {
812 // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up
813 // the GC waiting before doing a thread flip.
814 CHECK(kUseReadBarrier);
815 self->DecrementDisableThreadFlipCount();
816 bool is_outermost = self->GetDisableThreadFlipCount() == 0;
817 if (!is_outermost) {
818 // If this is not an outermost JNI critical exit, we don't need to decrement the global counter.
819 // The global counter is decremented only once for a thread for the outermost exit.
820 return;
821 }
822 MutexLock mu(self, *thread_flip_lock_);
823 CHECK_GT(disable_thread_flip_count_, 0U);
824 --disable_thread_flip_count_;
825 if (disable_thread_flip_count_ == 0) {
826 // Potentially notify the GC thread blocking to begin a thread flip.
827 thread_flip_cond_->Broadcast(self);
828 }
829 }
830
831 void Heap::ThreadFlipBegin(Thread* self) {
832 // Supposed to be called by GC. Set thread_flip_running_ to be true. If disable_thread_flip_count_
833 // > 0, block. Otherwise, go ahead.
834 CHECK(kUseReadBarrier);
835 ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
836 MutexLock mu(self, *thread_flip_lock_);
837 bool has_waited = false;
838 uint64_t wait_start = NanoTime();
839 CHECK(!thread_flip_running_);
840 // Set this to true before waiting so that frequent JNI critical enter/exits won't starve
841 // GC. This is like a writer preference of a reader-writer lock.
842 thread_flip_running_ = true;
843 while (disable_thread_flip_count_ > 0) {
844 has_waited = true;
845 thread_flip_cond_->Wait(self);
846 }
847 if (has_waited) {
848 uint64_t wait_time = NanoTime() - wait_start;
849 total_wait_time_ += wait_time;
850 if (wait_time > long_pause_log_threshold_) {
851 LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
852 }
853 }
854 }
855
856 void Heap::ThreadFlipEnd(Thread* self) {
857 // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators
858 // waiting before doing a JNI critical.
859 CHECK(kUseReadBarrier);
860 MutexLock mu(self, *thread_flip_lock_);
861 CHECK(thread_flip_running_);
862 thread_flip_running_ = false;
863 // Potentially notify mutator threads blocking to enter a JNI critical section.
864 thread_flip_cond_->Broadcast(self);
865 }
866
867 void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
868 if (old_process_state != new_process_state) {
869 const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
870 for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
871 // Start at index 1 to avoid "is always false" warning.
872 // Have iteration 1 always transition the collector.
873 TransitionCollector((((i & 1) == 1) == jank_perceptible)
874 ? foreground_collector_type_
875 : background_collector_type_);
876 usleep(kCollectorTransitionStressWait);
877 }
878 if (jank_perceptible) {
879 // Transition back to foreground right away to prevent jank.
880 RequestCollectorTransition(foreground_collector_type_, 0);
881 } else {
882 // Don't delay for debug builds since we may want to stress test the GC.
883 // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
884 // special handling which does a homogeneous space compaction once but then doesn't transition
885 // the collector. Similarly, we invoke a full compaction for kCollectorTypeCC but don't
886 // transition the collector.
887 RequestCollectorTransition(background_collector_type_,
888 kStressCollectorTransition
889 ? 0
890 : kCollectorTransitionWait);
891 }
892 }
893 }
894
895 void Heap::CreateThreadPool() {
896 const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
897 if (num_threads != 0) {
898 thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
899 }
900 }
901
902 void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
903 space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
904 space::ContinuousSpace* space2 = non_moving_space_;
905 // TODO: Generalize this to n bitmaps?
906 CHECK(space1 != nullptr);
907 CHECK(space2 != nullptr);
908 MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
909 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
910 stack);
911 }
912
913 void Heap::DeleteThreadPool() {
914 thread_pool_.reset(nullptr);
915 }
916
917 void Heap::AddSpace(space::Space* space) {
918 CHECK(space != nullptr);
919 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
920 if (space->IsContinuousSpace()) {
921 DCHECK(!space->IsDiscontinuousSpace());
922 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
923 // Continuous spaces don't necessarily have bitmaps.
924 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
925 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
926 // The region space bitmap is not added since VisitObjects visits the region space objects with
927 // special handling.
928 if (live_bitmap != nullptr && !space->IsRegionSpace()) {
929 CHECK(mark_bitmap != nullptr);
930 live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
931 mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
932 }
933 continuous_spaces_.push_back(continuous_space);
934 // Ensure that spaces remain sorted in increasing order of start address.
935 std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
936 [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
937 return a->Begin() < b->Begin();
938 });
939 } else {
940 CHECK(space->IsDiscontinuousSpace());
941 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
942 live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
943 mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
944 discontinuous_spaces_.push_back(discontinuous_space);
945 }
946 if (space->IsAllocSpace()) {
947 alloc_spaces_.push_back(space->AsAllocSpace());
948 }
949 }
950
951 void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
952 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
953 if (continuous_space->IsDlMallocSpace()) {
954 dlmalloc_space_ = continuous_space->AsDlMallocSpace();
955 } else if (continuous_space->IsRosAllocSpace()) {
956 rosalloc_space_ = continuous_space->AsRosAllocSpace();
957 }
958 }
959
960 void Heap::RemoveSpace(space::Space* space) {
961 DCHECK(space != nullptr);
962 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
963 if (space->IsContinuousSpace()) {
964 DCHECK(!space->IsDiscontinuousSpace());
965 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
966 // Continuous spaces don't necessarily have bitmaps.
967 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
968 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
969 if (live_bitmap != nullptr && !space->IsRegionSpace()) {
970 DCHECK(mark_bitmap != nullptr);
971 live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
972 mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
973 }
974 auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
975 DCHECK(it != continuous_spaces_.end());
976 continuous_spaces_.erase(it);
977 } else {
978 DCHECK(space->IsDiscontinuousSpace());
979 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
980 live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
981 mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
982 auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
983 discontinuous_space);
984 DCHECK(it != discontinuous_spaces_.end());
985 discontinuous_spaces_.erase(it);
986 }
987 if (space->IsAllocSpace()) {
988 auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
989 DCHECK(it != alloc_spaces_.end());
990 alloc_spaces_.erase(it);
991 }
992 }
993
994 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
995 // Dump cumulative timings.
996 os << "Dumping cumulative Gc timings\n";
997 uint64_t total_duration = 0;
998 // Dump cumulative loggers for each GC type.
999 uint64_t total_paused_time = 0;
1000 for (auto& collector : garbage_collectors_) {
1001 total_duration += collector->GetCumulativeTimings().GetTotalNs();
1002 total_paused_time += collector->GetTotalPausedTimeNs();
1003 collector->DumpPerformanceInfo(os);
1004 }
1005 if (total_duration != 0) {
1006 const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
1007 os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
1008 os << "Mean GC size throughput: "
1009 << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
1010 os << "Mean GC object throughput: "
1011 << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
1012 }
1013 uint64_t total_objects_allocated = GetObjectsAllocatedEver();
1014 os << "Total number of allocations " << total_objects_allocated << "\n";
1015 os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
1016 os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
1017 os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
1018 os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
1019 os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
1020 os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
1021 os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
1022 if (HasZygoteSpace()) {
1023 os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
1024 }
1025 os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
1026 os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
1027 os << "Total GC count: " << GetGcCount() << "\n";
1028 os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
1029 os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
1030 os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
1031
1032 {
1033 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1034 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1035 os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1036 gc_count_rate_histogram_.DumpBins(os);
1037 os << "\n";
1038 }
1039 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1040 os << "Histogram of blocking GC count per "
1041 << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1042 blocking_gc_count_rate_histogram_.DumpBins(os);
1043 os << "\n";
1044 }
1045 }
1046
1047 if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
1048 rosalloc_space_->DumpStats(os);
1049 }
1050
1051 os << "Registered native bytes allocated: "
1052 << old_native_bytes_allocated_.LoadRelaxed() + new_native_bytes_allocated_.LoadRelaxed()
1053 << "\n";
1054
1055 BaseMutex::DumpAll(os);
1056 }
1057
1058 void Heap::ResetGcPerformanceInfo() {
1059 for (auto& collector : garbage_collectors_) {
1060 collector->ResetMeasurements();
1061 }
1062 total_bytes_freed_ever_ = 0;
1063 total_objects_freed_ever_ = 0;
1064 total_wait_time_ = 0;
1065 blocking_gc_count_ = 0;
1066 blocking_gc_time_ = 0;
1067 gc_count_last_window_ = 0;
1068 blocking_gc_count_last_window_ = 0;
1069 last_update_time_gc_count_rate_histograms_ = // Round down by the window duration.
1070 (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
1071 {
1072 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1073 gc_count_rate_histogram_.Reset();
1074 blocking_gc_count_rate_histogram_.Reset();
1075 }
1076 }
1077
1078 uint64_t Heap::GetGcCount() const {
1079 uint64_t gc_count = 0U;
1080 for (auto& collector : garbage_collectors_) {
1081 gc_count += collector->GetCumulativeTimings().GetIterations();
1082 }
1083 return gc_count;
1084 }
1085
1086 uint64_t Heap::GetGcTime() const {
1087 uint64_t gc_time = 0U;
1088 for (auto& collector : garbage_collectors_) {
1089 gc_time += collector->GetCumulativeTimings().GetTotalNs();
1090 }
1091 return gc_time;
1092 }
1093
1094 uint64_t Heap::GetBlockingGcCount() const {
1095 return blocking_gc_count_;
1096 }
1097
1098 uint64_t Heap::GetBlockingGcTime() const {
1099 return blocking_gc_time_;
1100 }
1101
1102 void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
1103 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1104 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1105 gc_count_rate_histogram_.DumpBins(os);
1106 }
1107 }
1108
1109 void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
1110 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1111 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1112 blocking_gc_count_rate_histogram_.DumpBins(os);
1113 }
1114 }
1115
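// Atomically installs new_value and returns the previously registered listener. The
// compare-and-set loop below is effectively an atomic exchange: it retries until no other thread
// has raced in between the load and the swap.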
1116 ALWAYS_INLINE
1117 static inline AllocationListener* GetAndOverwriteAllocationListener(
1118 Atomic<AllocationListener*>* storage, AllocationListener* new_value) {
1119 AllocationListener* old;
1120 do {
1121 old = storage->LoadSequentiallyConsistent();
1122 } while (!storage->CompareAndSetStrongSequentiallyConsistent(old, new_value));
1123 return old;
1124 }
1125
1126 Heap::~Heap() {
1127 VLOG(heap) << "Starting ~Heap()";
1128 STLDeleteElements(&garbage_collectors_);
1129 // If we don't reset then the mark stack complains in its destructor.
1130 allocation_stack_->Reset();
1131 allocation_records_.reset();
1132 live_stack_->Reset();
1133 STLDeleteValues(&mod_union_tables_);
1134 STLDeleteValues(&remembered_sets_);
1135 STLDeleteElements(&continuous_spaces_);
1136 STLDeleteElements(&discontinuous_spaces_);
1137 delete gc_complete_lock_;
1138 delete thread_flip_lock_;
1139 delete pending_task_lock_;
1140 delete backtrace_lock_;
1141 if (unique_backtrace_count_.LoadRelaxed() != 0 || seen_backtrace_count_.LoadRelaxed() != 0) {
1142 LOG(INFO) << "gc stress unique=" << unique_backtrace_count_.LoadRelaxed()
1143 << " total=" << seen_backtrace_count_.LoadRelaxed() +
1144 unique_backtrace_count_.LoadRelaxed();
1145 }
1146
1147 VLOG(heap) << "Finished ~Heap()";
1148 }
1149
1150
1151 space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const {
1152 for (const auto& space : continuous_spaces_) {
1153 if (space->Contains(addr)) {
1154 return space;
1155 }
1156 }
1157 return nullptr;
1158 }
1159
1160 space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1161 bool fail_ok) const {
1162 space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr());
1163 if (space != nullptr) {
1164 return space;
1165 }
1166 if (!fail_ok) {
1167 LOG(FATAL) << "object " << obj << " not inside any spaces!";
1168 }
1169 return nullptr;
1170 }
1171
1172 space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1173 bool fail_ok) const {
1174 for (const auto& space : discontinuous_spaces_) {
1175 if (space->Contains(obj.Ptr())) {
1176 return space;
1177 }
1178 }
1179 if (!fail_ok) {
1180 LOG(FATAL) << "object " << obj << " not inside any spaces!";
1181 }
1182 return nullptr;
1183 }
1184
1185 space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const {
1186 space::Space* result = FindContinuousSpaceFromObject(obj, true);
1187 if (result != nullptr) {
1188 return result;
1189 }
1190 return FindDiscontinuousSpaceFromObject(obj, fail_ok);
1191 }
1192
1193 space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
1194 for (const auto& space : continuous_spaces_) {
1195 if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1196 return space;
1197 }
1198 }
1199 for (const auto& space : discontinuous_spaces_) {
1200 if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1201 return space;
1202 }
1203 }
1204 return nullptr;
1205 }
1206
1207
1208 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
1209 // If we're in a stack overflow, do not create a new exception. It would require running the
1210 // constructor, which will of course still be in a stack overflow.
1211 if (self->IsHandlingStackOverflow()) {
1212 self->SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1213 return;
1214 }
1215
1216 std::ostringstream oss;
1217 size_t total_bytes_free = GetFreeMemory();
1218 oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
1219 << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
1220 << " max allowed footprint " << max_allowed_footprint_ << ", growth limit "
1221 << growth_limit_;
1222 // If the allocation failed due to fragmentation, print out the largest continuous allocation.
1223 if (total_bytes_free >= byte_count) {
1224 space::AllocSpace* space = nullptr;
1225 if (allocator_type == kAllocatorTypeNonMoving) {
1226 space = non_moving_space_;
1227 } else if (allocator_type == kAllocatorTypeRosAlloc ||
1228 allocator_type == kAllocatorTypeDlMalloc) {
1229 space = main_space_;
1230 } else if (allocator_type == kAllocatorTypeBumpPointer ||
1231 allocator_type == kAllocatorTypeTLAB) {
1232 space = bump_pointer_space_;
1233 } else if (allocator_type == kAllocatorTypeRegion ||
1234 allocator_type == kAllocatorTypeRegionTLAB) {
1235 space = region_space_;
1236 }
1237 if (space != nullptr) {
1238 space->LogFragmentationAllocFailure(oss, byte_count);
1239 }
1240 }
1241 self->ThrowOutOfMemoryError(oss.str().c_str());
1242 }
1243
void Heap::DoPendingCollectorTransition() {
1245 CollectorType desired_collector_type = desired_collector_type_;
1246 // Launch homogeneous space compaction if it is desired.
1247 if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
1248 if (!CareAboutPauseTimes()) {
1249 PerformHomogeneousSpaceCompact();
1250 } else {
1251 VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
1252 }
1253 } else if (desired_collector_type == kCollectorTypeCCBackground) {
1254 DCHECK(kUseReadBarrier);
1255 if (!CareAboutPauseTimes()) {
1256 // Invoke CC full compaction.
1257 CollectGarbageInternal(collector::kGcTypeFull,
1258 kGcCauseCollectorTransition,
1259 /*clear_soft_references*/false);
1260 } else {
1261 VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
1262 }
1263 } else {
1264 TransitionCollector(desired_collector_type);
1265 }
1266 }
1267
void Heap::Trim(Thread* self) {
1269 Runtime* const runtime = Runtime::Current();
1270 if (!CareAboutPauseTimes()) {
    // Deflate the monitors. This can cause a pause, but it shouldn't matter since we don't care
    // about pauses.
1273 ScopedTrace trace("Deflating monitors");
1274 // Avoid race conditions on the lock word for CC.
1275 ScopedGCCriticalSection gcs(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1276 ScopedSuspendAll ssa(__FUNCTION__);
1277 uint64_t start_time = NanoTime();
1278 size_t count = runtime->GetMonitorList()->DeflateMonitors();
1279 VLOG(heap) << "Deflating " << count << " monitors took "
1280 << PrettyDuration(NanoTime() - start_time);
1281 }
1282 TrimIndirectReferenceTables(self);
1283 TrimSpaces(self);
1284 // Trim arenas that may have been used by JIT or verifier.
1285 runtime->GetArenaPool()->TrimMaps();
1286 }
1287
1288 class TrimIndirectReferenceTableClosure : public Closure {
1289 public:
  explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1291 }
  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1293 thread->GetJniEnv()->TrimLocals();
1294 // If thread is a running mutator, then act on behalf of the trim thread.
1295 // See the code in ThreadList::RunCheckpoint.
1296 barrier_->Pass(Thread::Current());
1297 }
1298
1299 private:
1300 Barrier* const barrier_;
1301 };
1302
void Heap::TrimIndirectReferenceTables(Thread* self) {
1304 ScopedObjectAccess soa(self);
1305 ScopedTrace trace(__PRETTY_FUNCTION__);
1306 JavaVMExt* vm = soa.Vm();
1307 // Trim globals indirect reference table.
1308 vm->TrimGlobals();
1309 // Trim locals indirect reference tables.
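  // The barrier starts at zero; RunCheckpoint below returns how many threads will run the closure,
  // and Increment() then waits until that many threads have passed the barrier, i.e. have trimmed
  // their local reference tables.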
1310 Barrier barrier(0);
1311 TrimIndirectReferenceTableClosure closure(&barrier);
1312 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1313 size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1314 if (barrier_count != 0) {
1315 barrier.Increment(self, barrier_count);
1316 }
1317 }
1318
void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
1320 // Need to do this before acquiring the locks since we don't want to get suspended while
1321 // holding any locks.
1322 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1323 MutexLock mu(self, *gc_complete_lock_);
1324 // Ensure there is only one GC at a time.
1325 WaitForGcToCompleteLocked(cause, self);
1326 collector_type_running_ = collector_type;
1327 last_gc_cause_ = cause;
1328 thread_running_gc_ = self;
1329 }
1330
void Heap::TrimSpaces(Thread* self) {
1332 // Pretend we are doing a GC to prevent background compaction from deleting the space we are
1333 // trimming.
1334 StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1335 ScopedTrace trace(__PRETTY_FUNCTION__);
1336 const uint64_t start_ns = NanoTime();
1337 // Trim the managed spaces.
1338 uint64_t total_alloc_space_allocated = 0;
1339 uint64_t total_alloc_space_size = 0;
1340 uint64_t managed_reclaimed = 0;
1341 {
1342 ScopedObjectAccess soa(self);
1343 for (const auto& space : continuous_spaces_) {
1344 if (space->IsMallocSpace()) {
1345 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1346 if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1347 // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1348 // for a long period of time.
1349 managed_reclaimed += malloc_space->Trim();
1350 }
1351 total_alloc_space_size += malloc_space->Size();
1352 }
1353 }
1354 }
1355 total_alloc_space_allocated = GetBytesAllocated();
1356 if (large_object_space_ != nullptr) {
1357 total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1358 }
1359 if (bump_pointer_space_ != nullptr) {
1360 total_alloc_space_allocated -= bump_pointer_space_->Size();
1361 }
1362 if (region_space_ != nullptr) {
1363 total_alloc_space_allocated -= region_space_->GetBytesAllocated();
1364 }
1365 const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1366 static_cast<float>(total_alloc_space_size);
1367 uint64_t gc_heap_end_ns = NanoTime();
1368 // We never move things in the native heap, so we can finish the GC at this point.
1369 FinishGC(self, collector::kGcTypeNone);
1370
1371 VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1372 << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of "
1373 << static_cast<int>(100 * managed_utilization) << "%.";
1374 }
1375
bool Heap::IsValidObjectAddress(const void* addr) const {
1377 if (addr == nullptr) {
1378 return true;
1379 }
1380 return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr;
1381 }
1382
bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const {
1384 return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr;
1385 }
1386
bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
                              bool search_allocation_stack,
                              bool search_live_stack,
                              bool sorted) {
1391 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) {
1392 return false;
1393 }
1394 if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) {
1395 mirror::Class* klass = obj->GetClass<kVerifyNone>();
1396 if (obj == klass) {
1397 // This case happens for java.lang.Class.
1398 return true;
1399 }
1400 return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1401 } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) {
    // If we are in the allocated region of the temp space, then we are probably live (e.g. during
    // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
1404 return temp_space_->Contains(obj.Ptr());
1405 }
1406 if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) {
1407 return true;
1408 }
1409 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
1410 space::DiscontinuousSpace* d_space = nullptr;
1411 if (c_space != nullptr) {
1412 if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1413 return true;
1414 }
1415 } else {
1416 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1417 if (d_space != nullptr) {
1418 if (d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1419 return true;
1420 }
1421 }
1422 }
1423 // This is covering the allocation/live stack swapping that is done without mutators suspended.
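  // If the caller could not sort the stacks, retry a few times with short sleeps so that a
  // concurrent stack swap has a chance to finish; the retry count below is a heuristic.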
1424 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1425 if (i > 0) {
1426 NanoSleep(MsToNs(10));
1427 }
1428 if (search_allocation_stack) {
1429 if (sorted) {
1430 if (allocation_stack_->ContainsSorted(obj.Ptr())) {
1431 return true;
1432 }
1433 } else if (allocation_stack_->Contains(obj.Ptr())) {
1434 return true;
1435 }
1436 }
1437
1438 if (search_live_stack) {
1439 if (sorted) {
1440 if (live_stack_->ContainsSorted(obj.Ptr())) {
1441 return true;
1442 }
1443 } else if (live_stack_->Contains(obj.Ptr())) {
1444 return true;
1445 }
1446 }
1447 }
1448 // We need to check the bitmaps again since there is a race where we mark something as live and
1449 // then clear the stack containing it.
1450 if (c_space != nullptr) {
1451 if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1452 return true;
1453 }
1454 } else {
1455 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1456 if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1457 return true;
1458 }
1459 }
1460 return false;
1461 }
1462
std::string Heap::DumpSpaces() const {
1464 std::ostringstream oss;
1465 DumpSpaces(oss);
1466 return oss.str();
1467 }
1468
void Heap::DumpSpaces(std::ostream& stream) const {
1470 for (const auto& space : continuous_spaces_) {
1471 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1472 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1473 stream << space << " " << *space << "\n";
1474 if (live_bitmap != nullptr) {
1475 stream << live_bitmap << " " << *live_bitmap << "\n";
1476 }
1477 if (mark_bitmap != nullptr) {
1478 stream << mark_bitmap << " " << *mark_bitmap << "\n";
1479 }
1480 }
1481 for (const auto& space : discontinuous_spaces_) {
1482 stream << space << " " << *space << "\n";
1483 }
1484 }
1485
void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
1487 if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1488 return;
1489 }
1490
1491 // Ignore early dawn of the universe verifications.
1492 if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
1493 return;
1494 }
1495 CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
1496 mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1497 CHECK(c != nullptr) << "Null class in object " << obj;
1498 CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
1499 CHECK(VerifyClassClass(c));
1500
1501 if (verify_object_mode_ > kVerifyObjectModeFast) {
1502 // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1503 CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1504 }
1505 }
1506
void Heap::VerifyHeap() {
1508 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1509 auto visitor = [&](mirror::Object* obj) {
1510 VerifyObjectBody(obj);
1511 };
1512 // Technically we need the mutator lock here to call Visit. However, VerifyObjectBody is already
1513 // NO_THREAD_SAFETY_ANALYSIS.
1514 auto no_thread_safety_analysis = [&]() NO_THREAD_SAFETY_ANALYSIS {
1515 GetLiveBitmap()->Visit(visitor);
1516 };
1517 no_thread_safety_analysis();
1518 }
1519
void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
  // Use a signed comparison since freed_bytes can be negative when a background compaction to
  // foreground transition occurs. This is caused by moving objects from a bump pointer space to a
  // free-list backed space, which typically increases the memory footprint due to padding and
  // binning.
  DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
  // Note: this relies on two's complement arithmetic to handle negative freed_bytes.
1526 num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
1527 if (Runtime::Current()->HasStatsEnabled()) {
1528 RuntimeStats* thread_stats = Thread::Current()->GetStats();
1529 thread_stats->freed_objects += freed_objects;
1530 thread_stats->freed_bytes += freed_bytes;
1531 // TODO: Do this concurrently.
1532 RuntimeStats* global_stats = Runtime::Current()->GetStats();
1533 global_stats->freed_objects += freed_objects;
1534 global_stats->freed_bytes += freed_bytes;
1535 }
1536 }
1537
void Heap::RecordFreeRevoke() {
1539 // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
1540 // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
  // If there is a concurrent revoke, it is OK if num_bytes_freed_revoke_ is not reset all the way
  // to zero, since the remainder will be subtracted at the next GC.
1543 size_t bytes_freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent();
1544 CHECK_GE(num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(bytes_freed),
1545 bytes_freed) << "num_bytes_freed_revoke_ underflow";
1546 CHECK_GE(num_bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes_freed),
1547 bytes_freed) << "num_bytes_allocated_ underflow";
1548 GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1549 }
1550
space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1552 if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) {
1553 return rosalloc_space_;
1554 }
1555 for (const auto& space : continuous_spaces_) {
1556 if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1557 if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1558 return space->AsContinuousSpace()->AsRosAllocSpace();
1559 }
1560 }
1561 }
1562 return nullptr;
1563 }
1564
static inline bool EntrypointsInstrumented() REQUIRES_SHARED(Locks::mutator_lock_) {
1566 instrumentation::Instrumentation* const instrumentation =
1567 Runtime::Current()->GetInstrumentation();
1568 return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
1569 }
1570
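// Slow path for allocation when the initial attempt failed. Roughly, the escalation below is: wait
// for any running GC and retry; run the next planned GC type and retry; walk the rest of gc_plan_,
// retrying after each collection; retry with heap growth allowed; run a final GC that clears
// SoftReferences; and only then fall back to homogeneous space compaction or disabling the moving
// GC before throwing an OutOfMemoryError.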
mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
                                             AllocatorType allocator,
                                             bool instrumented,
                                             size_t alloc_size,
                                             size_t* bytes_allocated,
                                             size_t* usable_size,
                                             size_t* bytes_tl_bulk_allocated,
                                             ObjPtr<mirror::Class>* klass) {
1579 bool was_default_allocator = allocator == GetCurrentAllocator();
1580 // Make sure there is no pending exception since we may need to throw an OOME.
1581 self->AssertNoPendingException();
1582 DCHECK(klass != nullptr);
1583 StackHandleScope<1> hs(self);
1584 HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
1585 // The allocation failed. If the GC is running, block until it completes, and then retry the
1586 // allocation.
1587 collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
1588 // If we were the default allocator but the allocator changed while we were suspended,
1589 // abort the allocation.
1590 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1591 (!instrumented && EntrypointsInstrumented())) {
1592 return nullptr;
1593 }
1594 if (last_gc != collector::kGcTypeNone) {
1595 // A GC was in progress and we blocked, retry allocation now that memory has been freed.
1596 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1597 usable_size, bytes_tl_bulk_allocated);
1598 if (ptr != nullptr) {
1599 return ptr;
1600 }
1601 }
1602
1603 collector::GcType tried_type = next_gc_type_;
1604 const bool gc_ran =
1605 CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1606 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1607 (!instrumented && EntrypointsInstrumented())) {
1608 return nullptr;
1609 }
1610 if (gc_ran) {
1611 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1612 usable_size, bytes_tl_bulk_allocated);
1613 if (ptr != nullptr) {
1614 return ptr;
1615 }
1616 }
1617
1618 // Loop through our different Gc types and try to Gc until we get enough free memory.
1619 for (collector::GcType gc_type : gc_plan_) {
1620 if (gc_type == tried_type) {
1621 continue;
1622 }
1623 // Attempt to run the collector, if we succeed, re-try the allocation.
1624 const bool plan_gc_ran =
1625 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1626 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1627 (!instrumented && EntrypointsInstrumented())) {
1628 return nullptr;
1629 }
1630 if (plan_gc_ran) {
1631 // Did we free sufficient memory for the allocation to succeed?
1632 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1633 usable_size, bytes_tl_bulk_allocated);
1634 if (ptr != nullptr) {
1635 return ptr;
1636 }
1637 }
1638 }
1639 // Allocations have failed after GCs; this is an exceptional state.
1640 // Try harder, growing the heap if necessary.
1641 mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1642 usable_size, bytes_tl_bulk_allocated);
1643 if (ptr != nullptr) {
1644 return ptr;
1645 }
1646 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1647 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1648 // VM spec requires that all SoftReferences have been collected and cleared before throwing
1649 // OOME.
1650 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1651 << " allocation";
1652 // TODO: Run finalization, but this may cause more allocations to occur.
1653 // We don't need a WaitForGcToComplete here either.
1654 DCHECK(!gc_plan_.empty());
1655 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1656 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1657 (!instrumented && EntrypointsInstrumented())) {
1658 return nullptr;
1659 }
1660 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
1661 bytes_tl_bulk_allocated);
1662 if (ptr == nullptr) {
1663 const uint64_t current_time = NanoTime();
1664 switch (allocator) {
1665 case kAllocatorTypeRosAlloc:
1666 // Fall-through.
1667 case kAllocatorTypeDlMalloc: {
1668 if (use_homogeneous_space_compaction_for_oom_ &&
1669 current_time - last_time_homogeneous_space_compaction_by_oom_ >
1670 min_interval_homogeneous_space_compaction_by_oom_) {
1671 last_time_homogeneous_space_compaction_by_oom_ = current_time;
1672 HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
1673 // Thread suspension could have occurred.
1674 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1675 (!instrumented && EntrypointsInstrumented())) {
1676 return nullptr;
1677 }
1678 switch (result) {
1679 case HomogeneousSpaceCompactResult::kSuccess:
1680 // If the allocation succeeded, we delayed an oom.
1681 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1682 usable_size, bytes_tl_bulk_allocated);
1683 if (ptr != nullptr) {
1684 count_delayed_oom_++;
1685 }
1686 break;
1687 case HomogeneousSpaceCompactResult::kErrorReject:
1688 // Reject due to disabled moving GC.
1689 break;
1690 case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1691 // Throw OOM by default.
1692 break;
1693 default: {
1694 UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1695 << static_cast<size_t>(result);
1696 UNREACHABLE();
1697 }
1698 }
          // Always print that we ran homogeneous space compaction since this can cause jank.
1700 VLOG(heap) << "Ran heap homogeneous space compaction, "
1701 << " requested defragmentation "
1702 << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1703 << " performed defragmentation "
1704 << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1705 << " ignored homogeneous space compaction "
1706 << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1707 << " delayed count = "
1708 << count_delayed_oom_.LoadSequentiallyConsistent();
1709 }
1710 break;
1711 }
1712 case kAllocatorTypeNonMoving: {
1713 if (kUseReadBarrier) {
1714 // DisableMovingGc() isn't compatible with CC.
1715 break;
1716 }
1717 // Try to transition the heap if the allocation failure was due to the space being full.
1718 if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
1719 // If we aren't out of memory then the OOM was probably from the non moving space being
1720 // full. Attempt to disable compaction and turn the main space into a non moving space.
1721 DisableMovingGc();
1722 // Thread suspension could have occurred.
1723 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1724 (!instrumented && EntrypointsInstrumented())) {
1725 return nullptr;
1726 }
1727 // If we are still a moving GC then something must have caused the transition to fail.
1728 if (IsMovingGc(collector_type_)) {
1729 MutexLock mu(self, *gc_complete_lock_);
1730 // If we couldn't disable moving GC, just throw OOME and return null.
1731 LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1732 << disable_moving_gc_count_;
1733 } else {
1734 LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1735 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1736 usable_size, bytes_tl_bulk_allocated);
1737 }
1738 }
1739 break;
1740 }
1741 default: {
        // Do nothing for other allocators.
1743 }
1744 }
1745 }
1746 // If the allocation hasn't succeeded by this point, throw an OOM error.
1747 if (ptr == nullptr) {
1748 ThrowOutOfMemoryError(self, alloc_size, allocator);
1749 }
1750 return ptr;
1751 }
1752
void Heap::SetTargetHeapUtilization(float target) {
1754 DCHECK_GT(target, 0.0f); // asserted in Java code
1755 DCHECK_LT(target, 1.0f);
1756 target_utilization_ = target;
1757 }
1758
size_t Heap::GetObjectsAllocated() const {
1760 Thread* const self = Thread::Current();
1761 ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
1762 // Prevent GC running during GetObjectsAllocated since we may get a checkpoint request that tells
1763 // us to suspend while we are doing SuspendAll. b/35232978
1764 gc::ScopedGCCriticalSection gcs(Thread::Current(),
1765 gc::kGcCauseGetObjectsAllocated,
1766 gc::kCollectorTypeGetObjectsAllocated);
1767 // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
1768 ScopedSuspendAll ssa(__FUNCTION__);
1769 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1770 size_t total = 0;
1771 for (space::AllocSpace* space : alloc_spaces_) {
1772 total += space->GetObjectsAllocated();
1773 }
1774 return total;
1775 }
1776
uint64_t Heap::GetObjectsAllocatedEver() const {
1778 uint64_t total = GetObjectsFreedEver();
1779 // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
1780 if (Thread::Current() != nullptr) {
1781 total += GetObjectsAllocated();
1782 }
1783 return total;
1784 }
1785
uint64_t Heap::GetBytesAllocatedEver() const {
1787 return GetBytesFreedEver() + GetBytesAllocated();
1788 }
1789
1790 // Check whether the given object is an instance of the given class.
static bool MatchesClass(mirror::Object* obj,
                         Handle<mirror::Class> h_class,
                         bool use_is_assignable_from) REQUIRES_SHARED(Locks::mutator_lock_) {
1794 mirror::Class* instance_class = obj->GetClass();
1795 CHECK(instance_class != nullptr);
1796 ObjPtr<mirror::Class> klass = h_class.Get();
1797 if (use_is_assignable_from) {
1798 return klass != nullptr && klass->IsAssignableFrom(instance_class);
1799 }
1800 return instance_class == klass;
1801 }
1802
void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
                          bool use_is_assignable_from,
                          uint64_t* counts) {
1806 auto instance_counter = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
1807 for (size_t i = 0; i < classes.size(); ++i) {
1808 if (MatchesClass(obj, classes[i], use_is_assignable_from)) {
1809 ++counts[i];
1810 }
1811 }
1812 };
1813 VisitObjects(instance_counter);
1814 }
1815
void Heap::GetInstances(VariableSizedHandleScope& scope,
                        Handle<mirror::Class> h_class,
                        bool use_is_assignable_from,
                        int32_t max_count,
                        std::vector<Handle<mirror::Object>>& instances) {
1821 DCHECK_GE(max_count, 0);
1822 auto instance_collector = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
1823 if (MatchesClass(obj, h_class, use_is_assignable_from)) {
1824 if (max_count == 0 || instances.size() < static_cast<size_t>(max_count)) {
1825 instances.push_back(scope.NewHandle(obj));
1826 }
1827 }
1828 };
1829 VisitObjects(instance_collector);
1830 }
1831
void Heap::GetReferringObjects(VariableSizedHandleScope& scope,
                               Handle<mirror::Object> o,
                               int32_t max_count,
                               std::vector<Handle<mirror::Object>>& referring_objects) {
1836 class ReferringObjectsFinder {
1837 public:
1838 ReferringObjectsFinder(VariableSizedHandleScope& scope_in,
1839 Handle<mirror::Object> object_in,
1840 int32_t max_count_in,
1841 std::vector<Handle<mirror::Object>>& referring_objects_in)
1842 REQUIRES_SHARED(Locks::mutator_lock_)
1843 : scope_(scope_in),
1844 object_(object_in),
1845 max_count_(max_count_in),
1846 referring_objects_(referring_objects_in) {}
1847
1848 // For Object::VisitReferences.
1849 void operator()(ObjPtr<mirror::Object> obj,
1850 MemberOffset offset,
1851 bool is_static ATTRIBUTE_UNUSED) const
1852 REQUIRES_SHARED(Locks::mutator_lock_) {
1853 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
1854 if (ref == object_.Get() && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1855 referring_objects_.push_back(scope_.NewHandle(obj));
1856 }
1857 }
1858
1859 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
1860 const {}
1861 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
1862
1863 private:
1864 VariableSizedHandleScope& scope_;
1865 Handle<mirror::Object> const object_;
1866 const uint32_t max_count_;
1867 std::vector<Handle<mirror::Object>>& referring_objects_;
1868 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1869 };
1870 ReferringObjectsFinder finder(scope, o, max_count, referring_objects);
1871 auto referring_objects_finder = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
1872 obj->VisitReferences(finder, VoidFunctor());
1873 };
1874 VisitObjects(referring_objects_finder);
1875 }
1876
void Heap::CollectGarbage(bool clear_soft_references, GcCause cause) {
  // Even if we waited for a GC we still need to do another GC since weaks allocated during the
  // last GC will not necessarily have been cleared.
1880 CollectGarbageInternal(gc_plan_.back(), cause, clear_soft_references);
1881 }
1882
bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
1884 return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
1885 foreground_collector_type_ == kCollectorTypeCMS;
1886 }
1887
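// Homogeneous space compaction copies the main space into the backup space of the same kind and
// then swaps the two, which defragments a free-list space without changing the collector type.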
HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
1889 Thread* self = Thread::Current();
1890 // Inc requested homogeneous space compaction.
1891 count_requested_homogeneous_space_compaction_++;
1892 // Store performed homogeneous space compaction at a new request arrival.
1893 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1894 // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
1895 // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
1896 // http://b/71769596
1897 // Locks::mutator_lock_->AssertNotHeld(self);
1898 {
1899 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
1900 MutexLock mu(self, *gc_complete_lock_);
1901 // Ensure there is only one GC at a time.
1902 WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
    // Homogeneous space compaction is a copying transition, so we can't run it if the moving GC
    // disable count is non-zero.
1905 // If the collector type changed to something which doesn't benefit from homogeneous space
1906 // compaction, exit.
1907 if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
1908 !main_space_->CanMoveObjects()) {
1909 return kErrorReject;
1910 }
1911 if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
1912 return kErrorUnsupported;
1913 }
1914 collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
1915 }
1916 if (Runtime::Current()->IsShuttingDown(self)) {
1917 // Don't allow heap transitions to happen if the runtime is shutting down since these can
1918 // cause objects to get finalized.
1919 FinishGC(self, collector::kGcTypeNone);
1920 return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
1921 }
1922 collector::GarbageCollector* collector;
1923 {
1924 ScopedSuspendAll ssa(__FUNCTION__);
1925 uint64_t start_time = NanoTime();
1926 // Launch compaction.
1927 space::MallocSpace* to_space = main_space_backup_.release();
1928 space::MallocSpace* from_space = main_space_;
1929 to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1930 const uint64_t space_size_before_compaction = from_space->Size();
1931 AddSpace(to_space);
1932 // Make sure that we will have enough room to copy.
1933 CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
1934 collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
1935 const uint64_t space_size_after_compaction = to_space->Size();
1936 main_space_ = to_space;
1937 main_space_backup_.reset(from_space);
1938 RemoveSpace(from_space);
1939 SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
1940 // Update performed homogeneous space compaction count.
1941 count_performed_homogeneous_space_compaction_++;
    // Print the statistics log and resume all threads.
1943 uint64_t duration = NanoTime() - start_time;
1944 VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
1945 << PrettySize(space_size_before_compaction) << " -> "
1946 << PrettySize(space_size_after_compaction) << " compact-ratio: "
1947 << std::fixed << static_cast<double>(space_size_after_compaction) /
1948 static_cast<double>(space_size_before_compaction);
1949 }
1950 // Finish GC.
1951 reference_processor_->EnqueueClearedReferences(self);
1952 GrowForUtilization(semi_space_collector_);
1953 LogGC(kGcCauseHomogeneousSpaceCompact, collector);
1954 FinishGC(self, collector::kGcTypeFull);
1955 {
1956 ScopedObjectAccess soa(self);
1957 soa.Vm()->UnloadNativeLibraries();
1958 }
1959 return HomogeneousSpaceCompactResult::kSuccess;
1960 }
1961
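// Transitions between collector types. Switching between a moving and a non-moving collector is a
// copying transition: the heap contents are evacuated between the bump pointer space and the main
// malloc space using the semi-space collector while all threads are suspended.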
void Heap::TransitionCollector(CollectorType collector_type) {
1963 if (collector_type == collector_type_) {
1964 return;
1965 }
1966 // Collector transition must not happen with CC
1967 CHECK(!kUseReadBarrier);
1968 VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
1969 << " -> " << static_cast<int>(collector_type);
1970 uint64_t start_time = NanoTime();
1971 uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
1972 Runtime* const runtime = Runtime::Current();
1973 Thread* const self = Thread::Current();
1974 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1975 // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
1976 // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
1977 // http://b/71769596
1978 // Locks::mutator_lock_->AssertNotHeld(self);
1979 // Busy wait until we can GC (StartGC can fail if we have a non-zero
  // compacting_gc_disable_count_; this should rarely occur).
1981 for (;;) {
1982 {
1983 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
1984 MutexLock mu(self, *gc_complete_lock_);
1985 // Ensure there is only one GC at a time.
1986 WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
1987 // Currently we only need a heap transition if we switch from a moving collector to a
      // non-moving one, or vice versa.
1989 const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
1990 // If someone else beat us to it and changed the collector before we could, exit.
1991 // This is safe to do before the suspend all since we set the collector_type_running_ before
1992 // we exit the loop. If another thread attempts to do the heap transition before we exit,
1993 // then it would get blocked on WaitForGcToCompleteLocked.
1994 if (collector_type == collector_type_) {
1995 return;
1996 }
      // GC can be disabled if someone has used GetPrimitiveArrayCritical but has not yet
      // released it.
1998 if (!copying_transition || disable_moving_gc_count_ == 0) {
1999 // TODO: Not hard code in semi-space collector?
2000 collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
2001 break;
2002 }
2003 }
2004 usleep(1000);
2005 }
2006 if (runtime->IsShuttingDown(self)) {
2007 // Don't allow heap transitions to happen if the runtime is shutting down since these can
2008 // cause objects to get finalized.
2009 FinishGC(self, collector::kGcTypeNone);
2010 return;
2011 }
2012 collector::GarbageCollector* collector = nullptr;
2013 {
2014 ScopedSuspendAll ssa(__FUNCTION__);
2015 switch (collector_type) {
2016 case kCollectorTypeSS: {
2017 if (!IsMovingGc(collector_type_)) {
2018 // Create the bump pointer space from the backup space.
2019 CHECK(main_space_backup_ != nullptr);
2020 std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
2021 // We are transitioning from non moving GC -> moving GC, since we copied from the bump
2022 // pointer space last transition it will be protected.
2023 CHECK(mem_map != nullptr);
2024 mem_map->Protect(PROT_READ | PROT_WRITE);
2025 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
2026 mem_map.release());
2027 AddSpace(bump_pointer_space_);
2028 collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
2029 // Use the now empty main space mem map for the bump pointer temp space.
2030 mem_map.reset(main_space_->ReleaseMemMap());
2031 // Unset the pointers just in case.
2032 if (dlmalloc_space_ == main_space_) {
2033 dlmalloc_space_ = nullptr;
2034 } else if (rosalloc_space_ == main_space_) {
2035 rosalloc_space_ = nullptr;
2036 }
          // Remove the main space so that we don't try to trim it; this doesn't work for debug
          // builds since RosAlloc attempts to read the magic number from a protected page.
2039 RemoveSpace(main_space_);
2040 RemoveRememberedSet(main_space_);
2041 delete main_space_; // Delete the space since it has been removed.
2042 main_space_ = nullptr;
2043 RemoveRememberedSet(main_space_backup_.get());
2044 main_space_backup_.reset(nullptr); // Deletes the space.
2045 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
2046 mem_map.release());
2047 AddSpace(temp_space_);
2048 }
2049 break;
2050 }
2051 case kCollectorTypeMS:
2052 // Fall through.
2053 case kCollectorTypeCMS: {
2054 if (IsMovingGc(collector_type_)) {
2055 CHECK(temp_space_ != nullptr);
2056 std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
2057 RemoveSpace(temp_space_);
2058 temp_space_ = nullptr;
2059 mem_map->Protect(PROT_READ | PROT_WRITE);
2060 CreateMainMallocSpace(mem_map.get(),
2061 kDefaultInitialSize,
2062 std::min(mem_map->Size(), growth_limit_),
2063 mem_map->Size());
2064 mem_map.release();
2065 // Compact to the main space from the bump pointer space, don't need to swap semispaces.
2066 AddSpace(main_space_);
2067 collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
2068 mem_map.reset(bump_pointer_space_->ReleaseMemMap());
2069 RemoveSpace(bump_pointer_space_);
2070 bump_pointer_space_ = nullptr;
2071 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
2072 // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
2073 if (kIsDebugBuild && kUseRosAlloc) {
2074 mem_map->Protect(PROT_READ | PROT_WRITE);
2075 }
2076 main_space_backup_.reset(CreateMallocSpaceFromMemMap(
2077 mem_map.get(),
2078 kDefaultInitialSize,
2079 std::min(mem_map->Size(), growth_limit_),
2080 mem_map->Size(),
2081 name,
2082 true));
2083 if (kIsDebugBuild && kUseRosAlloc) {
2084 mem_map->Protect(PROT_NONE);
2085 }
2086 mem_map.release();
2087 }
2088 break;
2089 }
2090 default: {
2091 LOG(FATAL) << "Attempted to transition to invalid collector type "
2092 << static_cast<size_t>(collector_type);
2093 break;
2094 }
2095 }
2096 ChangeCollector(collector_type);
2097 }
2098 // Can't call into java code with all threads suspended.
2099 reference_processor_->EnqueueClearedReferences(self);
2100 uint64_t duration = NanoTime() - start_time;
2101 GrowForUtilization(semi_space_collector_);
2102 DCHECK(collector != nullptr);
2103 LogGC(kGcCauseCollectorTransition, collector);
2104 FinishGC(self, collector::kGcTypeFull);
2105 {
2106 ScopedObjectAccess soa(self);
2107 soa.Vm()->UnloadNativeLibraries();
2108 }
2109 int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
2110 int32_t delta_allocated = before_allocated - after_allocated;
2111 std::string saved_str;
2112 if (delta_allocated >= 0) {
2113 saved_str = " saved at least " + PrettySize(delta_allocated);
2114 } else {
2115 saved_str = " expanded " + PrettySize(-delta_allocated);
2116 }
2117 VLOG(heap) << "Collector transition to " << collector_type << " took "
2118 << PrettyDuration(duration) << saved_str;
2119 }
2120
void Heap::ChangeCollector(CollectorType collector_type) {
2122 // TODO: Only do this with all mutators suspended to avoid races.
2123 if (collector_type != collector_type_) {
2124 if (collector_type == kCollectorTypeMC) {
2125 // Don't allow mark compact unless support is compiled in.
2126 CHECK(kMarkCompactSupport);
2127 }
2128 collector_type_ = collector_type;
2129 gc_plan_.clear();
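    // gc_plan_ is ordered from cheapest to most thorough; AllocateInternalWithGc walks it in order
    // on allocation failure, and CollectGarbage() uses the last (most thorough) entry.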
2130 switch (collector_type_) {
2131 case kCollectorTypeCC: {
2132 gc_plan_.push_back(collector::kGcTypeFull);
2133 if (use_tlab_) {
2134 ChangeAllocator(kAllocatorTypeRegionTLAB);
2135 } else {
2136 ChangeAllocator(kAllocatorTypeRegion);
2137 }
2138 break;
2139 }
2140 case kCollectorTypeMC: // Fall-through.
2141 case kCollectorTypeSS: // Fall-through.
2142 case kCollectorTypeGSS: {
2143 gc_plan_.push_back(collector::kGcTypeFull);
2144 if (use_tlab_) {
2145 ChangeAllocator(kAllocatorTypeTLAB);
2146 } else {
2147 ChangeAllocator(kAllocatorTypeBumpPointer);
2148 }
2149 break;
2150 }
2151 case kCollectorTypeMS: {
2152 gc_plan_.push_back(collector::kGcTypeSticky);
2153 gc_plan_.push_back(collector::kGcTypePartial);
2154 gc_plan_.push_back(collector::kGcTypeFull);
2155 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2156 break;
2157 }
2158 case kCollectorTypeCMS: {
2159 gc_plan_.push_back(collector::kGcTypeSticky);
2160 gc_plan_.push_back(collector::kGcTypePartial);
2161 gc_plan_.push_back(collector::kGcTypeFull);
2162 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2163 break;
2164 }
2165 default: {
2166 UNIMPLEMENTED(FATAL);
2167 UNREACHABLE();
2168 }
2169 }
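    // Leave kMinConcurrentRemainingBytes of headroom below the footprint limit so that a
    // concurrent GC can be started before the heap is exhausted; for non-concurrent collectors the
    // threshold is effectively disabled.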
2170 if (IsGcConcurrent()) {
2171 concurrent_start_bytes_ =
2172 std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
2173 } else {
2174 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2175 }
2176 }
2177 }
2178
2179 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
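// Free gaps between live objects are recorded as (size, address) bins; each surviving object is
// then forwarded into the smallest bin that fits it, so the resulting zygote space ends up densely
// packed even though the packing is not optimal.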
2180 class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
2181 public:
  ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
      : SemiSpace(heap, false, "zygote collector"),
        bin_live_bitmap_(nullptr),
        bin_mark_bitmap_(nullptr),
        is_running_on_memory_tool_(is_running_on_memory_tool) {}
2187
  void BuildBins(space::ContinuousSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
2189 bin_live_bitmap_ = space->GetLiveBitmap();
2190 bin_mark_bitmap_ = space->GetMarkBitmap();
2191 uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin());
2192 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2193 // Note: This requires traversing the space in increasing order of object addresses.
2194 auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2195 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
2196 size_t bin_size = object_addr - prev;
      // Add the bin consisting of the gap between the end of the previous object and the start
      // of the current object.
2198 AddBin(bin_size, prev);
2199 prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
2200 };
2201 bin_live_bitmap_->Walk(visitor);
2202 // Add the last bin which spans after the last object to the end of the space.
2203 AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev);
2204 }
2205
2206 private:
2207 // Maps from bin sizes to locations.
2208 std::multimap<size_t, uintptr_t> bins_;
2209 // Live bitmap of the space which contains the bins.
2210 accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
2211 // Mark bitmap of the space which contains the bins.
2212 accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
2213 const bool is_running_on_memory_tool_;
2214
  void AddBin(size_t size, uintptr_t position) {
2216 if (is_running_on_memory_tool_) {
2217 MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
2218 }
2219 if (size != 0) {
2220 bins_.insert(std::make_pair(size, position));
2221 }
2222 }
2223
  virtual bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const {
2225 // Don't sweep any spaces since we probably blasted the internal accounting of the free list
2226 // allocator.
2227 return false;
2228 }
2229
  virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
2232 size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
2233 size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
2234 mirror::Object* forward_address;
2235 // Find the smallest bin which we can move obj in.
2236 auto it = bins_.lower_bound(alloc_size);
2237 if (it == bins_.end()) {
2238 // No available space in the bins, place it in the target space instead (grows the zygote
2239 // space).
2240 size_t bytes_allocated, dummy;
2241 forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy);
2242 if (to_space_live_bitmap_ != nullptr) {
2243 to_space_live_bitmap_->Set(forward_address);
2244 } else {
2245 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2246 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
2247 }
2248 } else {
2249 size_t size = it->first;
2250 uintptr_t pos = it->second;
2251 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
2252 forward_address = reinterpret_cast<mirror::Object*>(pos);
2253 // Set the live and mark bits so that sweeping system weaks works properly.
2254 bin_live_bitmap_->Set(forward_address);
2255 bin_mark_bitmap_->Set(forward_address);
2256 DCHECK_GE(size, alloc_size);
2257 // Add a new bin with the remaining space.
2258 AddBin(size - alloc_size, pos + alloc_size);
2259 }
2260 // Copy the object over to its new location. Don't use alloc_size to avoid valgrind error.
2261 memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
2262 if (kUseBakerReadBarrier) {
2263 obj->AssertReadBarrierState();
2264 forward_address->AssertReadBarrierState();
2265 }
2266 return forward_address;
2267 }
2268 };
2269
void Heap::UnBindBitmaps() {
2271 TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
2272 for (const auto& space : GetContinuousSpaces()) {
2273 if (space->IsContinuousMemMapAllocSpace()) {
2274 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2275 if (alloc_space->HasBoundBitmaps()) {
2276 alloc_space->UnBindBitmaps();
2277 }
2278 }
2279 }
2280 }
2281
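// Called in the zygote before forking the first application. Compacts the heap into the non-moving
// space, converts that space into a zygote space that child processes share copy-on-write, and
// sets up the accounting (mod-union table, remembered set) needed to collect apps without
// rescanning the whole zygote space.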
void Heap::PreZygoteFork() {
2283 if (!HasZygoteSpace()) {
    // We still want to GC in case there are some unreachable non-moving objects that could cause a
    // suboptimal bin packing when we compact the zygote space.
2286 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
2287 // Trim the pages at the end of the non moving space. Trim while not holding zygote lock since
2288 // the trim process may require locking the mutator lock.
2289 non_moving_space_->Trim();
2290 }
2291 Thread* self = Thread::Current();
2292 MutexLock mu(self, zygote_creation_lock_);
2293 // Try to see if we have any Zygote spaces.
2294 if (HasZygoteSpace()) {
2295 return;
2296 }
2297 Runtime::Current()->GetInternTable()->AddNewTable();
2298 Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
2299 VLOG(heap) << "Starting PreZygoteFork";
2300 // The end of the non-moving space may be protected, unprotect it so that we can copy the zygote
2301 // there.
2302 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2303 const bool same_space = non_moving_space_ == main_space_;
2304 if (kCompactZygote) {
2305 // Temporarily disable rosalloc verification because the zygote
2306 // compaction will mess up the rosalloc internal metadata.
2307 ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
2308 ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
2309 zygote_collector.BuildBins(non_moving_space_);
2310 // Create a new bump pointer space which we will compact into.
2311 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2312 non_moving_space_->Limit());
2313 // Compact the bump pointer space to a new zygote bump pointer space.
2314 bool reset_main_space = false;
2315 if (IsMovingGc(collector_type_)) {
2316 if (collector_type_ == kCollectorTypeCC) {
2317 zygote_collector.SetFromSpace(region_space_);
2318 } else {
2319 zygote_collector.SetFromSpace(bump_pointer_space_);
2320 }
2321 } else {
2322 CHECK(main_space_ != nullptr);
2323 CHECK_NE(main_space_, non_moving_space_)
2324 << "Does not make sense to compact within the same space";
2325 // Copy from the main space.
2326 zygote_collector.SetFromSpace(main_space_);
2327 reset_main_space = true;
2328 }
2329 zygote_collector.SetToSpace(&target_space);
2330 zygote_collector.SetSwapSemiSpaces(false);
2331 zygote_collector.Run(kGcCauseCollectorTransition, false);
2332 if (reset_main_space) {
2333 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2334 madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
2335 MemMap* mem_map = main_space_->ReleaseMemMap();
2336 RemoveSpace(main_space_);
2337 space::Space* old_main_space = main_space_;
2338 CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
2339 mem_map->Size());
2340 delete old_main_space;
2341 AddSpace(main_space_);
2342 } else {
2343 if (collector_type_ == kCollectorTypeCC) {
2344 region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2345 // Evacuated everything out of the region space, clear the mark bitmap.
2346 region_space_->GetMarkBitmap()->Clear();
2347 } else {
2348 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2349 }
2350 }
2351 if (temp_space_ != nullptr) {
2352 CHECK(temp_space_->IsEmpty());
2353 }
2354 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2355 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2356 // Update the end and write out image.
2357 non_moving_space_->SetEnd(target_space.End());
2358 non_moving_space_->SetLimit(target_space.Limit());
2359 VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
2360 }
2361 // Change the collector to the post zygote one.
2362 ChangeCollector(foreground_collector_type_);
2363 // Save the old space so that we can remove it after we complete creating the zygote space.
2364 space::MallocSpace* old_alloc_space = non_moving_space_;
2365 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
2366 // the remaining available space.
2367 // Remove the old space before creating the zygote space since creating the zygote space sets
2368 // the old alloc space's bitmaps to null.
2369 RemoveSpace(old_alloc_space);
2370 if (collector::SemiSpace::kUseRememberedSet) {
2371 // Sanity bound check.
2372 FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2373 // Remove the remembered set for the now zygote space (the old
2374 // non-moving space). Note now that we have compacted objects into
2375 // the zygote space, the data in the remembered set is no longer
2376 // needed. The zygote space will instead have a mod-union table
2377 // from this point on.
2378 RemoveRememberedSet(old_alloc_space);
2379 }
2380 // Remaining space becomes the new non moving space.
2381 zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
2382 &non_moving_space_);
2383 CHECK(!non_moving_space_->CanMoveObjects());
2384 if (same_space) {
2385 main_space_ = non_moving_space_;
2386 SetSpaceAsDefault(main_space_);
2387 }
2388 delete old_alloc_space;
2389 CHECK(HasZygoteSpace()) << "Failed creating zygote space";
2390 AddSpace(zygote_space_);
2391 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2392 AddSpace(non_moving_space_);
2393 if (kUseBakerReadBarrier && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects) {
2394 // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is
2395 // safe since we mark all of the objects that may reference non immune objects as gray.
2396 zygote_space_->GetLiveBitmap()->VisitMarkedRange(
2397 reinterpret_cast<uintptr_t>(zygote_space_->Begin()),
2398 reinterpret_cast<uintptr_t>(zygote_space_->Limit()),
2399 [](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2400 CHECK(obj->AtomicSetMarkBit(0, 1));
2401 });
2402 }
2403
2404 // Create the zygote space mod union table.
2405 accounting::ModUnionTable* mod_union_table =
2406 new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space_);
2407 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
2408
2409 if (collector_type_ != kCollectorTypeCC) {
2410 // Set all the cards in the mod-union table since we don't know which objects contain references
2411 // to large objects.
2412 mod_union_table->SetCards();
2413 } else {
2414 // Make sure to clear the zygote space cards so that we don't dirty pages in the next GC. There
2415 // may be dirty cards from the zygote compaction or reference processing. These cards are not
2416 // necessary to have marked since the zygote space may not refer to any objects not in the
2417 // zygote or image spaces at this point.
2418 mod_union_table->ProcessCards();
2419 mod_union_table->ClearTable();
2420
2421 // For CC we never collect zygote large objects. This means we do not need to set the cards for
2422 // the zygote mod-union table and we can also clear all of the existing image mod-union tables.
2423 // The existing mod-union tables are only for image spaces and may only reference zygote and
2424 // image objects.
2425 for (auto& pair : mod_union_tables_) {
2426 CHECK(pair.first->IsImageSpace());
2427 CHECK(!pair.first->AsImageSpace()->GetImageHeader().IsAppImage());
2428 accounting::ModUnionTable* table = pair.second;
2429 table->ClearTable();
2430 }
2431 }
2432 AddModUnionTable(mod_union_table);
2433 large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
2434 if (collector::SemiSpace::kUseRememberedSet) {
2435 // Add a new remembered set for the post-zygote non-moving space.
2436 accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2437 new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2438 non_moving_space_);
2439 CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2440 << "Failed to create post-zygote non-moving space remembered set";
2441 AddRememberedSet(post_zygote_non_moving_space_rem_set);
2442 }
2443 }
2444
void Heap::FlushAllocStack() {
2446 MarkAllocStackAsLive(allocation_stack_.get());
2447 allocation_stack_->Reset();
2448 }
2449
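// Marks every object currently recorded in the given allocation stack as live in whichever bitmap
// covers its address; objects outside both continuous-space bitmaps are assumed to be large
// objects.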
void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
                          accounting::ContinuousSpaceBitmap* bitmap2,
                          accounting::LargeObjectBitmap* large_objects,
                          accounting::ObjectStack* stack) {
2454 DCHECK(bitmap1 != nullptr);
2455 DCHECK(bitmap2 != nullptr);
2456 const auto* limit = stack->End();
2457 for (auto* it = stack->Begin(); it != limit; ++it) {
2458 const mirror::Object* obj = it->AsMirrorPtr();
2459 if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2460 if (bitmap1->HasAddress(obj)) {
2461 bitmap1->Set(obj);
2462 } else if (bitmap2->HasAddress(obj)) {
2463 bitmap2->Set(obj);
2464 } else {
2465 DCHECK(large_objects != nullptr);
2466 large_objects->Set(obj);
2467 }
2468 }
2469 }
2470 }
2471
void Heap::SwapSemiSpaces() {
2473 CHECK(bump_pointer_space_ != nullptr);
2474 CHECK(temp_space_ != nullptr);
2475 std::swap(bump_pointer_space_, temp_space_);
2476 }
2477
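// When the source and target spaces differ, a semi-space copy is performed (without swapping the
// spaces afterwards); in-place compaction is only supported for bump pointer spaces and uses the
// mark-compact collector instead.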
collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
                                           space::ContinuousMemMapAllocSpace* source_space,
                                           GcCause gc_cause) {
2481 CHECK(kMovingCollector);
2482 if (target_space != source_space) {
2483 // Don't swap spaces since this isn't a typical semi space collection.
2484 semi_space_collector_->SetSwapSemiSpaces(false);
2485 semi_space_collector_->SetFromSpace(source_space);
2486 semi_space_collector_->SetToSpace(target_space);
2487 semi_space_collector_->Run(gc_cause, false);
2488 return semi_space_collector_;
2489 } else {
2490 CHECK(target_space->IsBumpPointerSpace())
2491 << "In-place compaction is only supported for bump pointer spaces";
2492 mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
2493 mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
2494 return mark_compact_collector_;
2495 }
2496 }
2497
void Heap::TraceHeapSize(size_t heap_size) {
2499 ATRACE_INT("Heap size (KB)", heap_size / KB);
2500 }
2501
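// Central entry point for running a GC. Returns kGcTypeNone when the requested GC cannot run: a
// partial GC without a zygote space, a stack overflow in progress, a moving GC while moving
// collection is disabled, or a GC requested during shutdown.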
collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
                                               GcCause gc_cause,
                                               bool clear_soft_references) {
2505 Thread* self = Thread::Current();
2506 Runtime* runtime = Runtime::Current();
2507 // If the heap can't run the GC, silently fail and return that no GC was run.
2508 switch (gc_type) {
2509 case collector::kGcTypePartial: {
2510 if (!HasZygoteSpace()) {
2511 return collector::kGcTypeNone;
2512 }
2513 break;
2514 }
2515 default: {
2516 // Other GC types don't have any special cases that make them unrunnable. The main case
2517 // here is full GC.
2518 }
2519 }
2520 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2521 // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
2522 // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
2523 // http://b/71769596
2524 // Locks::mutator_lock_->AssertNotHeld(self);
2525 if (self->IsHandlingStackOverflow()) {
2526 // If we are throwing a stack overflow error we probably don't have enough remaining stack
2527 // space to run the GC.
2528 return collector::kGcTypeNone;
2529 }
2530 bool compacting_gc;
2531 {
2532 gc_complete_lock_->AssertNotHeld(self);
2533 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2534 MutexLock mu(self, *gc_complete_lock_);
2535 // Ensure there is only one GC at a time.
2536 WaitForGcToCompleteLocked(gc_cause, self);
2537 compacting_gc = IsMovingGc(collector_type_);
2538 // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2539 if (compacting_gc && disable_moving_gc_count_ != 0) {
2540 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2541 return collector::kGcTypeNone;
2542 }
2543 if (gc_disabled_for_shutdown_) {
2544 return collector::kGcTypeNone;
2545 }
2546 collector_type_running_ = collector_type_;
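// Publishing collector_type_running_ under gc_complete_lock_ marks this thread as the one
// running the GC; FinishGC() later resets it to kCollectorTypeNone and broadcasts
// gc_complete_cond_ so that waiters in WaitForGcToCompleteLocked() can proceed.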
2547 }
2548 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2549 ++runtime->GetStats()->gc_for_alloc_count;
2550 ++self->GetStats()->gc_for_alloc_count;
2551 }
2552 const uint64_t bytes_allocated_before_gc = GetBytesAllocated();
2553
2554 if (gc_type == NonStickyGcType()) {
2555 // Move all bytes from new_native_bytes_allocated_ to
2556 // old_native_bytes_allocated_ now that GC has been triggered, resetting
2557 // new_native_bytes_allocated_ to zero in the process.
2558 old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
2559 }
2560
2561 DCHECK_LT(gc_type, collector::kGcTypeMax);
2562 DCHECK_NE(gc_type, collector::kGcTypeNone);
2563
2564 collector::GarbageCollector* collector = nullptr;
2565 // TODO: Clean this up.
2566 if (compacting_gc) {
2567 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2568 current_allocator_ == kAllocatorTypeTLAB ||
2569 current_allocator_ == kAllocatorTypeRegion ||
2570 current_allocator_ == kAllocatorTypeRegionTLAB);
2571 switch (collector_type_) {
2572 case kCollectorTypeSS:
2573 // Fall-through.
2574 case kCollectorTypeGSS:
2575 semi_space_collector_->SetFromSpace(bump_pointer_space_);
2576 semi_space_collector_->SetToSpace(temp_space_);
2577 semi_space_collector_->SetSwapSemiSpaces(true);
2578 collector = semi_space_collector_;
2579 break;
2580 case kCollectorTypeCC:
2581 collector = concurrent_copying_collector_;
2582 break;
2583 case kCollectorTypeMC:
2584 mark_compact_collector_->SetSpace(bump_pointer_space_);
2585 collector = mark_compact_collector_;
2586 break;
2587 default:
2588 LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2589 }
2590 if (collector != mark_compact_collector_ && collector != concurrent_copying_collector_) {
2591 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2592 if (kIsDebugBuild) {
2593 // Try to read each page of the memory map in case mprotect didn't work properly b/19894268.
2594 temp_space_->GetMemMap()->TryReadable();
2595 }
2596 CHECK(temp_space_->IsEmpty());
2597 }
2598 gc_type = collector::kGcTypeFull; // TODO: Not hard code this in.
2599 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2600 current_allocator_ == kAllocatorTypeDlMalloc) {
2601 collector = FindCollectorByGcType(gc_type);
2602 } else {
2603 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2604 }
2605 if (IsGcConcurrent()) {
2606 // Disable concurrent GC check so that we don't have spammy JNI requests.
2607 // This gets recalculated in GrowForUtilization. It is important that it is disabled /
2608 // calculated in the same thread so that there aren't any races that can cause it to become
2609 // permanently disabled. b/17942071
2610 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2611 }
2612
2613 CHECK(collector != nullptr)
2614 << "Could not find garbage collector with collector_type="
2615 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
2616 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2617 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2618 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2619 RequestTrim(self);
2620 // Enqueue cleared references.
2621 reference_processor_->EnqueueClearedReferences(self);
2622 // Grow the heap so that we know when to perform the next GC.
2623 GrowForUtilization(collector, bytes_allocated_before_gc);
2624 LogGC(gc_cause, collector);
2625 FinishGC(self, gc_type);
2626 // Inform DDMS that a GC completed.
2627 Dbg::GcDidFinish();
2628 // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
2629 // deadlocks in case the JNI_OnUnload function does allocations.
2630 {
2631 ScopedObjectAccess soa(self);
2632 soa.Vm()->UnloadNativeLibraries();
2633 }
2634 return gc_type;
2635 }
2636
2637 void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
2638 const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2639 const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2640 // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2641 // (mutator time blocked >= long_pause_log_threshold_).
2642 bool log_gc = kLogAllGCs || gc_cause == kGcCauseExplicit;
2643 if (!log_gc && CareAboutPauseTimes()) {
2644 // GC for alloc pauses the allocating thread, so consider it as a pause.
2645 log_gc = duration > long_gc_log_threshold_ ||
2646 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2647 for (uint64_t pause : pause_times) {
2648 log_gc = log_gc || pause >= long_pause_log_threshold_;
2649 }
2650 }
2651 if (log_gc) {
2652 const size_t percent_free = GetPercentFree();
2653 const size_t current_heap_size = GetBytesAllocated();
2654 const size_t total_memory = GetTotalMemory();
2655 std::ostringstream pause_string;
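// The (pause_times[i] / 1000) * 1000 below truncates each pause to microsecond granularity
// before pretty-printing (illustrative: a 1,234,567 ns pause is reported as 1,234,000 ns).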
2656 for (size_t i = 0; i < pause_times.size(); ++i) {
2657 pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2658 << ((i != pause_times.size() - 1) ? "," : "");
2659 }
2660 LOG(INFO) << gc_cause << " " << collector->GetName()
2661 << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
2662 << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2663 << current_gc_iteration_.GetFreedLargeObjects() << "("
2664 << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2665 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2666 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2667 << " total " << PrettyDuration((duration / 1000) * 1000);
2668 VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2669 }
2670 }
2671
2672 void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2673 MutexLock mu(self, *gc_complete_lock_);
2674 collector_type_running_ = kCollectorTypeNone;
2675 if (gc_type != collector::kGcTypeNone) {
2676 last_gc_type_ = gc_type;
2677
2678 // Update stats.
2679 ++gc_count_last_window_;
2680 if (running_collection_is_blocking_) {
2681 // If the currently running collection was a blocking one,
2682 // increment the counters and reset the flag.
2683 ++blocking_gc_count_;
2684 blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
2685 ++blocking_gc_count_last_window_;
2686 }
2687 // Update the gc count rate histograms if due.
2688 UpdateGcCountRateHistograms();
2689 }
2690 // Reset.
2691 running_collection_is_blocking_ = false;
2692 thread_running_gc_ = nullptr;
2693 // Wake anyone who may have been waiting for the GC to complete.
2694 gc_complete_cond_->Broadcast(self);
2695 }
2696
2697 void Heap::UpdateGcCountRateHistograms() {
2698 // Invariant: if the time since the last update spans more than
2699 // one window, all the GC runs (if > 0) must have happened in the first
2700 // window, because otherwise the update would already have taken place
2701 // at an earlier GC run. So, we report the non-first windows with
2702 // zero counts to the histograms.
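// Illustrative example (values assumed): with a 10s window, last update at t=0 and a GC
// finishing at t=35s, time_since_last_update covers three full windows. The first window is
// recorded with the GCs counted so far (minus the current run), the next two windows are
// recorded as zero, and the last update time is rounded down to t=30s.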
2703 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2704 uint64_t now = NanoTime();
2705 DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
2706 uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
2707 uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
2708 if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
2709 // Record the first window.
2710 gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1); // Exclude the current run.
2711 blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
2712 blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
2713 // Record the other windows (with zero counts).
2714 for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
2715 gc_count_rate_histogram_.AddValue(0);
2716 blocking_gc_count_rate_histogram_.AddValue(0);
2717 }
2718 // Update the last update time and reset the counters.
2719 last_update_time_gc_count_rate_histograms_ =
2720 (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
2721 gc_count_last_window_ = 1; // Include the current run.
2722 blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
2723 }
2724 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2725 }
2726
2727 class RootMatchesObjectVisitor : public SingleRootVisitor {
2728 public:
2729 explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
2730
2731 void VisitRoot(mirror::Object* root, const RootInfo& info)
2732 OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
2733 if (root == obj_) {
2734 LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
2735 }
2736 }
2737
2738 private:
2739 const mirror::Object* const obj_;
2740 };
2741
2742
2743 class ScanVisitor {
2744 public:
2745 void operator()(const mirror::Object* obj) const {
2746 LOG(ERROR) << "Would have rescanned object " << obj;
2747 }
2748 };
2749
2750 // Verify a reference from an object.
2751 class VerifyReferenceVisitor : public SingleRootVisitor {
2752 public:
2753 VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2754 REQUIRES_SHARED(Locks::mutator_lock_)
2755 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
2756
2757 size_t GetFailureCount() const {
2758 return fail_count_->LoadSequentiallyConsistent();
2759 }
2760
2761 void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const
2762 REQUIRES_SHARED(Locks::mutator_lock_) {
2763 if (verify_referent_) {
2764 VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
2765 }
2766 }
2767
2768 void operator()(ObjPtr<mirror::Object> obj,
2769 MemberOffset offset,
2770 bool is_static ATTRIBUTE_UNUSED) const
2771 REQUIRES_SHARED(Locks::mutator_lock_) {
2772 VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
2773 }
2774
2775 bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS {
2776 return heap_->IsLiveObjectLocked(obj, true, false, true);
2777 }
2778
2779 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
2780 REQUIRES_SHARED(Locks::mutator_lock_) {
2781 if (!root->IsNull()) {
2782 VisitRoot(root);
2783 }
2784 }
2785 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
2786 REQUIRES_SHARED(Locks::mutator_lock_) {
2787 const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
2788 root->AsMirrorPtr(), RootInfo(kRootVMInternal));
2789 }
2790
2791 virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
2792 REQUIRES_SHARED(Locks::mutator_lock_) {
2793 if (root == nullptr) {
2794 LOG(ERROR) << "Root is null with info " << root_info.GetType();
2795 } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
2796 LOG(ERROR) << "Root " << root << " is dead with type " << mirror::Object::PrettyTypeOf(root)
2797 << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
2798 }
2799 }
2800
2801 private:
2802 // TODO: Fix the no thread safety analysis.
2803 // Returns false on failure.
2804 bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
2805 NO_THREAD_SAFETY_ANALYSIS {
2806 if (ref == nullptr || IsLive(ref)) {
2807 // Verify that the reference is live.
2808 return true;
2809 }
2810 if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
2811 // Print the message only on the first failure to prevent spam.
2812 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
2813 }
2814 if (obj != nullptr) {
2815 // Only do this part for non roots.
2816 accounting::CardTable* card_table = heap_->GetCardTable();
2817 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2818 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2819 uint8_t* card_addr = card_table->CardFromAddr(obj);
2820 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2821 << offset << "\n card value = " << static_cast<int>(*card_addr);
2822 if (heap_->IsValidObjectAddress(obj->GetClass())) {
2823 LOG(ERROR) << "Obj type " << obj->PrettyTypeOf();
2824 } else {
2825 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
2826 }
2827
2828 // Attempt to find the class inside of the recently freed objects.
2829 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2830 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2831 space::MallocSpace* space = ref_space->AsMallocSpace();
2832 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2833 if (ref_class != nullptr) {
2834 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2835 << ref_class->PrettyClass();
2836 } else {
2837 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
2838 }
2839 }
2840
2841 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2842 ref->GetClass()->IsClass()) {
2843 LOG(ERROR) << "Ref type " << ref->PrettyTypeOf();
2844 } else {
2845 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2846 << ") is not a valid heap address";
2847 }
2848
2849 card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
2850 void* cover_begin = card_table->AddrFromCard(card_addr);
2851 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2852 accounting::CardTable::kCardSize);
2853 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2854 << "-" << cover_end;
2855 accounting::ContinuousSpaceBitmap* bitmap =
2856 heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
2857
2858 if (bitmap == nullptr) {
2859 LOG(ERROR) << "Object " << obj << " has no bitmap";
2860 if (!VerifyClassClass(obj->GetClass())) {
2861 LOG(ERROR) << "Object " << obj << " failed class verification!";
2862 }
2863 } else {
2864 // Print out how the object is live.
2865 if (bitmap->Test(obj)) {
2866 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2867 }
2868 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
2869 LOG(ERROR) << "Object " << obj << " found in allocation stack";
2870 }
2871 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
2872 LOG(ERROR) << "Object " << obj << " found in live stack";
2873 }
2874 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2875 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2876 }
2877 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2878 LOG(ERROR) << "Ref " << ref << " found in live stack";
2879 }
2880 // Attempt to see if the card table missed the reference.
2881 ScanVisitor scan_visitor;
2882 uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
2883 card_table->Scan<false>(bitmap, byte_cover_begin,
2884 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
2885 }
2886
2887 // Search to see if any of the roots reference our object.
2888 RootMatchesObjectVisitor visitor1(obj);
2889 Runtime::Current()->VisitRoots(&visitor1);
2890 // Search to see if any of the roots reference our reference.
2891 RootMatchesObjectVisitor visitor2(ref);
2892 Runtime::Current()->VisitRoots(&visitor2);
2893 }
2894 return false;
2895 }
2896
2897 Heap* const heap_;
2898 Atomic<size_t>* const fail_count_;
2899 const bool verify_referent_;
2900 };
2901
2902 // Verify all references within an object, for use with HeapBitmap::Visit.
2903 class VerifyObjectVisitor {
2904 public:
2905 VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2906 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
2907
2908 void operator()(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2909 // Note: we are verifying the references in obj but not obj itself; obj must already be
2910 // live, otherwise we could not have found it in the live bitmap.
2911 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
2912 // The class doesn't count as a reference but we should verify it anyway.
2913 obj->VisitReferences(visitor, visitor);
2914 }
2915
2916 void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
2917 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2918 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
2919 Runtime::Current()->VisitRoots(&visitor);
2920 }
2921
2922 size_t GetFailureCount() const {
2923 return fail_count_->LoadSequentiallyConsistent();
2924 }
2925
2926 private:
2927 Heap* const heap_;
2928 Atomic<size_t>* const fail_count_;
2929 const bool verify_referent_;
2930 };
2931
2932 void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) {
2933 // Slow path, the allocation stack push back must have already failed.
2934 DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr()));
2935 do {
2936 // TODO: Add handle VerifyObject.
2937 StackHandleScope<1> hs(self);
2938 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2939 // Push our object into the reserve region of the allocation stack. This is only required due
2940 // to heap verification requiring that roots are live (either in the live bitmap or in the
2941 // allocation stack).
2942 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
2943 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2944 } while (!allocation_stack_->AtomicPushBack(obj->Ptr()));
2945 }
2946
2947 void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self,
2948 ObjPtr<mirror::Object>* obj) {
2949 // Slow path, the allocation stack push back must have already failed.
2950 DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr()));
2951 StackReference<mirror::Object>* start_address;
2952 StackReference<mirror::Object>* end_address;
2953 while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
2954 &end_address)) {
2955 // TODO: Add handle VerifyObject.
2956 StackHandleScope<1> hs(self);
2957 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2958 // Push our object into the reserve region of the allocation stack. This is only required due
2959 // to heap verification requiring that roots are live (either in the live bitmap or in the
2960 // allocation stack).
2961 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
2962 // Push into the reserve allocation stack.
2963 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2964 }
2965 self->SetThreadLocalAllocationStack(start_address, end_address);
2966 // Retry on the new thread-local allocation stack.
2967 CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr())); // Must succeed.
2968 }
2969
2970 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
2971 size_t Heap::VerifyHeapReferences(bool verify_referents) {
2972 Thread* self = Thread::Current();
2973 Locks::mutator_lock_->AssertExclusiveHeld(self);
2974 // Let's sort our allocation stacks so that we can efficiently binary search them.
2975 allocation_stack_->Sort();
2976 live_stack_->Sort();
2977 // Since we sorted the allocation stack content, we need to revoke all
2978 // thread-local allocation stacks.
2979 RevokeAllThreadLocalAllocationStacks(self);
2980 Atomic<size_t> fail_count_(0);
2981 VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
2982 // Verify objects in the allocation stack since these will be objects which were:
2983 // 1. Allocated prior to the GC (pre GC verification).
2984 // 2. Allocated during the GC (pre sweep GC verification).
2985 // We don't want to verify the objects in the live stack since they themselves may be
2986 // pointing to dead objects if they are not reachable.
2987 VisitObjectsPaused(visitor);
2988 // Verify the roots:
2989 visitor.VerifyRoots();
2990 if (visitor.GetFailureCount() > 0) {
2991 // Dump mod-union tables.
2992 for (const auto& table_pair : mod_union_tables_) {
2993 accounting::ModUnionTable* mod_union_table = table_pair.second;
2994 mod_union_table->Dump(LOG_STREAM(ERROR) << mod_union_table->GetName() << ": ");
2995 }
2996 // Dump remembered sets.
2997 for (const auto& table_pair : remembered_sets_) {
2998 accounting::RememberedSet* remembered_set = table_pair.second;
2999 remembered_set->Dump(LOG_STREAM(ERROR) << remembered_set->GetName() << ": ");
3000 }
3001 DumpSpaces(LOG_STREAM(ERROR));
3002 }
3003 return visitor.GetFailureCount();
3004 }
3005
3006 class VerifyReferenceCardVisitor {
3007 public:
3008 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
3009 REQUIRES_SHARED(Locks::mutator_lock_,
3010 Locks::heap_bitmap_lock_)
3011 : heap_(heap), failed_(failed) {
3012 }
3013
3014 // There are no card marks for native roots on a class.
3015 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
3016 const {}
3017 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
3018
3019 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
3020 // annotalysis on visitors.
3021 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
3022 NO_THREAD_SAFETY_ANALYSIS {
3023 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
3024 // Filter out class references since changing an object's class does not mark the card as dirty.
3025 // Also handles large objects, since the only reference they hold is a class reference.
3026 if (ref != nullptr && !ref->IsClass()) {
3027 accounting::CardTable* card_table = heap_->GetCardTable();
3028 // If the object is referencing something in the live stack other than a class, then the
3029 // object must be on a dirty card.
3030 if (!card_table->AddrIsInCardTable(obj)) {
3031 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
3032 *failed_ = true;
3033 } else if (!card_table->IsDirty(obj)) {
3034 // TODO: Check mod-union tables.
3035 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
3036 // kCardDirty - 1 if it didn't get touched since we aged it.
3037 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
3038 if (live_stack->ContainsSorted(ref)) {
3039 if (live_stack->ContainsSorted(obj)) {
3040 LOG(ERROR) << "Object " << obj << " found in live stack";
3041 }
3042 if (heap_->GetLiveBitmap()->Test(obj)) {
3043 LOG(ERROR) << "Object " << obj << " found in live bitmap";
3044 }
3045 LOG(ERROR) << "Object " << obj << " " << mirror::Object::PrettyTypeOf(obj)
3046 << " references " << ref << " " << mirror::Object::PrettyTypeOf(ref)
3047 << " in live stack";
3048
3049 // Print which field of the object is dead.
3050 if (!obj->IsObjectArray()) {
3051 mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
3052 CHECK(klass != nullptr);
3053 for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
3054 if (field.GetOffset().Int32Value() == offset.Int32Value()) {
3055 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
3056 << field.PrettyField();
3057 break;
3058 }
3059 }
3060 } else {
3061 mirror::ObjectArray<mirror::Object>* object_array =
3062 obj->AsObjectArray<mirror::Object>();
3063 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
3064 if (object_array->Get(i) == ref) {
3065 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
3066 }
3067 }
3068 }
3069
3070 *failed_ = true;
3071 }
3072 }
3073 }
3074 }
3075
3076 private:
3077 Heap* const heap_;
3078 bool* const failed_;
3079 };
3080
3081 class VerifyLiveStackReferences {
3082 public:
3083 explicit VerifyLiveStackReferences(Heap* heap)
3084 : heap_(heap),
3085 failed_(false) {}
3086
3087 void operator()(mirror::Object* obj) const
3088 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3089 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
3090 obj->VisitReferences(visitor, VoidFunctor());
3091 }
3092
3093 bool Failed() const {
3094 return failed_;
3095 }
3096
3097 private:
3098 Heap* const heap_;
3099 bool failed_;
3100 };
3101
3102 bool Heap::VerifyMissingCardMarks() {
3103 Thread* self = Thread::Current();
3104 Locks::mutator_lock_->AssertExclusiveHeld(self);
3105 // We need to sort the live stack since we binary search it.
3106 live_stack_->Sort();
3107 // Since we sorted the allocation stack content, we need to revoke all
3108 // thread-local allocation stacks.
3109 RevokeAllThreadLocalAllocationStacks(self);
3110 VerifyLiveStackReferences visitor(this);
3111 GetLiveBitmap()->Visit(visitor);
3112 // We can verify objects in the live stack since none of these should reference dead objects.
3113 for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
3114 if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
3115 visitor(it->AsMirrorPtr());
3116 }
3117 }
3118 return !visitor.Failed();
3119 }
3120
3121 void Heap::SwapStacks() {
3122 if (kUseThreadLocalAllocationStack) {
3123 live_stack_->AssertAllZero();
3124 }
3125 allocation_stack_.swap(live_stack_);
3126 }
3127
3128 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
3129 // This must be called only during the pause.
3130 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
3131 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
3132 MutexLock mu2(self, *Locks::thread_list_lock_);
3133 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
3134 for (Thread* t : thread_list) {
3135 t->RevokeThreadLocalAllocationStack();
3136 }
3137 }
3138
3139 void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
3140 if (kIsDebugBuild) {
3141 if (rosalloc_space_ != nullptr) {
3142 rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
3143 }
3144 if (bump_pointer_space_ != nullptr) {
3145 bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
3146 }
3147 }
3148 }
3149
3150 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
3151 if (kIsDebugBuild) {
3152 if (bump_pointer_space_ != nullptr) {
3153 bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
3154 }
3155 }
3156 }
3157
3158 accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
3159 auto it = mod_union_tables_.find(space);
3160 if (it == mod_union_tables_.end()) {
3161 return nullptr;
3162 }
3163 return it->second;
3164 }
3165
3166 accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3167 auto it = remembered_sets_.find(space);
3168 if (it == remembered_sets_.end()) {
3169 return nullptr;
3170 }
3171 return it->second;
3172 }
3173
3174 void Heap::ProcessCards(TimingLogger* timings,
3175 bool use_rem_sets,
3176 bool process_alloc_space_cards,
3177 bool clear_alloc_space_cards) {
3178 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3179 // Clear cards and keep track of cards cleared in the mod-union table.
3180 for (const auto& space : continuous_spaces_) {
3181 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
3182 accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
3183 if (table != nullptr) {
3184 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3185 "ImageModUnionClearCards";
3186 TimingLogger::ScopedTiming t2(name, timings);
3187 table->ProcessCards();
3188 } else if (use_rem_sets && rem_set != nullptr) {
3189 DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
3190 << static_cast<int>(collector_type_);
3191 TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
3192 rem_set->ClearCards();
3193 } else if (process_alloc_space_cards) {
3194 TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
3195 if (clear_alloc_space_cards) {
3196 uint8_t* end = space->End();
3197 if (space->IsImageSpace()) {
3198 // Image space end is the end of the mirror objects; it is not necessarily page or card
3199 // aligned. Align up so that the check in ClearCardRange does not fail.
3200 end = AlignUp(end, accounting::CardTable::kCardSize);
3201 }
3202 card_table_->ClearCardRange(space->Begin(), end);
3203 } else {
3204 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3205 // cards were dirty before the GC started.
3206 // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3207 // -> clean(cleaning thread).
3208 // The possible races leave us with either an aged card or an unaged card. Since we
3209 // checkpoint the roots and then scan / update the mod-union tables afterwards, we will
3210 // always scan either card. If we end up with the non-aged card, we scan it in the pause.
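// ModifyCardsAtomic applies AgeCardVisitor to every card in the space: a dirty card is aged
// to (kCardDirty - 1) and any other value is cleared to 0, so cards dirtied before the GC
// stay distinguishable from cards dirtied during it. (Summary of the visitor's behavior,
// assuming the standard AgeCardVisitor semantics.)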
3211 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3212 VoidFunctor());
3213 }
3214 }
3215 }
3216 }
3217
3218 struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
3219 virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
3220 return obj;
3221 }
3222 virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {
3223 }
3224 };
3225
3226 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3227 Thread* const self = Thread::Current();
3228 TimingLogger* const timings = current_gc_iteration_.GetTimings();
3229 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3230 if (verify_pre_gc_heap_) {
3231 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
3232 size_t failures = VerifyHeapReferences();
3233 if (failures > 0) {
3234 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3235 << " failures";
3236 }
3237 }
3238 // Check that all objects which reference things in the live stack are on dirty cards.
3239 if (verify_missing_card_marks_) {
3240 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
3241 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
3242 SwapStacks();
3243 // Sort the live stack so that we can quickly binary search it later.
3244 CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3245 << " missing card mark verification failed\n" << DumpSpaces();
3246 SwapStacks();
3247 }
3248 if (verify_mod_union_table_) {
3249 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
3250 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
3251 for (const auto& table_pair : mod_union_tables_) {
3252 accounting::ModUnionTable* mod_union_table = table_pair.second;
3253 IdentityMarkHeapReferenceVisitor visitor;
3254 mod_union_table->UpdateAndMarkReferences(&visitor);
3255 mod_union_table->Verify();
3256 }
3257 }
3258 }
3259
3260 void Heap::PreGcVerification(collector::GarbageCollector* gc) {
3261 if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
3262 collector::GarbageCollector::ScopedPause pause(gc, false);
3263 PreGcVerificationPaused(gc);
3264 }
3265 }
3266
3267 void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) {
3268 // TODO: Add a new runtime option for this?
3269 if (verify_pre_gc_rosalloc_) {
3270 RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
3271 }
3272 }
3273
3274 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
3275 Thread* const self = Thread::Current();
3276 TimingLogger* const timings = current_gc_iteration_.GetTimings();
3277 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3278 // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3279 // reachable objects.
3280 if (verify_pre_sweeping_heap_) {
3281 TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
3282 CHECK_NE(self->GetState(), kRunnable);
3283 {
3284 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3285 // Swapping bound bitmaps does nothing.
3286 gc->SwapBitmaps();
3287 }
3288 // Pass in false since concurrent reference processing can mean that the reference referents
3289 // may point to dead objects at the point which PreSweepingGcVerification is called.
3290 size_t failures = VerifyHeapReferences(false);
3291 if (failures > 0) {
3292 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3293 << " failures";
3294 }
3295 {
3296 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3297 gc->SwapBitmaps();
3298 }
3299 }
3300 if (verify_pre_sweeping_rosalloc_) {
3301 RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3302 }
3303 }
3304
3305 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3306 // Only pause if we have to do some verification.
3307 Thread* const self = Thread::Current();
3308 TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
3309 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3310 if (verify_system_weaks_) {
3311 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3312 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3313 mark_sweep->VerifySystemWeaks();
3314 }
3315 if (verify_post_gc_rosalloc_) {
3316 RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
3317 }
3318 if (verify_post_gc_heap_) {
3319 TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
3320 size_t failures = VerifyHeapReferences();
3321 if (failures > 0) {
3322 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3323 << " failures";
3324 }
3325 }
3326 }
3327
3328 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
3329 if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
3330 collector::GarbageCollector::ScopedPause pause(gc, false);
3331 PostGcVerificationPaused(gc);
3332 }
3333 }
3334
3335 void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
3336 TimingLogger::ScopedTiming t(name, timings);
3337 for (const auto& space : continuous_spaces_) {
3338 if (space->IsRosAllocSpace()) {
3339 VLOG(heap) << name << " : " << space->GetName();
3340 space->AsRosAllocSpace()->Verify();
3341 }
3342 }
3343 }
3344
3345 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
3346 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
3347 MutexLock mu(self, *gc_complete_lock_);
3348 return WaitForGcToCompleteLocked(cause, self);
3349 }
3350
3351 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
3352 collector::GcType last_gc_type = collector::kGcTypeNone;
3353 GcCause last_gc_cause = kGcCauseNone;
3354 uint64_t wait_start = NanoTime();
3355 while (collector_type_running_ != kCollectorTypeNone) {
3356 if (self != task_processor_->GetRunningThread()) {
3357 // The current thread is about to wait for a currently running
3358 // collection to finish. If the waiting thread is not the heap
3359 // task daemon thread, the currently running collection is
3360 // considered as a blocking GC.
3361 running_collection_is_blocking_ = true;
3362 VLOG(gc) << "Waiting for a blocking GC " << cause;
3363 }
3364 ScopedTrace trace("GC: Wait For Completion");
3365 // We must wait, change the thread state, and then sleep on gc_complete_cond_.
3366 gc_complete_cond_->Wait(self);
3367 last_gc_type = last_gc_type_;
3368 last_gc_cause = last_gc_cause_;
3369 }
3370 uint64_t wait_time = NanoTime() - wait_start;
3371 total_wait_time_ += wait_time;
3372 if (wait_time > long_pause_log_threshold_) {
3373 LOG(INFO) << "WaitForGcToComplete blocked " << cause << " on " << last_gc_cause << " for "
3374 << PrettyDuration(wait_time);
3375 }
3376 if (self != task_processor_->GetRunningThread()) {
3377 // The current thread is about to run a collection. If the thread
3378 // is not the heap task daemon thread, it's considered as a
3379 // blocking GC (i.e., blocking itself).
3380 running_collection_is_blocking_ = true;
3381 // Don't log fake "GC" types that are only used for debugger or hidden APIs. If we log these,
3382 // it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
3383 if (cause == kGcCauseForAlloc ||
3384 cause == kGcCauseForNativeAlloc ||
3385 cause == kGcCauseDisableMovingGc) {
3386 VLOG(gc) << "Starting a blocking GC " << cause;
3387 }
3388 }
3389 return last_gc_type;
3390 }
3391
3392 void Heap::DumpForSigQuit(std::ostream& os) {
3393 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
3394 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
3395 DumpGcPerformanceInfo(os);
3396 }
3397
3398 size_t Heap::GetPercentFree() {
3399 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
3400 }
3401
3402 void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
3403 if (max_allowed_footprint > GetMaxMemory()) {
3404 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
3405 << PrettySize(GetMaxMemory());
3406 max_allowed_footprint = GetMaxMemory();
3407 }
3408 max_allowed_footprint_ = max_allowed_footprint;
3409 }
3410
3411 bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
3412 if (kMovingCollector) {
3413 space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true);
3414 if (space != nullptr) {
3415 // TODO: Check large object?
3416 return space->CanMoveObjects();
3417 }
3418 }
3419 return false;
3420 }
3421
3422 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
3423 for (const auto& collector : garbage_collectors_) {
3424 if (collector->GetCollectorType() == collector_type_ &&
3425 collector->GetGcType() == gc_type) {
3426 return collector;
3427 }
3428 }
3429 return nullptr;
3430 }
3431
3432 double Heap::HeapGrowthMultiplier() const {
3433 // If we don't care about pause times, we are in the background, so return 1.0.
3434 if (!CareAboutPauseTimes()) {
3435 return 1.0;
3436 }
3437 return foreground_heap_growth_multiplier_;
3438 }
3439
3440 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
3441 uint64_t bytes_allocated_before_gc) {
3442 // We know what our utilization is at this moment.
3443 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
3444 const uint64_t bytes_allocated = GetBytesAllocated();
3445 // Trace the new heap size after the GC is finished.
3446 TraceHeapSize(bytes_allocated);
3447 uint64_t target_size;
3448 collector::GcType gc_type = collector_ran->GetGcType();
3449 // Use the multiplier to grow more for foreground.
3450 const double multiplier = HeapGrowthMultiplier();
3451 const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
3452 const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
3453 if (gc_type != collector::kGcTypeSticky) {
3454 // Grow the heap for non sticky GC.
3455 ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
3456 CHECK_GE(delta, 0) << "bytes_allocated=" << bytes_allocated
3457 << " target_utilization_=" << target_utilization_;
3458 target_size = bytes_allocated + delta * multiplier;
3459 target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
3460 target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
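// Illustrative example (values assumed): with bytes_allocated = 100MB, a target utilization
// of 0.75 and multiplier = 1, delta = 100MB / 0.75 - 100MB = ~33MB, so target_size starts at
// ~133MB and is then clamped into [100MB + adjusted_min_free, 100MB + adjusted_max_free].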
3461 next_gc_type_ = collector::kGcTypeSticky;
3462 } else {
3463 collector::GcType non_sticky_gc_type = NonStickyGcType();
3464 // Find what the next non sticky collector will be.
3465 collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
3466 // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3467 // do another sticky collection next.
3468 // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
3469 // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
3470 // if the sticky GC throughput always remained >= the full/partial throughput.
3471 if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
3472 non_sticky_collector->GetEstimatedMeanThroughput() &&
3473 non_sticky_collector->NumberOfIterations() > 0 &&
3474 bytes_allocated <= max_allowed_footprint_) {
3475 next_gc_type_ = collector::kGcTypeSticky;
3476 } else {
3477 next_gc_type_ = non_sticky_gc_type;
3478 }
3479 // If we have freed enough memory, shrink the heap back down.
3480 if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
3481 target_size = bytes_allocated + adjusted_max_free;
3482 } else {
3483 target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
3484 }
3485 }
3486 if (!ignore_max_footprint_) {
3487 SetIdealFootprint(target_size);
3488 if (IsGcConcurrent()) {
3489 const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
3490 current_gc_iteration_.GetFreedLargeObjectBytes() +
3491 current_gc_iteration_.GetFreedRevokeBytes();
3492 // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3493 // how many bytes were allocated during the GC we need to add freed_bytes back on.
3494 CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
3495 const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
3496 bytes_allocated_before_gc;
3497 // Calculate when to perform the next ConcurrentGC.
3498 // Estimate how many remaining bytes we will have when we need to start the next GC.
3499 size_t remaining_bytes = bytes_allocated_during_gc;
3500 remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
3501 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
3502 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
3503 // A situation that should never happen: based on the estimated allocation rate we would
3504 // exceed the application's entire footprint. Schedule another GC nearly straight
3505 // away.
3506 remaining_bytes = kMinConcurrentRemainingBytes;
3507 }
3508 DCHECK_LE(remaining_bytes, max_allowed_footprint_);
3509 DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
3510 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3511 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
3512 // right away.
3513 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
3514 static_cast<size_t>(bytes_allocated));
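// Illustrative example (values assumed): if max_allowed_footprint_ is 200MB, 4MB were
// allocated during the GC, and the kMin/kMaxConcurrentRemainingBytes clamps do not apply,
// the next concurrent GC is scheduled to start once bytes_allocated reaches roughly 196MB.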
3515 }
3516 }
3517 }
3518
3519 void Heap::ClampGrowthLimit() {
3520 // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
3521 ScopedObjectAccess soa(Thread::Current());
3522 WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
3523 capacity_ = growth_limit_;
3524 for (const auto& space : continuous_spaces_) {
3525 if (space->IsMallocSpace()) {
3526 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3527 malloc_space->ClampGrowthLimit();
3528 }
3529 }
3530 if (collector_type_ == kCollectorTypeCC) {
3531 DCHECK(region_space_ != nullptr);
3532 // Twice the capacity as CC needs extra space for evacuating objects.
3533 region_space_->ClampGrowthLimit(2 * capacity_);
3534 }
3535 // This space isn't added for performance reasons.
3536 if (main_space_backup_.get() != nullptr) {
3537 main_space_backup_->ClampGrowthLimit();
3538 }
3539 }
3540
3541 void Heap::ClearGrowthLimit() {
3542 growth_limit_ = capacity_;
3543 ScopedObjectAccess soa(Thread::Current());
3544 for (const auto& space : continuous_spaces_) {
3545 if (space->IsMallocSpace()) {
3546 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3547 malloc_space->ClearGrowthLimit();
3548 malloc_space->SetFootprintLimit(malloc_space->Capacity());
3549 }
3550 }
3551 // This space isn't added for performance reasons.
3552 if (main_space_backup_.get() != nullptr) {
3553 main_space_backup_->ClearGrowthLimit();
3554 main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3555 }
3556 }
3557
3558 void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
3559 ScopedObjectAccess soa(self);
3560 ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
3561 jvalue args[1];
3562 args[0].l = arg.get();
3563 InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
3564 // Restore object in case it gets moved.
3565 *object = soa.Decode<mirror::Object>(arg.get());
3566 }
3567
3568 void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
3569 bool force_full,
3570 ObjPtr<mirror::Object>* obj) {
3571 StackHandleScope<1> hs(self);
3572 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3573 RequestConcurrentGC(self, kGcCauseBackground, force_full);
3574 }
3575
3576 class Heap::ConcurrentGCTask : public HeapTask {
3577 public:
3578 ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
3579 : HeapTask(target_time), cause_(cause), force_full_(force_full) {}
3580 virtual void Run(Thread* self) OVERRIDE {
3581 gc::Heap* heap = Runtime::Current()->GetHeap();
3582 heap->ConcurrentGC(self, cause_, force_full_);
3583 heap->ClearConcurrentGCRequest();
3584 }
3585
3586 private:
3587 const GcCause cause_;
3588 const bool force_full_; // If true, force full (or partial) collection.
3589 };
3590
3591 static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
3592 Runtime* runtime = Runtime::Current();
3593 return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3594 !self->IsHandlingStackOverflow();
3595 }
3596
3597 void Heap::ClearConcurrentGCRequest() {
3598 concurrent_gc_pending_.StoreRelaxed(false);
3599 }
3600
3601 void Heap::RequestConcurrentGC(Thread* self, GcCause cause, bool force_full) {
3602 if (CanAddHeapTask(self) &&
3603 concurrent_gc_pending_.CompareAndSetStrongSequentiallyConsistent(false, true)) {
3604 task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away.
3605 cause,
3606 force_full));
3607 }
3608 }
3609
3610 void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full) {
3611 if (!Runtime::Current()->IsShuttingDown(self)) {
3612 // Wait for any GCs currently running to finish.
3613 if (WaitForGcToComplete(cause, self) == collector::kGcTypeNone) {
3614 // If we can't run the GC type we wanted to run, find the next appropriate one and try
3615 // that instead. E.g. can't do partial, so do full instead.
3616 collector::GcType next_gc_type = next_gc_type_;
3617 // If forcing full and next gc type is sticky, override with a non-sticky type.
3618 if (force_full && next_gc_type == collector::kGcTypeSticky) {
3619 next_gc_type = NonStickyGcType();
3620 }
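// gc_plan_ is ordered from cheapest to most thorough (typically sticky, then partial, then
// full), so the loop below escalates to progressively heavier collections until one of them
// actually runs.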
3621 if (CollectGarbageInternal(next_gc_type, cause, false) == collector::kGcTypeNone) {
3622 for (collector::GcType gc_type : gc_plan_) {
3623 // Attempt to run the collector, if we succeed, we are done.
3624 if (gc_type > next_gc_type &&
3625 CollectGarbageInternal(gc_type, cause, false) != collector::kGcTypeNone) {
3626 break;
3627 }
3628 }
3629 }
3630 }
3631 }
3632 }
3633
3634 class Heap::CollectorTransitionTask : public HeapTask {
3635 public:
3636 explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
3637
3638 virtual void Run(Thread* self) OVERRIDE {
3639 gc::Heap* heap = Runtime::Current()->GetHeap();
3640 heap->DoPendingCollectorTransition();
3641 heap->ClearPendingCollectorTransition(self);
3642 }
3643 };
3644
3645 void Heap::ClearPendingCollectorTransition(Thread* self) {
3646 MutexLock mu(self, *pending_task_lock_);
3647 pending_collector_transition_ = nullptr;
3648 }
3649
3650 void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
3651 Thread* self = Thread::Current();
3652 desired_collector_type_ = desired_collector_type;
3653 if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
3654 return;
3655 }
3656 if (collector_type_ == kCollectorTypeCC) {
3657 // For CC, we invoke a full compaction when going to the background, but the collector type
3658 // doesn't change.
3659 DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground);
3660 }
3661 DCHECK_NE(collector_type_, kCollectorTypeCCBackground);
3662 CollectorTransitionTask* added_task = nullptr;
3663 const uint64_t target_time = NanoTime() + delta_time;
3664 {
3665 MutexLock mu(self, *pending_task_lock_);
3666 // If we have an existing collector transition, update the target time to be the new target.
3667 if (pending_collector_transition_ != nullptr) {
3668 task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
3669 return;
3670 }
3671 added_task = new CollectorTransitionTask(target_time);
3672 pending_collector_transition_ = added_task;
3673 }
3674 task_processor_->AddTask(self, added_task);
3675 }
3676
3677 class Heap::HeapTrimTask : public HeapTask {
3678 public:
3679 explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
3680 virtual void Run(Thread* self) OVERRIDE {
3681 gc::Heap* heap = Runtime::Current()->GetHeap();
3682 heap->Trim(self);
3683 heap->ClearPendingTrim(self);
3684 }
3685 };
3686
3687 void Heap::ClearPendingTrim(Thread* self) {
3688 MutexLock mu(self, *pending_task_lock_);
3689 pending_heap_trim_ = nullptr;
3690 }
3691
3692 void Heap::RequestTrim(Thread* self) {
3693 if (!CanAddHeapTask(self)) {
3694 return;
3695 }
3696 // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3697 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3698 // a space it will hold its lock and can become a cause of jank.
3699 // Note that the large object space trims itself, and the Zygote space has been trimmed and
3700 // unchanging since forking.
3701
3702 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3703 // because that only marks object heads, so a large array looks like lots of empty space. We
3704 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
3705 // to utilization (which is probably inversely proportional to how much benefit we can expect).
3706 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3707 // not how much use we're making of those pages.
3708 HeapTrimTask* added_task = nullptr;
3709 {
3710 MutexLock mu(self, *pending_task_lock_);
3711 if (pending_heap_trim_ != nullptr) {
3712 // Already have a heap trim request in task processor, ignore this request.
3713 return;
3714 }
3715 added_task = new HeapTrimTask(kHeapTrimWait);
3716 pending_heap_trim_ = added_task;
3717 }
3718 task_processor_->AddTask(self, added_task);
3719 }
3720
3721 void Heap::RevokeThreadLocalBuffers(Thread* thread) {
3722 if (rosalloc_space_ != nullptr) {
3723 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3724 if (freed_bytes_revoke > 0U) {
3725 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3726 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3727 }
3728 }
3729 if (bump_pointer_space_ != nullptr) {
3730 CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
3731 }
3732 if (region_space_ != nullptr) {
3733 CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
3734 }
3735 }
3736
3737 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3738 if (rosalloc_space_ != nullptr) {
3739 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3740 if (freed_bytes_revoke > 0U) {
3741 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3742 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3743 }
3744 }
3745 }
3746
3747 void Heap::RevokeAllThreadLocalBuffers() {
3748 if (rosalloc_space_ != nullptr) {
3749 size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
3750 if (freed_bytes_revoke > 0U) {
3751 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3752 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3753 }
3754 }
3755 if (bump_pointer_space_ != nullptr) {
3756 CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
3757 }
3758 if (region_space_ != nullptr) {
3759 CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
3760 }
3761 }
3762
3763 bool Heap::IsGCRequestPending() const {
3764 return concurrent_gc_pending_.LoadRelaxed();
3765 }
3766
3767 void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
3768 env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
3769 WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
3770 static_cast<jlong>(timeout));
3771 }
3772
3773 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
3774 size_t old_value = new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
3775
3776 if (old_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
3777 !IsGCRequestPending()) {
3778 // Trigger another GC because there have been enough native bytes
3779 // allocated since the last GC.
3780 if (IsGcConcurrent()) {
3781 RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full*/true);
3782 } else {
3783 CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
3784 }
3785 }
3786 }
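// Worked example with hypothetical values: if NativeAllocationGcWatermark() were 32MB and
// HeapGrowthMultiplier() 2.0, then a registration that observes more than 64MB of native bytes
// accumulated since the last GC requests another collection (concurrent if supported, otherwise a
// blocking non-sticky GC); further registrations while that request is pending only add to the
// counter.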
3787
3788 void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
3789 // Take the bytes freed out of new_native_bytes_allocated_ first. If
3790 // new_native_bytes_allocated_ reaches zero, take the remaining bytes freed
3791 // out of old_native_bytes_allocated_ to ensure all freed bytes are
3792 // accounted for.
3793 size_t allocated;
3794 size_t new_freed_bytes;
3795 do {
3796 allocated = new_native_bytes_allocated_.LoadRelaxed();
3797 new_freed_bytes = std::min(allocated, bytes);
3798 } while (!new_native_bytes_allocated_.CompareAndSetWeakRelaxed(allocated,
3799 allocated - new_freed_bytes));
3800 if (new_freed_bytes < bytes) {
3801 old_native_bytes_allocated_.FetchAndSubRelaxed(bytes - new_freed_bytes);
3802 }
3803 }
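// Worked example (illustrative numbers): with new_native_bytes_allocated_ == 100 and
// old_native_bytes_allocated_ == 500, RegisterNativeFree(140) first CASes the "new" counter from
// 100 down to 0 (new_freed_bytes == 100) and then subtracts the remaining 40 from the "old"
// counter, leaving 0 and 460. The weak CAS loop simply retries if another thread changed the
// "new" counter between the load and the exchange.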
3804
3805 size_t Heap::GetTotalMemory() const {
3806 return std::max(max_allowed_footprint_, GetBytesAllocated());
3807 }
3808
3809 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
3810 DCHECK(mod_union_table != nullptr);
3811 mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
3812 }
3813
3814 void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
3815 // Compare rounded sizes since the allocation may have been retried after rounding the size.
3816 // See b/37885600
3817 CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
3818 (c->IsVariableSize() ||
3819 RoundUp(c->GetObjectSize(), kObjectAlignment) ==
3820 RoundUp(byte_count, kObjectAlignment)))
3821 << "ClassFlags=" << c->GetClassFlags()
3822 << " IsClassClass=" << c->IsClassClass()
3823 << " byte_count=" << byte_count
3824 << " IsVariableSize=" << c->IsVariableSize()
3825 << " ObjectSize=" << c->GetObjectSize()
3826 << " sizeof(Class)=" << sizeof(mirror::Class)
3827 << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag*/ "klass");
3828 CHECK_GE(byte_count, sizeof(mirror::Object));
3829 }
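// Example of the rounded-size check above (assuming 8-byte object alignment): a fixed-size class
// whose GetObjectSize() is 20 may legitimately be allocated with byte_count == 24 after an
// allocation retry, since RoundUp(20, 8) == RoundUp(24, 8) == 24. Variable-size classes (arrays,
// strings) skip the size comparison entirely.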
3830
3831 void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
3832 CHECK(remembered_set != nullptr);
3833 space::Space* space = remembered_set->GetSpace();
3834 CHECK(space != nullptr);
3835 CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
3836 remembered_sets_.Put(space, remembered_set);
3837 CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
3838 }
3839
3840 void Heap::RemoveRememberedSet(space::Space* space) {
3841 CHECK(space != nullptr);
3842 auto it = remembered_sets_.find(space);
3843 CHECK(it != remembered_sets_.end());
3844 delete it->second;
3845 remembered_sets_.erase(it);
3846 CHECK(remembered_sets_.find(space) == remembered_sets_.end());
3847 }
3848
3849 void Heap::ClearMarkedObjects() {
3850 // Clear all of the spaces' mark bitmaps.
3851 for (const auto& space : GetContinuousSpaces()) {
3852 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
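// If a space shares a single bitmap for live and mark information (the two pointers are equal),
// clearing it here would also discard liveness data, so such spaces are skipped.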
3853 if (space->GetLiveBitmap() != mark_bitmap) {
3854 mark_bitmap->Clear();
3855 }
3856 }
3857 // Clear the marked objects in the discontinuous space object sets.
3858 for (const auto& space : GetDiscontinuousSpaces()) {
3859 space->GetMarkBitmap()->Clear();
3860 }
3861 }
3862
3863 void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
3864 allocation_records_.reset(records);
3865 }
3866
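// The next two functions use a double-checked pattern: allocation tracking is tested once without
// the lock as a cheap early-out, then re-tested under alloc_tracker_lock_ in case tracking was
// disabled while the lock was being acquired.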
3867 void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
3868 if (IsAllocTrackingEnabled()) {
3869 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3870 if (IsAllocTrackingEnabled()) {
3871 GetAllocationRecords()->VisitRoots(visitor);
3872 }
3873 }
3874 }
3875
3876 void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
3877 if (IsAllocTrackingEnabled()) {
3878 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3879 if (IsAllocTrackingEnabled()) {
3880 GetAllocationRecords()->SweepAllocationRecords(visitor);
3881 }
3882 }
3883 }
3884
3885 void Heap::AllowNewAllocationRecords() const {
3886 CHECK(!kUseReadBarrier);
3887 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3888 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
3889 if (allocation_records != nullptr) {
3890 allocation_records->AllowNewAllocationRecords();
3891 }
3892 }
3893
3894 void Heap::DisallowNewAllocationRecords() const {
3895 CHECK(!kUseReadBarrier);
3896 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3897 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
3898 if (allocation_records != nullptr) {
3899 allocation_records->DisallowNewAllocationRecords();
3900 }
3901 }
3902
3903 void Heap::BroadcastForNewAllocationRecords() const {
3904 // Always broadcast without checking IsAllocTrackingEnabled() because allocation tracking may
3905 // be disabled while some threads are waiting for system weak access in
3906 // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
3907 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3908 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
3909 if (allocation_records != nullptr) {
3910 allocation_records->BroadcastForNewAllocationRecords();
3911 }
3912 }
3913
3914 void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
3915 auto* const runtime = Runtime::Current();
3916 if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
3917 !runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
3918 // Check if we should GC.
3919 bool new_backtrace = false;
3920 {
3921 static constexpr size_t kMaxFrames = 16u;
3922 FixedSizeBacktrace<kMaxFrames> backtrace;
3923 backtrace.Collect(/* skip_frames */ 2);
3924 uint64_t hash = backtrace.Hash();
3925 MutexLock mu(self, *backtrace_lock_);
3926 new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
3927 if (new_backtrace) {
3928 seen_backtraces_.insert(hash);
3929 }
3930 }
3931 if (new_backtrace) {
3932 StackHandleScope<1> hs(self);
3933 auto h = hs.NewHandleWrapper(obj);
3934 CollectGarbage(/* clear_soft_references */ false);
3935 unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
3936 } else {
3937 seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
3938 }
3939 }
3940 }
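// Illustrative effect of the backtrace hashing above: in GC stress mode, each distinct allocation
// call stack (up to kMaxFrames frames) forces exactly one collection the first time it is seen;
// later allocations from an already-seen site only increment seen_backtrace_count_.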
3941
3942 void Heap::DisableGCForShutdown() {
3943 Thread* const self = Thread::Current();
3944 CHECK(Runtime::Current()->IsShuttingDown(self));
3945 MutexLock mu(self, *gc_complete_lock_);
3946 gc_disabled_for_shutdown_ = true;
3947 }
3948
3949 bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
3950 for (gc::space::ImageSpace* space : boot_image_spaces_) {
3951 if (space->HasAddress(obj.Ptr())) {
3952 return true;
3953 }
3954 }
3955 return false;
3956 }
3957
3958 bool Heap::IsInBootImageOatFile(const void* p) const {
3959 for (gc::space::ImageSpace* space : boot_image_spaces_) {
3960 if (space->GetOatFile()->Contains(p)) {
3961 return true;
3962 }
3963 }
3964 return false;
3965 }
3966
3967 void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
3968 uint32_t* boot_image_end,
3969 uint32_t* boot_oat_begin,
3970 uint32_t* boot_oat_end) {
3971 DCHECK(boot_image_begin != nullptr);
3972 DCHECK(boot_image_end != nullptr);
3973 DCHECK(boot_oat_begin != nullptr);
3974 DCHECK(boot_oat_end != nullptr);
3975 *boot_image_begin = 0u;
3976 *boot_image_end = 0u;
3977 *boot_oat_begin = 0u;
3978 *boot_oat_end = 0u;
3979 for (gc::space::ImageSpace* space_ : GetBootImageSpaces()) {
3980 const uint32_t image_begin = PointerToLowMemUInt32(space_->Begin());
3981 const uint32_t image_size = space_->GetImageHeader().GetImageSize();
3982 if (*boot_image_begin == 0 || image_begin < *boot_image_begin) {
3983 *boot_image_begin = image_begin;
3984 }
3985 *boot_image_end = std::max(*boot_image_end, image_begin + image_size);
3986 const OatFile* boot_oat_file = space_->GetOatFile();
3987 const uint32_t oat_begin = PointerToLowMemUInt32(boot_oat_file->Begin());
3988 const uint32_t oat_size = boot_oat_file->Size();
3989 if (*boot_oat_begin == 0 || oat_begin < *boot_oat_begin) {
3990 *boot_oat_begin = oat_begin;
3991 }
3992 *boot_oat_end = std::max(*boot_oat_end, oat_begin + oat_size);
3993 }
3994 }
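// Worked example with hypothetical addresses: given two boot image spaces at
// [0x70000000, +8MB) and [0x70800000, +4MB), the loop above yields
// *boot_image_begin == 0x70000000 and *boot_image_end == 0x70C00000, i.e. the smallest begin and
// the largest end across all spaces; the oat range is accumulated the same way.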
3995
3996 void Heap::SetAllocationListener(AllocationListener* l) {
3997 AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, l);
3998
3999 if (old == nullptr) {
4000 Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4001 }
4002 }
4003
4004 void Heap::RemoveAllocationListener() {
4005 AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr);
4006
4007 if (old != nullptr) {
4008 Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
4009 }
4010 }
4011
4012 void Heap::SetGcPauseListener(GcPauseListener* l) {
4013 gc_pause_listener_.StoreRelaxed(l);
4014 }
4015
4016 void Heap::RemoveGcPauseListener() {
4017 gc_pause_listener_.StoreRelaxed(nullptr);
4018 }
4019
4020 mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
4021 size_t alloc_size,
4022 bool grow,
4023 size_t* bytes_allocated,
4024 size_t* usable_size,
4025 size_t* bytes_tl_bulk_allocated) {
4026 const AllocatorType allocator_type = GetCurrentAllocator();
4027 if (kUsePartialTlabs && alloc_size <= self->TlabRemainingCapacity()) {
4028 DCHECK_GT(alloc_size, self->TlabSize());
4029 // There is enough space if we grow the TLAB. Let's do that. This increases the
4030 // TLAB bytes.
4031 const size_t min_expand_size = alloc_size - self->TlabSize();
4032 const size_t expand_bytes = std::max(
4033 min_expand_size,
4034 std::min(self->TlabRemainingCapacity() - self->TlabSize(), kPartialTlabSize));
4035 if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, expand_bytes, grow))) {
4036 return nullptr;
4037 }
4038 *bytes_tl_bulk_allocated = expand_bytes;
4039 self->ExpandTlab(expand_bytes);
4040 DCHECK_LE(alloc_size, self->TlabSize());
4041 } else if (allocator_type == kAllocatorTypeTLAB) {
4042 DCHECK(bump_pointer_space_ != nullptr);
4043 const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
4044 if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow))) {
4045 return nullptr;
4046 }
4047 // Try allocating a new thread local buffer; if the allocation fails, the space must be
4048 // full, so return null.
4049 if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
4050 return nullptr;
4051 }
4052 *bytes_tl_bulk_allocated = new_tlab_size;
4053 } else {
4054 DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
4055 DCHECK(region_space_ != nullptr);
4056 if (space::RegionSpace::kRegionSize >= alloc_size) {
4057 // Non-large. Check OOME for a tlab.
4058 if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
4059 space::RegionSpace::kRegionSize,
4060 grow))) {
4061 const size_t new_tlab_size = kUsePartialTlabs
4062 ? std::max(alloc_size, kPartialTlabSize)
4063 : gc::space::RegionSpace::kRegionSize;
4064 // Try to allocate a tlab.
4065 if (!region_space_->AllocNewTlab(self, new_tlab_size)) {
4066 // Failed to allocate a tlab. Try non-tlab.
4067 return region_space_->AllocNonvirtual<false>(alloc_size,
4068 bytes_allocated,
4069 usable_size,
4070 bytes_tl_bulk_allocated);
4071 }
4072 *bytes_tl_bulk_allocated = new_tlab_size;
4073 // Fall-through to using the TLAB below.
4074 } else {
4075 // Check OOME for a non-tlab allocation.
4076 if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
4077 return region_space_->AllocNonvirtual<false>(alloc_size,
4078 bytes_allocated,
4079 usable_size,
4080 bytes_tl_bulk_allocated);
4081 }
4082 // Neither tlab nor non-tlab works. Give up.
4083 return nullptr;
4084 }
4085 } else {
4086 // Large. Check OOME.
4087 if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
4088 return region_space_->AllocNonvirtual<false>(alloc_size,
4089 bytes_allocated,
4090 usable_size,
4091 bytes_tl_bulk_allocated);
4092 }
4093 return nullptr;
4094 }
4095 }
4096 // Refilled TLAB, return.
4097 mirror::Object* ret = self->AllocTlab(alloc_size);
4098 DCHECK(ret != nullptr);
4099 *bytes_allocated = alloc_size;
4100 *usable_size = alloc_size;
4101 return ret;
4102 }
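// Worked example for the kUsePartialTlabs path above (hypothetical sizes): with
// TlabSize() == 1KB, TlabRemainingCapacity() == 64KB and a 2KB allocation,
// min_expand_size == 1KB and expand_bytes == max(1KB, min(63KB, kPartialTlabSize)); if
// kPartialTlabSize were 16KB the TLAB would grow by 16KB, so several follow-up allocations can be
// served before the next expansion.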
4103
4104 const Verification* Heap::GetVerification() const {
4105 return verification_.get();
4106 }
4107
4108 void Heap::VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size) {
4109 VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint) << " to "
4110 << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
4111 }
4112
4113 } // namespace gc
4114 } // namespace art
4115