/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <limits>
#include "android-base/thread_annotations.h"
#if defined(__BIONIC__) || defined(__GLIBC__)
#include <malloc.h>  // For mallinfo()
#endif
#include <memory>
#include <vector>

#include "android-base/stringprintf.h"

#include "allocation_listener.h"
#include "art_field-inl.h"
#include "backtrace_helper.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/file_utils.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG.
#include "base/memory_tool.h"
#include "base/mutex.h"
#include "base/os.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "class_root-inl.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex/dex_file-inl.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_sweep.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/racing_check.h"
#include "gc/reference_processor.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "gc/task_processor.h"
#include "gc/verification.h"
#include "gc_pause_listener.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "heap-inl.h"
#include "heap-visit-objects-inl.h"
#include "image.h"
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jni/java_vm_ext.h"
#include "mirror/class-inl.h"
#include "mirror/executable-inl.h"
#include "mirror/field.h"
#include "mirror/method_handle_impl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "mirror/var_handle.h"
#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#ifdef ART_TARGET_ANDROID
#include "perfetto/heap_profile.h"
#endif
#include "reflection.h"
#include "runtime.h"
#include "javaheapprof/javaheapsampler.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
#include "verify_object-inl.h"
#include "well_known_classes.h"

namespace art {

#ifdef ART_TARGET_ANDROID
namespace {

// Enable the heap sampler Callback function used by Perfetto.
void EnableHeapSamplerCallback(void* enable_ptr,
                               const AHeapProfileEnableCallbackInfo* enable_info_ptr) {
  HeapSampler* sampler_self = reinterpret_cast<HeapSampler*>(enable_ptr);
  // Set the ART profiler sampling interval to the value from Perfetto.
  uint64_t interval = AHeapProfileEnableCallbackInfo_getSamplingInterval(enable_info_ptr);
  if (interval > 0) {
    sampler_self->SetSamplingInterval(interval);
  }
  // Otherwise the default 4K sampling interval is used. This default case shouldn't happen with
  // the Perfetto API: AHeapProfileEnableCallbackInfo_getSamplingInterval should always return the
  // requested sampling interval, which is a uint64_t and has already been checked for != 0.
  // Do not use the heap here, even as a temporary: it will build, but test runs will silently
  // fail because the heap is not fully constructed yet in some cases.
  sampler_self->EnableHeapSampler();
}

// Disable the heap sampler Callback function used by Perfetto.
void DisableHeapSamplerCallback(void* disable_ptr,
                                const AHeapProfileDisableCallbackInfo* info_ptr ATTRIBUTE_UNUSED) {
  HeapSampler* sampler_self = reinterpret_cast<HeapSampler*>(disable_ptr);
  sampler_self->DisableHeapSampler();
}

}  // namespace
#endif

namespace gc {

DEFINE_RUNTIME_DEBUG_FLAG(Heap, kStressCollectorTransition);

// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
static double GetStickyGcThroughputAdjustment(bool use_generational_cc) {
  return use_generational_cc ? 0.5 : 1.0;
}
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
// How many reserve entries are at the end of the allocation stack; these are only needed if the
// allocation stack overflows.
static constexpr size_t kAllocationStackReserveSize = 1024;
// Default mark stack size in bytes.
static const size_t kDefaultMarkStackSize = 64 * KB;
// Define space name.
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
static const char* kNonMovingSpaceName = "non moving space";
static const char* kZygoteSpaceName = "zygote space";
static constexpr bool kGCALotMode = false;
// GC alot mode uses a small allocation stack to stress test a lot of GC.
static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
// Verify object mode has a small allocation stack size since searching the allocation stack is
// slow.
static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
    sizeof(mirror::HeapReference<mirror::Object>);

// After a GC (due to allocation failure) we should reclaim at least this
// fraction of the current max heap size. Otherwise throw OOME.
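// For example, with a 512 MiB max heap this means roughly 5 MiB must be free after such a GC;
// if less than that was reclaimed, the allocation throws OutOfMemoryError.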
static constexpr double kMinFreeHeapAfterGcForAlloc = 0.01;

// For deterministic compilation, we need the heap to be at a well-known address.
static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;

static const char* kRegionSpaceName = "main space (region space)";

// If true, we log all GCs in both the foreground and background. Used for debugging.
static constexpr bool kLogAllGCs = false;

// Use max heap for 2 seconds. This is smaller than the usual 5s window since we don't want to
// allow allocation with relaxed ergonomics for that long.
static constexpr size_t kPostForkMaxHeapDurationMS = 2000;

#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
// 300 MB (0x12c00000) - (default non-moving space capacity).
uint8_t* const Heap::kPreferredAllocSpaceBegin =
    reinterpret_cast<uint8_t*>(300 * MB - kDefaultNonMovingSpaceCapacity);
#else
#ifdef __ANDROID__
// For 32-bit Android, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
#else
// For 32-bit host, use 0x40000000 because asan uses most of the space below this.
uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
#endif
#endif

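// Whether GC pause times currently matter: only while the process is in a jank perceptible
// (foreground) state; background processes can tolerate longer pauses.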
static inline bool CareAboutPauseTimes() {
  return Runtime::Current()->InJankPerceptibleProcessState();
}

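// Debug-build check that the boot image spaces form one contiguous reservation, with each
// component's images laid out back to back and followed by their oat files.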
static void VerifyBootImagesContiguity(const std::vector<gc::space::ImageSpace*>& image_spaces) {
  uint32_t boot_image_size = 0u;
  for (size_t i = 0u, num_spaces = image_spaces.size(); i != num_spaces; ) {
    const ImageHeader& image_header = image_spaces[i]->GetImageHeader();
    uint32_t reservation_size = image_header.GetImageReservationSize();
    uint32_t image_count = image_header.GetImageSpaceCount();

    CHECK_NE(image_count, 0u);
    CHECK_LE(image_count, num_spaces - i);
    CHECK_NE(reservation_size, 0u);
    for (size_t j = 1u; j != image_count; ++j) {
      CHECK_EQ(image_spaces[i + j]->GetImageHeader().GetComponentCount(), 0u);
      CHECK_EQ(image_spaces[i + j]->GetImageHeader().GetImageReservationSize(), 0u);
    }

    // Check the start of the heap.
    CHECK_EQ(image_spaces[0]->Begin() + boot_image_size, image_spaces[i]->Begin());
    // Check contiguous layout of images and oat files.
    const uint8_t* current_heap = image_spaces[i]->Begin();
    const uint8_t* current_oat = image_spaces[i]->GetImageHeader().GetOatFileBegin();
    for (size_t j = 0u; j != image_count; ++j) {
      const ImageHeader& current_header = image_spaces[i + j]->GetImageHeader();
      CHECK_EQ(current_heap, image_spaces[i + j]->Begin());
      CHECK_EQ(current_oat, current_header.GetOatFileBegin());
      current_heap += RoundUp(current_header.GetImageSize(), kPageSize);
      CHECK_GT(current_header.GetOatFileEnd(), current_header.GetOatFileBegin());
      current_oat = current_header.GetOatFileEnd();
    }
    // Check that oat files start at the end of images.
    CHECK_EQ(current_heap, image_spaces[i]->GetImageHeader().GetOatFileBegin());
    // Check that the reservation size equals the size of images and oat files.
    CHECK_EQ(reservation_size, static_cast<size_t>(current_oat - image_spaces[i]->Begin()));

    boot_image_size += reservation_size;
    i += image_count;
  }
}

Heap::Heap(size_t initial_size,
           size_t growth_limit,
           size_t min_free,
           size_t max_free,
           double target_utilization,
           double foreground_heap_growth_multiplier,
           size_t stop_for_native_allocs,
           size_t capacity,
           size_t non_moving_space_capacity,
           const std::vector<std::string>& boot_class_path,
           const std::vector<std::string>& boot_class_path_locations,
           const std::string& image_file_name,
           const InstructionSet image_instruction_set,
           CollectorType foreground_collector_type,
           CollectorType background_collector_type,
           space::LargeObjectSpaceType large_object_space_type,
           size_t large_object_threshold,
           size_t parallel_gc_threads,
           size_t conc_gc_threads,
           bool low_memory_mode,
           size_t long_pause_log_threshold,
           size_t long_gc_log_threshold,
           bool ignore_target_footprint,
           bool always_log_explicit_gcs,
           bool use_tlab,
           bool verify_pre_gc_heap,
           bool verify_pre_sweeping_heap,
           bool verify_post_gc_heap,
           bool verify_pre_gc_rosalloc,
           bool verify_pre_sweeping_rosalloc,
           bool verify_post_gc_rosalloc,
           bool gc_stress_mode,
           bool measure_gc_performance,
           bool use_homogeneous_space_compaction_for_oom,
           bool use_generational_cc,
           uint64_t min_interval_homogeneous_space_compaction_by_oom,
           bool dump_region_info_before_gc,
           bool dump_region_info_after_gc)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      collector_type_(kCollectorTypeNone),
      foreground_collector_type_(foreground_collector_type),
      background_collector_type_(background_collector_type),
      desired_collector_type_(foreground_collector_type_),
      pending_task_lock_(nullptr),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      process_cpu_start_time_ns_(ProcessCpuNanoTime()),
      pre_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
      post_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
      pre_gc_weighted_allocated_bytes_(0.0),
      post_gc_weighted_allocated_bytes_(0.0),
      ignore_target_footprint_(ignore_target_footprint),
      always_log_explicit_gcs_(always_log_explicit_gcs),
      zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
      zygote_space_(nullptr),
      large_object_threshold_(large_object_threshold),
      disable_thread_flip_count_(0),
      thread_flip_running_(false),
      collector_type_running_(kCollectorTypeNone),
      last_gc_cause_(kGcCauseNone),
      thread_running_gc_(nullptr),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      target_footprint_(initial_size),
      // Using kPostMonitorLock as the level because a lock at kDefaultMutexLevel is acquired
      // while holding this one.
      process_state_update_lock_("process state update lock", kPostMonitorLock),
      min_foreground_target_footprint_(0),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_registered_(0),
      old_native_bytes_allocated_(0),
      native_objects_notified_(0),
      num_bytes_freed_revoke_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      gc_stress_mode_(gc_stress_mode),
      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
          kDefaultAllocationStackSize),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      region_space_(nullptr),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
      stop_for_native_allocs_(stop_for_native_allocs),
      total_wait_time_(0),
      verify_object_mode_(kVerifyObjectModeDisabled),
      disable_moving_gc_count_(0),
      semi_space_collector_(nullptr),
      active_concurrent_copying_collector_(nullptr),
      young_concurrent_copying_collector_(nullptr),
      concurrent_copying_collector_(nullptr),
      is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
      use_tlab_(use_tlab),
      main_space_backup_(nullptr),
      min_interval_homogeneous_space_compaction_by_oom_(
          min_interval_homogeneous_space_compaction_by_oom),
      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
      gcs_completed_(0u),
      max_gc_requested_(0u),
      pending_collector_transition_(nullptr),
      pending_heap_trim_(nullptr),
      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
      use_generational_cc_(use_generational_cc),
      running_collection_is_blocking_(false),
      blocking_gc_count_(0U),
      blocking_gc_time_(0U),
      last_update_time_gc_count_rate_histograms_(  // Round down by the window duration.
          (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
      gc_count_last_window_(0U),
      blocking_gc_count_last_window_(0U),
      gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
      blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
                                        kGcCountRateMaxBucketCount),
      alloc_tracking_enabled_(false),
      alloc_record_depth_(AllocRecordObjectMap::kDefaultAllocStackDepth),
      backtrace_lock_(nullptr),
      seen_backtrace_count_(0u),
      unique_backtrace_count_(0u),
      gc_disabled_for_shutdown_(false),
      dump_region_info_before_gc_(dump_region_info_before_gc),
      dump_region_info_after_gc_(dump_region_info_after_gc),
      boot_image_spaces_(),
      boot_images_start_address_(0u),
      boot_images_size_(0u) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  if (kUseReadBarrier) {
    CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
    CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
  } else if (background_collector_type_ != gc::kCollectorTypeHomogeneousSpaceCompact) {
    CHECK_EQ(IsMovingGc(foreground_collector_type_), IsMovingGc(background_collector_type_))
        << "Changing from " << foreground_collector_type_ << " to "
        << background_collector_type_ << " (or vice versa) is not supported.";
  }
  verification_.reset(new Verification(this));
  CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
  ScopedTrace trace(__FUNCTION__);
  Runtime* const runtime = Runtime::Current();
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  const bool is_zygote = runtime->IsZygote();
  if (!is_zygote) {
    // Background compaction is currently not supported for command line runs.
    if (background_collector_type_ != foreground_collector_type_) {
      VLOG(heap) << "Disabling background compaction for non zygote";
      background_collector_type_ = foreground_collector_type_;
    }
  }
  ChangeCollector(desired_collector_type_);
  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));

  // We don't have hspace compaction enabled with CC.
  if (foreground_collector_type_ == kCollectorTypeCC) {
    use_homogeneous_space_compaction_for_oom_ = false;
  }
  bool support_homogeneous_space_compaction =
      background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
      use_homogeneous_space_compaction_for_oom_;
  // We may use the same space as the main space for the non moving space if we don't need to
  // compact from the main space.
  // This is not the case if we support homogeneous compaction or have a moving background
  // collector type.
  bool separate_non_moving_space = is_zygote ||
      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
      IsMovingGc(background_collector_type_);

  // Requested begin for the alloc space, to follow the mapped image and oat files.
  uint8_t* request_begin = nullptr;
  // Calculate the extra space required after the boot image, see allocations below.
  size_t heap_reservation_size = 0u;
  if (separate_non_moving_space) {
    heap_reservation_size = non_moving_space_capacity;
  } else if (foreground_collector_type_ != kCollectorTypeCC && is_zygote) {
    heap_reservation_size = capacity_;
  }
  heap_reservation_size = RoundUp(heap_reservation_size, kPageSize);
  // Load image space(s).
  std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
  MemMap heap_reservation;
  if (space::ImageSpace::LoadBootImage(boot_class_path,
                                       boot_class_path_locations,
                                       image_file_name,
                                       image_instruction_set,
                                       runtime->ShouldRelocate(),
                                       /*executable=*/ !runtime->IsAotCompiler(),
                                       heap_reservation_size,
                                       &boot_image_spaces,
                                       &heap_reservation)) {
    DCHECK_EQ(heap_reservation_size, heap_reservation.IsValid() ? heap_reservation.Size() : 0u);
    DCHECK(!boot_image_spaces.empty());
    request_begin = boot_image_spaces.back()->GetImageHeader().GetOatFileEnd();
    DCHECK(!heap_reservation.IsValid() || request_begin == heap_reservation.Begin())
        << "request_begin=" << static_cast<const void*>(request_begin)
        << " heap_reservation.Begin()=" << static_cast<const void*>(heap_reservation.Begin());
    for (std::unique_ptr<space::ImageSpace>& space : boot_image_spaces) {
      boot_image_spaces_.push_back(space.get());
      AddSpace(space.release());
    }
    boot_images_start_address_ = PointerToLowMemUInt32(boot_image_spaces_.front()->Begin());
    uint32_t boot_images_end =
        PointerToLowMemUInt32(boot_image_spaces_.back()->GetImageHeader().GetOatFileEnd());
    boot_images_size_ = boot_images_end - boot_images_start_address_;
    if (kIsDebugBuild) {
      VerifyBootImagesContiguity(boot_image_spaces_);
    }
  } else {
    if (foreground_collector_type_ == kCollectorTypeCC) {
      // Need to use a low address so that we can allocate a contiguous 2 * Xmx space
      // when there's no image (dex2oat for target).
      request_begin = kPreferredAllocSpaceBegin;
    }
    // Gross hack to make dex2oat deterministic.
    if (foreground_collector_type_ == kCollectorTypeMS && Runtime::Current()->IsAotCompiler()) {
      // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
      // b/26849108
      request_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
    }
  }

  /*
  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-  nonmoving space (non_moving_space_capacity)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space / bump space 1 (capacity_) +-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space2 / bump space 2 (capacity_)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
  */

  MemMap main_mem_map_1;
  MemMap main_mem_map_2;

  std::string error_str;
  MemMap non_moving_space_mem_map;
  if (separate_non_moving_space) {
    ScopedTrace trace2("Create separate non moving space");
    // If we are the zygote, the non moving space becomes the zygote space when we run
    // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
    // rename the mem map later.
    const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
    // Reserve the non moving mem map before the other two since it needs to be at a specific
    // address.
    DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
    if (heap_reservation.IsValid()) {
      non_moving_space_mem_map = heap_reservation.RemapAtEnd(
          heap_reservation.Begin(), space_name, PROT_READ | PROT_WRITE, &error_str);
    } else {
      non_moving_space_mem_map = MapAnonymousPreferredAddress(
          space_name, request_begin, non_moving_space_capacity, &error_str);
    }
    CHECK(non_moving_space_mem_map.IsValid()) << error_str;
    DCHECK(!heap_reservation.IsValid());
    // Try to reserve virtual memory at a lower address if we have a separate non moving space.
    request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
  }
  // Attempt to create 2 mem maps at or after the requested begin.
  if (foreground_collector_type_ != kCollectorTypeCC) {
    ScopedTrace trace2("Create main mem map");
    if (separate_non_moving_space || !is_zygote) {
      main_mem_map_1 = MapAnonymousPreferredAddress(
          kMemMapSpaceName[0], request_begin, capacity_, &error_str);
    } else {
      // If no separate non-moving space and we are the zygote, the main space must come right after
      // the image space to avoid a gap. This is required since we want the zygote space to be
      // adjacent to the image space.
      DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
      main_mem_map_1 = MemMap::MapAnonymous(
          kMemMapSpaceName[0],
          request_begin,
          capacity_,
          PROT_READ | PROT_WRITE,
          /* low_4gb= */ true,
          /* reuse= */ false,
          heap_reservation.IsValid() ? &heap_reservation : nullptr,
          &error_str);
    }
    CHECK(main_mem_map_1.IsValid()) << error_str;
    DCHECK(!heap_reservation.IsValid());
  }
  if (support_homogeneous_space_compaction ||
      background_collector_type_ == kCollectorTypeSS ||
      foreground_collector_type_ == kCollectorTypeSS) {
    ScopedTrace trace2("Create main mem map 2");
    main_mem_map_2 = MapAnonymousPreferredAddress(
        kMemMapSpaceName[1], main_mem_map_1.End(), capacity_, &error_str);
    CHECK(main_mem_map_2.IsValid()) << error_str;
  }

  // Create the non moving space first so that bitmaps don't take up the address range.
  if (separate_non_moving_space) {
    ScopedTrace trace2("Add non moving space");
    // Non moving space is always dlmalloc since we currently don't have support for multiple
    // active rosalloc spaces.
    const size_t size = non_moving_space_mem_map.Size();
    const void* non_moving_space_mem_map_begin = non_moving_space_mem_map.Begin();
    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(std::move(non_moving_space_mem_map),
                                                               "zygote / non moving space",
                                                               kDefaultStartingSize,
                                                               initial_size,
                                                               size,
                                                               size,
                                                               /* can_move_objects= */ false);
    CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
                                        << non_moving_space_mem_map_begin;
    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
    AddSpace(non_moving_space_);
  }
  // Create other spaces based on whether or not we have a moving GC.
  if (foreground_collector_type_ == kCollectorTypeCC) {
    CHECK(separate_non_moving_space);
    // Reserve twice the capacity, to allow evacuating every region for explicit GCs.
    MemMap region_space_mem_map =
        space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin);
    CHECK(region_space_mem_map.IsValid()) << "No region space mem map";
    region_space_ = space::RegionSpace::Create(
        kRegionSpaceName, std::move(region_space_mem_map), use_generational_cc_);
    AddSpace(region_space_);
  } else if (IsMovingGc(foreground_collector_type_)) {
    // Create bump pointer spaces.
    // We only need to create the bump pointer spaces if the foreground collector is a
    // compacting GC.
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
                                                                    std::move(main_mem_map_1));
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
                                                            std::move(main_mem_map_2));
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    CHECK(separate_non_moving_space);
  } else {
    CreateMainMallocSpace(std::move(main_mem_map_1), initial_size, growth_limit_, capacity_);
    CHECK(main_space_ != nullptr);
    AddSpace(main_space_);
    if (!separate_non_moving_space) {
      non_moving_space_ = main_space_;
      CHECK(!non_moving_space_->CanMoveObjects());
    }
    if (main_mem_map_2.IsValid()) {
      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
      main_space_backup_.reset(CreateMallocSpaceFromMemMap(std::move(main_mem_map_2),
                                                           initial_size,
                                                           growth_limit_,
                                                           capacity_,
                                                           name,
                                                           /* can_move_objects= */ true));
      CHECK(main_space_backup_.get() != nullptr);
      // Add the space so it's accounted for in the heap_begin and heap_end.
      AddSpace(main_space_backup_.get());
    }
  }
  CHECK(non_moving_space_ != nullptr);
  CHECK(!non_moving_space_->CanMoveObjects());
  // Allocate the large object space.
  if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
    large_object_space_ = space::FreeListSpace::Create("free list large object space", capacity_);
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
    large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else {
    // Disable the large object space by making the cutoff excessively large.
    large_object_threshold_ = std::numeric_limits<size_t>::max();
    large_object_space_ = nullptr;
  }
  if (large_object_space_ != nullptr) {
    AddSpace(large_object_space_);
  }
  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  uint8_t* heap_begin = continuous_spaces_.front()->Begin();
  uint8_t* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;
  // Remove the main backup space since it slows down the GC to have unused extra spaces.
  // TODO: Avoid needing to do this.
  if (main_space_backup_.get() != nullptr) {
    RemoveSpace(main_space_backup_.get());
  }
  // Allocate the card table.
  // We currently don't support dynamically resizing the card table.
  // Since we don't know where in the low_4gb the app image will be located, make the card table
  // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
  UNUSED(heap_capacity);
  // Start at 4 KB; we can be sure there are no spaces mapped this low since the address range is
  // reserved by the kernel.
  static constexpr size_t kMinHeapAddress = 4 * KB;
  card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
                                                  4 * GB - kMinHeapAddress));
  CHECK(card_table_.get() != nullptr) << "Failed to create card table";
  if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
    rb_table_.reset(new accounting::ReadBarrierTable());
    DCHECK(rb_table_->IsAllCleared());
  }
  if (HasBootImageSpace()) {
    // Don't add the image mod union table if we are running without an image; this can crash if
    // we use the CardCache implementation.
    for (space::ImageSpace* image_space : GetBootImageSpaces()) {
      accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
          "Image mod-union table", this, image_space);
      CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
      AddModUnionTable(mod_union_table);
    }
  }
  if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
    accounting::RememberedSet* non_moving_space_rem_set =
        new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
    CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
    AddRememberedSet(non_moving_space_rem_set);
  }
  // TODO: Count objects in the image space here?
  num_bytes_allocated_.store(0, std::memory_order_relaxed);
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
                                                    kDefaultMarkStackSize));
  const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
  allocation_stack_.reset(accounting::ObjectStack::Create(
      "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
  live_stack_.reset(accounting::ObjectStack::Create(
      "live stack", max_allocation_stack_size_, alloc_stack_capacity));
  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));

  thread_flip_lock_ = new Mutex("GC thread flip lock");
  thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
                                                *thread_flip_lock_));
  task_processor_.reset(new TaskProcessor());
  reference_processor_.reset(new ReferenceProcessor());
  pending_task_lock_ = new Mutex("Pending task lock");
  if (ignore_target_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(target_footprint_.load(std::memory_order_relaxed), 0U);
  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
        (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
      garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
    }
  }
  if (kMovingCollector) {
    if (MayUseCollector(kCollectorTypeSS) ||
        MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
        use_homogeneous_space_compaction_for_oom_) {
      semi_space_collector_ = new collector::SemiSpace(this);
      garbage_collectors_.push_back(semi_space_collector_);
    }
    if (MayUseCollector(kCollectorTypeCC)) {
      concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
                                                                       /*young_gen=*/false,
                                                                       use_generational_cc_,
                                                                       "",
                                                                       measure_gc_performance);
      if (use_generational_cc_) {
        young_concurrent_copying_collector_ = new collector::ConcurrentCopying(
            this,
            /*young_gen=*/true,
            use_generational_cc_,
            "young",
            measure_gc_performance);
      }
      active_concurrent_copying_collector_.store(concurrent_copying_collector_,
                                                 std::memory_order_relaxed);
      DCHECK(region_space_ != nullptr);
      concurrent_copying_collector_->SetRegionSpace(region_space_);
      if (use_generational_cc_) {
        young_concurrent_copying_collector_->SetRegionSpace(region_space_);
        // At this point, non-moving space should be created.
        DCHECK(non_moving_space_ != nullptr);
        concurrent_copying_collector_->CreateInterRegionRefBitmaps();
      }
      garbage_collectors_.push_back(concurrent_copying_collector_);
      if (use_generational_cc_) {
        garbage_collectors_.push_back(young_concurrent_copying_collector_);
      }
    }
  }
  if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
      (is_zygote || separate_non_moving_space)) {
    // Check that there's no gap between the image space and the non moving space so that the
    // immune region won't break (e.g. due to a large object allocated in the gap). This is only
    // required when we're the zygote.
    // Space with smallest Begin().
    space::ImageSpace* first_space = nullptr;
    for (space::ImageSpace* space : boot_image_spaces_) {
      if (first_space == nullptr || space->Begin() < first_space->Begin()) {
        first_space = space;
      }
    }
    bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
    if (!no_gap) {
      PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
      MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse= */ true);
      LOG(FATAL) << "There's a gap between the image space and the non-moving space";
    }
  }
  // Perfetto Java Heap Profiler Support.
  if (runtime->IsPerfettoJavaHeapStackProfEnabled()) {
    // Perfetto Plugin is loaded and enabled, initialize the Java Heap Profiler.
    InitPerfettoJavaHeapProf();
  } else {
    // Disable the Java Heap Profiler.
    GetHeapSampler().DisableHeapSampler();
  }

  instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
  if (gc_stress_mode_) {
    backtrace_lock_ = new Mutex("GC complete lock");
  }
  if (is_running_on_memory_tool_ || gc_stress_mode_) {
    instrumentation->InstrumentQuickAllocEntryPoints();
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

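// Map an anonymous region in the low 4GB, preferring (but not requiring) request_begin; if the
// preferred address is unavailable, retry letting the kernel choose the address.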
MemMap Heap::MapAnonymousPreferredAddress(const char* name,
                                          uint8_t* request_begin,
                                          size_t capacity,
                                          std::string* out_error_str) {
  while (true) {
    MemMap map = MemMap::MapAnonymous(name,
                                      request_begin,
                                      capacity,
                                      PROT_READ | PROT_WRITE,
                                      /*low_4gb=*/ true,
                                      /*reuse=*/ false,
                                      /*reservation=*/ nullptr,
                                      out_error_str);
    if (map.IsValid() || request_begin == nullptr) {
      return map;
    }
    // Retry a second time with no specified request begin.
    request_begin = nullptr;
  }
}

bool Heap::MayUseCollector(CollectorType type) const {
  return foreground_collector_type_ == type || background_collector_type_ == type;
}

space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap&& mem_map,
                                                      size_t initial_size,
                                                      size_t growth_limit,
                                                      size_t capacity,
                                                      const char* name,
                                                      bool can_move_objects) {
  space::MallocSpace* malloc_space = nullptr;
  if (kUseRosAlloc) {
    // Create rosalloc space.
    malloc_space = space::RosAllocSpace::CreateFromMemMap(std::move(mem_map),
                                                          name,
                                                          kDefaultStartingSize,
                                                          initial_size,
                                                          growth_limit,
                                                          capacity,
                                                          low_memory_mode_,
                                                          can_move_objects);
  } else {
    malloc_space = space::DlMallocSpace::CreateFromMemMap(std::move(mem_map),
                                                          name,
                                                          kDefaultStartingSize,
                                                          initial_size,
                                                          growth_limit,
                                                          capacity,
                                                          can_move_objects);
  }
  if (collector::SemiSpace::kUseRememberedSet) {
    accounting::RememberedSet* rem_set =
        new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
    CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
    AddRememberedSet(rem_set);
  }
  CHECK(malloc_space != nullptr) << "Failed to create " << name;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  return malloc_space;
}

void Heap::CreateMainMallocSpace(MemMap&& mem_map,
                                 size_t initial_size,
                                 size_t growth_limit,
                                 size_t capacity) {
  // Is background compaction enabled?
  bool can_move_objects = IsMovingGc(background_collector_type_) !=
      IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
  // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
  // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
  // from the main space to the zygote space. If background compaction is enabled, always pass in
  // that we can move objects.
  if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
    // After the zygote we want this to be false if we don't have background compaction enabled so
    // that getting primitive array elements is faster.
    can_move_objects = !HasZygoteSpace();
  }
  if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
    RemoveRememberedSet(main_space_);
  }
  const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
  main_space_ = CreateMallocSpaceFromMemMap(std::move(mem_map),
                                            initial_size,
                                            growth_limit,
                                            capacity, name,
                                            can_move_objects);
  SetSpaceAsDefault(main_space_);
  VLOG(heap) << "Created main space " << main_space_;
}

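// Changing the allocator also requires updating the quick allocation entrypoints so that compiled
// code allocates through the new allocator.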
void Heap::ChangeAllocator(AllocatorType allocator) {
  if (current_allocator_ != allocator) {
    // These two allocators are only used internally and don't have any entrypoints.
    CHECK_NE(allocator, kAllocatorTypeLOS);
    CHECK_NE(allocator, kAllocatorTypeNonMoving);
    current_allocator_ = allocator;
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

bool Heap::IsCompilingBoot() const {
  if (!Runtime::Current()->IsAotCompiler()) {
    return false;
  }
  ScopedObjectAccess soa(Thread::Current());
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace() || space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  if (IsMovingGc(collector_type_running_)) {
    WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
  }
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GT(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}

void Heap::IncrementDisableThreadFlip(Thread* self) {
  // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
  CHECK(kUseReadBarrier);
  bool is_nested = self->GetDisableThreadFlipCount() > 0;
  self->IncrementDisableThreadFlipCount();
  if (is_nested) {
    // If this is a nested JNI critical section enter, we don't need to wait or increment the global
    // counter. The global counter is incremented only once for a thread for the outermost enter.
    return;
  }
  ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
  MutexLock mu(self, *thread_flip_lock_);
  thread_flip_cond_->CheckSafeToWait(self);
  bool has_waited = false;
  uint64_t wait_start = 0;
  if (thread_flip_running_) {
    wait_start = NanoTime();
    ScopedTrace trace("IncrementDisableThreadFlip");
    while (thread_flip_running_) {
      has_waited = true;
      thread_flip_cond_->Wait(self);
    }
  }
  ++disable_thread_flip_count_;
  if (has_waited) {
    uint64_t wait_time = NanoTime() - wait_start;
    total_wait_time_ += wait_time;
    if (wait_time > long_pause_log_threshold_) {
      LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
    }
  }
}

void Heap::DecrementDisableThreadFlip(Thread* self) {
  // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up
  // the GC waiting before doing a thread flip.
  CHECK(kUseReadBarrier);
  self->DecrementDisableThreadFlipCount();
  bool is_outermost = self->GetDisableThreadFlipCount() == 0;
  if (!is_outermost) {
    // If this is not an outermost JNI critical exit, we don't need to decrement the global counter.
    // The global counter is decremented only once for a thread for the outermost exit.
    return;
  }
  MutexLock mu(self, *thread_flip_lock_);
  CHECK_GT(disable_thread_flip_count_, 0U);
  --disable_thread_flip_count_;
  if (disable_thread_flip_count_ == 0) {
    // Potentially notify the GC thread blocking to begin a thread flip.
    thread_flip_cond_->Broadcast(self);
  }
}

void Heap::ThreadFlipBegin(Thread* self) {
  // Supposed to be called by GC. Set thread_flip_running_ to be true. If disable_thread_flip_count_
  // > 0, block. Otherwise, go ahead.
  CHECK(kUseReadBarrier);
  ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
  MutexLock mu(self, *thread_flip_lock_);
  thread_flip_cond_->CheckSafeToWait(self);
  bool has_waited = false;
  uint64_t wait_start = NanoTime();
  CHECK(!thread_flip_running_);
  // Set this to true before waiting so that frequent JNI critical enter/exits won't starve
  // GC. This is like the writer preference of a reader-writer lock.
  thread_flip_running_ = true;
  while (disable_thread_flip_count_ > 0) {
    has_waited = true;
    thread_flip_cond_->Wait(self);
  }
  if (has_waited) {
    uint64_t wait_time = NanoTime() - wait_start;
    total_wait_time_ += wait_time;
    if (wait_time > long_pause_log_threshold_) {
      LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
    }
  }
}

void Heap::ThreadFlipEnd(Thread* self) {
  // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators
  // waiting before doing a JNI critical.
  CHECK(kUseReadBarrier);
  MutexLock mu(self, *thread_flip_lock_);
  CHECK(thread_flip_running_);
  thread_flip_running_ = false;
  // Potentially notify mutator threads blocking to enter a JNI critical section.
  thread_flip_cond_->Broadcast(self);
}

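// Called when the process becomes jank perceptible: raise the target footprint to at least
// min_foreground_target_footprint_ so the foreground app has headroom before its next GC.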
void Heap::GrowHeapOnJankPerceptibleSwitch() {
  MutexLock mu(Thread::Current(), process_state_update_lock_);
  size_t orig_target_footprint = target_footprint_.load(std::memory_order_relaxed);
  if (orig_target_footprint < min_foreground_target_footprint_) {
    target_footprint_.compare_exchange_strong(orig_target_footprint,
                                              min_foreground_target_footprint_,
                                              std::memory_order_relaxed);
  }
  min_foreground_target_footprint_ = 0;
}

void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
  if (old_process_state != new_process_state) {
    const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
    if (jank_perceptible) {
      // Transition back to foreground right away to prevent jank.
      RequestCollectorTransition(foreground_collector_type_, 0);
      GrowHeapOnJankPerceptibleSwitch();
    } else {
      // Don't delay for debug builds since we may want to stress test the GC.
      // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
      // special handling which does a homogeneous space compaction once but then doesn't
      // transition the collector. Similarly, we invoke a full compaction for kCollectorTypeCC but
      // don't transition the collector.
      RequestCollectorTransition(background_collector_type_,
                                 kStressCollectorTransition
                                     ? 0
                                     : kCollectorTransitionWait);
    }
  }
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
  space::ContinuousSpace* space2 = non_moving_space_;
  // TODO: Generalize this to n bitmaps?
  CHECK(space1 != nullptr);
  CHECK(space2 != nullptr);
  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
                 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
                 stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

void Heap::AddSpace(space::Space* space) {
  CHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    // The region space bitmap is not added since VisitObjects visits the region space objects with
    // special handling.
    if (live_bitmap != nullptr && !space->IsRegionSpace()) {
      CHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }
    continuous_spaces_.push_back(continuous_space);
    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
                return a->Begin() < b->Begin();
              });
  } else {
    CHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (continuous_space->IsDlMallocSpace()) {
    dlmalloc_space_ = continuous_space->AsDlMallocSpace();
  } else if (continuous_space->IsRosAllocSpace()) {
    rosalloc_space_ = continuous_space->AsRosAllocSpace();
  }
}

void Heap::RemoveSpace(space::Space* space) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr && !space->IsRegionSpace()) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
    }
    auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
    DCHECK(it != continuous_spaces_.end());
    continuous_spaces_.erase(it);
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
                        discontinuous_space);
    DCHECK(it != discontinuous_spaces_.end());
    discontinuous_spaces_.erase(it);
  }
  if (space->IsAllocSpace()) {
    auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
    DCHECK(it != alloc_spaces_.end());
    alloc_spaces_.erase(it);
  }
}

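// Weight the current allocated byte count by the process CPU time elapsed since the given
// pre/post GC sample; used to compute time-averaged heap occupancy.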
CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,uint64_t current_process_cpu_time) const1160 double Heap::CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
1161 uint64_t current_process_cpu_time) const {
1162 uint64_t bytes_allocated = GetBytesAllocated();
1163 double weight = current_process_cpu_time - gc_last_process_cpu_time_ns;
1164 return weight * bytes_allocated;
1165 }
1166
CalculatePreGcWeightedAllocatedBytes()1167 void Heap::CalculatePreGcWeightedAllocatedBytes() {
1168 uint64_t current_process_cpu_time = ProcessCpuNanoTime();
1169 pre_gc_weighted_allocated_bytes_ +=
1170 CalculateGcWeightedAllocatedBytes(pre_gc_last_process_cpu_time_ns_, current_process_cpu_time);
1171 pre_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
1172 }
1173
CalculatePostGcWeightedAllocatedBytes()1174 void Heap::CalculatePostGcWeightedAllocatedBytes() {
1175 uint64_t current_process_cpu_time = ProcessCpuNanoTime();
1176 post_gc_weighted_allocated_bytes_ +=
1177 CalculateGcWeightedAllocatedBytes(post_gc_last_process_cpu_time_ns_, current_process_cpu_time);
1178 post_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
1179 }
1180
GetTotalGcCpuTime()1181 uint64_t Heap::GetTotalGcCpuTime() {
1182 uint64_t sum = 0;
1183 for (auto* collector : garbage_collectors_) {
1184 sum += collector->GetTotalCpuTime();
1185 }
1186 return sum;
1187 }
1188
DumpGcPerformanceInfo(std::ostream & os)1189 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
1190 // Dump cumulative timings.
1191 os << "Dumping cumulative Gc timings\n";
1192 uint64_t total_duration = 0;
1193 // Dump cumulative loggers for each GC type.
1194 uint64_t total_paused_time = 0;
1195 for (auto* collector : garbage_collectors_) {
1196 total_duration += collector->GetCumulativeTimings().GetTotalNs();
1197 total_paused_time += collector->GetTotalPausedTimeNs();
1198 collector->DumpPerformanceInfo(os);
1199 }
1200 if (total_duration != 0) {
1201 const double total_seconds = total_duration / 1.0e9;
1202 const double total_cpu_seconds = GetTotalGcCpuTime() / 1.0e9;
1203 os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
1204 os << "Mean GC size throughput: "
1205 << PrettySize(GetBytesFreedEver() / total_seconds) << "/s"
1206 << " per cpu-time: "
1207 << PrettySize(GetBytesFreedEver() / total_cpu_seconds) << "/s\n";
1208 os << "Mean GC object throughput: "
1209 << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
1210 }
1211 uint64_t total_objects_allocated = GetObjectsAllocatedEver();
1212 os << "Total number of allocations " << total_objects_allocated << "\n";
1213 os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
1214 os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
1215 os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
1216 os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
1217 os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
1218 os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
1219 os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
1220 if (HasZygoteSpace()) {
1221 os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
1222 }
1223 os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
1224 os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
1225 os << "Total GC count: " << GetGcCount() << "\n";
1226 os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
1227 os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
1228 os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
1229
1230 {
1231 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1232 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1233 os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1234 gc_count_rate_histogram_.DumpBins(os);
1235 os << "\n";
1236 }
1237 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1238 os << "Histogram of blocking GC count per "
1239 << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1240 blocking_gc_count_rate_histogram_.DumpBins(os);
1241 os << "\n";
1242 }
1243 }
1244
1245 if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
1246 rosalloc_space_->DumpStats(os);
1247 }
1248
1249 os << "Native bytes total: " << GetNativeBytes()
1250 << " registered: " << native_bytes_registered_.load(std::memory_order_relaxed) << "\n";
1251
1252 os << "Total native bytes at last GC: "
1253 << old_native_bytes_allocated_.load(std::memory_order_relaxed) << "\n";
1254
1255 BaseMutex::DumpAll(os);
1256 }
1257
1258 void Heap::ResetGcPerformanceInfo() {
1259 for (auto* collector : garbage_collectors_) {
1260 collector->ResetMeasurements();
1261 }
1262
1263 process_cpu_start_time_ns_ = ProcessCpuNanoTime();
1264
1265 pre_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
1266 pre_gc_weighted_allocated_bytes_ = 0u;
1267
1268 post_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
1269 post_gc_weighted_allocated_bytes_ = 0u;
1270
1271 total_bytes_freed_ever_.store(0);
1272 total_objects_freed_ever_.store(0);
1273 total_wait_time_ = 0;
1274 blocking_gc_count_ = 0;
1275 blocking_gc_time_ = 0;
1276 gc_count_last_window_ = 0;
1277 blocking_gc_count_last_window_ = 0;
1278 last_update_time_gc_count_rate_histograms_ = // Round down by the window duration.
1279 (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
1280 {
1281 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1282 gc_count_rate_histogram_.Reset();
1283 blocking_gc_count_rate_histogram_.Reset();
1284 }
1285 }
1286
1287 uint64_t Heap::GetGcCount() const {
1288 uint64_t gc_count = 0U;
1289 for (auto* collector : garbage_collectors_) {
1290 gc_count += collector->GetCumulativeTimings().GetIterations();
1291 }
1292 return gc_count;
1293 }
1294
1295 uint64_t Heap::GetGcTime() const {
1296 uint64_t gc_time = 0U;
1297 for (auto* collector : garbage_collectors_) {
1298 gc_time += collector->GetCumulativeTimings().GetTotalNs();
1299 }
1300 return gc_time;
1301 }
1302
1303 uint64_t Heap::GetBlockingGcCount() const {
1304 return blocking_gc_count_;
1305 }
1306
1307 uint64_t Heap::GetBlockingGcTime() const {
1308 return blocking_gc_time_;
1309 }
1310
1311 void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
1312 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1313 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1314 gc_count_rate_histogram_.DumpBins(os);
1315 }
1316 }
1317
1318 void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
1319 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1320 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1321 blocking_gc_count_rate_histogram_.DumpBins(os);
1322 }
1323 }
1324
1325 ALWAYS_INLINE
1326 static inline AllocationListener* GetAndOverwriteAllocationListener(
1327 Atomic<AllocationListener*>* storage, AllocationListener* new_value) {
1328 return storage->exchange(new_value);
1329 }
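// The exchange() above atomically installs the new listener and hands back the previous one in a
// single step, so a caller can tell whether allocation instrumentation was already active without
// a separate load/store pair that could race with another installer.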
1330
1331 Heap::~Heap() {
1332 VLOG(heap) << "Starting ~Heap()";
1333 STLDeleteElements(&garbage_collectors_);
1334 // If we don't reset then the mark stack complains in its destructor.
1335 allocation_stack_->Reset();
1336 allocation_records_.reset();
1337 live_stack_->Reset();
1338 STLDeleteValues(&mod_union_tables_);
1339 STLDeleteValues(&remembered_sets_);
1340 STLDeleteElements(&continuous_spaces_);
1341 STLDeleteElements(&discontinuous_spaces_);
1342 delete gc_complete_lock_;
1343 delete thread_flip_lock_;
1344 delete pending_task_lock_;
1345 delete backtrace_lock_;
1346 uint64_t unique_count = unique_backtrace_count_.load();
1347 uint64_t seen_count = seen_backtrace_count_.load();
1348 if (unique_count != 0 || seen_count != 0) {
1349 LOG(INFO) << "gc stress unique=" << unique_count << " total=" << (unique_count + seen_count);
1350 }
1351 VLOG(heap) << "Finished ~Heap()";
1352 }
1353
1354
1355 space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const {
1356 for (const auto& space : continuous_spaces_) {
1357 if (space->Contains(addr)) {
1358 return space;
1359 }
1360 }
1361 return nullptr;
1362 }
1363
1364 space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1365 bool fail_ok) const {
1366 space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr());
1367 if (space != nullptr) {
1368 return space;
1369 }
1370 if (!fail_ok) {
1371 LOG(FATAL) << "object " << obj << " not inside any spaces!";
1372 }
1373 return nullptr;
1374 }
1375
1376 space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1377 bool fail_ok) const {
1378 for (const auto& space : discontinuous_spaces_) {
1379 if (space->Contains(obj.Ptr())) {
1380 return space;
1381 }
1382 }
1383 if (!fail_ok) {
1384 LOG(FATAL) << "object " << obj << " not inside any spaces!";
1385 }
1386 return nullptr;
1387 }
1388
1389 space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const {
1390 space::Space* result = FindContinuousSpaceFromObject(obj, true);
1391 if (result != nullptr) {
1392 return result;
1393 }
1394 return FindDiscontinuousSpaceFromObject(obj, fail_ok);
1395 }
1396
1397 space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
1398 for (const auto& space : continuous_spaces_) {
1399 if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1400 return space;
1401 }
1402 }
1403 for (const auto& space : discontinuous_spaces_) {
1404 if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1405 return space;
1406 }
1407 }
1408 return nullptr;
1409 }
1410
1411 std::string Heap::DumpSpaceNameFromAddress(const void* addr) const {
1412 space::Space* space = FindSpaceFromAddress(addr);
1413 return (space != nullptr) ? space->GetName() : "no space";
1414 }
1415
1416 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
1417 // If we're in a stack overflow, do not create a new exception. It would require running the
1418 // constructor, which will of course still be in a stack overflow.
1419 if (self->IsHandlingStackOverflow()) {
1420 self->SetException(
1421 Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow());
1422 return;
1423 }
1424
1425 std::ostringstream oss;
1426 size_t total_bytes_free = GetFreeMemory();
1427 oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
1428 << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
1429 << " target footprint " << target_footprint_.load(std::memory_order_relaxed)
1430 << ", growth limit "
1431 << growth_limit_;
1432 // If the allocation failed due to fragmentation, print out the largest free contiguous chunk.
1433 if (total_bytes_free >= byte_count) {
1434 space::AllocSpace* space = nullptr;
1435 if (allocator_type == kAllocatorTypeNonMoving) {
1436 space = non_moving_space_;
1437 } else if (allocator_type == kAllocatorTypeRosAlloc ||
1438 allocator_type == kAllocatorTypeDlMalloc) {
1439 space = main_space_;
1440 } else if (allocator_type == kAllocatorTypeBumpPointer ||
1441 allocator_type == kAllocatorTypeTLAB) {
1442 space = bump_pointer_space_;
1443 } else if (allocator_type == kAllocatorTypeRegion ||
1444 allocator_type == kAllocatorTypeRegionTLAB) {
1445 space = region_space_;
1446 }
1447
1448 // There is no fragmentation info to log for large-object space.
1449 if (allocator_type != kAllocatorTypeLOS) {
1450 CHECK(space != nullptr) << "allocator_type:" << allocator_type
1451 << " byte_count:" << byte_count
1452 << " total_bytes_free:" << total_bytes_free;
1453 // LogFragmentationAllocFailure returns true if byte_count is greater than
1454 // the largest free contiguous chunk in the space. Return value false
1455 // means that we are throwing OOME because the amount of free heap after
1456 // GC is less than kMinFreeHeapAfterGcForAlloc in proportion of the heap-size.
1457 // Log an appropriate message in that case.
1458 if (!space->LogFragmentationAllocFailure(oss, byte_count)) {
1459 oss << "; giving up on allocation because <"
1460 << kMinFreeHeapAfterGcForAlloc * 100
1461 << "% of heap free after GC.";
1462 }
1463 }
1464 }
1465 self->ThrowOutOfMemoryError(oss.str().c_str());
1466 }
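// Example of the message produced above, with made-up numbers purely for illustration:
//   "Failed to allocate a 65536 byte allocation with 12288 free bytes and 2MB until OOM,
//    target footprint 268435456, growth limit 268435456"
// followed either by the space's own fragmentation report or by the "giving up on allocation"
// note when less than kMinFreeHeapAfterGcForAlloc of the heap is free after GC.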
1467
1468 void Heap::DoPendingCollectorTransition() {
1469 CollectorType desired_collector_type = desired_collector_type_;
1470 // Launch homogeneous space compaction if it is desired.
1471 if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
1472 if (!CareAboutPauseTimes()) {
1473 PerformHomogeneousSpaceCompact();
1474 } else {
1475 VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
1476 }
1477 } else if (desired_collector_type == kCollectorTypeCCBackground) {
1478 DCHECK(kUseReadBarrier);
1479 if (!CareAboutPauseTimes()) {
1480 // Invoke CC full compaction.
1481 CollectGarbageInternal(collector::kGcTypeFull,
1482 kGcCauseCollectorTransition,
1483 /*clear_soft_references=*/false, GC_NUM_ANY);
1484 } else {
1485 VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
1486 }
1487 } else {
1488 CHECK_EQ(desired_collector_type, collector_type_) << "Unsupported collector transition";
1489 }
1490 }
1491
1492 void Heap::Trim(Thread* self) {
1493 Runtime* const runtime = Runtime::Current();
1494 if (!CareAboutPauseTimes()) {
1495 // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
1496 // about pauses.
1497 ScopedTrace trace("Deflating monitors");
1498 // Avoid race conditions on the lock word for CC.
1499 ScopedGCCriticalSection gcs(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1500 ScopedSuspendAll ssa(__FUNCTION__);
1501 uint64_t start_time = NanoTime();
1502 size_t count = runtime->GetMonitorList()->DeflateMonitors();
1503 VLOG(heap) << "Deflating " << count << " monitors took "
1504 << PrettyDuration(NanoTime() - start_time);
1505 }
1506 TrimIndirectReferenceTables(self);
1507 TrimSpaces(self);
1508 // Trim arenas that may have been used by JIT or verifier.
1509 runtime->GetArenaPool()->TrimMaps();
1510 }
1511
1512 class TrimIndirectReferenceTableClosure : public Closure {
1513 public:
1514 explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1515 }
1516 void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
1517 thread->GetJniEnv()->TrimLocals();
1518 // If thread is a running mutator, then act on behalf of the trim thread.
1519 // See the code in ThreadList::RunCheckpoint.
1520 barrier_->Pass(Thread::Current());
1521 }
1522
1523 private:
1524 Barrier* const barrier_;
1525 };
1526
1527 void Heap::TrimIndirectReferenceTables(Thread* self) {
1528 ScopedObjectAccess soa(self);
1529 ScopedTrace trace(__PRETTY_FUNCTION__);
1530 JavaVMExt* vm = soa.Vm();
1531 // Trim globals indirect reference table.
1532 vm->TrimGlobals();
1533 // Trim locals indirect reference tables.
1534 Barrier barrier(0);
1535 TrimIndirectReferenceTableClosure closure(&barrier);
1536 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1537 size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1538 if (barrier_count != 0) {
1539 barrier.Increment(self, barrier_count);
1540 }
1541 }
1542
1543 void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
1544 // Need to do this before acquiring the locks since we don't want to get suspended while
1545 // holding any locks.
1546 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1547 MutexLock mu(self, *gc_complete_lock_);
1548 // Ensure there is only one GC at a time.
1549 WaitForGcToCompleteLocked(cause, self);
1550 collector_type_running_ = collector_type;
1551 last_gc_cause_ = cause;
1552 thread_running_gc_ = self;
1553 }
1554
1555 void Heap::TrimSpaces(Thread* self) {
1556 // Pretend we are doing a GC to prevent background compaction from deleting the space we are
1557 // trimming.
1558 StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1559 ScopedTrace trace(__PRETTY_FUNCTION__);
1560 const uint64_t start_ns = NanoTime();
1561 // Trim the managed spaces.
1562 uint64_t total_alloc_space_allocated = 0;
1563 uint64_t total_alloc_space_size = 0;
1564 uint64_t managed_reclaimed = 0;
1565 {
1566 ScopedObjectAccess soa(self);
1567 for (const auto& space : continuous_spaces_) {
1568 if (space->IsMallocSpace()) {
1569 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1570 if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1571 // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1572 // for a long period of time.
1573 managed_reclaimed += malloc_space->Trim();
1574 }
1575 total_alloc_space_size += malloc_space->Size();
1576 }
1577 }
1578 }
1579 total_alloc_space_allocated = GetBytesAllocated();
1580 if (large_object_space_ != nullptr) {
1581 total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1582 }
1583 if (bump_pointer_space_ != nullptr) {
1584 total_alloc_space_allocated -= bump_pointer_space_->Size();
1585 }
1586 if (region_space_ != nullptr) {
1587 total_alloc_space_allocated -= region_space_->GetBytesAllocated();
1588 }
1589 const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1590 static_cast<float>(total_alloc_space_size);
1591 uint64_t gc_heap_end_ns = NanoTime();
1592 // We never move things in the native heap, so we can finish the GC at this point.
1593 FinishGC(self, collector::kGcTypeNone);
1594
1595 VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1596 << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of "
1597 << static_cast<int>(100 * managed_utilization) << "%.";
1598 }
1599
1600 bool Heap::IsValidObjectAddress(const void* addr) const {
1601 if (addr == nullptr) {
1602 return true;
1603 }
1604 return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr;
1605 }
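// kObjectAlignment is a power of two, so the alignment test above is effectively
//   (reinterpret_cast<uintptr_t>(addr) & (kObjectAlignment - 1)) == 0,
// and nullptr is accepted deliberately since a null reference is always a valid "no object" value.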
1606
1607 bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const {
1608 return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr;
1609 }
1610
1611 bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
1612 bool search_allocation_stack,
1613 bool search_live_stack,
1614 bool sorted) {
1615 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) {
1616 return false;
1617 }
1618 if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) {
1619 mirror::Class* klass = obj->GetClass<kVerifyNone>();
1620 if (obj == klass) {
1621 // This case happens for java.lang.Class.
1622 return true;
1623 }
1624 return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1625 } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) {
1626 // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1627 // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
1628 return temp_space_->Contains(obj.Ptr());
1629 }
1630 if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) {
1631 return true;
1632 }
1633 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
1634 space::DiscontinuousSpace* d_space = nullptr;
1635 if (c_space != nullptr) {
1636 if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1637 return true;
1638 }
1639 } else {
1640 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1641 if (d_space != nullptr) {
1642 if (d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1643 return true;
1644 }
1645 }
1646 }
1647 // This is covering the allocation/live stack swapping that is done without mutators suspended.
1648 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1649 if (i > 0) {
1650 NanoSleep(MsToNs(10));
1651 }
1652 if (search_allocation_stack) {
1653 if (sorted) {
1654 if (allocation_stack_->ContainsSorted(obj.Ptr())) {
1655 return true;
1656 }
1657 } else if (allocation_stack_->Contains(obj.Ptr())) {
1658 return true;
1659 }
1660 }
1661
1662 if (search_live_stack) {
1663 if (sorted) {
1664 if (live_stack_->ContainsSorted(obj.Ptr())) {
1665 return true;
1666 }
1667 } else if (live_stack_->Contains(obj.Ptr())) {
1668 return true;
1669 }
1670 }
1671 }
1672 // We need to check the bitmaps again since there is a race where we mark something as live and
1673 // then clear the stack containing it.
1674 if (c_space != nullptr) {
1675 if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1676 return true;
1677 }
1678 } else {
1679 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1680 if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1681 return true;
1682 }
1683 }
1684 return false;
1685 }
1686
1687 std::string Heap::DumpSpaces() const {
1688 std::ostringstream oss;
1689 DumpSpaces(oss);
1690 return oss.str();
1691 }
1692
1693 void Heap::DumpSpaces(std::ostream& stream) const {
1694 for (const auto& space : continuous_spaces_) {
1695 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1696 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1697 stream << space << " " << *space << "\n";
1698 if (live_bitmap != nullptr) {
1699 stream << live_bitmap << " " << *live_bitmap << "\n";
1700 }
1701 if (mark_bitmap != nullptr) {
1702 stream << mark_bitmap << " " << *mark_bitmap << "\n";
1703 }
1704 }
1705 for (const auto& space : discontinuous_spaces_) {
1706 stream << space << " " << *space << "\n";
1707 }
1708 }
1709
1710 void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
1711 if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1712 return;
1713 }
1714
1715 // Ignore early dawn of the universe verifications.
1716 if (UNLIKELY(num_bytes_allocated_.load(std::memory_order_relaxed) < 10 * KB)) {
1717 return;
1718 }
1719 CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
1720 mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1721 CHECK(c != nullptr) << "Null class in object " << obj;
1722 CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
1723 CHECK(VerifyClassClass(c));
1724
1725 if (verify_object_mode_ > kVerifyObjectModeFast) {
1726 // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1727 CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1728 }
1729 }
1730
1731 void Heap::VerifyHeap() {
1732 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1733 auto visitor = [&](mirror::Object* obj) {
1734 VerifyObjectBody(obj);
1735 };
1736 // Technically we need the mutator lock here to call Visit. However, VerifyObjectBody is already
1737 // NO_THREAD_SAFETY_ANALYSIS.
1738 auto no_thread_safety_analysis = [&]() NO_THREAD_SAFETY_ANALYSIS {
1739 GetLiveBitmap()->Visit(visitor);
1740 };
1741 no_thread_safety_analysis();
1742 }
1743
1744 void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
1745 // Use signed comparison since freed bytes can be negative when a background compaction to
1746 // foreground transition occurs. This is typically due to objects moving from a bump pointer space to a
1747 // free list backed space, which may increase memory footprint due to padding and binning.
1748 RACING_DCHECK_LE(freed_bytes,
1749 static_cast<int64_t>(num_bytes_allocated_.load(std::memory_order_relaxed)));
1750 // Note: This relies on two's complement arithmetic for handling negative freed_bytes.
1751 num_bytes_allocated_.fetch_sub(static_cast<ssize_t>(freed_bytes), std::memory_order_relaxed);
1752 if (Runtime::Current()->HasStatsEnabled()) {
1753 RuntimeStats* thread_stats = Thread::Current()->GetStats();
1754 thread_stats->freed_objects += freed_objects;
1755 thread_stats->freed_bytes += freed_bytes;
1756 // TODO: Do this concurrently.
1757 RuntimeStats* global_stats = Runtime::Current()->GetStats();
1758 global_stats->freed_objects += freed_objects;
1759 global_stats->freed_bytes += freed_bytes;
1760 }
1761 }
1762
1763 void Heap::RecordFreeRevoke() {
1764 // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
1765 // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
1766 // If there is a concurrent revoke, it is OK not to reset num_bytes_freed_revoke_ all the way
1767 // to zero, since the remainder will be subtracted at the next GC.
1768 size_t bytes_freed = num_bytes_freed_revoke_.load(std::memory_order_relaxed);
1769 CHECK_GE(num_bytes_freed_revoke_.fetch_sub(bytes_freed, std::memory_order_relaxed),
1770 bytes_freed) << "num_bytes_freed_revoke_ underflow";
1771 CHECK_GE(num_bytes_allocated_.fetch_sub(bytes_freed, std::memory_order_relaxed),
1772 bytes_freed) << "num_bytes_allocated_ underflow";
1773 GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1774 }
1775
1776 space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1777 if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) {
1778 return rosalloc_space_;
1779 }
1780 for (const auto& space : continuous_spaces_) {
1781 if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1782 if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1783 return space->AsContinuousSpace()->AsRosAllocSpace();
1784 }
1785 }
1786 }
1787 return nullptr;
1788 }
1789
1790 static inline bool EntrypointsInstrumented() REQUIRES_SHARED(Locks::mutator_lock_) {
1791 instrumentation::Instrumentation* const instrumentation =
1792 Runtime::Current()->GetInstrumentation();
1793 return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
1794 }
1795
1796 mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
1797 AllocatorType allocator,
1798 bool instrumented,
1799 size_t alloc_size,
1800 size_t* bytes_allocated,
1801 size_t* usable_size,
1802 size_t* bytes_tl_bulk_allocated,
1803 ObjPtr<mirror::Class>* klass) {
1804 bool was_default_allocator = allocator == GetCurrentAllocator();
1805 // Make sure there is no pending exception since we may need to throw an OOME.
1806 self->AssertNoPendingException();
1807 DCHECK(klass != nullptr);
1808
1809 StackHandleScope<1> hs(self);
1810 HandleWrapperObjPtr<mirror::Class> h_klass(hs.NewHandleWrapper(klass));
1811
1812 auto send_object_pre_alloc =
1813 [&]() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_) {
1814 if (UNLIKELY(instrumented)) {
1815 AllocationListener* l = alloc_listener_.load(std::memory_order_seq_cst);
1816 if (UNLIKELY(l != nullptr) && UNLIKELY(l->HasPreAlloc())) {
1817 l->PreObjectAllocated(self, h_klass, &alloc_size);
1818 }
1819 }
1820 };
1821 #define PERFORM_SUSPENDING_OPERATION(op) \
1822 [&]() REQUIRES(Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_) { \
1823 ScopedAllowThreadSuspension ats; \
1824 auto res = (op); \
1825 send_object_pre_alloc(); \
1826 return res; \
1827 }()
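// Usage sketch for the macro above: a call such as
//   collector::GcType last_gc = PERFORM_SUSPENDING_OPERATION(WaitForGcToComplete(cause, self));
// temporarily re-allows thread suspension around the wrapped operation and then re-delivers the
// PreObjectAllocated event, since a listener may have been installed (or may want to adjust
// alloc_size) while the thread was suspendable.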
1828
1829 // The allocation failed. If the GC is running, block until it completes, and then retry the
1830 // allocation.
1831 collector::GcType last_gc =
1832 PERFORM_SUSPENDING_OPERATION(WaitForGcToComplete(kGcCauseForAlloc, self));
1833 // If we were the default allocator but the allocator changed while we were suspended,
1834 // abort the allocation.
1835 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1836 (!instrumented && EntrypointsInstrumented())) {
1837 return nullptr;
1838 }
1839 uint32_t starting_gc_num = GetCurrentGcNum();
1840 if (last_gc != collector::kGcTypeNone) {
1841 // A GC was in progress and we blocked, retry allocation now that memory has been freed.
1842 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1843 usable_size, bytes_tl_bulk_allocated);
1844 if (ptr != nullptr) {
1845 return ptr;
1846 }
1847 }
1848
1849 auto have_reclaimed_enough = [&]() {
1850 size_t curr_bytes_allocated = GetBytesAllocated();
1851 double curr_free_heap =
1852 static_cast<double>(growth_limit_ - curr_bytes_allocated) / growth_limit_;
1853 return curr_free_heap >= kMinFreeHeapAfterGcForAlloc;
1854 };
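// Worked example for the predicate above, assuming (for illustration only) that
// kMinFreeHeapAfterGcForAlloc were 0.01 and growth_limit_ were 256 MiB: with 250 MiB still
// allocated after GC the free fraction is ~0.023 and the allocation is retried, whereas at
// 254 MiB it falls below 1% and the caller proceeds toward throwing OOME instead.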
1855 // We perform one GC as per the next_gc_type_ (chosen in GrowForUtilization),
1856 // if it's not already tried. If that doesn't succeed then go for the most
1857 // exhaustive option. Perform a full-heap collection including clearing
1858 // SoftReferences. In case of ConcurrentCopying, it will also ensure that
1859 // all regions are evacuated. If allocation doesn't succeed even after that
1860 // then there is no hope, so we throw OOME.
1861 collector::GcType tried_type = next_gc_type_;
1862 if (last_gc < tried_type) {
1863 const bool gc_ran = PERFORM_SUSPENDING_OPERATION(
1864 CollectGarbageInternal(tried_type, kGcCauseForAlloc, false, starting_gc_num + 1)
1865 != collector::kGcTypeNone);
1866
1867 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1868 (!instrumented && EntrypointsInstrumented())) {
1869 return nullptr;
1870 }
1871 if (gc_ran && have_reclaimed_enough()) {
1872 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator,
1873 alloc_size, bytes_allocated,
1874 usable_size, bytes_tl_bulk_allocated);
1875 if (ptr != nullptr) {
1876 return ptr;
1877 }
1878 }
1879 }
1880 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1881 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1882 // VM spec requires that all SoftReferences have been collected and cleared before throwing
1883 // OOME.
1884 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1885 << " allocation";
1886 // TODO: Run finalization, but this may cause more allocations to occur.
1887 // We don't need a WaitForGcToComplete here either.
1888 // TODO: Should check whether another thread already just ran a GC with soft
1889 // references.
1890 DCHECK(!gc_plan_.empty());
1891 PERFORM_SUSPENDING_OPERATION(
1892 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true, GC_NUM_ANY));
1893 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1894 (!instrumented && EntrypointsInstrumented())) {
1895 return nullptr;
1896 }
1897 mirror::Object* ptr = nullptr;
1898 if (have_reclaimed_enough()) {
1899 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1900 usable_size, bytes_tl_bulk_allocated);
1901 }
1902
1903 if (ptr == nullptr) {
1904 const uint64_t current_time = NanoTime();
1905 switch (allocator) {
1906 case kAllocatorTypeRosAlloc:
1907 // Fall-through.
1908 case kAllocatorTypeDlMalloc: {
1909 if (use_homogeneous_space_compaction_for_oom_ &&
1910 current_time - last_time_homogeneous_space_compaction_by_oom_ >
1911 min_interval_homogeneous_space_compaction_by_oom_) {
1912 last_time_homogeneous_space_compaction_by_oom_ = current_time;
1913 HomogeneousSpaceCompactResult result =
1914 PERFORM_SUSPENDING_OPERATION(PerformHomogeneousSpaceCompact());
1915 // Thread suspension could have occurred.
1916 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1917 (!instrumented && EntrypointsInstrumented())) {
1918 return nullptr;
1919 }
1920 switch (result) {
1921 case HomogeneousSpaceCompactResult::kSuccess:
1922 // If the allocation succeeded, we delayed an oom.
1923 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1924 usable_size, bytes_tl_bulk_allocated);
1925 if (ptr != nullptr) {
1926 count_delayed_oom_++;
1927 }
1928 break;
1929 case HomogeneousSpaceCompactResult::kErrorReject:
1930 // Reject due to disabled moving GC.
1931 break;
1932 case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1933 // Throw OOM by default.
1934 break;
1935 default: {
1936 UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1937 << static_cast<size_t>(result);
1938 UNREACHABLE();
1939 }
1940 }
1941 // Always print that we ran homogeneous space compaction since this can cause jank.
1942 VLOG(heap) << "Ran heap homogeneous space compaction, "
1943 << " requested defragmentation "
1944 << count_requested_homogeneous_space_compaction_.load()
1945 << " performed defragmentation "
1946 << count_performed_homogeneous_space_compaction_.load()
1947 << " ignored homogeneous space compaction "
1948 << count_ignored_homogeneous_space_compaction_.load()
1949 << " delayed count = "
1950 << count_delayed_oom_.load();
1951 }
1952 break;
1953 }
1954 default: {
1955 // Do nothing for other allocators.
1956 }
1957 }
1958 }
1959 #undef PERFORM_SUSPENDING_OPERATION
1960 // If the allocation hasn't succeeded by this point, throw an OOM error.
1961 if (ptr == nullptr) {
1962 ScopedAllowThreadSuspension ats;
1963 ThrowOutOfMemoryError(self, alloc_size, allocator);
1964 }
1965 return ptr;
1966 }
1967
1968 void Heap::SetTargetHeapUtilization(float target) {
1969 DCHECK_GT(target, 0.1f); // asserted in Java code
1970 DCHECK_LT(target, 1.0f);
1971 target_utilization_ = target;
1972 }
1973
1974 size_t Heap::GetObjectsAllocated() const {
1975 Thread* const self = Thread::Current();
1976 ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
1977 // Prevent GC running during GetObjectsAllocated since we may get a checkpoint request that tells
1978 // us to suspend while we are doing SuspendAll. b/35232978
1979 gc::ScopedGCCriticalSection gcs(Thread::Current(),
1980 gc::kGcCauseGetObjectsAllocated,
1981 gc::kCollectorTypeGetObjectsAllocated);
1982 // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
1983 ScopedSuspendAll ssa(__FUNCTION__);
1984 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1985 size_t total = 0;
1986 for (space::AllocSpace* space : alloc_spaces_) {
1987 total += space->GetObjectsAllocated();
1988 }
1989 return total;
1990 }
1991
1992 uint64_t Heap::GetObjectsAllocatedEver() const {
1993 uint64_t total = GetObjectsFreedEver();
1994 // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
1995 if (Thread::Current() != nullptr) {
1996 total += GetObjectsAllocated();
1997 }
1998 return total;
1999 }
2000
2001 uint64_t Heap::GetBytesAllocatedEver() const {
2002 // Force the returned value to be monotonically increasing, in the sense that if this is called
2003 // at A and B, such that A happens-before B, then the call at B returns a value no smaller than
2004 // that at A. This is not otherwise guaranteed, since num_bytes_allocated_ is decremented first,
2005 // and total_bytes_freed_ever_ is incremented later.
2006 static std::atomic<uint64_t> max_bytes_so_far(0);
2007 uint64_t so_far = max_bytes_so_far.load(std::memory_order_relaxed);
2008 uint64_t current_bytes = GetBytesFreedEver(std::memory_order_acquire);
2009 current_bytes += GetBytesAllocated();
2010 do {
2011 if (current_bytes <= so_far) {
2012 return so_far;
2013 }
2014 } while (!max_bytes_so_far.compare_exchange_weak(so_far /* updated */,
2015 current_bytes, std::memory_order_relaxed));
2016 return current_bytes;
2017 }
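// The loop above is the usual lock-free "monotonic maximum" pattern; a minimal sketch of the same
// idea, assuming a shared std::atomic<uint64_t> named max_so_far and a local candidate value:
//   uint64_t observed = max_so_far.load(std::memory_order_relaxed);
//   while (candidate > observed &&
//          !max_so_far.compare_exchange_weak(observed, candidate, std::memory_order_relaxed)) {
//     // On failure, observed is reloaded with the current maximum and the test repeats.
//   }
// This guarantees later callers never report a smaller "bytes allocated ever" value.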
2018
2019 // Check whether the given object is an instance of the given class.
2020 static bool MatchesClass(mirror::Object* obj,
2021 Handle<mirror::Class> h_class,
2022 bool use_is_assignable_from) REQUIRES_SHARED(Locks::mutator_lock_) {
2023 mirror::Class* instance_class = obj->GetClass();
2024 CHECK(instance_class != nullptr);
2025 ObjPtr<mirror::Class> klass = h_class.Get();
2026 if (use_is_assignable_from) {
2027 return klass != nullptr && klass->IsAssignableFrom(instance_class);
2028 }
2029 return instance_class == klass;
2030 }
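// Example of the distinction above: with use_is_assignable_from == true an instance of
// java.util.ArrayList matches a query for java.util.List (supertype match via IsAssignableFrom),
// while with it false only objects whose exact class equals h_class are counted.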
2031
2032 void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
2033 bool use_is_assignable_from,
2034 uint64_t* counts) {
2035 auto instance_counter = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2036 for (size_t i = 0; i < classes.size(); ++i) {
2037 if (MatchesClass(obj, classes[i], use_is_assignable_from)) {
2038 ++counts[i];
2039 }
2040 }
2041 };
2042 VisitObjects(instance_counter);
2043 }
2044
2045 void Heap::CollectGarbage(bool clear_soft_references, GcCause cause) {
2046 // Even if we waited for a GC we still need to do another GC since weaks allocated during the
2047 // last GC will not have necessarily been cleared.
2048 CollectGarbageInternal(gc_plan_.back(), cause, clear_soft_references, GC_NUM_ANY);
2049 }
2050
2051 bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
2052 return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
2053 foreground_collector_type_ == kCollectorTypeCMS;
2054 }
2055
2056 HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
2057 Thread* self = Thread::Current();
2058 // Increment the count of requested homogeneous space compactions.
2059 count_requested_homogeneous_space_compaction_++;
2060 // Store performed homogeneous space compaction at a new request arrival.
2061 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2062 Locks::mutator_lock_->AssertNotHeld(self);
2063 {
2064 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2065 MutexLock mu(self, *gc_complete_lock_);
2066 // Ensure there is only one GC at a time.
2067 WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
2068 // Homogeneous space compaction is a copying transition, can't run it if the moving GC disable
2069 // count is non zero.
2070 // If the collector type changed to something which doesn't benefit from homogeneous space
2071 // compaction, exit.
2072 if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
2073 !main_space_->CanMoveObjects()) {
2074 return kErrorReject;
2075 }
2076 if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
2077 return kErrorUnsupported;
2078 }
2079 collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
2080 }
2081 if (Runtime::Current()->IsShuttingDown(self)) {
2082 // Don't allow heap transitions to happen if the runtime is shutting down since these can
2083 // cause objects to get finalized.
2084 FinishGC(self, collector::kGcTypeNone);
2085 return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
2086 }
2087 collector::GarbageCollector* collector;
2088 {
2089 ScopedSuspendAll ssa(__FUNCTION__);
2090 uint64_t start_time = NanoTime();
2091 // Launch compaction.
2092 space::MallocSpace* to_space = main_space_backup_.release();
2093 space::MallocSpace* from_space = main_space_;
2094 to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2095 const uint64_t space_size_before_compaction = from_space->Size();
2096 AddSpace(to_space);
2097 // Make sure that we will have enough room to copy.
2098 CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
2099 collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
2100 const uint64_t space_size_after_compaction = to_space->Size();
2101 main_space_ = to_space;
2102 main_space_backup_.reset(from_space);
2103 RemoveSpace(from_space);
2104 SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
2105 // Update performed homogeneous space compaction count.
2106 count_performed_homogeneous_space_compaction_++;
2107 // Print the statistics log and resume all threads.
2108 uint64_t duration = NanoTime() - start_time;
2109 VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
2110 << PrettySize(space_size_before_compaction) << " -> "
2111 << PrettySize(space_size_after_compaction) << " compact-ratio: "
2112 << std::fixed << static_cast<double>(space_size_after_compaction) /
2113 static_cast<double>(space_size_before_compaction);
2114 }
2115 // Finish GC.
2116 // Get the references we need to enqueue.
2117 SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self);
2118 GrowForUtilization(semi_space_collector_);
2119 LogGC(kGcCauseHomogeneousSpaceCompact, collector);
2120 FinishGC(self, collector::kGcTypeFull);
2121 // Enqueue any references after losing the GC locks.
2122 clear->Run(self);
2123 clear->Finalize();
2124 {
2125 ScopedObjectAccess soa(self);
2126 soa.Vm()->UnloadNativeLibraries();
2127 }
2128 return HomogeneousSpaceCompactResult::kSuccess;
2129 }
2130
2131 void Heap::ChangeCollector(CollectorType collector_type) {
2132 // TODO: Only do this with all mutators suspended to avoid races.
2133 if (collector_type != collector_type_) {
2134 collector_type_ = collector_type;
2135 gc_plan_.clear();
2136 switch (collector_type_) {
2137 case kCollectorTypeCC: {
2138 if (use_generational_cc_) {
2139 gc_plan_.push_back(collector::kGcTypeSticky);
2140 }
2141 gc_plan_.push_back(collector::kGcTypeFull);
2142 if (use_tlab_) {
2143 ChangeAllocator(kAllocatorTypeRegionTLAB);
2144 } else {
2145 ChangeAllocator(kAllocatorTypeRegion);
2146 }
2147 break;
2148 }
2149 case kCollectorTypeSS: {
2150 gc_plan_.push_back(collector::kGcTypeFull);
2151 if (use_tlab_) {
2152 ChangeAllocator(kAllocatorTypeTLAB);
2153 } else {
2154 ChangeAllocator(kAllocatorTypeBumpPointer);
2155 }
2156 break;
2157 }
2158 case kCollectorTypeMS: {
2159 gc_plan_.push_back(collector::kGcTypeSticky);
2160 gc_plan_.push_back(collector::kGcTypePartial);
2161 gc_plan_.push_back(collector::kGcTypeFull);
2162 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2163 break;
2164 }
2165 case kCollectorTypeCMS: {
2166 gc_plan_.push_back(collector::kGcTypeSticky);
2167 gc_plan_.push_back(collector::kGcTypePartial);
2168 gc_plan_.push_back(collector::kGcTypeFull);
2169 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2170 break;
2171 }
2172 default: {
2173 UNIMPLEMENTED(FATAL);
2174 UNREACHABLE();
2175 }
2176 }
2177 if (IsGcConcurrent()) {
2178 concurrent_start_bytes_ =
2179 UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
2180 kMinConcurrentRemainingBytes);
2181 } else {
2182 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2183 }
2184 }
2185 }
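// Note on gc_plan_: it is ordered from the cheapest collection to the most thorough one, so
// callers can escalate (e.g. AllocateInternalWithGc retries with next_gc_type_ and finally falls
// back to gc_plan_.back(), the full collection, before throwing OOME).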
2186
2187 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
2188 class ZygoteCompactingCollector final : public collector::SemiSpace {
2189 public:
2190 ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
2191 : SemiSpace(heap, "zygote collector"),
2192 bin_live_bitmap_(nullptr),
2193 bin_mark_bitmap_(nullptr),
2194 is_running_on_memory_tool_(is_running_on_memory_tool) {}
2195
2196 void BuildBins(space::ContinuousSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
2197 bin_live_bitmap_ = space->GetLiveBitmap();
2198 bin_mark_bitmap_ = space->GetMarkBitmap();
2199 uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin());
2200 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2201 // Note: This requires traversing the space in increasing order of object addresses.
2202 auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2203 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
2204 size_t bin_size = object_addr - prev;
2205 // Add the bin spanning from the end of the previous object to the start of the current object.
2206 AddBin(bin_size, prev);
2207 prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
2208 };
2209 bin_live_bitmap_->Walk(visitor);
2210 // Add the last bin which spans after the last object to the end of the space.
2211 AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev);
2212 }
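// Bin-packing sketch: BuildBins above records every gap between consecutive live objects as a
// (gap size -> start address) entry in bins_. MarkNonForwardedObject below then takes the
// smallest bin that fits via bins_.lower_bound(alloc_size), places the object there, and
// re-inserts the unused remainder as a new, smaller bin, compacting the zygote space without
// relying on the free-list allocator's own accounting.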
2213
2214 private:
2215 // Maps from bin sizes to locations.
2216 std::multimap<size_t, uintptr_t> bins_;
2217 // Live bitmap of the space which contains the bins.
2218 accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
2219 // Mark bitmap of the space which contains the bins.
2220 accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
2221 const bool is_running_on_memory_tool_;
2222
2223 void AddBin(size_t size, uintptr_t position) {
2224 if (is_running_on_memory_tool_) {
2225 MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
2226 }
2227 if (size != 0) {
2228 bins_.insert(std::make_pair(size, position));
2229 }
2230 }
2231
2232 bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const override {
2233 // Don't sweep any spaces since we probably blasted the internal accounting of the free list
2234 // allocator.
2235 return false;
2236 }
2237
2238 mirror::Object* MarkNonForwardedObject(mirror::Object* obj) override
2239 REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
2240 size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
2241 size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
2242 mirror::Object* forward_address;
2243 // Find the smallest bin which we can move obj in.
2244 auto it = bins_.lower_bound(alloc_size);
2245 if (it == bins_.end()) {
2246 // No available space in the bins, place it in the target space instead (grows the zygote
2247 // space).
2248 size_t bytes_allocated, unused_bytes_tl_bulk_allocated;
2249 forward_address = to_space_->Alloc(
2250 self_, alloc_size, &bytes_allocated, nullptr, &unused_bytes_tl_bulk_allocated);
2251 if (to_space_live_bitmap_ != nullptr) {
2252 to_space_live_bitmap_->Set(forward_address);
2253 } else {
2254 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2255 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
2256 }
2257 } else {
2258 size_t size = it->first;
2259 uintptr_t pos = it->second;
2260 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
2261 forward_address = reinterpret_cast<mirror::Object*>(pos);
2262 // Set the live and mark bits so that sweeping system weaks works properly.
2263 bin_live_bitmap_->Set(forward_address);
2264 bin_mark_bitmap_->Set(forward_address);
2265 DCHECK_GE(size, alloc_size);
2266 // Add a new bin with the remaining space.
2267 AddBin(size - alloc_size, pos + alloc_size);
2268 }
2269 // Copy the object over to its new location.
2270 // Historical note: We did not use `alloc_size` to avoid a Valgrind error.
2271 memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
2272 if (kUseBakerReadBarrier) {
2273 obj->AssertReadBarrierState();
2274 forward_address->AssertReadBarrierState();
2275 }
2276 return forward_address;
2277 }
2278 };
2279
2280 void Heap::UnBindBitmaps() {
2281 TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
2282 for (const auto& space : GetContinuousSpaces()) {
2283 if (space->IsContinuousMemMapAllocSpace()) {
2284 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2285 if (alloc_space->GetLiveBitmap() != nullptr && alloc_space->HasBoundBitmaps()) {
2286 alloc_space->UnBindBitmaps();
2287 }
2288 }
2289 }
2290 }
2291
2292 void Heap::IncrementFreedEver() {
2293 // Counters are updated only by us, but may be read concurrently.
2294 // The updates should become visible after the corresponding live object info.
2295 total_objects_freed_ever_.store(total_objects_freed_ever_.load(std::memory_order_relaxed)
2296 + GetCurrentGcIteration()->GetFreedObjects()
2297 + GetCurrentGcIteration()->GetFreedLargeObjects(),
2298 std::memory_order_release);
2299 total_bytes_freed_ever_.store(total_bytes_freed_ever_.load(std::memory_order_relaxed)
2300 + GetCurrentGcIteration()->GetFreedBytes()
2301 + GetCurrentGcIteration()->GetFreedLargeObjectBytes(),
2302 std::memory_order_release);
2303 }
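// The release stores above are intended to pair with the acquire load of the freed-bytes counter
// performed in GetBytesAllocatedEver(): a reader that observes the incremented counters also
// observes the bookkeeping that preceded them, which helps keep the derived "allocated ever"
// figures from appearing to go backwards.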
2304
2305 #pragma clang diagnostic push
2306 #if !ART_USE_FUTEXES
2307 // Frame gets too large, perhaps due to Bionic pthread_mutex_lock size. We don't care.
2308 # pragma clang diagnostic ignored "-Wframe-larger-than="
2309 #endif
2310 // This has a large frame, but shouldn't be run anywhere near the stack limit.
2311 void Heap::PreZygoteFork() {
2312 if (!HasZygoteSpace()) {
2313 // We still want to GC in case there are some unreachable non-moving objects that could cause a
2314 // suboptimal bin packing when we compact the zygote space.
2315 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false, GC_NUM_ANY);
2316 // Trim the pages at the end of the non moving space. Trim while not holding zygote lock since
2317 // the trim process may require locking the mutator lock.
2318 non_moving_space_->Trim();
2319 }
2320 Thread* self = Thread::Current();
2321 MutexLock mu(self, zygote_creation_lock_);
2322 // Try to see if we have any Zygote spaces.
2323 if (HasZygoteSpace()) {
2324 return;
2325 }
2326 Runtime::Current()->GetInternTable()->AddNewTable();
2327 Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
2328 VLOG(heap) << "Starting PreZygoteFork";
2329 // The end of the non-moving space may be protected, unprotect it so that we can copy the zygote
2330 // there.
2331 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2332 const bool same_space = non_moving_space_ == main_space_;
2333 if (kCompactZygote) {
2334 // Temporarily disable rosalloc verification because the zygote
2335 // compaction will mess up the rosalloc internal metadata.
2336 ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
2337 ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
2338 zygote_collector.BuildBins(non_moving_space_);
2339 // Create a new bump pointer space which we will compact into.
2340 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2341 non_moving_space_->Limit());
2342 // Compact the bump pointer space to a new zygote bump pointer space.
2343 bool reset_main_space = false;
2344 if (IsMovingGc(collector_type_)) {
2345 if (collector_type_ == kCollectorTypeCC) {
2346 zygote_collector.SetFromSpace(region_space_);
2347 } else {
2348 zygote_collector.SetFromSpace(bump_pointer_space_);
2349 }
2350 } else {
2351 CHECK(main_space_ != nullptr);
2352 CHECK_NE(main_space_, non_moving_space_)
2353 << "Does not make sense to compact within the same space";
2354 // Copy from the main space.
2355 zygote_collector.SetFromSpace(main_space_);
2356 reset_main_space = true;
2357 }
2358 zygote_collector.SetToSpace(&target_space);
2359 zygote_collector.SetSwapSemiSpaces(false);
2360 zygote_collector.Run(kGcCauseCollectorTransition, false);
2361 if (reset_main_space) {
2362 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2363 madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
2364 MemMap mem_map = main_space_->ReleaseMemMap();
2365 RemoveSpace(main_space_);
2366 space::Space* old_main_space = main_space_;
2367 CreateMainMallocSpace(std::move(mem_map),
2368 kDefaultInitialSize,
2369 std::min(mem_map.Size(), growth_limit_),
2370 mem_map.Size());
2371 delete old_main_space;
2372 AddSpace(main_space_);
2373 } else {
2374 if (collector_type_ == kCollectorTypeCC) {
2375 region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2376 // Evacuated everything out of the region space, clear the mark bitmap.
2377 region_space_->GetMarkBitmap()->Clear();
2378 } else {
2379 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2380 }
2381 }
2382 if (temp_space_ != nullptr) {
2383 CHECK(temp_space_->IsEmpty());
2384 }
2385 IncrementFreedEver();
2386 // Update the end and write out image.
2387 non_moving_space_->SetEnd(target_space.End());
2388 non_moving_space_->SetLimit(target_space.Limit());
2389 VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
2390 }
2391 // Change the collector to the post zygote one.
2392 ChangeCollector(foreground_collector_type_);
2393 // Save the old space so that we can remove it after we complete creating the zygote space.
2394 space::MallocSpace* old_alloc_space = non_moving_space_;
2395 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
2396 // the remaining available space.
2397 // Remove the old space before creating the zygote space since creating the zygote space sets
2398 // the old alloc space's bitmaps to null.
2399 RemoveSpace(old_alloc_space);
2400 if (collector::SemiSpace::kUseRememberedSet) {
2401 // Consistency bound check.
2402 FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2403 // Remove the remembered set for the now zygote space (the old
2404 // non-moving space). Note now that we have compacted objects into
2405 // the zygote space, the data in the remembered set is no longer
2406 // needed. The zygote space will instead have a mod-union table
2407 // from this point on.
2408 RemoveRememberedSet(old_alloc_space);
2409 }
2410 // Remaining space becomes the new non moving space.
2411 zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
2412 &non_moving_space_);
2413 CHECK(!non_moving_space_->CanMoveObjects());
2414 if (same_space) {
2415 main_space_ = non_moving_space_;
2416 SetSpaceAsDefault(main_space_);
2417 }
2418 delete old_alloc_space;
2419 CHECK(HasZygoteSpace()) << "Failed creating zygote space";
2420 AddSpace(zygote_space_);
2421 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2422 AddSpace(non_moving_space_);
2423 constexpr bool set_mark_bit = kUseBakerReadBarrier
2424 && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects;
2425 if (set_mark_bit) {
2426 // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is
2427 // safe since we mark all of the objects that may reference non immune objects as gray.
2428 zygote_space_->SetMarkBitInLiveObjects();
2429 }
2430
2431 // Create the zygote space mod union table.
2432 accounting::ModUnionTable* mod_union_table =
2433 new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space_);
2434 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
2435
2436 if (collector_type_ != kCollectorTypeCC) {
2437 // Set all the cards in the mod-union table since we don't know which objects contain references
2438 // to large objects.
2439 mod_union_table->SetCards();
2440 } else {
2441 // Make sure to clear the zygote space cards so that we don't dirty pages in the next GC. There
2442 // may be dirty cards from the zygote compaction or reference processing. These cards are not
2443 // necessary to have marked since the zygote space may not refer to any objects not in the
2444 // zygote or image spaces at this point.
2445 mod_union_table->ProcessCards();
2446 mod_union_table->ClearTable();
2447
2448 // For CC we never collect zygote large objects. This means we do not need to set the cards for
2449 // the zygote mod-union table and we can also clear all of the existing image mod-union tables.
2450 // The existing mod-union tables are only for image spaces and may only reference zygote and
2451 // image objects.
2452 for (auto& pair : mod_union_tables_) {
2453 CHECK(pair.first->IsImageSpace());
2454 CHECK(!pair.first->AsImageSpace()->GetImageHeader().IsAppImage());
2455 accounting::ModUnionTable* table = pair.second;
2456 table->ClearTable();
2457 }
2458 }
2459 AddModUnionTable(mod_union_table);
2460 large_object_space_->SetAllLargeObjectsAsZygoteObjects(self, set_mark_bit);
2461 if (collector::SemiSpace::kUseRememberedSet) {
2462 // Add a new remembered set for the post-zygote non-moving space.
2463 accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2464 new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2465 non_moving_space_);
2466 CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2467 << "Failed to create post-zygote non-moving space remembered set";
2468 AddRememberedSet(post_zygote_non_moving_space_rem_set);
2469 }
2470 }
2471 #pragma clang diagnostic pop
2472
2473 void Heap::FlushAllocStack() {
2474 MarkAllocStackAsLive(allocation_stack_.get());
2475 allocation_stack_->Reset();
2476 }
2477
2478 void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2479 accounting::ContinuousSpaceBitmap* bitmap2,
2480 accounting::LargeObjectBitmap* large_objects,
2481 accounting::ObjectStack* stack) {
2482 DCHECK(bitmap1 != nullptr);
2483 DCHECK(bitmap2 != nullptr);
2484 const auto* limit = stack->End();
2485 for (auto* it = stack->Begin(); it != limit; ++it) {
2486 const mirror::Object* obj = it->AsMirrorPtr();
2487 if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2488 if (bitmap1->HasAddress(obj)) {
2489 bitmap1->Set(obj);
2490 } else if (bitmap2->HasAddress(obj)) {
2491 bitmap2->Set(obj);
2492 } else {
2493 DCHECK(large_objects != nullptr);
2494 large_objects->Set(obj);
2495 }
2496 }
2497 }
2498 }
2499
2500 void Heap::SwapSemiSpaces() {
2501 CHECK(bump_pointer_space_ != nullptr);
2502 CHECK(temp_space_ != nullptr);
2503 std::swap(bump_pointer_space_, temp_space_);
2504 }
2505
2506 collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2507 space::ContinuousMemMapAllocSpace* source_space,
2508 GcCause gc_cause) {
2509 CHECK(kMovingCollector);
2510 if (target_space != source_space) {
2511 // Don't swap spaces since this isn't a typical semi space collection.
2512 semi_space_collector_->SetSwapSemiSpaces(false);
2513 semi_space_collector_->SetFromSpace(source_space);
2514 semi_space_collector_->SetToSpace(target_space);
2515 semi_space_collector_->Run(gc_cause, false);
2516 return semi_space_collector_;
2517 }
2518 LOG(FATAL) << "Unsupported";
2519 UNREACHABLE();
2520 }
2521
2522 void Heap::TraceHeapSize(size_t heap_size) {
2523 ATraceIntegerValue("Heap size (KB)", heap_size / KB);
2524 }
2525
2526 #if defined(__GLIBC__)
2527 # define IF_GLIBC(x) x
2528 #else
2529 # define IF_GLIBC(x)
2530 #endif
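// Example of the helper macro above: IF_GLIBC(size_t mmapped_bytes;) expands to the declaration
// only when building against glibc and to nothing on Bionic, which keeps the mallinfo() handling
// in GetNativeBytes() free of extra #ifdef nesting.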
2531
2532 size_t Heap::GetNativeBytes() {
2533 size_t malloc_bytes;
2534 #if defined(__BIONIC__) || defined(__GLIBC__)
2535 IF_GLIBC(size_t mmapped_bytes;)
2536 struct mallinfo mi = mallinfo();
2537 // In spite of the documentation, the jemalloc version of this call seems to do what we want,
2538 // and it is thread-safe.
2539 if (sizeof(size_t) > sizeof(mi.uordblks) && sizeof(size_t) > sizeof(mi.hblkhd)) {
2540 // Shouldn't happen, but glibc declares uordblks as int.
2541 // Avoiding sign extension gets us correct behavior for another 2 GB.
2542 malloc_bytes = (unsigned int)mi.uordblks;
2543 IF_GLIBC(mmapped_bytes = (unsigned int)mi.hblkhd;)
2544 } else {
2545 malloc_bytes = mi.uordblks;
2546 IF_GLIBC(mmapped_bytes = mi.hblkhd;)
2547 }
2548 // From the spec, it appeared mmapped_bytes <= malloc_bytes. Reality was sometimes
2549 // dramatically different. (b/119580449 was an early bug.) If so, we try to fudge it.
2550 // However, malloc implementations seem to interpret hblkhd differently, namely as
2551 // mapped blocks backing the entire heap (e.g. jemalloc) vs. large objects directly
2552 // allocated via mmap (e.g. glibc). Thus we now only do this for glibc, where it
2553 // previously helped, and which appears to use a reading of the spec compatible
2554 // with our adjustment.
2555 #if defined(__GLIBC__)
2556 if (mmapped_bytes > malloc_bytes) {
2557 malloc_bytes = mmapped_bytes;
2558 }
2559 #endif // GLIBC
2560 #else // Neither Bionic nor Glibc
2561 // We should hit this case only in contexts in which GC triggering is not critical. Effectively
2562 // disable GC triggering based on malloc().
2563 malloc_bytes = 1000;
2564 #endif
2565 return malloc_bytes + native_bytes_registered_.load(std::memory_order_relaxed);
2566 // An alternative would be to get RSS from /proc/self/statm. Empirically, that's no
2567 // more expensive, and it would allow us to count memory allocated by means other than malloc.
2568 // However it would change as pages are unmapped and remapped due to memory pressure, among
2569 // other things. It seems risky to trigger GCs as a result of such changes.
2570 }
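// Illustrative numbers (hypothetical): on glibc, if mallinfo() reports uordblks = 60 MB but
// hblkhd = 200 MB because large buffers were allocated directly via mmap, the adjustment above
// uses 200 MB (plus registered native bytes), so mmap-backed allocations still contribute to
// native GC triggering rather than being under-counted.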
2571
2572 static inline bool GCNumberLt(uint32_t gc_num1, uint32_t gc_num2) {
2573 // unsigned comparison, assuming a non-huge difference, but dealing correctly with wrapping.
2574 uint32_t difference = gc_num2 - gc_num1;
2575 bool completed_more_than_requested = difference > 0x80000000;
2576 return difference > 0 && !completed_more_than_requested;
2577 }
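// Worked example of the wrap-around comparison: GCNumberLt(0xFFFFFFFEu, 1u) computes
// difference = 1 - 0xFFFFFFFE = 3 (mod 2^32), which is non-zero and below 0x80000000, so a
// counter that has wrapped to 1 is still treated as later than 0xFFFFFFFE. For equal arguments
// the difference is 0 and the function returns false.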
2578
2579
2580 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
2581 GcCause gc_cause,
2582 bool clear_soft_references,
2583 uint32_t requested_gc_num) {
2584 Thread* self = Thread::Current();
2585 Runtime* runtime = Runtime::Current();
2586 // If the heap can't run the GC, silently fail and return that no GC was run.
2587 switch (gc_type) {
2588 case collector::kGcTypePartial: {
2589 if (!HasZygoteSpace()) {
2590 // Do not increment gcs_completed_. We should retry with kGcTypeFull.
2591 return collector::kGcTypeNone;
2592 }
2593 break;
2594 }
2595 default: {
2596 // Other GC types don't have any special cases that make them not runnable. The main case
2597 // here is full GC.
2598 }
2599 }
2600 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2601 Locks::mutator_lock_->AssertNotHeld(self);
2602 if (self->IsHandlingStackOverflow()) {
2603 // If we are throwing a stack overflow error we probably don't have enough remaining stack
2604 // space to run the GC.
2605 // Count this as a GC in case someone is waiting for it to complete.
2606 gcs_completed_.fetch_add(1, std::memory_order_release);
2607 return collector::kGcTypeNone;
2608 }
2609 bool compacting_gc;
2610 {
2611 gc_complete_lock_->AssertNotHeld(self);
2612 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2613 MutexLock mu(self, *gc_complete_lock_);
2614 // Ensure there is only one GC at a time.
2615 WaitForGcToCompleteLocked(gc_cause, self);
2616 if (requested_gc_num != GC_NUM_ANY && !GCNumberLt(GetCurrentGcNum(), requested_gc_num)) {
2617 // The appropriate GC was already triggered elsewhere.
2618 return collector::kGcTypeNone;
2619 }
2620 compacting_gc = IsMovingGc(collector_type_);
2621 // GC can be disabled if someone is using GetPrimitiveArrayCritical.
2622 if (compacting_gc && disable_moving_gc_count_ != 0) {
2623 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2624 // Again count this as a GC.
2625 gcs_completed_.fetch_add(1, std::memory_order_release);
2626 return collector::kGcTypeNone;
2627 }
2628 if (gc_disabled_for_shutdown_) {
2629 gcs_completed_.fetch_add(1, std::memory_order_release);
2630 return collector::kGcTypeNone;
2631 }
2632 collector_type_running_ = collector_type_;
2633 last_gc_cause_ = gc_cause;
2634 }
2635 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2636 ++runtime->GetStats()->gc_for_alloc_count;
2637 ++self->GetStats()->gc_for_alloc_count;
2638 }
2639 const size_t bytes_allocated_before_gc = GetBytesAllocated();
2640
2641 DCHECK_LT(gc_type, collector::kGcTypeMax);
2642 DCHECK_NE(gc_type, collector::kGcTypeNone);
2643
2644 collector::GarbageCollector* collector = nullptr;
2645 // TODO: Clean this up.
2646 if (compacting_gc) {
2647 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2648 current_allocator_ == kAllocatorTypeTLAB ||
2649 current_allocator_ == kAllocatorTypeRegion ||
2650 current_allocator_ == kAllocatorTypeRegionTLAB);
2651 switch (collector_type_) {
2652 case kCollectorTypeSS:
2653 semi_space_collector_->SetFromSpace(bump_pointer_space_);
2654 semi_space_collector_->SetToSpace(temp_space_);
2655 semi_space_collector_->SetSwapSemiSpaces(true);
2656 collector = semi_space_collector_;
2657 break;
2658 case kCollectorTypeCC:
2659 collector::ConcurrentCopying* active_cc_collector;
2660 if (use_generational_cc_) {
2661 // TODO: Other threads must do the flip checkpoint before they start poking at
2662 // active_concurrent_copying_collector_. So there should be no concurrency issues here.
2663 active_cc_collector = (gc_type == collector::kGcTypeSticky) ?
2664 young_concurrent_copying_collector_ : concurrent_copying_collector_;
2665 active_concurrent_copying_collector_.store(active_cc_collector,
2666 std::memory_order_relaxed);
2667 DCHECK(active_cc_collector->RegionSpace() == region_space_);
2668 collector = active_cc_collector;
2669 } else {
2670 collector = active_concurrent_copying_collector_.load(std::memory_order_relaxed);
2671 }
2672 break;
2673 default:
2674 LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2675 }
2676 if (collector != active_concurrent_copying_collector_.load(std::memory_order_relaxed)) {
2677 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2678 if (kIsDebugBuild) {
2679 // Try to read each page of the memory map in case mprotect didn't work properly b/19894268.
2680 temp_space_->GetMemMap()->TryReadable();
2681 }
2682 CHECK(temp_space_->IsEmpty());
2683 }
2684 gc_type = collector::kGcTypeFull; // TODO: Not hard code this in.
2685 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2686 current_allocator_ == kAllocatorTypeDlMalloc) {
2687 collector = FindCollectorByGcType(gc_type);
2688 } else {
2689 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2690 }
2691
2692 CHECK(collector != nullptr)
2693 << "Could not find garbage collector with collector_type="
2694 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
2695 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2696 IncrementFreedEver();
2697 RequestTrim(self);
2698 // Collect cleared references.
2699 SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self);
2700 // Grow the heap so that we know when to perform the next GC.
2701 GrowForUtilization(collector, bytes_allocated_before_gc);
2702 old_native_bytes_allocated_.store(GetNativeBytes());
2703 LogGC(gc_cause, collector);
2704 FinishGC(self, gc_type);
2705 // Actually enqueue all cleared references. Do this after the GC has officially finished since
2706 // otherwise we can deadlock.
2707 clear->Run(self);
2708 clear->Finalize();
2709 // Inform DDMS that a GC completed.
2710 Dbg::GcDidFinish();
2711
2712 // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
2713 // deadlocks in case the JNI_OnUnload function does allocations.
2714 {
2715 ScopedObjectAccess soa(self);
2716 soa.Vm()->UnloadNativeLibraries();
2717 }
2718 return gc_type;
2719 }
2720
2721 void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
2722 const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2723 const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2724 // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2725 // (mutator time blocked >= long_pause_log_threshold_).
2726 bool log_gc = kLogAllGCs || (gc_cause == kGcCauseExplicit && always_log_explicit_gcs_);
2727 if (!log_gc && CareAboutPauseTimes()) {
2728 // GC for alloc pauses the allocating thread, so consider it as a pause.
2729 log_gc = duration > long_gc_log_threshold_ ||
2730 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2731 for (uint64_t pause : pause_times) {
2732 log_gc = log_gc || pause >= long_pause_log_threshold_;
2733 }
2734 }
2735 if (log_gc) {
2736 const size_t percent_free = GetPercentFree();
2737 const size_t current_heap_size = GetBytesAllocated();
2738 const size_t total_memory = GetTotalMemory();
2739 std::ostringstream pause_string;
2740 for (size_t i = 0; i < pause_times.size(); ++i) {
2741 pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2742 << ((i != pause_times.size() - 1) ? "," : "");
2743 }
2744 LOG(INFO) << gc_cause << " " << collector->GetName()
2745 << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
2746 << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2747 << current_gc_iteration_.GetFreedLargeObjects() << "("
2748 << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2749 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2750 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2751 << " total " << PrettyDuration((duration / 1000) * 1000);
2752 VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2753 }
2754 }
2755
2756 void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2757 MutexLock mu(self, *gc_complete_lock_);
2758 collector_type_running_ = kCollectorTypeNone;
2759 if (gc_type != collector::kGcTypeNone) {
2760 last_gc_type_ = gc_type;
2761
2762 // Update stats.
2763 ++gc_count_last_window_;
2764 if (running_collection_is_blocking_) {
2765 // If the currently running collection was a blocking one,
2766 // increment the counters and reset the flag.
2767 ++blocking_gc_count_;
2768 blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
2769 ++blocking_gc_count_last_window_;
2770 }
2771 // Update the gc count rate histograms if due.
2772 UpdateGcCountRateHistograms();
2773 }
2774 // Reset.
2775 running_collection_is_blocking_ = false;
2776 thread_running_gc_ = nullptr;
2777 if (gc_type != collector::kGcTypeNone) {
2778 gcs_completed_.fetch_add(1, std::memory_order_release);
2779 }
2780 // Wake anyone who may have been waiting for the GC to complete.
2781 gc_complete_cond_->Broadcast(self);
2782 }
2783
2784 void Heap::UpdateGcCountRateHistograms() {
2785 // Invariant: if the time since the last update includes more than
2786 // one window, all the GC runs (if > 0) must have happened in the first
2787 // window because otherwise the update must have already taken place
2788 // at an earlier GC run. So, we report the non-first windows with
2789 // zero counts to the histograms.
2790 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2791 uint64_t now = NanoTime();
2792 DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
2793 uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
2794 uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
2795
2796 // The computed number of windows can be incoherently high if NanoTime() is not monotonic.
2797 // Setting a limit on its maximum value reduces the impact on CPU time in such cases.
2798 if (num_of_windows > kGcCountRateHistogramMaxNumMissedWindows) {
2799 LOG(WARNING) << "Reducing the number of considered missed Gc histogram windows from "
2800 << num_of_windows << " to " << kGcCountRateHistogramMaxNumMissedWindows;
2801 num_of_windows = kGcCountRateHistogramMaxNumMissedWindows;
2802 }
2803
2804 if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
2805 // Record the first window.
2806 gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1); // Exclude the current run.
2807 blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
2808 blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
2809 // Record the other windows (with zero counts).
2810 for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
2811 gc_count_rate_histogram_.AddValue(0);
2812 blocking_gc_count_rate_histogram_.AddValue(0);
2813 }
2814 // Update the last update time and reset the counters.
2815 last_update_time_gc_count_rate_histograms_ =
2816 (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
2817 gc_count_last_window_ = 1; // Include the current run.
2818 blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
2819 }
2820 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2821 }
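// Worked example (a 10s window duration is assumed purely for illustration): if the last update
// was at t = 100s and a GC finishes at t = 125s with gc_count_last_window_ == 3, then
// num_of_windows == 2; the first window is recorded with value 2 (the current run is excluded),
// one additional window is recorded with value 0, the update time is rounded down to t = 120s,
// and gc_count_last_window_ restarts at 1 to include the current run.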
2822
2823 class RootMatchesObjectVisitor : public SingleRootVisitor {
2824 public:
2825 explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
2826
2827 void VisitRoot(mirror::Object* root, const RootInfo& info)
2828 override REQUIRES_SHARED(Locks::mutator_lock_) {
2829 if (root == obj_) {
2830 LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
2831 }
2832 }
2833
2834 private:
2835 const mirror::Object* const obj_;
2836 };
2837
2838
2839 class ScanVisitor {
2840 public:
2841 void operator()(const mirror::Object* obj) const {
2842 LOG(ERROR) << "Would have rescanned object " << obj;
2843 }
2844 };
2845
2846 // Verify a reference from an object.
2847 class VerifyReferenceVisitor : public SingleRootVisitor {
2848 public:
2849 VerifyReferenceVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent)
2850 REQUIRES_SHARED(Locks::mutator_lock_)
2851 : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
2852 CHECK_EQ(self_, Thread::Current());
2853 }
2854
2855 void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const
2856 REQUIRES_SHARED(Locks::mutator_lock_) {
2857 if (verify_referent_) {
2858 VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
2859 }
2860 }
2861
2862 void operator()(ObjPtr<mirror::Object> obj,
2863 MemberOffset offset,
2864 bool is_static ATTRIBUTE_UNUSED) const
2865 REQUIRES_SHARED(Locks::mutator_lock_) {
2866 VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
2867 }
2868
2869 bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS {
2870 return heap_->IsLiveObjectLocked(obj, true, false, true);
2871 }
2872
2873 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
2874 REQUIRES_SHARED(Locks::mutator_lock_) {
2875 if (!root->IsNull()) {
2876 VisitRoot(root);
2877 }
2878 }
2879 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
2880 REQUIRES_SHARED(Locks::mutator_lock_) {
2881 const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
2882 root->AsMirrorPtr(), RootInfo(kRootVMInternal));
2883 }
2884
2885 void VisitRoot(mirror::Object* root, const RootInfo& root_info) override
2886 REQUIRES_SHARED(Locks::mutator_lock_) {
2887 if (root == nullptr) {
2888 LOG(ERROR) << "Root is null with info " << root_info.GetType();
2889 } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
2890 LOG(ERROR) << "Root " << root << " is dead with type " << mirror::Object::PrettyTypeOf(root)
2891 << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
2892 }
2893 }
2894
2895 private:
2896 // TODO: Fix the no thread safety analysis.
2897 // Returns false on failure.
2898 bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
2899 NO_THREAD_SAFETY_ANALYSIS {
2900 if (ref == nullptr || IsLive(ref)) {
2901 // Verify that the reference is live.
2902 return true;
2903 }
2904 CHECK_EQ(self_, Thread::Current()); // fail_count_ is private to the calling thread.
2905 *fail_count_ += 1;
2906 if (*fail_count_ == 1) {
2907 // Only print message for the first failure to prevent spam.
2908 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
2909 }
2910 if (obj != nullptr) {
2911 // Only do this part for non roots.
2912 accounting::CardTable* card_table = heap_->GetCardTable();
2913 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2914 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2915 uint8_t* card_addr = card_table->CardFromAddr(obj);
2916 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2917 << offset << "\n card value = " << static_cast<int>(*card_addr);
2918 if (heap_->IsValidObjectAddress(obj->GetClass())) {
2919 LOG(ERROR) << "Obj type " << obj->PrettyTypeOf();
2920 } else {
2921 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
2922 }
2923
2924 // Attempt to find the class inside of the recently freed objects.
2925 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2926 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2927 space::MallocSpace* space = ref_space->AsMallocSpace();
2928 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2929 if (ref_class != nullptr) {
2930 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2931 << ref_class->PrettyClass();
2932 } else {
2933 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
2934 }
2935 }
2936
2937 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2938 ref->GetClass()->IsClass()) {
2939 LOG(ERROR) << "Ref type " << ref->PrettyTypeOf();
2940 } else {
2941 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2942 << ") is not a valid heap address";
2943 }
2944
2945 card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
2946 void* cover_begin = card_table->AddrFromCard(card_addr);
2947 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2948 accounting::CardTable::kCardSize);
2949 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2950 << "-" << cover_end;
2951 accounting::ContinuousSpaceBitmap* bitmap =
2952 heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
2953
2954 if (bitmap == nullptr) {
2955 LOG(ERROR) << "Object " << obj << " has no bitmap";
2956 if (!VerifyClassClass(obj->GetClass())) {
2957 LOG(ERROR) << "Object " << obj << " failed class verification!";
2958 }
2959 } else {
2960 // Print out how the object is live.
2961 if (bitmap->Test(obj)) {
2962 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2963 }
2964 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
2965 LOG(ERROR) << "Object " << obj << " found in allocation stack";
2966 }
2967 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
2968 LOG(ERROR) << "Object " << obj << " found in live stack";
2969 }
2970 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2971 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2972 }
2973 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2974 LOG(ERROR) << "Ref " << ref << " found in live stack";
2975 }
2976 // Attempt to see if the card table missed the reference.
2977 ScanVisitor scan_visitor;
2978 uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
2979 card_table->Scan<false>(bitmap, byte_cover_begin,
2980 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
2981 }
2982
2983 // Search to see if any of the roots reference our object.
2984 RootMatchesObjectVisitor visitor1(obj);
2985 Runtime::Current()->VisitRoots(&visitor1);
2986 // Search to see if any of the roots reference our reference.
2987 RootMatchesObjectVisitor visitor2(ref);
2988 Runtime::Current()->VisitRoots(&visitor2);
2989 }
2990 return false;
2991 }
2992
2993 Thread* const self_;
2994 Heap* const heap_;
2995 size_t* const fail_count_;
2996 const bool verify_referent_;
2997 };
2998
2999 // Verify all references within an object, for use with HeapBitmap::Visit.
3000 class VerifyObjectVisitor {
3001 public:
3002 VerifyObjectVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent)
3003 : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
3004
3005 void operator()(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
3006 // Note: we are verifying the references in obj but not obj itself, this is because obj must
3007 // be live or else how did we find it in the live bitmap?
3008 VerifyReferenceVisitor visitor(self_, heap_, fail_count_, verify_referent_);
3009 // The class doesn't count as a reference but we should verify it anyway.
3010 obj->VisitReferences(visitor, visitor);
3011 }
3012
3013 void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
3014 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
3015 VerifyReferenceVisitor visitor(self_, heap_, fail_count_, verify_referent_);
3016 Runtime::Current()->VisitRoots(&visitor);
3017 }
3018
3019 uint32_t GetFailureCount() const REQUIRES(Locks::mutator_lock_) {
3020 CHECK_EQ(self_, Thread::Current());
3021 return *fail_count_;
3022 }
3023
3024 private:
3025 Thread* const self_;
3026 Heap* const heap_;
3027 size_t* const fail_count_;
3028 const bool verify_referent_;
3029 };
3030
3031 void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) {
3032 // Slow path, the allocation stack push back must have already failed.
3033 DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr()));
3034 do {
3035 // TODO: Add handle VerifyObject.
3036 StackHandleScope<1> hs(self);
3037 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3038 // Push our object into the reserve region of the allocation stack. This is only required due
3039 // to heap verification requiring that roots are live (either in the live bitmap or in the
3040 // allocation stack).
3041 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
3042 CollectGarbageInternal(collector::kGcTypeSticky,
3043 kGcCauseForAlloc,
3044 false,
3045 GetCurrentGcNum() + 1);
3046 } while (!allocation_stack_->AtomicPushBack(obj->Ptr()));
3047 }
3048
3049 void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self,
3050 ObjPtr<mirror::Object>* obj) {
3051 // Slow path, the allocation stack push back must have already failed.
3052 DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr()));
3053 StackReference<mirror::Object>* start_address;
3054 StackReference<mirror::Object>* end_address;
3055 while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
3056 &end_address)) {
3057 // TODO: Add handle VerifyObject.
3058 StackHandleScope<1> hs(self);
3059 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3060 // Push our object into the reserve region of the allocation stack. This is only required due
3061 // to heap verification requiring that roots are live (either in the live bitmap or in the
3062 // allocation stack).
3063 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
3064 // Push into the reserve allocation stack.
3065 CollectGarbageInternal(collector::kGcTypeSticky,
3066 kGcCauseForAlloc,
3067 false,
3068 GetCurrentGcNum() + 1);
3069 }
3070 self->SetThreadLocalAllocationStack(start_address, end_address);
3071 // Retry on the new thread-local allocation stack.
3072 CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr())); // Must succeed.
3073 }
3074
3075 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
3076 size_t Heap::VerifyHeapReferences(bool verify_referents) {
3077 Thread* self = Thread::Current();
3078 Locks::mutator_lock_->AssertExclusiveHeld(self);
3079 // Let's sort our allocation stacks so that we can efficiently binary search them.
3080 allocation_stack_->Sort();
3081 live_stack_->Sort();
3082 // Since we sorted the allocation stack content, we need to revoke all
3083 // thread-local allocation stacks.
3084 RevokeAllThreadLocalAllocationStacks(self);
3085 size_t fail_count = 0;
3086 VerifyObjectVisitor visitor(self, this, &fail_count, verify_referents);
3087 // Verify objects in the allocation stack since these will be objects which were:
3088 // 1. Allocated prior to the GC (pre GC verification).
3089 // 2. Allocated during the GC (pre sweep GC verification).
3090 // We don't want to verify the objects in the live stack since they themselves may be
3091 // pointing to dead objects if they are not reachable.
3092 VisitObjectsPaused(visitor);
3093 // Verify the roots:
3094 visitor.VerifyRoots();
3095 if (visitor.GetFailureCount() > 0) {
3096 // Dump mod-union tables.
3097 for (const auto& table_pair : mod_union_tables_) {
3098 accounting::ModUnionTable* mod_union_table = table_pair.second;
3099 mod_union_table->Dump(LOG_STREAM(ERROR) << mod_union_table->GetName() << ": ");
3100 }
3101 // Dump remembered sets.
3102 for (const auto& table_pair : remembered_sets_) {
3103 accounting::RememberedSet* remembered_set = table_pair.second;
3104 remembered_set->Dump(LOG_STREAM(ERROR) << remembered_set->GetName() << ": ");
3105 }
3106 DumpSpaces(LOG_STREAM(ERROR));
3107 }
3108 return visitor.GetFailureCount();
3109 }
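// Usage note: this is called with mutators suspended from the paused verification hooks below
// (PreGcVerificationPaused, PreSweepingGcVerification and PostGcVerificationPaused), each of
// which LOG(FATAL)s if the returned failure count is non-zero.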
3110
3111 class VerifyReferenceCardVisitor {
3112 public:
3113 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
3114 REQUIRES_SHARED(Locks::mutator_lock_,
3115 Locks::heap_bitmap_lock_)
3116 : heap_(heap), failed_(failed) {
3117 }
3118
3119 // There are no card marks for native roots on a class.
3120 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
3121 const {}
3122 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
3123
3124 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
3125 // annotalysis on visitors.
3126 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
3127 NO_THREAD_SAFETY_ANALYSIS {
3128 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
3129 // Filter out class references since changing an object's class does not mark the card as dirty.
3130 // Also handles large objects, since the only reference they hold is a class reference.
3131 if (ref != nullptr && !ref->IsClass()) {
3132 accounting::CardTable* card_table = heap_->GetCardTable();
3133 // If the object is not dirty and it is referencing something in the live stack other than
3134 // class, then it must be on a dirty card.
3135 if (!card_table->AddrIsInCardTable(obj)) {
3136 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
3137 *failed_ = true;
3138 } else if (!card_table->IsDirty(obj)) {
3139 // TODO: Check mod-union tables.
3140 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
3141 // kCardDirty - 1 if it didn't get touched since we aged it.
3142 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
3143 if (live_stack->ContainsSorted(ref)) {
3144 if (live_stack->ContainsSorted(obj)) {
3145 LOG(ERROR) << "Object " << obj << " found in live stack";
3146 }
3147 if (heap_->GetLiveBitmap()->Test(obj)) {
3148 LOG(ERROR) << "Object " << obj << " found in live bitmap";
3149 }
3150 LOG(ERROR) << "Object " << obj << " " << mirror::Object::PrettyTypeOf(obj)
3151 << " references " << ref << " " << mirror::Object::PrettyTypeOf(ref)
3152 << " in live stack";
3153
3154 // Print which field of the object is dead.
3155 if (!obj->IsObjectArray()) {
3156 ObjPtr<mirror::Class> klass = is_static ? obj->AsClass() : obj->GetClass();
3157 CHECK(klass != nullptr);
3158 for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
3159 if (field.GetOffset().Int32Value() == offset.Int32Value()) {
3160 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
3161 << field.PrettyField();
3162 break;
3163 }
3164 }
3165 } else {
3166 ObjPtr<mirror::ObjectArray<mirror::Object>> object_array =
3167 obj->AsObjectArray<mirror::Object>();
3168 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
3169 if (object_array->Get(i) == ref) {
3170 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
3171 }
3172 }
3173 }
3174
3175 *failed_ = true;
3176 }
3177 }
3178 }
3179 }
3180
3181 private:
3182 Heap* const heap_;
3183 bool* const failed_;
3184 };
3185
3186 class VerifyLiveStackReferences {
3187 public:
3188 explicit VerifyLiveStackReferences(Heap* heap)
3189 : heap_(heap),
3190 failed_(false) {}
3191
3192 void operator()(mirror::Object* obj) const
3193 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3194 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
3195 obj->VisitReferences(visitor, VoidFunctor());
3196 }
3197
3198 bool Failed() const {
3199 return failed_;
3200 }
3201
3202 private:
3203 Heap* const heap_;
3204 bool failed_;
3205 };
3206
3207 bool Heap::VerifyMissingCardMarks() {
3208 Thread* self = Thread::Current();
3209 Locks::mutator_lock_->AssertExclusiveHeld(self);
3210 // We need to sort the live stack since we binary search it.
3211 live_stack_->Sort();
3212 // Since we sorted the allocation stack content, we need to revoke all
3213 // thread-local allocation stacks.
3214 RevokeAllThreadLocalAllocationStacks(self);
3215 VerifyLiveStackReferences visitor(this);
3216 GetLiveBitmap()->Visit(visitor);
3217 // We can verify objects in the live stack since none of these should reference dead objects.
3218 for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
3219 if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
3220 visitor(it->AsMirrorPtr());
3221 }
3222 }
3223 return !visitor.Failed();
3224 }
3225
3226 void Heap::SwapStacks() {
3227 if (kUseThreadLocalAllocationStack) {
3228 live_stack_->AssertAllZero();
3229 }
3230 allocation_stack_.swap(live_stack_);
3231 }
3232
3233 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
3234 // This must be called only during the pause.
3235 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
3236 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
3237 MutexLock mu2(self, *Locks::thread_list_lock_);
3238 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
3239 for (Thread* t : thread_list) {
3240 t->RevokeThreadLocalAllocationStack();
3241 }
3242 }
3243
3244 void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
3245 if (kIsDebugBuild) {
3246 if (rosalloc_space_ != nullptr) {
3247 rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
3248 }
3249 if (bump_pointer_space_ != nullptr) {
3250 bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
3251 }
3252 }
3253 }
3254
3255 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
3256 if (kIsDebugBuild) {
3257 if (bump_pointer_space_ != nullptr) {
3258 bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
3259 }
3260 }
3261 }
3262
3263 accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
3264 auto it = mod_union_tables_.find(space);
3265 if (it == mod_union_tables_.end()) {
3266 return nullptr;
3267 }
3268 return it->second;
3269 }
3270
3271 accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3272 auto it = remembered_sets_.find(space);
3273 if (it == remembered_sets_.end()) {
3274 return nullptr;
3275 }
3276 return it->second;
3277 }
3278
3279 void Heap::ProcessCards(TimingLogger* timings,
3280 bool use_rem_sets,
3281 bool process_alloc_space_cards,
3282 bool clear_alloc_space_cards) {
3283 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3284 // Clear cards and keep track of cards cleared in the mod-union table.
3285 for (const auto& space : continuous_spaces_) {
3286 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
3287 accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
3288 if (table != nullptr) {
3289 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3290 "ImageModUnionClearCards";
3291 TimingLogger::ScopedTiming t2(name, timings);
3292 table->ProcessCards();
3293 } else if (use_rem_sets && rem_set != nullptr) {
3294 DCHECK(collector::SemiSpace::kUseRememberedSet) << static_cast<int>(collector_type_);
3295 TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
3296 rem_set->ClearCards();
3297 } else if (process_alloc_space_cards) {
3298 TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
3299 if (clear_alloc_space_cards) {
3300 uint8_t* end = space->End();
3301 if (space->IsImageSpace()) {
3302 // Image space end is the end of the mirror objects; it is not necessarily page or card
3303 // aligned. Align up so that the check in ClearCardRange does not fail.
3304 end = AlignUp(end, accounting::CardTable::kCardSize);
3305 }
3306 card_table_->ClearCardRange(space->Begin(), end);
3307 } else {
3308 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3309 // cards were dirty before the GC started.
3310 // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3311 // -> clean(cleaning thread).
3312 // The races mean we either end up with an aged card or an unaged card. Since we have the
3313 // checkpoint roots and then we scan / update mod union tables after, we will always
3314 // scan either card. If we end up with the non-aged card, we scan it in the pause.
3315 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3316 VoidFunctor());
3317 }
3318 }
3319 }
3320 }
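// Sketch of the card aging mentioned above (behaviour as implied by the comments here, not a
// full specification): AgeCardVisitor rewrites a card that is kCardDirty at the start of the GC
// to kCardDirty - 1 ("aged") and turns other values clean, so the collector can later
// distinguish cards dirtied before the GC (aged) from cards the mutator dirties concurrently
// with it (still kCardDirty).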
3321
3322 struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
3323 mirror::Object* MarkObject(mirror::Object* obj) override {
3324 return obj;
3325 }
3326 void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {
3327 }
3328 };
3329
3330 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3331 Thread* const self = Thread::Current();
3332 TimingLogger* const timings = current_gc_iteration_.GetTimings();
3333 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3334 if (verify_pre_gc_heap_) {
3335 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
3336 size_t failures = VerifyHeapReferences();
3337 if (failures > 0) {
3338 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3339 << " failures";
3340 }
3341 }
3342 // Check that all objects which reference things in the live stack are on dirty cards.
3343 if (verify_missing_card_marks_) {
3344 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
3345 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
3346 SwapStacks();
3347 // Sort the live stack so that we can quickly binary search it later.
3348 CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3349 << " missing card mark verification failed\n" << DumpSpaces();
3350 SwapStacks();
3351 }
3352 if (verify_mod_union_table_) {
3353 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
3354 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
3355 for (const auto& table_pair : mod_union_tables_) {
3356 accounting::ModUnionTable* mod_union_table = table_pair.second;
3357 IdentityMarkHeapReferenceVisitor visitor;
3358 mod_union_table->UpdateAndMarkReferences(&visitor);
3359 mod_union_table->Verify();
3360 }
3361 }
3362 }
3363
3364 void Heap::PreGcVerification(collector::GarbageCollector* gc) {
3365 if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
3366 collector::GarbageCollector::ScopedPause pause(gc, false);
3367 PreGcVerificationPaused(gc);
3368 }
3369 }
3370
3371 void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) {
3372 // TODO: Add a new runtime option for this?
3373 if (verify_pre_gc_rosalloc_) {
3374 RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
3375 }
3376 }
3377
3378 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
3379 Thread* const self = Thread::Current();
3380 TimingLogger* const timings = current_gc_iteration_.GetTimings();
3381 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3382 // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3383 // reachable objects.
3384 if (verify_pre_sweeping_heap_) {
3385 TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
3386 CHECK_NE(self->GetState(), kRunnable);
3387 {
3388 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3389 // Swapping bound bitmaps does nothing.
3390 gc->SwapBitmaps();
3391 }
3392 // Pass in false since concurrent reference processing can mean that the reference referents
3393 // may point to dead objects at the point which PreSweepingGcVerification is called.
3394 size_t failures = VerifyHeapReferences(false);
3395 if (failures > 0) {
3396 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3397 << " failures";
3398 }
3399 {
3400 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3401 gc->SwapBitmaps();
3402 }
3403 }
3404 if (verify_pre_sweeping_rosalloc_) {
3405 RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3406 }
3407 }
3408
3409 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3410 // Only pause if we have to do some verification.
3411 Thread* const self = Thread::Current();
3412 TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
3413 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3414 if (verify_system_weaks_) {
3415 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3416 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3417 mark_sweep->VerifySystemWeaks();
3418 }
3419 if (verify_post_gc_rosalloc_) {
3420 RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
3421 }
3422 if (verify_post_gc_heap_) {
3423 TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
3424 size_t failures = VerifyHeapReferences();
3425 if (failures > 0) {
3426 LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
3427 << " failures";
3428 }
3429 }
3430 }
3431
3432 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
3433 if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
3434 collector::GarbageCollector::ScopedPause pause(gc, false);
3435 PostGcVerificationPaused(gc);
3436 }
3437 }
3438
3439 void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
3440 TimingLogger::ScopedTiming t(name, timings);
3441 for (const auto& space : continuous_spaces_) {
3442 if (space->IsRosAllocSpace()) {
3443 VLOG(heap) << name << " : " << space->GetName();
3444 space->AsRosAllocSpace()->Verify();
3445 }
3446 }
3447 }
3448
3449 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
3450 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
3451 MutexLock mu(self, *gc_complete_lock_);
3452 return WaitForGcToCompleteLocked(cause, self);
3453 }
3454
3455 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
3456 gc_complete_cond_->CheckSafeToWait(self);
3457 collector::GcType last_gc_type = collector::kGcTypeNone;
3458 GcCause last_gc_cause = kGcCauseNone;
3459 uint64_t wait_start = NanoTime();
3460 while (collector_type_running_ != kCollectorTypeNone) {
3461 if (self != task_processor_->GetRunningThread()) {
3462 // The current thread is about to wait for a currently running
3463 // collection to finish. If the waiting thread is not the heap
3464 // task daemon thread, the currently running collection is
3465 // considered as a blocking GC.
3466 running_collection_is_blocking_ = true;
3467 VLOG(gc) << "Waiting for a blocking GC " << cause;
3468 }
3469 SCOPED_TRACE << "GC: Wait For Completion " << cause;
3470 // We must wait, change thread state then sleep on gc_complete_cond_;
3471 gc_complete_cond_->Wait(self);
3472 last_gc_type = last_gc_type_;
3473 last_gc_cause = last_gc_cause_;
3474 }
3475 uint64_t wait_time = NanoTime() - wait_start;
3476 total_wait_time_ += wait_time;
3477 if (wait_time > long_pause_log_threshold_) {
3478 LOG(INFO) << "WaitForGcToComplete blocked " << cause << " on " << last_gc_cause << " for "
3479 << PrettyDuration(wait_time);
3480 }
3481 if (self != task_processor_->GetRunningThread()) {
3482 // The current thread is about to run a collection. If the thread
3483 // is not the heap task daemon thread, it's considered as a
3484 // blocking GC (i.e., blocking itself).
3485 running_collection_is_blocking_ = true;
3486 // Don't log fake "GC" types that are only used for debugger or hidden APIs. If we log these,
3487 // it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
3488 if (cause == kGcCauseForAlloc ||
3489 cause == kGcCauseDisableMovingGc) {
3490 VLOG(gc) << "Starting a blocking GC " << cause;
3491 }
3492 }
3493 return last_gc_type;
3494 }
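// Example of the blocking-GC classification above: if an application thread fails an allocation
// and waits here while the HeapTaskDaemon is already running a background collection, that
// collection is re-classified as blocking and is later counted in blocking_gc_count_ by
// FinishGC(); the same wait performed by the HeapTaskDaemon itself is not counted.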
3495
3496 void Heap::DumpForSigQuit(std::ostream& os) {
3497 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
3498 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
3499 DumpGcPerformanceInfo(os);
3500 }
3501
3502 size_t Heap::GetPercentFree() {
3503 return static_cast<size_t>(100.0f * static_cast<float>(
3504 GetFreeMemory()) / target_footprint_.load(std::memory_order_relaxed));
3505 }
3506
3507 void Heap::SetIdealFootprint(size_t target_footprint) {
3508 if (target_footprint > GetMaxMemory()) {
3509 VLOG(gc) << "Clamp target GC heap from " << PrettySize(target_footprint) << " to "
3510 << PrettySize(GetMaxMemory());
3511 target_footprint = GetMaxMemory();
3512 }
3513 target_footprint_.store(target_footprint, std::memory_order_relaxed);
3514 }
3515
3516 bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
3517 if (kMovingCollector) {
3518 space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true);
3519 if (space != nullptr) {
3520 // TODO: Check large object?
3521 return space->CanMoveObjects();
3522 }
3523 }
3524 return false;
3525 }
3526
3527 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
3528 for (auto* collector : garbage_collectors_) {
3529 if (collector->GetCollectorType() == collector_type_ &&
3530 collector->GetGcType() == gc_type) {
3531 return collector;
3532 }
3533 }
3534 return nullptr;
3535 }
3536
3537 double Heap::HeapGrowthMultiplier() const {
3538 // If we don't care about pause times, we are in the background, so return 1.0.
3539 if (!CareAboutPauseTimes()) {
3540 return 1.0;
3541 }
3542 return foreground_heap_growth_multiplier_;
3543 }
3544
3545 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
3546 size_t bytes_allocated_before_gc) {
3547 // We know what our utilization is at this moment.
3548 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
3549 const size_t bytes_allocated = GetBytesAllocated();
3550 // Trace the new heap size after the GC is finished.
3551 TraceHeapSize(bytes_allocated);
3552 uint64_t target_size, grow_bytes;
3553 collector::GcType gc_type = collector_ran->GetGcType();
3554 MutexLock mu(Thread::Current(), process_state_update_lock_);
3555 // Use the multiplier to grow more for foreground.
3556 const double multiplier = HeapGrowthMultiplier();
3557 if (gc_type != collector::kGcTypeSticky) {
3558 // Grow the heap for non sticky GC.
3559 uint64_t delta = bytes_allocated * (1.0 / GetTargetHeapUtilization() - 1.0);
3560 DCHECK_LE(delta, std::numeric_limits<size_t>::max()) << "bytes_allocated=" << bytes_allocated
3561 << " target_utilization_=" << target_utilization_;
3562 grow_bytes = std::min(delta, static_cast<uint64_t>(max_free_));
3563 grow_bytes = std::max(grow_bytes, static_cast<uint64_t>(min_free_));
3564 target_size = bytes_allocated + static_cast<uint64_t>(grow_bytes * multiplier);
3565 next_gc_type_ = collector::kGcTypeSticky;
3566 } else {
3567 collector::GcType non_sticky_gc_type = NonStickyGcType();
3568 // Find what the next non sticky collector will be.
3569 collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
3570 if (use_generational_cc_) {
3571 if (non_sticky_collector == nullptr) {
3572 non_sticky_collector = FindCollectorByGcType(collector::kGcTypePartial);
3573 }
3574 CHECK(non_sticky_collector != nullptr);
3575 }
3576 double sticky_gc_throughput_adjustment = GetStickyGcThroughputAdjustment(use_generational_cc_);
3577
3578 // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3579 // do another sticky collection next.
3580 // We also check that the bytes allocated aren't over the target_footprint, or
3581 // concurrent_start_bytes in case of concurrent GCs, in order to prevent a
3582 // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
3583 // if the sticky GC throughput always remained >= the full/partial throughput.
3584 size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
3585 if (current_gc_iteration_.GetEstimatedThroughput() * sticky_gc_throughput_adjustment >=
3586 non_sticky_collector->GetEstimatedMeanThroughput() &&
3587 non_sticky_collector->NumberOfIterations() > 0 &&
3588 bytes_allocated <= (IsGcConcurrent() ? concurrent_start_bytes_ : target_footprint)) {
3589 next_gc_type_ = collector::kGcTypeSticky;
3590 } else {
3591 next_gc_type_ = non_sticky_gc_type;
3592 }
3593 // If we have freed enough memory, shrink the heap back down.
3594 const size_t adjusted_max_free = static_cast<size_t>(max_free_ * multiplier);
3595 if (bytes_allocated + adjusted_max_free < target_footprint) {
3596 target_size = bytes_allocated + adjusted_max_free;
3597 grow_bytes = max_free_;
3598 } else {
3599 target_size = std::max(bytes_allocated, target_footprint);
3600 // The same whether jank perceptible or not; just avoid the adjustment.
3601 grow_bytes = 0;
3602 }
3603 }
3604 CHECK_LE(target_size, std::numeric_limits<size_t>::max());
3605 if (!ignore_target_footprint_) {
3606 SetIdealFootprint(target_size);
3607 // Store target size (computed with foreground heap growth multiplier) for updating
3608 // target_footprint_ when process state switches to foreground.
3609 // target_size = 0 ensures that target_footprint_ is not updated on
3610 // process-state switch.
3611 min_foreground_target_footprint_ =
3612 (multiplier <= 1.0 && grow_bytes > 0)
3613 ? bytes_allocated + static_cast<size_t>(grow_bytes * foreground_heap_growth_multiplier_)
3614 : 0;
3615
3616 if (IsGcConcurrent()) {
3617 const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
3618 current_gc_iteration_.GetFreedLargeObjectBytes() +
3619 current_gc_iteration_.GetFreedRevokeBytes();
3620 // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3621 // how many bytes were allocated during the GC we need to add freed_bytes back on.
3622 // Almost always bytes_allocated + freed_bytes >= bytes_allocated_before_gc.
3623 const size_t bytes_allocated_during_gc =
3624 UnsignedDifference(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
3625 // Calculate when to perform the next ConcurrentGC.
3626 // Estimate how many remaining bytes we will have when we need to start the next GC.
3627 size_t remaining_bytes = bytes_allocated_during_gc;
3628 remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
3629 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
3630 size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
3631 if (UNLIKELY(remaining_bytes > target_footprint)) {
3632 // This should never happen: it would mean the estimated allocation rate alone exceeds the
3633 // application's entire footprint. Schedule
3634 // another GC nearly straight away.
3635 remaining_bytes = std::min(kMinConcurrentRemainingBytes, target_footprint);
3636 }
3637 DCHECK_LE(target_footprint_.load(std::memory_order_relaxed), GetMaxMemory());
3638 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3639 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
3640 // right away.
3641 concurrent_start_bytes_ = std::max(target_footprint - remaining_bytes, bytes_allocated);
3642 }
3643 }
3644 }
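// Worked example for the non-sticky branch (all values hypothetical): with
// bytes_allocated = 40 MB, a target utilization of 0.75, min_free_ = 512 KB, max_free_ = 8 MB
// and a foreground multiplier of 2.0, delta is about 40 MB * (1/0.75 - 1) = 13.3 MB, which is
// clamped to max_free_ = 8 MB, so the new target footprint is 40 MB + 8 MB * 2.0 = 56 MB. For
// concurrent collectors, concurrent_start_bytes_ is then set below that target so the next
// background GC starts before the footprint is reached.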
3645
3646 void Heap::ClampGrowthLimit() {
3647 // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
3648 ScopedObjectAccess soa(Thread::Current());
3649 WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
3650 capacity_ = growth_limit_;
3651 for (const auto& space : continuous_spaces_) {
3652 if (space->IsMallocSpace()) {
3653 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3654 malloc_space->ClampGrowthLimit();
3655 }
3656 }
3657 if (collector_type_ == kCollectorTypeCC) {
3658 DCHECK(region_space_ != nullptr);
3659 // Twice the capacity as CC needs extra space for evacuating objects.
3660 region_space_->ClampGrowthLimit(2 * capacity_);
3661 }
3662 // This space isn't added for performance reasons.
3663 if (main_space_backup_.get() != nullptr) {
3664 main_space_backup_->ClampGrowthLimit();
3665 }
3666 }
3667
3668 void Heap::ClearGrowthLimit() {
3669 if (target_footprint_.load(std::memory_order_relaxed) == growth_limit_
3670 && growth_limit_ < capacity_) {
3671 target_footprint_.store(capacity_, std::memory_order_relaxed);
3672 concurrent_start_bytes_ =
3673 UnsignedDifference(capacity_, kMinConcurrentRemainingBytes);
3674 }
3675 growth_limit_ = capacity_;
3676 ScopedObjectAccess soa(Thread::Current());
3677 for (const auto& space : continuous_spaces_) {
3678 if (space->IsMallocSpace()) {
3679 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3680 malloc_space->ClearGrowthLimit();
3681 malloc_space->SetFootprintLimit(malloc_space->Capacity());
3682 }
3683 }
3684 // This space isn't added for performance reasons.
3685 if (main_space_backup_.get() != nullptr) {
3686 main_space_backup_->ClearGrowthLimit();
3687 main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3688 }
3689 }
3690
3691 void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
3692 ScopedObjectAccess soa(self);
3693 ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
3694 jvalue args[1];
3695 args[0].l = arg.get();
3696 InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
3697 // Restore object in case it gets moved.
3698 *object = soa.Decode<mirror::Object>(arg.get());
3699 }
3700
3701 void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
3702 bool force_full,
3703 uint32_t observed_gc_num,
3704 ObjPtr<mirror::Object>* obj) {
3705 StackHandleScope<1> hs(self);
3706 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3707 RequestConcurrentGC(self, kGcCauseBackground, force_full, observed_gc_num);
3708 }
3709
3710 class Heap::ConcurrentGCTask : public HeapTask {
3711 public:
3712 ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full, uint32_t gc_num)
3713 : HeapTask(target_time), cause_(cause), force_full_(force_full), my_gc_num_(gc_num) {}
3714 void Run(Thread* self) override {
3715 Runtime* runtime = Runtime::Current();
3716 gc::Heap* heap = runtime->GetHeap();
3717 DCHECK(GCNumberLt(my_gc_num_, heap->GetCurrentGcNum() + 2)); // <= current_gc_num + 1
3718 heap->ConcurrentGC(self, cause_, force_full_, my_gc_num_);
3719 CHECK(!GCNumberLt(heap->GetCurrentGcNum(), my_gc_num_) || runtime->IsShuttingDown(self));
3720 }
3721
3722 private:
3723 const GcCause cause_;
3724 const bool force_full_; // If true, force full (or partial) collection.
3725 const uint32_t my_gc_num_; // Sequence number of requested GC.
3726 };
3727
3728 static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
3729 Runtime* runtime = Runtime::Current();
3730 return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3731 !self->IsHandlingStackOverflow();
3732 }
3733
3734 bool Heap::RequestConcurrentGC(Thread* self,
3735 GcCause cause,
3736 bool force_full,
3737 uint32_t observed_gc_num) {
3738 uint32_t max_gc_requested = max_gc_requested_.load(std::memory_order_relaxed);
3739 if (!GCNumberLt(observed_gc_num, max_gc_requested)) {
3740 // observed_gc_num >= max_gc_requested: Nobody beat us to requesting the next gc.
3741 if (CanAddHeapTask(self)) {
3742 // Since observed_gc_num >= max_gc_requested, this increases max_gc_requested_, if successful.
3743 if (max_gc_requested_.CompareAndSetStrongRelaxed(max_gc_requested, observed_gc_num + 1)) {
3744 task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away.
3745 cause,
3746 force_full,
3747 observed_gc_num + 1));
3748 }
3749 DCHECK(GCNumberLt(observed_gc_num, max_gc_requested_.load(std::memory_order_relaxed)));
3750 // If we increased max_gc_requested_, then we added a task that will eventually cause
3751 // gcs_completed_ to be incremented (to at least observed_gc_num + 1).
3752 // If the CAS failed, somebody else did.
3753 return true;
3754 }
3755 return false;
3756 }
3757 return true; // Vacuously.
3758 }
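// Illustrative request/completion handshake (example numbers): a mutator observes
// gcs_completed_ == 7 and calls RequestConcurrentGC(..., observed_gc_num=7). If
// max_gc_requested_ is still 7, the CAS advances it to 8 and a ConcurrentGCTask with
// gc_num 8 is queued; a racing caller that loses the CAS still returns true, because the
// winner's task will eventually push gcs_completed_ to at least 8.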
3759
3760 void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full, uint32_t requested_gc_num) {
3761 if (!Runtime::Current()->IsShuttingDown(self)) {
3762 // Wait for any GCs currently running to finish. If this incremented GC number, we're done.
3763 WaitForGcToComplete(cause, self);
3764 if (GCNumberLt(GetCurrentGcNum(), requested_gc_num)) {
3765 collector::GcType next_gc_type = next_gc_type_;
3766 // If forcing full and next gc type is sticky, override with a non-sticky type.
3767 if (force_full && next_gc_type == collector::kGcTypeSticky) {
3768 next_gc_type = NonStickyGcType();
3769 }
3770 // If we can't run the GC type we wanted to run, find the next appropriate one and try
3771 // that instead. E.g. can't do partial, so do full instead.
3772 // We must ensure that we run something that ends up incrementing gcs_completed_.
3773 // In the kGcTypePartial case, the initial CollectGarbageInternal call may not have that
3774 // effect, but the subsequent kGcTypeFull call will.
3775 if (CollectGarbageInternal(next_gc_type, cause, false, requested_gc_num)
3776 == collector::kGcTypeNone) {
3777 for (collector::GcType gc_type : gc_plan_) {
3778 if (!GCNumberLt(GetCurrentGcNum(), requested_gc_num)) {
3779 // Somebody did it for us.
3780 break;
3781 }
3782 // Attempt to run the collector, if we succeed, we are done.
3783 if (gc_type > next_gc_type &&
3784 CollectGarbageInternal(gc_type, cause, false, requested_gc_num)
3785 != collector::kGcTypeNone) {
3786 break;
3787 }
3788 }
3789 }
3790 }
3791 }
3792 }
3793
3794 class Heap::CollectorTransitionTask : public HeapTask {
3795 public:
3796 explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
3797
3798 void Run(Thread* self) override {
3799 gc::Heap* heap = Runtime::Current()->GetHeap();
3800 heap->DoPendingCollectorTransition();
3801 heap->ClearPendingCollectorTransition(self);
3802 }
3803 };
3804
3805 void Heap::ClearPendingCollectorTransition(Thread* self) {
3806 MutexLock mu(self, *pending_task_lock_);
3807 pending_collector_transition_ = nullptr;
3808 }
3809
3810 void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
3811 Thread* self = Thread::Current();
3812 desired_collector_type_ = desired_collector_type;
3813 if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
3814 return;
3815 }
3816 if (collector_type_ == kCollectorTypeCC) {
3817 // For CC, we invoke a full compaction when going to the background, but the collector type
3818 // doesn't change.
3819 DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground);
3820 }
3821 DCHECK_NE(collector_type_, kCollectorTypeCCBackground);
3822 CollectorTransitionTask* added_task = nullptr;
3823 const uint64_t target_time = NanoTime() + delta_time;
3824 {
3825 MutexLock mu(self, *pending_task_lock_);
3826 // If we have an existing collector transition, update the target time to be the new target.
3827 if (pending_collector_transition_ != nullptr) {
3828 task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
3829 return;
3830 }
3831 added_task = new CollectorTransitionTask(target_time);
3832 pending_collector_transition_ = added_task;
3833 }
3834 task_processor_->AddTask(self, added_task);
3835 }
3836
3837 class Heap::HeapTrimTask : public HeapTask {
3838 public:
3839 explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
3840 void Run(Thread* self) override {
3841 gc::Heap* heap = Runtime::Current()->GetHeap();
3842 heap->Trim(self);
3843 heap->ClearPendingTrim(self);
3844 }
3845 };
3846
3847 void Heap::ClearPendingTrim(Thread* self) {
3848 MutexLock mu(self, *pending_task_lock_);
3849 pending_heap_trim_ = nullptr;
3850 }
3851
3852 void Heap::RequestTrim(Thread* self) {
3853 if (!CanAddHeapTask(self)) {
3854 return;
3855 }
3856 // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3857 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3858 // a space it will hold its lock and can become a cause of jank.
3859 // Note that the large object space trims itself, and the Zygote space was trimmed at fork time
3860 // and is unchanging thereafter.
3861
3862 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3863 // because that only marks object heads, so a large array looks like lots of empty space. We
3864 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
3865 // to utilization (which is probably inversely proportional to how much benefit we can expect).
3866 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3867 // not how much use we're making of those pages.
3868 HeapTrimTask* added_task = nullptr;
3869 {
3870 MutexLock mu(self, *pending_task_lock_);
3871 if (pending_heap_trim_ != nullptr) {
3872 // Already have a heap trim request in task processor, ignore this request.
3873 return;
3874 }
3875 added_task = new HeapTrimTask(kHeapTrimWait);
3876 pending_heap_trim_ = added_task;
3877 }
3878 task_processor_->AddTask(self, added_task);
3879 }
3880
3881 void Heap::IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke) {
3882 size_t previous_num_bytes_freed_revoke =
3883 num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_relaxed);
3884 // Check that the updated value does not exceed the number of bytes allocated. There is a risk
3885 // of execution being suspended between the increment above and the CHECK below, so the check
3886 // uses previous_num_bytes_freed_revoke + freed_bytes_revoke rather than re-reading the counter.
3887 CHECK_GE(num_bytes_allocated_.load(std::memory_order_relaxed),
3888 previous_num_bytes_freed_revoke + freed_bytes_revoke);
3889 }
3890
3891 void Heap::RevokeThreadLocalBuffers(Thread* thread) {
3892 if (rosalloc_space_ != nullptr) {
3893 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3894 if (freed_bytes_revoke > 0U) {
3895 IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
3896 }
3897 }
3898 if (bump_pointer_space_ != nullptr) {
3899 CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
3900 }
3901 if (region_space_ != nullptr) {
3902 CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
3903 }
3904 }
3905
3906 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3907 if (rosalloc_space_ != nullptr) {
3908 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3909 if (freed_bytes_revoke > 0U) {
3910 IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
3911 }
3912 }
3913 }
3914
3915 void Heap::RevokeAllThreadLocalBuffers() {
3916 if (rosalloc_space_ != nullptr) {
3917 size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
3918 if (freed_bytes_revoke > 0U) {
3919 IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
3920 }
3921 }
3922 if (bump_pointer_space_ != nullptr) {
3923 CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
3924 }
3925 if (region_space_ != nullptr) {
3926 CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
3927 }
3928 }
3929
3930 void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
3931 env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
3932 WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
3933 static_cast<jlong>(timeout));
3934 }
3935
3936 // For GC triggering purposes, we count old (pre-last-GC) and new native allocations as
3937 // different fractions of Java allocations.
3938 // For now, we essentially do not count old native allocations at all, so that we can preserve the
3939 // existing behavior of not limiting native heap size. If we seriously considered it, we would
3940 // have to adjust collection thresholds when we encounter large amounts of old native memory,
3941 // and handle native out-of-memory situations.
3942
3943 static constexpr size_t kOldNativeDiscountFactor = 65536; // Approximately infinite for now.
3944 static constexpr size_t kNewNativeDiscountFactor = 2;
3945
3946 // If weighted java + native memory use exceeds our target by a factor of kStopForNativeFactor,
3947 // and current native memory use exceeds stop_for_native_allocs_, we wait for GC to complete to
3948 // avoid running out of memory.
3949 static constexpr float kStopForNativeFactor = 4.0;
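// Worked example of the weighting (illustrative numbers): with 64 MiB of old (pre-last-GC)
// native memory and 20 MiB allocated since, the weighted contribution below is roughly
// 20 MiB / 2 + 64 MiB / 65536 ~= 10 MiB + 1 KiB, i.e. old native memory is essentially
// ignored while recent native allocation counts at half weight against the Java GC trigger.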
3950
3951 // Return the ratio of the weighted native + java allocated bytes to its target value.
3952 // A return value > 1.0 means we should collect. Significantly larger values mean we're falling
3953 // behind.
3954 inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent) {
3955 // Collection check for native allocation. Does not enforce Java heap bounds.
3956 // With adj_start_bytes defined below, effectively checks
3957 // <java bytes allocd> + c1*<old native allocd> + c2*<new native allocd> >= adj_start_bytes,
3958 // where c1 and c2 are the reciprocals of the discount factors defined above.
3959 size_t old_native_bytes = old_native_bytes_allocated_.load(std::memory_order_relaxed);
3960 if (old_native_bytes > current_native_bytes) {
3961 // Net decrease; skip the check, but update old value.
3962 // It's OK to lose an update if two stores race.
3963 old_native_bytes_allocated_.store(current_native_bytes, std::memory_order_relaxed);
3964 return 0.0;
3965 } else {
3966 size_t new_native_bytes = UnsignedDifference(current_native_bytes, old_native_bytes);
3967 size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor
3968 + old_native_bytes / kOldNativeDiscountFactor;
3969 size_t add_bytes_allowed = static_cast<size_t>(
3970 NativeAllocationGcWatermark() * HeapGrowthMultiplier());
3971 size_t java_gc_start_bytes = is_gc_concurrent
3972 ? concurrent_start_bytes_
3973 : target_footprint_.load(std::memory_order_relaxed);
3974 size_t adj_start_bytes = UnsignedSum(java_gc_start_bytes,
3975 add_bytes_allowed / kNewNativeDiscountFactor);
3976 return static_cast<float>(GetBytesAllocated() + weighted_native_bytes)
3977 / static_cast<float>(adj_start_bytes);
3978 }
3979 }
3980
3981 inline void Heap::CheckGCForNative(Thread* self) {
3982 bool is_gc_concurrent = IsGcConcurrent();
3983 uint32_t starting_gc_num = GetCurrentGcNum();
3984 size_t current_native_bytes = GetNativeBytes();
3985 float gc_urgency = NativeMemoryOverTarget(current_native_bytes, is_gc_concurrent);
3986 if (UNLIKELY(gc_urgency >= 1.0)) {
3987 if (is_gc_concurrent) {
3988 bool requested =
3989 RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true, starting_gc_num);
3990 if (gc_urgency > kStopForNativeFactor
3991 && current_native_bytes > stop_for_native_allocs_) {
3992 // We're in danger of running out of memory due to rampant native allocation.
3993 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
3994 LOG(INFO) << "Stopping for native allocation, urgency: " << gc_urgency;
3995 }
3996 if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) == collector::kGcTypeNone) {
3997 DCHECK(!requested
3998 || GCNumberLt(starting_gc_num, max_gc_requested_.load(std::memory_order_relaxed)));
3999 // TODO: Eventually sleep here again.
4000 }
4001 }
4002 } else {
4003 CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false, starting_gc_num + 1);
4004 }
4005 }
4006 }
4007
4008 // About kNotifyNativeInterval allocations have occurred. Check whether we should garbage collect.
4009 void Heap::NotifyNativeAllocations(JNIEnv* env) {
4010 native_objects_notified_.fetch_add(kNotifyNativeInterval, std::memory_order_relaxed);
4011 CheckGCForNative(ThreadForEnv(env));
4012 }
4013
4014 // Register a native allocation with an explicit size.
4015 // This should only be done for large allocations of non-malloc memory, which we wouldn't
4016 // otherwise see.
4017 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
4018 // Cautiously check for a wrapped negative bytes argument.
4019 DCHECK(sizeof(size_t) < 8 || bytes < (std::numeric_limits<size_t>::max() / 2));
4020 native_bytes_registered_.fetch_add(bytes, std::memory_order_relaxed);
4021 uint32_t objects_notified =
4022 native_objects_notified_.fetch_add(1, std::memory_order_relaxed);
4023 if (objects_notified % kNotifyNativeInterval == kNotifyNativeInterval - 1
4024 || bytes > kCheckImmediatelyThreshold) {
4025 CheckGCForNative(ThreadForEnv(env));
4026 }
4027 }
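// (Assumed context.) RegisterNativeAllocation/RegisterNativeFree are the native side of the
// dalvik.system.VMRuntime registerNativeAllocation/registerNativeFree entry points, used for
// example by NativeAllocationRegistry to account for large non-malloc buffers owned by Java
// objects.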
4028
4029 void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
4030 size_t allocated;
4031 size_t new_freed_bytes;
4032 do {
4033 allocated = native_bytes_registered_.load(std::memory_order_relaxed);
4034 new_freed_bytes = std::min(allocated, bytes);
4035 // We should not be registering more freed bytes than we registered as allocated.
4036 // But keep going correctly in non-debug builds.
4037 DCHECK_EQ(new_freed_bytes, bytes);
4038 } while (!native_bytes_registered_.CompareAndSetWeakRelaxed(allocated,
4039 allocated - new_freed_bytes));
4040 }
4041
4042 size_t Heap::GetTotalMemory() const {
4043 return std::max(target_footprint_.load(std::memory_order_relaxed), GetBytesAllocated());
4044 }
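// (Assumed context.) GetTotalMemory() backs java.lang.Runtime.totalMemory(); as the max above
// shows, the reported value never drops below the bytes currently allocated even when the
// target footprint is smaller.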
4045
4046 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
4047 DCHECK(mod_union_table != nullptr);
4048 mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
4049 }
4050
4051 void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
4052 // Compare rounded sizes since the allocation may have been retried after rounding the size.
4053 // See b/37885600
4054 CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
4055 (c->IsVariableSize() ||
4056 RoundUp(c->GetObjectSize(), kObjectAlignment) ==
4057 RoundUp(byte_count, kObjectAlignment)))
4058 << "ClassFlags=" << c->GetClassFlags()
4059 << " IsClassClass=" << c->IsClassClass()
4060 << " byte_count=" << byte_count
4061 << " IsVariableSize=" << c->IsVariableSize()
4062 << " ObjectSize=" << c->GetObjectSize()
4063 << " sizeof(Class)=" << sizeof(mirror::Class)
4064 << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag=*/ "klass");
4065 CHECK_GE(byte_count, sizeof(mirror::Object));
4066 }
4067
4068 void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
4069 CHECK(remembered_set != nullptr);
4070 space::Space* space = remembered_set->GetSpace();
4071 CHECK(space != nullptr);
4072 CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
4073 remembered_sets_.Put(space, remembered_set);
4074 CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
4075 }
4076
4077 void Heap::RemoveRememberedSet(space::Space* space) {
4078 CHECK(space != nullptr);
4079 auto it = remembered_sets_.find(space);
4080 CHECK(it != remembered_sets_.end());
4081 delete it->second;
4082 remembered_sets_.erase(it);
4083 CHECK(remembered_sets_.find(space) == remembered_sets_.end());
4084 }
4085
4086 void Heap::ClearMarkedObjects() {
4087 // Clear all of the spaces' mark bitmaps.
4088 for (const auto& space : GetContinuousSpaces()) {
4089 if (space->GetLiveBitmap() != nullptr && !space->HasBoundBitmaps()) {
4090 space->GetMarkBitmap()->Clear();
4091 }
4092 }
4093 // Clear the marked objects in the discontinuous space object sets.
4094 for (const auto& space : GetDiscontinuousSpaces()) {
4095 space->GetMarkBitmap()->Clear();
4096 }
4097 }
4098
4099 void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
4100 allocation_records_.reset(records);
4101 }
4102
4103 void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
4104 if (IsAllocTrackingEnabled()) {
4105 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4106 if (IsAllocTrackingEnabled()) {
4107 GetAllocationRecords()->VisitRoots(visitor);
4108 }
4109 }
4110 }
4111
4112 void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
4113 if (IsAllocTrackingEnabled()) {
4114 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4115 if (IsAllocTrackingEnabled()) {
4116 GetAllocationRecords()->SweepAllocationRecords(visitor);
4117 }
4118 }
4119 }
4120
4121 void Heap::AllowNewAllocationRecords() const {
4122 CHECK(!kUseReadBarrier);
4123 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4124 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4125 if (allocation_records != nullptr) {
4126 allocation_records->AllowNewAllocationRecords();
4127 }
4128 }
4129
4130 void Heap::DisallowNewAllocationRecords() const {
4131 CHECK(!kUseReadBarrier);
4132 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4133 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4134 if (allocation_records != nullptr) {
4135 allocation_records->DisallowNewAllocationRecords();
4136 }
4137 }
4138
4139 void Heap::BroadcastForNewAllocationRecords() const {
4140 // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
4141 // be set to false while some threads are waiting for system weak access in
4142 // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
4143 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4144 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4145 if (allocation_records != nullptr) {
4146 allocation_records->BroadcastForNewAllocationRecords();
4147 }
4148 }
4149
4150 // Perfetto Java Heap Profiler Support.
4151
4152 // Perfetto initialization.
4153 void Heap::InitPerfettoJavaHeapProf() {
4154 // Initialize Perfetto Heap info and Heap id.
4155 uint32_t heap_id = 1; // Initialize to 1, to be overwritten by Perfetto heap id.
4156 #ifdef ART_TARGET_ANDROID
4157 // Register the heap and create the heapid.
4158 // Use a Perfetto heap name = "com.android.art" for the Java Heap Profiler.
4159 AHeapInfo* info = AHeapInfo_create("com.android.art");
4160 // Set the Enable Callback, there is no callback data ("nullptr").
4161 AHeapInfo_setEnabledCallback(info, &EnableHeapSamplerCallback, &heap_sampler_);
4162 // Set the Disable Callback.
4163 AHeapInfo_setDisabledCallback(info, &DisableHeapSamplerCallback, &heap_sampler_);
4164 heap_id = AHeapProfile_registerHeap(info);
4165 // Do not enable the Java Heap Profiler in this case; wait for Perfetto to enable it through
4166 // the callback function.
4167 #else
4168 // This is the host case, enable the Java Heap Profiler for host testing.
4169 // Perfetto API is currently not available on host.
4170 heap_sampler_.EnableHeapSampler();
4171 #endif
4172 heap_sampler_.SetHeapID(heap_id);
4173 VLOG(heap) << "Java Heap Profiler Initialized";
4174 }
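// (Assumed context.) The "com.android.art" heap registered above is the name under which Java
// heap samples appear in Perfetto/heapprofd traces; on host, where the Perfetto API is
// unavailable, the sampler is simply enabled unconditionally for testing.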
4175
4176 // Check if the Java Heap Profiler is enabled and initialized.
4177 int Heap::CheckPerfettoJHPEnabled() {
4178 return GetHeapSampler().IsEnabled();
4179 }
4180
4181 void Heap::JHPCheckNonTlabSampleAllocation(Thread* self, mirror::Object* obj, size_t alloc_size) {
4182 bool take_sample = false;
4183 size_t bytes_until_sample = 0;
4184 HeapSampler& prof_heap_sampler = GetHeapSampler();
4185 if (obj != nullptr && prof_heap_sampler.IsEnabled()) {
4186 // An allocation occurred, sample it, even if non-Tlab.
4187 // In case take_sample is already set from the previous GetSampleOffset
4188 // because we tried the Tlab allocation first, we will not use this value.
4189 // A new value is generated below. Also bytes_until_sample will be updated.
4190 // Note that we are not using the return value from the GetSampleOffset in
4191 // the NonTlab case here.
4192 prof_heap_sampler.GetSampleOffset(alloc_size,
4193 self->GetTlabPosOffset(),
4194 &take_sample,
4195 &bytes_until_sample);
4196 prof_heap_sampler.SetBytesUntilSample(bytes_until_sample);
4197 if (take_sample) {
4198 prof_heap_sampler.ReportSample(obj, alloc_size);
4199 }
4200 VLOG(heap) << "JHP:NonTlab Non-moving or Large Allocation";
4201 }
4202 }
4203
4204 size_t Heap::JHPCalculateNextTlabSize(Thread* self,
4205 size_t jhp_def_tlab_size,
4206 size_t alloc_size,
4207 bool* take_sample,
4208 size_t* bytes_until_sample) {
4209 size_t next_tlab_size = jhp_def_tlab_size;
4210 if (CheckPerfettoJHPEnabled()) {
4211 size_t next_sample_point =
4212 GetHeapSampler().GetSampleOffset(alloc_size,
4213 self->GetTlabPosOffset(),
4214 take_sample,
4215 bytes_until_sample);
4216 next_tlab_size = std::min(next_sample_point, jhp_def_tlab_size);
4217 }
4218 return next_tlab_size;
4219 }
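// Illustrative behaviour of JHPCalculateNextTlabSize (example numbers): if the default TLAB
// size is 256 KiB but the sampler's next sample point is only 64 KiB away, the returned size
// is min(64 KiB, 256 KiB) = 64 KiB, so the allocation that reaches the sample point falls
// outside the TLAB fast path and can be reported.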
4220
4221 void Heap::AdjustSampleOffset(size_t adjustment) {
4222 GetHeapSampler().AdjustSampleOffset(adjustment);
4223 }
4224
4225 void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
4226 DCHECK(gc_stress_mode_);
4227 auto* const runtime = Runtime::Current();
4228 if (runtime->GetClassLinker()->IsInitialized() && !runtime->IsActiveTransaction()) {
4229 // Check if we should GC.
4230 bool new_backtrace = false;
4231 {
4232 static constexpr size_t kMaxFrames = 16u;
4233 MutexLock mu(self, *backtrace_lock_);
4234 FixedSizeBacktrace<kMaxFrames> backtrace;
4235 backtrace.Collect(/* skip_count= */ 2);
4236 uint64_t hash = backtrace.Hash();
4237 new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
4238 if (new_backtrace) {
4239 seen_backtraces_.insert(hash);
4240 }
4241 }
4242 if (new_backtrace) {
4243 StackHandleScope<1> hs(self);
4244 auto h = hs.NewHandleWrapper(obj);
4245 CollectGarbage(/* clear_soft_references= */ false);
4246 unique_backtrace_count_.fetch_add(1);
4247 } else {
4248 seen_backtrace_count_.fetch_add(1);
4249 }
4250 }
4251 }
4252
4253 void Heap::DisableGCForShutdown() {
4254 Thread* const self = Thread::Current();
4255 CHECK(Runtime::Current()->IsShuttingDown(self));
4256 MutexLock mu(self, *gc_complete_lock_);
4257 gc_disabled_for_shutdown_ = true;
4258 }
4259
4260 bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
4261 DCHECK_EQ(IsBootImageAddress(obj.Ptr()),
4262 any_of(boot_image_spaces_.begin(),
4263 boot_image_spaces_.end(),
4264 [obj](gc::space::ImageSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
4265 return space->HasAddress(obj.Ptr());
4266 }));
4267 return IsBootImageAddress(obj.Ptr());
4268 }
4269
4270 bool Heap::IsInBootImageOatFile(const void* p) const {
4271 DCHECK_EQ(IsBootImageAddress(p),
4272 any_of(boot_image_spaces_.begin(),
4273 boot_image_spaces_.end(),
4274 [p](gc::space::ImageSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
4275 return space->GetOatFile()->Contains(p);
4276 }));
4277 return IsBootImageAddress(p);
4278 }
4279
4280 void Heap::SetAllocationListener(AllocationListener* l) {
4281 AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, l);
4282
4283 if (old == nullptr) {
4284 Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4285 }
4286 }
4287
4288 void Heap::RemoveAllocationListener() {
4289 AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr);
4290
4291 if (old != nullptr) {
4292 Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
4293 }
4294 }
4295
4296 void Heap::SetGcPauseListener(GcPauseListener* l) {
4297 gc_pause_listener_.store(l, std::memory_order_relaxed);
4298 }
4299
4300 void Heap::RemoveGcPauseListener() {
4301 gc_pause_listener_.store(nullptr, std::memory_order_relaxed);
4302 }
4303
4304 mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
4305 AllocatorType allocator_type,
4306 size_t alloc_size,
4307 bool grow,
4308 size_t* bytes_allocated,
4309 size_t* usable_size,
4310 size_t* bytes_tl_bulk_allocated) {
4311 mirror::Object* ret = nullptr;
4312 bool take_sample = false;
4313 size_t bytes_until_sample = 0;
4314
4315 if (kUsePartialTlabs && alloc_size <= self->TlabRemainingCapacity()) {
4316 DCHECK_GT(alloc_size, self->TlabSize());
4317 // There is enough space if we grow the TLAB. Let's do that. This increases the
4318 // TLAB bytes.
4319 const size_t min_expand_size = alloc_size - self->TlabSize();
4320 size_t next_tlab_size = JHPCalculateNextTlabSize(self,
4321 kPartialTlabSize,
4322 alloc_size,
4323 &take_sample,
4324 &bytes_until_sample);
4325 const size_t expand_bytes = std::max(
4326 min_expand_size,
4327 std::min(self->TlabRemainingCapacity() - self->TlabSize(), next_tlab_size));
4328 if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, expand_bytes, grow))) {
4329 return nullptr;
4330 }
4331 *bytes_tl_bulk_allocated = expand_bytes;
4332 self->ExpandTlab(expand_bytes);
4333 DCHECK_LE(alloc_size, self->TlabSize());
4334 } else if (allocator_type == kAllocatorTypeTLAB) {
4335 DCHECK(bump_pointer_space_ != nullptr);
4336 size_t next_tlab_size = JHPCalculateNextTlabSize(self,
4337 kDefaultTLABSize,
4338 alloc_size,
4339 &take_sample,
4340 &bytes_until_sample);
4341 const size_t new_tlab_size = alloc_size + next_tlab_size;
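// E.g. (illustrative numbers): a 40-byte allocation with a 256 KiB default TLAB requests
// 256 KiB + 40 bytes, so both the current object and a full fresh TLAB fit in the new buffer.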
4342 if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow))) {
4343 return nullptr;
4344 }
4345 // Try allocating a new thread local buffer, if the allocation fails the space must be
4346 // full so return null.
4347 if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
4348 return nullptr;
4349 }
4350 *bytes_tl_bulk_allocated = new_tlab_size;
4351 if (CheckPerfettoJHPEnabled()) {
4352 VLOG(heap) << "JHP:kAllocatorTypeTLAB, New Tlab bytes allocated= " << new_tlab_size;
4353 }
4354 } else {
4355 DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
4356 DCHECK(region_space_ != nullptr);
4357 if (space::RegionSpace::kRegionSize >= alloc_size) {
4358 // Non-large. Check OOME for a tlab.
4359 if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
4360 space::RegionSpace::kRegionSize,
4361 grow))) {
4362 size_t def_pr_tlab_size = kUsePartialTlabs
4363 ? kPartialTlabSize
4364 : gc::space::RegionSpace::kRegionSize;
4365 size_t next_pr_tlab_size = JHPCalculateNextTlabSize(self,
4366 def_pr_tlab_size,
4367 alloc_size,
4368 &take_sample,
4369 &bytes_until_sample);
4370 const size_t new_tlab_size = kUsePartialTlabs
4371 ? std::max(alloc_size, next_pr_tlab_size)
4372 : next_pr_tlab_size;
4373 // Try to allocate a tlab.
4374 if (!region_space_->AllocNewTlab(self, new_tlab_size, bytes_tl_bulk_allocated)) {
4375 // Failed to allocate a tlab. Try non-tlab.
4376 ret = region_space_->AllocNonvirtual<false>(alloc_size,
4377 bytes_allocated,
4378 usable_size,
4379 bytes_tl_bulk_allocated);
4380 JHPCheckNonTlabSampleAllocation(self, ret, alloc_size);
4381 return ret;
4382 }
4383 // Fall-through to using the TLAB below.
4384 } else {
4385 // Check OOME for a non-tlab allocation.
4386 if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
4387 ret = region_space_->AllocNonvirtual<false>(alloc_size,
4388 bytes_allocated,
4389 usable_size,
4390 bytes_tl_bulk_allocated);
4391 JHPCheckNonTlabSampleAllocation(self, ret, alloc_size);
4392 return ret;
4393 }
4394 // Neither tlab nor non-tlab works. Give up.
4395 return nullptr;
4396 }
4397 } else {
4398 // Large. Check OOME.
4399 if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
4400 ret = region_space_->AllocNonvirtual<false>(alloc_size,
4401 bytes_allocated,
4402 usable_size,
4403 bytes_tl_bulk_allocated);
4404 JHPCheckNonTlabSampleAllocation(self, ret, alloc_size);
4405 return ret;
4406 }
4407 return nullptr;
4408 }
4409 }
4410 // Refilled TLAB, return.
4411 ret = self->AllocTlab(alloc_size);
4412 DCHECK(ret != nullptr);
4413 *bytes_allocated = alloc_size;
4414 *usable_size = alloc_size;
4415
4416 // JavaHeapProfiler: Send the thread information about this allocation in case a sample is
4417 // requested.
4418 // This is the fallthrough from both the if and else if above cases => Cases that use TLAB.
4419 if (CheckPerfettoJHPEnabled()) {
4420 if (take_sample) {
4421 GetHeapSampler().ReportSample(ret, alloc_size);
4422 // Update the bytes_until_sample now that the allocation is already done.
4423 GetHeapSampler().SetBytesUntilSample(bytes_until_sample);
4424 }
4425 VLOG(heap) << "JHP:Fallthrough Tlab allocation";
4426 }
4427
4428 return ret;
4429 }
4430
4431 const Verification* Heap::GetVerification() const {
4432 return verification_.get();
4433 }
4434
4435 void Heap::VlogHeapGrowth(size_t old_footprint, size_t new_footprint, size_t alloc_size) {
4436 VLOG(heap) << "Growing heap from " << PrettySize(old_footprint) << " to "
4437 << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
4438 }
4439
4440 class Heap::TriggerPostForkCCGcTask : public HeapTask {
4441 public:
4442 explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {}
4443 void Run(Thread* self) override {
4444 gc::Heap* heap = Runtime::Current()->GetHeap();
4445 // Trigger a GC, if not already done. The first GC after fork, whenever it
4446 // takes place, will adjust the thresholds to normal levels.
4447 if (heap->target_footprint_.load(std::memory_order_relaxed) == heap->growth_limit_) {
4448 heap->RequestConcurrentGC(self, kGcCauseBackground, false, heap->GetCurrentGcNum());
4449 }
4450 }
4451 };
4452
4453 void Heap::PostForkChildAction(Thread* self) {
4454 // Temporarily increase target_footprint_ and concurrent_start_bytes_ to
4455 // max values to avoid GC during app launch.
4456 if (collector_type_ == kCollectorTypeCC && !IsLowMemoryMode()) {
4457 // Set target_footprint_ to the largest allowed value.
4458 SetIdealFootprint(growth_limit_);
4459 // Set concurrent_start_bytes_ to half of the heap size.
4460 size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
4461 concurrent_start_bytes_ = std::max(target_footprint / 2, GetBytesAllocated());
4462
4463 GetTaskProcessor()->AddTask(
4464 self, new TriggerPostForkCCGcTask(NanoTime() + MsToNs(kPostForkMaxHeapDurationMS)));
4465 }
4466 }
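// Illustrative effect of PostForkChildAction (example numbers): with a 192 MiB growth limit,
// target_footprint_ is raised to 192 MiB and concurrent_start_bytes_ to ~96 MiB right after
// fork, postponing GC during app launch; the TriggerPostForkCCGcTask queued above requests a
// GC after kPostForkMaxHeapDurationMS if none has run yet, and that GC re-derives the normal
// thresholds.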
4467
4468 void Heap::VisitReflectiveTargets(ReflectiveValueVisitor *visit) {
4469 VisitObjectsPaused([&visit](mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
4470 art::ObjPtr<mirror::Class> klass(ref->GetClass());
4471 // All these classes are in the BootstrapClassLoader.
4472 if (!klass->IsBootStrapClassLoaded()) {
4473 return;
4474 }
4475 if (GetClassRoot<mirror::Method>()->IsAssignableFrom(klass) ||
4476 GetClassRoot<mirror::Constructor>()->IsAssignableFrom(klass)) {
4477 down_cast<mirror::Executable*>(ref)->VisitTarget(visit);
4478 } else if (art::GetClassRoot<art::mirror::Field>() == klass) {
4479 down_cast<mirror::Field*>(ref)->VisitTarget(visit);
4480 } else if (art::GetClassRoot<art::mirror::MethodHandle>()->IsAssignableFrom(klass)) {
4481 down_cast<mirror::MethodHandle*>(ref)->VisitTarget(visit);
4482 } else if (art::GetClassRoot<art::mirror::FieldVarHandle>()->IsAssignableFrom(klass)) {
4483 down_cast<mirror::FieldVarHandle*>(ref)->VisitTarget(visit);
4484 } else if (art::GetClassRoot<art::mirror::DexCache>()->IsAssignableFrom(klass)) {
4485 down_cast<mirror::DexCache*>(ref)->VisitReflectiveTargets(visit);
4486 }
4487 });
4488 }
4489
4490 bool Heap::AddHeapTask(gc::HeapTask* task) {
4491 Thread* const self = Thread::Current();
4492 if (!CanAddHeapTask(self)) {
4493 return false;
4494 }
4495 GetTaskProcessor()->AddTask(self, task);
4496 return true;
4497 }
4498
4499 } // namespace gc
4500 } // namespace art
4501