/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <sys/types.h>
#include <unistd.h>

#include <limits>
#include <memory>
#include <random>
#include <sstream>
#include <vector>

#include "allocation_listener.h"
#include "android-base/stringprintf.h"
#include "android-base/thread_annotations.h"
#include "art_field-inl.h"
#include "backtrace_helper.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/file_utils.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG.
#include "base/memory_tool.h"
#include "base/mutex.h"
#include "base/os.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "class_root-inl.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex/dex_file-inl.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_compact.h"
#include "gc/collector/mark_sweep.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/racing_check.h"
#include "gc/reference_processor.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "gc/task_processor.h"
#include "gc/verification.h"
#include "gc_pause_listener.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "heap-inl.h"
#include "heap-visit-objects-inl.h"
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jni/java_vm_ext.h"
#include "mirror/class-inl.h"
#include "mirror/executable-inl.h"
#include "mirror/field.h"
#include "mirror/method_handle_impl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "mirror/var_handle.h"
#include "nativehelper/scoped_local_ref.h"
#include "oat/image.h"
#include "obj_ptr-inl.h"
#ifdef ART_TARGET_ANDROID
#include "perfetto/heap_profile.h"
#endif
#include "reflection.h"
#include "runtime.h"
#include "javaheapprof/javaheapsampler.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verify_object-inl.h"
#include "well_known_classes.h"

#if defined(__BIONIC__) || defined(__GLIBC__) || defined(ANDROID_HOST_MUSL)
#include <malloc.h>  // For mallinfo()
#endif

namespace art HIDDEN {

#ifdef ART_TARGET_ANDROID
namespace {

// Callback used by Perfetto to enable the heap sampler.
void EnableHeapSamplerCallback(void* enable_ptr,
                               const AHeapProfileEnableCallbackInfo* enable_info_ptr) {
  HeapSampler* sampler_self = reinterpret_cast<HeapSampler*>(enable_ptr);
  // Set the ART profiler sampling interval to the value from Perfetto.
  uint64_t interval = AHeapProfileEnableCallbackInfo_getSamplingInterval(enable_info_ptr);
  if (interval > 0) {
    sampler_self->SetSamplingInterval(interval);
  }
  // Otherwise the default 4K sampling interval is kept. The default case shouldn't happen for the
  // Perfetto API: AHeapProfileEnableCallbackInfo_getSamplingInterval should always return the
  // requested (non-negative) sampling interval, which is a uint64_t and is checked for != 0.
  // Do not use the Heap here, even as a temporary: it will build, but test runs will silently
  // fail, because the Heap is not fully constructed yet in some cases.
  sampler_self->EnableHeapSampler();
}

// Callback used by Perfetto to disable the heap sampler.
void DisableHeapSamplerCallback(void* disable_ptr,
                                [[maybe_unused]] const AHeapProfileDisableCallbackInfo* info_ptr) {
  HeapSampler* sampler_self = reinterpret_cast<HeapSampler*>(disable_ptr);
  sampler_self->DisableHeapSampler();
}

}  // namespace
#endif
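
// A minimal sketch (not part of this file) of how these callbacks are presumably wired up in
// InitPerfettoJavaHeapProf(); the registration code lives elsewhere, so treat the names below as
// an illustrative assumption based on the perfetto/heap_profile.h API:
//
//   AHeapInfo* info = AHeapInfo_create("com.android.art");
//   AHeapInfo_setEnabledCallback(info, &EnableHeapSamplerCallback, &heap_sampler);
//   AHeapInfo_setDisabledCallback(info, &DisableHeapSamplerCallback, &heap_sampler);
//   uint32_t heap_id = AHeapProfile_registerHeap(info);  // Samples are then reported for heap_id.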

namespace gc {

DEFINE_RUNTIME_DEBUG_FLAG(Heap, kStressCollectorTransition);

// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
static double GetStickyGcThroughputAdjustment(bool use_generational_cc) {
  return use_generational_cc ? 0.5 : 1.0;
}
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
// How many reserve entries are at the end of the allocation stack; these are only needed if the
// allocation stack overflows.
static constexpr size_t kAllocationStackReserveSize = 1024;
// Default mark stack size in bytes.
static const size_t kDefaultMarkStackSize = 64 * KB;
// Define space names.
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
static const char* kNonMovingSpaceName = "non moving space";
static const char* kZygoteSpaceName = "zygote space";
static constexpr bool kGCALotMode = false;
// GC alot mode uses a small allocation stack to stress test a lot of GC.
static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
// Verify object mode uses a small allocation stack size since searching the allocation stack is
// slow.
static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
    sizeof(mirror::HeapReference<mirror::Object>);
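
// A worked example of the stack-size arithmetic above (hedged: it assumes the usual 32-bit
// compressed reference, i.e. sizeof(mirror::HeapReference<mirror::Object>) == 4):
//   kGcAlotAllocationStackSize       =  4 KB / 4 =    1024 entries
//   kVerifyObjectAllocationStackSize = 16 KB / 4 =    4096 entries
//   kDefaultAllocationStackSize      =  8 MB / 4 = 2097152 entries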

// If we violate BOTH of the following constraints, we throw OOME.
// They differ due to concurrent allocation.
// After a GC (due to allocation failure) we should retrieve at least this
// fraction of the current max heap size.
static constexpr double kMinFreedHeapAfterGcForAlloc = 0.05;
// After a GC (due to allocation failure), at least this fraction of the
// heap should be available.
static constexpr double kMinFreeHeapAfterGcForAlloc = 0.01;
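
// A minimal sketch of how these two constants combine (an assumption about the consuming code,
// which lives elsewhere in this file, not a quote of it): after a GC for allocation, an OOME is
// warranted only when BOTH constraints are violated, roughly:
//
//   bool ShouldThrowOomeAfterGcForAlloc(size_t bytes_freed, size_t bytes_free, size_t max_heap) {
//     return bytes_freed < kMinFreedHeapAfterGcForAlloc * max_heap &&
//            bytes_free < kMinFreeHeapAfterGcForAlloc * max_heap;
//   }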

// For deterministic compilation, we need the heap to be at a well-known address.
static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;

static const char* kRegionSpaceName = "main space (region space)";

// If true, we log all GCs in both the foreground and the background. Used for debugging.
static constexpr bool kLogAllGCs = false;

// Use the max heap for 2 seconds. This is smaller than the usual 5s window since we don't want
// to keep allocating with relaxed ergonomics for that long.
static constexpr size_t kPostForkMaxHeapDurationMS = 2000;

#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
// 320 MB (0x14000000) - (default non-moving space capacity).
// The value is picked to ensure it is aligned to the largest supported PMD
// size, which is 32 MB with a 16 KB page size on AArch64.
uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(([]() constexpr {
  constexpr size_t kBegin = 320 * MB - Heap::kDefaultNonMovingSpaceCapacity;
  constexpr int kMaxPMDSize = (kMaxPageSize / sizeof(uint64_t)) * kMaxPageSize;
  static_assert(IsAligned<kMaxPMDSize>(kBegin),
                "kPreferredAllocSpaceBegin should be aligned to the maximum "
                "supported PMD size.");
  return kBegin;
})());
#else
#ifdef __ANDROID__
// For 32-bit Android, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
#else
// For 32-bit host, use 0x40000000 because asan uses most of the space below this.
uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
#endif
#endif
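
// The alignment arithmetic above, worked through under the assumption (not visible in this file)
// that Heap::kDefaultNonMovingSpaceCapacity is 64 MB and kMaxPageSize is 16 KB:
//   kBegin      = 320 MB - 64 MB = 256 MB = 0x10000000
//   kMaxPMDSize = (16 KB / 8) * 16 KB = 2048 * 16 KB = 32 MB = 0x02000000
// 0x10000000 is a multiple of 0x02000000, so the static_assert holds.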

// Log GC on regular (but fairly large) intervals during GC stress mode.
// It is expected that the other runtime options will be used to reduce the usual logging.
// This allows us to make the logging much less verbose while still reporting some
// progress (biased towards expensive GCs), and while still reporting pathological cases.
static constexpr int64_t kGcStressModeGcLogSampleFrequencyNs = MsToNs(10000);

static inline bool CareAboutPauseTimes() {
  return Runtime::Current()->InJankPerceptibleProcessState();
}

static void VerifyBootImagesContiguity(const std::vector<gc::space::ImageSpace*>& image_spaces) {
  uint32_t boot_image_size = 0u;
  for (size_t i = 0u, num_spaces = image_spaces.size(); i != num_spaces; ) {
    const ImageHeader& image_header = image_spaces[i]->GetImageHeader();
    uint32_t reservation_size = image_header.GetImageReservationSize();
    uint32_t image_count = image_header.GetImageSpaceCount();

    CHECK_NE(image_count, 0u);
    CHECK_LE(image_count, num_spaces - i);
    CHECK_NE(reservation_size, 0u);
    for (size_t j = 1u; j != image_count; ++j) {
      CHECK_EQ(image_spaces[i + j]->GetImageHeader().GetComponentCount(), 0u);
      CHECK_EQ(image_spaces[i + j]->GetImageHeader().GetImageReservationSize(), 0u);
    }

    // Check the start of the heap.
    CHECK_EQ(image_spaces[0]->Begin() + boot_image_size, image_spaces[i]->Begin());
    // Check contiguous layout of images and oat files.
    const uint8_t* current_heap = image_spaces[i]->Begin();
    const uint8_t* current_oat = image_spaces[i]->GetImageHeader().GetOatFileBegin();
    for (size_t j = 0u; j != image_count; ++j) {
      const ImageHeader& current_header = image_spaces[i + j]->GetImageHeader();
      CHECK_EQ(current_heap, image_spaces[i + j]->Begin());
      CHECK_EQ(current_oat, current_header.GetOatFileBegin());
      current_heap += RoundUp(current_header.GetImageSize(), kElfSegmentAlignment);
      CHECK_GT(current_header.GetOatFileEnd(), current_header.GetOatFileBegin());
      current_oat = current_header.GetOatFileEnd();
    }
    // Check that oat files start at the end of images.
    CHECK_EQ(current_heap, image_spaces[i]->GetImageHeader().GetOatFileBegin());
    // Check that the reservation size equals the size of images and oat files.
    CHECK_EQ(reservation_size, static_cast<size_t>(current_oat - image_spaces[i]->Begin()));

    boot_image_size += reservation_size;
    i += image_count;
  }
}

Heap::Heap(size_t initial_size,
           size_t growth_limit,
           size_t min_free,
           size_t max_free,
           double target_utilization,
           double foreground_heap_growth_multiplier,
           size_t stop_for_native_allocs,
           size_t capacity,
           size_t non_moving_space_capacity,
           const std::vector<std::string>& boot_class_path,
           const std::vector<std::string>& boot_class_path_locations,
           ArrayRef<File> boot_class_path_files,
           ArrayRef<File> boot_class_path_image_files,
           ArrayRef<File> boot_class_path_vdex_files,
           ArrayRef<File> boot_class_path_oat_files,
           const std::vector<std::string>& image_file_names,
           const InstructionSet image_instruction_set,
           CollectorType foreground_collector_type,
           CollectorType background_collector_type,
           space::LargeObjectSpaceType large_object_space_type,
           size_t large_object_threshold,
           size_t parallel_gc_threads,
           size_t conc_gc_threads,
           bool low_memory_mode,
           size_t long_pause_log_threshold,
           size_t long_gc_log_threshold,
           bool ignore_target_footprint,
           bool always_log_explicit_gcs,
           bool use_tlab,
           bool verify_pre_gc_heap,
           bool verify_pre_sweeping_heap,
           bool verify_post_gc_heap,
           bool verify_pre_gc_rosalloc,
           bool verify_pre_sweeping_rosalloc,
           bool verify_post_gc_rosalloc,
           bool gc_stress_mode,
           bool measure_gc_performance,
           bool use_homogeneous_space_compaction_for_oom,
           bool use_generational_cc,
           uint64_t min_interval_homogeneous_space_compaction_by_oom,
           bool dump_region_info_before_gc,
           bool dump_region_info_after_gc)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      collector_type_(kCollectorTypeNone),
      foreground_collector_type_(foreground_collector_type),
      background_collector_type_(background_collector_type),
      desired_collector_type_(foreground_collector_type_),
      pending_task_lock_(nullptr),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      process_cpu_start_time_ns_(ProcessCpuNanoTime()),
      pre_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
      post_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
      pre_gc_weighted_allocated_bytes_(0.0),
      post_gc_weighted_allocated_bytes_(0.0),
      ignore_target_footprint_(ignore_target_footprint),
      always_log_explicit_gcs_(always_log_explicit_gcs),
      zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
      zygote_space_(nullptr),
      large_object_threshold_(large_object_threshold),
      disable_thread_flip_count_(0),
      thread_flip_running_(false),
      collector_type_running_(kCollectorTypeNone),
      last_gc_cause_(kGcCauseNone),
      thread_running_gc_(nullptr),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      initial_heap_size_(initial_size),
      target_footprint_(initial_size),
      // Using kPostMonitorLock since a lock at kDefaultMutexLevel is acquired after
      // this one.
      process_state_update_lock_("process state update lock", kPostMonitorLock),
      min_foreground_target_footprint_(0),
      min_foreground_concurrent_start_bytes_(0),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_registered_(0),
      old_native_bytes_allocated_(0),
      native_objects_notified_(0),
      num_bytes_freed_revoke_(0),
      num_bytes_alive_after_gc_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      gc_stress_mode_(gc_stress_mode),
      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode
          ? kGcAlotAllocationStackSize
          : (kVerifyObjectSupport > kVerifyObjectModeFast)
              ? kVerifyObjectAllocationStackSize
              : kDefaultAllocationStackSize),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      region_space_(nullptr),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
      stop_for_native_allocs_(stop_for_native_allocs),
      total_wait_time_(0),
      verify_object_mode_(kVerifyObjectModeDisabled),
      disable_moving_gc_count_(0),
      semi_space_collector_(nullptr),
      active_concurrent_copying_collector_(nullptr),
      young_concurrent_copying_collector_(nullptr),
      concurrent_copying_collector_(nullptr),
      is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
      use_tlab_(use_tlab),
      main_space_backup_(nullptr),
      min_interval_homogeneous_space_compaction_by_oom_(
          min_interval_homogeneous_space_compaction_by_oom),
      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
      gcs_completed_(0u),
      max_gc_requested_(0u),
      pending_collector_transition_(nullptr),
      pending_heap_trim_(nullptr),
      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
      use_generational_cc_(use_generational_cc),
      running_collection_is_blocking_(false),
      blocking_gc_count_(0U),
      blocking_gc_time_(0U),
      last_update_time_gc_count_rate_histograms_(  // Round down by the window duration.
          (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
      gc_count_last_window_(0U),
      blocking_gc_count_last_window_(0U),
      gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
      blocking_gc_count_rate_histogram_(
          "blocking gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
      alloc_tracking_enabled_(false),
      alloc_record_depth_(AllocRecordObjectMap::kDefaultAllocStackDepth),
      backtrace_lock_(nullptr),
      seen_backtrace_count_(0u),
      unique_backtrace_count_(0u),
      gc_disabled_for_shutdown_(false),
      dump_region_info_before_gc_(dump_region_info_before_gc),
      dump_region_info_after_gc_(dump_region_info_after_gc),
      boot_image_spaces_(),
      boot_images_start_address_(0u),
      boot_images_size_(0u),
      pre_oome_gc_count_(0u) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }

  LOG(INFO) << "Using " << foreground_collector_type_ << " GC.";
  if (gUseUserfaultfd) {
    CHECK_EQ(foreground_collector_type_, kCollectorTypeCMC);
    CHECK_EQ(background_collector_type_, kCollectorTypeCMCBackground);
  } else {
    // This ensures that the userfaultfd syscall is done before any seccomp filter is installed.
    // TODO(b/266731037): Remove this when we no longer need to collect metrics on userfaultfd
    // support.
    auto [uffd_supported, minor_fault_supported] = collector::MarkCompact::GetUffdAndMinorFault();
    // The check is just to ensure that the compiler doesn't eliminate the function call above.
    // Userfaultfd support is certain to be there if its minor-fault feature is supported.
    CHECK_IMPLIES(minor_fault_supported, uffd_supported);
  }

  if (gUseReadBarrier) {
    CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
    CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
  } else if (background_collector_type_ != gc::kCollectorTypeHomogeneousSpaceCompact) {
    CHECK_EQ(IsMovingGc(foreground_collector_type_), IsMovingGc(background_collector_type_))
        << "Changing from " << foreground_collector_type_ << " to "
        << background_collector_type_ << " (or vice versa) is not supported.";
  }
  verification_.reset(new Verification(this));
  CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
  ScopedTrace trace(__FUNCTION__);
  Runtime* const runtime = Runtime::Current();
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  const bool is_zygote = runtime->IsZygote();
  if (!is_zygote) {
    // Background compaction is currently not supported for command line runs.
    if (background_collector_type_ != foreground_collector_type_) {
      VLOG(heap) << "Disabling background compaction for non zygote";
      background_collector_type_ = foreground_collector_type_;
    }
  }
  ChangeCollector(desired_collector_type_);
  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));

  // We don't have hspace compaction enabled with CC or CMC.
  if (foreground_collector_type_ == kCollectorTypeCC
      || foreground_collector_type_ == kCollectorTypeCMC) {
    use_homogeneous_space_compaction_for_oom_ = false;
  }
  bool support_homogeneous_space_compaction =
      background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
      use_homogeneous_space_compaction_for_oom_;
  // We may use the main space for the non moving space if we don't need to compact from the
  // main space.
  // This is not the case if we support homogeneous compaction or have a moving background
  // collector type.
  bool separate_non_moving_space = is_zygote ||
      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
      IsMovingGc(background_collector_type_);

  // Requested begin for the alloc space, to follow the mapped image and oat files.
  uint8_t* request_begin = nullptr;
  // Calculate the extra space required after the boot image, see allocations below.
  size_t heap_reservation_size = 0u;
  if (separate_non_moving_space) {
    heap_reservation_size = non_moving_space_capacity;
  } else if (foreground_collector_type_ != kCollectorTypeCC && is_zygote) {
    heap_reservation_size = capacity_;
  }
  heap_reservation_size = RoundUp(heap_reservation_size, gPageSize);
  // Load image space(s).
  std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
  MemMap heap_reservation;
  if (space::ImageSpace::LoadBootImage(boot_class_path,
                                       boot_class_path_locations,
                                       boot_class_path_files,
                                       boot_class_path_image_files,
                                       boot_class_path_vdex_files,
                                       boot_class_path_oat_files,
                                       image_file_names,
                                       image_instruction_set,
                                       runtime->ShouldRelocate(),
                                       /*executable=*/!runtime->IsAotCompiler(),
                                       heap_reservation_size,
                                       runtime->AllowInMemoryCompilation(),
                                       runtime->GetApexVersions(),
                                       &boot_image_spaces,
                                       &heap_reservation)) {
    DCHECK_EQ(heap_reservation_size, heap_reservation.IsValid() ? heap_reservation.Size() : 0u);
    DCHECK(!boot_image_spaces.empty());
    request_begin = boot_image_spaces.back()->GetImageHeader().GetOatFileEnd();
    DCHECK_IMPLIES(heap_reservation.IsValid(), request_begin == heap_reservation.Begin())
        << "request_begin=" << static_cast<const void*>(request_begin)
        << " heap_reservation.Begin()=" << static_cast<const void*>(heap_reservation.Begin());
    for (std::unique_ptr<space::ImageSpace>& space : boot_image_spaces) {
      boot_image_spaces_.push_back(space.get());
      AddSpace(space.release());
    }
    boot_images_start_address_ = PointerToLowMemUInt32(boot_image_spaces_.front()->Begin());
    uint32_t boot_images_end =
        PointerToLowMemUInt32(boot_image_spaces_.back()->GetImageHeader().GetOatFileEnd());
    boot_images_size_ = boot_images_end - boot_images_start_address_;
    if (kIsDebugBuild) {
      VerifyBootImagesContiguity(boot_image_spaces_);
    }
  } else {
    if (foreground_collector_type_ == kCollectorTypeCC) {
      // Need to use a low address so that we can allocate a contiguous 2 * Xmx space
      // when there's no image (dex2oat for target).
      request_begin = kPreferredAllocSpaceBegin;
    }
    // Gross hack to make dex2oat deterministic.
    if (foreground_collector_type_ == kCollectorTypeMS && Runtime::Current()->IsAotCompiler()) {
      // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
      // b/26849108
      request_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
    }
  }

  /*
  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-  nonmoving space (non_moving_space_capacity)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space / bump space 1 (capacity_) +-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space2 / bump space 2 (capacity_)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
  */

  MemMap main_mem_map_1;
  MemMap main_mem_map_2;

  std::string error_str;
  MemMap non_moving_space_mem_map;
  if (separate_non_moving_space) {
    ScopedTrace trace2("Create separate non moving space");
    // If we are the zygote, the non moving space becomes the zygote space when we run
    // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
    // rename the mem map later.
    const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
    // Reserve the non moving mem map before the other two since it needs to be at a specific
    // address.
    DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
    if (heap_reservation.IsValid()) {
      non_moving_space_mem_map = heap_reservation.RemapAtEnd(
          heap_reservation.Begin(), space_name, PROT_READ | PROT_WRITE, &error_str);
    } else {
      non_moving_space_mem_map = MapAnonymousPreferredAddress(
          space_name, request_begin, non_moving_space_capacity, &error_str);
    }
    CHECK(non_moving_space_mem_map.IsValid()) << error_str;
    DCHECK(!heap_reservation.IsValid());
    // Try to reserve virtual memory at a lower address if we have a separate non moving space.
    request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
  }
  // Attempt to create 2 mem maps at or after the requested begin.
  if (foreground_collector_type_ != kCollectorTypeCC) {
    ScopedTrace trace2("Create main mem map");
    if (separate_non_moving_space || !is_zygote) {
      main_mem_map_1 = MapAnonymousPreferredAddress(
          kMemMapSpaceName[0], request_begin, capacity_, &error_str);
    } else {
      // If no separate non-moving space and we are the zygote, the main space must come right after
      // the image space to avoid a gap. This is required since we want the zygote space to be
      // adjacent to the image space.
      DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
      main_mem_map_1 = MemMap::MapAnonymous(
          kMemMapSpaceName[0],
          request_begin,
          capacity_,
          PROT_READ | PROT_WRITE,
          /* low_4gb= */ true,
          /* reuse= */ false,
          heap_reservation.IsValid() ? &heap_reservation : nullptr,
          &error_str);
    }
    CHECK(main_mem_map_1.IsValid()) << error_str;
    DCHECK(!heap_reservation.IsValid());
  }
  if (support_homogeneous_space_compaction ||
      background_collector_type_ == kCollectorTypeSS ||
      foreground_collector_type_ == kCollectorTypeSS) {
    ScopedTrace trace2("Create main mem map 2");
    main_mem_map_2 = MapAnonymousPreferredAddress(
        kMemMapSpaceName[1], main_mem_map_1.End(), capacity_, &error_str);
    CHECK(main_mem_map_2.IsValid()) << error_str;
  }

  // Create the non moving space first so that bitmaps don't take up the address range.
  if (separate_non_moving_space) {
    ScopedTrace trace2("Add non moving space");
    // Non moving space is always dlmalloc since we currently don't have support for multiple
    // active rosalloc spaces.
    const size_t size = non_moving_space_mem_map.Size();
    const void* non_moving_space_mem_map_begin = non_moving_space_mem_map.Begin();
    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(std::move(non_moving_space_mem_map),
                                                               "zygote / non moving space",
                                                               GetDefaultStartingSize(),
                                                               initial_size,
                                                               size,
                                                               size,
                                                               /* can_move_objects= */ false);
    CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
        << non_moving_space_mem_map_begin;
    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
    AddSpace(non_moving_space_);
  }
  // Create other spaces based on whether or not we have a moving GC.
  if (foreground_collector_type_ == kCollectorTypeCC) {
    CHECK(separate_non_moving_space);
    // Reserve twice the capacity, to allow evacuating every region for explicit GCs.
    MemMap region_space_mem_map =
        space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin);
    CHECK(region_space_mem_map.IsValid()) << "No region space mem map";
    region_space_ = space::RegionSpace::Create(
        kRegionSpaceName, std::move(region_space_mem_map), use_generational_cc_);
    AddSpace(region_space_);
  } else if (IsMovingGc(foreground_collector_type_)) {
    // Create bump pointer spaces.
    // We only need to create the bump pointer spaces if the foreground collector is a
    // compacting GC.
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
                                                                    std::move(main_mem_map_1));
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    // For the Concurrent Mark-Compact GC we don't need the temp space to be in the
    // lower 4GB, so its temp space will be created by the GC itself.
    if (foreground_collector_type_ != kCollectorTypeCMC) {
      temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
                                                              std::move(main_mem_map_2));
      CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
      AddSpace(temp_space_);
    }
    CHECK(separate_non_moving_space);
  } else {
    CreateMainMallocSpace(std::move(main_mem_map_1), initial_size, growth_limit_, capacity_);
    CHECK(main_space_ != nullptr);
    AddSpace(main_space_);
    if (!separate_non_moving_space) {
      non_moving_space_ = main_space_;
      CHECK(!non_moving_space_->CanMoveObjects());
    }
    if (main_mem_map_2.IsValid()) {
      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
      main_space_backup_.reset(CreateMallocSpaceFromMemMap(std::move(main_mem_map_2),
                                                           initial_size,
                                                           growth_limit_,
                                                           capacity_,
                                                           name,
                                                           /* can_move_objects= */ true));
      CHECK(main_space_backup_.get() != nullptr);
      // Add the space so it's accounted for in the heap_begin and heap_end.
      AddSpace(main_space_backup_.get());
    }
  }
  CHECK(non_moving_space_ != nullptr);
  CHECK(!non_moving_space_->CanMoveObjects());
  // Allocate the large object space.
  if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
    large_object_space_ = space::FreeListSpace::Create("free list large object space", capacity_);
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
    large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else {
    // Disable the large object space by making the cutoff excessively large.
    large_object_threshold_ = std::numeric_limits<size_t>::max();
    large_object_space_ = nullptr;
  }
  if (large_object_space_ != nullptr) {
    AddSpace(large_object_space_);
  }
  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  uint8_t* heap_begin = continuous_spaces_.front()->Begin();
  uint8_t* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;
  // Remove the main backup space since it slows down the GC to have unused extra spaces.
  // TODO: Avoid needing to do this.
  if (main_space_backup_.get() != nullptr) {
    RemoveSpace(main_space_backup_.get());
  }
  // Allocate the card table.
  // We currently don't support dynamically resizing the card table.
  // Since we don't know where in the low_4gb the app image will be located, make the card table
  // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
  UNUSED(heap_capacity);
  // Start at 4 KB, we can be sure there are no spaces mapped this low since the address range is
  // reserved by the kernel.
  static constexpr size_t kMinHeapAddress = 4 * KB;
  card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
                                                  4 * GB - kMinHeapAddress));
  CHECK(card_table_.get() != nullptr) << "Failed to create card table";
  if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
    rb_table_.reset(new accounting::ReadBarrierTable());
    DCHECK(rb_table_->IsAllCleared());
  }
  if (HasBootImageSpace()) {
    // Don't add the image mod union table if we are running without an image, this can crash if
    // we use the CardCache implementation.
    for (space::ImageSpace* image_space : GetBootImageSpaces()) {
      accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
          "Image mod-union table", this, image_space);
      CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
      AddModUnionTable(mod_union_table);
    }
  }
  if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
    accounting::RememberedSet* non_moving_space_rem_set =
        new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
    CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
    AddRememberedSet(non_moving_space_rem_set);
  }
  // TODO: Count objects in the image space here?
  num_bytes_allocated_.store(0, std::memory_order_relaxed);
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
                                                    kDefaultMarkStackSize));
  const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
  allocation_stack_.reset(accounting::ObjectStack::Create(
      "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
  live_stack_.reset(accounting::ObjectStack::Create(
      "live stack", max_allocation_stack_size_, alloc_stack_capacity));
  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));

  thread_flip_lock_ = new Mutex("GC thread flip lock");
  thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
                                                *thread_flip_lock_));
  task_processor_.reset(new TaskProcessor());
  reference_processor_.reset(new ReferenceProcessor());
  pending_task_lock_ = new Mutex("Pending task lock");
  if (ignore_target_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(target_footprint_.load(std::memory_order_relaxed), 0U);
  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
        (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
      garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
    }
  }
  if (kMovingCollector) {
    if (MayUseCollector(kCollectorTypeSS) ||
        MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
        use_homogeneous_space_compaction_for_oom_) {
      semi_space_collector_ = new collector::SemiSpace(this);
      garbage_collectors_.push_back(semi_space_collector_);
    }
    if (MayUseCollector(kCollectorTypeCMC)) {
      mark_compact_ = new collector::MarkCompact(this);
      garbage_collectors_.push_back(mark_compact_);
    }
    if (MayUseCollector(kCollectorTypeCC)) {
      concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
                                                                       /*young_gen=*/false,
                                                                       use_generational_cc_,
                                                                       "",
                                                                       measure_gc_performance);
      if (use_generational_cc_) {
        young_concurrent_copying_collector_ = new collector::ConcurrentCopying(
            this,
            /*young_gen=*/true,
            use_generational_cc_,
            "young",
            measure_gc_performance);
      }
      active_concurrent_copying_collector_.store(concurrent_copying_collector_,
                                                 std::memory_order_relaxed);
      DCHECK(region_space_ != nullptr);
      concurrent_copying_collector_->SetRegionSpace(region_space_);
      if (use_generational_cc_) {
        young_concurrent_copying_collector_->SetRegionSpace(region_space_);
        // At this point, the non-moving space should be created.
        DCHECK(non_moving_space_ != nullptr);
        concurrent_copying_collector_->CreateInterRegionRefBitmaps();
      }
      garbage_collectors_.push_back(concurrent_copying_collector_);
      if (use_generational_cc_) {
        garbage_collectors_.push_back(young_concurrent_copying_collector_);
      }
    }
  }
  if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
      (is_zygote || separate_non_moving_space)) {
    // Check that there's no gap between the image space and the non moving space so that the
    // immune region won't break (eg. due to a large object allocated in the gap). This is only
    // required when we're the zygote.
    // Space with the smallest Begin().
    space::ImageSpace* first_space = nullptr;
    for (space::ImageSpace* space : boot_image_spaces_) {
      if (first_space == nullptr || space->Begin() < first_space->Begin()) {
        first_space = space;
      }
    }
    bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
    if (!no_gap) {
      PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
      MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse= */ true);
      LOG(FATAL) << "There's a gap between the image space and the non-moving space";
    }
  }
  // Perfetto Java Heap Profiler support.
  if (runtime->IsPerfettoJavaHeapStackProfEnabled()) {
    // The Perfetto plugin is loaded and enabled; initialize the Java Heap Profiler.
    InitPerfettoJavaHeapProf();
  } else {
    // Disable the Java Heap Profiler.
    GetHeapSampler().DisableHeapSampler();
  }

  instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
  if (gc_stress_mode_) {
    backtrace_lock_ = new Mutex("GC complete lock");
  }
  if (is_running_on_memory_tool_ || gc_stress_mode_) {
    instrumentation->InstrumentQuickAllocEntryPoints();
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

MemMap Heap::MapAnonymousPreferredAddress(const char* name,
                                          uint8_t* request_begin,
                                          size_t capacity,
                                          std::string* out_error_str) {
  while (true) {
    MemMap map = MemMap::MapAnonymous(name,
                                      request_begin,
                                      capacity,
                                      PROT_READ | PROT_WRITE,
                                      /*low_4gb=*/ true,
                                      /*reuse=*/ false,
                                      /*reservation=*/ nullptr,
                                      out_error_str);
    if (map.IsValid() || request_begin == nullptr) {
      return map;
    }
    // Retry a second time with no specified request begin.
    request_begin = nullptr;
  }
}

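// A hedged usage sketch of the helper above: callers ask for a preferred address and get an
// automatic unconstrained retry, so the only hard failure is when even that retry fails.
//
//   std::string error;
//   MemMap map = MapAnonymousPreferredAddress("main space", request_begin, capacity, &error);
//   CHECK(map.IsValid()) << error;  // map.Begin() may differ from request_begin.
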
bool Heap::MayUseCollector(CollectorType type) const {
  return foreground_collector_type_ == type || background_collector_type_ == type;
}

space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap&& mem_map,
                                                      size_t initial_size,
                                                      size_t growth_limit,
                                                      size_t capacity,
                                                      const char* name,
                                                      bool can_move_objects) {
  space::MallocSpace* malloc_space = nullptr;
  if (kUseRosAlloc) {
    // Create rosalloc space.
    malloc_space = space::RosAllocSpace::CreateFromMemMap(std::move(mem_map),
                                                          name,
                                                          GetDefaultStartingSize(),
                                                          initial_size,
                                                          growth_limit,
                                                          capacity,
                                                          low_memory_mode_,
                                                          can_move_objects);
  } else {
    malloc_space = space::DlMallocSpace::CreateFromMemMap(std::move(mem_map),
                                                          name,
                                                          GetDefaultStartingSize(),
                                                          initial_size,
                                                          growth_limit,
                                                          capacity,
                                                          can_move_objects);
  }
  if (collector::SemiSpace::kUseRememberedSet) {
    accounting::RememberedSet* rem_set =
        new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
    CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
    AddRememberedSet(rem_set);
  }
  CHECK(malloc_space != nullptr) << "Failed to create " << name;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  return malloc_space;
}

void Heap::CreateMainMallocSpace(MemMap&& mem_map,
                                 size_t initial_size,
                                 size_t growth_limit,
                                 size_t capacity) {
  // Is background compaction enabled?
  bool can_move_objects = IsMovingGc(background_collector_type_) !=
      IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
  // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
  // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
  // from the main space to the zygote space. If background compaction is enabled, always pass in
  // that we can move objects.
  if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
    // After the zygote we want this to be false if we don't have background compaction enabled so
    // that getting primitive array elements is faster.
    can_move_objects = !HasZygoteSpace();
  }
  if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
    RemoveRememberedSet(main_space_);
  }
  const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
  main_space_ = CreateMallocSpaceFromMemMap(std::move(mem_map),
                                            initial_size,
                                            growth_limit,
                                            capacity,
                                            name,
                                            can_move_objects);
  SetSpaceAsDefault(main_space_);
  VLOG(heap) << "Created main space " << main_space_;
}

void Heap::ChangeAllocator(AllocatorType allocator) {
  if (current_allocator_ != allocator) {
    // These two allocators are only used internally and don't have any entrypoints.
    CHECK_NE(allocator, kAllocatorTypeLOS);
    CHECK_NE(allocator, kAllocatorTypeNonMoving);
    current_allocator_ = allocator;
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

bool Heap::IsCompilingBoot() const {
  if (!Runtime::Current()->IsAotCompiler()) {
    return false;
  }
  ScopedObjectAccess soa(Thread::Current());
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace() || space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, ThreadState::kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  if (IsMovingGc(collector_type_running_)) {
    WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
  }
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GT(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}

void Heap::IncrementDisableThreadFlip(Thread* self) {
  // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
  bool is_nested = self->GetDisableThreadFlipCount() > 0;
  self->IncrementDisableThreadFlipCount();
  if (is_nested) {
    // If this is a nested JNI critical section enter, we don't need to wait or increment the global
    // counter. The global counter is incremented only once for a thread for the outermost enter.
    return;
  }
  ScopedThreadStateChange tsc(self, ThreadState::kWaitingForGcThreadFlip);
  MutexLock mu(self, *thread_flip_lock_);
  thread_flip_cond_->CheckSafeToWait(self);
  bool has_waited = false;
  uint64_t wait_start = 0;
  if (thread_flip_running_) {
    wait_start = NanoTime();
    ScopedTrace trace("IncrementDisableThreadFlip");
    while (thread_flip_running_) {
      has_waited = true;
      thread_flip_cond_->Wait(self);
    }
  }
  ++disable_thread_flip_count_;
  if (has_waited) {
    uint64_t wait_time = NanoTime() - wait_start;
    total_wait_time_ += wait_time;
    if (wait_time > long_pause_log_threshold_) {
      LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
    }
  }
}

void Heap::EnsureObjectUserfaulted(ObjPtr<mirror::Object> obj) {
  if (gUseUserfaultfd) {
    // Use volatile to ensure that compiler loads from memory to trigger userfaults, if required.
    const uint8_t* start = reinterpret_cast<uint8_t*>(obj.Ptr());
    const uint8_t* end = AlignUp(start + obj->SizeOf(), gPageSize);
    // The first page is already touched by SizeOf().
    start += gPageSize;
    while (start < end) {
      ForceRead(start);
      start += gPageSize;
    }
  }
}

DecrementDisableThreadFlip(Thread * self)1051 void Heap::DecrementDisableThreadFlip(Thread* self) {
1052   // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up
1053   // the GC waiting before doing a thread flip.
1054   self->DecrementDisableThreadFlipCount();
1055   bool is_outermost = self->GetDisableThreadFlipCount() == 0;
1056   if (!is_outermost) {
1057     // If this is not an outermost JNI critical exit, we don't need to decrement the global counter.
1058     // The global counter is decremented only once for a thread for the outermost exit.
1059     return;
1060   }
1061   MutexLock mu(self, *thread_flip_lock_);
1062   CHECK_GT(disable_thread_flip_count_, 0U);
1063   --disable_thread_flip_count_;
1064   if (disable_thread_flip_count_ == 0) {
1065     // Potentially notify the GC thread blocking to begin a thread flip.
1066     thread_flip_cond_->Broadcast(self);
1067   }
1068 }
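
// ---------------------------------------------------------------------------
// Added commentary (not part of the original heap.cc): the thread-flip
// increment/decrement pair brackets JNI critical sections on the mutator side,
// so a GC thread flip cannot begin while any critical section is active.
// Assuming a Heap* heap and Thread* self, the pattern is roughly:
//
//   heap->IncrementDisableThreadFlip(self);  // blocks while a flip is running
//   /* ... JNI critical section: raw array/string pointers stay stable ... */
//   heap->DecrementDisableThreadFlip(self);  // may wake a waiting GC thread
// ---------------------------------------------------------------------------
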
void Heap::ThreadFlipBegin(Thread* self) {
  // Supposed to be called by GC. Set thread_flip_running_ to be true. If disable_thread_flip_count_
  // > 0, block. Otherwise, go ahead.
  ScopedThreadStateChange tsc(self, ThreadState::kWaitingForGcThreadFlip);
  MutexLock mu(self, *thread_flip_lock_);
  thread_flip_cond_->CheckSafeToWait(self);
  bool has_waited = false;
  uint64_t wait_start = NanoTime();
  CHECK(!thread_flip_running_);
  // Set this to true before waiting so that frequent JNI critical enter/exits won't starve
  // GC. This is like the writer preference of a reader-writer lock.
  thread_flip_running_ = true;
  while (disable_thread_flip_count_ > 0) {
    has_waited = true;
    thread_flip_cond_->Wait(self);
  }
  if (has_waited) {
    uint64_t wait_time = NanoTime() - wait_start;
    total_wait_time_ += wait_time;
    if (wait_time > long_pause_log_threshold_) {
      LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
    }
  }
}

void Heap::ThreadFlipEnd(Thread* self) {
  // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators
  // waiting before doing a JNI critical.
  MutexLock mu(self, *thread_flip_lock_);
  CHECK(thread_flip_running_);
  thread_flip_running_ = false;
  // Potentially notify mutator threads blocking to enter a JNI critical section.
  thread_flip_cond_->Broadcast(self);
}

void Heap::GrowHeapOnJankPerceptibleSwitch() {
  MutexLock mu(Thread::Current(), process_state_update_lock_);
  size_t orig_target_footprint = target_footprint_.load(std::memory_order_relaxed);
  if (orig_target_footprint < min_foreground_target_footprint_) {
    target_footprint_.compare_exchange_strong(orig_target_footprint,
                                              min_foreground_target_footprint_,
                                              std::memory_order_relaxed);
  }
  if (IsGcConcurrent() && concurrent_start_bytes_ < min_foreground_concurrent_start_bytes_) {
    concurrent_start_bytes_ = min_foreground_concurrent_start_bytes_;
  }
}
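
// ---------------------------------------------------------------------------
// Added commentary (an interpretation, not from the original source): the
// compare_exchange_strong in GrowHeapOnJankPerceptibleSwitch deliberately does
// not retry on failure. If the CAS loses, a concurrent updater (e.g. a GC
// recomputing the footprint) wrote a fresher value, and overwriting it with
// min_foreground_target_footprint_ would discard that newer information.
// ---------------------------------------------------------------------------
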
void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
  if (old_process_state != new_process_state) {
    const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
    if (jank_perceptible) {
      // Transition back to foreground right away to prevent jank.
      RequestCollectorTransition(foreground_collector_type_, 0);
      GrowHeapOnJankPerceptibleSwitch();
    } else {
      // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
      // special handling which does a homogeneous space compaction once but then doesn't transition
      // the collector. Similarly, we invoke a full compaction for kCollectorTypeCC but don't
      // transition the collector.
      RequestCollectorTransition(background_collector_type_, 0);
    }
  }
}
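
// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original source): the framework drives
// these transitions as the app moves between foreground and background.
// Assuming a Heap* heap, a background/foreground cycle looks roughly like:
//
//   heap->UpdateProcessState(kProcessStateJankPerceptible,      // to background:
//                            kProcessStateJankImperceptible);   // may compact
//   heap->UpdateProcessState(kProcessStateJankImperceptible,    // to foreground:
//                            kProcessStateJankPerceptible);     // grow heap now
// ---------------------------------------------------------------------------
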
void Heap::CreateThreadPool(size_t num_threads) {
  if (num_threads == 0) {
    num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  }
  if (num_threads != 0) {
    thread_pool_.reset(ThreadPool::Create("Heap thread pool", num_threads));
  }
}

void Heap::WaitForWorkersToBeCreated() {
  DCHECK(!Runtime::Current()->IsShuttingDown(Thread::Current()))
      << "Cannot create new threads during runtime shutdown";
  if (thread_pool_ != nullptr) {
    thread_pool_->WaitForWorkersToBeCreated();
  }
}

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
  space::ContinuousSpace* space2 = non_moving_space_;
  // TODO: Generalize this to n bitmaps?
  CHECK(space1 != nullptr);
  CHECK(space2 != nullptr);
  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
                 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
                 stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

void Heap::AddSpace(space::Space* space) {
  CHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    // The region space bitmap is not added since VisitObjects visits the region space objects with
    // special handling.
    if (live_bitmap != nullptr && !space->IsRegionSpace()) {
      CHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }
    continuous_spaces_.push_back(continuous_space);
    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
      return a->Begin() < b->Begin();
    });
  } else {
    CHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}
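
// ---------------------------------------------------------------------------
// Added commentary (not part of the original heap.cc): AddSpace keeps
// continuous_spaces_ sorted by start address. In principle that would let
// address lookups such as FindContinuousSpaceFromAddress() binary-search the
// vector (e.g. std::upper_bound on Space::Begin()); the linear scans used in
// this file are fine in practice because the number of spaces is tiny.
// ---------------------------------------------------------------------------
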
void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (continuous_space->IsDlMallocSpace()) {
    dlmalloc_space_ = continuous_space->AsDlMallocSpace();
  } else if (continuous_space->IsRosAllocSpace()) {
    rosalloc_space_ = continuous_space->AsRosAllocSpace();
  }
}

void Heap::RemoveSpace(space::Space* space) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr && !space->IsRegionSpace()) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
    }
    auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
    DCHECK(it != continuous_spaces_.end());
    continuous_spaces_.erase(it);
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
                        discontinuous_space);
    DCHECK(it != discontinuous_spaces_.end());
    discontinuous_spaces_.erase(it);
  }
  if (space->IsAllocSpace()) {
    auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
    DCHECK(it != alloc_spaces_.end());
    alloc_spaces_.erase(it);
  }
}

double Heap::CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
                                               uint64_t current_process_cpu_time) const {
  uint64_t bytes_allocated = GetBytesAllocated();
  double weight = current_process_cpu_time - gc_last_process_cpu_time_ns;
  return weight * bytes_allocated;
}

void Heap::CalculatePreGcWeightedAllocatedBytes() {
  uint64_t current_process_cpu_time = ProcessCpuNanoTime();
  pre_gc_weighted_allocated_bytes_ +=
    CalculateGcWeightedAllocatedBytes(pre_gc_last_process_cpu_time_ns_, current_process_cpu_time);
  pre_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
}

void Heap::CalculatePostGcWeightedAllocatedBytes() {
  uint64_t current_process_cpu_time = ProcessCpuNanoTime();
  post_gc_weighted_allocated_bytes_ +=
    CalculateGcWeightedAllocatedBytes(post_gc_last_process_cpu_time_ns_, current_process_cpu_time);
  post_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
}
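
// ---------------------------------------------------------------------------
// Added commentary (not part of the original heap.cc): each term accumulated
// above is bytes_allocated * delta_process_cpu_time_ns, so the running sums
// approximate the integral of heap size over process CPU time, sampled once
// per GC:
//
//   weighted_bytes ~= sum_i bytes_allocated(t_i) * (t_i - t_{i-1})
//
// Dividing such a sum by total CPU time gives the average number of allocated
// bytes held per unit of CPU time, separately for pre- and post-GC snapshots.
// ---------------------------------------------------------------------------
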
uint64_t Heap::GetTotalGcCpuTime() {
  uint64_t sum = 0;
  for (auto* collector : garbage_collectors_) {
    sum += collector->GetTotalCpuTime();
  }
  return sum;
}

void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;
  // Dump cumulative loggers for each GC type.
  uint64_t total_paused_time = 0;
  for (auto* collector : garbage_collectors_) {
    total_duration += collector->GetCumulativeTimings().GetTotalNs();
    total_paused_time += collector->GetTotalPausedTimeNs();
    collector->DumpPerformanceInfo(os);
  }
  if (total_duration != 0) {
    const double total_seconds = total_duration / 1.0e9;
    const double total_cpu_seconds = GetTotalGcCpuTime() / 1.0e9;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s"
       << " per cpu-time: "
       << PrettySize(GetBytesFreedEver() / total_cpu_seconds) << "/s\n";
  }
  os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
  os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
  os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
  os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
  os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
  os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
  os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
  if (HasZygoteSpace()) {
    os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Total GC count: " << GetGcCount() << "\n";
  os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
  os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
  os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
  os << "Total pre-OOME GC count: " << GetPreOomeGcCount() << "\n";
  {
    MutexLock mu(Thread::Current(), *gc_complete_lock_);
    if (gc_count_rate_histogram_.SampleSize() > 0U) {
      os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
      gc_count_rate_histogram_.DumpBins(os);
      os << "\n";
    }
    if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
      os << "Histogram of blocking GC count per "
         << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
      blocking_gc_count_rate_histogram_.DumpBins(os);
      os << "\n";
    }
  }

  if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
    rosalloc_space_->DumpStats(os);
  }

  os << "Native bytes total: " << GetNativeBytes()
     << " registered: " << native_bytes_registered_.load(std::memory_order_relaxed) << "\n";

  os << "Total native bytes at last GC: "
     << old_native_bytes_allocated_.load(std::memory_order_relaxed) << "\n";

  BaseMutex::DumpAll(os);
}
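
// ---------------------------------------------------------------------------
// Added commentary (not part of the original heap.cc): in the dump above,
// "Mean GC size throughput" is total bytes freed divided by wall-clock seconds
// spent in GC, while the "per cpu-time" figure divides by GC CPU seconds
// instead; a large gap between the two suggests GC threads spent much of their
// wall-clock time descheduled or blocked rather than collecting.
// ---------------------------------------------------------------------------
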
void Heap::ResetGcPerformanceInfo() {
  for (auto* collector : garbage_collectors_) {
    collector->ResetMeasurements();
  }

  process_cpu_start_time_ns_ = ProcessCpuNanoTime();

  pre_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
  pre_gc_weighted_allocated_bytes_ = 0u;

  post_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
  post_gc_weighted_allocated_bytes_ = 0u;

  total_bytes_freed_ever_.store(0);
  total_objects_freed_ever_.store(0);
  total_wait_time_ = 0;
  blocking_gc_count_ = 0;
  blocking_gc_time_ = 0;
  pre_oome_gc_count_.store(0, std::memory_order_relaxed);
  gc_count_last_window_ = 0;
  blocking_gc_count_last_window_ = 0;
  last_update_time_gc_count_rate_histograms_ =  // Round down by the window duration.
      (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
  {
    MutexLock mu(Thread::Current(), *gc_complete_lock_);
    gc_count_rate_histogram_.Reset();
    blocking_gc_count_rate_histogram_.Reset();
  }
}

uint64_t Heap::GetGcCount() const {
  uint64_t gc_count = 0U;
  for (auto* collector : garbage_collectors_) {
    gc_count += collector->GetCumulativeTimings().GetIterations();
  }
  return gc_count;
}

uint64_t Heap::GetGcTime() const {
  uint64_t gc_time = 0U;
  for (auto* collector : garbage_collectors_) {
    gc_time += collector->GetCumulativeTimings().GetTotalNs();
  }
  return gc_time;
}

uint64_t Heap::GetBlockingGcCount() const {
  return blocking_gc_count_;
}

uint64_t Heap::GetBlockingGcTime() const {
  return blocking_gc_time_;
}

void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
  MutexLock mu(Thread::Current(), *gc_complete_lock_);
  if (gc_count_rate_histogram_.SampleSize() > 0U) {
    gc_count_rate_histogram_.DumpBins(os);
  }
}

void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
  MutexLock mu(Thread::Current(), *gc_complete_lock_);
  if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
    blocking_gc_count_rate_histogram_.DumpBins(os);
  }
}

uint64_t Heap::GetPreOomeGcCount() const {
  return pre_oome_gc_count_.load(std::memory_order_relaxed);
}

ALWAYS_INLINE
static inline AllocationListener* GetAndOverwriteAllocationListener(
    Atomic<AllocationListener*>* storage, AllocationListener* new_value) {
  return storage->exchange(new_value);
}

Heap::~Heap() {
  VLOG(heap) << "Starting ~Heap()";
  STLDeleteElements(&garbage_collectors_);
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  allocation_records_.reset();
  live_stack_->Reset();
  STLDeleteValues(&mod_union_tables_);
  STLDeleteValues(&remembered_sets_);
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  delete thread_flip_lock_;
  delete pending_task_lock_;
  delete backtrace_lock_;
  uint64_t unique_count = unique_backtrace_count_.load();
  uint64_t seen_count = seen_backtrace_count_.load();
  if (unique_count != 0 || seen_count != 0) {
    LOG(INFO) << "gc stress unique=" << unique_count << " total=" << (unique_count + seen_count);
  }
  VLOG(heap) << "Finished ~Heap()";
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const {
  for (const auto& space : continuous_spaces_) {
    if (space->Contains(addr)) {
      return space;
    }
  }
  return nullptr;
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
                                                            bool fail_ok) const {
  space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr());
  if (space != nullptr) {
    return space;
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << obj << " not inside any spaces!";
  }
  return nullptr;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
                                                                  bool fail_ok) const {
  for (const auto& space : discontinuous_spaces_) {
    if (space->Contains(obj.Ptr())) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << obj << " not inside any spaces!";
  }
  return nullptr;
}

space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != nullptr) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, fail_ok);
}

space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
  for (const auto& space : continuous_spaces_) {
    if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
      return space;
    }
  }
  for (const auto& space : discontinuous_spaces_) {
    if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
      return space;
    }
  }
  return nullptr;
}

std::string Heap::DumpSpaceNameFromAddress(const void* addr) const {
  space::Space* space = FindSpaceFromAddress(addr);
  return (space != nullptr) ? space->GetName() : "no space";
}

void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
  // If we're in a stack overflow, do not create a new exception. It would require running the
  // constructor, which will of course still be in a stack overflow.
  if (self->IsHandlingStackOverflow()) {
    self->SetException(
        Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow());
    return;
  }
  // Allow plugins to intercept out of memory errors.
  Runtime::Current()->OutOfMemoryErrorHook();

  std::ostringstream oss;
  size_t total_bytes_free = GetFreeMemory();
  oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
      << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
      << " target footprint " << target_footprint_.load(std::memory_order_relaxed)
      << ", growth limit "
      << growth_limit_;
  // If the allocation failed due to fragmentation, print out the largest contiguous allocation.
  if (total_bytes_free >= byte_count) {
    space::AllocSpace* space = nullptr;
    if (allocator_type == kAllocatorTypeNonMoving) {
      space = non_moving_space_;
    } else if (allocator_type == kAllocatorTypeRosAlloc ||
               allocator_type == kAllocatorTypeDlMalloc) {
      space = main_space_;
    } else if (allocator_type == kAllocatorTypeBumpPointer ||
               allocator_type == kAllocatorTypeTLAB) {
      space = bump_pointer_space_;
    } else if (allocator_type == kAllocatorTypeRegion ||
               allocator_type == kAllocatorTypeRegionTLAB) {
      space = region_space_;
    }

    // There is no fragmentation info to log for large-object space.
    if (allocator_type != kAllocatorTypeLOS) {
      CHECK(space != nullptr) << "allocator_type:" << allocator_type
                              << " byte_count:" << byte_count
                              << " total_bytes_free:" << total_bytes_free;
      // LogFragmentationAllocFailure returns true if byte_count is greater than
      // the largest free contiguous chunk in the space. Return value false
      // means that we are throwing OOME because the amount of free heap after
      // GC is less than kMinFreeHeapAfterGcForAlloc in proportion of the heap-size.
      // Log an appropriate message in that case.
      if (!space->LogFragmentationAllocFailure(oss, byte_count)) {
        oss << "; giving up on allocation because <"
            << kMinFreeHeapAfterGcForAlloc * 100
            << "% of heap free after GC.";
      }
    }
  }
  self->ThrowOutOfMemoryError(oss.str().c_str());
}
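
// ---------------------------------------------------------------------------
// Added commentary (not part of the original heap.cc): ThrowOutOfMemoryError
// has two distinct paths. While handling a stack overflow, constructing a
// fresh OutOfMemoryError would run Java code on the already-exhausted stack,
// so a pre-allocated error object is installed instead. Only the normal path
// builds the detailed message with free-memory and fragmentation diagnostics.
// ---------------------------------------------------------------------------
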
void Heap::DoPendingCollectorTransition() {
  CollectorType desired_collector_type = desired_collector_type_;

  if (collector_type_ == kCollectorTypeCC || collector_type_ == kCollectorTypeCMC) {
    // If the app's allocations since the last GC exceed the threshold, do the transition GC now
    // that the app is in background. If not, then don't do the transition GC.
    // num_bytes_allocated_since_gc should always be non-negative, even if num_bytes_alive_after_gc_
    // initially comes from the Zygote; UnsignedDifference clamps the result to zero or above.
    size_t num_bytes_allocated_since_gc =
        UnsignedDifference(GetBytesAllocated(), num_bytes_alive_after_gc_);
    if (num_bytes_allocated_since_gc <
        (UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
                            num_bytes_alive_after_gc_)/4)
        && !kStressCollectorTransition
        && !IsLowMemoryMode()) {
      return;
    }
  }

  // Launch homogeneous space compaction if it is desired.
  if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
    if (!CareAboutPauseTimes()) {
      PerformHomogeneousSpaceCompact();
    } else {
      VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
    }
  } else if (desired_collector_type == kCollectorTypeCCBackground ||
             desired_collector_type == kCollectorTypeCMCBackground) {
    if (!CareAboutPauseTimes()) {
      // Invoke full compaction.
      CollectGarbageInternal(collector::kGcTypeFull,
                             kGcCauseCollectorTransition,
                             /*clear_soft_references=*/false, GetCurrentGcNum() + 1);
    } else {
      VLOG(gc) << "background compaction ignored due to jank perceptible process state";
    }
  } else {
    CHECK_EQ(desired_collector_type, collector_type_) << "Unsupported collector transition";
  }
}

void Heap::Trim(Thread* self) {
  Runtime* const runtime = Runtime::Current();
  if (!CareAboutPauseTimes()) {
    // Deflate the monitors; this can cause a pause but shouldn't matter since we don't care
    // about pauses.
    ScopedTrace trace("Deflating monitors");
    // Avoid race conditions on the lock word for CC.
    ScopedGCCriticalSection gcs(self, kGcCauseTrim, kCollectorTypeHeapTrim);
    ScopedSuspendAll ssa(__FUNCTION__);
    uint64_t start_time = NanoTime();
    size_t count = runtime->GetMonitorList()->DeflateMonitors();
    VLOG(heap) << "Deflating " << count << " monitors took "
        << PrettyDuration(NanoTime() - start_time);
  }
  TrimIndirectReferenceTables(self);
  TrimSpaces(self);
  // Trim arenas that may have been used by JIT or verifier.
  runtime->GetArenaPool()->TrimMaps();
}

class TrimIndirectReferenceTableClosure : public Closure {
 public:
  explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
  }
  void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
    thread->GetJniEnv()->TrimLocals();
    // If thread is a running mutator, then act on behalf of the trim thread.
    // See the code in ThreadList::RunCheckpoint.
    barrier_->Pass(Thread::Current());
  }

 private:
  Barrier* const barrier_;
};

void Heap::TrimIndirectReferenceTables(Thread* self) {
  ScopedObjectAccess soa(self);
  ScopedTrace trace(__PRETTY_FUNCTION__);
  JavaVMExt* vm = soa.Vm();
  // Trim globals indirect reference table.
  vm->TrimGlobals();
  // Trim locals indirect reference tables.
  // TODO: May also want to look for entirely empty pages maintained by SmallIrtAllocator.
  Barrier barrier(0);
  TrimIndirectReferenceTableClosure closure(&barrier);
  size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
  ScopedThreadStateChange tsc(self, ThreadState::kWaitingForCheckPointsToRun);
  if (barrier_count != 0) {
    barrier.Increment(self, barrier_count);
  }
}
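
// ---------------------------------------------------------------------------
// Added commentary (not part of the original heap.cc): this is the standard
// checkpoint/barrier pattern. RunCheckpoint returns how many threads the
// closure was queued on; each of those threads calls barrier_->Pass() when its
// TrimLocals() runs, and Barrier::Increment(self, barrier_count) blocks the
// trimming thread until all of the expected Pass() calls have happened.
// ---------------------------------------------------------------------------
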
void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
  // This can be called in either kRunnable or suspended states.
  // TODO: Consider fixing that?
  ThreadState old_thread_state = self->GetState();
  if (old_thread_state == ThreadState::kRunnable) {
    Locks::mutator_lock_->AssertSharedHeld(self);
    // Manually inlining the following call breaks thread-safety analysis.
    StartGCRunnable(self, cause, collector_type);
    return;
  }
  Locks::mutator_lock_->AssertNotHeld(self);
  self->SetState(ThreadState::kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  WaitForGcToCompleteLocked(cause, self);
  collector_type_running_ = collector_type;
  last_gc_cause_ = cause;
  thread_running_gc_ = self;
  self->SetState(old_thread_state);
}

void Heap::StartGCRunnable(Thread* self, GcCause cause, CollectorType collector_type) {
  Locks::mutator_lock_->AssertSharedHeld(self);
  while (true) {
    self->TransitionFromRunnableToSuspended(ThreadState::kWaitingForGcToComplete);
    {
      MutexLock mu(self, *gc_complete_lock_);
      // Ensure there is only one GC at a time.
      WaitForGcToCompleteLocked(cause, self);
      collector_type_running_ = collector_type;
      last_gc_cause_ = cause;
      thread_running_gc_ = self;
    }
    // We have to be careful returning to runnable state, since that could cause us to block.
    // That would be bad, since collector_type_running_ is set, and hence no GC is possible in this
    // state, allowing deadlock.
    if (LIKELY(self->TryTransitionFromSuspendedToRunnable())) {
      return;
    }
    {
      MutexLock mu(self, *gc_complete_lock_);
      collector_type_running_ = kCollectorTypeNone;
      thread_running_gc_ = nullptr;
    }
    self->TransitionFromSuspendedToRunnable();  // Will handle suspension request and block.
  }
}
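
// ---------------------------------------------------------------------------
// Added commentary (not part of the original heap.cc): the loop in
// StartGCRunnable handles an awkward window. If the thread cannot re-enter the
// runnable state without blocking (a suspension request arrived), it must
// first give up its claim on the GC slot (collector_type_running_ and
// thread_running_gc_), because blocking while holding the claim would prevent
// any GC from running and could deadlock; it then suspends, resumes, and
// competes for the slot again.
// ---------------------------------------------------------------------------
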
void Heap::TrimSpaces(Thread* self) {
  // Pretend we are doing a GC to prevent background compaction from deleting the space we are
  // trimming.
  StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
  ScopedTrace trace(__PRETTY_FUNCTION__);
  const uint64_t start_ns = NanoTime();
  // Trim the managed spaces.
  uint64_t total_alloc_space_allocated = 0;
  uint64_t total_alloc_space_size = 0;
  uint64_t managed_reclaimed = 0;
  {
    ScopedObjectAccess soa(self);
    for (const auto& space : continuous_spaces_) {
      if (space->IsMallocSpace()) {
        gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
        if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
          // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
          // for a long period of time.
          managed_reclaimed += malloc_space->Trim();
        }
        total_alloc_space_size += malloc_space->Size();
      }
    }
  }
  total_alloc_space_allocated = GetBytesAllocated();
  if (large_object_space_ != nullptr) {
    total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
  }
  if (bump_pointer_space_ != nullptr) {
    total_alloc_space_allocated -= bump_pointer_space_->Size();
  }
  if (region_space_ != nullptr) {
    total_alloc_space_allocated -= region_space_->GetBytesAllocated();
  }
  const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
      static_cast<float>(total_alloc_space_size);
  uint64_t gc_heap_end_ns = NanoTime();
  // We never move things in the native heap, so we can finish the GC at this point.
  FinishGC(self, collector::kGcTypeNone);

  VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
      << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of "
      << static_cast<int>(100 * managed_utilization) << "%.";
}

bool Heap::IsValidObjectAddress(const void* addr) const {
  if (addr == nullptr) {
    return true;
  }
  return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr;
}

bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const {
  return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr;
}

bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
                              bool search_allocation_stack,
                              bool search_live_stack,
                              bool sorted) {
  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) {
    return false;
  }
  if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) {
    mirror::Class* klass = obj->GetClass<kVerifyNone>();
    if (obj == klass) {
      // This case happens for java.lang.Class.
      return true;
    }
    return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
  } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) {
    // If we are in the allocated region of the temp space, then we are probably live (e.g. during
    // a GC). When a GC isn't running End() - Begin() is 0 which means no objects are contained.
    return temp_space_->Contains(obj.Ptr());
  }
  if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) {
    return true;
  }
  space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
  space::DiscontinuousSpace* d_space = nullptr;
  if (c_space != nullptr) {
    if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != nullptr) {
      if (d_space->GetLiveBitmap()->Test(obj.Ptr())) {
        return true;
      }
    }
  }
  // This is covering the allocation/live stack swapping that is done without mutators suspended.
  for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
    if (i > 0) {
      NanoSleep(MsToNs(10));
    }
    if (search_allocation_stack) {
      if (sorted) {
        if (allocation_stack_->ContainsSorted(obj.Ptr())) {
          return true;
        }
      } else if (allocation_stack_->Contains(obj.Ptr())) {
        return true;
      }
    }

    if (search_live_stack) {
      if (sorted) {
        if (live_stack_->ContainsSorted(obj.Ptr())) {
          return true;
        }
      } else if (live_stack_->Contains(obj.Ptr())) {
        return true;
      }
    }
  }
  // We need to check the bitmaps again since there is a race where we mark something as live and
  // then clear the stack containing it.
  if (c_space != nullptr) {
    if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) {
      return true;
    }
  }
  return false;
}
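
// ---------------------------------------------------------------------------
// Added commentary (not part of the original heap.cc): the bounded retry loop
// in IsLiveObjectLocked (up to 5 attempts, 10ms apart, for unsorted stacks)
// covers the race in which the allocation and live stacks are swapped without
// suspending mutators; an object can be momentarily in neither a stack nor a
// bitmap while marking is in flight, hence the final bitmap re-check.
// ---------------------------------------------------------------------------
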
std::string Heap::DumpSpaces() const {
  std::ostringstream oss;
  DumpSpaces(oss);
  return oss.str();
}

void Heap::DumpSpaces(std::ostream& stream) const {
  for (const auto& space : continuous_spaces_) {
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    stream << space << " " << *space << "\n";
    if (live_bitmap != nullptr) {
      stream << live_bitmap << " " << *live_bitmap << "\n";
    }
    if (mark_bitmap != nullptr) {
      stream << mark_bitmap << " " << *mark_bitmap << "\n";
    }
  }
  for (const auto& space : discontinuous_spaces_) {
    stream << space << " " << *space << "\n";
  }
}

void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
  if (verify_object_mode_ == kVerifyObjectModeDisabled) {
    return;
  }

  // Ignore early dawn of the universe verifications.
  if (UNLIKELY(num_bytes_allocated_.load(std::memory_order_relaxed) < 10 * KB)) {
    return;
  }
  CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
  mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
  CHECK(c != nullptr) << "Null class in object " << obj;
  CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
  CHECK(VerifyClassClass(c));

  if (verify_object_mode_ > kVerifyObjectModeFast) {
    // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
    CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
  }
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  auto visitor = [&](mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
    VerifyObjectBody(obj);
  };
  // Technically we need the mutator lock here to call Visit. However, VerifyObjectBody is already
  // NO_THREAD_SAFETY_ANALYSIS.
  auto no_thread_safety_analysis = [&]() NO_THREAD_SAFETY_ANALYSIS {
    GetLiveBitmap()->Visit(visitor);
  };
  no_thread_safety_analysis();
}

void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
  // Use signed comparison since freed bytes can be negative during background compaction and
  // foreground transitions. This is typically due to objects moving from a bump pointer space to
  // a free list backed space, which may increase memory footprint due to padding and binning.
  RACING_DCHECK_LE(freed_bytes,
                   static_cast<int64_t>(num_bytes_allocated_.load(std::memory_order_relaxed)));
  // Note: This relies on 2s complement for handling negative freed_bytes.
  num_bytes_allocated_.fetch_sub(static_cast<ssize_t>(freed_bytes), std::memory_order_relaxed);
  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    thread_stats->freed_objects += freed_objects;
    thread_stats->freed_bytes += freed_bytes;
    // TODO: Do this concurrently.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
  }
}
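
// ---------------------------------------------------------------------------
// Added commentary (not part of the original heap.cc), with an invented
// example: the fetch_sub relies on two's-complement wraparound so one
// operation handles both signs. With num_bytes_allocated_ == 1000 and
// freed_bytes == -24 (objects grew while moving to a free-list space),
// fetch_sub(-24) leaves 1024, exactly as if 24 extra bytes were allocated.
// ---------------------------------------------------------------------------
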
void Heap::RecordFreeRevoke() {
  // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
  // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
  // If there's a concurrent revoke, ok to not necessarily reset num_bytes_freed_revoke_
  // all the way to zero exactly as the remainder will be subtracted at the next GC.
  size_t bytes_freed = num_bytes_freed_revoke_.load(std::memory_order_relaxed);
  CHECK_GE(num_bytes_freed_revoke_.fetch_sub(bytes_freed, std::memory_order_relaxed),
           bytes_freed) << "num_bytes_freed_revoke_ underflow";
  CHECK_GE(num_bytes_allocated_.fetch_sub(bytes_freed, std::memory_order_relaxed),
           bytes_freed) << "num_bytes_allocated_ underflow";
  GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
}

space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
  if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) {
    return rosalloc_space_;
  }
  for (const auto& space : continuous_spaces_) {
    if (space->AsContinuousSpace()->IsRosAllocSpace()) {
      if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
        return space->AsContinuousSpace()->AsRosAllocSpace();
      }
    }
  }
  return nullptr;
}

static inline bool EntrypointsInstrumented() REQUIRES_SHARED(Locks::mutator_lock_) {
  instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
}

mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
                                             AllocatorType allocator,
                                             bool instrumented,
                                             size_t alloc_size,
                                             size_t* bytes_allocated,
                                             size_t* usable_size,
                                             size_t* bytes_tl_bulk_allocated,
                                             ObjPtr<mirror::Class>* klass) {
  bool was_default_allocator = allocator == GetCurrentAllocator();
  // Make sure there is no pending exception since we may need to throw an OOME.
  self->AssertNoPendingException();
  DCHECK(klass != nullptr);

  StackHandleScope<1> hs(self);
  HandleWrapperObjPtr<mirror::Class> h_klass(hs.NewHandleWrapper(klass));

  auto send_object_pre_alloc =
      [&]() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_) {
        if (UNLIKELY(instrumented)) {
          AllocationListener* l = alloc_listener_.load(std::memory_order_seq_cst);
          if (UNLIKELY(l != nullptr) && UNLIKELY(l->HasPreAlloc())) {
            l->PreObjectAllocated(self, h_klass, &alloc_size);
          }
        }
      };
#define PERFORM_SUSPENDING_OPERATION(op)                                          \
  [&]() REQUIRES(Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_) { \
    ScopedAllowThreadSuspension ats;                                              \
    auto res = (op);                                                              \
    send_object_pre_alloc();                                                      \
    return res;                                                                   \
  }()

  // The allocation failed. If the GC is running, block until it completes, and then retry the
  // allocation.
  collector::GcType last_gc =
      PERFORM_SUSPENDING_OPERATION(WaitForGcToComplete(kGcCauseForAlloc, self));
  // If we were the default allocator but the allocator changed while we were suspended,
  // abort the allocation.
  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
      (!instrumented && EntrypointsInstrumented())) {
    return nullptr;
  }
  uint32_t starting_gc_num = GetCurrentGcNum();
  if (last_gc != collector::kGcTypeNone) {
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                     usable_size, bytes_tl_bulk_allocated);
    if (ptr != nullptr) {
      return ptr;
    }
  }
  if (IsGCDisabledForShutdown()) {
    // We're just shutting down and GCs don't work anymore. Try a different allocator.
    mirror::Object* ptr = TryToAllocate<true, false>(self,
                                                     kAllocatorTypeNonMoving,
                                                     alloc_size,
                                                     bytes_allocated,
                                                     usable_size,
                                                     bytes_tl_bulk_allocated);
    if (ptr != nullptr) {
      return ptr;
    }
  }

  int64_t bytes_freed_before = GetBytesFreedEver();
  auto have_reclaimed_enough = [&]() {
    size_t curr_bytes_allocated = GetBytesAllocated();
    size_t free_heap = UnsignedDifference(growth_limit_, curr_bytes_allocated);
    int64_t newly_freed = GetBytesFreedEver() - bytes_freed_before;
    double free_heap_ratio = static_cast<double>(free_heap) / growth_limit_;
    double newly_freed_ratio = static_cast<double>(newly_freed) / growth_limit_;
    return free_heap_ratio >= kMinFreeHeapAfterGcForAlloc ||
           newly_freed_ratio >= kMinFreedHeapAfterGcForAlloc;
  };
  // We perform one GC as per the next_gc_type_ (chosen in GrowForUtilization),
  // if it's not already tried. If that doesn't succeed then go for the most
  // exhaustive option. Perform a full-heap collection including clearing
  // SoftReferences. In case of ConcurrentCopying, it will also ensure that
  // all regions are evacuated. If allocation doesn't succeed even after that
  // then there is no hope, so we throw OOME.
  collector::GcType tried_type = next_gc_type_;
  if (last_gc < tried_type) {
    const bool gc_ran = PERFORM_SUSPENDING_OPERATION(
        CollectGarbageInternal(tried_type, kGcCauseForAlloc, false, starting_gc_num + 1)
        != collector::kGcTypeNone);

    if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
        (!instrumented && EntrypointsInstrumented())) {
      return nullptr;
    }
    if (gc_ran && have_reclaimed_enough()) {
      mirror::Object* ptr = TryToAllocate<true, false>(self, allocator,
                                                       alloc_size, bytes_allocated,
                                                       usable_size, bytes_tl_bulk_allocated);
      if (ptr != nullptr) {
        return ptr;
      }
    }
  }
  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
  // VM spec requires that all SoftReferences have been collected and cleared before throwing
  // OOME.
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
           << " allocation";
  // TODO: Run finalization, but this may cause more allocations to occur.
  // We don't need a WaitForGcToComplete here either.
  // TODO: Should check whether another thread already just ran a GC with soft
  // references.

  DCHECK(!gc_plan_.empty());

  int64_t min_freed_to_continue =
      static_cast<int64_t>(kMinFreedHeapAfterGcForAlloc * growth_limit_ + alloc_size);
  // Repeatedly collect the entire heap until either
  // (a) this was insufficiently productive at reclaiming memory and we should give up to avoid
  // "GC thrashing", or
  // (b) GC was sufficiently productive (reclaimed min_freed_to_continue bytes) AND allowed us to
  // satisfy the allocation request.
  do {
    bytes_freed_before = GetBytesFreedEver();
    pre_oome_gc_count_.fetch_add(1, std::memory_order_relaxed);
    PERFORM_SUSPENDING_OPERATION(
        CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true, GC_NUM_ANY));
    if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
        (!instrumented && EntrypointsInstrumented())) {
      return nullptr;
    }
    bool ran_homogeneous_space_compaction = false;
    bool immediately_reclaimed_enough = have_reclaimed_enough();
    if (!immediately_reclaimed_enough) {
      const uint64_t current_time = NanoTime();
      if (allocator == kAllocatorTypeRosAlloc || allocator == kAllocatorTypeDlMalloc) {
        if (use_homogeneous_space_compaction_for_oom_ &&
            current_time - last_time_homogeneous_space_compaction_by_oom_ >
            min_interval_homogeneous_space_compaction_by_oom_) {
          last_time_homogeneous_space_compaction_by_oom_ = current_time;
          ran_homogeneous_space_compaction =
              (PERFORM_SUSPENDING_OPERATION(PerformHomogeneousSpaceCompact()) ==
               HomogeneousSpaceCompactResult::kSuccess);
          // Thread suspension could have occurred.
          if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
              (!instrumented && EntrypointsInstrumented())) {
            return nullptr;
          }
          // Always print that we ran homogeneous space compaction since this can cause jank.
          VLOG(heap) << "Ran heap homogeneous space compaction, "
                    << " requested defragmentation "
                    << count_requested_homogeneous_space_compaction_.load()
                    << " performed defragmentation "
                    << count_performed_homogeneous_space_compaction_.load()
                    << " ignored homogeneous space compaction "
                    << count_ignored_homogeneous_space_compaction_.load()
                    << " delayed count = "
                    << count_delayed_oom_.load();
        }
      }
    }
    if (immediately_reclaimed_enough ||
        (ran_homogeneous_space_compaction && have_reclaimed_enough())) {
      mirror::Object* ptr = TryToAllocate<true, true>(
          self, allocator, alloc_size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
      if (ptr != nullptr) {
        if (ran_homogeneous_space_compaction) {
          count_delayed_oom_++;
        }
        return ptr;
      }
    }
    // This loops only if we reclaimed plenty of memory, but presumably some other thread beat us
    // to allocating it. In the very unlikely case that we're running into a serious fragmentation
    // issue, and there is no other thread allocating, GCs will quickly become unsuccessful, and we
    // will stop then. If another thread is allocating aggressively, this may go on for a while,
    // but we are still making progress somewhere.
  } while (GetBytesFreedEver() - bytes_freed_before > min_freed_to_continue);
#undef PERFORM_SUSPENDING_OPERATION
  // Throw an OOM error.
  {
    ScopedAllowThreadSuspension ats;
    ThrowOutOfMemoryError(self, alloc_size, allocator);
  }
  return nullptr;
}
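
// ---------------------------------------------------------------------------
// Added commentary (not part of the original heap.cc): the slow path above
// escalates in stages:
//   1. wait for any in-progress GC, then retry the allocation;
//   2. run the next planned GC type (next_gc_type_) and retry if it freed
//      enough memory (have_reclaimed_enough);
//   3. repeatedly run the most exhaustive GC in gc_plan_, clearing
//      SoftReferences as the spec requires before an OOME, optionally trying
//      homogeneous space compaction for the malloc-backed allocators;
//   4. if even that cannot satisfy the request, throw OutOfMemoryError.
// ---------------------------------------------------------------------------
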
void Heap::SetTargetHeapUtilization(float target) {
  DCHECK_GT(target, 0.1f);  // asserted in Java code
  DCHECK_LT(target, 1.0f);
  target_utilization_ = target;
}

size_t Heap::GetObjectsAllocated() const {
  Thread* const self = Thread::Current();
  ScopedThreadStateChange tsc(self, ThreadState::kWaitingForGetObjectsAllocated);
  // Prevent GC running during GetObjectsAllocated since we may get a checkpoint request that tells
  // us to suspend while we are doing SuspendAll. b/35232978
  gc::ScopedGCCriticalSection gcs(Thread::Current(),
                                  gc::kGcCauseGetObjectsAllocated,
                                  gc::kCollectorTypeGetObjectsAllocated);
  // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
  ScopedSuspendAll ssa(__FUNCTION__);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  size_t total = 0;
  for (space::AllocSpace* space : alloc_spaces_) {
    total += space->GetObjectsAllocated();
  }
  return total;
}

uint64_t Heap::GetBytesAllocatedEver() const {
  // Force the returned value to be monotonically increasing, in the sense that if this is called
  // at A and B, such that A happens-before B, then the call at B returns a value no smaller than
  // that at A. This is not otherwise guaranteed, since num_bytes_allocated_ is decremented first,
  // and total_bytes_freed_ever_ is incremented later.
  static std::atomic<uint64_t> max_bytes_so_far(0);
  uint64_t so_far = max_bytes_so_far.load(std::memory_order_relaxed);
  uint64_t current_bytes = GetBytesFreedEver(std::memory_order_acquire) + GetBytesAllocated();
  DCHECK(current_bytes < (static_cast<uint64_t>(1) << 63));  // result is "positive".
  do {
    if (current_bytes <= so_far) {
      return so_far;
    }
  } while (!max_bytes_so_far.compare_exchange_weak(so_far /* updated */,
                                                   current_bytes, std::memory_order_relaxed));
  return current_bytes;
}
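
// ---------------------------------------------------------------------------
// Added commentary (not part of the original heap.cc): the loop above is the
// lock-free "monotonic maximum" idiom. compare_exchange_weak publishes
// current_bytes only if it still exceeds the recorded maximum; on failure it
// reloads the newest maximum into so_far, and the loop re-checks whether an
// update is still needed, so the returned value never goes backwards.
// ---------------------------------------------------------------------------
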
// Check whether the given object is an instance of the given class.
static bool MatchesClass(mirror::Object* obj,
                         Handle<mirror::Class> h_class,
                         bool use_is_assignable_from) REQUIRES_SHARED(Locks::mutator_lock_) {
  mirror::Class* instance_class = obj->GetClass();
  CHECK(instance_class != nullptr);
  ObjPtr<mirror::Class> klass = h_class.Get();
  if (use_is_assignable_from) {
    return klass != nullptr && klass->IsAssignableFrom(instance_class);
  }
  return instance_class == klass;
}

void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
                          bool use_is_assignable_from,
                          uint64_t* counts) {
  auto instance_counter = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i < classes.size(); ++i) {
      if (MatchesClass(obj, classes[i], use_is_assignable_from)) {
        ++counts[i];
      }
    }
  };
  VisitObjects(instance_counter);
}
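
// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical, not from the original source):
// CountInstances backs debugger-style instance-count queries. A caller holding
// a Handle<mirror::Class> h_klass in a handle scope would do roughly:
//
//   std::vector<Handle<mirror::Class>> classes;
//   classes.push_back(h_klass);
//   uint64_t count = 0;
//   heap->CountInstances(classes, /*use_is_assignable_from=*/true, &count);
//   // count now holds the number of instances assignable to h_klass.
// ---------------------------------------------------------------------------
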
2188 
CollectGarbage(bool clear_soft_references,GcCause cause)2189 void Heap::CollectGarbage(bool clear_soft_references, GcCause cause) {
2190   // Even if we waited for a GC we still need to do another GC since weaks allocated during the
2191   // last GC will not have necessarily been cleared.
2192   CollectGarbageInternal(gc_plan_.back(), cause, clear_soft_references, GC_NUM_ANY);
2193 }
2194 
SupportHomogeneousSpaceCompactAndCollectorTransitions() const2195 bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
2196   return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
2197       foreground_collector_type_ == kCollectorTypeCMS;
2198 }

HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
  Thread* self = Thread::Current();
  // Increment the requested homogeneous space compaction count.
  count_requested_homogeneous_space_compaction_++;
  // The performed count is updated below, once the compaction actually runs.
  ScopedThreadStateChange tsc(self, ThreadState::kWaitingPerformingGc);
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ScopedThreadStateChange tsc2(self, ThreadState::kWaitingForGcToComplete);
    MutexLock mu(self, *gc_complete_lock_);
    // Ensure there is only one GC at a time.
    WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
    // Homogeneous space compaction is a copying transition, so we can't run it if the moving GC
    // disable count is non-zero.
    // If the collector type changed to something which doesn't benefit from homogeneous space
    // compaction, exit.
    if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
        !main_space_->CanMoveObjects()) {
      return kErrorReject;
    }
    if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
      return kErrorUnsupported;
    }
    collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
  }
  if (Runtime::Current()->IsShuttingDown(self)) {
    // Don't allow heap transitions to happen if the runtime is shutting down since these can
    // cause objects to get finalized.
    FinishGC(self, collector::kGcTypeNone);
    return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
  }
  collector::GarbageCollector* collector;
  {
    ScopedSuspendAll ssa(__FUNCTION__);
    uint64_t start_time = NanoTime();
    // Launch compaction.
    space::MallocSpace* to_space = main_space_backup_.release();
    space::MallocSpace* from_space = main_space_;
    to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
    const uint64_t space_size_before_compaction = from_space->Size();
    AddSpace(to_space);
    // Make sure that we will have enough room to copy.
    CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
    collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
    const uint64_t space_size_after_compaction = to_space->Size();
    main_space_ = to_space;
    main_space_backup_.reset(from_space);
    RemoveSpace(from_space);
    SetSpaceAsDefault(main_space_);  // Set as default to reset the proper dlmalloc space.
    // Update performed homogeneous space compaction count.
    count_performed_homogeneous_space_compaction_++;
    // Print statistics log and resume all threads.
    uint64_t duration = NanoTime() - start_time;
    VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
               << PrettySize(space_size_before_compaction) << " -> "
               << PrettySize(space_size_after_compaction) << " compact-ratio: "
               << std::fixed << static_cast<double>(space_size_after_compaction) /
               static_cast<double>(space_size_before_compaction);
  }
  // Finish GC.
  // Get the references we need to enqueue.
  SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self);
  GrowForUtilization(semi_space_collector_);
  LogGC(kGcCauseHomogeneousSpaceCompact, collector);
  FinishGC(self, collector::kGcTypeFull);
  // Enqueue any references after losing the GC locks.
  clear->Run(self);
  clear->Finalize();
  {
    ScopedObjectAccess soa(self);
    soa.Vm()->UnloadNativeLibraries();
  }
  return HomogeneousSpaceCompactResult::kSuccess;
}

void Heap::SetDefaultConcurrentStartBytes() {
  MutexLock mu(Thread::Current(), *gc_complete_lock_);
  if (collector_type_running_ != kCollectorTypeNone) {
    // If a collector is already running, just let it set concurrent_start_bytes_.
    return;
  }
  SetDefaultConcurrentStartBytesLocked();
}

void Heap::SetDefaultConcurrentStartBytesLocked() {
  if (IsGcConcurrent()) {
    size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
    size_t reserve_bytes = target_footprint / 4;
    reserve_bytes = std::min(reserve_bytes, kMaxConcurrentRemainingBytes);
    reserve_bytes = std::max(reserve_bytes, kMinConcurrentRemainingBytes);
    concurrent_start_bytes_ = UnsignedDifference(target_footprint, reserve_bytes);
  } else {
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
}
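
// The effect of the code above: the concurrent GC is kicked off once allocation comes within
// reserve_bytes of the target footprint, where reserve_bytes is a quarter of the footprint
// clamped into [kMinConcurrentRemainingBytes, kMaxConcurrentRemainingBytes]. A standalone
// sketch of the same computation, assuming UnsignedDifference() clamps at zero (illustrative
// only; the clamp bounds here are hypothetical stand-ins for the constants above):
//
//   size_t ConcurrentStartBytes(size_t target_footprint) {
//     constexpr size_t kMinReserve = 128 * 1024;
//     constexpr size_t kMaxReserve = 512 * 1024;
//     size_t reserve = std::clamp(target_footprint / 4, kMinReserve, kMaxReserve);
//     return target_footprint >= reserve ? target_footprint - reserve : 0;
//   }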

void Heap::ChangeCollector(CollectorType collector_type) {
  // TODO: Only do this with all mutators suspended to avoid races.
  if (collector_type != collector_type_) {
    collector_type_ = collector_type;
    gc_plan_.clear();
    switch (collector_type_) {
      case kCollectorTypeCC: {
        if (use_generational_cc_) {
          gc_plan_.push_back(collector::kGcTypeSticky);
        }
        gc_plan_.push_back(collector::kGcTypeFull);
        if (use_tlab_) {
          ChangeAllocator(kAllocatorTypeRegionTLAB);
        } else {
          ChangeAllocator(kAllocatorTypeRegion);
        }
        break;
      }
      case kCollectorTypeCMC: {
        gc_plan_.push_back(collector::kGcTypeFull);
        if (use_tlab_) {
          ChangeAllocator(kAllocatorTypeTLAB);
        } else {
          ChangeAllocator(kAllocatorTypeBumpPointer);
        }
        break;
      }
      case kCollectorTypeSS: {
        gc_plan_.push_back(collector::kGcTypeFull);
        if (use_tlab_) {
          ChangeAllocator(kAllocatorTypeTLAB);
        } else {
          ChangeAllocator(kAllocatorTypeBumpPointer);
        }
        break;
      }
      case kCollectorTypeMS: {
        gc_plan_.push_back(collector::kGcTypeSticky);
        gc_plan_.push_back(collector::kGcTypePartial);
        gc_plan_.push_back(collector::kGcTypeFull);
        ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
        break;
      }
      case kCollectorTypeCMS: {
        gc_plan_.push_back(collector::kGcTypeSticky);
        gc_plan_.push_back(collector::kGcTypePartial);
        gc_plan_.push_back(collector::kGcTypeFull);
        ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
        break;
      }
      default: {
        UNIMPLEMENTED(FATAL);
        UNREACHABLE();
      }
    }
    SetDefaultConcurrentStartBytesLocked();
  }
}
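
// Summary of the plans and allocators established above:
//   CC:      sticky (if generational) then full; region or region-TLAB allocator.
//   CMC/SS:  full only; bump-pointer or TLAB allocator.
//   MS/CMS:  sticky -> partial -> full; rosalloc or dlmalloc allocator.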

// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
class ZygoteCompactingCollector final : public collector::SemiSpace {
 public:
  ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
      : SemiSpace(heap, "zygote collector"),
        bin_live_bitmap_(nullptr),
        bin_mark_bitmap_(nullptr),
        is_running_on_memory_tool_(is_running_on_memory_tool) {}

  void BuildBins(space::ContinuousSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
    bin_live_bitmap_ = space->GetLiveBitmap();
    bin_mark_bitmap_ = space->GetMarkBitmap();
    uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin());
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    // Note: This requires traversing the space in increasing order of object addresses.
    auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
      uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
      size_t bin_size = object_addr - prev;
      // Add the bin spanning from the end of the previous object to the start of this object.
      AddBin(bin_size, prev);
      prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
    };
    bin_live_bitmap_->Walk(visitor);
    // Add the last bin, which spans from after the last object to the end of the space.
    AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev);
  }

 private:
  // Maps from bin sizes to locations.
  std::multimap<size_t, uintptr_t> bins_;
  // Live bitmap of the space which contains the bins.
  accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
  // Mark bitmap of the space which contains the bins.
  accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
  const bool is_running_on_memory_tool_;

  void AddBin(size_t size, uintptr_t position) {
    if (is_running_on_memory_tool_) {
      MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
    }
    if (size != 0) {
      bins_.insert(std::make_pair(size, position));
    }
  }

  bool ShouldSweepSpace([[maybe_unused]] space::ContinuousSpace* space) const override {
    // Don't sweep any spaces since we probably blasted the internal accounting of the free list
    // allocator.
    return false;
  }

  mirror::Object* MarkNonForwardedObject(mirror::Object* obj) override
      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
    size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
    mirror::Object* forward_address;
    // Find the smallest bin into which we can move obj.
    auto it = bins_.lower_bound(alloc_size);
    if (it == bins_.end()) {
      // No available space in the bins, place it in the target space instead (grows the zygote
      // space).
      size_t bytes_allocated, unused_bytes_tl_bulk_allocated;
      forward_address = to_space_->Alloc(
          self_, alloc_size, &bytes_allocated, nullptr, &unused_bytes_tl_bulk_allocated);
      if (to_space_live_bitmap_ != nullptr) {
        to_space_live_bitmap_->Set(forward_address);
      } else {
        GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
        GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
      }
    } else {
      size_t size = it->first;
      uintptr_t pos = it->second;
      bins_.erase(it);  // Erase the old bin which we replace with the new smaller bin.
      forward_address = reinterpret_cast<mirror::Object*>(pos);
      // Set the live and mark bits so that sweeping system weaks works properly.
      bin_live_bitmap_->Set(forward_address);
      bin_mark_bitmap_->Set(forward_address);
      DCHECK_GE(size, alloc_size);
      // Add a new bin with the remaining space.
      AddBin(size - alloc_size, pos + alloc_size);
    }
    // Copy the object over to its new location.
    // Historical note: We did not use `alloc_size` to avoid a Valgrind error.
    memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
    if (kUseBakerReadBarrier) {
      obj->AssertReadBarrierState();
      forward_address->AssertReadBarrierState();
    }
    return forward_address;
  }
};
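
// The bins_ multimap above implements a simple best-fit allocator: lower_bound() selects the
// smallest bin that still fits the object, and any leftover tail is re-inserted as a smaller
// bin. A standalone sketch of the same idea (illustrative only, not used by the runtime):
//
//   std::multimap<size_t, uintptr_t> bins;  // size -> start address
//   std::optional<uintptr_t> BestFit(size_t alloc_size) {
//     auto it = bins.lower_bound(alloc_size);
//     if (it == bins.end()) {
//       return std::nullopt;  // No bin fits; the caller grows the target space instead.
//     }
//     auto [size, pos] = *it;
//     bins.erase(it);
//     if (size > alloc_size) {
//       bins.emplace(size - alloc_size, pos + alloc_size);  // Keep the tail as a new bin.
//     }
//     return pos;
//   }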

void Heap::UnBindBitmaps() {
  TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
  for (const auto& space : GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (alloc_space->GetLiveBitmap() != nullptr && alloc_space->HasBoundBitmaps()) {
        alloc_space->UnBindBitmaps();
      }
    }
  }
}

void Heap::IncrementFreedEver() {
  // Counters are updated only by us, but may be read concurrently.
  // The updates should become visible after the corresponding live object info.
  total_objects_freed_ever_.store(total_objects_freed_ever_.load(std::memory_order_relaxed)
                                  + GetCurrentGcIteration()->GetFreedObjects()
                                  + GetCurrentGcIteration()->GetFreedLargeObjects(),
                                  std::memory_order_release);
  total_bytes_freed_ever_.store(total_bytes_freed_ever_.load(std::memory_order_relaxed)
                                + GetCurrentGcIteration()->GetFreedBytes()
                                + GetCurrentGcIteration()->GetFreedLargeObjectBytes(),
                                std::memory_order_release);
}
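
// Memory-ordering note for the stores above: since the GC thread is the only writer, a relaxed
// load followed by a release store is a correct (and cheaper) substitute for an atomic
// read-modify-write. Readers that pair their loads with acquire ordering (as in
// GetBytesAllocatedEver()) observe the new totals only after the GC iteration data they cover.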

#pragma clang diagnostic push
#if !ART_USE_FUTEXES
// Frame gets too large, perhaps due to Bionic pthread_mutex_lock size. We don't care.
#  pragma clang diagnostic ignored "-Wframe-larger-than="
#endif
// This has a large frame, but shouldn't be run anywhere near the stack limit.
// FIXME: BUT it did exceed... http://b/197647048
#  pragma clang diagnostic ignored "-Wframe-larger-than="
void Heap::PreZygoteFork() {
  if (!HasZygoteSpace()) {
    // We still want to GC in case there are some unreachable non-moving objects that could cause a
    // suboptimal bin packing when we compact the zygote space.
    CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false, GC_NUM_ANY);
    // Trim the pages at the end of the non moving space. Trim while not holding zygote lock since
    // the trim process may require locking the mutator lock.
    non_moving_space_->Trim();
  }
  // We need to close userfaultfd fd for app/webview zygotes to avoid getattr
  // (stat) on the fd during fork.
  Thread* self = Thread::Current();
  MutexLock mu(self, zygote_creation_lock_);
  // Try to see if we have any Zygote spaces.
  if (HasZygoteSpace()) {
    return;
  }
  Runtime* runtime = Runtime::Current();
  // Setup linear-alloc pool for post-zygote fork allocations before freezing
  // snapshots of intern-table and class-table.
  runtime->SetupLinearAllocForPostZygoteFork(self);
  runtime->GetInternTable()->AddNewTable();
  runtime->GetClassLinker()->MoveClassTableToPreZygote();
  VLOG(heap) << "Starting PreZygoteFork";
  // The end of the non-moving space may be protected, unprotect it so that we can copy the zygote
  // there.
  non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
  const bool same_space = non_moving_space_ == main_space_;
  if (kCompactZygote) {
    // Temporarily disable rosalloc verification because the zygote
    // compaction will mess up the rosalloc internal metadata.
    ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
    ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
    zygote_collector.BuildBins(non_moving_space_);
    // Create a new bump pointer space which we will compact into.
    space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
                                         non_moving_space_->Limit());
    // Compact the bump pointer space to a new zygote bump pointer space.
    bool reset_main_space = false;
    if (IsMovingGc(collector_type_)) {
      if (collector_type_ == kCollectorTypeCC) {
        zygote_collector.SetFromSpace(region_space_);
      } else {
        zygote_collector.SetFromSpace(bump_pointer_space_);
      }
    } else {
      CHECK(main_space_ != nullptr);
      CHECK_NE(main_space_, non_moving_space_)
          << "Does not make sense to compact within the same space";
      // Copy from the main space.
      zygote_collector.SetFromSpace(main_space_);
      reset_main_space = true;
    }
    zygote_collector.SetToSpace(&target_space);
    zygote_collector.SetSwapSemiSpaces(false);
    zygote_collector.Run(kGcCauseCollectorTransition, false);
    if (reset_main_space) {
      main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
      madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
      MemMap mem_map = main_space_->ReleaseMemMap();
      RemoveSpace(main_space_);
      space::Space* old_main_space = main_space_;
      CreateMainMallocSpace(std::move(mem_map),
                            kDefaultInitialSize,
                            std::min(mem_map.Size(), growth_limit_),
                            mem_map.Size());
      delete old_main_space;
      AddSpace(main_space_);
    } else {
      if (collector_type_ == kCollectorTypeCC) {
        region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
        // Evacuated everything out of the region space, clear the mark bitmap.
        region_space_->GetMarkBitmap()->Clear();
      } else {
        bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
      }
    }
    if (temp_space_ != nullptr) {
      CHECK(temp_space_->IsEmpty());
    }
    IncrementFreedEver();
    // Update the end and write out image.
    non_moving_space_->SetEnd(target_space.End());
    non_moving_space_->SetLimit(target_space.Limit());
    VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
  }
  // Change the collector to the post zygote one.
  ChangeCollector(foreground_collector_type_);
  // Save the old space so that we can remove it after we complete creating the zygote space.
  space::MallocSpace* old_alloc_space = non_moving_space_;
  // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
  // the remaining available space.
  // Remove the old space before creating the zygote space since creating the zygote space sets
  // the old alloc space's bitmaps to null.
  RemoveSpace(old_alloc_space);
  if (collector::SemiSpace::kUseRememberedSet) {
    // Consistency bound check.
    FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
    // Remove the remembered set for the now zygote space (the old
    // non-moving space). Note now that we have compacted objects into
    // the zygote space, the data in the remembered set is no longer
    // needed. The zygote space will instead have a mod-union table
    // from this point on.
    RemoveRememberedSet(old_alloc_space);
  }
  // Remaining space becomes the new non moving space.
  zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
                                                     &non_moving_space_);
  CHECK(!non_moving_space_->CanMoveObjects());
  if (same_space) {
    main_space_ = non_moving_space_;
    SetSpaceAsDefault(main_space_);
  }
  delete old_alloc_space;
  CHECK(HasZygoteSpace()) << "Failed creating zygote space";
  AddSpace(zygote_space_);
  non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
  AddSpace(non_moving_space_);
  constexpr bool set_mark_bit = kUseBakerReadBarrier
                                && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects;
  if (set_mark_bit) {
    // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is
    // safe since we mark all of the objects that may reference non immune objects as gray.
    zygote_space_->SetMarkBitInLiveObjects();
  }

  // Create the zygote space mod union table.
  accounting::ModUnionTable* mod_union_table =
      new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space_);
  CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";

  if (collector_type_ != kCollectorTypeCC && collector_type_ != kCollectorTypeCMC) {
    // Set all the cards in the mod-union table since we don't know which objects contain references
    // to large objects.
    mod_union_table->SetCards();
  } else {
    // Make sure to clear the zygote space cards so that we don't dirty pages in the next GC. There
    // may be dirty cards from the zygote compaction or reference processing. These cards are not
    // necessary to have marked since the zygote space may not refer to any objects not in the
    // zygote or image spaces at this point.
    mod_union_table->ProcessCards();
    mod_union_table->ClearTable();

    // For CC and CMC we never collect zygote large objects. This means we do not need to set the
    // cards for the zygote mod-union table and we can also clear all of the existing image
    // mod-union tables. The existing mod-union tables are only for image spaces and may only
    // reference zygote and image objects.
    for (auto& pair : mod_union_tables_) {
      CHECK(pair.first->IsImageSpace());
      CHECK(!pair.first->AsImageSpace()->GetImageHeader().IsAppImage());
      accounting::ModUnionTable* table = pair.second;
      table->ClearTable();
    }
  }
  AddModUnionTable(mod_union_table);
  large_object_space_->SetAllLargeObjectsAsZygoteObjects(self, set_mark_bit);
  if (collector::SemiSpace::kUseRememberedSet) {
    // Add a new remembered set for the post-zygote non-moving space.
    accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
        new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
                                      non_moving_space_);
    CHECK(post_zygote_non_moving_space_rem_set != nullptr)
        << "Failed to create post-zygote non-moving space remembered set";
    AddRememberedSet(post_zygote_non_moving_space_rem_set);
  }
}
#pragma clang diagnostic pop
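
// Recap of PreZygoteFork() above: (1) compact live objects into the tail of the non-moving
// space via the bin-packing collector, (2) convert the old alloc space into the immutable
// zygote space and carve a fresh non-moving space out of the remainder, and (3) swap the
// zygote space's remembered set for a mod-union table, since its cross-space references are
// tracked that way from this point on.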

void Heap::FlushAllocStack() {
  MarkAllocStackAsLive(allocation_stack_.get());
  allocation_stack_->Reset();
}

void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
                          accounting::ContinuousSpaceBitmap* bitmap2,
                          accounting::LargeObjectBitmap* large_objects,
                          accounting::ObjectStack* stack) {
  DCHECK(bitmap1 != nullptr);
  DCHECK(bitmap2 != nullptr);
  const auto* limit = stack->End();
  for (auto* it = stack->Begin(); it != limit; ++it) {
    const mirror::Object* obj = it->AsMirrorPtr();
    if (!kUseThreadLocalAllocationStack || obj != nullptr) {
      if (bitmap1->HasAddress(obj)) {
        bitmap1->Set(obj);
      } else if (bitmap2->HasAddress(obj)) {
        bitmap2->Set(obj);
      } else {
        DCHECK(large_objects != nullptr);
        large_objects->Set(obj);
      }
    }
  }
}

void Heap::SwapSemiSpaces() {
  CHECK(bump_pointer_space_ != nullptr);
  CHECK(temp_space_ != nullptr);
  std::swap(bump_pointer_space_, temp_space_);
}

collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
                                           space::ContinuousMemMapAllocSpace* source_space,
                                           GcCause gc_cause) {
  CHECK(kMovingCollector);
  if (target_space != source_space) {
    // Don't swap spaces since this isn't a typical semi space collection.
    semi_space_collector_->SetSwapSemiSpaces(false);
    semi_space_collector_->SetFromSpace(source_space);
    semi_space_collector_->SetToSpace(target_space);
    semi_space_collector_->Run(gc_cause, false);
    return semi_space_collector_;
  }
  LOG(FATAL) << "Unsupported";
  UNREACHABLE();
}

void Heap::TraceHeapSize(size_t heap_size) {
  ATraceIntegerValue("Heap size (KB)", heap_size / KB);
}

#if defined(__GLIBC__)
# define IF_GLIBC(x) x
#else
# define IF_GLIBC(x)
#endif

size_t Heap::GetNativeBytes() {
  size_t malloc_bytes;
#if defined(__BIONIC__) || defined(__GLIBC__) || defined(ANDROID_HOST_MUSL)
  IF_GLIBC(size_t mmapped_bytes;)
  struct mallinfo mi = mallinfo();
  // In spite of the documentation, the jemalloc version of this call seems to do what we want,
  // and it is thread-safe.
  if (sizeof(size_t) > sizeof(mi.uordblks) && sizeof(size_t) > sizeof(mi.hblkhd)) {
    // Shouldn't happen, but glibc declares uordblks as int.
    // Avoiding sign extension gets us correct behavior for another 2 GB.
    malloc_bytes = (unsigned int)mi.uordblks;
    IF_GLIBC(mmapped_bytes = (unsigned int)mi.hblkhd;)
  } else {
    malloc_bytes = mi.uordblks;
    IF_GLIBC(mmapped_bytes = mi.hblkhd;)
  }
  // From the spec, it appeared mmapped_bytes <= malloc_bytes. Reality was sometimes
  // dramatically different. (b/119580449 was an early bug.) If so, we try to fudge it.
  // However, malloc implementations seem to interpret hblkhd differently, namely as
  // mapped blocks backing the entire heap (e.g. jemalloc) vs. large objects directly
  // allocated via mmap (e.g. glibc). Thus we now only do this for glibc, where it
  // previously helped, and which appears to use a reading of the spec compatible
  // with our adjustment.
#if defined(__GLIBC__)
  if (mmapped_bytes > malloc_bytes) {
    malloc_bytes = mmapped_bytes;
  }
#endif  // GLIBC
#else  // Neither Bionic nor Glibc
  // We should hit this case only in contexts in which GC triggering is not critical. Effectively
  // disable GC triggering based on malloc().
  malloc_bytes = 1000;
#endif
  return malloc_bytes + native_bytes_registered_.load(std::memory_order_relaxed);
  // An alternative would be to get RSS from /proc/self/statm. Empirically, that's no
  // more expensive, and it would allow us to count memory allocated by means other than malloc.
  // However it would change as pages are unmapped and remapped due to memory pressure, among
  // other things. It seems risky to trigger GCs as a result of such changes.
}

static inline bool GCNumberLt(uint32_t gc_num1, uint32_t gc_num2) {
  // unsigned comparison, assuming a non-huge difference, but dealing correctly with wrapping.
  uint32_t difference = gc_num2 - gc_num1;
  bool completed_more_than_requested = difference > 0x80000000;
  return difference > 0 && !completed_more_than_requested;
}
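
// Worked example for GCNumberLt(): with gc_num1 == 0xFFFFFFFEu and gc_num2 == 1u, the
// difference is 3 (mod 2^32), so gc_num1 is correctly treated as preceding gc_num2 despite
// being numerically larger. If instead the difference exceeds 2^31, we assume the counter
// has already "completed more than requested" and return false.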


collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
                                               GcCause gc_cause,
                                               bool clear_soft_references,
                                               uint32_t requested_gc_num) {
  Thread* self = Thread::Current();
  Runtime* runtime = Runtime::Current();
  // If the heap can't run the GC, silently fail and return that no GC was run.
  switch (gc_type) {
    case collector::kGcTypePartial: {
      if (!HasZygoteSpace()) {
        // Do not increment gcs_completed_. We should retry with kGcTypeFull.
        return collector::kGcTypeNone;
      }
      break;
    }
    default: {
      // Other GC types don't have any special cases that make them unrunnable. The main case
      // here is full GC.
    }
  }
  ScopedThreadStateChange tsc(self, ThreadState::kWaitingPerformingGc);
  Locks::mutator_lock_->AssertNotHeld(self);
  SelfDeletingTask* clear;  // Unconditionally set below.
  {
    // We should not ever become runnable and re-suspend while executing a GC.
    // This would likely cause a deadlock if we acted on a suspension request.
    // TODO: We really want to assert that we don't transition to kRunnable.
    ScopedAssertNoThreadSuspension scoped_assert("Performing GC");
    if (self->IsHandlingStackOverflow()) {
      // If we are throwing a stack overflow error we probably don't have enough remaining stack
      // space to run the GC.
      // Count this as a GC in case someone is waiting for it to complete.
      gcs_completed_.fetch_add(1, std::memory_order_release);
      return collector::kGcTypeNone;
    }
    bool compacting_gc;
    {
      gc_complete_lock_->AssertNotHeld(self);
      // Already not runnable; just switch suspended states. We remain in a suspended state until
      // FinishGC(). This avoids the complicated dance in StartGC().
      ScopedThreadStateChange tsc2(self, ThreadState::kWaitingForGcToComplete);
      MutexLock mu(self, *gc_complete_lock_);
      // Ensure there is only one GC at a time.
      WaitForGcToCompleteLocked(gc_cause, self);
      if (requested_gc_num != GC_NUM_ANY && !GCNumberLt(GetCurrentGcNum(), requested_gc_num)) {
        // The appropriate GC was already triggered elsewhere.
        return collector::kGcTypeNone;
      }
      compacting_gc = IsMovingGc(collector_type_);
      // GC can be disabled if someone has used GetPrimitiveArrayCritical.
      if (compacting_gc && disable_moving_gc_count_ != 0) {
        LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
        // Again count this as a GC.
        gcs_completed_.fetch_add(1, std::memory_order_release);
        return collector::kGcTypeNone;
      }
      if (gc_disabled_for_shutdown_) {
        gcs_completed_.fetch_add(1, std::memory_order_release);
        return collector::kGcTypeNone;
      }
      collector_type_running_ = collector_type_;
      last_gc_cause_ = gc_cause;
    }
    if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
      ++runtime->GetStats()->gc_for_alloc_count;
      ++self->GetStats()->gc_for_alloc_count;
    }
    const size_t bytes_allocated_before_gc = GetBytesAllocated();

    DCHECK_LT(gc_type, collector::kGcTypeMax);
    DCHECK_NE(gc_type, collector::kGcTypeNone);

    collector::GarbageCollector* collector = nullptr;
    // TODO: Clean this up.
    if (compacting_gc) {
      DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
             current_allocator_ == kAllocatorTypeTLAB ||
             current_allocator_ == kAllocatorTypeRegion ||
             current_allocator_ == kAllocatorTypeRegionTLAB);
      switch (collector_type_) {
        case kCollectorTypeSS:
          semi_space_collector_->SetFromSpace(bump_pointer_space_);
          semi_space_collector_->SetToSpace(temp_space_);
          semi_space_collector_->SetSwapSemiSpaces(true);
          collector = semi_space_collector_;
          break;
        case kCollectorTypeCMC:
          collector = mark_compact_;
          break;
        case kCollectorTypeCC:
          collector::ConcurrentCopying* active_cc_collector;
          if (use_generational_cc_) {
            // TODO: Other threads must do the flip checkpoint before they start poking at
            // active_concurrent_copying_collector_. So there should be no concurrency here.
            active_cc_collector = (gc_type == collector::kGcTypeSticky) ?
                                      young_concurrent_copying_collector_ :
                                      concurrent_copying_collector_;
            active_concurrent_copying_collector_.store(active_cc_collector,
                                                       std::memory_order_relaxed);
            DCHECK(active_cc_collector->RegionSpace() == region_space_);
            collector = active_cc_collector;
          } else {
            collector = active_concurrent_copying_collector_.load(std::memory_order_relaxed);
          }
          break;
        default:
          LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
      }
      // temp_space_ will be null for kCollectorTypeCMC.
      if (temp_space_ != nullptr &&
          collector != active_concurrent_copying_collector_.load(std::memory_order_relaxed)) {
        temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
        if (kIsDebugBuild) {
          // Try to read each page of the memory map in case mprotect didn't work properly
          // b/19894268.
          temp_space_->GetMemMap()->TryReadable();
        }
        CHECK(temp_space_->IsEmpty());
      }
    } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
               current_allocator_ == kAllocatorTypeDlMalloc) {
      collector = FindCollectorByGcType(gc_type);
    } else {
      LOG(FATAL) << "Invalid current allocator " << current_allocator_;
    }

    CHECK(collector != nullptr) << "Could not find garbage collector with collector_type="
                                << static_cast<size_t>(collector_type_)
                                << " and gc_type=" << gc_type;
    collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
    IncrementFreedEver();
    RequestTrim(self);
    // Collect cleared references.
    clear = reference_processor_->CollectClearedReferences(self);
    // Grow the heap so that we know when to perform the next GC.
    GrowForUtilization(collector, bytes_allocated_before_gc);
    old_native_bytes_allocated_.store(GetNativeBytes());
    LogGC(gc_cause, collector);
    FinishGC(self, gc_type);
    // We're suspended up to this point.
  }
  // Actually enqueue all cleared references. Do this after the GC has officially finished since
  // otherwise we can deadlock.
  clear->Run(self);
  clear->Finalize();
  // Inform DDMS that a GC completed.
  Dbg::GcDidFinish();

  // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
  // deadlocks in case the JNI_OnUnload function does allocations.
  {
    ScopedObjectAccess soa(self);
    soa.Vm()->UnloadNativeLibraries();
  }
  return gc_type;
}

void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
  const size_t duration = GetCurrentGcIteration()->GetDurationNs();
  const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
  // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
  // (mutator time blocked >= long_pause_log_threshold_).
  bool log_gc = kLogAllGCs || (gc_cause == kGcCauseExplicit && always_log_explicit_gcs_);
  if (!log_gc && CareAboutPauseTimes()) {
    // GC for alloc pauses the allocating thread, so consider it as a pause.
    log_gc = duration > long_gc_log_threshold_ ||
        (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
    for (uint64_t pause : pause_times) {
      log_gc = log_gc || pause >= long_pause_log_threshold_;
    }
  }
  bool is_sampled = false;
  if (UNLIKELY(gc_stress_mode_)) {
    static std::atomic_int64_t accumulated_duration_ns = 0;
    accumulated_duration_ns += duration;
    if (accumulated_duration_ns >= kGcStressModeGcLogSampleFrequencyNs) {
      accumulated_duration_ns -= kGcStressModeGcLogSampleFrequencyNs;
      log_gc = true;
      is_sampled = true;
    }
  }
  if (log_gc) {
    const size_t percent_free = GetPercentFree();
    const size_t current_heap_size = GetBytesAllocated();
    const size_t total_memory = GetTotalMemory();
    std::ostringstream pause_string;
    for (size_t i = 0; i < pause_times.size(); ++i) {
      pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
                   << ((i != pause_times.size() - 1) ? "," : "");
    }
    LOG(INFO) << gc_cause << " " << collector->GetName()
              << (is_sampled ? " (sampled)" : "")
              << " GC freed "
              << PrettySize(current_gc_iteration_.GetFreedBytes()) << " AllocSpace bytes, "
              << current_gc_iteration_.GetFreedLargeObjects() << "("
              << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
              << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
              << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
              << " total " << PrettyDuration((duration / 1000) * 1000);
    VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
  }
}
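
// Note: the (x / 1000) * 1000 truncations above deliberately round the durations down to
// microsecond granularity before PrettyDuration() formats them.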

void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
  MutexLock mu(self, *gc_complete_lock_);
  collector_type_running_ = kCollectorTypeNone;
  if (gc_type != collector::kGcTypeNone) {
    last_gc_type_ = gc_type;

    // Update stats.
    ++gc_count_last_window_;
    if (running_collection_is_blocking_) {
      // If the currently running collection was a blocking one,
      // increment the counters and reset the flag.
      ++blocking_gc_count_;
      blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
      ++blocking_gc_count_last_window_;
    }
    // Update the gc count rate histograms if due.
    UpdateGcCountRateHistograms();
  }
  // Reset.
  running_collection_is_blocking_ = false;
  thread_running_gc_ = nullptr;
  if (gc_type != collector::kGcTypeNone) {
    gcs_completed_.fetch_add(1, std::memory_order_release);
  }
  // Wake anyone who may have been waiting for the GC to complete.
  gc_complete_cond_->Broadcast(self);
}

void Heap::UpdateGcCountRateHistograms() {
  // Invariant: if the time since the last update spans more than one window, all the GC runs
  // (if > 0) must have happened in the first window, because otherwise the update would already
  // have taken place at an earlier GC run. So, we report the non-first windows with zero counts
  // to the histograms.
  DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
  uint64_t now = NanoTime();
  DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
  uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
  uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;

  // The computed number of windows can be incoherently high if NanoTime() is not monotonic.
  // Setting a limit on its maximum value reduces the impact on CPU time in such cases.
  if (num_of_windows > kGcCountRateHistogramMaxNumMissedWindows) {
    LOG(WARNING) << "Reducing the number of considered missed Gc histogram windows from "
                 << num_of_windows << " to " << kGcCountRateHistogramMaxNumMissedWindows;
    num_of_windows = kGcCountRateHistogramMaxNumMissedWindows;
  }

  if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
    // Record the first window.
    gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1);  // Exclude the current run.
    blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
        blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
    // Record the other windows (with zero counts).
    for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
      gc_count_rate_histogram_.AddValue(0);
      blocking_gc_count_rate_histogram_.AddValue(0);
    }
    // Update the last update time and reset the counters.
    last_update_time_gc_count_rate_histograms_ =
        (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
    gc_count_last_window_ = 1;  // Include the current run.
    blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
  }
  DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
}
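
// Worked example of the windowing above, with window duration W and the last update at time T:
// a GC finishing at T + 3.5 * W yields num_of_windows == 3; we record the first window's count
// (excluding the current run), then two empty windows, and reset the last update time to
// T + 3 * W, so the current run is accounted to the still-open fourth window.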

class RootMatchesObjectVisitor : public SingleRootVisitor {
 public:
  explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }

  void VisitRoot(mirror::Object* root, const RootInfo& info)
      override REQUIRES_SHARED(Locks::mutator_lock_) {
    if (root == obj_) {
      LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
    }
  }

 private:
  const mirror::Object* const obj_;
};


class ScanVisitor {
 public:
  void operator()(const mirror::Object* obj) const {
    LOG(ERROR) << "Would have rescanned object " << obj;
  }
};

// Verify a reference from an object.
class VerifyReferenceVisitor : public SingleRootVisitor {
 public:
  VerifyReferenceVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
    CHECK_EQ(self_, Thread::Current());
  }

  void operator()([[maybe_unused]] ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (verify_referent_) {
      VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
    }
  }

  void operator()(ObjPtr<mirror::Object> obj,
                  MemberOffset offset,
                  [[maybe_unused]] bool is_static) const REQUIRES_SHARED(Locks::mutator_lock_) {
    VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
  }

  bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS {
    return heap_->IsLiveObjectLocked(obj, true, false, true);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }
  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
        root->AsMirrorPtr(), RootInfo(kRootVMInternal));
  }

  void VisitRoot(mirror::Object* root, const RootInfo& root_info) override
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (root == nullptr) {
      LOG(ERROR) << "Root is null with info " << root_info.GetType();
    } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
      LOG(ERROR) << "Root " << root << " is dead with type " << mirror::Object::PrettyTypeOf(root)
          << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
    }
  }

 private:
  // TODO: Fix the no thread safety analysis.
  // Returns false on failure.
  bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (ref == nullptr || IsLive(ref)) {
      // Verify that the reference is live.
      return true;
    }
    CHECK_EQ(self_, Thread::Current());  // fail_count_ is private to the calling thread.
    *fail_count_ += 1;
    if (*fail_count_ == 1) {
      // Only print message for the first failure to prevent spam.
      LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
    }
    if (obj != nullptr) {
      // Only do this part for non roots.
      accounting::CardTable* card_table = heap_->GetCardTable();
      accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
      accounting::ObjectStack* live_stack = heap_->live_stack_.get();
      uint8_t* card_addr = card_table->CardFromAddr(obj);
      LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
                 << offset << "\n card value = " << static_cast<int>(*card_addr);
      if (heap_->IsValidObjectAddress(obj->GetClass())) {
        LOG(ERROR) << "Obj type " << obj->PrettyTypeOf();
      } else {
        LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
      }

      // Attempt to find the class inside of the recently freed objects.
      space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
      if (ref_space != nullptr && ref_space->IsMallocSpace()) {
        space::MallocSpace* space = ref_space->AsMallocSpace();
        mirror::Class* ref_class = space->FindRecentFreedObject(ref);
        if (ref_class != nullptr) {
          LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
                     << ref_class->PrettyClass();
        } else {
          LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
        }
      }

      if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
          ref->GetClass()->IsClass()) {
        LOG(ERROR) << "Ref type " << ref->PrettyTypeOf();
      } else {
        LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
                   << ") is not a valid heap address";
      }

      card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
      void* cover_begin = card_table->AddrFromCard(card_addr);
      void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
          accounting::CardTable::kCardSize);
      LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
          << "-" << cover_end;
      accounting::ContinuousSpaceBitmap* bitmap =
          heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);

      if (bitmap == nullptr) {
        LOG(ERROR) << "Object " << obj << " has no bitmap";
        if (!VerifyClassClass(obj->GetClass())) {
          LOG(ERROR) << "Object " << obj << " failed class verification!";
        }
      } else {
        // Print out how the object is live.
        if (bitmap->Test(obj)) {
          LOG(ERROR) << "Object " << obj << " found in live bitmap";
        }
        if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
          LOG(ERROR) << "Object " << obj << " found in allocation stack";
        }
        if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
          LOG(ERROR) << "Object " << obj << " found in live stack";
        }
        if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
          LOG(ERROR) << "Ref " << ref << " found in allocation stack";
        }
        if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
          LOG(ERROR) << "Ref " << ref << " found in live stack";
        }
        // Attempt to see if the card table missed the reference.
        ScanVisitor scan_visitor;
        uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
        card_table->Scan<false>(bitmap, byte_cover_begin,
                                byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
      }

      // Search to see if any of the roots reference our object.
      RootMatchesObjectVisitor visitor1(obj);
      Runtime::Current()->VisitRoots(&visitor1);
      // Search to see if any of the roots reference our reference.
      RootMatchesObjectVisitor visitor2(ref);
      Runtime::Current()->VisitRoots(&visitor2);
    }
    return false;
  }

  Thread* const self_;
  Heap* const heap_;
  size_t* const fail_count_;
  const bool verify_referent_;
};

// Verify all references within an object, for use with HeapBitmap::Visit.
class VerifyObjectVisitor {
 public:
  VerifyObjectVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent)
      : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}

  void operator()(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    // Note: we are verifying the references in obj but not obj itself; obj must be live, or else
    // how did we find it in the live bitmap?
    VerifyReferenceVisitor visitor(self_, heap_, fail_count_, verify_referent_);
    // The class doesn't count as a reference but we should verify it anyway.
    obj->VisitReferences(visitor, visitor);
  }

  void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    VerifyReferenceVisitor visitor(self_, heap_, fail_count_, verify_referent_);
    Runtime::Current()->VisitRoots(&visitor);
  }

  uint32_t GetFailureCount() const REQUIRES(Locks::mutator_lock_) {
    CHECK_EQ(self_, Thread::Current());
    return *fail_count_;
  }

 private:
  Thread* const self_;
  Heap* const heap_;
  size_t* const fail_count_;
  const bool verify_referent_;
};

void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) {
  // Slow path, the allocation stack push back must have already failed.
  DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr()));
  do {
    // TODO: Add handle VerifyObject.
    StackHandleScope<1> hs(self);
    HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
    // Push our object into the reserve region of the allocation stack. This is only required due
    // to heap verification requiring that roots are live (either in the live bitmap or in the
    // allocation stack).
    CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
    CollectGarbageInternal(collector::kGcTypeSticky,
                           kGcCauseForAlloc,
                           false,
                           GetCurrentGcNum() + 1);
  } while (!allocation_stack_->AtomicPushBack(obj->Ptr()));
}

void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self,
                                                          ObjPtr<mirror::Object>* obj) {
  // Slow path, the allocation stack push back must have already failed.
  DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr()));
  StackReference<mirror::Object>* start_address;
  StackReference<mirror::Object>* end_address;
  while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
                                            &end_address)) {
    // TODO: Add handle VerifyObject.
    StackHandleScope<1> hs(self);
    HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
    // Push our object into the reserve region of the allocation stack. This is only required due
    // to heap verification requiring that roots are live (either in the live bitmap or in the
    // allocation stack).
    CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
    // Push into the reserve allocation stack.
    CollectGarbageInternal(collector::kGcTypeSticky,
                           kGcCauseForAlloc,
                           false,
                           GetCurrentGcNum() + 1);
  }
  self->SetThreadLocalAllocationStack(start_address, end_address);
  // Retry on the new thread-local allocation stack.
  CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr()));  // Must succeed.
}

// Must do this with mutators suspended since we are directly accessing the allocation stacks.
size_t Heap::VerifyHeapReferences(bool verify_referents) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  // Let's sort our allocation stacks so that we can efficiently binary search them.
  allocation_stack_->Sort();
  live_stack_->Sort();
  // Since we sorted the allocation stack content, need to revoke all
  // thread-local allocation stacks.
  RevokeAllThreadLocalAllocationStacks(self);
  size_t fail_count = 0;
  VerifyObjectVisitor visitor(self, this, &fail_count, verify_referents);
  // Verify objects in the allocation stack since these will be objects which were:
  // 1. Allocated prior to the GC (pre GC verification).
  // 2. Allocated during the GC (pre sweep GC verification).
  // We don't want to verify the objects in the live stack since they themselves may be
  // pointing to dead objects if they are not reachable.
  VisitObjectsPaused(visitor);
  // Verify the roots:
  visitor.VerifyRoots();
  if (visitor.GetFailureCount() > 0) {
    // Dump mod-union tables.
    for (const auto& table_pair : mod_union_tables_) {
      accounting::ModUnionTable* mod_union_table = table_pair.second;
      mod_union_table->Dump(LOG_STREAM(ERROR) << mod_union_table->GetName() << ": ");
    }
    // Dump remembered sets.
    for (const auto& table_pair : remembered_sets_) {
      accounting::RememberedSet* remembered_set = table_pair.second;
      remembered_set->Dump(LOG_STREAM(ERROR) << remembered_set->GetName() << ": ");
    }
    DumpSpaces(LOG_STREAM(ERROR));
  }
  return visitor.GetFailureCount();
}
3312 
3313 class VerifyReferenceCardVisitor {
3314  public:
3315   VerifyReferenceCardVisitor(Heap* heap, bool* failed)
3316       REQUIRES_SHARED(Locks::mutator_lock_,
3317                             Locks::heap_bitmap_lock_)
3318       : heap_(heap), failed_(failed) {
3319   }
3320 
3321   // There are no card marks for native roots on a class.
3322   void VisitRootIfNonNull(
3323       [[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {}
3324   void VisitRoot([[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {}
3325 
3326   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
3327   // annotalysis on visitors.
3328   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
3329       NO_THREAD_SAFETY_ANALYSIS {
3330     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
3331     // Filter out class references since changing an object's class does not mark the card as dirty.
3332     // Also handles large objects, since the only reference they hold is a class reference.
3333     if (ref != nullptr && !ref->IsClass()) {
3334       accounting::CardTable* card_table = heap_->GetCardTable();
3335       // An object that references something in the live stack (other than a class) must be on
3336       // a dirty card; if its card is not dirty, report a failure.
3337       if (!card_table->AddrIsInCardTable(obj)) {
3338         LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
3339         *failed_ = true;
3340       } else if (!card_table->IsDirty(obj)) {
3341         // TODO: Check mod-union tables.
3342         // Card should be either kCardDirty if it got re-dirtied after we aged it, or
3343         // kCardDirty - 1 if it didn't get touched since we aged it.
3344         accounting::ObjectStack* live_stack = heap_->live_stack_.get();
3345         if (live_stack->ContainsSorted(ref)) {
3346           if (live_stack->ContainsSorted(obj)) {
3347             LOG(ERROR) << "Object " << obj << " found in live stack";
3348           }
3349           if (heap_->GetLiveBitmap()->Test(obj)) {
3350             LOG(ERROR) << "Object " << obj << " found in live bitmap";
3351           }
3352           LOG(ERROR) << "Object " << obj << " " << mirror::Object::PrettyTypeOf(obj)
3353                     << " references " << ref << " " << mirror::Object::PrettyTypeOf(ref)
3354                     << " in live stack";
3355 
3356           // Print which field of the object is dead.
3357           if (!obj->IsObjectArray()) {
3358             ObjPtr<mirror::Class> klass = is_static ? obj->AsClass() : obj->GetClass();
3359             CHECK(klass != nullptr);
3360             for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
3361               if (field.GetOffset().Int32Value() == offset.Int32Value()) {
3362                 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
3363                            << field.PrettyField();
3364                 break;
3365               }
3366             }
3367           } else {
3368             ObjPtr<mirror::ObjectArray<mirror::Object>> object_array =
3369                 obj->AsObjectArray<mirror::Object>();
3370             for (int32_t i = 0; i < object_array->GetLength(); ++i) {
3371               if (object_array->Get(i) == ref) {
3372                 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
3373               }
3374             }
3375           }
3376 
3377           *failed_ = true;
3378         }
3379       }
3380     }
3381   }
3382 
3383  private:
3384   Heap* const heap_;
3385   bool* const failed_;
3386 };
3387 
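// The dirty-card checks in the visitor above reduce to simple address
// arithmetic. A sketch of the usual card-table scheme (constants illustrative;
// ART's real values live in accounting::CardTable): one byte covers a
// 2^kCardShift-byte span of the heap, and that span is "dirty" when the byte
// holds the dirty marker.
[[maybe_unused]] static bool SketchCardIsDirty(const uint8_t* card_base,
                                               uintptr_t heap_begin,
                                               uintptr_t addr) {
  constexpr size_t kCardShift = 10;     // 1 KiB of heap per card (illustrative).
  constexpr uint8_t kCardDirty = 0x70;  // Dirty marker value (illustrative).
  return card_base[(addr - heap_begin) >> kCardShift] == kCardDirty;
}
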
3388 class VerifyLiveStackReferences {
3389  public:
3390   explicit VerifyLiveStackReferences(Heap* heap)
3391       : heap_(heap),
3392         failed_(false) {}
3393 
3394   void operator()(mirror::Object* obj) const
3395       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3396     VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
3397     obj->VisitReferences(visitor, VoidFunctor());
3398   }
3399 
3400   bool Failed() const {
3401     return failed_;
3402   }
3403 
3404  private:
3405   Heap* const heap_;
3406   bool failed_;
3407 };
3408 
3409 bool Heap::VerifyMissingCardMarks() {
3410   Thread* self = Thread::Current();
3411   Locks::mutator_lock_->AssertExclusiveHeld(self);
3412   // We need to sort the live stack since we binary search it.
3413   live_stack_->Sort();
3414   // Since we sorted the live stack content, we need to revoke all
3415   // thread-local allocation stacks.
3416   RevokeAllThreadLocalAllocationStacks(self);
3417   VerifyLiveStackReferences visitor(this);
3418   GetLiveBitmap()->Visit(visitor);
3419   // We can verify objects in the live stack since none of these should reference dead objects.
3420   for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
3421     if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
3422       visitor(it->AsMirrorPtr());
3423     }
3424   }
3425   return !visitor.Failed();
3426 }
3427 
3428 void Heap::SwapStacks() {
3429   if (kUseThreadLocalAllocationStack) {
3430     live_stack_->AssertAllZero();
3431   }
3432   allocation_stack_.swap(live_stack_);
3433 }
3434 
3435 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
3436   // This must be called only during the pause.
3437   DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
3438   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
3439   MutexLock mu2(self, *Locks::thread_list_lock_);
3440   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
3441   for (Thread* t : thread_list) {
3442     t->RevokeThreadLocalAllocationStack();
3443   }
3444 }
3445 
3446 void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
3447   if (kIsDebugBuild) {
3448     if (rosalloc_space_ != nullptr) {
3449       rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
3450     }
3451     if (bump_pointer_space_ != nullptr) {
3452       bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
3453     }
3454   }
3455 }
3456 
3457 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
3458   if (kIsDebugBuild) {
3459     if (bump_pointer_space_ != nullptr) {
3460       bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
3461     }
3462   }
3463 }
3464 
3465 accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
3466   auto it = mod_union_tables_.find(space);
3467   if (it == mod_union_tables_.end()) {
3468     return nullptr;
3469   }
3470   return it->second;
3471 }
3472 
3473 accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3474   auto it = remembered_sets_.find(space);
3475   if (it == remembered_sets_.end()) {
3476     return nullptr;
3477   }
3478   return it->second;
3479 }
3480 
3481 void Heap::ProcessCards(TimingLogger* timings,
3482                         bool use_rem_sets,
3483                         bool process_alloc_space_cards,
3484                         bool clear_alloc_space_cards) {
3485   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3486   // Clear cards and keep track of cards cleared in the mod-union table.
3487   for (const auto& space : continuous_spaces_) {
3488     accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
3489     accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
3490     if (table != nullptr) {
3491       const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3492           "ImageModUnionClearCards";
3493       TimingLogger::ScopedTiming t2(name, timings);
3494       table->ProcessCards();
3495     } else if (use_rem_sets && rem_set != nullptr) {
3496       DCHECK(collector::SemiSpace::kUseRememberedSet) << static_cast<int>(collector_type_);
3497       TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
3498       rem_set->ClearCards();
3499     } else if (process_alloc_space_cards) {
3500       TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
3501       if (clear_alloc_space_cards) {
3502         uint8_t* end = space->End();
3503         if (space->IsImageSpace()) {
3504           // Image space end is the end of the mirror objects; it is not necessarily page or card
3505           // aligned. Align up so that the check in ClearCardRange does not fail.
3506           end = AlignUp(end, accounting::CardTable::kCardSize);
3507         }
3508         card_table_->ClearCardRange(space->Begin(), end);
3509       } else {
3510         // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3511         // cards were dirty before the GC started.
3512         // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3513         // -> clean(cleaning thread).
3514         // The races mean we either end up with an aged card or an unaged card. Since we run the
3515         // checkpoint on the roots first and scan / update the mod-union tables afterwards, we
3516         // will always scan either card. If we end up with the non-aged card, we scan it in the pause.
3517         card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3518                                        VoidFunctor());
3519       }
3520     }
3521   }
3522 }
3523 
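// "Aging" a card via ModifyCardsAtomic downgrades dirty cards one step so the
// GC can later distinguish cards that were dirty before it started from cards
// re-dirtied during the collection. A sketch of the per-card transition that
// an AgeCardVisitor-style functor applies (marker value as in the sketch
// above; illustrative):
[[maybe_unused]] static uint8_t SketchAgeCard(uint8_t card) {
  constexpr uint8_t kCardDirty = 0x70;  // Illustrative dirty marker.
  // A dirty card ages to kCardDirty - 1; every other card is cleared.
  return (card == kCardDirty) ? (kCardDirty - 1) : 0;
}
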
3524 struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
3525   mirror::Object* MarkObject(mirror::Object* obj) override {
3526     return obj;
3527   }
3528   void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {
3529   }
3530 };
3531 
3532 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3533   Thread* const self = Thread::Current();
3534   TimingLogger* const timings = current_gc_iteration_.GetTimings();
3535   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3536   if (verify_pre_gc_heap_) {
3537     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
3538     size_t failures = VerifyHeapReferences();
3539     if (failures > 0) {
3540       LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3541           << " failures";
3542     }
3543   }
3544   // Check that all objects which reference things in the live stack are on dirty cards.
3545   if (verify_missing_card_marks_) {
3546     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
3547     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
3548     SwapStacks();
3549     // Sort the live stack so that we can quickly binary search it later.
3550     CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3551                                     << " missing card mark verification failed\n" << DumpSpaces();
3552     SwapStacks();
3553   }
3554   if (verify_mod_union_table_) {
3555     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
3556     ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
3557     for (const auto& table_pair : mod_union_tables_) {
3558       accounting::ModUnionTable* mod_union_table = table_pair.second;
3559       IdentityMarkHeapReferenceVisitor visitor;
3560       mod_union_table->UpdateAndMarkReferences(&visitor);
3561       mod_union_table->Verify();
3562     }
3563   }
3564 }
3565 
3566 void Heap::PreGcVerification(collector::GarbageCollector* gc) {
3567   if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
3568     collector::GarbageCollector::ScopedPause pause(gc, false);
3569     PreGcVerificationPaused(gc);
3570   }
3571 }
3572 
3573 void Heap::PrePauseRosAllocVerification([[maybe_unused]] collector::GarbageCollector* gc) {
3574   // TODO: Add a new runtime option for this?
3575   if (verify_pre_gc_rosalloc_) {
3576     RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
3577   }
3578 }
3579 
3580 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
3581   Thread* const self = Thread::Current();
3582   TimingLogger* const timings = current_gc_iteration_.GetTimings();
3583   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3584   // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3585   // reachable objects.
3586   if (verify_pre_sweeping_heap_) {
3587     TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
3588     CHECK_NE(self->GetState(), ThreadState::kRunnable);
3589     {
3590       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3591       // Swapping bound bitmaps does nothing.
3592       gc->SwapBitmaps();
3593     }
3594     // Pass in false since concurrent reference processing can mean that the reference referents
3595     // may point to dead objects at the point at which PreSweepingGcVerification is called.
3596     size_t failures = VerifyHeapReferences(false);
3597     if (failures > 0) {
3598       LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3599           << " failures";
3600     }
3601     {
3602       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3603       gc->SwapBitmaps();
3604     }
3605   }
3606   if (verify_pre_sweeping_rosalloc_) {
3607     RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3608   }
3609 }
3610 
3611 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3612   // Only pause if we have to do some verification.
3613   Thread* const self = Thread::Current();
3614   TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
3615   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3616   if (verify_system_weaks_) {
3617     ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3618     collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3619     mark_sweep->VerifySystemWeaks();
3620   }
3621   if (verify_post_gc_rosalloc_) {
3622     RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
3623   }
3624   if (verify_post_gc_heap_) {
3625     TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
3626     size_t failures = VerifyHeapReferences();
3627     if (failures > 0) {
3628       LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
3629           << " failures";
3630     }
3631   }
3632 }
3633 
3634 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
3635   if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
3636     collector::GarbageCollector::ScopedPause pause(gc, false);
3637     PostGcVerificationPaused(gc);
3638   }
3639 }
3640 
3641 void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
3642   TimingLogger::ScopedTiming t(name, timings);
3643   for (const auto& space : continuous_spaces_) {
3644     if (space->IsRosAllocSpace()) {
3645       VLOG(heap) << name << " : " << space->GetName();
3646       space->AsRosAllocSpace()->Verify();
3647     }
3648   }
3649 }
3650 
3651 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
3652   ScopedThreadStateChange tsc(self, ThreadState::kWaitingForGcToComplete);
3653   MutexLock mu(self, *gc_complete_lock_);
3654   return WaitForGcToCompleteLocked(cause, self);
3655 }
3656 
3657 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
3658   gc_complete_cond_->CheckSafeToWait(self);
3659   collector::GcType last_gc_type = collector::kGcTypeNone;
3660   GcCause last_gc_cause = kGcCauseNone;
3661   uint64_t wait_start = NanoTime();
3662   while (collector_type_running_ != kCollectorTypeNone) {
3663     if (!task_processor_->IsRunningThread(self)) {
3664       // The current thread is about to wait for a currently running
3665       // collection to finish. If the waiting thread is not the heap
3666       // task daemon thread, the currently running collection is
3667       // considered as a blocking GC.
3668       running_collection_is_blocking_ = true;
3669       VLOG(gc) << "Waiting for a blocking GC " << cause;
3670     }
3671     SCOPED_TRACE << "GC: Wait For Completion " << cause;
3672     // We must wait, so change the thread state and then sleep on gc_complete_cond_.
3673     gc_complete_cond_->Wait(self);
3674     last_gc_type = last_gc_type_;
3675     last_gc_cause = last_gc_cause_;
3676   }
3677   uint64_t wait_time = NanoTime() - wait_start;
3678   total_wait_time_ += wait_time;
3679   if (wait_time > long_pause_log_threshold_) {
3680     LOG(INFO) << "WaitForGcToComplete blocked " << cause << " on " << last_gc_cause << " for "
3681               << PrettyDuration(wait_time);
3682   }
3683   if (!task_processor_->IsRunningThread(self)) {
3684     // The current thread is about to run a collection. If the thread
3685     // is not the heap task daemon thread, it's considered as a
3686     // blocking GC (i.e., blocking itself).
3687     running_collection_is_blocking_ = true;
3688     // Don't log fake "GC" types that are only used for debugger or hidden APIs. If we log these,
3689     // it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
3690     if (cause == kGcCauseForAlloc ||
3691         cause == kGcCauseDisableMovingGc) {
3692       VLOG(gc) << "Starting a blocking GC " << cause;
3693     }
3694   }
3695   return last_gc_type;
3696 }
3697 
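// The wait above is the classic monitor pattern: re-check the "GC running"
// predicate under the lock and sleep on a condition variable that the GC
// signals on completion. A self-contained sketch with the standard library
// (ART uses its own Mutex/ConditionVariable; assumes <mutex> and
// <condition_variable> are available):
[[maybe_unused]] static void SketchWaitForGcToComplete(std::mutex* mu,
                                                       std::condition_variable* cond,
                                                       const bool* gc_running) {
  std::unique_lock<std::mutex> lock(*mu);
  // wait() releases the lock while sleeping and re-evaluates the predicate on
  // every wakeup, which also tolerates spurious wakeups.
  cond->wait(lock, [gc_running] { return !*gc_running; });
}
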
3698 void Heap::DumpForSigQuit(std::ostream& os) {
3699   os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
3700      << PrettySize(GetTotalMemory());
3701   {
3702     os << "Image spaces:\n";
3703     ScopedObjectAccess soa(Thread::Current());
3704     for (const auto& space : continuous_spaces_) {
3705       if (space->IsImageSpace()) {
3706         os << space->GetName() << "\n";
3707       }
3708     }
3709   }
3710   DumpGcPerformanceInfo(os);
3711 }
3712 
3713 size_t Heap::GetPercentFree() {
3714   return static_cast<size_t>(100.0f * static_cast<float>(
3715       GetFreeMemory()) / target_footprint_.load(std::memory_order_relaxed));
3716 }
3717 
3718 void Heap::SetIdealFootprint(size_t target_footprint) {
3719   if (target_footprint > GetMaxMemory()) {
3720     VLOG(gc) << "Clamp target GC heap from " << PrettySize(target_footprint) << " to "
3721              << PrettySize(GetMaxMemory());
3722     target_footprint = GetMaxMemory();
3723   }
3724   target_footprint_.store(target_footprint, std::memory_order_relaxed);
3725 }
3726 
3727 bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
3728   if (kMovingCollector) {
3729     space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true);
3730     if (space != nullptr) {
3731       // TODO: Check large object?
3732       return space->CanMoveObjects();
3733     }
3734   }
3735   return false;
3736 }
3737 
3738 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
3739   for (auto* collector : garbage_collectors_) {
3740     if (collector->GetCollectorType() == collector_type_ &&
3741         collector->GetGcType() == gc_type) {
3742       return collector;
3743     }
3744   }
3745   return nullptr;
3746 }
3747 
3748 double Heap::HeapGrowthMultiplier() const {
3749   // If we don't care about pause times, we are in the background, so return 1.0.
3750   if (!CareAboutPauseTimes()) {
3751     return 1.0;
3752   }
3753   return foreground_heap_growth_multiplier_;
3754 }
3755 
3756 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
3757                               size_t bytes_allocated_before_gc) {
3758   // We're running in the thread that set collector_type_running_ to something other than none,
3759   // thus ensuring that there is only one of us running. Thus
3760   // collector_type_running_ != kCollectorTypeNone, but that's a little tricky to turn into a
3761   // DCHECK.
3762 
3763   // We know what our utilization is at this moment.
3764   // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
3765   const size_t bytes_allocated = GetBytesAllocated();
3766   // Trace the new heap size after the GC is finished.
3767   TraceHeapSize(bytes_allocated);
3768   uint64_t target_size, grow_bytes;
3769   collector::GcType gc_type = collector_ran->GetGcType();
3770   MutexLock mu(Thread::Current(), process_state_update_lock_);
3771   // Use the multiplier to grow more for foreground.
3772   const double multiplier = HeapGrowthMultiplier();
3773   if (gc_type != collector::kGcTypeSticky) {
3774     // Grow the heap for non sticky GC.
3775     uint64_t delta = bytes_allocated * (1.0 / GetTargetHeapUtilization() - 1.0);
3776     DCHECK_LE(delta, std::numeric_limits<size_t>::max()) << "bytes_allocated=" << bytes_allocated
3777         << " target_utilization_=" << target_utilization_;
3778     grow_bytes = std::min(delta, static_cast<uint64_t>(max_free_));
3779     grow_bytes = std::max(grow_bytes, static_cast<uint64_t>(min_free_));
3780     target_size = bytes_allocated + static_cast<uint64_t>(grow_bytes * multiplier);
3781     next_gc_type_ = collector::kGcTypeSticky;
3782   } else {
3783     collector::GcType non_sticky_gc_type = NonStickyGcType();
3784     // Find what the next non sticky collector will be.
3785     collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
3786     if (use_generational_cc_) {
3787       if (non_sticky_collector == nullptr) {
3788         non_sticky_collector = FindCollectorByGcType(collector::kGcTypePartial);
3789       }
3790       CHECK(non_sticky_collector != nullptr);
3791     }
3792     double sticky_gc_throughput_adjustment = GetStickyGcThroughputAdjustment(use_generational_cc_);
3793 
3794     // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3795     // do another sticky collection next.
3796     // We also check that the bytes allocated aren't over the target_footprint, or
3797     // concurrent_start_bytes in case of concurrent GCs, in order to prevent a
3798     // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
3799     // if the sticky GC throughput always remained >= the full/partial throughput.
3800     size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
3801     if (current_gc_iteration_.GetEstimatedThroughput() * sticky_gc_throughput_adjustment >=
3802         non_sticky_collector->GetEstimatedMeanThroughput() &&
3803         non_sticky_collector->NumberOfIterations() > 0 &&
3804         bytes_allocated <= (IsGcConcurrent() ? concurrent_start_bytes_ : target_footprint)) {
3805       next_gc_type_ = collector::kGcTypeSticky;
3806     } else {
3807       next_gc_type_ = non_sticky_gc_type;
3808     }
3809     // If we have freed enough memory, shrink the heap back down.
3810     const size_t adjusted_max_free = static_cast<size_t>(max_free_ * multiplier);
3811     if (bytes_allocated + adjusted_max_free < target_footprint) {
3812       target_size = bytes_allocated + adjusted_max_free;
3813       grow_bytes = max_free_;
3814     } else {
3815       target_size = std::max(bytes_allocated, target_footprint);
3816       // The same whether jank perceptible or not; just avoid the adjustment.
3817       grow_bytes = 0;
3818     }
3819   }
3820   CHECK_LE(target_size, std::numeric_limits<size_t>::max())
3821       << " bytes_allocated:" << bytes_allocated
3822       << " bytes_freed:" << current_gc_iteration_.GetFreedBytes()
3823       << " large_obj_bytes_freed:" << current_gc_iteration_.GetFreedLargeObjectBytes();
3824   if (!ignore_target_footprint_) {
3825     SetIdealFootprint(target_size);
3826     // Store target size (computed with foreground heap growth multiplier) for updating
3827     // target_footprint_ when process state switches to foreground.
3828     // target_size = 0 ensures that target_footprint_ is not updated on
3829     // process-state switch.
3830     min_foreground_target_footprint_ =
3831         (multiplier <= 1.0 && grow_bytes > 0)
3832         ? std::min(
3833           bytes_allocated + static_cast<size_t>(grow_bytes * foreground_heap_growth_multiplier_),
3834           GetMaxMemory())
3835         : 0;
3836 
3837     if (IsGcConcurrent()) {
3838       const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
3839           current_gc_iteration_.GetFreedLargeObjectBytes() +
3840           current_gc_iteration_.GetFreedRevokeBytes();
3841       // Records the number of bytes allocated at the time of GC finish, excluding the number of
3842       // bytes allocated during GC.
3843       num_bytes_alive_after_gc_ = UnsignedDifference(bytes_allocated_before_gc, freed_bytes);
3844       // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3845       // how many bytes were allocated during the GC we need to add freed_bytes back on.
3846       // Almost always bytes_allocated + freed_bytes >= bytes_allocated_before_gc.
3847       const size_t bytes_allocated_during_gc =
3848           UnsignedDifference(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
3849       // Calculate when to perform the next ConcurrentGC.
3850       // Estimate how many remaining bytes we will have when we need to start the next GC.
3851       size_t remaining_bytes = bytes_allocated_during_gc;
3852       remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
3853       remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
3854       size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
3855       if (UNLIKELY(remaining_bytes > target_footprint)) {
3856         // This should never happen: it would mean that, at the estimated allocation rate, we
3857         // will exceed the application's entire footprint. Schedule another GC nearly straight
3858         // away.
3859         remaining_bytes = std::min(kMinConcurrentRemainingBytes, target_footprint);
3860       }
3861       DCHECK_LE(target_footprint_.load(std::memory_order_relaxed), GetMaxMemory());
3862       // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3863       // allocation rate is very high, remaining_bytes could tell us that we should start a GC
3864       // right away.
3865       concurrent_start_bytes_ = std::max(target_footprint - remaining_bytes, bytes_allocated);
3866       // Store concurrent_start_bytes_ (computed with the foreground heap growth multiplier) so
3867       // it can be updated when the process state switches to foreground.
3868       min_foreground_concurrent_start_bytes_ =
3869           min_foreground_target_footprint_ != 0
3870           ? std::max(min_foreground_target_footprint_ - remaining_bytes, bytes_allocated)
3871           : 0;
3872     }
3873   }
3874 }
3875 
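// The non-sticky branch above boils down to: grow by enough bytes to reach the
// target utilization, clamp that growth to [min_free, max_free], then scale by
// the foreground multiplier. A standalone sketch of just the arithmetic
// (parameter names illustrative):
[[maybe_unused]] static uint64_t SketchNonStickyTargetSize(uint64_t bytes_allocated,
                                                           double target_utilization,
                                                           uint64_t min_free,
                                                           uint64_t max_free,
                                                           double multiplier) {
  // E.g. target_utilization = 0.5 aims for a heap about twice the live size,
  // subject to the min_free/max_free clamps.
  uint64_t delta = static_cast<uint64_t>(bytes_allocated * (1.0 / target_utilization - 1.0));
  uint64_t grow_bytes = std::min(delta, max_free);
  grow_bytes = std::max(grow_bytes, min_free);
  return bytes_allocated + static_cast<uint64_t>(grow_bytes * multiplier);
}
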
3876 void Heap::ClampGrowthLimit() {
3877   // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
3878   ScopedObjectAccess soa(Thread::Current());
3879   WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
3880   capacity_ = growth_limit_;
3881   for (const auto& space : continuous_spaces_) {
3882     if (space->IsMallocSpace()) {
3883       gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3884       malloc_space->ClampGrowthLimit();
3885     }
3886   }
3887   if (large_object_space_ != nullptr) {
3888     large_object_space_->ClampGrowthLimit(capacity_);
3889   }
3890   if (collector_type_ == kCollectorTypeCC) {
3891     DCHECK(region_space_ != nullptr);
3892     // Twice the capacity as CC needs extra space for evacuating objects.
3893     region_space_->ClampGrowthLimit(2 * capacity_);
3894   } else if (collector_type_ == kCollectorTypeCMC) {
3895     DCHECK(gUseUserfaultfd);
3896     DCHECK_NE(mark_compact_, nullptr);
3897     DCHECK_NE(bump_pointer_space_, nullptr);
3898     mark_compact_->ClampGrowthLimit(capacity_);
3899   }
3900   // This space isn't added for performance reasons.
3901   if (main_space_backup_.get() != nullptr) {
3902     main_space_backup_->ClampGrowthLimit();
3903   }
3904 }
3905 
3906 void Heap::ClearGrowthLimit() {
3907   if (target_footprint_.load(std::memory_order_relaxed) == growth_limit_
3908       && growth_limit_ < capacity_) {
3909     target_footprint_.store(capacity_, std::memory_order_relaxed);
3910     SetDefaultConcurrentStartBytes();
3911   }
3912   growth_limit_ = capacity_;
3913   ScopedObjectAccess soa(Thread::Current());
3914   for (const auto& space : continuous_spaces_) {
3915     if (space->IsMallocSpace()) {
3916       gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3917       malloc_space->ClearGrowthLimit();
3918       malloc_space->SetFootprintLimit(malloc_space->Capacity());
3919     }
3920   }
3921   // This space isn't added for performance reasons.
3922   if (main_space_backup_.get() != nullptr) {
3923     main_space_backup_->ClearGrowthLimit();
3924     main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3925   }
3926 }
3927 
3928 void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
3929   ScopedObjectAccess soa(self);
3930   StackHandleScope<1u> hs(self);
3931   // Use handle wrapper to update the `*object` if the object gets moved.
3932   HandleWrapperObjPtr<mirror::Object> h_object = hs.NewHandleWrapper(object);
3933   WellKnownClasses::java_lang_ref_FinalizerReference_add->InvokeStatic<'V', 'L'>(
3934       self, h_object.Get());
3935 }
3936 
3937 void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
3938                                             bool force_full,
3939                                             uint32_t observed_gc_num,
3940                                             ObjPtr<mirror::Object>* obj) {
3941   StackHandleScope<1> hs(self);
3942   HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3943   RequestConcurrentGC(self, kGcCauseBackground, force_full, observed_gc_num);
3944 }
3945 
3946 class Heap::ConcurrentGCTask : public HeapTask {
3947  public:
3948   ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full, uint32_t gc_num)
3949       : HeapTask(target_time), cause_(cause), force_full_(force_full), my_gc_num_(gc_num) {}
3950   void Run(Thread* self) override {
3951     Runtime* runtime = Runtime::Current();
3952     gc::Heap* heap = runtime->GetHeap();
3953     DCHECK(GCNumberLt(my_gc_num_, heap->GetCurrentGcNum() + 2));  // <= current_gc_num + 1
3954     heap->ConcurrentGC(self, cause_, force_full_, my_gc_num_);
3955     CHECK_IMPLIES(GCNumberLt(heap->GetCurrentGcNum(), my_gc_num_), runtime->IsShuttingDown(self));
3956   }
3957 
3958  private:
3959   const GcCause cause_;
3960   const bool force_full_;  // If true, force full (or partial) collection.
3961   const uint32_t my_gc_num_;  // Sequence number of requested GC.
3962 };
3963 
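// GC sequence numbers such as my_gc_num_ are unsigned counters that may
// eventually wrap, so they are ordered with GCNumberLt rather than a plain
// '<'. One common wraparound-safe comparison (a sketch of the idea, not
// necessarily ART's exact implementation) interprets the distance modulo 2^32
// as a signed value:
[[maybe_unused]] static bool SketchSeqNumberLt(uint32_t a, uint32_t b) {
  // True iff a precedes b, assuming the two are within 2^31 of each other.
  return static_cast<int32_t>(a - b) < 0;
}
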
3964 static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
3965   Runtime* runtime = Runtime::Current();
3966   return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3967       !self->IsHandlingStackOverflow();
3968 }
3969 
3970 bool Heap::RequestConcurrentGC(Thread* self,
3971                                GcCause cause,
3972                                bool force_full,
3973                                uint32_t observed_gc_num) {
3974   uint32_t max_gc_requested = max_gc_requested_.load(std::memory_order_relaxed);
3975   if (!GCNumberLt(observed_gc_num, max_gc_requested)) {
3976     // observed_gc_num >= max_gc_requested: Nobody beat us to requesting the next gc.
3977     if (CanAddHeapTask(self)) {
3978       // Since observed_gc_num >= max_gc_requested, this increases max_gc_requested_, if successful.
3979       if (max_gc_requested_.CompareAndSetStrongRelaxed(max_gc_requested, observed_gc_num + 1)) {
3980         task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(),  // Start straight away.
3981                                                             cause,
3982                                                             force_full,
3983                                                             observed_gc_num + 1));
3984       }
3985       DCHECK(GCNumberLt(observed_gc_num, max_gc_requested_.load(std::memory_order_relaxed)));
3986       // If we increased max_gc_requested_, then we added a task that will eventually cause
3987       // gcs_completed_ to be incremented (to at least observed_gc_num + 1).
3988       // If the CAS failed, somebody else did.
3989       return true;
3990     }
3991     return false;
3992   }
3993   return true;  // Vacuously.
3994 }
3995 
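// The CompareAndSetStrongRelaxed above is what deduplicates concurrent GC
// requests: many threads may observe the same completed-GC count, but only the
// one that successfully bumps max_gc_requested_ enqueues a task; the losers
// know a request covering them is already queued. A sketch of that pattern
// with std::atomic (illustrative):
[[maybe_unused]] static bool SketchRequestOnce(std::atomic<uint32_t>* max_requested,
                                               uint32_t observed) {
  uint32_t expected = observed;
  // Succeeds for exactly one caller per observed value; on failure, expected
  // holds the newer value some other thread installed.
  return max_requested->compare_exchange_strong(expected, observed + 1,
                                                std::memory_order_relaxed);
}
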
3996 void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full, uint32_t requested_gc_num) {
3997   if (!Runtime::Current()->IsShuttingDown(self)) {
3998     // Wait for any GCs currently running to finish. If this incremented GC number, we're done.
3999     WaitForGcToComplete(cause, self);
4000     if (GCNumberLt(GetCurrentGcNum(), requested_gc_num)) {
4001       collector::GcType next_gc_type = next_gc_type_;
4002       // If forcing full and next gc type is sticky, override with a non-sticky type.
4003       if (force_full && next_gc_type == collector::kGcTypeSticky) {
4004         next_gc_type = NonStickyGcType();
4005       }
4006       // If we can't run the GC type we wanted to run, find the next appropriate one and try
4007       // that instead. E.g. can't do partial, so do full instead.
4008       // We must ensure that we run something that ends up incrementing gcs_completed_.
4009       // In the kGcTypePartial case, the initial CollectGarbageInternal call may not have that
4010       // effect, but the subsequent kGcTypeFull call will.
4011       if (CollectGarbageInternal(next_gc_type, cause, false, requested_gc_num)
4012           == collector::kGcTypeNone) {
4013         for (collector::GcType gc_type : gc_plan_) {
4014           if (!GCNumberLt(GetCurrentGcNum(), requested_gc_num)) {
4015             // Somebody did it for us.
4016             break;
4017           }
4018           // Attempt to run the collector, if we succeed, we are done.
4019           if (gc_type > next_gc_type &&
4020               CollectGarbageInternal(gc_type, cause, false, requested_gc_num)
4021               != collector::kGcTypeNone) {
4022             break;
4023           }
4024         }
4025       }
4026     }
4027   }
4028 }
4029 
4030 class Heap::CollectorTransitionTask : public HeapTask {
4031  public:
4032   explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
4033 
4034   void Run(Thread* self) override {
4035     gc::Heap* heap = Runtime::Current()->GetHeap();
4036     heap->DoPendingCollectorTransition();
4037     heap->ClearPendingCollectorTransition(self);
4038   }
4039 };
4040 
4041 void Heap::ClearPendingCollectorTransition(Thread* self) {
4042   MutexLock mu(self, *pending_task_lock_);
4043   pending_collector_transition_ = nullptr;
4044 }
4045 
4046 void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
4047   Thread* self = Thread::Current();
4048   desired_collector_type_ = desired_collector_type;
4049   if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
4050     return;
4051   }
4052   if (collector_type_ == kCollectorTypeCC) {
4053     // For CC, we invoke a full compaction when going to the background, but the collector type
4054     // doesn't change.
4055     DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground);
4056   }
4057   if (collector_type_ == kCollectorTypeCMC) {
4058     // For CMC, the collector type doesn't change.
4059     DCHECK_EQ(desired_collector_type_, kCollectorTypeCMCBackground);
4060   }
4061   DCHECK_NE(collector_type_, kCollectorTypeCCBackground);
4062   DCHECK_NE(collector_type_, kCollectorTypeCMCBackground);
4063   CollectorTransitionTask* added_task = nullptr;
4064   const uint64_t target_time = NanoTime() + delta_time;
4065   {
4066     MutexLock mu(self, *pending_task_lock_);
4067     // If we have an existing collector transition, update the target time to be the new target.
4068     if (pending_collector_transition_ != nullptr) {
4069       task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
4070       return;
4071     }
4072     added_task = new CollectorTransitionTask(target_time);
4073     pending_collector_transition_ = added_task;
4074   }
4075   task_processor_->AddTask(self, added_task);
4076 }
4077 
4078 class Heap::HeapTrimTask : public HeapTask {
4079  public:
4080   explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
4081   void Run(Thread* self) override {
4082     gc::Heap* heap = Runtime::Current()->GetHeap();
4083     heap->Trim(self);
4084     heap->ClearPendingTrim(self);
4085   }
4086 };
4087 
4088 void Heap::ClearPendingTrim(Thread* self) {
4089   MutexLock mu(self, *pending_task_lock_);
4090   pending_heap_trim_ = nullptr;
4091 }
4092 
4093 void Heap::RequestTrim(Thread* self) {
4094   if (!CanAddHeapTask(self)) {
4095     return;
4096   }
4097   // GC completed and now we must decide whether to request a heap trim (advising pages back to the
4098   // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
4099   // a space it will hold its lock and can become a cause of jank.
4100   // Note that the large object space trims itself, and that the Zygote space was trimmed at
4101   // fork time and has been unchanging since.
4102 
4103   // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
4104   // because that only marks object heads, so a large array looks like lots of empty space. We
4105   // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
4106   // to utilization (which is probably inversely proportional to how much benefit we can expect).
4107   // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
4108   // not how much use we're making of those pages.
4109   HeapTrimTask* added_task = nullptr;
4110   {
4111     MutexLock mu(self, *pending_task_lock_);
4112     if (pending_heap_trim_ != nullptr) {
4113       // Already have a heap trim request in task processor, ignore this request.
4114       return;
4115     }
4116     added_task = new HeapTrimTask(kHeapTrimWait);
4117     pending_heap_trim_ = added_task;
4118   }
4119   task_processor_->AddTask(self, added_task);
4120 }
4121 
4122 void Heap::IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke) {
4123   size_t previous_num_bytes_freed_revoke =
4124       num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_relaxed);
4125   // Check that the updated value does not exceed the number of bytes allocated. There is a risk of
4126   // execution being suspended between the increment above and the CHECK below, leading to
4127   // the use of previous_num_bytes_freed_revoke in the comparison.
4128   CHECK_GE(num_bytes_allocated_.load(std::memory_order_relaxed),
4129            previous_num_bytes_freed_revoke + freed_bytes_revoke);
4130 }
4131 
4132 void Heap::RevokeThreadLocalBuffers(Thread* thread) {
4133   if (rosalloc_space_ != nullptr) {
4134     size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
4135     if (freed_bytes_revoke > 0U) {
4136       IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
4137     }
4138   }
4139   if (bump_pointer_space_ != nullptr) {
4140     CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
4141   }
4142   if (region_space_ != nullptr) {
4143     CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
4144   }
4145 }
4146 
4147 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
4148   if (rosalloc_space_ != nullptr) {
4149     size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
4150     if (freed_bytes_revoke > 0U) {
4151       IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
4152     }
4153   }
4154 }
4155 
4156 void Heap::RevokeAllThreadLocalBuffers() {
4157   if (rosalloc_space_ != nullptr) {
4158     size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
4159     if (freed_bytes_revoke > 0U) {
4160       IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
4161     }
4162   }
4163   if (bump_pointer_space_ != nullptr) {
4164     CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
4165   }
4166   if (region_space_ != nullptr) {
4167     CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
4168   }
4169 }
4170 
4171 // For GC triggering purposes, we count old (pre-last-GC) and new native allocations as
4172 // different fractions of Java allocations.
4173 // For now, we essentially do not count old native allocations at all, so that we can preserve the
4174 // existing behavior of not limiting native heap size. If we seriously considered it, we would
4175 // have to adjust collection thresholds when we encounter large amounts of old native memory,
4176 // and handle native out-of-memory situations.
4177 
4178 static constexpr size_t kOldNativeDiscountFactor = 65536;  // Approximately infinite for now.
4179 static constexpr size_t kNewNativeDiscountFactor = 2;
4180 
4181 // If weighted java + native memory use exceeds our target by kStopForNativeFactor, and
4182 // newly allocated memory exceeds stop_for_native_allocs_, we wait for GC to complete to avoid
4183 // running out of memory.
4184 static constexpr float kStopForNativeFactor = 4.0;
4185 
4186 // Return the ratio of the weighted native + java allocated bytes to its target value.
4187 // A return value > 1.0 means we should collect. Significantly larger values mean we're falling
4188 // behind.
4189 inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent) {
4190   // Collection check for native allocation. Does not enforce Java heap bounds.
4191   // With adj_start_bytes defined below, effectively checks
4192   // <java bytes allocd> + c1*<old native allocd> + c2*<new native allocd> >= adj_start_bytes,
4193   // where currently c1 and c2 are 1 divided by the discount factors defined above.
4194   size_t old_native_bytes = old_native_bytes_allocated_.load(std::memory_order_relaxed);
4195   if (old_native_bytes > current_native_bytes) {
4196     // Net decrease; skip the check, but update old value.
4197     // It's OK to lose an update if two stores race.
4198     old_native_bytes_allocated_.store(current_native_bytes, std::memory_order_relaxed);
4199     return 0.0;
4200   } else {
4201     size_t new_native_bytes = UnsignedDifference(current_native_bytes, old_native_bytes);
4202     size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor
4203         + old_native_bytes / kOldNativeDiscountFactor;
4204     size_t add_bytes_allowed = static_cast<size_t>(
4205         NativeAllocationGcWatermark() * HeapGrowthMultiplier());
4206     size_t java_gc_start_bytes = is_gc_concurrent
4207         ? concurrent_start_bytes_
4208         : target_footprint_.load(std::memory_order_relaxed);
4209     size_t adj_start_bytes = UnsignedSum(java_gc_start_bytes,
4210                                          add_bytes_allowed / kNewNativeDiscountFactor);
4211     return static_cast<float>(GetBytesAllocated() + weighted_native_bytes)
4212          / static_cast<float>(adj_start_bytes);
4213   }
4214 }
4215 
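// Worked example of the weighting above, using the discount factors defined
// earlier (kNewNativeDiscountFactor = 2, kOldNativeDiscountFactor = 65536):
// 64 MiB of newly allocated native memory counts like 32 MiB of Java
// allocation, while old native memory is almost ignored. A sketch of the
// urgency computation (parameter names illustrative):
[[maybe_unused]] static float SketchNativeGcUrgency(size_t java_bytes,
                                                    size_t new_native_bytes,
                                                    size_t old_native_bytes,
                                                    size_t java_gc_start_bytes,
                                                    size_t native_watermark) {
  size_t weighted_native = new_native_bytes / 2 + old_native_bytes / 65536;
  size_t adj_start_bytes = java_gc_start_bytes + native_watermark / 2;
  // A result >= 1.0 means we should collect.
  return static_cast<float>(java_bytes + weighted_native) /
         static_cast<float>(adj_start_bytes);
}
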
4216 inline void Heap::CheckGCForNative(Thread* self) {
4217   bool is_gc_concurrent = IsGcConcurrent();
4218   uint32_t starting_gc_num = GetCurrentGcNum();
4219   size_t current_native_bytes = GetNativeBytes();
4220   float gc_urgency = NativeMemoryOverTarget(current_native_bytes, is_gc_concurrent);
4221   if (UNLIKELY(gc_urgency >= 1.0)) {
4222     if (is_gc_concurrent) {
4223       bool requested =
4224           RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true, starting_gc_num);
4225       if (requested && gc_urgency > kStopForNativeFactor
4226           && current_native_bytes > stop_for_native_allocs_) {
4227         // We're in danger of running out of memory due to rampant native allocation.
4228         if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
4229           LOG(INFO) << "Stopping for native allocation, urgency: " << gc_urgency;
4230         }
4231         // Count how many times we do this, so we can warn if this becomes excessive.
4232         // Stop after a while, out of excessive caution.
4233         static constexpr int kGcWaitIters = 20;
4234         for (int i = 1; i <= kGcWaitIters; ++i) {
4235           if (!GCNumberLt(GetCurrentGcNum(), max_gc_requested_.load(std::memory_order_relaxed))
4236               || WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
4237             break;
4238           }
4239           CHECK(GCNumberLt(starting_gc_num, max_gc_requested_.load(std::memory_order_relaxed)));
4240           if (i % 10 == 0) {
4241             LOG(WARNING) << "Slept " << i << " times in native allocation, waiting for GC";
4242           }
4243           static constexpr int kGcWaitSleepMicros = 2000;
4244           usleep(kGcWaitSleepMicros);  // Encourage our requested GC to start.
4245         }
4246       }
4247     } else {
4248       CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false, starting_gc_num + 1);
4249     }
4250   }
4251 }
4252 
4253 // About kNotifyNativeInterval allocations have occurred. Check whether we should garbage collect.
4254 void Heap::NotifyNativeAllocations(JNIEnv* env) {
4255   native_objects_notified_.fetch_add(kNotifyNativeInterval, std::memory_order_relaxed);
4256   CheckGCForNative(Thread::ForEnv(env));
4257 }
4258 
4259 // Register a native allocation with an explicit size.
4260 // This should only be done for large allocations of non-malloc memory, which we wouldn't
4261 // otherwise see.
4262 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
4263   // Cautiously check for a wrapped negative bytes argument.
4264   DCHECK(sizeof(size_t) < 8 || bytes < (std::numeric_limits<size_t>::max() / 2));
4265   native_bytes_registered_.fetch_add(bytes, std::memory_order_relaxed);
4266   uint32_t objects_notified =
4267       native_objects_notified_.fetch_add(1, std::memory_order_relaxed);
4268   if (objects_notified % kNotifyNativeInterval == kNotifyNativeInterval - 1
4269       || bytes > kCheckImmediatelyThreshold) {
4270     CheckGCForNative(Thread::ForEnv(env));
4271   }
4272   // Heap profiler treats this as a Java allocation with a null object.
4273   if (GetHeapSampler().IsEnabled()) {
4274     JHPCheckNonTlabSampleAllocation(Thread::Current(), nullptr, bytes);
4275   }
4276 }
4277 
4278 void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
4279   size_t allocated;
4280   size_t new_freed_bytes;
4281   do {
4282     allocated = native_bytes_registered_.load(std::memory_order_relaxed);
4283     new_freed_bytes = std::min(allocated, bytes);
4284     // We should not be registering more freed bytes than allocated bytes,
4285     // but keep going correctly in non-debug builds.
4286     DCHECK_EQ(new_freed_bytes, bytes);
4287   } while (!native_bytes_registered_.CompareAndSetWeakRelaxed(allocated,
4288                                                               allocated - new_freed_bytes));
4289 }
4290 
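// The loop above is a lock-free "subtract, but clamp at zero": a plain
// fetch_sub could underflow if a caller over-reports frees, so the new value
// is recomputed from a fresh load until the compare-and-swap succeeds. A
// standalone sketch (assumes <atomic>):
[[maybe_unused]] static void SketchSubClamped(std::atomic<size_t>* counter, size_t bytes) {
  size_t current = counter->load(std::memory_order_relaxed);
  size_t freed;
  do {
    freed = std::min(current, bytes);  // Never let the counter go below zero.
    // On failure, compare_exchange_weak reloads current and we recompute freed.
  } while (!counter->compare_exchange_weak(current, current - freed,
                                           std::memory_order_relaxed));
}
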
4291 size_t Heap::GetTotalMemory() const {
4292   return std::max(target_footprint_.load(std::memory_order_relaxed), GetBytesAllocated());
4293 }
4294 
4295 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
4296   DCHECK(mod_union_table != nullptr);
4297   mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
4298 }
4299 
4300 void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
4301   // Compare rounded sizes since the allocation may have been retried after rounding the size.
4302   // See b/37885600
4303   CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
4304         (c->IsVariableSize() ||
4305             RoundUp(c->GetObjectSize(), kObjectAlignment) ==
4306                 RoundUp(byte_count, kObjectAlignment)))
4307       << "ClassFlags=" << c->GetClassFlags()
4308       << " IsClassClass=" << c->IsClassClass()
4309       << " byte_count=" << byte_count
4310       << " IsVariableSize=" << c->IsVariableSize()
4311       << " ObjectSize=" << c->GetObjectSize()
4312       << " sizeof(Class)=" << sizeof(mirror::Class)
4313       << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag=*/ "klass");
4314   CHECK_GE(byte_count, sizeof(mirror::Object));
4315 }
4316 
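// The size check above compares values rounded up to kObjectAlignment. For a
// power-of-two alignment, RoundUp is the usual mask trick; a sketch:
[[maybe_unused]] static size_t SketchRoundUp(size_t x, size_t alignment) {
  // Requires alignment to be a power of two, e.g. 8 for object alignment.
  return (x + alignment - 1) & ~(alignment - 1);
}
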
4317 void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
4318   CHECK(remembered_set != nullptr);
4319   space::Space* space = remembered_set->GetSpace();
4320   CHECK(space != nullptr);
4321   CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
4322   remembered_sets_.Put(space, remembered_set);
4323   CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
4324 }
4325 
4326 void Heap::RemoveRememberedSet(space::Space* space) {
4327   CHECK(space != nullptr);
4328   auto it = remembered_sets_.find(space);
4329   CHECK(it != remembered_sets_.end());
4330   delete it->second;
4331   remembered_sets_.erase(it);
4332   CHECK(remembered_sets_.find(space) == remembered_sets_.end());
4333 }
4334 
4335 void Heap::ClearMarkedObjects(bool release_eagerly) {
4336   // Clear all of the spaces' mark bitmaps.
4337   for (const auto& space : GetContinuousSpaces()) {
4338     if (space->GetLiveBitmap() != nullptr && !space->HasBoundBitmaps()) {
4339       space->GetMarkBitmap()->Clear(release_eagerly);
4340     }
4341   }
4342   // Clear the marked objects in the discontinuous space object sets.
4343   for (const auto& space : GetDiscontinuousSpaces()) {
4344     space->GetMarkBitmap()->Clear(release_eagerly);
4345   }
4346 }
4347 
4348 void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
4349   allocation_records_.reset(records);
4350 }
4351 
4352 void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
4353   if (IsAllocTrackingEnabled()) {
4354     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4355     if (IsAllocTrackingEnabled()) {
4356       GetAllocationRecords()->VisitRoots(visitor);
4357     }
4358   }
4359 }
4360 
4361 void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
4362   if (IsAllocTrackingEnabled()) {
4363     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4364     if (IsAllocTrackingEnabled()) {
4365       GetAllocationRecords()->SweepAllocationRecords(visitor);
4366     }
4367   }
4368 }
4369 
4370 void Heap::AllowNewAllocationRecords() const {
4371   CHECK(!gUseReadBarrier);
4372   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4373   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4374   if (allocation_records != nullptr) {
4375     allocation_records->AllowNewAllocationRecords();
4376   }
4377 }
4378 
4379 void Heap::DisallowNewAllocationRecords() const {
4380   CHECK(!gUseReadBarrier);
4381   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4382   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4383   if (allocation_records != nullptr) {
4384     allocation_records->DisallowNewAllocationRecords();
4385   }
4386 }
4387 
4388 void Heap::BroadcastForNewAllocationRecords() const {
4389   // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
4390   // be set to false while some threads are waiting for system weak access in
4391   // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
4392   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4393   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4394   if (allocation_records != nullptr) {
4395     allocation_records->BroadcastForNewAllocationRecords();
4396   }
4397 }
4398 
4399 // Perfetto Java Heap Profiler Support.
4400 
4401 // Perfetto initialization.
4402 void Heap::InitPerfettoJavaHeapProf() {
4403   // Initialize Perfetto Heap info and Heap id.
4404   uint32_t heap_id = 1;  // Initialize to 1, to be overwritten by Perfetto heap id.
4405 #ifdef ART_TARGET_ANDROID
4406   // Register the heap and create the heapid.
4407   // Use a Perfetto heap name = "com.android.art" for the Java Heap Profiler.
4408   AHeapInfo* info = AHeapInfo_create("com.android.art");
4409   // Set the Enable Callback, there is no callback data ("nullptr").
4410   AHeapInfo_setEnabledCallback(info, &EnableHeapSamplerCallback, &heap_sampler_);
4411   // Set the Disable Callback.
4412   AHeapInfo_setDisabledCallback(info, &DisableHeapSamplerCallback, &heap_sampler_);
4413   heap_id = AHeapProfile_registerHeap(info);
4414   // Do not enable the Java Heap Profiler in this case, wait for Perfetto to enable it through
4415   // the callback function.
4416 #else
4417   // This is the host case, enable the Java Heap Profiler for host testing.
4418   // Perfetto API is currently not available on host.
4419   heap_sampler_.EnableHeapSampler();
4420 #endif
4421   heap_sampler_.SetHeapID(heap_id);
4422   VLOG(heap) << "Java Heap Profiler Initialized";
4423 }

void Heap::JHPCheckNonTlabSampleAllocation(Thread* self, mirror::Object* obj, size_t alloc_size) {
  bool take_sample = false;
  size_t bytes_until_sample = 0;
  HeapSampler& prof_heap_sampler = GetHeapSampler();
  // An allocation occurred; sample it even though it is non-TLAB. Any take_sample value left
  // over from a previous GetSampleOffset call (made when the TLAB allocation was tried first)
  // is not used; a fresh value is computed here and bytes_until_sample is updated. Note that
  // the return value of GetSampleOffset is deliberately ignored in the non-TLAB case.
  prof_heap_sampler.GetSampleOffset(
      alloc_size, self->GetTlabPosOffset(), &take_sample, &bytes_until_sample);
  prof_heap_sampler.SetBytesUntilSample(bytes_until_sample);
  if (take_sample) {
    prof_heap_sampler.ReportSample(obj, alloc_size);
  }
  VLOG(heap) << "JHP:NonTlab Non-moving or Large Allocation or RegisterNativeAllocation";
}

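// The sampler returns the offset of the next sample point; capping the new TLAB at that
// offset (or at the default TLAB size, whichever is smaller) makes the allocation that
// crosses the sample boundary fall out of the TLAB fast path, so it takes the slow path
// where take_sample is acted on.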
size_t Heap::JHPCalculateNextTlabSize(Thread* self,
                                      size_t jhp_def_tlab_size,
                                      size_t alloc_size,
                                      bool* take_sample,
                                      size_t* bytes_until_sample) {
  size_t next_sample_point = GetHeapSampler().GetSampleOffset(
      alloc_size, self->GetTlabPosOffset(), take_sample, bytes_until_sample);
  return std::min(next_sample_point, jhp_def_tlab_size);
}

void Heap::AdjustSampleOffset(size_t adjustment) {
  GetHeapSampler().AdjustSampleOffset(adjustment);
}

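// GC stress mode: force a collection the first time each distinct allocation backtrace is
// seen. Backtraces are identified by a hash of up to kMaxFrames frames, so each allocation
// site triggers at most one stress GC; repeat hits only bump seen_backtrace_count_.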
void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
  DCHECK(gc_stress_mode_);
  auto* const runtime = Runtime::Current();
  if (runtime->GetClassLinker()->IsInitialized() && !runtime->IsActiveTransaction()) {
    // Check if we should GC.
    bool new_backtrace = false;
    {
      static constexpr size_t kMaxFrames = 16u;
      MutexLock mu(self, *backtrace_lock_);
      FixedSizeBacktrace<kMaxFrames> backtrace;
      backtrace.Collect(/* skip_count= */ 2);
      uint64_t hash = backtrace.Hash();
      new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
      if (new_backtrace) {
        seen_backtraces_.insert(hash);
      }
    }
    if (new_backtrace) {
      StackHandleScope<1> hs(self);
      auto h = hs.NewHandleWrapper(obj);
      CollectGarbage(/* clear_soft_references= */ false);
      unique_backtrace_count_.fetch_add(1);
    } else {
      seen_backtrace_count_.fetch_add(1);
    }
  }
}

void Heap::DisableGCForShutdown() {
  MutexLock mu(Thread::Current(), *gc_complete_lock_);
  gc_disabled_for_shutdown_ = true;
}

bool Heap::IsGCDisabledForShutdown() const {
  MutexLock mu(Thread::Current(), *gc_complete_lock_);
  return gc_disabled_for_shutdown_;
}

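// For the two predicates below, IsBootImageAddress() is a cheap address-range check; the
// DCHECKs cross-validate it against a linear scan of boot_image_spaces_.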
bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
  DCHECK_EQ(IsBootImageAddress(obj.Ptr()),
            any_of(boot_image_spaces_.begin(),
                   boot_image_spaces_.end(),
                   [obj](gc::space::ImageSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
                     return space->HasAddress(obj.Ptr());
                   }));
  return IsBootImageAddress(obj.Ptr());
}

bool Heap::IsInBootImageOatFile(const void* p) const {
  DCHECK_EQ(IsBootImageAddress(p),
            any_of(boot_image_spaces_.begin(),
                   boot_image_spaces_.end(),
                   [p](gc::space::ImageSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
                     return space->GetOatFile()->Contains(p);
                   }));
  return IsBootImageAddress(p);
}

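// Installing the first allocation listener switches the quick alloc entrypoints to their
// instrumented variants, and removing the last one switches them back, so allocation only
// pays the listener overhead while a listener is registered.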
void Heap::SetAllocationListener(AllocationListener* l) {
  AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, l);

  if (old == nullptr) {
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  }
}

void Heap::RemoveAllocationListener() {
  AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr);

  if (old != nullptr) {
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}

void Heap::SetGcPauseListener(GcPauseListener* l) {
  gc_pause_listener_.store(l, std::memory_order_relaxed);
}

void Heap::RemoveGcPauseListener() {
  gc_pause_listener_.store(nullptr, std::memory_order_relaxed);
}

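// Slow path for TLAB allocation, reached when the current TLAB cannot satisfy alloc_size.
// Three cases follow: (1) with kUsePartialTlabs, grow the existing TLAB in place if it still
// has spare capacity; (2) kAllocatorTypeTLAB refills a TLAB from the bump-pointer space;
// (3) kAllocatorTypeRegionTLAB refills one from the region space, falling back to a direct
// (non-TLAB) region allocation for large objects or when no TLAB can be obtained.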
mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
                                       AllocatorType allocator_type,
                                       size_t alloc_size,
                                       bool grow,
                                       size_t* bytes_allocated,
                                       size_t* usable_size,
                                       size_t* bytes_tl_bulk_allocated) {
  mirror::Object* ret = nullptr;
  bool take_sample = false;
  size_t bytes_until_sample = 0;
  bool jhp_enabled = GetHeapSampler().IsEnabled();

  if (kUsePartialTlabs && alloc_size <= self->TlabRemainingCapacity()) {
    DCHECK_GT(alloc_size, self->TlabSize());
    // There is enough space if we grow the TLAB, so let's do that. This increases the
    // TLAB bytes.
    const size_t min_expand_size = alloc_size - self->TlabSize();
    size_t next_tlab_size =
        jhp_enabled ? JHPCalculateNextTlabSize(
                          self, kPartialTlabSize, alloc_size, &take_sample, &bytes_until_sample) :
                      kPartialTlabSize;
    const size_t expand_bytes = std::max(
        min_expand_size,
        std::min(self->TlabRemainingCapacity() - self->TlabSize(), next_tlab_size));
    if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, expand_bytes, grow))) {
      return nullptr;
    }
    *bytes_tl_bulk_allocated = expand_bytes;
    self->ExpandTlab(expand_bytes);
    DCHECK_LE(alloc_size, self->TlabSize());
  } else if (allocator_type == kAllocatorTypeTLAB) {
    DCHECK(bump_pointer_space_ != nullptr);
    // Try to allocate a page-aligned TLAB (not strictly necessary, though).
    // TODO: for large allocations, which are rare, maybe we should allocate
    // that object and return. There is no need to revoke the current TLAB,
    // particularly if it's mostly unutilized.
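    // Size the new TLAB so that alloc_size plus the TLAB proper ends on a page boundary:
    // RoundDown(alloc_size + kDefaultTLABSize, gPageSize) is the page-aligned total request,
    // and subtracting alloc_size leaves the bytes available beyond this allocation.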
    size_t next_tlab_size = RoundDown(alloc_size + kDefaultTLABSize, gPageSize) - alloc_size;
    if (jhp_enabled) {
      next_tlab_size = JHPCalculateNextTlabSize(
          self, next_tlab_size, alloc_size, &take_sample, &bytes_until_sample);
    }
    const size_t new_tlab_size = alloc_size + next_tlab_size;
    if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow))) {
      return nullptr;
    }
    // Try allocating a new thread-local buffer; if the allocation fails the space must be
    // full, so return null.
    if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size, bytes_tl_bulk_allocated)) {
      return nullptr;
    }
    if (jhp_enabled) {
      VLOG(heap) << "JHP:kAllocatorTypeTLAB, New Tlab bytes allocated= " << new_tlab_size;
    }
  } else {
    DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
    DCHECK(region_space_ != nullptr);
    if (space::RegionSpace::kRegionSize >= alloc_size) {
      // Non-large. Check OOME for a TLAB.
      if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
                                            space::RegionSpace::kRegionSize,
                                            grow))) {
        size_t next_pr_tlab_size =
            kUsePartialTlabs ? kPartialTlabSize : gc::space::RegionSpace::kRegionSize;
        if (jhp_enabled) {
          next_pr_tlab_size = JHPCalculateNextTlabSize(
              self, next_pr_tlab_size, alloc_size, &take_sample, &bytes_until_sample);
        }
        const size_t new_tlab_size = kUsePartialTlabs
            ? std::max(alloc_size, next_pr_tlab_size)
            : next_pr_tlab_size;
        // Try to allocate a TLAB.
        if (!region_space_->AllocNewTlab(self, new_tlab_size, bytes_tl_bulk_allocated)) {
          // Failed to allocate a TLAB. Try non-TLAB.
          ret = region_space_->AllocNonvirtual<false>(alloc_size,
                                                      bytes_allocated,
                                                      usable_size,
                                                      bytes_tl_bulk_allocated);
          if (jhp_enabled) {
            JHPCheckNonTlabSampleAllocation(self, ret, alloc_size);
          }
          return ret;
        }
        // Fall through to using the TLAB below.
      } else {
        // Check OOME for a non-TLAB allocation.
        if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
          ret = region_space_->AllocNonvirtual<false>(alloc_size,
                                                      bytes_allocated,
                                                      usable_size,
                                                      bytes_tl_bulk_allocated);
          if (jhp_enabled) {
            JHPCheckNonTlabSampleAllocation(self, ret, alloc_size);
          }
          return ret;
        }
        // Neither TLAB nor non-TLAB works. Give up.
        return nullptr;
      }
    } else {
      // Large. Check OOME.
      if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
        ret = region_space_->AllocNonvirtual<false>(alloc_size,
                                                    bytes_allocated,
                                                    usable_size,
                                                    bytes_tl_bulk_allocated);
        if (jhp_enabled) {
          JHPCheckNonTlabSampleAllocation(self, ret, alloc_size);
        }
        return ret;
      }
      return nullptr;
    }
  }
  // Refilled the TLAB; allocate from it and return.
  ret = self->AllocTlab(alloc_size);
  DCHECK(ret != nullptr);
  *bytes_allocated = alloc_size;
  *usable_size = alloc_size;

  // JavaHeapProfiler: Send the thread information about this allocation in case a sample is
  // requested.
  // This is the fall-through from both the if and else-if cases above, i.e. the cases that
  // use a TLAB.
  if (jhp_enabled) {
    if (take_sample) {
      GetHeapSampler().ReportSample(ret, alloc_size);
      // Update bytes_until_sample now that the allocation is already done.
      GetHeapSampler().SetBytesUntilSample(bytes_until_sample);
    }
    VLOG(heap) << "JHP:Fallthrough Tlab allocation";
  }

  return ret;
}

const Verification* Heap::GetVerification() const {
  return verification_.get();
}

void Heap::VlogHeapGrowth(size_t old_footprint, size_t new_footprint, size_t alloc_size) {
  VLOG(heap) << "Growing heap from " << PrettySize(old_footprint) << " to "
             << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
}

// Run a GC if we haven't run one since initial_gc_num. This forces processes to
// reclaim memory allocated during startup, even if they don't do much
// allocation post-startup. If the process is actively allocating and triggering
// GCs, or has moved to the background and hence forced a GC, this does nothing.
class Heap::TriggerPostForkCCGcTask : public HeapTask {
 public:
  explicit TriggerPostForkCCGcTask(uint64_t target_time, uint32_t initial_gc_num) :
      HeapTask(target_time), initial_gc_num_(initial_gc_num) {}
  void Run(Thread* self) override {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    if (heap->GetCurrentGcNum() == initial_gc_num_) {
      if (kLogAllGCs) {
        LOG(INFO) << "Forcing GC for allocation-inactive process";
      }
      heap->RequestConcurrentGC(self, kGcCauseBackground, false, initial_gc_num_);
    }
  }
 private:
  uint32_t initial_gc_num_;
};

// Reduce the target footprint if no GC has occurred since initial_gc_num.
// If a GC already occurred, it will have done this for us.
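// The task re-checks, under gc_complete_lock_, that no GC has completed and none is running;
// the footprint itself is lowered with a CAS so that a racing update of target_footprint_ is
// never clobbered.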
class Heap::ReduceTargetFootprintTask : public HeapTask {
 public:
  explicit ReduceTargetFootprintTask(uint64_t target_time, size_t new_target_sz,
                                     uint32_t initial_gc_num) :
      HeapTask(target_time), new_target_sz_(new_target_sz), initial_gc_num_(initial_gc_num) {}
  void Run(Thread* self) override {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    MutexLock mu(self, *(heap->gc_complete_lock_));
    if (heap->GetCurrentGcNum() == initial_gc_num_
        && heap->collector_type_running_ == kCollectorTypeNone) {
      size_t target_footprint = heap->target_footprint_.load(std::memory_order_relaxed);
      if (target_footprint > new_target_sz_) {
        if (heap->target_footprint_.CompareAndSetStrongRelaxed(target_footprint, new_target_sz_)) {
          heap->SetDefaultConcurrentStartBytesLocked();
        }
      }
    }
  }
 private:
  size_t new_target_sz_;
  uint32_t initial_gc_num_;
};

// Return a pseudo-random integer between 0 and 19999, using the uid as a seed. We want this to
// be deterministic for a given process, but to vary randomly across processes. Empirically, the
// uids for processes for which this matters are distinct.
static uint32_t GetPseudoRandomFromUid() {
  std::default_random_engine rng(getuid());
  std::uniform_int_distribution<int> dist(0, 19999);
  return dist(rng);
}

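// Post-fork heap policy: boost the footprint to its maximum for launch, then walk it back on
// a timer. With both shrink tasks scheduled, the timeline relative to the fork is roughly:
// target_footprint_ = growth_limit_ at t=0, shrink to max(growth_limit_/4, initial_heap_size_)
// at kPostForkMaxHeapDurationMS, shrink to initial_heap_size_ at 5x that, and a forced
// concurrent GC at 9x that plus a per-uid jitter of up to ~20 seconds, so that processes
// forked around the same time do not all collect at once. Each step is a no-op if a GC has
// already run by then.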
void Heap::PostForkChildAction(Thread* self) {
  uint32_t starting_gc_num = GetCurrentGcNum();
  uint64_t last_adj_time = NanoTime();
  next_gc_type_ = NonStickyGcType();  // Always start with a full GC.

  LOG(INFO) << "Using " << foreground_collector_type_ << " GC.";
  if (gUseUserfaultfd) {
    DCHECK_NE(mark_compact_, nullptr);
    mark_compact_->CreateUserfaultfd(/*post_fork=*/true);
  }

  // Temporarily increase target_footprint_ and concurrent_start_bytes_ to
  // their max values to avoid GC during app launch.
  // Set target_footprint_ to the largest allowed value.
  SetIdealFootprint(growth_limit_);
  SetDefaultConcurrentStartBytes();

  // Shrink the heap after kPostForkMaxHeapDurationMS, to force a memory-hog process to GC.
  // This remains high enough that many processes will continue without a GC.
  if (initial_heap_size_ < growth_limit_) {
    size_t first_shrink_size = std::max(growth_limit_ / 4, initial_heap_size_);
    last_adj_time += MsToNs(kPostForkMaxHeapDurationMS);
    GetTaskProcessor()->AddTask(
        self, new ReduceTargetFootprintTask(last_adj_time, first_shrink_size, starting_gc_num));
    // Shrink to a small value after a substantial time period. This will typically force a
    // GC if none has occurred yet. It has no effect if a GC happened before this anyway, which
    // is commonly the case, e.g. because of a process transition.
    if (initial_heap_size_ < first_shrink_size) {
      last_adj_time += MsToNs(4 * kPostForkMaxHeapDurationMS);
      GetTaskProcessor()->AddTask(
          self,
          new ReduceTargetFootprintTask(last_adj_time, initial_heap_size_, starting_gc_num));
    }
  }
  // Schedule a GC after a substantial period of time. This will become a no-op if another GC is
  // scheduled in the interim. If not, we want to avoid holding onto start-up garbage.
  uint64_t post_fork_gc_time = last_adj_time
      + MsToNs(4 * kPostForkMaxHeapDurationMS + GetPseudoRandomFromUid());
  GetTaskProcessor()->AddTask(self,
                              new TriggerPostForkCCGcTask(post_fork_gc_time, starting_gc_num));
}

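// Pause all threads and visit every reflective mirror object (Method, Constructor, Field,
// MethodHandle, VarHandle, DexCache) so the visitor can inspect or update the embedded
// ArtField* / ArtMethod* pointers, e.g. when those pointers change during class redefinition.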
void Heap::VisitReflectiveTargets(ReflectiveValueVisitor* visit) {
  VisitObjectsPaused([&visit](mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
    art::ObjPtr<mirror::Class> klass(ref->GetClass());
    // All these classes are in the BootstrapClassLoader.
    if (!klass->IsBootStrapClassLoaded()) {
      return;
    }
    if (GetClassRoot<mirror::Method>()->IsAssignableFrom(klass) ||
        GetClassRoot<mirror::Constructor>()->IsAssignableFrom(klass)) {
      down_cast<mirror::Executable*>(ref)->VisitTarget(visit);
    } else if (art::GetClassRoot<art::mirror::Field>() == klass) {
      down_cast<mirror::Field*>(ref)->VisitTarget(visit);
    } else if (art::GetClassRoot<art::mirror::MethodHandle>()->IsAssignableFrom(klass)) {
      down_cast<mirror::MethodHandle*>(ref)->VisitTarget(visit);
    } else if (art::GetClassRoot<art::mirror::StaticFieldVarHandle>()->IsAssignableFrom(klass)) {
      down_cast<mirror::StaticFieldVarHandle*>(ref)->VisitTarget(visit);
    } else if (art::GetClassRoot<art::mirror::FieldVarHandle>()->IsAssignableFrom(klass)) {
      down_cast<mirror::FieldVarHandle*>(ref)->VisitTarget(visit);
    } else if (art::GetClassRoot<art::mirror::DexCache>()->IsAssignableFrom(klass)) {
      down_cast<mirror::DexCache*>(ref)->VisitReflectiveTargets(visit);
    }
  });
}

bool Heap::AddHeapTask(gc::HeapTask* task) {
  Thread* const self = Thread::Current();
  if (!CanAddHeapTask(self)) {
    return false;
  }
  GetTaskProcessor()->AddTask(self, task);
  return true;
}

std::string Heap::GetForegroundCollectorName() {
  std::ostringstream oss;
  oss << foreground_collector_type_;
  return oss.str();
}

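// An app image is recognized as an image space whose start address lies outside the boot
// image range; the match below compares dex_location against the first (primary) dex file
// of the space's oat file.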
bool Heap::HasAppImageSpaceFor(const std::string& dex_location) const {
  ScopedObjectAccess soa(Thread::Current());
  for (space::ContinuousSpace* space : continuous_spaces_) {
    // An image space is either a boot image space or an app image space.
    if (space->IsImageSpace() &&
        !IsBootImageAddress(space->Begin()) &&
        (space->AsImageSpace()->GetOatFile()->GetOatDexFiles()[0]->GetDexFileLocation() ==
              dex_location)) {
      return true;
    }
  }
  return false;
}

}  // namespace gc
}  // namespace art