/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <limits>
#if defined(__BIONIC__) || defined(__GLIBC__)
#include <malloc.h>  // For mallinfo()
#endif
#include <memory>
#include <vector>

#include "android-base/stringprintf.h"

#include "allocation_listener.h"
#include "art_field-inl.h"
#include "backtrace_helper.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/file_utils.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG.
#include "base/memory_tool.h"
#include "base/mutex.h"
#include "base/os.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex/dex_file-inl.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_sweep.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/racing_check.h"
#include "gc/reference_processor.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "gc/task_processor.h"
#include "gc/verification.h"
#include "gc_pause_listener.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "heap-inl.h"
#include "heap-visit-objects-inl.h"
#include "image.h"
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jni/java_vm_ext.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#include "reflection.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
#include "verify_object-inl.h"
#include "well_known_classes.h"

namespace art {

namespace gc {

static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds

DEFINE_RUNTIME_DEBUG_FLAG(Heap, kStressCollectorTransition);

// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
static double GetStickyGcThroughputAdjustment(bool use_generational_cc) {
  return use_generational_cc ? 0.5 : 1.0;
}
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
// How many reserve entries are at the end of the allocation stack; these are only needed if the
// allocation stack overflows.
static constexpr size_t kAllocationStackReserveSize = 1024;
// Default mark stack size in bytes.
static const size_t kDefaultMarkStackSize = 64 * KB;
// Define space name.
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
static const char* kNonMovingSpaceName = "non moving space";
static const char* kZygoteSpaceName = "zygote space";
static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
static constexpr bool kGCALotMode = false;
// GC alot mode uses a small allocation stack to stress test a lot of GC.
static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
// Verify-object mode uses a small allocation stack size since searching the allocation stack is
// slow.
static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
    sizeof(mirror::HeapReference<mirror::Object>);

// For deterministic compilation, we need the heap to be at a well-known address.
static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;

static const char* kRegionSpaceName = "main space (region space)";

// If true, we log all GCs in both the foreground and background. Used for debugging.
static constexpr bool kLogAllGCs = false;

// How much we grow the TLAB if we can do it.
static constexpr size_t kPartialTlabSize = 16 * KB;
static constexpr bool kUsePartialTlabs = true;

// Use the max heap size for 2 seconds. This is smaller than the usual 5s window since we don't
// want to leave the process allocating with relaxed ergonomics for that long.
static constexpr size_t kPostForkMaxHeapDurationMS = 2000;

#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
// 300 MB (0x12c00000) - (default non-moving space capacity).
uint8_t* const Heap::kPreferredAllocSpaceBegin =
    reinterpret_cast<uint8_t*>(300 * MB - kDefaultNonMovingSpaceCapacity);
#else
#ifdef __ANDROID__
// For 32-bit Android, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
#else
// For 32-bit host, use 0x40000000 because asan uses most of the space below this.
uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
#endif
#endif

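// Whether GC pause times matter to the user right now: true only while the runtime reports a
// jank-perceptible process state (roughly, while the app is in the foreground).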
static inline bool CareAboutPauseTimes() {
  return Runtime::Current()->InJankPerceptibleProcessState();
}

Heap::Heap(size_t initial_size,
           size_t growth_limit,
           size_t min_free,
           size_t max_free,
           double target_utilization,
           double foreground_heap_growth_multiplier,
           size_t capacity,
           size_t non_moving_space_capacity,
           const std::vector<std::string>& boot_class_path,
           const std::vector<std::string>& boot_class_path_locations,
           const std::string& image_file_name,
           const InstructionSet image_instruction_set,
           CollectorType foreground_collector_type,
           CollectorType background_collector_type,
           space::LargeObjectSpaceType large_object_space_type,
           size_t large_object_threshold,
           size_t parallel_gc_threads,
           size_t conc_gc_threads,
           bool low_memory_mode,
           size_t long_pause_log_threshold,
           size_t long_gc_log_threshold,
           bool ignore_target_footprint,
           bool use_tlab,
           bool verify_pre_gc_heap,
           bool verify_pre_sweeping_heap,
           bool verify_post_gc_heap,
           bool verify_pre_gc_rosalloc,
           bool verify_pre_sweeping_rosalloc,
           bool verify_post_gc_rosalloc,
           bool gc_stress_mode,
           bool measure_gc_performance,
           bool use_homogeneous_space_compaction_for_oom,
           bool use_generational_cc,
           uint64_t min_interval_homogeneous_space_compaction_by_oom,
           bool dump_region_info_before_gc,
           bool dump_region_info_after_gc,
           space::ImageSpaceLoadingOrder image_space_loading_order)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      collector_type_(kCollectorTypeNone),
      foreground_collector_type_(foreground_collector_type),
      background_collector_type_(background_collector_type),
      desired_collector_type_(foreground_collector_type_),
      pending_task_lock_(nullptr),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      process_cpu_start_time_ns_(ProcessCpuNanoTime()),
      pre_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
      post_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
      pre_gc_weighted_allocated_bytes_(0.0),
      post_gc_weighted_allocated_bytes_(0.0),
      ignore_target_footprint_(ignore_target_footprint),
      zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
      zygote_space_(nullptr),
      large_object_threshold_(large_object_threshold),
      disable_thread_flip_count_(0),
      thread_flip_running_(false),
      collector_type_running_(kCollectorTypeNone),
      last_gc_cause_(kGcCauseNone),
      thread_running_gc_(nullptr),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      target_footprint_(initial_size),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_registered_(0),
      old_native_bytes_allocated_(0),
      native_objects_notified_(0),
      num_bytes_freed_revoke_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      gc_stress_mode_(gc_stress_mode),
      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
          kDefaultAllocationStackSize),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      region_space_(nullptr),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
      total_wait_time_(0),
      verify_object_mode_(kVerifyObjectModeDisabled),
      disable_moving_gc_count_(0),
      semi_space_collector_(nullptr),
      active_concurrent_copying_collector_(nullptr),
      young_concurrent_copying_collector_(nullptr),
      concurrent_copying_collector_(nullptr),
      is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
      use_tlab_(use_tlab),
      main_space_backup_(nullptr),
      min_interval_homogeneous_space_compaction_by_oom_(
          min_interval_homogeneous_space_compaction_by_oom),
      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
      pending_collector_transition_(nullptr),
      pending_heap_trim_(nullptr),
      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
      use_generational_cc_(use_generational_cc),
      running_collection_is_blocking_(false),
      blocking_gc_count_(0U),
      blocking_gc_time_(0U),
      last_update_time_gc_count_rate_histograms_(  // Round down by the window duration.
          (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
      gc_count_last_window_(0U),
      blocking_gc_count_last_window_(0U),
      gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
      blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
                                        kGcCountRateMaxBucketCount),
      alloc_tracking_enabled_(false),
      alloc_record_depth_(AllocRecordObjectMap::kDefaultAllocStackDepth),
      backtrace_lock_(nullptr),
      seen_backtrace_count_(0u),
      unique_backtrace_count_(0u),
      gc_disabled_for_shutdown_(false),
      dump_region_info_before_gc_(dump_region_info_before_gc),
      dump_region_info_after_gc_(dump_region_info_after_gc) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  if (kUseReadBarrier) {
    CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
    CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
  }
  verification_.reset(new Verification(this));
  CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
  ScopedTrace trace(__FUNCTION__);
  Runtime* const runtime = Runtime::Current();
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  const bool is_zygote = runtime->IsZygote();
  if (!is_zygote) {
    // Background compaction is currently not supported for command line runs.
    if (background_collector_type_ != foreground_collector_type_) {
      VLOG(heap) << "Disabling background compaction for non zygote";
      background_collector_type_ = foreground_collector_type_;
    }
  }
  ChangeCollector(desired_collector_type_);
  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));

  // We don't have hspace compaction enabled with GSS or CC.
  if (foreground_collector_type_ == kCollectorTypeGSS ||
      foreground_collector_type_ == kCollectorTypeCC) {
    use_homogeneous_space_compaction_for_oom_ = false;
  }
  bool support_homogeneous_space_compaction =
      background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
      use_homogeneous_space_compaction_for_oom_;
  // We may use the same space (the main space) for the non moving space if we don't need to
  // compact from the main space.
  // This is not the case if we support homogeneous compaction or have a moving background
  // collector type.
  bool separate_non_moving_space = is_zygote ||
      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
      IsMovingGc(background_collector_type_);
  if (foreground_collector_type_ == kCollectorTypeGSS) {
    separate_non_moving_space = false;
  }

  // Requested begin for the alloc space, to follow the mapped image and oat files
  uint8_t* request_begin = nullptr;
  // Calculate the extra space required after the boot image, see allocations below.
  size_t heap_reservation_size = 0u;
  if (separate_non_moving_space) {
    heap_reservation_size = non_moving_space_capacity;
  } else if ((foreground_collector_type_ != kCollectorTypeCC) &&
             (is_zygote || foreground_collector_type_ == kCollectorTypeGSS)) {
    heap_reservation_size = capacity_;
  }
  heap_reservation_size = RoundUp(heap_reservation_size, kPageSize);
  // Load image space(s).
  std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
  MemMap heap_reservation;
  if (space::ImageSpace::LoadBootImage(boot_class_path,
                                       boot_class_path_locations,
                                       image_file_name,
                                       image_instruction_set,
                                       image_space_loading_order,
                                       runtime->ShouldRelocate(),
                                       /*executable=*/ !runtime->IsAotCompiler(),
                                       is_zygote,
                                       heap_reservation_size,
                                       &boot_image_spaces,
                                       &heap_reservation)) {
    DCHECK_EQ(heap_reservation_size, heap_reservation.IsValid() ? heap_reservation.Size() : 0u);
    DCHECK(!boot_image_spaces.empty());
    request_begin = boot_image_spaces.back()->GetImageHeader().GetOatFileEnd();
    DCHECK(!heap_reservation.IsValid() || request_begin == heap_reservation.Begin())
        << "request_begin=" << static_cast<const void*>(request_begin)
        << " heap_reservation.Begin()=" << static_cast<const void*>(heap_reservation.Begin());
    for (std::unique_ptr<space::ImageSpace>& space : boot_image_spaces) {
      boot_image_spaces_.push_back(space.get());
      AddSpace(space.release());
    }
  } else {
    if (foreground_collector_type_ == kCollectorTypeCC) {
      // Need to use a low address so that we can allocate a contiguous 2 * Xmx space
      // when there's no image (dex2oat for target).
      request_begin = kPreferredAllocSpaceBegin;
    }
    // Gross hack to make dex2oat deterministic.
    if (foreground_collector_type_ == kCollectorTypeMS && Runtime::Current()->IsAotCompiler()) {
      // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
      // b/26849108
      request_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
    }
  }

  /*
  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-  nonmoving space (non_moving_space_capacity)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space / bump space 1 (capacity_) +-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space2 / bump space 2 (capacity_)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
  */

  MemMap main_mem_map_1;
  MemMap main_mem_map_2;

  std::string error_str;
  MemMap non_moving_space_mem_map;
  if (separate_non_moving_space) {
    ScopedTrace trace2("Create separate non moving space");
    // If we are the zygote, the non moving space becomes the zygote space when we run
    // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
    // rename the mem map later.
    const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
    // Reserve the non moving mem map before the other two since it needs to be at a specific
    // address.
    DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
    if (heap_reservation.IsValid()) {
      non_moving_space_mem_map = heap_reservation.RemapAtEnd(
          heap_reservation.Begin(), space_name, PROT_READ | PROT_WRITE, &error_str);
    } else {
      non_moving_space_mem_map = MapAnonymousPreferredAddress(
          space_name, request_begin, non_moving_space_capacity, &error_str);
    }
    CHECK(non_moving_space_mem_map.IsValid()) << error_str;
    DCHECK(!heap_reservation.IsValid());
    // Try to reserve virtual memory at a lower address if we have a separate non moving space.
    request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
  }
  // Attempt to create 2 mem maps at or after the requested begin.
  if (foreground_collector_type_ != kCollectorTypeCC) {
    ScopedTrace trace2("Create main mem map");
    if (separate_non_moving_space ||
        !(is_zygote || foreground_collector_type_ == kCollectorTypeGSS)) {
      main_mem_map_1 = MapAnonymousPreferredAddress(
          kMemMapSpaceName[0], request_begin, capacity_, &error_str);
    } else {
      // If no separate non-moving space and we are the zygote or the collector type is GSS,
      // the main space must come right after the image space to avoid a gap.
      // This is required since we want the zygote space to be adjacent to the image space.
      DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
      main_mem_map_1 = MemMap::MapAnonymous(
          kMemMapSpaceName[0],
          request_begin,
          capacity_,
          PROT_READ | PROT_WRITE,
          /* low_4gb= */ true,
          /* reuse= */ false,
          heap_reservation.IsValid() ? &heap_reservation : nullptr,
          &error_str);
    }
    CHECK(main_mem_map_1.IsValid()) << error_str;
    DCHECK(!heap_reservation.IsValid());
  }
  if (support_homogeneous_space_compaction ||
      background_collector_type_ == kCollectorTypeSS ||
      foreground_collector_type_ == kCollectorTypeSS) {
    ScopedTrace trace2("Create main mem map 2");
    main_mem_map_2 = MapAnonymousPreferredAddress(
        kMemMapSpaceName[1], main_mem_map_1.End(), capacity_, &error_str);
    CHECK(main_mem_map_2.IsValid()) << error_str;
  }

  // Create the non moving space first so that bitmaps don't take up the address range.
  if (separate_non_moving_space) {
    ScopedTrace trace2("Add non moving space");
    // Non moving space is always dlmalloc since we currently don't have support for multiple
    // active rosalloc spaces.
    const size_t size = non_moving_space_mem_map.Size();
    const void* non_moving_space_mem_map_begin = non_moving_space_mem_map.Begin();
    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(std::move(non_moving_space_mem_map),
                                                               "zygote / non moving space",
                                                               kDefaultStartingSize,
                                                               initial_size,
                                                               size,
                                                               size,
                                                               /* can_move_objects= */ false);
    CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
        << non_moving_space_mem_map_begin;
    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
    AddSpace(non_moving_space_);
  }
  // Create other spaces based on whether or not we have a moving GC.
  if (foreground_collector_type_ == kCollectorTypeCC) {
    CHECK(separate_non_moving_space);
    // Reserve twice the capacity, to allow evacuating every region for explicit GCs.
    MemMap region_space_mem_map =
        space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin);
    CHECK(region_space_mem_map.IsValid()) << "No region space mem map";
    region_space_ = space::RegionSpace::Create(
        kRegionSpaceName, std::move(region_space_mem_map), use_generational_cc_);
    AddSpace(region_space_);
  } else if (IsMovingGc(foreground_collector_type_) &&
      foreground_collector_type_ != kCollectorTypeGSS) {
    // Create bump pointer spaces.
    // We only need to create the bump pointer spaces if the foreground collector is a
    // compacting GC.
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
                                                                    std::move(main_mem_map_1));
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
                                                            std::move(main_mem_map_2));
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    CHECK(separate_non_moving_space);
  } else {
    CreateMainMallocSpace(std::move(main_mem_map_1), initial_size, growth_limit_, capacity_);
    CHECK(main_space_ != nullptr);
    AddSpace(main_space_);
    if (!separate_non_moving_space) {
      non_moving_space_ = main_space_;
      CHECK(!non_moving_space_->CanMoveObjects());
    }
    if (foreground_collector_type_ == kCollectorTypeGSS) {
      CHECK_EQ(foreground_collector_type_, background_collector_type_);
      // Create bump pointer spaces instead of a backup space.
      main_mem_map_2.Reset();
      bump_pointer_space_ = space::BumpPointerSpace::Create(
          "Bump pointer space 1", kGSSBumpPointerSpaceCapacity);
      CHECK(bump_pointer_space_ != nullptr);
      AddSpace(bump_pointer_space_);
      temp_space_ = space::BumpPointerSpace::Create(
          "Bump pointer space 2", kGSSBumpPointerSpaceCapacity);
      CHECK(temp_space_ != nullptr);
      AddSpace(temp_space_);
    } else if (main_mem_map_2.IsValid()) {
      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
      main_space_backup_.reset(CreateMallocSpaceFromMemMap(std::move(main_mem_map_2),
                                                           initial_size,
                                                           growth_limit_,
                                                           capacity_,
                                                           name,
                                                           /* can_move_objects= */ true));
      CHECK(main_space_backup_.get() != nullptr);
      // Add the space so it's accounted for in the heap_begin and heap_end.
      AddSpace(main_space_backup_.get());
    }
  }
  CHECK(non_moving_space_ != nullptr);
  CHECK(!non_moving_space_->CanMoveObjects());
  // Allocate the large object space.
  if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
    large_object_space_ = space::FreeListSpace::Create("free list large object space", capacity_);
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
    large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else {
    // Disable the large object space by making the cutoff excessively large.
    large_object_threshold_ = std::numeric_limits<size_t>::max();
    large_object_space_ = nullptr;
  }
  if (large_object_space_ != nullptr) {
    AddSpace(large_object_space_);
  }
  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  uint8_t* heap_begin = continuous_spaces_.front()->Begin();
  uint8_t* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;
  // Remove the main backup space since it slows down the GC to have unused extra spaces.
  // TODO: Avoid needing to do this.
  if (main_space_backup_.get() != nullptr) {
    RemoveSpace(main_space_backup_.get());
  }
  // Allocate the card table.
  // We currently don't support dynamically resizing the card table.
  // Since we don't know where in the low_4gb the app image will be located, make the card table
  // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
  UNUSED(heap_capacity);
  // Start at 4 KB; we can be sure there are no spaces mapped this low since the address range is
  // reserved by the kernel.
  static constexpr size_t kMinHeapAddress = 4 * KB;
  card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
                                                  4 * GB - kMinHeapAddress));
  CHECK(card_table_.get() != nullptr) << "Failed to create card table";
  if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
    rb_table_.reset(new accounting::ReadBarrierTable());
    DCHECK(rb_table_->IsAllCleared());
  }
  if (HasBootImageSpace()) {
    // Don't add the image mod union table if we are running without an image; this can crash if
    // we use the CardCache implementation.
    for (space::ImageSpace* image_space : GetBootImageSpaces()) {
      accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
          "Image mod-union table", this, image_space);
      CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
      AddModUnionTable(mod_union_table);
    }
  }
  if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
    accounting::RememberedSet* non_moving_space_rem_set =
        new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
    CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
    AddRememberedSet(non_moving_space_rem_set);
  }
  // TODO: Count objects in the image space here?
  num_bytes_allocated_.store(0, std::memory_order_relaxed);
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
                                                    kDefaultMarkStackSize));
  const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
  allocation_stack_.reset(accounting::ObjectStack::Create(
      "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
  live_stack_.reset(accounting::ObjectStack::Create(
      "live stack", max_allocation_stack_size_, alloc_stack_capacity));
  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));

  thread_flip_lock_ = new Mutex("GC thread flip lock");
  thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
                                                *thread_flip_lock_));
  task_processor_.reset(new TaskProcessor());
  reference_processor_.reset(new ReferenceProcessor());
  pending_task_lock_ = new Mutex("Pending task lock");
  if (ignore_target_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(target_footprint_.load(std::memory_order_relaxed), 0U);
  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
        (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
      garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
    }
  }
  if (kMovingCollector) {
    if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
        MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
        use_homogeneous_space_compaction_for_oom_) {
      // TODO: Clean this up.
      const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
      semi_space_collector_ = new collector::SemiSpace(this, generational,
                                                       generational ? "generational" : "");
      garbage_collectors_.push_back(semi_space_collector_);
    }
    if (MayUseCollector(kCollectorTypeCC)) {
      concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
                                                                       /*young_gen=*/false,
                                                                       use_generational_cc_,
                                                                       "",
                                                                       measure_gc_performance);
      if (use_generational_cc_) {
        young_concurrent_copying_collector_ = new collector::ConcurrentCopying(
            this,
            /*young_gen=*/true,
            use_generational_cc_,
            "young",
            measure_gc_performance);
      }
      active_concurrent_copying_collector_ = concurrent_copying_collector_;
      DCHECK(region_space_ != nullptr);
      concurrent_copying_collector_->SetRegionSpace(region_space_);
      if (use_generational_cc_) {
        young_concurrent_copying_collector_->SetRegionSpace(region_space_);
        // At this point, non-moving space should be created.
        DCHECK(non_moving_space_ != nullptr);
        concurrent_copying_collector_->CreateInterRegionRefBitmaps();
      }
      garbage_collectors_.push_back(concurrent_copying_collector_);
      if (use_generational_cc_) {
        garbage_collectors_.push_back(young_concurrent_copying_collector_);
      }
    }
  }
  if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
      (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
    // Check that there's no gap between the image space and the non moving space so that the
    // immune region won't break (e.g. due to a large object allocated in the gap). This is only
    // required when we're the zygote or using GSS.
    // Space with smallest Begin().
    space::ImageSpace* first_space = nullptr;
    for (space::ImageSpace* space : boot_image_spaces_) {
      if (first_space == nullptr || space->Begin() < first_space->Begin()) {
        first_space = space;
      }
    }
    bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
    if (!no_gap) {
      PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
      MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse= */ true);
      LOG(FATAL) << "There's a gap between the image space and the non-moving space";
    }
  }
  instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
  if (gc_stress_mode_) {
    backtrace_lock_ = new Mutex("GC complete lock");
  }
  if (is_running_on_memory_tool_ || gc_stress_mode_) {
    instrumentation->InstrumentQuickAllocEntryPoints();
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

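// Map an anonymous region in the low 4GB, preferring (but not requiring) request_begin. If the
// preferred address is unavailable, a second attempt lets the kernel pick the address.
// A minimal usage sketch (name and size hypothetical):
//
//   std::string error;
//   MemMap map = MapAnonymousPreferredAddress("example space", /*request_begin=*/ nullptr,
//                                             16 * MB, &error);
//   CHECK(map.IsValid()) << error;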
MemMap Heap::MapAnonymousPreferredAddress(const char* name,
                                          uint8_t* request_begin,
                                          size_t capacity,
                                          std::string* out_error_str) {
  while (true) {
    MemMap map = MemMap::MapAnonymous(name,
                                      request_begin,
                                      capacity,
                                      PROT_READ | PROT_WRITE,
                                      /*low_4gb=*/ true,
                                      /*reuse=*/ false,
                                      /*reservation=*/ nullptr,
                                      out_error_str);
    if (map.IsValid() || request_begin == nullptr) {
      return map;
    }
    // Retry a second time with no specified request begin.
    request_begin = nullptr;
  }
}

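// A collector type may be used if it matches either the configured foreground or background
// collector.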
bool Heap::MayUseCollector(CollectorType type) const {
  return foreground_collector_type_ == type || background_collector_type_ == type;
}

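// Create a malloc-backed space (rosalloc or dlmalloc, depending on kUseRosAlloc) from the given
// mem map, and register a remembered set for it when the semi-space collector requires one.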
space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap&& mem_map,
                                                      size_t initial_size,
                                                      size_t growth_limit,
                                                      size_t capacity,
                                                      const char* name,
                                                      bool can_move_objects) {
  space::MallocSpace* malloc_space = nullptr;
  if (kUseRosAlloc) {
    // Create rosalloc space.
    malloc_space = space::RosAllocSpace::CreateFromMemMap(std::move(mem_map),
                                                          name,
                                                          kDefaultStartingSize,
                                                          initial_size,
                                                          growth_limit,
                                                          capacity,
                                                          low_memory_mode_,
                                                          can_move_objects);
  } else {
    malloc_space = space::DlMallocSpace::CreateFromMemMap(std::move(mem_map),
                                                          name,
                                                          kDefaultStartingSize,
                                                          initial_size,
                                                          growth_limit,
                                                          capacity,
                                                          can_move_objects);
  }
  if (collector::SemiSpace::kUseRememberedSet) {
    accounting::RememberedSet* rem_set =
        new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
    CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
    AddRememberedSet(rem_set);
  }
  CHECK(malloc_space != nullptr) << "Failed to create " << name;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  return malloc_space;
}

void Heap::CreateMainMallocSpace(MemMap&& mem_map,
                                 size_t initial_size,
                                 size_t growth_limit,
                                 size_t capacity) {
  // Is background compaction enabled?
  bool can_move_objects = IsMovingGc(background_collector_type_) !=
      IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
  // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
  // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
  // from the main space to the zygote space. If background compaction is enabled, always pass in
  // that we can move objects.
  if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
    // After the zygote we want this to be false if we don't have background compaction enabled so
    // that getting primitive array elements is faster.
    // We never have homogeneous compaction with GSS and don't need a space with movable objects.
    can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
  }
  if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
    RemoveRememberedSet(main_space_);
  }
  const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
  main_space_ = CreateMallocSpaceFromMemMap(std::move(mem_map),
                                            initial_size,
                                            growth_limit,
                                            capacity, name,
                                            can_move_objects);
  SetSpaceAsDefault(main_space_);
  VLOG(heap) << "Created main space " << main_space_;
}

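// Changing the allocator also rewires the quick allocation entrypoints, so compiled code starts
// allocating through the new allocator immediately.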
void Heap::ChangeAllocator(AllocatorType allocator) {
  if (current_allocator_ != allocator) {
    // These two allocators are only used internally and don't have any entrypoints.
    CHECK_NE(allocator, kAllocatorTypeLOS);
    CHECK_NE(allocator, kAllocatorTypeNonMoving);
    current_allocator_ = allocator;
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

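// Permanently disable moving GC: switch any moving collector type to a non-moving one (CMS in
// the foreground) and, if the transition succeeded, merge the main and non-moving spaces.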
void Heap::DisableMovingGc() {
  CHECK(!kUseReadBarrier);
  if (IsMovingGc(foreground_collector_type_)) {
    foreground_collector_type_ = kCollectorTypeCMS;
  }
  if (IsMovingGc(background_collector_type_)) {
    background_collector_type_ = foreground_collector_type_;
  }
  TransitionCollector(foreground_collector_type_);
  Thread* const self = Thread::Current();
  ScopedThreadStateChange tsc(self, kSuspended);
  ScopedSuspendAll ssa(__FUNCTION__);
  // Something may have caused the transition to fail.
  if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
    CHECK(main_space_ != nullptr);
    // The allocation stack may have non movable objects in it. We need to flush it since the GC
    // can only handle marking allocation stack objects of one non moving space and one main
    // space.
    {
      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
      FlushAllocStack();
    }
    main_space_->DisableMovingObjects();
    non_moving_space_ = main_space_;
    CHECK(!non_moving_space_->CanMoveObjects());
  }
}

bool Heap::IsCompilingBoot() const {
  if (!Runtime::Current()->IsAotCompiler()) {
    return false;
  }
  ScopedObjectAccess soa(Thread::Current());
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace() || space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  if (IsMovingGc(collector_type_running_)) {
    WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
  }
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GT(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}

void Heap::IncrementDisableThreadFlip(Thread* self) {
  // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
  CHECK(kUseReadBarrier);
  bool is_nested = self->GetDisableThreadFlipCount() > 0;
  self->IncrementDisableThreadFlipCount();
  if (is_nested) {
    // If this is a nested JNI critical section enter, we don't need to wait or increment the global
    // counter. The global counter is incremented only once for a thread for the outermost enter.
    return;
  }
  ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
  MutexLock mu(self, *thread_flip_lock_);
  thread_flip_cond_->CheckSafeToWait(self);
  bool has_waited = false;
  uint64_t wait_start = NanoTime();
  if (thread_flip_running_) {
    ScopedTrace trace("IncrementDisableThreadFlip");
    while (thread_flip_running_) {
      has_waited = true;
      thread_flip_cond_->Wait(self);
    }
  }
  ++disable_thread_flip_count_;
  if (has_waited) {
    uint64_t wait_time = NanoTime() - wait_start;
    total_wait_time_ += wait_time;
    if (wait_time > long_pause_log_threshold_) {
      LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
    }
  }
}

void Heap::DecrementDisableThreadFlip(Thread* self) {
  // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up
  // the GC waiting before doing a thread flip.
  CHECK(kUseReadBarrier);
  self->DecrementDisableThreadFlipCount();
  bool is_outermost = self->GetDisableThreadFlipCount() == 0;
  if (!is_outermost) {
    // If this is not an outermost JNI critical exit, we don't need to decrement the global counter.
    // The global counter is decremented only once for a thread for the outermost exit.
    return;
  }
  MutexLock mu(self, *thread_flip_lock_);
  CHECK_GT(disable_thread_flip_count_, 0U);
  --disable_thread_flip_count_;
  if (disable_thread_flip_count_ == 0) {
    // Potentially notify the GC thread blocking to begin a thread flip.
    thread_flip_cond_->Broadcast(self);
  }
}

void Heap::ThreadFlipBegin(Thread* self) {
  // Supposed to be called by GC. Set thread_flip_running_ to be true. If disable_thread_flip_count_
  // > 0, block. Otherwise, go ahead.
  CHECK(kUseReadBarrier);
  ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
  MutexLock mu(self, *thread_flip_lock_);
  thread_flip_cond_->CheckSafeToWait(self);
  bool has_waited = false;
  uint64_t wait_start = NanoTime();
  CHECK(!thread_flip_running_);
  // Set this to true before waiting so that frequent JNI critical enter/exits won't starve
  // GC. This is like a writer preference of a reader-writer lock.
  thread_flip_running_ = true;
  while (disable_thread_flip_count_ > 0) {
    has_waited = true;
    thread_flip_cond_->Wait(self);
  }
  if (has_waited) {
    uint64_t wait_time = NanoTime() - wait_start;
    total_wait_time_ += wait_time;
    if (wait_time > long_pause_log_threshold_) {
      LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
    }
  }
}

void Heap::ThreadFlipEnd(Thread* self) {
  // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators
  // waiting before doing a JNI critical.
  CHECK(kUseReadBarrier);
  MutexLock mu(self, *thread_flip_lock_);
  CHECK(thread_flip_running_);
  thread_flip_running_ = false;
  // Potentially notify mutator threads blocking to enter a JNI critical section.
  thread_flip_cond_->Broadcast(self);
}

void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
  if (old_process_state != new_process_state) {
    const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
    for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
      // Start at index 1 to avoid "is always false" warning.
      // Have iteration 1 always transition the collector.
      TransitionCollector((((i & 1) == 1) == jank_perceptible)
          ? foreground_collector_type_
          : background_collector_type_);
      usleep(kCollectorTransitionStressWait);
    }
    if (jank_perceptible) {
      // Transition back to foreground right away to prevent jank.
      RequestCollectorTransition(foreground_collector_type_, 0);
    } else {
      // Don't delay for debug builds since we may want to stress test the GC.
      // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
      // special handling which does a homogeneous space compaction once but then doesn't
      // transition the collector. Similarly, we invoke a full compaction for kCollectorTypeCC
      // but don't transition the collector.
      RequestCollectorTransition(background_collector_type_,
                                 kStressCollectorTransition
                                     ? 0
                                     : kCollectorTransitionWait);
    }
  }
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
  space::ContinuousSpace* space2 = non_moving_space_;
  // TODO: Generalize this to n bitmaps?
  CHECK(space1 != nullptr);
  CHECK(space2 != nullptr);
  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
                 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
                 stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

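// Register a space with the heap: hook up its live/mark bitmaps (region spaces are skipped here
// since they are visited with special handling) and keep continuous_spaces_ sorted by address.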
AddSpace(space::Space * space)1024 void Heap::AddSpace(space::Space* space) {
1025   CHECK(space != nullptr);
1026   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1027   if (space->IsContinuousSpace()) {
1028     DCHECK(!space->IsDiscontinuousSpace());
1029     space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1030     // Continuous spaces don't necessarily have bitmaps.
1031     accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1032     accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
1033     // The region space bitmap is not added since VisitObjects visits the region space objects with
1034     // special handling.
1035     if (live_bitmap != nullptr && !space->IsRegionSpace()) {
1036       CHECK(mark_bitmap != nullptr);
1037       live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
1038       mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
1039     }
1040     continuous_spaces_.push_back(continuous_space);
1041     // Ensure that spaces remain sorted in increasing order of start address.
1042     std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
1043               [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
1044       return a->Begin() < b->Begin();
1045     });
1046   } else {
1047     CHECK(space->IsDiscontinuousSpace());
1048     space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
1049     live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1050     mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
1051     discontinuous_spaces_.push_back(discontinuous_space);
1052   }
1053   if (space->IsAllocSpace()) {
1054     alloc_spaces_.push_back(space->AsAllocSpace());
1055   }
1056 }
1057 
SetSpaceAsDefault(space::ContinuousSpace * continuous_space)1058 void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
1059   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1060   if (continuous_space->IsDlMallocSpace()) {
1061     dlmalloc_space_ = continuous_space->AsDlMallocSpace();
1062   } else if (continuous_space->IsRosAllocSpace()) {
1063     rosalloc_space_ = continuous_space->AsRosAllocSpace();
1064   }
1065 }
1066 
RemoveSpace(space::Space * space)1067 void Heap::RemoveSpace(space::Space* space) {
1068   DCHECK(space != nullptr);
1069   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1070   if (space->IsContinuousSpace()) {
1071     DCHECK(!space->IsDiscontinuousSpace());
1072     space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1073     // Continuous spaces don't necessarily have bitmaps.
1074     accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1075     accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
1076     if (live_bitmap != nullptr && !space->IsRegionSpace()) {
1077       DCHECK(mark_bitmap != nullptr);
1078       live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
1079       mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
1080     }
1081     auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
1082     DCHECK(it != continuous_spaces_.end());
1083     continuous_spaces_.erase(it);
1084   } else {
1085     DCHECK(space->IsDiscontinuousSpace());
1086     space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
1087     live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1088     mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
1089     auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
1090                         discontinuous_space);
1091     DCHECK(it != discontinuous_spaces_.end());
1092     discontinuous_spaces_.erase(it);
1093   }
1094   if (space->IsAllocSpace()) {
1095     auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
1096     DCHECK(it != alloc_spaces_.end());
1097     alloc_spaces_.erase(it);
1098   }
1099 }
1100 
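// The weighted allocated bytes metrics below integrate heap size over process CPU time: each
// sample adds bytes_allocated * (process CPU nanoseconds elapsed since the previous sample).
// The pre-GC and post-GC variants take their samples just before and just after collections.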
1101 double Heap::CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
1102                                                uint64_t current_process_cpu_time) const {
1103   uint64_t bytes_allocated = GetBytesAllocated();
1104   double weight = current_process_cpu_time - gc_last_process_cpu_time_ns;
1105   return weight * bytes_allocated;
1106 }
1107 
1108 void Heap::CalculatePreGcWeightedAllocatedBytes() {
1109   uint64_t current_process_cpu_time = ProcessCpuNanoTime();
1110   pre_gc_weighted_allocated_bytes_ +=
1111     CalculateGcWeightedAllocatedBytes(pre_gc_last_process_cpu_time_ns_, current_process_cpu_time);
1112   pre_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
1113 }
1114 
1115 void Heap::CalculatePostGcWeightedAllocatedBytes() {
1116   uint64_t current_process_cpu_time = ProcessCpuNanoTime();
1117   post_gc_weighted_allocated_bytes_ +=
1118     CalculateGcWeightedAllocatedBytes(post_gc_last_process_cpu_time_ns_, current_process_cpu_time);
1119   post_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
1120 }
1121 
1122 uint64_t Heap::GetTotalGcCpuTime() {
1123   uint64_t sum = 0;
1124   for (auto* collector : garbage_collectors_) {
1125     sum += collector->GetTotalCpuTime();
1126   }
1127   return sum;
1128 }
1129 
1130 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
1131   // Dump cumulative timings.
1132   os << "Dumping cumulative Gc timings\n";
1133   uint64_t total_duration = 0;
1134   // Dump cumulative loggers for each GC type.
1135   uint64_t total_paused_time = 0;
1136   for (auto* collector : garbage_collectors_) {
1137     total_duration += collector->GetCumulativeTimings().GetTotalNs();
1138     total_paused_time += collector->GetTotalPausedTimeNs();
1139     collector->DumpPerformanceInfo(os);
1140   }
1141   if (total_duration != 0) {
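    // total_duration is in nanoseconds; integer-divide to microseconds first, then divide by
    // 1.0e6 to obtain seconds.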
1142     const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
1143     os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
1144     os << "Mean GC size throughput: "
1145        << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
1146     os << "Mean GC object throughput: "
1147        << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
1148   }
1149   uint64_t total_objects_allocated = GetObjectsAllocatedEver();
1150   os << "Total number of allocations " << total_objects_allocated << "\n";
1151   os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
1152   os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
1153   os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
1154   os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
1155   os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
1156   os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
1157   os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
1158   if (HasZygoteSpace()) {
1159     os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
1160   }
1161   os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
1162   os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
1163   os << "Total GC count: " << GetGcCount() << "\n";
1164   os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
1165   os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
1166   os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
1167 
1168   {
1169     MutexLock mu(Thread::Current(), *gc_complete_lock_);
1170     if (gc_count_rate_histogram_.SampleSize() > 0U) {
1171       os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1172       gc_count_rate_histogram_.DumpBins(os);
1173       os << "\n";
1174     }
1175     if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1176       os << "Histogram of blocking GC count per "
1177          << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1178       blocking_gc_count_rate_histogram_.DumpBins(os);
1179       os << "\n";
1180     }
1181   }
1182 
1183   if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
1184     rosalloc_space_->DumpStats(os);
1185   }
1186 
1187   os << "Native bytes total: " << GetNativeBytes()
1188      << " registered: " << native_bytes_registered_.load(std::memory_order_relaxed) << "\n";
1189 
1190   os << "Total native bytes at last GC: "
1191      << old_native_bytes_allocated_.load(std::memory_order_relaxed) << "\n";
1192 
1193   BaseMutex::DumpAll(os);
1194 }
1195 
1196 void Heap::ResetGcPerformanceInfo() {
1197   for (auto* collector : garbage_collectors_) {
1198     collector->ResetMeasurements();
1199   }
1200 
1201   process_cpu_start_time_ns_ = ProcessCpuNanoTime();
1202 
1203   pre_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
1204   pre_gc_weighted_allocated_bytes_ = 0u;
1205 
1206   post_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
1207   post_gc_weighted_allocated_bytes_ = 0u;
1208 
1209   total_bytes_freed_ever_ = 0;
1210   total_objects_freed_ever_ = 0;
1211   total_wait_time_ = 0;
1212   blocking_gc_count_ = 0;
1213   blocking_gc_time_ = 0;
1214   gc_count_last_window_ = 0;
1215   blocking_gc_count_last_window_ = 0;
1216   last_update_time_gc_count_rate_histograms_ =  // Round down by the window duration.
1217       (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
1218   {
1219     MutexLock mu(Thread::Current(), *gc_complete_lock_);
1220     gc_count_rate_histogram_.Reset();
1221     blocking_gc_count_rate_histogram_.Reset();
1222   }
1223 }
1224 
1225 uint64_t Heap::GetGcCount() const {
1226   uint64_t gc_count = 0U;
1227   for (auto* collector : garbage_collectors_) {
1228     gc_count += collector->GetCumulativeTimings().GetIterations();
1229   }
1230   return gc_count;
1231 }
1232 
1233 uint64_t Heap::GetGcTime() const {
1234   uint64_t gc_time = 0U;
1235   for (auto* collector : garbage_collectors_) {
1236     gc_time += collector->GetCumulativeTimings().GetTotalNs();
1237   }
1238   return gc_time;
1239 }
1240 
1241 uint64_t Heap::GetBlockingGcCount() const {
1242   return blocking_gc_count_;
1243 }
1244 
1245 uint64_t Heap::GetBlockingGcTime() const {
1246   return blocking_gc_time_;
1247 }
1248 
1249 void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
1250   MutexLock mu(Thread::Current(), *gc_complete_lock_);
1251   if (gc_count_rate_histogram_.SampleSize() > 0U) {
1252     gc_count_rate_histogram_.DumpBins(os);
1253   }
1254 }
1255 
1256 void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
1257   MutexLock mu(Thread::Current(), *gc_complete_lock_);
1258   if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1259     blocking_gc_count_rate_histogram_.DumpBins(os);
1260   }
1261 }
1262 
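// Atomically installs new_value as the listener and returns the previously registered listener,
// which may be null.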
1263 ALWAYS_INLINE
1264 static inline AllocationListener* GetAndOverwriteAllocationListener(
1265     Atomic<AllocationListener*>* storage, AllocationListener* new_value) {
1266   return storage->exchange(new_value);
1267 }
1268 
1269 Heap::~Heap() {
1270   VLOG(heap) << "Starting ~Heap()";
1271   STLDeleteElements(&garbage_collectors_);
1272   // If we don't reset then the mark stack complains in its destructor.
1273   allocation_stack_->Reset();
1274   allocation_records_.reset();
1275   live_stack_->Reset();
1276   STLDeleteValues(&mod_union_tables_);
1277   STLDeleteValues(&remembered_sets_);
1278   STLDeleteElements(&continuous_spaces_);
1279   STLDeleteElements(&discontinuous_spaces_);
1280   delete gc_complete_lock_;
1281   delete thread_flip_lock_;
1282   delete pending_task_lock_;
1283   delete backtrace_lock_;
1284   uint64_t unique_count = unique_backtrace_count_.load();
1285   uint64_t seen_count = seen_backtrace_count_.load();
1286   if (unique_count != 0 || seen_count != 0) {
1287     LOG(INFO) << "gc stress unique=" << unique_count << " total=" << (unique_count + seen_count);
1288   }
1289   VLOG(heap) << "Finished ~Heap()";
1290 }
1291 
1292 
1293 space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const {
1294   for (const auto& space : continuous_spaces_) {
1295     if (space->Contains(addr)) {
1296       return space;
1297     }
1298   }
1299   return nullptr;
1300 }
1301 
1302 space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1303                                                             bool fail_ok) const {
1304   space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr());
1305   if (space != nullptr) {
1306     return space;
1307   }
1308   if (!fail_ok) {
1309     LOG(FATAL) << "object " << obj << " not inside any spaces!";
1310   }
1311   return nullptr;
1312 }
1313 
1314 space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1315                                                                   bool fail_ok) const {
1316   for (const auto& space : discontinuous_spaces_) {
1317     if (space->Contains(obj.Ptr())) {
1318       return space;
1319     }
1320   }
1321   if (!fail_ok) {
1322     LOG(FATAL) << "object " << obj << " not inside any spaces!";
1323   }
1324   return nullptr;
1325 }
1326 
1327 space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const {
1328   space::Space* result = FindContinuousSpaceFromObject(obj, true);
1329   if (result != nullptr) {
1330     return result;
1331   }
1332   return FindDiscontinuousSpaceFromObject(obj, fail_ok);
1333 }
1334 
1335 space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
1336   for (const auto& space : continuous_spaces_) {
1337     if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1338       return space;
1339     }
1340   }
1341   for (const auto& space : discontinuous_spaces_) {
1342     if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1343       return space;
1344     }
1345   }
1346   return nullptr;
1347 }
1348 
1349 std::string Heap::DumpSpaceNameFromAddress(const void* addr) const {
1350   space::Space* space = FindSpaceFromAddress(addr);
1351   return (space != nullptr) ? space->GetName() : "no space";
1352 }
1353 
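// Builds and throws an OutOfMemoryError whose message includes the failed byte count and the
// current free-memory statistics. If there is nominally enough free memory, the failure is
// attributed to fragmentation and the owning space logs details of the failed allocation.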
1354 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
1355   // If we're in a stack overflow, do not create a new exception. It would require running the
1356   // constructor, which will of course still be in a stack overflow.
1357   if (self->IsHandlingStackOverflow()) {
1358     self->SetException(
1359         Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow());
1360     return;
1361   }
1362 
1363   std::ostringstream oss;
1364   size_t total_bytes_free = GetFreeMemory();
1365   oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
1366       << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
1367       << " target footprint " << target_footprint_.load(std::memory_order_relaxed)
1368       << ", growth limit "
1369       << growth_limit_;
1370   // If the allocation failed due to fragmentation, print out the largest contiguous allocation.
1371   if (total_bytes_free >= byte_count) {
1372     space::AllocSpace* space = nullptr;
1373     if (allocator_type == kAllocatorTypeNonMoving) {
1374       space = non_moving_space_;
1375     } else if (allocator_type == kAllocatorTypeRosAlloc ||
1376                allocator_type == kAllocatorTypeDlMalloc) {
1377       space = main_space_;
1378     } else if (allocator_type == kAllocatorTypeBumpPointer ||
1379                allocator_type == kAllocatorTypeTLAB) {
1380       space = bump_pointer_space_;
1381     } else if (allocator_type == kAllocatorTypeRegion ||
1382                allocator_type == kAllocatorTypeRegionTLAB) {
1383       space = region_space_;
1384     }
1385     if (space != nullptr) {
1386       space->LogFragmentationAllocFailure(oss, byte_count);
1387     }
1388   }
1389   self->ThrowOutOfMemoryError(oss.str().c_str());
1390 }
1391 
1392 void Heap::DoPendingCollectorTransition() {
1393   CollectorType desired_collector_type = desired_collector_type_;
1394   // Launch homogeneous space compaction if it is desired.
1395   if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
1396     if (!CareAboutPauseTimes()) {
1397       PerformHomogeneousSpaceCompact();
1398     } else {
1399       VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
1400     }
1401   } else if (desired_collector_type == kCollectorTypeCCBackground) {
1402     DCHECK(kUseReadBarrier);
1403     if (!CareAboutPauseTimes()) {
1404       // Invoke CC full compaction.
1405       CollectGarbageInternal(collector::kGcTypeFull,
1406                              kGcCauseCollectorTransition,
1407                              /*clear_soft_references=*/false);
1408     } else {
1409       VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
1410     }
1411   } else {
1412     TransitionCollector(desired_collector_type);
1413   }
1414 }
1415 
1416 void Heap::Trim(Thread* self) {
1417   Runtime* const runtime = Runtime::Current();
1418   if (!CareAboutPauseTimes()) {
1419     // Deflate the monitors; this can cause a pause but shouldn't matter since we don't care
1420     // about pauses.
1421     ScopedTrace trace("Deflating monitors");
1422     // Avoid race conditions on the lock word for CC.
1423     ScopedGCCriticalSection gcs(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1424     ScopedSuspendAll ssa(__FUNCTION__);
1425     uint64_t start_time = NanoTime();
1426     size_t count = runtime->GetMonitorList()->DeflateMonitors();
1427     VLOG(heap) << "Deflating " << count << " monitors took "
1428         << PrettyDuration(NanoTime() - start_time);
1429   }
1430   TrimIndirectReferenceTables(self);
1431   TrimSpaces(self);
1432   // Trim arenas that may have been used by JIT or verifier.
1433   runtime->GetArenaPool()->TrimMaps();
1434 }
1435 
1436 class TrimIndirectReferenceTableClosure : public Closure {
1437  public:
1438   explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1439   }
1440   void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
1441     thread->GetJniEnv()->TrimLocals();
1442     // If thread is a running mutator, then act on behalf of the trim thread.
1443     // See the code in ThreadList::RunCheckpoint.
1444     barrier_->Pass(Thread::Current());
1445   }
1446 
1447  private:
1448   Barrier* const barrier_;
1449 };
1450 
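// Trims the global indirect reference table, then runs a checkpoint on every thread so that each
// one trims its own local table, waiting on a barrier until all checkpoints have run.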
1451 void Heap::TrimIndirectReferenceTables(Thread* self) {
1452   ScopedObjectAccess soa(self);
1453   ScopedTrace trace(__PRETTY_FUNCTION__);
1454   JavaVMExt* vm = soa.Vm();
1455   // Trim globals indirect reference table.
1456   vm->TrimGlobals();
1457   // Trim locals indirect reference tables.
1458   Barrier barrier(0);
1459   TrimIndirectReferenceTableClosure closure(&barrier);
1460   ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1461   size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1462   if (barrier_count != 0) {
1463     barrier.Increment(self, barrier_count);
1464   }
1465 }
1466 
1467 void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
1468   // Need to do this before acquiring the locks since we don't want to get suspended while
1469   // holding any locks.
1470   ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1471   MutexLock mu(self, *gc_complete_lock_);
1472   // Ensure there is only one GC at a time.
1473   WaitForGcToCompleteLocked(cause, self);
1474   collector_type_running_ = collector_type;
1475   last_gc_cause_ = cause;
1476   thread_running_gc_ = self;
1477 }
1478 
1479 void Heap::TrimSpaces(Thread* self) {
1480   // Pretend we are doing a GC to prevent background compaction from deleting the space we are
1481   // trimming.
1482   StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1483   ScopedTrace trace(__PRETTY_FUNCTION__);
1484   const uint64_t start_ns = NanoTime();
1485   // Trim the managed spaces.
1486   uint64_t total_alloc_space_allocated = 0;
1487   uint64_t total_alloc_space_size = 0;
1488   uint64_t managed_reclaimed = 0;
1489   {
1490     ScopedObjectAccess soa(self);
1491     for (const auto& space : continuous_spaces_) {
1492       if (space->IsMallocSpace()) {
1493         gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1494         if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1495           // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1496           // for a long period of time.
1497           managed_reclaimed += malloc_space->Trim();
1498         }
1499         total_alloc_space_size += malloc_space->Size();
1500       }
1501     }
1502   }
1503   total_alloc_space_allocated = GetBytesAllocated();
1504   if (large_object_space_ != nullptr) {
1505     total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1506   }
1507   if (bump_pointer_space_ != nullptr) {
1508     total_alloc_space_allocated -= bump_pointer_space_->Size();
1509   }
1510   if (region_space_ != nullptr) {
1511     total_alloc_space_allocated -= region_space_->GetBytesAllocated();
1512   }
1513   const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1514       static_cast<float>(total_alloc_space_size);
1515   uint64_t gc_heap_end_ns = NanoTime();
1516   // We never move things in the native heap, so we can finish the GC at this point.
1517   FinishGC(self, collector::kGcTypeNone);
1518 
1519   VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1520       << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of "
1521       << static_cast<int>(100 * managed_utilization) << "%.";
1522 }
1523 
1524 bool Heap::IsValidObjectAddress(const void* addr) const {
1525   if (addr == nullptr) {
1526     return true;
1527   }
1528   return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr;
1529 }
1530 
1531 bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const {
1532   return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr;
1533 }
1534 
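// Liveness check used by verification. Tests the bump pointer, temp and region spaces directly,
// then the space live bitmaps, then (with retries, to cover the racy allocation/live stack swap)
// the allocation and live stacks, and finally the bitmaps once more.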
1535 bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
1536                               bool search_allocation_stack,
1537                               bool search_live_stack,
1538                               bool sorted) {
1539   if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) {
1540     return false;
1541   }
1542   if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) {
1543     mirror::Class* klass = obj->GetClass<kVerifyNone>();
1544     if (obj == klass) {
1545       // This case happens for java.lang.Class.
1546       return true;
1547     }
1548     return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1549   } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) {
1550     // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1551     // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
1552     return temp_space_->Contains(obj.Ptr());
1553   }
1554   if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) {
1555     return true;
1556   }
1557   space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
1558   space::DiscontinuousSpace* d_space = nullptr;
1559   if (c_space != nullptr) {
1560     if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1561       return true;
1562     }
1563   } else {
1564     d_space = FindDiscontinuousSpaceFromObject(obj, true);
1565     if (d_space != nullptr) {
1566       if (d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1567         return true;
1568       }
1569     }
1570   }
1571   // This is covering the allocation/live stack swapping that is done without mutators suspended.
1572   for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1573     if (i > 0) {
1574       NanoSleep(MsToNs(10));
1575     }
1576     if (search_allocation_stack) {
1577       if (sorted) {
1578         if (allocation_stack_->ContainsSorted(obj.Ptr())) {
1579           return true;
1580         }
1581       } else if (allocation_stack_->Contains(obj.Ptr())) {
1582         return true;
1583       }
1584     }
1585 
1586     if (search_live_stack) {
1587       if (sorted) {
1588         if (live_stack_->ContainsSorted(obj.Ptr())) {
1589           return true;
1590         }
1591       } else if (live_stack_->Contains(obj.Ptr())) {
1592         return true;
1593       }
1594     }
1595   }
1596   // We need to check the bitmaps again since there is a race where we mark something as live and
1597   // then clear the stack containing it.
1598   if (c_space != nullptr) {
1599     if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1600       return true;
1601     }
1602   } else {
1603     d_space = FindDiscontinuousSpaceFromObject(obj, true);
1604     if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1605       return true;
1606     }
1607   }
1608   return false;
1609 }
1610 
1611 std::string Heap::DumpSpaces() const {
1612   std::ostringstream oss;
1613   DumpSpaces(oss);
1614   return oss.str();
1615 }
1616 
1617 void Heap::DumpSpaces(std::ostream& stream) const {
1618   for (const auto& space : continuous_spaces_) {
1619     accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1620     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1621     stream << space << " " << *space << "\n";
1622     if (live_bitmap != nullptr) {
1623       stream << live_bitmap << " " << *live_bitmap << "\n";
1624     }
1625     if (mark_bitmap != nullptr) {
1626       stream << mark_bitmap << " " << *mark_bitmap << "\n";
1627     }
1628   }
1629   for (const auto& space : discontinuous_spaces_) {
1630     stream << space << " " << *space << "\n";
1631   }
1632 }
1633 
1634 void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
1635   if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1636     return;
1637   }
1638 
1639   // Ignore early dawn of the universe verifications.
1640   if (UNLIKELY(num_bytes_allocated_.load(std::memory_order_relaxed) < 10 * KB)) {
1641     return;
1642   }
1643   CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
1644   mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1645   CHECK(c != nullptr) << "Null class in object " << obj;
1646   CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
1647   CHECK(VerifyClassClass(c));
1648 
1649   if (verify_object_mode_ > kVerifyObjectModeFast) {
1650     // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1651     CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1652   }
1653 }
1654 
1655 void Heap::VerifyHeap() {
1656   ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1657   auto visitor = [&](mirror::Object* obj) {
1658     VerifyObjectBody(obj);
1659   };
1660   // Technically we need the mutator lock here to call Visit. However, VerifyObjectBody is already
1661   // NO_THREAD_SAFETY_ANALYSIS.
1662   auto no_thread_safety_analysis = [&]() NO_THREAD_SAFETY_ANALYSIS {
1663     GetLiveBitmap()->Visit(visitor);
1664   };
1665   no_thread_safety_analysis();
1666 }
1667 
1668 void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
1669   // Use signed comparison since freed bytes can be negative when a background compaction to
1670   // foreground transition occurs. This is typically due to objects moving from a bump pointer
1671   // space to a free list backed space, which may increase memory footprint due to padding and binning.
1672   RACING_DCHECK_LE(freed_bytes,
1673                    static_cast<int64_t>(num_bytes_allocated_.load(std::memory_order_relaxed)));
1674   // Note: This relies on two's complement for handling negative freed_bytes.
1675   num_bytes_allocated_.fetch_sub(static_cast<ssize_t>(freed_bytes), std::memory_order_relaxed);
1676   if (Runtime::Current()->HasStatsEnabled()) {
1677     RuntimeStats* thread_stats = Thread::Current()->GetStats();
1678     thread_stats->freed_objects += freed_objects;
1679     thread_stats->freed_bytes += freed_bytes;
1680     // TODO: Do this concurrently.
1681     RuntimeStats* global_stats = Runtime::Current()->GetStats();
1682     global_stats->freed_objects += freed_objects;
1683     global_stats->freed_bytes += freed_bytes;
1684   }
1685 }
1686 
1687 void Heap::RecordFreeRevoke() {
1688   // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
1689   // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
1690   // If there's a concurrent revoke, it is OK not to reset num_bytes_freed_revoke_
1691   // all the way to zero, as the remainder will be subtracted at the next GC.
1692   size_t bytes_freed = num_bytes_freed_revoke_.load(std::memory_order_relaxed);
1693   CHECK_GE(num_bytes_freed_revoke_.fetch_sub(bytes_freed, std::memory_order_relaxed),
1694            bytes_freed) << "num_bytes_freed_revoke_ underflow";
1695   CHECK_GE(num_bytes_allocated_.fetch_sub(bytes_freed, std::memory_order_relaxed),
1696            bytes_freed) << "num_bytes_allocated_ underflow";
1697   GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1698 }
1699 
1700 space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1701   if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) {
1702     return rosalloc_space_;
1703   }
1704   for (const auto& space : continuous_spaces_) {
1705     if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1706       if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1707         return space->AsContinuousSpace()->AsRosAllocSpace();
1708       }
1709     }
1710   }
1711   return nullptr;
1712 }
1713 
1714 static inline bool EntrypointsInstrumented() REQUIRES_SHARED(Locks::mutator_lock_) {
1715   instrumentation::Instrumentation* const instrumentation =
1716       Runtime::Current()->GetInstrumentation();
1717   return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
1718 }
1719 
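// Allocation slow path, reached after TryToAllocate has failed. Escalates gradually: wait for any
// in-progress GC and retry; run the next planned GC type and retry; run every remaining GC type
// in gc_plan_ and retry; retry with the heap allowed to grow; run a final GC that clears
// SoftReferences and retry with growth; then fall back to allocator-specific last resorts
// (homogeneous space compaction for the malloc allocators, disabling moving GC for the non-moving
// space) before throwing OutOfMemoryError. Returns null whenever the allocator changed or the
// entrypoints became instrumented during a suspension, so the caller can restart the allocation.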
1720 mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
1721                                              AllocatorType allocator,
1722                                              bool instrumented,
1723                                              size_t alloc_size,
1724                                              size_t* bytes_allocated,
1725                                              size_t* usable_size,
1726                                              size_t* bytes_tl_bulk_allocated,
1727                                              ObjPtr<mirror::Class>* klass) {
1728   bool was_default_allocator = allocator == GetCurrentAllocator();
1729   // Make sure there is no pending exception since we may need to throw an OOME.
1730   self->AssertNoPendingException();
1731   DCHECK(klass != nullptr);
1732   StackHandleScope<1> hs(self);
1733   HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
1734   // The allocation failed. If the GC is running, block until it completes, and then retry the
1735   // allocation.
1736   collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
1737   // If we were the default allocator but the allocator changed while we were suspended,
1738   // abort the allocation.
1739   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1740       (!instrumented && EntrypointsInstrumented())) {
1741     return nullptr;
1742   }
1743   if (last_gc != collector::kGcTypeNone) {
1744     // A GC was in progress and we blocked, retry allocation now that memory has been freed.
1745     mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1746                                                      usable_size, bytes_tl_bulk_allocated);
1747     if (ptr != nullptr) {
1748       return ptr;
1749     }
1750   }
1751 
1752   collector::GcType tried_type = next_gc_type_;
1753   const bool gc_ran =
1754       CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1755   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1756       (!instrumented && EntrypointsInstrumented())) {
1757     return nullptr;
1758   }
1759   if (gc_ran) {
1760     mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1761                                                      usable_size, bytes_tl_bulk_allocated);
1762     if (ptr != nullptr) {
1763       return ptr;
1764     }
1765   }
1766 
1767   // Loop through our different GC types and try to GC until we get enough free memory.
1768   for (collector::GcType gc_type : gc_plan_) {
1769     if (gc_type == tried_type) {
1770       continue;
1771     }
1772     // Attempt to run the collector, if we succeed, re-try the allocation.
1773     const bool plan_gc_ran =
1774         CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1775     if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1776         (!instrumented && EntrypointsInstrumented())) {
1777       return nullptr;
1778     }
1779     if (plan_gc_ran) {
1780       // Did we free sufficient memory for the allocation to succeed?
1781       mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1782                                                        usable_size, bytes_tl_bulk_allocated);
1783       if (ptr != nullptr) {
1784         return ptr;
1785       }
1786     }
1787   }
1788   // Allocations have failed after GCs; this is an exceptional state.
1789   // Try harder, growing the heap if necessary.
1790   mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1791                                                   usable_size, bytes_tl_bulk_allocated);
1792   if (ptr != nullptr) {
1793     return ptr;
1794   }
1795   // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1796   // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1797   // VM spec requires that all SoftReferences have been collected and cleared before throwing
1798   // OOME.
1799   VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1800            << " allocation";
1801   // TODO: Run finalization, but this may cause more allocations to occur.
1802   // We don't need a WaitForGcToComplete here either.
1803   DCHECK(!gc_plan_.empty());
1804   CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1805   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1806       (!instrumented && EntrypointsInstrumented())) {
1807     return nullptr;
1808   }
1809   ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
1810                                   bytes_tl_bulk_allocated);
1811   if (ptr == nullptr) {
1812     const uint64_t current_time = NanoTime();
1813     switch (allocator) {
1814       case kAllocatorTypeRosAlloc:
1815         // Fall-through.
1816       case kAllocatorTypeDlMalloc: {
1817         if (use_homogeneous_space_compaction_for_oom_ &&
1818             current_time - last_time_homogeneous_space_compaction_by_oom_ >
1819             min_interval_homogeneous_space_compaction_by_oom_) {
1820           last_time_homogeneous_space_compaction_by_oom_ = current_time;
1821           HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
1822           // Thread suspension could have occurred.
1823           if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1824               (!instrumented && EntrypointsInstrumented())) {
1825             return nullptr;
1826           }
1827           switch (result) {
1828             case HomogeneousSpaceCompactResult::kSuccess:
1829               // If the allocation succeeded, we delayed an oom.
1830               ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1831                                               usable_size, bytes_tl_bulk_allocated);
1832               if (ptr != nullptr) {
1833                 count_delayed_oom_++;
1834               }
1835               break;
1836             case HomogeneousSpaceCompactResult::kErrorReject:
1837               // Reject due to disabled moving GC.
1838               break;
1839             case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1840               // Throw OOM by default.
1841               break;
1842             default: {
1843               UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1844                   << static_cast<size_t>(result);
1845               UNREACHABLE();
1846             }
1847           }
1848           // Always print that we ran homogeneous space compaction since this can cause jank.
1849           VLOG(heap) << "Ran heap homogeneous space compaction, "
1850                     << " requested defragmentation "
1851                     << count_requested_homogeneous_space_compaction_.load()
1852                     << " performed defragmentation "
1853                     << count_performed_homogeneous_space_compaction_.load()
1854                     << " ignored homogeneous space compaction "
1855                     << count_ignored_homogeneous_space_compaction_.load()
1856                     << " delayed count = "
1857                     << count_delayed_oom_.load();
1858         }
1859         break;
1860       }
1861       case kAllocatorTypeNonMoving: {
1862         if (kUseReadBarrier) {
1863           // DisableMovingGc() isn't compatible with CC.
1864           break;
1865         }
1866         // Try to transition the heap if the allocation failure was due to the space being full.
1867         if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow=*/ false)) {
1868           // If we aren't out of memory then the OOM was probably from the non moving space being
1869           // full. Attempt to disable compaction and turn the main space into a non moving space.
1870           DisableMovingGc();
1871           // Thread suspension could have occurred.
1872           if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1873               (!instrumented && EntrypointsInstrumented())) {
1874             return nullptr;
1875           }
1876           // If we are still a moving GC then something must have caused the transition to fail.
1877           if (IsMovingGc(collector_type_)) {
1878             MutexLock mu(self, *gc_complete_lock_);
1879             // If we couldn't disable moving GC, just throw OOME and return null.
1880             LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1881                          << disable_moving_gc_count_;
1882           } else {
1883             LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1884             ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1885                                             usable_size, bytes_tl_bulk_allocated);
1886           }
1887         }
1888         break;
1889       }
1890       default: {
1891         // Do nothing for other allocators.
1892       }
1893     }
1894   }
1895   // If the allocation hasn't succeeded by this point, throw an OOM error.
1896   if (ptr == nullptr) {
1897     ThrowOutOfMemoryError(self, alloc_size, allocator);
1898   }
1899   return ptr;
1900 }
1901 
1902 void Heap::SetTargetHeapUtilization(float target) {
1903   DCHECK_GT(target, 0.1f);  // asserted in Java code
1904   DCHECK_LT(target, 1.0f);
1905   target_utilization_ = target;
1906 }
1907 
1908 size_t Heap::GetObjectsAllocated() const {
1909   Thread* const self = Thread::Current();
1910   ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
1911   // Prevent GC running during GetObjectsAllocated since we may get a checkpoint request that tells
1912   // us to suspend while we are doing SuspendAll. b/35232978
1913   gc::ScopedGCCriticalSection gcs(Thread::Current(),
1914                                   gc::kGcCauseGetObjectsAllocated,
1915                                   gc::kCollectorTypeGetObjectsAllocated);
1916   // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
1917   ScopedSuspendAll ssa(__FUNCTION__);
1918   ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1919   size_t total = 0;
1920   for (space::AllocSpace* space : alloc_spaces_) {
1921     total += space->GetObjectsAllocated();
1922   }
1923   return total;
1924 }
1925 
1926 uint64_t Heap::GetObjectsAllocatedEver() const {
1927   uint64_t total = GetObjectsFreedEver();
1928   // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
1929   if (Thread::Current() != nullptr) {
1930     total += GetObjectsAllocated();
1931   }
1932   return total;
1933 }
1934 
1935 uint64_t Heap::GetBytesAllocatedEver() const {
1936   return GetBytesFreedEver() + GetBytesAllocated();
1937 }
1938 
1939 // Check whether the given object is an instance of the given class.
1940 static bool MatchesClass(mirror::Object* obj,
1941                          Handle<mirror::Class> h_class,
1942                          bool use_is_assignable_from) REQUIRES_SHARED(Locks::mutator_lock_) {
1943   mirror::Class* instance_class = obj->GetClass();
1944   CHECK(instance_class != nullptr);
1945   ObjPtr<mirror::Class> klass = h_class.Get();
1946   if (use_is_assignable_from) {
1947     return klass != nullptr && klass->IsAssignableFrom(instance_class);
1948   }
1949   return instance_class == klass;
1950 }
1951 
1952 void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
1953                           bool use_is_assignable_from,
1954                           uint64_t* counts) {
1955   auto instance_counter = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
1956     for (size_t i = 0; i < classes.size(); ++i) {
1957       if (MatchesClass(obj, classes[i], use_is_assignable_from)) {
1958         ++counts[i];
1959       }
1960     }
1961   };
1962   VisitObjects(instance_counter);
1963 }
1964 
1965 void Heap::GetInstances(VariableSizedHandleScope& scope,
1966                         Handle<mirror::Class> h_class,
1967                         bool use_is_assignable_from,
1968                         int32_t max_count,
1969                         std::vector<Handle<mirror::Object>>& instances) {
1970   DCHECK_GE(max_count, 0);
1971   auto instance_collector = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
1972     if (MatchesClass(obj, h_class, use_is_assignable_from)) {
1973       if (max_count == 0 || instances.size() < static_cast<size_t>(max_count)) {
1974         instances.push_back(scope.NewHandle(obj));
1975       }
1976     }
1977   };
1978   VisitObjects(instance_collector);
1979 }
1980 
1981 void Heap::GetReferringObjects(VariableSizedHandleScope& scope,
1982                                Handle<mirror::Object> o,
1983                                int32_t max_count,
1984                                std::vector<Handle<mirror::Object>>& referring_objects) {
1985   class ReferringObjectsFinder {
1986    public:
1987     ReferringObjectsFinder(VariableSizedHandleScope& scope_in,
1988                            Handle<mirror::Object> object_in,
1989                            int32_t max_count_in,
1990                            std::vector<Handle<mirror::Object>>& referring_objects_in)
1991         REQUIRES_SHARED(Locks::mutator_lock_)
1992         : scope_(scope_in),
1993           object_(object_in),
1994           max_count_(max_count_in),
1995           referring_objects_(referring_objects_in) {}
1996 
1997     // For Object::VisitReferences.
1998     void operator()(ObjPtr<mirror::Object> obj,
1999                     MemberOffset offset,
2000                     bool is_static ATTRIBUTE_UNUSED) const
2001         REQUIRES_SHARED(Locks::mutator_lock_) {
2002       mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
2003       if (ref == object_.Get() && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
2004         referring_objects_.push_back(scope_.NewHandle(obj));
2005       }
2006     }
2007 
2008     void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
2009         const {}
2010     void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
2011 
2012    private:
2013     VariableSizedHandleScope& scope_;
2014     Handle<mirror::Object> const object_;
2015     const uint32_t max_count_;
2016     std::vector<Handle<mirror::Object>>& referring_objects_;
2017     DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
2018   };
2019   ReferringObjectsFinder finder(scope, o, max_count, referring_objects);
2020   auto referring_objects_finder = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2021     obj->VisitReferences(finder, VoidFunctor());
2022   };
2023   VisitObjects(referring_objects_finder);
2024 }
2025 
2026 void Heap::CollectGarbage(bool clear_soft_references, GcCause cause) {
2027   // Even if we waited for a GC we still need to do another GC since weaks allocated during the
2028   // last GC will not have necessarily been cleared.
2029   CollectGarbageInternal(gc_plan_.back(), cause, clear_soft_references);
2030 }
2031 
2032 bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
2033   return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
2034       foreground_collector_type_ == kCollectorTypeCMS;
2035 }
2036 
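// Defragments the main malloc space by copying all of its objects into the backup space, then
// swaps the two so that the compacted copy becomes the new main space. Used to delay OOM when
// homogeneous space compaction for OOM is enabled, and for pending collector transitions.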
2037 HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
2038   Thread* self = Thread::Current();
2039   // Increment the requested homogeneous space compaction count.
2040   count_requested_homogeneous_space_compaction_++;
2041   // Store performed homogeneous space compaction at a new request arrival.
2042   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2043   // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
2044   // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
2045   // http://b/71769596
2046   // Locks::mutator_lock_->AssertNotHeld(self);
2047   {
2048     ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2049     MutexLock mu(self, *gc_complete_lock_);
2050     // Ensure there is only one GC at a time.
2051     WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
2052     // Homogeneous space compaction is a copying transition; we can't run it if the moving GC
2053     // disable count is non-zero.
2054     // If the collector type changed to something which doesn't benefit from homogeneous space
2055     // compaction, exit.
2056     if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
2057         !main_space_->CanMoveObjects()) {
2058       return kErrorReject;
2059     }
2060     if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
2061       return kErrorUnsupported;
2062     }
2063     collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
2064   }
2065   if (Runtime::Current()->IsShuttingDown(self)) {
2066     // Don't allow heap transitions to happen if the runtime is shutting down since these can
2067     // cause objects to get finalized.
2068     FinishGC(self, collector::kGcTypeNone);
2069     return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
2070   }
2071   collector::GarbageCollector* collector;
2072   {
2073     ScopedSuspendAll ssa(__FUNCTION__);
2074     uint64_t start_time = NanoTime();
2075     // Launch compaction.
2076     space::MallocSpace* to_space = main_space_backup_.release();
2077     space::MallocSpace* from_space = main_space_;
2078     to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2079     const uint64_t space_size_before_compaction = from_space->Size();
2080     AddSpace(to_space);
2081     // Make sure that we will have enough room to copy.
2082     CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
2083     collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
2084     const uint64_t space_size_after_compaction = to_space->Size();
2085     main_space_ = to_space;
2086     main_space_backup_.reset(from_space);
2087     RemoveSpace(from_space);
2088     SetSpaceAsDefault(main_space_);  // Set as default to reset the proper dlmalloc space.
2089     // Update performed homogeneous space compaction count.
2090     count_performed_homogeneous_space_compaction_++;
2091     // Print statistics log and resume all threads.
2092     uint64_t duration = NanoTime() - start_time;
2093     VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
2094                << PrettySize(space_size_before_compaction) << " -> "
2095                << PrettySize(space_size_after_compaction) << " compact-ratio: "
2096                << std::fixed << static_cast<double>(space_size_after_compaction) /
2097                static_cast<double>(space_size_before_compaction);
2098   }
2099   // Finish GC.
2100   // Get the references we need to enqueue.
2101   SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self);
2102   GrowForUtilization(semi_space_collector_);
2103   LogGC(kGcCauseHomogeneousSpaceCompact, collector);
2104   FinishGC(self, collector::kGcTypeFull);
2105   // Enqueue any references after losing the GC locks.
2106   clear->Run(self);
2107   clear->Finalize();
2108   {
2109     ScopedObjectAccess soa(self);
2110     soa.Vm()->UnloadNativeLibraries();
2111   }
2112   return HomogeneousSpaceCompactResult::kSuccess;
2113 }
2114 
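// Switches the heap to a different collector implementation at runtime. A copying transition
// (moving <-> non-moving) compacts the heap into the appropriate space type with all threads
// suspended; the loop below busy-waits until this thread wins the right to run the transition.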
2115 void Heap::TransitionCollector(CollectorType collector_type) {
2116   if (collector_type == collector_type_) {
2117     return;
2118   }
2119   // Collector transition must not happen with CC
2120   CHECK(!kUseReadBarrier);
2121   VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
2122              << " -> " << static_cast<int>(collector_type);
2123   uint64_t start_time = NanoTime();
2124   uint32_t before_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
2125   Runtime* const runtime = Runtime::Current();
2126   Thread* const self = Thread::Current();
2127   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2128   // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
2129   // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
2130   // http://b/71769596
2131   // Locks::mutator_lock_->AssertNotHeld(self);
2132   // Busy wait until we can GC (StartGC can fail if we have a non-zero
2133   // compacting_gc_disable_count_; this should rarely occur).
2134   for (;;) {
2135     {
2136       ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2137       MutexLock mu(self, *gc_complete_lock_);
2138       // Ensure there is only one GC at a time.
2139       WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
2140       // Currently we only need a heap transition if we switch from a moving collector to a
2141   // non-moving one, or vice versa.
2142       const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
2143       // If someone else beat us to it and changed the collector before we could, exit.
2144       // This is safe to do before the suspend all since we set the collector_type_running_ before
2145       // we exit the loop. If another thread attempts to do the heap transition before we exit,
2146       // then it would get blocked on WaitForGcToCompleteLocked.
2147       if (collector_type == collector_type_) {
2148         return;
2149       }
2150       // GC can be disabled if someone has used GetPrimitiveArrayCritical but not yet released it.
2151       if (!copying_transition || disable_moving_gc_count_ == 0) {
2152         // TODO: Not hard code in semi-space collector?
2153         collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
2154         break;
2155       }
2156     }
2157     usleep(1000);
2158   }
2159   if (runtime->IsShuttingDown(self)) {
2160     // Don't allow heap transitions to happen if the runtime is shutting down since these can
2161     // cause objects to get finalized.
2162     FinishGC(self, collector::kGcTypeNone);
2163     return;
2164   }
2165   collector::GarbageCollector* collector = nullptr;
2166   {
2167     ScopedSuspendAll ssa(__FUNCTION__);
2168     switch (collector_type) {
2169       case kCollectorTypeSS: {
2170         if (!IsMovingGc(collector_type_)) {
2171           // Create the bump pointer space from the backup space.
2172           CHECK(main_space_backup_ != nullptr);
2173           MemMap mem_map = main_space_backup_->ReleaseMemMap();
2174           // We are transitioning from non moving GC -> moving GC, since we copied from the bump
2175           // pointer space last transition, it will be protected.
2176           CHECK(mem_map.IsValid());
2177           mem_map.Protect(PROT_READ | PROT_WRITE);
2178           bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
2179                                                                           std::move(mem_map));
2180           AddSpace(bump_pointer_space_);
2181           collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
2182           // Use the now empty main space mem map for the bump pointer temp space.
2183           mem_map = main_space_->ReleaseMemMap();
2184           // Unset the pointers just in case.
2185           if (dlmalloc_space_ == main_space_) {
2186             dlmalloc_space_ = nullptr;
2187           } else if (rosalloc_space_ == main_space_) {
2188             rosalloc_space_ = nullptr;
2189           }
2190           // Remove the main space so that we don't try to trim it; this doesn't work for debug
2191           // builds since RosAlloc attempts to read the magic number from a protected page.
2192           RemoveSpace(main_space_);
2193           RemoveRememberedSet(main_space_);
2194           delete main_space_;  // Delete the space since it has been removed.
2195           main_space_ = nullptr;
2196           RemoveRememberedSet(main_space_backup_.get());
2197           main_space_backup_.reset(nullptr);  // Deletes the space.
2198           temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
2199                                                                   std::move(mem_map));
2200           AddSpace(temp_space_);
2201         }
2202         break;
2203       }
2204       case kCollectorTypeMS:
2205         // Fall through.
2206       case kCollectorTypeCMS: {
2207         if (IsMovingGc(collector_type_)) {
2208           CHECK(temp_space_ != nullptr);
2209           MemMap mem_map = temp_space_->ReleaseMemMap();
2210           RemoveSpace(temp_space_);
2211           temp_space_ = nullptr;
2212           mem_map.Protect(PROT_READ | PROT_WRITE);
2213           CreateMainMallocSpace(std::move(mem_map),
2214                                 kDefaultInitialSize,
2215                                 std::min(mem_map.Size(), growth_limit_),
2216                                 mem_map.Size());
2217           // Compact to the main space from the bump pointer space; we don't need to swap semispaces.
2218           AddSpace(main_space_);
2219           collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
2220           mem_map = bump_pointer_space_->ReleaseMemMap();
2221           RemoveSpace(bump_pointer_space_);
2222           bump_pointer_space_ = nullptr;
2223           const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
2224           // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
2225           if (kIsDebugBuild && kUseRosAlloc) {
2226             mem_map.Protect(PROT_READ | PROT_WRITE);
2227           }
2228           main_space_backup_.reset(CreateMallocSpaceFromMemMap(
2229               std::move(mem_map),
2230               kDefaultInitialSize,
2231               std::min(mem_map.Size(), growth_limit_),
2232               mem_map.Size(),
2233               name,
2234               true));
2235           if (kIsDebugBuild && kUseRosAlloc) {
2236             main_space_backup_->GetMemMap()->Protect(PROT_NONE);
2237           }
2238         }
2239         break;
2240       }
2241       default: {
2242         LOG(FATAL) << "Attempted to transition to invalid collector type "
2243                    << static_cast<size_t>(collector_type);
2244         UNREACHABLE();
2245       }
2246     }
2247     ChangeCollector(collector_type);
2248   }
2249   // Can't call into java code with all threads suspended or the GC ongoing.
2250   SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self);
2251   uint64_t duration = NanoTime() - start_time;
2252   GrowForUtilization(semi_space_collector_);
2253   DCHECK(collector != nullptr);
2254   LogGC(kGcCauseCollectorTransition, collector);
2255   FinishGC(self, collector::kGcTypeFull);
2256   // Now call into java and enqueue the references.
2257   clear->Run(self);
2258   clear->Finalize();
2259   {
2260     ScopedObjectAccess soa(self);
2261     soa.Vm()->UnloadNativeLibraries();
2262   }
2263   int32_t after_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
2264   int32_t delta_allocated = before_allocated - after_allocated;
2265   std::string saved_str;
2266   if (delta_allocated >= 0) {
2267     saved_str = " saved at least " + PrettySize(delta_allocated);
2268   } else {
2269     saved_str = " expanded " + PrettySize(-delta_allocated);
2270   }
2271   VLOG(heap) << "Collector transition to " << collector_type << " took "
2272              << PrettyDuration(duration) << saved_str;
2273 }
2274 
2275 void Heap::ChangeCollector(CollectorType collector_type) {
2276   // TODO: Only do this with all mutators suspended to avoid races.
2277   if (collector_type != collector_type_) {
2278     collector_type_ = collector_type;
2279     gc_plan_.clear();
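    // gc_plan_ lists GC types in escalation order: allocation failures try the cheapest entry
    // first and fall back to the next one when too little memory is reclaimed.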
2280     switch (collector_type_) {
2281       case kCollectorTypeCC: {
2282         if (use_generational_cc_) {
2283           gc_plan_.push_back(collector::kGcTypeSticky);
2284         }
2285         gc_plan_.push_back(collector::kGcTypeFull);
2286         if (use_tlab_) {
2287           ChangeAllocator(kAllocatorTypeRegionTLAB);
2288         } else {
2289           ChangeAllocator(kAllocatorTypeRegion);
2290         }
2291         break;
2292       }
2293       case kCollectorTypeSS:  // Fall-through.
2294       case kCollectorTypeGSS: {
2295         gc_plan_.push_back(collector::kGcTypeFull);
2296         if (use_tlab_) {
2297           ChangeAllocator(kAllocatorTypeTLAB);
2298         } else {
2299           ChangeAllocator(kAllocatorTypeBumpPointer);
2300         }
2301         break;
2302       }
2303       case kCollectorTypeMS: {
2304         gc_plan_.push_back(collector::kGcTypeSticky);
2305         gc_plan_.push_back(collector::kGcTypePartial);
2306         gc_plan_.push_back(collector::kGcTypeFull);
2307         ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2308         break;
2309       }
2310       case kCollectorTypeCMS: {
2311         gc_plan_.push_back(collector::kGcTypeSticky);
2312         gc_plan_.push_back(collector::kGcTypePartial);
2313         gc_plan_.push_back(collector::kGcTypeFull);
2314         ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2315         break;
2316       }
2317       default: {
2318         UNIMPLEMENTED(FATAL);
2319         UNREACHABLE();
2320       }
2321     }
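    // Concurrent collectors kick off a background GC kMinConcurrentRemainingBytes before the
    // target footprint is reached; non-concurrent collectors disable the early trigger entirely.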
2322     if (IsGcConcurrent()) {
2323       concurrent_start_bytes_ =
2324           UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
2325                              kMinConcurrentRemainingBytes);
2326     } else {
2327       concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2328     }
2329   }
2330 }
2331 
2332 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
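// Illustrative example of the bin packing (hypothetical sizes): with free gaps ("bins") of
// {16, 32, 64} bytes and a surviving object needing 24 bytes, lower_bound(24) picks the 32-byte
// bin, the object is copied there, and the 8-byte remainder is re-inserted as a new bin. Objects
// that fit in no bin are allocated at the end of the target space, growing the zygote space.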
2333 class ZygoteCompactingCollector final : public collector::SemiSpace {
2334  public:
2335   ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
2336       : SemiSpace(heap, false, "zygote collector"),
2337         bin_live_bitmap_(nullptr),
2338         bin_mark_bitmap_(nullptr),
2339         is_running_on_memory_tool_(is_running_on_memory_tool) {}
2340 
2341   void BuildBins(space::ContinuousSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
2342     bin_live_bitmap_ = space->GetLiveBitmap();
2343     bin_mark_bitmap_ = space->GetMarkBitmap();
2344     uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin());
2345     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2346     // Note: This requires traversing the space in increasing order of object addresses.
2347     auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2348       uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
2349       size_t bin_size = object_addr - prev;
2350       // Add the bin spanning from the end of the previous object to the start of the current object.
2351       AddBin(bin_size, prev);
2352       prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
2353     };
2354     bin_live_bitmap_->Walk(visitor);
2355     // Add the last bin, which spans from the end of the last object to the end of the space.
2356     AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev);
2357   }
2358 
2359  private:
2360   // Maps from bin sizes to locations.
2361   std::multimap<size_t, uintptr_t> bins_;
2362   // Live bitmap of the space which contains the bins.
2363   accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
2364   // Mark bitmap of the space which contains the bins.
2365   accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
2366   const bool is_running_on_memory_tool_;
2367 
2368   void AddBin(size_t size, uintptr_t position) {
2369     if (is_running_on_memory_tool_) {
2370       MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
2371     }
2372     if (size != 0) {
2373       bins_.insert(std::make_pair(size, position));
2374     }
2375   }
2376 
2377   bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const override {
2378     // Don't sweep any spaces since we probably blasted the internal accounting of the free list
2379     // allocator.
2380     return false;
2381   }
2382 
2383   mirror::Object* MarkNonForwardedObject(mirror::Object* obj) override
2384       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
2385     size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
2386     size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
2387     mirror::Object* forward_address;
2388     // Find the smallest bin into which we can move obj.
2389     auto it = bins_.lower_bound(alloc_size);
2390     if (it == bins_.end()) {
2391       // No available space in the bins, place it in the target space instead (grows the zygote
2392       // space).
2393       size_t bytes_allocated, dummy;
2394       forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy);
2395       if (to_space_live_bitmap_ != nullptr) {
2396         to_space_live_bitmap_->Set(forward_address);
2397       } else {
2398         GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2399         GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
2400       }
2401     } else {
2402       size_t size = it->first;
2403       uintptr_t pos = it->second;
2404       bins_.erase(it);  // Erase the old bin which we replace with the new smaller bin.
2405       forward_address = reinterpret_cast<mirror::Object*>(pos);
2406       // Set the live and mark bits so that sweeping system weaks works properly.
2407       bin_live_bitmap_->Set(forward_address);
2408       bin_mark_bitmap_->Set(forward_address);
2409       DCHECK_GE(size, alloc_size);
2410       // Add a new bin with the remaining space.
2411       AddBin(size - alloc_size, pos + alloc_size);
2412     }
2413     // Copy the object over to its new location.
2414     // Historical note: We did not use `alloc_size` to avoid a Valgrind error.
2415     memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
2416     if (kUseBakerReadBarrier) {
2417       obj->AssertReadBarrierState();
2418       forward_address->AssertReadBarrierState();
2419     }
2420     return forward_address;
2421   }
2422 };
2423 
2424 void Heap::UnBindBitmaps() {
2425   TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
2426   for (const auto& space : GetContinuousSpaces()) {
2427     if (space->IsContinuousMemMapAllocSpace()) {
2428       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2429       if (alloc_space->HasBoundBitmaps()) {
2430         alloc_space->UnBindBitmaps();
2431       }
2432     }
2433   }
2434 }
2435 
2436 void Heap::PreZygoteFork() {
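  // Called on the zygote side before forking: compacts the live heap into the non-moving space
  // and seals it as an immutable zygote space, so that forked app processes share those pages
  // copy-on-write.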
2437   if (!HasZygoteSpace()) {
2438     // We still want to GC in case there are unreachable non-moving objects that could cause
2439     // suboptimal bin packing when we compact the zygote space.
2440     CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
2441     // Trim the pages at the end of the non-moving space. Trim while not holding the zygote lock
2442     // since the trim process may require locking the mutator lock.
2443     non_moving_space_->Trim();
2444   }
2445   Thread* self = Thread::Current();
2446   MutexLock mu(self, zygote_creation_lock_);
2447   // Check whether we already have a zygote space.
2448   if (HasZygoteSpace()) {
2449     return;
2450   }
2451   Runtime::Current()->GetInternTable()->AddNewTable();
2452   Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
2453   VLOG(heap) << "Starting PreZygoteFork";
2454   // The end of the non-moving space may be protected; unprotect it so that we can copy the zygote
2455   // there.
2456   non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2457   const bool same_space = non_moving_space_ == main_space_;
2458   if (kCompactZygote) {
2459     // Temporarily disable rosalloc verification because the zygote
2460     // compaction will mess up the rosalloc internal metadata.
2461     ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
2462     ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
2463     zygote_collector.BuildBins(non_moving_space_);
2464     // Create a new bump pointer space which we will compact into.
2465     space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2466                                          non_moving_space_->Limit());
2467     // Compact the bump pointer space to a new zygote bump pointer space.
2468     bool reset_main_space = false;
2469     if (IsMovingGc(collector_type_)) {
2470       if (collector_type_ == kCollectorTypeCC) {
2471         zygote_collector.SetFromSpace(region_space_);
2472       } else {
2473         zygote_collector.SetFromSpace(bump_pointer_space_);
2474       }
2475     } else {
2476       CHECK(main_space_ != nullptr);
2477       CHECK_NE(main_space_, non_moving_space_)
2478           << "Does not make sense to compact within the same space";
2479       // Copy from the main space.
2480       zygote_collector.SetFromSpace(main_space_);
2481       reset_main_space = true;
2482     }
2483     zygote_collector.SetToSpace(&target_space);
2484     zygote_collector.SetSwapSemiSpaces(false);
2485     zygote_collector.Run(kGcCauseCollectorTransition, false);
2486     if (reset_main_space) {
2487       main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2488       madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
2489       MemMap mem_map = main_space_->ReleaseMemMap();
2490       RemoveSpace(main_space_);
2491       space::Space* old_main_space = main_space_;
2492       CreateMainMallocSpace(std::move(mem_map),
2493                             kDefaultInitialSize,
2494                             std::min(mem_map.Size(), growth_limit_),
2495                             mem_map.Size());
2496       delete old_main_space;
2497       AddSpace(main_space_);
2498     } else {
2499       if (collector_type_ == kCollectorTypeCC) {
2500         region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2501         // We evacuated everything out of the region space, so clear the mark bitmap.
2502         region_space_->GetMarkBitmap()->Clear();
2503       } else {
2504         bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2505       }
2506     }
2507     if (temp_space_ != nullptr) {
2508       CHECK(temp_space_->IsEmpty());
2509     }
2510     total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2511     total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2512     // Update the end and write out image.
2513     non_moving_space_->SetEnd(target_space.End());
2514     non_moving_space_->SetLimit(target_space.Limit());
2515     VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
2516   }
2517   // Change the collector to the post zygote one.
2518   ChangeCollector(foreground_collector_type_);
2519   // Save the old space so that we can remove it after we complete creating the zygote space.
2520   space::MallocSpace* old_alloc_space = non_moving_space_;
2521   // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
2522   // the remaining available space.
2523   // Remove the old space before creating the zygote space since creating the zygote space sets
2524   // the old alloc space's bitmaps to null.
2525   RemoveSpace(old_alloc_space);
2526   if (collector::SemiSpace::kUseRememberedSet) {
2527     // Sanity bound check.
2528     FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2529     // Remove the remembered set for the now zygote space (the old
2530     // non-moving space). Now that we have compacted objects into
2531     // the zygote space, the data in the remembered set is no longer
2532     // needed. The zygote space will instead have a mod-union table
2533     // from this point on.
2534     RemoveRememberedSet(old_alloc_space);
2535   }
2536   // Remaining space becomes the new non moving space.
2537   zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
2538                                                      &non_moving_space_);
2539   CHECK(!non_moving_space_->CanMoveObjects());
2540   if (same_space) {
2541     main_space_ = non_moving_space_;
2542     SetSpaceAsDefault(main_space_);
2543   }
2544   delete old_alloc_space;
2545   CHECK(HasZygoteSpace()) << "Failed creating zygote space";
2546   AddSpace(zygote_space_);
2547   non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2548   AddSpace(non_moving_space_);
2549   if (kUseBakerReadBarrier && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects) {
2550     // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is
2551     // safe since we mark all of the objects that may reference non immune objects as gray.
2552     zygote_space_->GetLiveBitmap()->VisitMarkedRange(
2553         reinterpret_cast<uintptr_t>(zygote_space_->Begin()),
2554         reinterpret_cast<uintptr_t>(zygote_space_->Limit()),
2555         [](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2556       CHECK(obj->AtomicSetMarkBit(0, 1));
2557     });
2558   }
2559 
2560   // Create the zygote space mod union table.
2561   accounting::ModUnionTable* mod_union_table =
2562       new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space_);
2563   CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
2564 
2565   if (collector_type_ != kCollectorTypeCC) {
2566     // Set all the cards in the mod-union table since we don't know which objects contain references
2567     // to large objects.
2568     mod_union_table->SetCards();
2569   } else {
2570     // Make sure to clear the zygote space cards so that we don't dirty pages in the next GC. There
2571     // may be dirty cards from the zygote compaction or reference processing. These cards do not
2572     // need to be marked since the zygote space may not refer to any objects not in the
2573     // zygote or image spaces at this point.
2574     mod_union_table->ProcessCards();
2575     mod_union_table->ClearTable();
2576 
2577     // For CC we never collect zygote large objects. This means we do not need to set the cards for
2578     // the zygote mod-union table and we can also clear all of the existing image mod-union tables.
2579     // The existing mod-union tables are only for image spaces and may only reference zygote and
2580     // image objects.
2581     for (auto& pair : mod_union_tables_) {
2582       CHECK(pair.first->IsImageSpace());
2583       CHECK(!pair.first->AsImageSpace()->GetImageHeader().IsAppImage());
2584       accounting::ModUnionTable* table = pair.second;
2585       table->ClearTable();
2586     }
2587   }
2588   AddModUnionTable(mod_union_table);
2589   large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
2590   if (collector::SemiSpace::kUseRememberedSet) {
2591     // Add a new remembered set for the post-zygote non-moving space.
2592     accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2593         new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2594                                       non_moving_space_);
2595     CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2596         << "Failed to create post-zygote non-moving space remembered set";
2597     AddRememberedSet(post_zygote_non_moving_space_rem_set);
2598   }
2599 }
2600 
2601 void Heap::FlushAllocStack() {
2602   MarkAllocStackAsLive(allocation_stack_.get());
2603   allocation_stack_->Reset();
2604 }
2605 
2606 void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2607                           accounting::ContinuousSpaceBitmap* bitmap2,
2608                           accounting::LargeObjectBitmap* large_objects,
2609                           accounting::ObjectStack* stack) {
2610   DCHECK(bitmap1 != nullptr);
2611   DCHECK(bitmap2 != nullptr);
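  // Mark each stack entry in whichever bitmap covers its address; anything outside both
  // continuous-space bitmaps must be a large object.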
2612   const auto* limit = stack->End();
2613   for (auto* it = stack->Begin(); it != limit; ++it) {
2614     const mirror::Object* obj = it->AsMirrorPtr();
2615     if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2616       if (bitmap1->HasAddress(obj)) {
2617         bitmap1->Set(obj);
2618       } else if (bitmap2->HasAddress(obj)) {
2619         bitmap2->Set(obj);
2620       } else {
2621         DCHECK(large_objects != nullptr);
2622         large_objects->Set(obj);
2623       }
2624     }
2625   }
2626 }
2627 
2628 void Heap::SwapSemiSpaces() {
2629   CHECK(bump_pointer_space_ != nullptr);
2630   CHECK(temp_space_ != nullptr);
2631   std::swap(bump_pointer_space_, temp_space_);
2632 }
2633 
2634 collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2635                                            space::ContinuousMemMapAllocSpace* source_space,
2636                                            GcCause gc_cause) {
2637   CHECK(kMovingCollector);
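  // One-way copy: unlike a regular semi-space collection the spaces are not swapped afterwards,
  // so the source space is left empty for the caller to recycle or release.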
2638   if (target_space != source_space) {
2639     // Don't swap spaces since this isn't a typical semi space collection.
2640     semi_space_collector_->SetSwapSemiSpaces(false);
2641     semi_space_collector_->SetFromSpace(source_space);
2642     semi_space_collector_->SetToSpace(target_space);
2643     semi_space_collector_->Run(gc_cause, false);
2644     return semi_space_collector_;
2645   }
2646   LOG(FATAL) << "Unsupported";
2647   UNREACHABLE();
2648 }
2649 
2650 void Heap::TraceHeapSize(size_t heap_size) {
2651   ATraceIntegerValue("Heap size (KB)", heap_size / KB);
2652 }
2653 
2654 size_t Heap::GetNativeBytes() {
2655   size_t malloc_bytes;
2656 #if defined(__BIONIC__) || defined(__GLIBC__)
2657   size_t mmapped_bytes;
2658   struct mallinfo mi = mallinfo();
2659   // In spite of the documentation, the jemalloc version of this call seems to do what we want,
2660   // and it is thread-safe.
2661   if (sizeof(size_t) > sizeof(mi.uordblks) && sizeof(size_t) > sizeof(mi.hblkhd)) {
2662     // Shouldn't happen, but glibc declares uordblks as int.
2663     // Avoiding sign extension gets us correct behavior for another 2 GB.
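    // E.g. (illustrative) an int uordblks of -2000000000 cast to unsigned int yields about
    // 2.29 GB, rather than the huge value that sign extension to size_t would produce.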
2664     malloc_bytes = (unsigned int)mi.uordblks;
2665     mmapped_bytes = (unsigned int)mi.hblkhd;
2666   } else {
2667     malloc_bytes = mi.uordblks;
2668     mmapped_bytes = mi.hblkhd;
2669   }
2670   // From the spec, we clearly have mmapped_bytes <= malloc_bytes. Reality is sometimes
2671   // dramatically different. (b/119580449) If so, fudge it.
2672   if (mmapped_bytes > malloc_bytes) {
2673     malloc_bytes = mmapped_bytes;
2674   }
2675 #else
2676   // We should hit this case only in contexts in which GC triggering is not critical. Effectively
2677   // disable GC triggering based on malloc().
2678   malloc_bytes = 1000;
2679 #endif
2680   return malloc_bytes + native_bytes_registered_.load(std::memory_order_relaxed);
2681   // An alternative would be to get RSS from /proc/self/statm. Empirically, that's no
2682   // more expensive, and it would allow us to count memory allocated by means other than malloc.
2683   // However it would change as pages are unmapped and remapped due to memory pressure, among
2684   // other things. It seems risky to trigger GCs as a result of such changes.
2685 }
2686 
2687 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
2688                                                GcCause gc_cause,
2689                                                bool clear_soft_references) {
2690   Thread* self = Thread::Current();
2691   Runtime* runtime = Runtime::Current();
2692   // If the heap can't run the GC, silently fail and return that no GC was run.
2693   switch (gc_type) {
2694     case collector::kGcTypePartial: {
2695       if (!HasZygoteSpace()) {
2696         return collector::kGcTypeNone;
2697       }
2698       break;
2699     }
2700     default: {
2701       // Other GC types don't have any special cases that would make them unrunnable. The main
2702       // case here is full GC.
2703     }
2704   }
2705   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2706   // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
2707   // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
2708   // http://b/71769596
2709   // Locks::mutator_lock_->AssertNotHeld(self);
2710   if (self->IsHandlingStackOverflow()) {
2711     // If we are throwing a stack overflow error we probably don't have enough remaining stack
2712     // space to run the GC.
2713     return collector::kGcTypeNone;
2714   }
2715   bool compacting_gc;
2716   {
2717     gc_complete_lock_->AssertNotHeld(self);
2718     ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2719     MutexLock mu(self, *gc_complete_lock_);
2720     // Ensure there is only one GC at a time.
2721     WaitForGcToCompleteLocked(gc_cause, self);
2722     compacting_gc = IsMovingGc(collector_type_);
2723     // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2724     if (compacting_gc && disable_moving_gc_count_ != 0) {
2725       LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2726       return collector::kGcTypeNone;
2727     }
2728     if (gc_disabled_for_shutdown_) {
2729       return collector::kGcTypeNone;
2730     }
2731     collector_type_running_ = collector_type_;
2732   }
2733   if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2734     ++runtime->GetStats()->gc_for_alloc_count;
2735     ++self->GetStats()->gc_for_alloc_count;
2736   }
2737   const size_t bytes_allocated_before_gc = GetBytesAllocated();
2738 
2739   DCHECK_LT(gc_type, collector::kGcTypeMax);
2740   DCHECK_NE(gc_type, collector::kGcTypeNone);
2741 
2742   collector::GarbageCollector* collector = nullptr;
2743   // TODO: Clean this up.
2744   if (compacting_gc) {
2745     DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2746            current_allocator_ == kAllocatorTypeTLAB ||
2747            current_allocator_ == kAllocatorTypeRegion ||
2748            current_allocator_ == kAllocatorTypeRegionTLAB);
2749     switch (collector_type_) {
2750       case kCollectorTypeSS:
2751         // Fall-through.
2752       case kCollectorTypeGSS:
2753         semi_space_collector_->SetFromSpace(bump_pointer_space_);
2754         semi_space_collector_->SetToSpace(temp_space_);
2755         semi_space_collector_->SetSwapSemiSpaces(true);
2756         collector = semi_space_collector_;
2757         break;
2758       case kCollectorTypeCC:
2759         if (use_generational_cc_) {
2760           // TODO: Other threads must do the flip checkpoint before they start poking at
2761           // active_concurrent_copying_collector_, so there should be no concurrency here.
2762           active_concurrent_copying_collector_ = (gc_type == collector::kGcTypeSticky) ?
2763               young_concurrent_copying_collector_ : concurrent_copying_collector_;
2764           DCHECK(active_concurrent_copying_collector_->RegionSpace() == region_space_);
2765         }
2766         collector = active_concurrent_copying_collector_;
2767         break;
2768       default:
2769         LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2770     }
2771     if (collector != active_concurrent_copying_collector_) {
2772       temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2773       if (kIsDebugBuild) {
2774         // Try to read each page of the memory map in case mprotect didn't work properly (b/19894268).
2775         temp_space_->GetMemMap()->TryReadable();
2776       }
2777       CHECK(temp_space_->IsEmpty());
2778     }
2779     gc_type = collector::kGcTypeFull;  // TODO: Don't hard code this in.
2780   } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2781       current_allocator_ == kAllocatorTypeDlMalloc) {
2782     collector = FindCollectorByGcType(gc_type);
2783   } else {
2784     LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2785   }
2786 
2787   CHECK(collector != nullptr)
2788       << "Could not find garbage collector with collector_type="
2789       << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
2790   collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2791   total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2792   total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2793   RequestTrim(self);
2794   // Collect cleared references.
2795   SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self);
2796   // Grow the heap so that we know when to perform the next GC.
2797   GrowForUtilization(collector, bytes_allocated_before_gc);
2798   LogGC(gc_cause, collector);
2799   FinishGC(self, gc_type);
2800   // Actually enqueue all cleared references. Do this after the GC has officially finished since
2801   // otherwise we can deadlock.
2802   clear->Run(self);
2803   clear->Finalize();
2804   // Inform DDMS that a GC completed.
2805   Dbg::GcDidFinish();
2806 
2807   old_native_bytes_allocated_.store(GetNativeBytes());
2808 
2809   // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
2810   // deadlocks in case the JNI_OnUnload function does allocations.
2811   {
2812     ScopedObjectAccess soa(self);
2813     soa.Vm()->UnloadNativeLibraries();
2814   }
2815   return gc_type;
2816 }
2817 
2818 void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
2819   const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2820   const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2821   // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2822   // (mutator time blocked >= long_pause_log_threshold_).
2823   bool log_gc = kLogAllGCs || gc_cause == kGcCauseExplicit;
2824   if (!log_gc && CareAboutPauseTimes()) {
2825     // GC for alloc pauses the allocating thread, so consider it as a pause.
2826     log_gc = duration > long_gc_log_threshold_ ||
2827         (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2828     for (uint64_t pause : pause_times) {
2829       log_gc = log_gc || pause >= long_pause_log_threshold_;
2830     }
2831   }
2832   if (log_gc) {
2833     const size_t percent_free = GetPercentFree();
2834     const size_t current_heap_size = GetBytesAllocated();
2835     const size_t total_memory = GetTotalMemory();
2836     std::ostringstream pause_string;
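    // Round pause times down to microsecond granularity; sub-microsecond noise adds nothing to
    // the log.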
2837     for (size_t i = 0; i < pause_times.size(); ++i) {
2838       pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2839                    << ((i != pause_times.size() - 1) ? "," : "");
2840     }
2841     LOG(INFO) << gc_cause << " " << collector->GetName()
2842               << " GC freed "  << current_gc_iteration_.GetFreedObjects() << "("
2843               << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2844               << current_gc_iteration_.GetFreedLargeObjects() << "("
2845               << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2846               << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2847               << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2848               << " total " << PrettyDuration((duration / 1000) * 1000);
2849     VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2850   }
2851 }
2852 
2853 void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2854   MutexLock mu(self, *gc_complete_lock_);
2855   collector_type_running_ = kCollectorTypeNone;
2856   if (gc_type != collector::kGcTypeNone) {
2857     last_gc_type_ = gc_type;
2858 
2859     // Update stats.
2860     ++gc_count_last_window_;
2861     if (running_collection_is_blocking_) {
2862       // If the currently running collection was a blocking one,
2863       // increment the counters and reset the flag.
2864       ++blocking_gc_count_;
2865       blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
2866       ++blocking_gc_count_last_window_;
2867     }
2868     // Update the gc count rate histograms if due.
2869     UpdateGcCountRateHistograms();
2870   }
2871   // Reset.
2872   running_collection_is_blocking_ = false;
2873   thread_running_gc_ = nullptr;
2874   // Wake anyone who may have been waiting for the GC to complete.
2875   gc_complete_cond_->Broadcast(self);
2876 }
2877 
2878 void Heap::UpdateGcCountRateHistograms() {
2879   // Invariant: if the time since the last update spans more than one
2880   // window, all the GC runs (if > 0) must have happened in the first
2881   // window, because otherwise the update would already have taken place
2882   // at an earlier GC run. So, we report the non-first windows with
2883   // zero counts to the histograms.
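  // Worked example (illustrative): with a 10s window and a GC occurring 35s after the last
  // update, num_of_windows == 3; the first window records the counted runs and the remaining
  // two windows are recorded as zero.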
2884   DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2885   uint64_t now = NanoTime();
2886   DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
2887   uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
2888   uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
2889 
2890   // The computed number of windows can be incoherently high if NanoTime() is not monotonic.
2891   // Setting a limit on its maximum value reduces the impact on CPU time in such cases.
2892   if (num_of_windows > kGcCountRateHistogramMaxNumMissedWindows) {
2893     LOG(WARNING) << "Reducing the number of considered missed Gc histogram windows from "
2894                  << num_of_windows << " to " << kGcCountRateHistogramMaxNumMissedWindows;
2895     num_of_windows = kGcCountRateHistogramMaxNumMissedWindows;
2896   }
2897 
2898   if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
2899     // Record the first window.
2900     gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1);  // Exclude the current run.
2901     blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
2902         blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
2903     // Record the other windows (with zero counts).
2904     for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
2905       gc_count_rate_histogram_.AddValue(0);
2906       blocking_gc_count_rate_histogram_.AddValue(0);
2907     }
2908     // Update the last update time and reset the counters.
2909     last_update_time_gc_count_rate_histograms_ =
2910         (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
2911     gc_count_last_window_ = 1;  // Include the current run.
2912     blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
2913   }
2914   DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2915 }
2916 
2917 class RootMatchesObjectVisitor : public SingleRootVisitor {
2918  public:
2919   explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
2920 
2921   void VisitRoot(mirror::Object* root, const RootInfo& info)
2922       override REQUIRES_SHARED(Locks::mutator_lock_) {
2923     if (root == obj_) {
2924       LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
2925     }
2926   }
2927 
2928  private:
2929   const mirror::Object* const obj_;
2930 };
2931 
2932 
2933 class ScanVisitor {
2934  public:
2935   void operator()(const mirror::Object* obj) const {
2936     LOG(ERROR) << "Would have rescanned object " << obj;
2937   }
2938 };
2939 
2940 // Verify a reference from an object.
2941 class VerifyReferenceVisitor : public SingleRootVisitor {
2942  public:
2943   VerifyReferenceVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent)
2944       REQUIRES_SHARED(Locks::mutator_lock_)
2945       : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
2946     CHECK_EQ(self_, Thread::Current());
2947   }
2948 
2949   void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const
2950       REQUIRES_SHARED(Locks::mutator_lock_) {
2951     if (verify_referent_) {
2952       VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
2953     }
2954   }
2955 
2956   void operator()(ObjPtr<mirror::Object> obj,
2957                   MemberOffset offset,
2958                   bool is_static ATTRIBUTE_UNUSED) const
2959       REQUIRES_SHARED(Locks::mutator_lock_) {
2960     VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
2961   }
2962 
2963   bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS {
2964     return heap_->IsLiveObjectLocked(obj, true, false, true);
2965   }
2966 
2967   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
2968       REQUIRES_SHARED(Locks::mutator_lock_) {
2969     if (!root->IsNull()) {
2970       VisitRoot(root);
2971     }
2972   }
2973   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
2974       REQUIRES_SHARED(Locks::mutator_lock_) {
2975     const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
2976         root->AsMirrorPtr(), RootInfo(kRootVMInternal));
2977   }
2978 
2979   void VisitRoot(mirror::Object* root, const RootInfo& root_info) override
2980       REQUIRES_SHARED(Locks::mutator_lock_) {
2981     if (root == nullptr) {
2982       LOG(ERROR) << "Root is null with info " << root_info.GetType();
2983     } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
2984       LOG(ERROR) << "Root " << root << " is dead with type " << mirror::Object::PrettyTypeOf(root)
2985           << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
2986     }
2987   }
2988 
2989  private:
2990   // TODO: Fix the no thread safety analysis.
2991   // Returns false on failure.
2992   bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
2993       NO_THREAD_SAFETY_ANALYSIS {
2994     if (ref == nullptr || IsLive(ref)) {
2995       // Verify that the reference is live.
2996       return true;
2997     }
2998     CHECK_EQ(self_, Thread::Current());  // fail_count_ is private to the calling thread.
2999     *fail_count_ += 1;
3000     if (*fail_count_ == 1) {
3001       // Only print message for the first failure to prevent spam.
3002       LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
3003     }
3004     if (obj != nullptr) {
3005       // Only do this part for non roots.
3006       accounting::CardTable* card_table = heap_->GetCardTable();
3007       accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
3008       accounting::ObjectStack* live_stack = heap_->live_stack_.get();
3009       uint8_t* card_addr = card_table->CardFromAddr(obj);
3010       LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
3011                  << offset << "\n card value = " << static_cast<int>(*card_addr);
3012       if (heap_->IsValidObjectAddress(obj->GetClass())) {
3013         LOG(ERROR) << "Obj type " << obj->PrettyTypeOf();
3014       } else {
3015         LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
3016       }
3017 
3018       // Attempt to find the class inside of the recently freed objects.
3019       space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
3020       if (ref_space != nullptr && ref_space->IsMallocSpace()) {
3021         space::MallocSpace* space = ref_space->AsMallocSpace();
3022         mirror::Class* ref_class = space->FindRecentFreedObject(ref);
3023         if (ref_class != nullptr) {
3024           LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
3025                      << ref_class->PrettyClass();
3026         } else {
3027           LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
3028         }
3029       }
3030 
3031       if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
3032           ref->GetClass()->IsClass()) {
3033         LOG(ERROR) << "Ref type " << ref->PrettyTypeOf();
3034       } else {
3035         LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
3036                    << ") is not a valid heap address";
3037       }
3038 
3039       card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
3040       void* cover_begin = card_table->AddrFromCard(card_addr);
3041       void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
3042           accounting::CardTable::kCardSize);
3043       LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
3044           << "-" << cover_end;
3045       accounting::ContinuousSpaceBitmap* bitmap =
3046           heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
3047 
3048       if (bitmap == nullptr) {
3049         LOG(ERROR) << "Object " << obj << " has no bitmap";
3050         if (!VerifyClassClass(obj->GetClass())) {
3051           LOG(ERROR) << "Object " << obj << " failed class verification!";
3052         }
3053       } else {
3054         // Print out how the object is live.
3055         if (bitmap->Test(obj)) {
3056           LOG(ERROR) << "Object " << obj << " found in live bitmap";
3057         }
3058         if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
3059           LOG(ERROR) << "Object " << obj << " found in allocation stack";
3060         }
3061         if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
3062           LOG(ERROR) << "Object " << obj << " found in live stack";
3063         }
3064         if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
3065           LOG(ERROR) << "Ref " << ref << " found in allocation stack";
3066         }
3067         if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
3068           LOG(ERROR) << "Ref " << ref << " found in live stack";
3069         }
3070         // Attempt to see if the card table missed the reference.
3071         ScanVisitor scan_visitor;
3072         uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
3073         card_table->Scan<false>(bitmap, byte_cover_begin,
3074                                 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
3075       }
3076 
3077       // Search to see if any of the roots reference our object.
3078       RootMatchesObjectVisitor visitor1(obj);
3079       Runtime::Current()->VisitRoots(&visitor1);
3080       // Search to see if any of the roots reference our reference.
3081       RootMatchesObjectVisitor visitor2(ref);
3082       Runtime::Current()->VisitRoots(&visitor2);
3083     }
3084     return false;
3085   }
3086 
3087   Thread* const self_;
3088   Heap* const heap_;
3089   size_t* const fail_count_;
3090   const bool verify_referent_;
3091 };
3092 
3093 // Verify all references within an object, for use with HeapBitmap::Visit.
3094 class VerifyObjectVisitor {
3095  public:
3096   VerifyObjectVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent)
3097       : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
3098 
3099   void operator()(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
3100     // Note: we are verifying the references in obj but not obj itself; obj must be live, or
3101     // else how did we find it in the live bitmap?
3102     VerifyReferenceVisitor visitor(self_, heap_, fail_count_, verify_referent_);
3103     // The class doesn't count as a reference but we should verify it anyway.
3104     obj->VisitReferences(visitor, visitor);
3105   }
3106 
3107   void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
3108     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
3109     VerifyReferenceVisitor visitor(self_, heap_, fail_count_, verify_referent_);
3110     Runtime::Current()->VisitRoots(&visitor);
3111   }
3112 
3113   uint32_t GetFailureCount() const REQUIRES(Locks::mutator_lock_) {
3114     CHECK_EQ(self_, Thread::Current());
3115     return *fail_count_;
3116   }
3117 
3118  private:
3119   Thread* const self_;
3120   Heap* const heap_;
3121   size_t* const fail_count_;
3122   const bool verify_referent_;
3123 };
3124 
3125 void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) {
3126   // Slow path, the allocation stack push back must have already failed.
3127   DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr()));
3128   do {
3129     // TODO: Add handle VerifyObject.
3130     StackHandleScope<1> hs(self);
3131     HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3132     // Push our object into the reserve region of the allocation stack. This is only required due
3133     // to heap verification requiring that roots are live (either in the live bitmap or in the
3134     // allocation stack).
3135     CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
3136     CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
3137   } while (!allocation_stack_->AtomicPushBack(obj->Ptr()));
3138 }
3139 
3140 void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self,
3141                                                           ObjPtr<mirror::Object>* obj) {
3142   // Slow path, the allocation stack push back must have already failed.
3143   DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr()));
3144   StackReference<mirror::Object>* start_address;
3145   StackReference<mirror::Object>* end_address;
3146   while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
3147                                             &end_address)) {
3148     // TODO: Add handle VerifyObject.
3149     StackHandleScope<1> hs(self);
3150     HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3151     // Push our object into the reserve region of the allocation stack. This is only required due
3152     // to heap verification requiring that roots are live (either in the live bitmap or in the
3153     // allocation stack).
3154     CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
3155     // Push into the reserve allocation stack.
3156     CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
3157   }
3158   self->SetThreadLocalAllocationStack(start_address, end_address);
3159   // Retry on the new thread-local allocation stack.
3160   CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr()));  // Must succeed.
3161 }
3162 
3163 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
3164 size_t Heap::VerifyHeapReferences(bool verify_referents) {
3165   Thread* self = Thread::Current();
3166   Locks::mutator_lock_->AssertExclusiveHeld(self);
3167   // Let's sort our allocation stacks so that we can efficiently binary search them.
3168   allocation_stack_->Sort();
3169   live_stack_->Sort();
3170   // Since we sorted the allocation stack content, we need to revoke all
3171   // thread-local allocation stacks.
3172   RevokeAllThreadLocalAllocationStacks(self);
3173   size_t fail_count = 0;
3174   VerifyObjectVisitor visitor(self, this, &fail_count, verify_referents);
3175   // Verify objects in the allocation stack since these will be objects which were:
3176   // 1. Allocated prior to the GC (pre GC verification).
3177   // 2. Allocated during the GC (pre sweep GC verification).
3178   // We don't want to verify the objects in the live stack since they themselves may be
3179   // pointing to dead objects if they are not reachable.
3180   VisitObjectsPaused(visitor);
3181   // Verify the roots:
3182   visitor.VerifyRoots();
3183   if (visitor.GetFailureCount() > 0) {
3184     // Dump mod-union tables.
3185     for (const auto& table_pair : mod_union_tables_) {
3186       accounting::ModUnionTable* mod_union_table = table_pair.second;
3187       mod_union_table->Dump(LOG_STREAM(ERROR) << mod_union_table->GetName() << ": ");
3188     }
3189     // Dump remembered sets.
3190     for (const auto& table_pair : remembered_sets_) {
3191       accounting::RememberedSet* remembered_set = table_pair.second;
3192       remembered_set->Dump(LOG_STREAM(ERROR) << remembered_set->GetName() << ": ");
3193     }
3194     DumpSpaces(LOG_STREAM(ERROR));
3195   }
3196   return visitor.GetFailureCount();
3197 }
3198 
3199 class VerifyReferenceCardVisitor {
3200  public:
3201   VerifyReferenceCardVisitor(Heap* heap, bool* failed)
3202       REQUIRES_SHARED(Locks::mutator_lock_,
3203                             Locks::heap_bitmap_lock_)
3204       : heap_(heap), failed_(failed) {
3205   }
3206 
3207   // There are no card marks for native roots on a class.
3208   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
3209       const {}
3210   void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
3211 
3212   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
3213   // annotalysis on visitors.
3214   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
3215       NO_THREAD_SAFETY_ANALYSIS {
3216     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
3217     // Filter out class references since changing an object's class does not mark the card as dirty.
3218     // Also handles large objects, since the only reference they hold is a class reference.
3219     if (ref != nullptr && !ref->IsClass()) {
3220       accounting::CardTable* card_table = heap_->GetCardTable();
3221       // If the object is referencing something in the live stack other than a class, then it
3222       // must be on a dirty card; otherwise a card mark is missing.
3223       if (!card_table->AddrIsInCardTable(obj)) {
3224         LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
3225         *failed_ = true;
3226       } else if (!card_table->IsDirty(obj)) {
3227         // TODO: Check mod-union tables.
3228         // Card should be either kCardDirty if it got re-dirtied after we aged it, or
3229         // kCardDirty - 1 if it didn't get touched since we aged it.
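        // (Illustrative, assuming the usual constants: aging rewrites kCardDirty (0x70) to 0x6f,
        // and a subsequent write re-dirties the card back to 0x70.)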
3230         accounting::ObjectStack* live_stack = heap_->live_stack_.get();
3231         if (live_stack->ContainsSorted(ref)) {
3232           if (live_stack->ContainsSorted(obj)) {
3233             LOG(ERROR) << "Object " << obj << " found in live stack";
3234           }
3235           if (heap_->GetLiveBitmap()->Test(obj)) {
3236             LOG(ERROR) << "Object " << obj << " found in live bitmap";
3237           }
3238           LOG(ERROR) << "Object " << obj << " " << mirror::Object::PrettyTypeOf(obj)
3239                     << " references " << ref << " " << mirror::Object::PrettyTypeOf(ref)
3240                     << " in live stack";
3241 
3242           // Print which field of the object holds the offending reference.
3243           if (!obj->IsObjectArray()) {
3244             ObjPtr<mirror::Class> klass = is_static ? obj->AsClass() : obj->GetClass();
3245             CHECK(klass != nullptr);
3246             for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
3247               if (field.GetOffset().Int32Value() == offset.Int32Value()) {
3248                 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
3249                            << field.PrettyField();
3250                 break;
3251               }
3252             }
3253           } else {
3254             ObjPtr<mirror::ObjectArray<mirror::Object>> object_array =
3255                 obj->AsObjectArray<mirror::Object>();
3256             for (int32_t i = 0; i < object_array->GetLength(); ++i) {
3257               if (object_array->Get(i) == ref) {
3258                 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
3259               }
3260             }
3261           }
3262 
3263           *failed_ = true;
3264         }
3265       }
3266     }
3267   }
3268 
3269  private:
3270   Heap* const heap_;
3271   bool* const failed_;
3272 };
3273 
3274 class VerifyLiveStackReferences {
3275  public:
3276   explicit VerifyLiveStackReferences(Heap* heap)
3277       : heap_(heap),
3278         failed_(false) {}
3279 
3280   void operator()(mirror::Object* obj) const
3281       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3282     VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
3283     obj->VisitReferences(visitor, VoidFunctor());
3284   }
3285 
3286   bool Failed() const {
3287     return failed_;
3288   }
3289 
3290  private:
3291   Heap* const heap_;
3292   bool failed_;
3293 };
3294 
3295 bool Heap::VerifyMissingCardMarks() {
3296   Thread* self = Thread::Current();
3297   Locks::mutator_lock_->AssertExclusiveHeld(self);
3298   // We need to sort the live stack since we binary search it.
3299   live_stack_->Sort();
3300   // Since we sorted the allocation stack content, we need to revoke all
3301   // thread-local allocation stacks.
3302   RevokeAllThreadLocalAllocationStacks(self);
3303   VerifyLiveStackReferences visitor(this);
3304   GetLiveBitmap()->Visit(visitor);
3305   // We can verify objects in the live stack since none of these should reference dead objects.
3306   for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
3307     if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
3308       visitor(it->AsMirrorPtr());
3309     }
3310   }
3311   return !visitor.Failed();
3312 }
3313 
3314 void Heap::SwapStacks() {
3315   if (kUseThreadLocalAllocationStack) {
3316     live_stack_->AssertAllZero();
3317   }
3318   allocation_stack_.swap(live_stack_);
3319 }
3320 
3321 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
3322   // This must be called only during the pause.
3323   DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
3324   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
3325   MutexLock mu2(self, *Locks::thread_list_lock_);
3326   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
3327   for (Thread* t : thread_list) {
3328     t->RevokeThreadLocalAllocationStack();
3329   }
3330 }
3331 
3332 void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
3333   if (kIsDebugBuild) {
3334     if (rosalloc_space_ != nullptr) {
3335       rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
3336     }
3337     if (bump_pointer_space_ != nullptr) {
3338       bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
3339     }
3340   }
3341 }
3342 
3343 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
3344   if (kIsDebugBuild) {
3345     if (bump_pointer_space_ != nullptr) {
3346       bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
3347     }
3348   }
3349 }
3350 
3351 accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
3352   auto it = mod_union_tables_.find(space);
3353   if (it == mod_union_tables_.end()) {
3354     return nullptr;
3355   }
3356   return it->second;
3357 }
3358 
3359 accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3360   auto it = remembered_sets_.find(space);
3361   if (it == remembered_sets_.end()) {
3362     return nullptr;
3363   }
3364   return it->second;
3365 }
3366 
3367 void Heap::ProcessCards(TimingLogger* timings,
3368                         bool use_rem_sets,
3369                         bool process_alloc_space_cards,
3370                         bool clear_alloc_space_cards) {
3371   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3372   // Clear cards and keep track of cards cleared in the mod-union table.
3373   for (const auto& space : continuous_spaces_) {
3374     accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
3375     accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
3376     if (table != nullptr) {
3377       const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3378           "ImageModUnionClearCards";
3379       TimingLogger::ScopedTiming t2(name, timings);
3380       table->ProcessCards();
3381     } else if (use_rem_sets && rem_set != nullptr) {
3382       DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
3383           << static_cast<int>(collector_type_);
3384       TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
3385       rem_set->ClearCards();
3386     } else if (process_alloc_space_cards) {
3387       TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
3388       if (clear_alloc_space_cards) {
3389         uint8_t* end = space->End();
3390         if (space->IsImageSpace()) {
3391           // Image space end is the end of the mirror objects, it is not necessarily page or card
3392           // aligned. Align up so that the check in ClearCardRange does not fail.
3393           end = AlignUp(end, accounting::CardTable::kCardSize);
3394         }
3395         card_table_->ClearCardRange(space->Begin(), end);
3396       } else {
3397         // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3398         // cards were dirty before the GC started.
3399         // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3400         // -> clean(cleaning thread).
3401         // The races can leave a card either aged or unaged. Since we checkpoint the roots
3402         // first and then scan / update the mod union tables, we will always scan the card
3403         // either way. If we end up with a non-aged card, we scan it in the pause.
3404         card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3405                                        VoidFunctor());
3406       }
3407     }
3408   }
3409 }
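// A rough sketch of the card-state flow handled by ProcessCards above (illustrative summary,
// not from the original source): image/zygote spaces funnel their dirty cards into a
// mod-union table; alloc spaces with a remembered set (GSS) clear cards into that set;
// otherwise cards are either cleared outright or aged, e.g.
//   dirty -> aged    (ModifyCardsAtomic with AgeCardVisitor, before the pause)
//   aged  -> scanned later; a card re-dirtied by a racing mutator is scanned either way.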
3410 
3411 struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
3412   mirror::Object* MarkObject(mirror::Object* obj) override {
3413     return obj;
3414   }
3415   void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {
3416   }
3417 };
3418 
3419 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3420   Thread* const self = Thread::Current();
3421   TimingLogger* const timings = current_gc_iteration_.GetTimings();
3422   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3423   if (verify_pre_gc_heap_) {
3424     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
3425     size_t failures = VerifyHeapReferences();
3426     if (failures > 0) {
3427       LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3428           << " failures";
3429     }
3430   }
3431   // Check that all objects which reference things in the live stack are on dirty cards.
3432   if (verify_missing_card_marks_) {
3433     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
3434     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
3435     SwapStacks();
3436     // Sort the live stack so that we can quickly binary search it later.
3437     CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3438                                     << " missing card mark verification failed\n" << DumpSpaces();
3439     SwapStacks();
3440   }
3441   if (verify_mod_union_table_) {
3442     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
3443     ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
3444     for (const auto& table_pair : mod_union_tables_) {
3445       accounting::ModUnionTable* mod_union_table = table_pair.second;
3446       IdentityMarkHeapReferenceVisitor visitor;
3447       mod_union_table->UpdateAndMarkReferences(&visitor);
3448       mod_union_table->Verify();
3449     }
3450   }
3451 }
3452 
3453 void Heap::PreGcVerification(collector::GarbageCollector* gc) {
3454   if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
3455     collector::GarbageCollector::ScopedPause pause(gc, false);
3456     PreGcVerificationPaused(gc);
3457   }
3458 }
3459 
3460 void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) {
3461   // TODO: Add a new runtime option for this?
3462   if (verify_pre_gc_rosalloc_) {
3463     RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
3464   }
3465 }
3466 
3467 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
3468   Thread* const self = Thread::Current();
3469   TimingLogger* const timings = current_gc_iteration_.GetTimings();
3470   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3471   // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3472   // reachable objects.
3473   if (verify_pre_sweeping_heap_) {
3474     TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
3475     CHECK_NE(self->GetState(), kRunnable);
3476     {
3477       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3478       // Swapping bound bitmaps does nothing.
3479       gc->SwapBitmaps();
3480     }
3481     // Pass in false since concurrent reference processing can mean that the reference referents
3482     // may point to dead objects at the point at which PreSweepingGcVerification is called.
3483     size_t failures = VerifyHeapReferences(false);
3484     if (failures > 0) {
3485       LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3486           << " failures";
3487     }
3488     {
3489       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3490       gc->SwapBitmaps();
3491     }
3492   }
3493   if (verify_pre_sweeping_rosalloc_) {
3494     RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3495   }
3496 }
3497 
3498 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3499   // Only pause if we have to do some verification.
3500   Thread* const self = Thread::Current();
3501   TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
3502   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3503   if (verify_system_weaks_) {
3504     ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3505     collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3506     mark_sweep->VerifySystemWeaks();
3507   }
3508   if (verify_post_gc_rosalloc_) {
3509     RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
3510   }
3511   if (verify_post_gc_heap_) {
3512     TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
3513     size_t failures = VerifyHeapReferences();
3514     if (failures > 0) {
3515       LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
3516           << " failures";
3517     }
3518   }
3519 }
3520 
3521 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
3522   if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
3523     collector::GarbageCollector::ScopedPause pause(gc, false);
3524     PostGcVerificationPaused(gc);
3525   }
3526 }
3527 
3528 void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
3529   TimingLogger::ScopedTiming t(name, timings);
3530   for (const auto& space : continuous_spaces_) {
3531     if (space->IsRosAllocSpace()) {
3532       VLOG(heap) << name << " : " << space->GetName();
3533       space->AsRosAllocSpace()->Verify();
3534     }
3535   }
3536 }
3537 
3538 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
3539   ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
3540   MutexLock mu(self, *gc_complete_lock_);
3541   return WaitForGcToCompleteLocked(cause, self);
3542 }
3543 
3544 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
3545   gc_complete_cond_->CheckSafeToWait(self);
3546   collector::GcType last_gc_type = collector::kGcTypeNone;
3547   GcCause last_gc_cause = kGcCauseNone;
3548   uint64_t wait_start = NanoTime();
3549   while (collector_type_running_ != kCollectorTypeNone) {
3550     if (self != task_processor_->GetRunningThread()) {
3551       // The current thread is about to wait for a currently running
3552       // collection to finish. If the waiting thread is not the heap
3553       // task daemon thread, the currently running collection is
3554       // considered a blocking GC.
3555       running_collection_is_blocking_ = true;
3556       VLOG(gc) << "Waiting for a blocking GC " << cause;
3557     }
3558     SCOPED_TRACE << "GC: Wait For Completion " << cause;
3559     // We must wait; change the thread state and then sleep on gc_complete_cond_.
3560     gc_complete_cond_->Wait(self);
3561     last_gc_type = last_gc_type_;
3562     last_gc_cause = last_gc_cause_;
3563   }
3564   uint64_t wait_time = NanoTime() - wait_start;
3565   total_wait_time_ += wait_time;
3566   if (wait_time > long_pause_log_threshold_) {
3567     LOG(INFO) << "WaitForGcToComplete blocked " << cause << " on " << last_gc_cause << " for "
3568               << PrettyDuration(wait_time);
3569   }
3570   if (self != task_processor_->GetRunningThread()) {
3571     // The current thread is about to run a collection. If the thread
3572     // is not the heap task daemon thread, it's considered a
3573     // blocking GC (i.e., blocking itself).
3574     running_collection_is_blocking_ = true;
3575     // Don't log fake "GC" types that are only used for debugger or hidden APIs. If we log these,
3576     // it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
3577     if (cause == kGcCauseForAlloc ||
3578         cause == kGcCauseForNativeAlloc ||
3579         cause == kGcCauseDisableMovingGc) {
3580       VLOG(gc) << "Starting a blocking GC " << cause;
3581     }
3582   }
3583   return last_gc_type;
3584 }
3585 
3586 void Heap::DumpForSigQuit(std::ostream& os) {
3587   os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
3588      << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
3589   DumpGcPerformanceInfo(os);
3590 }
3591 
3592 size_t Heap::GetPercentFree() {
3593   return static_cast<size_t>(100.0f * static_cast<float>(
3594       GetFreeMemory()) / target_footprint_.load(std::memory_order_relaxed));
3595 }
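// Worked example for GetPercentFree (illustrative values, not from the source): with 25MB
// free against a 100MB target footprint, this returns 25.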
3596 
3597 void Heap::SetIdealFootprint(size_t target_footprint) {
3598   if (target_footprint > GetMaxMemory()) {
3599     VLOG(gc) << "Clamp target GC heap from " << PrettySize(target_footprint) << " to "
3600              << PrettySize(GetMaxMemory());
3601     target_footprint = GetMaxMemory();
3602   }
3603   target_footprint_.store(target_footprint, std::memory_order_relaxed);
3604 }
3605 
3606 bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
3607   if (kMovingCollector) {
3608     space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true);
3609     if (space != nullptr) {
3610       // TODO: Check large object?
3611       return space->CanMoveObjects();
3612     }
3613   }
3614   return false;
3615 }
3616 
3617 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
3618   for (auto* collector : garbage_collectors_) {
3619     if (collector->GetCollectorType() == collector_type_ &&
3620         collector->GetGcType() == gc_type) {
3621       return collector;
3622     }
3623   }
3624   return nullptr;
3625 }
3626 
3627 double Heap::HeapGrowthMultiplier() const {
3628   // If we don't care about pause times, we are in the background, so return 1.0.
3629   if (!CareAboutPauseTimes()) {
3630     return 1.0;
3631   }
3632   return foreground_heap_growth_multiplier_;
3633 }
3634 
3635 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
3636                               size_t bytes_allocated_before_gc) {
3637   // We know what our utilization is at this moment.
3638   // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
3639   const size_t bytes_allocated = GetBytesAllocated();
3640   // Trace the new heap size after the GC is finished.
3641   TraceHeapSize(bytes_allocated);
3642   uint64_t target_size;
3643   collector::GcType gc_type = collector_ran->GetGcType();
3644   // Use the multiplier to grow more for foreground.
3645   const double multiplier = HeapGrowthMultiplier();
3646 
3647   const size_t adjusted_min_free = static_cast<size_t>(min_free_ * multiplier);
3648   const size_t adjusted_max_free = static_cast<size_t>(max_free_ * multiplier);
3649   if (gc_type != collector::kGcTypeSticky) {
3650     // Grow the heap for non sticky GC.
3651     uint64_t delta = bytes_allocated * (1.0 / GetTargetHeapUtilization() - 1.0);
3652     DCHECK_LE(delta, std::numeric_limits<size_t>::max()) << "bytes_allocated=" << bytes_allocated
3653         << " target_utilization_=" << target_utilization_;
3654     target_size = bytes_allocated + delta * multiplier;
3655     target_size = std::min(target_size,
3656                            static_cast<uint64_t>(bytes_allocated + adjusted_max_free));
3657     target_size = std::max(target_size,
3658                            static_cast<uint64_t>(bytes_allocated + adjusted_min_free));
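    // Worked example with illustrative numbers (not from the source): with target
    // utilization 0.75 and bytes_allocated = 60MB, delta = 60MB * (1/0.75 - 1) = 20MB;
    // with a foreground multiplier of 2.0, target_size = 60MB + 40MB = 100MB, which the
    // two clamps above keep within [bytes_allocated + adjusted_min_free,
    // bytes_allocated + adjusted_max_free].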
3659     next_gc_type_ = collector::kGcTypeSticky;
3660   } else {
3661     collector::GcType non_sticky_gc_type = NonStickyGcType();
3662     // Find what the next non sticky collector will be.
3663     collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
3664     if (use_generational_cc_) {
3665       if (non_sticky_collector == nullptr) {
3666         non_sticky_collector = FindCollectorByGcType(collector::kGcTypePartial);
3667       }
3668       CHECK(non_sticky_collector != nullptr);
3669     }
3670     double sticky_gc_throughput_adjustment = GetStickyGcThroughputAdjustment(use_generational_cc_);
3671 
3672     // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3673     // do another sticky collection next.
3674     // We also check that the bytes allocated aren't over the target_footprint, or
3675     // concurrent_start_bytes in case of concurrent GCs, in order to prevent a
3676     // pathological case where dead objects that aren't reclaimed by the sticky GC could
3677     // accumulate if the sticky GC throughput always remained >= the full/partial throughput.
3678     size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
3679     if (current_gc_iteration_.GetEstimatedThroughput() * sticky_gc_throughput_adjustment >=
3680         non_sticky_collector->GetEstimatedMeanThroughput() &&
3681         non_sticky_collector->NumberOfIterations() > 0 &&
3682         bytes_allocated <= (IsGcConcurrent() ? concurrent_start_bytes_ : target_footprint)) {
3683       next_gc_type_ = collector::kGcTypeSticky;
3684     } else {
3685       next_gc_type_ = non_sticky_gc_type;
3686     }
3687     // If we have freed enough memory, shrink the heap back down.
3688     if (bytes_allocated + adjusted_max_free < target_footprint) {
3689       target_size = bytes_allocated + adjusted_max_free;
3690     } else {
3691       target_size = std::max(bytes_allocated, target_footprint);
3692     }
3693   }
3694   CHECK_LE(target_size, std::numeric_limits<size_t>::max());
3695   if (!ignore_target_footprint_) {
3696     SetIdealFootprint(target_size);
3697     if (IsGcConcurrent()) {
3698       const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
3699           current_gc_iteration_.GetFreedLargeObjectBytes() +
3700           current_gc_iteration_.GetFreedRevokeBytes();
3701       // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3702       // how many bytes were allocated during the GC we need to add freed_bytes back on.
3703       CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
3704       const size_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
3705           bytes_allocated_before_gc;
3706       // Calculate when to perform the next ConcurrentGC.
3707       // Estimate how many remaining bytes we will have when we need to start the next GC.
3708       size_t remaining_bytes = bytes_allocated_during_gc;
3709       remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
3710       remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
3711       size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
3712       if (UNLIKELY(remaining_bytes > target_footprint)) {
3713         // A situation that should never happen: the estimated allocation rate implies that we
3714         // would exceed the application's entire footprint. Schedule another GC nearly straight
3715         // away.
3716         remaining_bytes = std::min(kMinConcurrentRemainingBytes, target_footprint);
3717       }
3718       DCHECK_LE(target_footprint_.load(std::memory_order_relaxed), GetMaxMemory());
3719       // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3720       // allocation rate is very high, remaining_bytes could tell us that we should start a GC
3721       // right away.
3722       concurrent_start_bytes_ = std::max(target_footprint - remaining_bytes, bytes_allocated);
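      // Illustrative reading (hypothetical sizes): if the clamped estimate is B bytes and
      // target_footprint is 100MB, the next concurrent GC is requested once bytes_allocated
      // crosses 100MB - B; a very high allocation rate (large B) therefore starts the next
      // GC almost immediately.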
3723     }
3724   }
3725 }
3726 
3727 void Heap::ClampGrowthLimit() {
3728   // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
3729   ScopedObjectAccess soa(Thread::Current());
3730   WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
3731   capacity_ = growth_limit_;
3732   for (const auto& space : continuous_spaces_) {
3733     if (space->IsMallocSpace()) {
3734       gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3735       malloc_space->ClampGrowthLimit();
3736     }
3737   }
3738   if (collector_type_ == kCollectorTypeCC) {
3739     DCHECK(region_space_ != nullptr);
3740     // Twice the capacity as CC needs extra space for evacuating objects.
3741     region_space_->ClampGrowthLimit(2 * capacity_);
3742   }
3743   // The main space backup is not in continuous_spaces_ (for performance), so clamp it here.
3744   if (main_space_backup_.get() != nullptr) {
3745     main_space_backup_->ClampGrowthLimit();
3746   }
3747 }
3748 
3749 void Heap::ClearGrowthLimit() {
3750   if (target_footprint_.load(std::memory_order_relaxed) == growth_limit_
3751       && growth_limit_ < capacity_) {
3752     target_footprint_.store(capacity_, std::memory_order_relaxed);
3753     concurrent_start_bytes_ =
3754         UnsignedDifference(capacity_, kMinConcurrentRemainingBytes);
3755   }
3756   growth_limit_ = capacity_;
3757   ScopedObjectAccess soa(Thread::Current());
3758   for (const auto& space : continuous_spaces_) {
3759     if (space->IsMallocSpace()) {
3760       gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3761       malloc_space->ClearGrowthLimit();
3762       malloc_space->SetFootprintLimit(malloc_space->Capacity());
3763     }
3764   }
3765   // The main space backup is not in continuous_spaces_ (for performance), so clear it here.
3766   if (main_space_backup_.get() != nullptr) {
3767     main_space_backup_->ClearGrowthLimit();
3768     main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3769   }
3770 }
3771 
3772 void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
3773   ScopedObjectAccess soa(self);
3774   ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
3775   jvalue args[1];
3776   args[0].l = arg.get();
3777   InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
3778   // Restore object in case it gets moved.
3779   *object = soa.Decode<mirror::Object>(arg.get());
3780 }
3781 
3782 void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
3783                                             bool force_full,
3784                                             ObjPtr<mirror::Object>* obj) {
3785   StackHandleScope<1> hs(self);
3786   HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3787   RequestConcurrentGC(self, kGcCauseBackground, force_full);
3788 }
3789 
3790 class Heap::ConcurrentGCTask : public HeapTask {
3791  public:
3792   ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
3793       : HeapTask(target_time), cause_(cause), force_full_(force_full) {}
3794   void Run(Thread* self) override {
3795     gc::Heap* heap = Runtime::Current()->GetHeap();
3796     heap->ConcurrentGC(self, cause_, force_full_);
3797     heap->ClearConcurrentGCRequest();
3798   }
3799 
3800  private:
3801   const GcCause cause_;
3802   const bool force_full_;  // If true, force full (or partial) collection.
3803 };
3804 
3805 static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
3806   Runtime* runtime = Runtime::Current();
3807   return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3808       !self->IsHandlingStackOverflow();
3809 }
3810 
3811 void Heap::ClearConcurrentGCRequest() {
3812   concurrent_gc_pending_.store(false, std::memory_order_relaxed);
3813 }
3814 
3815 void Heap::RequestConcurrentGC(Thread* self, GcCause cause, bool force_full) {
3816   if (CanAddHeapTask(self) &&
3817       concurrent_gc_pending_.CompareAndSetStrongSequentiallyConsistent(false, true)) {
3818     task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(),  // Start straight away.
3819                                                         cause,
3820                                                         force_full));
3821   }
3822 }
3823 
3824 void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full) {
3825   if (!Runtime::Current()->IsShuttingDown(self)) {
3826     // Wait for any GCs currently running to finish.
3827     if (WaitForGcToComplete(cause, self) == collector::kGcTypeNone) {
3828       // If we can't run the GC type we wanted to run, find the next appropriate one and try
3829       // that instead. E.g. can't do partial, so do full instead.
3830       collector::GcType next_gc_type = next_gc_type_;
3831       // If forcing full and next gc type is sticky, override with a non-sticky type.
3832       if (force_full && next_gc_type == collector::kGcTypeSticky) {
3833         next_gc_type = NonStickyGcType();
3834       }
3835       if (CollectGarbageInternal(next_gc_type, cause, false) == collector::kGcTypeNone) {
3836         for (collector::GcType gc_type : gc_plan_) {
3837           // Attempt to run the collector, if we succeed, we are done.
3838           if (gc_type > next_gc_type &&
3839               CollectGarbageInternal(gc_type, cause, false) != collector::kGcTypeNone) {
3840             break;
3841           }
3842         }
3843       }
3844     }
3845   }
3846 }
3847 
3848 class Heap::CollectorTransitionTask : public HeapTask {
3849  public:
3850   explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
3851 
3852   void Run(Thread* self) override {
3853     gc::Heap* heap = Runtime::Current()->GetHeap();
3854     heap->DoPendingCollectorTransition();
3855     heap->ClearPendingCollectorTransition(self);
3856   }
3857 };
3858 
3859 void Heap::ClearPendingCollectorTransition(Thread* self) {
3860   MutexLock mu(self, *pending_task_lock_);
3861   pending_collector_transition_ = nullptr;
3862 }
3863 
3864 void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
3865   Thread* self = Thread::Current();
3866   desired_collector_type_ = desired_collector_type;
3867   if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
3868     return;
3869   }
3870   if (collector_type_ == kCollectorTypeCC) {
3871     // For CC, we invoke a full compaction when going to the background, but the collector type
3872     // doesn't change.
3873     DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground);
3874   }
3875   DCHECK_NE(collector_type_, kCollectorTypeCCBackground);
3876   CollectorTransitionTask* added_task = nullptr;
3877   const uint64_t target_time = NanoTime() + delta_time;
3878   {
3879     MutexLock mu(self, *pending_task_lock_);
3880     // If we have an existing collector transition, update the target time to be the new target.
3881     if (pending_collector_transition_ != nullptr) {
3882       task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
3883       return;
3884     }
3885     added_task = new CollectorTransitionTask(target_time);
3886     pending_collector_transition_ = added_task;
3887   }
3888   task_processor_->AddTask(self, added_task);
3889 }
3890 
3891 class Heap::HeapTrimTask : public HeapTask {
3892  public:
3893   explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
3894   void Run(Thread* self) override {
3895     gc::Heap* heap = Runtime::Current()->GetHeap();
3896     heap->Trim(self);
3897     heap->ClearPendingTrim(self);
3898   }
3899 };
3900 
3901 void Heap::ClearPendingTrim(Thread* self) {
3902   MutexLock mu(self, *pending_task_lock_);
3903   pending_heap_trim_ = nullptr;
3904 }
3905 
3906 void Heap::RequestTrim(Thread* self) {
3907   if (!CanAddHeapTask(self)) {
3908     return;
3909   }
3910   // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3911   // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3912   // a space it will hold its lock and can become a cause of jank.
3913   // Note that the large object space self-trims, and the Zygote space was trimmed at fork
3914   // time and has been unchanging since.
3915 
3916   // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3917   // because that only marks object heads, so a large array looks like lots of empty space. We
3918   // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
3919   // to utilization (which is probably inversely proportional to how much benefit we can expect).
3920   // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3921   // not how much use we're making of those pages.
3922   HeapTrimTask* added_task = nullptr;
3923   {
3924     MutexLock mu(self, *pending_task_lock_);
3925     if (pending_heap_trim_ != nullptr) {
3926       // Already have a heap trim request in task processor, ignore this request.
3927       return;
3928     }
3929     added_task = new HeapTrimTask(kHeapTrimWait);
3930     pending_heap_trim_ = added_task;
3931   }
3932   task_processor_->AddTask(self, added_task);
3933 }
3934 
3935 void Heap::IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke) {
3936   size_t previous_num_bytes_freed_revoke =
3937       num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_relaxed);
3938   // Check the updated value is less than the number of bytes allocated. There is a risk of
3939   // execution being suspended between the increment above and the CHECK below, leading to
3940   // the use of previous_num_bytes_freed_revoke in the comparison.
3941   CHECK_GE(num_bytes_allocated_.load(std::memory_order_relaxed),
3942            previous_num_bytes_freed_revoke + freed_bytes_revoke);
3943 }
3944 
3945 void Heap::RevokeThreadLocalBuffers(Thread* thread) {
3946   if (rosalloc_space_ != nullptr) {
3947     size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3948     if (freed_bytes_revoke > 0U) {
3949       IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
3950     }
3951   }
3952   if (bump_pointer_space_ != nullptr) {
3953     CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
3954   }
3955   if (region_space_ != nullptr) {
3956     CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
3957   }
3958 }
3959 
3960 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3961   if (rosalloc_space_ != nullptr) {
3962     size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3963     if (freed_bytes_revoke > 0U) {
3964       IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
3965     }
3966   }
3967 }
3968 
3969 void Heap::RevokeAllThreadLocalBuffers() {
3970   if (rosalloc_space_ != nullptr) {
3971     size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
3972     if (freed_bytes_revoke > 0U) {
3973       IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
3974     }
3975   }
3976   if (bump_pointer_space_ != nullptr) {
3977     CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
3978   }
3979   if (region_space_ != nullptr) {
3980     CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
3981   }
3982 }
3983 
3984 bool Heap::IsGCRequestPending() const {
3985   return concurrent_gc_pending_.load(std::memory_order_relaxed);
3986 }
3987 
3988 void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
3989   env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
3990                             WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
3991                             static_cast<jlong>(timeout));
3992 }
3993 
3994 // For GC triggering purposes, we count old (pre-last-GC) and new native allocations as
3995 // different fractions of Java allocations.
3996 // For now, we essentially do not count old native allocations at all, so that we can preserve the
3997 // existing behavior of not limiting native heap size. If we seriously considered it, we would
3998 // have to adjust collection thresholds when we encounter large amounts of old native memory,
3999 // and handle native out-of-memory situations.
4000 
4001 static constexpr size_t kOldNativeDiscountFactor = 65536;  // Approximately infinite for now.
4002 static constexpr size_t kNewNativeDiscountFactor = 2;
4003 
4004 // If weighted java + native memory use exceeds our target by kStopForNativeFactor, and
4005 // newly allocated memory exceeds kHugeNativeAllocs, we wait for GC to complete to avoid
4006 // running out of memory.
4007 static constexpr float kStopForNativeFactor = 4.0;
4008 // TODO: Allow this to be tuned. We want this much smaller for some apps, like Calculator.
4009 // But making it too small can cause jank in apps like launcher that intentionally allocate
4010 // large amounts of memory in rapid succession. (b/122099093)
4011 // For now, we punt, and use a value that should be easily large enough to disable this in all
4012 // questionable settings, but that is clearly too large to be effective for small memory devices.
4013 static constexpr size_t kHugeNativeAllocs = 1 * GB;
4014 
4015 // Return the ratio of the weighted native + java allocated bytes to its target value.
4016 // A return value > 1.0 means we should collect. Significantly larger values mean we're falling
4017 // behind.
4018 inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent) {
4019   // Collection check for native allocation. Does not enforce Java heap bounds.
4020   // With adj_start_bytes defined below, effectively checks
4021   // <java bytes allocd> + c1*<old native allocd> + c2*<new native allocd> >= adj_start_bytes,
4022   // where c1 and c2 are 1 divided by the discount factors defined above.
4023   size_t old_native_bytes = old_native_bytes_allocated_.load(std::memory_order_relaxed);
4024   if (old_native_bytes > current_native_bytes) {
4025     // Net decrease; skip the check, but update old value.
4026     // It's OK to lose an update if two stores race.
4027     old_native_bytes_allocated_.store(current_native_bytes, std::memory_order_relaxed);
4028     return 0.0;
4029   } else {
4030     size_t new_native_bytes = UnsignedDifference(current_native_bytes, old_native_bytes);
4031     size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor
4032         + old_native_bytes / kOldNativeDiscountFactor;
4033     size_t add_bytes_allowed = static_cast<size_t>(
4034         NativeAllocationGcWatermark() * HeapGrowthMultiplier());
4035     size_t java_gc_start_bytes = is_gc_concurrent
4036         ? concurrent_start_bytes_
4037         : target_footprint_.load(std::memory_order_relaxed);
4038     size_t adj_start_bytes = UnsignedSum(java_gc_start_bytes,
4039                                          add_bytes_allowed / kNewNativeDiscountFactor);
4040     return static_cast<float>(GetBytesAllocated() + weighted_native_bytes)
4041          / static_cast<float>(adj_start_bytes);
4042   }
4043 }
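// Worked example using the discount factors above (byte counts are illustrative): with 64MB
// of new native bytes and 64MB of old native bytes, weighted_native_bytes is
// 64MB/2 + 64MB/65536 = 32MB + 1KB, so old native memory is almost entirely discounted.
// The returned ratio is then (java bytes + ~32MB) / adj_start_bytes; >= 1.0 means collect.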
4044 
4045 inline void Heap::CheckGCForNative(Thread* self) {
4046   bool is_gc_concurrent = IsGcConcurrent();
4047   size_t current_native_bytes = GetNativeBytes();
4048   float gc_urgency = NativeMemoryOverTarget(current_native_bytes, is_gc_concurrent);
4049   if (UNLIKELY(gc_urgency >= 1.0)) {
4050     if (is_gc_concurrent) {
4051       RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true);
4052       if (gc_urgency > kStopForNativeFactor
4053           && current_native_bytes > kHugeNativeAllocs) {
4054         // We're in danger of running out of memory due to rampant native allocation.
4055         if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
4056           LOG(INFO) << "Stopping for native allocation, urgency: " << gc_urgency;
4057         }
4058         WaitForGcToComplete(kGcCauseForNativeAlloc, self);
4059       }
4060     } else {
4061       CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
4062     }
4063   }
4064 }
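// Putting the thresholds together (illustrative reading of the code above): a gc_urgency of
// at least 1.0 merely requests a concurrent GC; only when the urgency exceeds
// kStopForNativeFactor (4.0) and current native bytes exceed kHugeNativeAllocs (1GB) does
// the allocating thread block until the GC completes.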
4065 
4066 // About kNotifyNativeInterval allocations have occurred. Check whether we should garbage collect.
4067 void Heap::NotifyNativeAllocations(JNIEnv* env) {
4068   native_objects_notified_.fetch_add(kNotifyNativeInterval, std::memory_order_relaxed);
4069   CheckGCForNative(ThreadForEnv(env));
4070 }
4071 
4072 // Register a native allocation with an explicit size.
4073 // This should only be done for large allocations of non-malloc memory, which we wouldn't
4074 // otherwise see.
4075 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
4076   native_bytes_registered_.fetch_add(bytes, std::memory_order_relaxed);
4077   uint32_t objects_notified =
4078       native_objects_notified_.fetch_add(1, std::memory_order_relaxed);
4079   if (objects_notified % kNotifyNativeInterval == kNotifyNativeInterval - 1
4080       || bytes > kCheckImmediatelyThreshold) {
4081     CheckGCForNative(ThreadForEnv(env));
4082   }
4083 }
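// Note on the check above: fetch_add returns the pre-increment value, so the modulus test
// fires on every kNotifyNativeInterval-th registration (e.g., with a hypothetical interval
// of 32, pre-increment counts 31, 63, 95, ... trigger it), while a single allocation larger
// than kCheckImmediatelyThreshold bypasses the batching entirely.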
4084 
4085 void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
4086   size_t allocated;
4087   size_t new_freed_bytes;
4088   do {
4089     allocated = native_bytes_registered_.load(std::memory_order_relaxed);
4090     new_freed_bytes = std::min(allocated, bytes);
4091     // We should not be registering more freed bytes than allocated bytes,
4092     // but keep going correctly in non-debug builds.
4093     DCHECK_EQ(new_freed_bytes, bytes);
4094   } while (!native_bytes_registered_.CompareAndSetWeakRelaxed(allocated,
4095                                                               allocated - new_freed_bytes));
4096 }
4097 
4098 size_t Heap::GetTotalMemory() const {
4099   return std::max(target_footprint_.load(std::memory_order_relaxed), GetBytesAllocated());
4100 }
4101 
4102 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
4103   DCHECK(mod_union_table != nullptr);
4104   mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
4105 }
4106 
4107 void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
4108   // Compare rounded sizes since the allocation may have been retried after rounding the size.
4109   // See b/37885600
4110   CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
4111         (c->IsVariableSize() ||
4112             RoundUp(c->GetObjectSize(), kObjectAlignment) ==
4113                 RoundUp(byte_count, kObjectAlignment)))
4114       << "ClassFlags=" << c->GetClassFlags()
4115       << " IsClassClass=" << c->IsClassClass()
4116       << " byte_count=" << byte_count
4117       << " IsVariableSize=" << c->IsVariableSize()
4118       << " ObjectSize=" << c->GetObjectSize()
4119       << " sizeof(Class)=" << sizeof(mirror::Class)
4120       << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag=*/ "klass");
4121   CHECK_GE(byte_count, sizeof(mirror::Object));
4122 }
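// Example of what the CHECK above accepts (hypothetical sizes): for a fixed-size class with
// GetObjectSize() == 24 and 8-byte kObjectAlignment, any byte_count that rounds up to 24
// passes; variable-size classes (e.g. arrays and strings) and mirror::Class itself skip the
// exact-size comparison.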
4123 
4124 void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
4125   CHECK(remembered_set != nullptr);
4126   space::Space* space = remembered_set->GetSpace();
4127   CHECK(space != nullptr);
4128   CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
4129   remembered_sets_.Put(space, remembered_set);
4130   CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
4131 }
4132 
4133 void Heap::RemoveRememberedSet(space::Space* space) {
4134   CHECK(space != nullptr);
4135   auto it = remembered_sets_.find(space);
4136   CHECK(it != remembered_sets_.end());
4137   delete it->second;
4138   remembered_sets_.erase(it);
4139   CHECK(remembered_sets_.find(space) == remembered_sets_.end());
4140 }
4141 
4142 void Heap::ClearMarkedObjects() {
4143   // Clear all of the spaces' mark bitmaps.
4144   for (const auto& space : GetContinuousSpaces()) {
4145     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
4146     if (space->GetLiveBitmap() != mark_bitmap) {
4147       mark_bitmap->Clear();
4148     }
4149   }
4150   // Clear the marked objects in the discontinuous space object sets.
4151   for (const auto& space : GetDiscontinuousSpaces()) {
4152     space->GetMarkBitmap()->Clear();
4153   }
4154 }
4155 
4156 void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
4157   allocation_records_.reset(records);
4158 }
4159 
4160 void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
4161   if (IsAllocTrackingEnabled()) {
4162     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4163     if (IsAllocTrackingEnabled()) {
4164       GetAllocationRecords()->VisitRoots(visitor);
4165     }
4166   }
4167 }
4168 
4169 void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
4170   if (IsAllocTrackingEnabled()) {
4171     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4172     if (IsAllocTrackingEnabled()) {
4173       GetAllocationRecords()->SweepAllocationRecords(visitor);
4174     }
4175   }
4176 }
4177 
4178 void Heap::AllowNewAllocationRecords() const {
4179   CHECK(!kUseReadBarrier);
4180   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4181   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4182   if (allocation_records != nullptr) {
4183     allocation_records->AllowNewAllocationRecords();
4184   }
4185 }
4186 
4187 void Heap::DisallowNewAllocationRecords() const {
4188   CHECK(!kUseReadBarrier);
4189   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4190   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4191   if (allocation_records != nullptr) {
4192     allocation_records->DisallowNewAllocationRecords();
4193   }
4194 }
4195 
4196 void Heap::BroadcastForNewAllocationRecords() const {
4197   // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
4198   // be set to false while some threads are waiting for system weak access in
4199   // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
4200   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4201   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4202   if (allocation_records != nullptr) {
4203     allocation_records->BroadcastForNewAllocationRecords();
4204   }
4205 }
4206 
4207 void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
4208   DCHECK(gc_stress_mode_);
4209   auto* const runtime = Runtime::Current();
4210   if (runtime->GetClassLinker()->IsInitialized() && !runtime->IsActiveTransaction()) {
4211     // Check if we should GC.
4212     bool new_backtrace = false;
4213     {
4214       static constexpr size_t kMaxFrames = 16u;
4215       FixedSizeBacktrace<kMaxFrames> backtrace;
4216       backtrace.Collect(/* skip_count= */ 2);
4217       uint64_t hash = backtrace.Hash();
4218       MutexLock mu(self, *backtrace_lock_);
4219       new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
4220       if (new_backtrace) {
4221         seen_backtraces_.insert(hash);
4222       }
4223     }
4224     if (new_backtrace) {
4225       StackHandleScope<1> hs(self);
4226       auto h = hs.NewHandleWrapper(obj);
4227       CollectGarbage(/* clear_soft_references= */ false);
4228       unique_backtrace_count_.fetch_add(1);
4229     } else {
4230       seen_backtrace_count_.fetch_add(1);
4231     }
4232   }
4233 }
4234 
4235 void Heap::DisableGCForShutdown() {
4236   Thread* const self = Thread::Current();
4237   CHECK(Runtime::Current()->IsShuttingDown(self));
4238   MutexLock mu(self, *gc_complete_lock_);
4239   gc_disabled_for_shutdown_ = true;
4240 }
4241 
4242 bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
4243   for (gc::space::ImageSpace* space : boot_image_spaces_) {
4244     if (space->HasAddress(obj.Ptr())) {
4245       return true;
4246     }
4247   }
4248   return false;
4249 }
4250 
4251 bool Heap::IsInBootImageOatFile(const void* p) const {
4252   for (gc::space::ImageSpace* space : boot_image_spaces_) {
4253     if (space->GetOatFile()->Contains(p)) {
4254       return true;
4255     }
4256   }
4257   return false;
4258 }
4259 
4260 void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
4261                              uint32_t* boot_image_end,
4262                              uint32_t* boot_oat_begin,
4263                              uint32_t* boot_oat_end) {
4264   DCHECK(boot_image_begin != nullptr);
4265   DCHECK(boot_image_end != nullptr);
4266   DCHECK(boot_oat_begin != nullptr);
4267   DCHECK(boot_oat_end != nullptr);
4268   *boot_image_begin = 0u;
4269   *boot_image_end = 0u;
4270   *boot_oat_begin = 0u;
4271   *boot_oat_end = 0u;
4272   for (gc::space::ImageSpace* space_ : GetBootImageSpaces()) {
4273     const uint32_t image_begin = PointerToLowMemUInt32(space_->Begin());
4274     const uint32_t image_size = space_->GetImageHeader().GetImageSize();
4275     if (*boot_image_begin == 0 || image_begin < *boot_image_begin) {
4276       *boot_image_begin = image_begin;
4277     }
4278     *boot_image_end = std::max(*boot_image_end, image_begin + image_size);
4279     const OatFile* boot_oat_file = space_->GetOatFile();
4280     const uint32_t oat_begin = PointerToLowMemUInt32(boot_oat_file->Begin());
4281     const uint32_t oat_size = boot_oat_file->Size();
4282     if (*boot_oat_begin == 0 || oat_begin < *boot_oat_begin) {
4283       *boot_oat_begin = oat_begin;
4284     }
4285     *boot_oat_end = std::max(*boot_oat_end, oat_begin + oat_size);
4286   }
4287 }
4288 
4289 void Heap::SetAllocationListener(AllocationListener* l) {
4290   AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, l);
4291 
4292   if (old == nullptr) {
4293     Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4294   }
4295 }
4296 
4297 void Heap::RemoveAllocationListener() {
4298   AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr);
4299 
4300   if (old != nullptr) {
4301     Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
4302   }
4303 }
4304 
4305 void Heap::SetGcPauseListener(GcPauseListener* l) {
4306   gc_pause_listener_.store(l, std::memory_order_relaxed);
4307 }
4308 
4309 void Heap::RemoveGcPauseListener() {
4310   gc_pause_listener_.store(nullptr, std::memory_order_relaxed);
4311 }
4312 
4313 mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
4314                                        size_t alloc_size,
4315                                        bool grow,
4316                                        size_t* bytes_allocated,
4317                                        size_t* usable_size,
4318                                        size_t* bytes_tl_bulk_allocated) {
4319   const AllocatorType allocator_type = GetCurrentAllocator();
4320   if (kUsePartialTlabs && alloc_size <= self->TlabRemainingCapacity()) {
4321     DCHECK_GT(alloc_size, self->TlabSize());
4322     // There is enough space if we grow the TLAB. Let's do that. This increases the
4323     // TLAB bytes.
4324     const size_t min_expand_size = alloc_size - self->TlabSize();
4325     const size_t expand_bytes = std::max(
4326         min_expand_size,
4327         std::min(self->TlabRemainingCapacity() - self->TlabSize(), kPartialTlabSize));
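    // Illustrative expansion math (hypothetical sizes): with TlabSize() = 4KB,
    // TlabRemainingCapacity() = 64KB, alloc_size = 6KB and a 16KB kPartialTlabSize,
    // min_expand_size = 2KB and expand_bytes = max(2KB, min(60KB, 16KB)) = 16KB.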
4328     if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, expand_bytes, grow))) {
4329       return nullptr;
4330     }
4331     *bytes_tl_bulk_allocated = expand_bytes;
4332     self->ExpandTlab(expand_bytes);
4333     DCHECK_LE(alloc_size, self->TlabSize());
4334   } else if (allocator_type == kAllocatorTypeTLAB) {
4335     DCHECK(bump_pointer_space_ != nullptr);
4336     const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
4337     if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow))) {
4338       return nullptr;
4339     }
4340     // Try allocating a new thread local buffer; if the allocation fails, the space must be
4341     // full, so return null.
4342     if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
4343       return nullptr;
4344     }
4345     *bytes_tl_bulk_allocated = new_tlab_size;
4346   } else {
4347     DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
4348     DCHECK(region_space_ != nullptr);
4349     if (space::RegionSpace::kRegionSize >= alloc_size) {
4350       // Non-large. Check OOME for a tlab.
4351       if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
4352                                             space::RegionSpace::kRegionSize,
4353                                             grow))) {
4354         const size_t new_tlab_size = kUsePartialTlabs
4355             ? std::max(alloc_size, kPartialTlabSize)
4356             : gc::space::RegionSpace::kRegionSize;
4357         // Try to allocate a tlab.
4358         if (!region_space_->AllocNewTlab(self, new_tlab_size)) {
4359           // Failed to allocate a tlab. Try non-tlab.
4360           return region_space_->AllocNonvirtual<false>(alloc_size,
4361                                                        bytes_allocated,
4362                                                        usable_size,
4363                                                        bytes_tl_bulk_allocated);
4364         }
4365         *bytes_tl_bulk_allocated = new_tlab_size;
4366         // Fall-through to using the TLAB below.
4367       } else {
4368         // Check OOME for a non-tlab allocation.
4369         if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
4370           return region_space_->AllocNonvirtual<false>(alloc_size,
4371                                                        bytes_allocated,
4372                                                        usable_size,
4373                                                        bytes_tl_bulk_allocated);
4374         }
4375         // Neither tlab nor non-tlab works. Give up.
4376         return nullptr;
4377       }
4378     } else {
4379       // Large. Check OOME.
4380       if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
4381         return region_space_->AllocNonvirtual<false>(alloc_size,
4382                                                      bytes_allocated,
4383                                                      usable_size,
4384                                                      bytes_tl_bulk_allocated);
4385       }
4386       return nullptr;
4387     }
4388   }
4389   // Refilled TLAB, return.
4390   mirror::Object* ret = self->AllocTlab(alloc_size);
4391   DCHECK(ret != nullptr);
4392   *bytes_allocated = alloc_size;
4393   *usable_size = alloc_size;
4394   return ret;
4395 }
4396 
4397 const Verification* Heap::GetVerification() const {
4398   return verification_.get();
4399 }
4400 
4401 void Heap::VlogHeapGrowth(size_t old_footprint, size_t new_footprint, size_t alloc_size) {
4402   VLOG(heap) << "Growing heap from " << PrettySize(old_footprint) << " to "
4403              << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
4404 }
4405 
4406 class Heap::TriggerPostForkCCGcTask : public HeapTask {
4407  public:
4408   explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {}
4409   void Run(Thread* self) override {
4410     gc::Heap* heap = Runtime::Current()->GetHeap();
4411     // Trigger a GC, if not already done. The first GC after fork, whenever it
4412     // takes place, will adjust the thresholds to normal levels.
4413     if (heap->target_footprint_.load(std::memory_order_relaxed) == heap->growth_limit_) {
4414       heap->RequestConcurrentGC(self, kGcCauseBackground, false);
4415     }
4416   }
4417 };
4418 
4419 void Heap::PostForkChildAction(Thread* self) {
4420   // Temporarily increase target_footprint_ and concurrent_start_bytes_ to
4421   // max values to avoid GC during app launch.
4422   if (collector_type_ == kCollectorTypeCC && !IsLowMemoryMode()) {
4423     // Set target_footprint_ to the largest allowed value.
4424     SetIdealFootprint(growth_limit_);
4425     // Set concurrent_start_bytes_ to half of the heap size.
4426     size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
4427     concurrent_start_bytes_ = std::max(target_footprint / 2, GetBytesAllocated());
4428 
4429     GetTaskProcessor()->AddTask(
4430         self, new TriggerPostForkCCGcTask(NanoTime() + MsToNs(kPostForkMaxHeapDurationMS)));
4431   }
4432 }
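// Illustrative post-fork numbers (hypothetical): with growth_limit_ = 256MB and 12MB already
// allocated, target_footprint_ becomes 256MB and concurrent_start_bytes_ becomes
// max(128MB, 12MB) = 128MB, deferring GC until well past app launch; TriggerPostForkCCGcTask
// then restores normal thresholds via the first post-fork GC.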
4433 
4434 }  // namespace gc
4435 }  // namespace art
4436