/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <limits>
#include <memory>
#include <vector>

#include "android-base/stringprintf.h"

#include "allocation_listener.h"
#include "art_field-inl.h"
#include "backtrace_helper.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/memory_tool.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_compact.h"
#include "gc/collector/mark_sweep.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/reference_processor.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "gc/task_processor.h"
#include "gc/verification.h"
#include "gc_pause_listener.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "heap-inl.h"
#include "heap-visit-objects-inl.h"
#include "image.h"
#include "intern_table.h"
#include "java_vm_ext.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "nativehelper/ScopedLocalRef.h"
#include "obj_ptr-inl.h"
#include "os.h"
#include "reflection.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
#include "verify_object-inl.h"
#include "well_known_classes.h"

namespace art {

namespace gc {
static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// often relative to partial/full GC. This may be desirable since sticky GCs interfere less with
// mutator threads (lower pauses, use less memory bandwidth).
static constexpr double kStickyGcThroughputAdjustment = 1.0;
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
// How many reserve entries are at the end of the allocation stack; these are only needed if the
// allocation stack overflows.
static constexpr size_t kAllocationStackReserveSize = 1024;
// Default mark stack size in bytes.
static const size_t kDefaultMarkStackSize = 64 * KB;
// Define space names.
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
static const char* kNonMovingSpaceName = "non moving space";
static const char* kZygoteSpaceName = "zygote space";
static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
static constexpr bool kGCALotMode = false;
// GC-a-lot mode uses a small allocation stack to stress test a lot of GC.
static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
// Verify-object mode uses a small allocation stack size since searching the allocation stack is
// slow.
static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
    sizeof(mirror::HeapReference<mirror::Object>);
// System.runFinalization can deadlock with native allocations. To deal with this, we have a
// timeout on how long we wait for finalizers to run. b/21544853
static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);

// For deterministic compilation, we need the heap to be at a well-known address.
static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;

static const char* kRegionSpaceName = "main space (region space)";

// If true, we log all GCs in both the foreground and background. Used for debugging.
static constexpr bool kLogAllGCs = false;

// How much we grow the TLAB if we can do it.
static constexpr size_t kPartialTlabSize = 16 * KB;
static constexpr bool kUsePartialTlabs = true;

#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
// 300 MB (0x12c00000) - (default non-moving space capacity).
static uint8_t* const kPreferredAllocSpaceBegin =
    reinterpret_cast<uint8_t*>(300 * MB - Heap::kDefaultNonMovingSpaceCapacity);
#else
#ifdef __ANDROID__
// For 32-bit Android, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
static uint8_t* const kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
#else
// For 32-bit host, use 0x40000000 because asan uses most of the space below this.
static uint8_t* const kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
#endif
#endif

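// Whether the current process state is jank perceptible (e.g. a foreground app), in which case
// GC pause times matter to the user.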
static inline bool CareAboutPauseTimes() {
  return Runtime::Current()->InJankPerceptibleProcessState();
}

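// The heap constructor: records the configuration, then creates the spaces, accounting
// structures (bitmaps, card table, mod-union tables), locks, and garbage collectors that the
// chosen collector types require.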
Heap::Heap(size_t initial_size,
           size_t growth_limit,
           size_t min_free,
           size_t max_free,
           double target_utilization,
           double foreground_heap_growth_multiplier,
           size_t capacity,
           size_t non_moving_space_capacity,
           const std::string& image_file_name,
           const InstructionSet image_instruction_set,
           CollectorType foreground_collector_type,
           CollectorType background_collector_type,
           space::LargeObjectSpaceType large_object_space_type,
           size_t large_object_threshold,
           size_t parallel_gc_threads,
           size_t conc_gc_threads,
           bool low_memory_mode,
           size_t long_pause_log_threshold,
           size_t long_gc_log_threshold,
           bool ignore_max_footprint,
           bool use_tlab,
           bool verify_pre_gc_heap,
           bool verify_pre_sweeping_heap,
           bool verify_post_gc_heap,
           bool verify_pre_gc_rosalloc,
           bool verify_pre_sweeping_rosalloc,
           bool verify_post_gc_rosalloc,
           bool gc_stress_mode,
           bool measure_gc_performance,
           bool use_homogeneous_space_compaction_for_oom,
           uint64_t min_interval_homogeneous_space_compaction_by_oom)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      collector_type_(kCollectorTypeNone),
      foreground_collector_type_(foreground_collector_type),
      background_collector_type_(background_collector_type),
      desired_collector_type_(foreground_collector_type_),
      pending_task_lock_(nullptr),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
      zygote_space_(nullptr),
      large_object_threshold_(large_object_threshold),
      disable_thread_flip_count_(0),
      thread_flip_running_(false),
      collector_type_running_(kCollectorTypeNone),
      last_gc_cause_(kGcCauseNone),
      thread_running_gc_(nullptr),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      new_native_bytes_allocated_(0),
      old_native_bytes_allocated_(0),
      num_bytes_freed_revoke_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      gc_stress_mode_(gc_stress_mode),
      /* For GC-a-lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
          kDefaultAllocationStackSize),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      region_space_(nullptr),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
      total_wait_time_(0),
      verify_object_mode_(kVerifyObjectModeDisabled),
      disable_moving_gc_count_(0),
      semi_space_collector_(nullptr),
      mark_compact_collector_(nullptr),
      concurrent_copying_collector_(nullptr),
      is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
      use_tlab_(use_tlab),
      main_space_backup_(nullptr),
      min_interval_homogeneous_space_compaction_by_oom_(
          min_interval_homogeneous_space_compaction_by_oom),
      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
      pending_collector_transition_(nullptr),
      pending_heap_trim_(nullptr),
      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
      running_collection_is_blocking_(false),
      blocking_gc_count_(0U),
      blocking_gc_time_(0U),
      last_update_time_gc_count_rate_histograms_(  // Round down by the window duration.
          (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
      gc_count_last_window_(0U),
      blocking_gc_count_last_window_(0U),
      gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
      blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
                                        kGcCountRateMaxBucketCount),
      alloc_tracking_enabled_(false),
      backtrace_lock_(nullptr),
      seen_backtrace_count_(0u),
      unique_backtrace_count_(0u),
      gc_disabled_for_shutdown_(false) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  if (kUseReadBarrier) {
    CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
    CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
  }
  verification_.reset(new Verification(this));
  CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
  ScopedTrace trace(__FUNCTION__);
  Runtime* const runtime = Runtime::Current();
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  const bool is_zygote = runtime->IsZygote();
  if (!is_zygote) {
    // Background compaction is currently not supported for command line runs.
    if (background_collector_type_ != foreground_collector_type_) {
      VLOG(heap) << "Disabling background compaction for non zygote";
      background_collector_type_ = foreground_collector_type_;
    }
  }
  ChangeCollector(desired_collector_type_);
  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files.
  uint8_t* requested_alloc_space_begin = nullptr;
  if (foreground_collector_type_ == kCollectorTypeCC) {
    // Need to use a low address so that we can allocate a contiguous 2 * Xmx space when there's no
    // image (dex2oat for target).
    requested_alloc_space_begin = kPreferredAllocSpaceBegin;
  }

  // Load image space(s).
  if (space::ImageSpace::LoadBootImage(image_file_name,
                                       image_instruction_set,
                                       &boot_image_spaces_,
                                       &requested_alloc_space_begin)) {
    for (auto space : boot_image_spaces_) {
      AddSpace(space);
    }
  }

  /*
  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-  nonmoving space (non_moving_space_capacity)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space / bump space 1 (capacity_) +-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space2 / bump space 2 (capacity_)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
  */
  // We don't have hspace compaction enabled with GSS or CC.
  if (foreground_collector_type_ == kCollectorTypeGSS ||
      foreground_collector_type_ == kCollectorTypeCC) {
    use_homogeneous_space_compaction_for_oom_ = false;
  }
  bool support_homogeneous_space_compaction =
      background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
      use_homogeneous_space_compaction_for_oom_;
  // We may use the same space as the main space for the non moving space if we don't need to
  // compact from the main space.
  // This is not the case if we support homogeneous compaction or have a moving background
  // collector type.
  bool separate_non_moving_space = is_zygote ||
      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
      IsMovingGc(background_collector_type_);
  if (foreground_collector_type_ == kCollectorTypeGSS) {
    separate_non_moving_space = false;
  }
  std::unique_ptr<MemMap> main_mem_map_1;
  std::unique_ptr<MemMap> main_mem_map_2;

  // Gross hack to make dex2oat deterministic.
  if (foreground_collector_type_ == kCollectorTypeMS &&
      requested_alloc_space_begin == nullptr &&
      Runtime::Current()->IsAotCompiler()) {
    // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
    // b/26849108
    requested_alloc_space_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
  }
  uint8_t* request_begin = requested_alloc_space_begin;
  if (request_begin != nullptr && separate_non_moving_space) {
    request_begin += non_moving_space_capacity;
  }
  std::string error_str;
  std::unique_ptr<MemMap> non_moving_space_mem_map;
  if (separate_non_moving_space) {
    ScopedTrace trace2("Create separate non moving space");
    // If we are the zygote, the non moving space becomes the zygote space when we run
    // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
    // rename the mem map later.
    const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
    // Reserve the non moving mem map before the other two since it needs to be at a specific
    // address.
    non_moving_space_mem_map.reset(
        MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
                             non_moving_space_capacity, PROT_READ | PROT_WRITE, true, false,
                             &error_str));
    CHECK(non_moving_space_mem_map != nullptr) << error_str;
    // Try to reserve virtual memory at a lower address if we have a separate non moving space.
    request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
  }
  // Attempt to create 2 mem maps at or after the requested begin.
  if (foreground_collector_type_ != kCollectorTypeCC) {
    ScopedTrace trace2("Create main mem map");
    if (separate_non_moving_space || !is_zygote) {
      main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0],
                                                        request_begin,
                                                        capacity_,
                                                        &error_str));
    } else {
      // If no separate non-moving space and we are the zygote, the main space must come right
      // after the image space to avoid a gap. This is required since we want the zygote space to
      // be adjacent to the image space.
      main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
                                                PROT_READ | PROT_WRITE, true, false,
                                                &error_str));
    }
    CHECK(main_mem_map_1.get() != nullptr) << error_str;
  }
  if (support_homogeneous_space_compaction ||
      background_collector_type_ == kCollectorTypeSS ||
      foreground_collector_type_ == kCollectorTypeSS) {
    ScopedTrace trace2("Create main mem map 2");
    main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
                                                      capacity_, &error_str));
    CHECK(main_mem_map_2.get() != nullptr) << error_str;
  }

  // Create the non moving space first so that bitmaps don't take up the address range.
  if (separate_non_moving_space) {
    ScopedTrace trace2("Add non moving space");
    // Non moving space is always dlmalloc since we currently don't have support for multiple
    // active rosalloc spaces.
    const size_t size = non_moving_space_mem_map->Size();
    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
        non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
        initial_size, size, size, false);
    CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
        << requested_alloc_space_begin;
    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
    AddSpace(non_moving_space_);
  }
  // Create other spaces based on whether or not we have a moving GC.
  if (foreground_collector_type_ == kCollectorTypeCC) {
    CHECK(separate_non_moving_space);
    MemMap* region_space_mem_map = space::RegionSpace::CreateMemMap(kRegionSpaceName,
                                                                    capacity_ * 2,
                                                                    request_begin);
    CHECK(region_space_mem_map != nullptr) << "No region space mem map";
    region_space_ = space::RegionSpace::Create(kRegionSpaceName, region_space_mem_map);
    AddSpace(region_space_);
  } else if (IsMovingGc(foreground_collector_type_) &&
      foreground_collector_type_ != kCollectorTypeGSS) {
    // Create bump pointer spaces.
    // We only need to create the bump pointer spaces if the foreground collector is a
    // compacting GC.
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
                                                                    main_mem_map_1.release());
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
                                                            main_mem_map_2.release());
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    CHECK(separate_non_moving_space);
  } else {
    CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
    CHECK(main_space_ != nullptr);
    AddSpace(main_space_);
    if (!separate_non_moving_space) {
      non_moving_space_ = main_space_;
      CHECK(!non_moving_space_->CanMoveObjects());
    }
    if (foreground_collector_type_ == kCollectorTypeGSS) {
      CHECK_EQ(foreground_collector_type_, background_collector_type_);
      // Create bump pointer spaces instead of a backup space.
      main_mem_map_2.release();
      bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
                                                            kGSSBumpPointerSpaceCapacity, nullptr);
      CHECK(bump_pointer_space_ != nullptr);
      AddSpace(bump_pointer_space_);
      temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
                                                    kGSSBumpPointerSpaceCapacity, nullptr);
      CHECK(temp_space_ != nullptr);
      AddSpace(temp_space_);
    } else if (main_mem_map_2.get() != nullptr) {
      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
      main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
                                                           growth_limit_, capacity_, name, true));
      CHECK(main_space_backup_.get() != nullptr);
      // Add the space so it's accounted for in the heap_begin and heap_end.
      AddSpace(main_space_backup_.get());
    }
  }
  CHECK(non_moving_space_ != nullptr);
  CHECK(!non_moving_space_->CanMoveObjects());
  // Allocate the large object space.
  if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
    large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
                                                       capacity_);
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
    large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else {
    // Disable the large object space by making the cutoff excessively large.
    large_object_threshold_ = std::numeric_limits<size_t>::max();
    large_object_space_ = nullptr;
  }
  if (large_object_space_ != nullptr) {
    AddSpace(large_object_space_);
  }
  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  uint8_t* heap_begin = continuous_spaces_.front()->Begin();
  uint8_t* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;
  // Remove the main backup space since it slows down the GC to have unused extra spaces.
  // TODO: Avoid needing to do this.
  if (main_space_backup_.get() != nullptr) {
    RemoveSpace(main_space_backup_.get());
  }
  // Allocate the card table.
  // We currently don't support dynamically resizing the card table.
  // Since we don't know where in the low_4gb the app image will be located, make the card table
  // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
  UNUSED(heap_capacity);
  // Start at 4 KB (kMinHeapAddress); we can be sure there are no spaces mapped this low since the
  // address range is reserved by the kernel.
  static constexpr size_t kMinHeapAddress = 4 * KB;
  card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
                                                  4 * GB - kMinHeapAddress));
  CHECK(card_table_.get() != nullptr) << "Failed to create card table";
  if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
    rb_table_.reset(new accounting::ReadBarrierTable());
    DCHECK(rb_table_->IsAllCleared());
  }
  if (HasBootImageSpace()) {
    // Don't add the image mod union table if we are running without an image; this can crash if
    // we use the CardCache implementation.
    for (space::ImageSpace* image_space : GetBootImageSpaces()) {
      accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
          "Image mod-union table", this, image_space);
      CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
      AddModUnionTable(mod_union_table);
    }
  }
  if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
    accounting::RememberedSet* non_moving_space_rem_set =
        new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
    CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
    AddRememberedSet(non_moving_space_rem_set);
  }
  // TODO: Count objects in the image space here?
  num_bytes_allocated_.StoreRelaxed(0);
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
                                                    kDefaultMarkStackSize));
  const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
  allocation_stack_.reset(accounting::ObjectStack::Create(
      "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
  live_stack_.reset(accounting::ObjectStack::Create(
      "live stack", max_allocation_stack_size_, alloc_stack_capacity));
  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  native_blocking_gc_lock_ = new Mutex("Native blocking GC lock");
  native_blocking_gc_cond_.reset(new ConditionVariable("Native blocking GC condition variable",
                                                       *native_blocking_gc_lock_));
  native_blocking_gc_is_assigned_ = false;
  native_blocking_gc_in_progress_ = false;
  native_blocking_gcs_finished_ = 0;

  thread_flip_lock_ = new Mutex("GC thread flip lock");
  thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
                                                *thread_flip_lock_));
  task_processor_.reset(new TaskProcessor());
  reference_processor_.reset(new ReferenceProcessor());
  pending_task_lock_ = new Mutex("Pending task lock");
  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);
  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
        (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
      garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
    }
  }
  if (kMovingCollector) {
    if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
        MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
        use_homogeneous_space_compaction_for_oom_) {
      // TODO: Clean this up.
      const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
      semi_space_collector_ = new collector::SemiSpace(this, generational,
                                                       generational ? "generational" : "");
      garbage_collectors_.push_back(semi_space_collector_);
    }
    if (MayUseCollector(kCollectorTypeCC)) {
      concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
                                                                       "",
                                                                       measure_gc_performance);
      DCHECK(region_space_ != nullptr);
      concurrent_copying_collector_->SetRegionSpace(region_space_);
      garbage_collectors_.push_back(concurrent_copying_collector_);
    }
    if (MayUseCollector(kCollectorTypeMC)) {
      mark_compact_collector_ = new collector::MarkCompact(this);
      garbage_collectors_.push_back(mark_compact_collector_);
    }
  }
  if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
      (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
    // Check that there's no gap between the image space and the non moving space so that the
    // immune region won't break (e.g. due to a large object allocated in the gap). This is only
    // required when we're the zygote or using GSS.
    // Space with smallest Begin().
    space::ImageSpace* first_space = nullptr;
    for (space::ImageSpace* space : boot_image_spaces_) {
      if (first_space == nullptr || space->Begin() < first_space->Begin()) {
        first_space = space;
      }
    }
    bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
    if (!no_gap) {
      PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
      MemMap::DumpMaps(LOG_STREAM(ERROR), true);
      LOG(FATAL) << "There's a gap between the image space and the non-moving space";
    }
  }
  instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
  if (gc_stress_mode_) {
    backtrace_lock_ = new Mutex("GC complete lock");
  }
  if (is_running_on_memory_tool_ || gc_stress_mode_) {
    instrumentation->InstrumentQuickAllocEntryPoints();
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

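// Map an anonymous region, preferring the requested begin address; on failure, retry once
// letting the kernel pick the address.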
MemMap* Heap::MapAnonymousPreferredAddress(const char* name,
                                           uint8_t* request_begin,
                                           size_t capacity,
                                           std::string* out_error_str) {
  while (true) {
    MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
                                       PROT_READ | PROT_WRITE, true, false, out_error_str);
    if (map != nullptr || request_begin == nullptr) {
      return map;
    }
    // Retry a second time with no specified request begin.
    request_begin = nullptr;
  }
}

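// A collector type may be used if it matches either the configured foreground or background
// collector type.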
bool Heap::MayUseCollector(CollectorType type) const {
  return foreground_collector_type_ == type || background_collector_type_ == type;
}

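// Create a malloc space (rosalloc or dlmalloc depending on kUseRosAlloc) on top of an existing
// mem map, attaching a remembered set when the semi-space collector requires one.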
space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
                                                      size_t initial_size,
                                                      size_t growth_limit,
                                                      size_t capacity,
                                                      const char* name,
                                                      bool can_move_objects) {
  space::MallocSpace* malloc_space = nullptr;
  if (kUseRosAlloc) {
    // Create rosalloc space.
    malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
                                                          initial_size, growth_limit, capacity,
                                                          low_memory_mode_, can_move_objects);
  } else {
    malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
                                                          initial_size, growth_limit, capacity,
                                                          can_move_objects);
  }
  if (collector::SemiSpace::kUseRememberedSet) {
    accounting::RememberedSet* rem_set =
        new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
    CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
    AddRememberedSet(rem_set);
  }
  CHECK(malloc_space != nullptr) << "Failed to create " << name;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  return malloc_space;
}

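// Create the main malloc space from the given mem map and register it as the default space.
// Objects in it are marked movable only while some form of compaction (background, homogeneous
// space, or zygote compaction) may still need to move them.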
void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                                 size_t capacity) {
  // Is background compaction enabled?
  bool can_move_objects = IsMovingGc(background_collector_type_) !=
      IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
  // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
  // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
  // from the main space to the zygote space. If background compaction is enabled, always pass in
  // that we can move objects.
  if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
    // After the zygote we want this to be false if we don't have background compaction enabled so
    // that getting primitive array elements is faster.
    // We never have homogeneous compaction with GSS and don't need a space with movable objects.
    can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
  }
  if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
    RemoveRememberedSet(main_space_);
  }
  const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
  main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
                                            can_move_objects);
  SetSpaceAsDefault(main_space_);
  VLOG(heap) << "Created main space " << main_space_;
}

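// Switch the current allocator and rewrite the quick allocation entrypoints so that compiled
// code allocates through the new allocator.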
void Heap::ChangeAllocator(AllocatorType allocator) {
  if (current_allocator_ != allocator) {
    // These two allocators are only used internally and don't have any entrypoints.
    CHECK_NE(allocator, kAllocatorTypeLOS);
    CHECK_NE(allocator, kAllocatorTypeNonMoving);
    current_allocator_ = allocator;
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

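// Permanently disable moving GC by falling back to CMS and, if the transition succeeded, fold
// the main space into the non moving space. Incompatible with read barriers (CC).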
void Heap::DisableMovingGc() {
  CHECK(!kUseReadBarrier);
  if (IsMovingGc(foreground_collector_type_)) {
    foreground_collector_type_ = kCollectorTypeCMS;
  }
  if (IsMovingGc(background_collector_type_)) {
    background_collector_type_ = foreground_collector_type_;
  }
  TransitionCollector(foreground_collector_type_);
  Thread* const self = Thread::Current();
  ScopedThreadStateChange tsc(self, kSuspended);
  ScopedSuspendAll ssa(__FUNCTION__);
  // Something may have caused the transition to fail.
  if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
    CHECK(main_space_ != nullptr);
    // The allocation stack may have non movable objects in it. We need to flush it since the GC
    // can only handle marking allocation stack objects of one non moving space and one main
    // space.
    {
      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
      FlushAllocStack();
    }
    main_space_->DisableMovingObjects();
    non_moving_space_ = main_space_;
    CHECK(!non_moving_space_->CanMoveObjects());
  }
}

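// We are compiling the boot image if we are the AOT compiler and no image or zygote space
// exists yet.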
bool Heap::IsCompilingBoot() const {
  if (!Runtime::Current()->IsAotCompiler()) {
    return false;
  }
  ScopedObjectAccess soa(Thread::Current());
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace() || space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  if (IsMovingGc(collector_type_running_)) {
    WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
  }
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GT(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}

void Heap::IncrementDisableThreadFlip(Thread* self) {
  // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
  CHECK(kUseReadBarrier);
  bool is_nested = self->GetDisableThreadFlipCount() > 0;
  self->IncrementDisableThreadFlipCount();
  if (is_nested) {
    // If this is a nested JNI critical section enter, we don't need to wait or increment the global
    // counter. The global counter is incremented only once for a thread for the outermost enter.
    return;
  }
  ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
  MutexLock mu(self, *thread_flip_lock_);
  bool has_waited = false;
  uint64_t wait_start = NanoTime();
  if (thread_flip_running_) {
    ATRACE_BEGIN("IncrementDisableThreadFlip");
    while (thread_flip_running_) {
      has_waited = true;
      thread_flip_cond_->Wait(self);
    }
    ATRACE_END();
  }
  ++disable_thread_flip_count_;
  if (has_waited) {
    uint64_t wait_time = NanoTime() - wait_start;
    total_wait_time_ += wait_time;
    if (wait_time > long_pause_log_threshold_) {
      LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
    }
  }
}

void Heap::DecrementDisableThreadFlip(Thread* self) {
  // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up
  // the GC waiting before doing a thread flip.
  CHECK(kUseReadBarrier);
  self->DecrementDisableThreadFlipCount();
  bool is_outermost = self->GetDisableThreadFlipCount() == 0;
  if (!is_outermost) {
    // If this is not an outermost JNI critical exit, we don't need to decrement the global counter.
    // The global counter is decremented only once for a thread for the outermost exit.
    return;
  }
  MutexLock mu(self, *thread_flip_lock_);
  CHECK_GT(disable_thread_flip_count_, 0U);
  --disable_thread_flip_count_;
  if (disable_thread_flip_count_ == 0) {
    // Potentially notify the GC thread blocking to begin a thread flip.
    thread_flip_cond_->Broadcast(self);
  }
}

void Heap::ThreadFlipBegin(Thread* self) {
  // Supposed to be called by GC. Set thread_flip_running_ to be true. If disable_thread_flip_count_
  // > 0, block. Otherwise, go ahead.
  CHECK(kUseReadBarrier);
  ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
  MutexLock mu(self, *thread_flip_lock_);
  bool has_waited = false;
  uint64_t wait_start = NanoTime();
  CHECK(!thread_flip_running_);
  // Set this to true before waiting so that frequent JNI critical enter/exits won't starve
  // GC. This is like a writer preference of a reader-writer lock.
  thread_flip_running_ = true;
  while (disable_thread_flip_count_ > 0) {
    has_waited = true;
    thread_flip_cond_->Wait(self);
  }
  if (has_waited) {
    uint64_t wait_time = NanoTime() - wait_start;
    total_wait_time_ += wait_time;
    if (wait_time > long_pause_log_threshold_) {
      LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
    }
  }
}

void Heap::ThreadFlipEnd(Thread* self) {
  // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators
  // waiting before doing a JNI critical.
  CHECK(kUseReadBarrier);
  MutexLock mu(self, *thread_flip_lock_);
  CHECK(thread_flip_running_);
  thread_flip_running_ = false;
  // Potentially notify mutator threads blocking to enter a JNI critical section.
  thread_flip_cond_->Broadcast(self);
}

void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
  if (old_process_state != new_process_state) {
    const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
    for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
      // Start at index 1 to avoid "is always false" warning.
      // Have iteration 1 always transition the collector.
      TransitionCollector((((i & 1) == 1) == jank_perceptible)
          ? foreground_collector_type_
          : background_collector_type_);
      usleep(kCollectorTransitionStressWait);
    }
    if (jank_perceptible) {
      // Transition back to foreground right away to prevent jank.
      RequestCollectorTransition(foreground_collector_type_, 0);
    } else {
      // Don't delay for debug builds since we may want to stress test the GC.
      // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
      // special handling which does a homogeneous space compaction once but then doesn't transition
      // the collector. Similarly, we invoke a full compaction for kCollectorTypeCC but don't
      // transition the collector.
      RequestCollectorTransition(background_collector_type_,
                                 kIsDebugBuild ? 0 : kCollectorTransitionWait);
    }
  }
}
void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}
void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
  space::ContinuousSpace* space2 = non_moving_space_;
  // TODO: Generalize this to n bitmaps?
  CHECK(space1 != nullptr);
  CHECK(space2 != nullptr);
  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
                 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
                 stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

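// Register a space with the heap: hook its live/mark bitmaps into the heap bitmaps and keep
// continuous spaces sorted by begin address.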
void Heap::AddSpace(space::Space* space) {
  CHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    // The region space bitmap is not added since VisitObjects visits the region space objects with
    // special handling.
    if (live_bitmap != nullptr && !space->IsRegionSpace()) {
      CHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }
    continuous_spaces_.push_back(continuous_space);
    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
      return a->Begin() < b->Begin();
    });
  } else {
    CHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (continuous_space->IsDlMallocSpace()) {
    dlmalloc_space_ = continuous_space->AsDlMallocSpace();
  } else if (continuous_space->IsRosAllocSpace()) {
    rosalloc_space_ = continuous_space->AsRosAllocSpace();
  }
}

void Heap::RemoveSpace(space::Space* space) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr && !space->IsRegionSpace()) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
    }
    auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
    DCHECK(it != continuous_spaces_.end());
    continuous_spaces_.erase(it);
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
                        discontinuous_space);
    DCHECK(it != discontinuous_spaces_.end());
    discontinuous_spaces_.erase(it);
  }
  if (space->IsAllocSpace()) {
    auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
    DCHECK(it != alloc_spaces_.end());
    alloc_spaces_.erase(it);
  }
}

void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;
  // Dump cumulative loggers for each GC type.
  uint64_t total_paused_time = 0;
  for (auto& collector : garbage_collectors_) {
    total_duration += collector->GetCumulativeTimings().GetTotalNs();
    total_paused_time += collector->GetTotalPausedTimeNs();
    collector->DumpPerformanceInfo(os);
  }
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  uint64_t total_objects_allocated = GetObjectsAllocatedEver();
  os << "Total number of allocations " << total_objects_allocated << "\n";
  os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
  os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
  os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
  os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
  os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
  os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
  os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
  if (HasZygoteSpace()) {
    os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Total GC count: " << GetGcCount() << "\n";
  os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
  os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
  os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";

  {
    MutexLock mu(Thread::Current(), *gc_complete_lock_);
    if (gc_count_rate_histogram_.SampleSize() > 0U) {
      os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
      gc_count_rate_histogram_.DumpBins(os);
      os << "\n";
    }
    if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
      os << "Histogram of blocking GC count per "
         << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
      blocking_gc_count_rate_histogram_.DumpBins(os);
      os << "\n";
    }
  }

  if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
    rosalloc_space_->DumpStats(os);
  }

  os << "Registered native bytes allocated: "
     << old_native_bytes_allocated_.LoadRelaxed() + new_native_bytes_allocated_.LoadRelaxed()
     << "\n";

  BaseMutex::DumpAll(os);
}

ResetGcPerformanceInfo()1060 void Heap::ResetGcPerformanceInfo() {
1061   for (auto& collector : garbage_collectors_) {
1062     collector->ResetMeasurements();
1063   }
1064   total_bytes_freed_ever_ = 0;
1065   total_objects_freed_ever_ = 0;
1066   total_wait_time_ = 0;
1067   blocking_gc_count_ = 0;
1068   blocking_gc_time_ = 0;
1069   gc_count_last_window_ = 0;
1070   blocking_gc_count_last_window_ = 0;
1071   last_update_time_gc_count_rate_histograms_ =  // Round down by the window duration.
1072       (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
1073   {
1074     MutexLock mu(Thread::Current(), *gc_complete_lock_);
1075     gc_count_rate_histogram_.Reset();
1076     blocking_gc_count_rate_histogram_.Reset();
1077   }
1078 }
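// The rounding above is plain integer arithmetic: with a window duration W and a
// current time t, (t / W) * W is the largest multiple of W that is <= t. For
// example (illustrative numbers), t = 23 and W = 10 give (23 / 10) * 10 = 20.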
1079 
1080 uint64_t Heap::GetGcCount() const {
1081   uint64_t gc_count = 0U;
1082   for (auto& collector : garbage_collectors_) {
1083     gc_count += collector->GetCumulativeTimings().GetIterations();
1084   }
1085   return gc_count;
1086 }
1087 
1088 uint64_t Heap::GetGcTime() const {
1089   uint64_t gc_time = 0U;
1090   for (auto& collector : garbage_collectors_) {
1091     gc_time += collector->GetCumulativeTimings().GetTotalNs();
1092   }
1093   return gc_time;
1094 }
1095 
1096 uint64_t Heap::GetBlockingGcCount() const {
1097   return blocking_gc_count_;
1098 }
1099 
1100 uint64_t Heap::GetBlockingGcTime() const {
1101   return blocking_gc_time_;
1102 }
1103 
1104 void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
1105   MutexLock mu(Thread::Current(), *gc_complete_lock_);
1106   if (gc_count_rate_histogram_.SampleSize() > 0U) {
1107     gc_count_rate_histogram_.DumpBins(os);
1108   }
1109 }
1110 
1111 void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
1112   MutexLock mu(Thread::Current(), *gc_complete_lock_);
1113   if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1114     blocking_gc_count_rate_histogram_.DumpBins(os);
1115   }
1116 }
1117 
1118 ALWAYS_INLINE
1119 static inline AllocationListener* GetAndOverwriteAllocationListener(
1120     Atomic<AllocationListener*>* storage, AllocationListener* new_value) {
1121   AllocationListener* old;
1122   do {
1123     old = storage->LoadSequentiallyConsistent();
1124   } while (!storage->CompareExchangeStrongSequentiallyConsistent(old, new_value));
1125   return old;
1126 }
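// The CAS loop above implements an unconditional atomic exchange using ART's
// Atomic<> wrapper. A minimal sketch of the same semantics with the C++ standard
// library (illustrative only, not how ART spells it):
//
//   #include <atomic>
//
//   template <typename T>
//   T* ExchangeListener(std::atomic<T*>& storage, T* new_value) {
//     // std::atomic provides exchange() directly, so no explicit CAS loop is needed.
//     return storage.exchange(new_value, std::memory_order_seq_cst);
//   }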
1127 
1128 Heap::~Heap() {
1129   VLOG(heap) << "Starting ~Heap()";
1130   STLDeleteElements(&garbage_collectors_);
1131   // If we don't reset then the mark stack complains in its destructor.
1132   allocation_stack_->Reset();
1133   allocation_records_.reset();
1134   live_stack_->Reset();
1135   STLDeleteValues(&mod_union_tables_);
1136   STLDeleteValues(&remembered_sets_);
1137   STLDeleteElements(&continuous_spaces_);
1138   STLDeleteElements(&discontinuous_spaces_);
1139   delete gc_complete_lock_;
1140   delete native_blocking_gc_lock_;
1141   delete thread_flip_lock_;
1142   delete pending_task_lock_;
1143   delete backtrace_lock_;
1144   if (unique_backtrace_count_.LoadRelaxed() != 0 || seen_backtrace_count_.LoadRelaxed() != 0) {
1145     LOG(INFO) << "gc stress unique=" << unique_backtrace_count_.LoadRelaxed()
1146         << " total=" << seen_backtrace_count_.LoadRelaxed() +
1147             unique_backtrace_count_.LoadRelaxed();
1148   }
1149 
1150   VLOG(heap) << "Finished ~Heap()";
1151 }
1152 
1153 
1154 space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const {
1155   for (const auto& space : continuous_spaces_) {
1156     if (space->Contains(addr)) {
1157       return space;
1158     }
1159   }
1160   return nullptr;
1161 }
1162 
1163 space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1164                                                             bool fail_ok) const {
1165   space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr());
1166   if (space != nullptr) {
1167     return space;
1168   }
1169   if (!fail_ok) {
1170     LOG(FATAL) << "object " << obj << " not inside any spaces!";
1171   }
1172   return nullptr;
1173 }
1174 
1175 space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1176                                                                   bool fail_ok) const {
1177   for (const auto& space : discontinuous_spaces_) {
1178     if (space->Contains(obj.Ptr())) {
1179       return space;
1180     }
1181   }
1182   if (!fail_ok) {
1183     LOG(FATAL) << "object " << obj << " not inside any spaces!";
1184   }
1185   return nullptr;
1186 }
1187 
1188 space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const {
1189   space::Space* result = FindContinuousSpaceFromObject(obj, true);
1190   if (result != nullptr) {
1191     return result;
1192   }
1193   return FindDiscontinuousSpaceFromObject(obj, fail_ok);
1194 }
1195 
1196 space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
1197   for (const auto& space : continuous_spaces_) {
1198     if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1199       return space;
1200     }
1201   }
1202   for (const auto& space : discontinuous_spaces_) {
1203     if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1204       return space;
1205     }
1206   }
1207   return nullptr;
1208 }
1209 
1210 
1211 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
1212   // If we're in a stack overflow, do not create a new exception. It would require running the
1213   // constructor, which will of course still be in a stack overflow.
1214   if (self->IsHandlingStackOverflow()) {
1215     self->SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1216     return;
1217   }
1218 
1219   std::ostringstream oss;
1220   size_t total_bytes_free = GetFreeMemory();
1221   oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
1222       << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
1223       << " max allowed footprint " << max_allowed_footprint_ << ", growth limit "
1224       << growth_limit_;
1225   // If the allocation failed due to fragmentation, print out the largest continuous allocation.
1226   if (total_bytes_free >= byte_count) {
1227     space::AllocSpace* space = nullptr;
1228     if (allocator_type == kAllocatorTypeNonMoving) {
1229       space = non_moving_space_;
1230     } else if (allocator_type == kAllocatorTypeRosAlloc ||
1231                allocator_type == kAllocatorTypeDlMalloc) {
1232       space = main_space_;
1233     } else if (allocator_type == kAllocatorTypeBumpPointer ||
1234                allocator_type == kAllocatorTypeTLAB) {
1235       space = bump_pointer_space_;
1236     } else if (allocator_type == kAllocatorTypeRegion ||
1237                allocator_type == kAllocatorTypeRegionTLAB) {
1238       space = region_space_;
1239     }
1240     if (space != nullptr) {
1241       space->LogFragmentationAllocFailure(oss, byte_count);
1242     }
1243   }
1244   self->ThrowOutOfMemoryError(oss.str().c_str());
1245 }
1246 
1247 void Heap::DoPendingCollectorTransition() {
1248   CollectorType desired_collector_type = desired_collector_type_;
1249   // Launch homogeneous space compaction if it is desired.
1250   if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
1251     if (!CareAboutPauseTimes()) {
1252       PerformHomogeneousSpaceCompact();
1253     } else {
1254       VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
1255     }
1256   } else if (desired_collector_type == kCollectorTypeCCBackground) {
1257     DCHECK(kUseReadBarrier);
1258     if (!CareAboutPauseTimes()) {
1259       // Invoke CC full compaction.
1260       CollectGarbageInternal(collector::kGcTypeFull,
1261                              kGcCauseCollectorTransition,
1262                              /*clear_soft_references*/false);
1263     } else {
1264       VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
1265     }
1266   } else {
1267     TransitionCollector(desired_collector_type);
1268   }
1269 }
1270 
1271 void Heap::Trim(Thread* self) {
1272   Runtime* const runtime = Runtime::Current();
1273   if (!CareAboutPauseTimes()) {
1274     // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
1275     // about pauses.
1276     ScopedTrace trace("Deflating monitors");
1277     // Avoid race conditions on the lock word for CC.
1278     ScopedGCCriticalSection gcs(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1279     ScopedSuspendAll ssa(__FUNCTION__);
1280     uint64_t start_time = NanoTime();
1281     size_t count = runtime->GetMonitorList()->DeflateMonitors();
1282     VLOG(heap) << "Deflating " << count << " monitors took "
1283         << PrettyDuration(NanoTime() - start_time);
1284   }
1285   TrimIndirectReferenceTables(self);
1286   TrimSpaces(self);
1287   // Trim arenas that may have been used by JIT or verifier.
1288   runtime->GetArenaPool()->TrimMaps();
1289 }
1290 
1291 class TrimIndirectReferenceTableClosure : public Closure {
1292  public:
1293   explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1294   }
1295   virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1296     thread->GetJniEnv()->locals.Trim();
1297     // If thread is a running mutator, then act on behalf of the trim thread.
1298     // See the code in ThreadList::RunCheckpoint.
1299     barrier_->Pass(Thread::Current());
1300   }
1301 
1302  private:
1303   Barrier* const barrier_;
1304 };
1305 
1306 void Heap::TrimIndirectReferenceTables(Thread* self) {
1307   ScopedObjectAccess soa(self);
1308   ScopedTrace trace(__PRETTY_FUNCTION__);
1309   JavaVMExt* vm = soa.Vm();
1310   // Trim globals indirect reference table.
1311   vm->TrimGlobals();
1312   // Trim locals indirect reference tables.
1313   Barrier barrier(0);
1314   TrimIndirectReferenceTableClosure closure(&barrier);
1315   ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1316   size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1317   if (barrier_count != 0) {
1318     barrier.Increment(self, barrier_count);
1319   }
1320 }
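// Roughly speaking, RunCheckpoint above returns the number of threads whose
// closure will run asynchronously, and Barrier::Increment then blocks until that
// many Pass() calls have arrived. A minimal sketch of the counting-barrier idea
// (illustrative, not ART's Barrier implementation):
//
//   #include <condition_variable>
//   #include <mutex>
//
//   class CountingBarrier {
//    public:
//     void Pass() {  // Called once by each participating thread.
//       std::lock_guard<std::mutex> lock(mu_);
//       ++count_;
//       cv_.notify_all();
//     }
//     void WaitFor(int expected) {  // Called by the requesting thread.
//       std::unique_lock<std::mutex> lock(mu_);
//       cv_.wait(lock, [&] { return count_ >= expected; });
//     }
//    private:
//     std::mutex mu_;
//     std::condition_variable cv_;
//     int count_ = 0;
//   };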
1321 
1322 void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
1323   // Need to do this before acquiring the locks since we don't want to get suspended while
1324   // holding any locks.
1325   ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1326   MutexLock mu(self, *gc_complete_lock_);
1327   // Ensure there is only one GC at a time.
1328   WaitForGcToCompleteLocked(cause, self);
1329   collector_type_running_ = collector_type;
1330   last_gc_cause_ = cause;
1331   thread_running_gc_ = self;
1332 }
1333 
1334 void Heap::TrimSpaces(Thread* self) {
1335   // Pretend we are doing a GC to prevent background compaction from deleting the space we are
1336   // trimming.
1337   StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1338   ScopedTrace trace(__PRETTY_FUNCTION__);
1339   const uint64_t start_ns = NanoTime();
1340   // Trim the managed spaces.
1341   uint64_t total_alloc_space_allocated = 0;
1342   uint64_t total_alloc_space_size = 0;
1343   uint64_t managed_reclaimed = 0;
1344   {
1345     ScopedObjectAccess soa(self);
1346     for (const auto& space : continuous_spaces_) {
1347       if (space->IsMallocSpace()) {
1348         gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1349         if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1350           // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1351           // for a long period of time.
1352           managed_reclaimed += malloc_space->Trim();
1353         }
1354         total_alloc_space_size += malloc_space->Size();
1355       }
1356     }
1357   }
1358   total_alloc_space_allocated = GetBytesAllocated();
1359   if (large_object_space_ != nullptr) {
1360     total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1361   }
1362   if (bump_pointer_space_ != nullptr) {
1363     total_alloc_space_allocated -= bump_pointer_space_->Size();
1364   }
1365   if (region_space_ != nullptr) {
1366     total_alloc_space_allocated -= region_space_->GetBytesAllocated();
1367   }
1368   const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1369       static_cast<float>(total_alloc_space_size);
1370   uint64_t gc_heap_end_ns = NanoTime();
1371   // We never move things in the native heap, so we can finish the GC at this point.
1372   FinishGC(self, collector::kGcTypeNone);
1373 
1374   VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1375       << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of "
1376       << static_cast<int>(100 * managed_utilization) << "%.";
1377 }
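// The utilization figure logged above is straightforward: with (illustrative
// numbers) 48 MB allocated across 64 MB of malloc space,
// managed_utilization = 48.0 / 64.0 = 0.75, which is logged as 75%.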
1378 
1379 bool Heap::IsValidObjectAddress(const void* addr) const {
1380   if (addr == nullptr) {
1381     return true;
1382   }
1383   return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr;
1384 }
1385 
1386 bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const {
1387   return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr;
1388 }
1389 
1390 bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
1391                               bool search_allocation_stack,
1392                               bool search_live_stack,
1393                               bool sorted) {
1394   if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) {
1395     return false;
1396   }
1397   if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) {
1398     mirror::Class* klass = obj->GetClass<kVerifyNone>();
1399     if (obj == klass) {
1400       // This case happens for java.lang.Class.
1401       return true;
1402     }
1403     return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1404   } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) {
1405     // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1406     // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
1407     return temp_space_->Contains(obj.Ptr());
1408   }
1409   if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) {
1410     return true;
1411   }
1412   space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
1413   space::DiscontinuousSpace* d_space = nullptr;
1414   if (c_space != nullptr) {
1415     if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1416       return true;
1417     }
1418   } else {
1419     d_space = FindDiscontinuousSpaceFromObject(obj, true);
1420     if (d_space != nullptr) {
1421       if (d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1422         return true;
1423       }
1424     }
1425   }
1426   // This covers the allocation/live stack swapping that is done without mutators suspended.
1427   for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1428     if (i > 0) {
1429       NanoSleep(MsToNs(10));
1430     }
1431     if (search_allocation_stack) {
1432       if (sorted) {
1433         if (allocation_stack_->ContainsSorted(obj.Ptr())) {
1434           return true;
1435         }
1436       } else if (allocation_stack_->Contains(obj.Ptr())) {
1437         return true;
1438       }
1439     }
1440 
1441     if (search_live_stack) {
1442       if (sorted) {
1443         if (live_stack_->ContainsSorted(obj.Ptr())) {
1444           return true;
1445         }
1446       } else if (live_stack_->Contains(obj.Ptr())) {
1447         return true;
1448       }
1449     }
1450   }
1451   // We need to check the bitmaps again since there is a race where we mark something as live and
1452   // then clear the stack containing it.
1453   if (c_space != nullptr) {
1454     if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1455       return true;
1456     }
1457   } else {
1458     d_space = FindDiscontinuousSpaceFromObject(obj, true);
1459     if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1460       return true;
1461     }
1462   }
1463   return false;
1464 }
1465 
1466 std::string Heap::DumpSpaces() const {
1467   std::ostringstream oss;
1468   DumpSpaces(oss);
1469   return oss.str();
1470 }
1471 
1472 void Heap::DumpSpaces(std::ostream& stream) const {
1473   for (const auto& space : continuous_spaces_) {
1474     accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1475     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1476     stream << space << " " << *space << "\n";
1477     if (live_bitmap != nullptr) {
1478       stream << live_bitmap << " " << *live_bitmap << "\n";
1479     }
1480     if (mark_bitmap != nullptr) {
1481       stream << mark_bitmap << " " << *mark_bitmap << "\n";
1482     }
1483   }
1484   for (const auto& space : discontinuous_spaces_) {
1485     stream << space << " " << *space << "\n";
1486   }
1487 }
1488 
1489 void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
1490   if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1491     return;
1492   }
1493 
1494   // Ignore early dawn of the universe verifications.
1495   if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
1496     return;
1497   }
1498   CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
1499   mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1500   CHECK(c != nullptr) << "Null class in object " << obj;
1501   CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
1502   CHECK(VerifyClassClass(c));
1503 
1504   if (verify_object_mode_ > kVerifyObjectModeFast) {
1505     // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1506     CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1507   }
1508 }
1509 
1510 void Heap::VerifyHeap() {
1511   ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1512   auto visitor = [&](mirror::Object* obj) {
1513     VerifyObjectBody(obj);
1514   };
1515   // Technically we need the mutator lock here to call Visit. However, VerifyObjectBody is already
1516   // NO_THREAD_SAFETY_ANALYSIS.
1517   auto no_thread_safety_analysis = [&]() NO_THREAD_SAFETY_ANALYSIS {
1518     GetLiveBitmap()->Visit(visitor);
1519   };
1520   no_thread_safety_analysis();
1521 }
1522 
1523 void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
1524   // Use signed comparison since freed bytes can be negative when background compaction foreground
1525   // transitions occur. This is caused by moving objects from a bump pointer space to a free list
1526   // backed space, which typically increases the memory footprint due to padding and binning.
1527   DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
1528   // Note: This relies on two's complement for handling negative freed_bytes.
1529   num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
1530   if (Runtime::Current()->HasStatsEnabled()) {
1531     RuntimeStats* thread_stats = Thread::Current()->GetStats();
1532     thread_stats->freed_objects += freed_objects;
1533     thread_stats->freed_bytes += freed_bytes;
1534     // TODO: Do this concurrently.
1535     RuntimeStats* global_stats = Runtime::Current()->GetStats();
1536     global_stats->freed_objects += freed_objects;
1537     global_stats->freed_bytes += freed_bytes;
1538   }
1539 }
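// Worked example of the two's complement note above (illustrative numbers): a
// foreground transition that grows the footprint by 4 KB reports
// freed_bytes = -4096, and FetchAndSub(-4096) increases num_bytes_allocated_
// by 4096, as intended.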
1540 
1541 void Heap::RecordFreeRevoke() {
1542   // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
1543   // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
1544   // If there's a concurrent revoke, it is OK not to reset num_bytes_freed_revoke_
1545   // all the way to zero exactly, as the remainder will be subtracted at the next GC.
1546   size_t bytes_freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent();
1547   CHECK_GE(num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(bytes_freed),
1548            bytes_freed) << "num_bytes_freed_revoke_ underflow";
1549   CHECK_GE(num_bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes_freed),
1550            bytes_freed) << "num_bytes_allocated_ underflow";
1551   GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1552 }
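// Illustrative numbers for the accounting above (assumed, for exposition): if a
// 16 KB rosalloc thread-local run was counted as allocated up front but only
// 6 KB of it was used before the run was revoked, the revoke records the unused
// 10 KB in num_bytes_freed_revoke_, and this function subtracts it from
// num_bytes_allocated_ so the two bulk adjustments cancel out.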
1553 
1554 space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1555   if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) {
1556     return rosalloc_space_;
1557   }
1558   for (const auto& space : continuous_spaces_) {
1559     if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1560       if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1561         return space->AsContinuousSpace()->AsRosAllocSpace();
1562       }
1563     }
1564   }
1565   return nullptr;
1566 }
1567 
1568 static inline bool EntrypointsInstrumented() REQUIRES_SHARED(Locks::mutator_lock_) {
1569   instrumentation::Instrumentation* const instrumentation =
1570       Runtime::Current()->GetInstrumentation();
1571   return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
1572 }
1573 
1574 mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
1575                                              AllocatorType allocator,
1576                                              bool instrumented,
1577                                              size_t alloc_size,
1578                                              size_t* bytes_allocated,
1579                                              size_t* usable_size,
1580                                              size_t* bytes_tl_bulk_allocated,
1581                                              ObjPtr<mirror::Class>* klass) {
1582   bool was_default_allocator = allocator == GetCurrentAllocator();
1583   // Make sure there is no pending exception since we may need to throw an OOME.
1584   self->AssertNoPendingException();
1585   DCHECK(klass != nullptr);
1586   StackHandleScope<1> hs(self);
1587   HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
1588   // The allocation failed. If the GC is running, block until it completes, and then retry the
1589   // allocation.
1590   collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
1591   // If we were the default allocator but the allocator changed while we were suspended,
1592   // abort the allocation.
1593   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1594       (!instrumented && EntrypointsInstrumented())) {
1595     return nullptr;
1596   }
1597   if (last_gc != collector::kGcTypeNone) {
1598     // A GC was in progress and we blocked, retry allocation now that memory has been freed.
1599     mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1600                                                      usable_size, bytes_tl_bulk_allocated);
1601     if (ptr != nullptr) {
1602       return ptr;
1603     }
1604   }
1605 
1606   collector::GcType tried_type = next_gc_type_;
1607   const bool gc_ran =
1608       CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1609   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1610       (!instrumented && EntrypointsInstrumented())) {
1611     return nullptr;
1612   }
1613   if (gc_ran) {
1614     mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1615                                                      usable_size, bytes_tl_bulk_allocated);
1616     if (ptr != nullptr) {
1617       return ptr;
1618     }
1619   }
1620 
1621   // Loop through our different GC types and try to GC until we get enough free memory.
1622   for (collector::GcType gc_type : gc_plan_) {
1623     if (gc_type == tried_type) {
1624       continue;
1625     }
1626     // Attempt to run the collector, if we succeed, re-try the allocation.
1627     const bool plan_gc_ran =
1628         CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1629     if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1630         (!instrumented && EntrypointsInstrumented())) {
1631       return nullptr;
1632     }
1633     if (plan_gc_ran) {
1634       // Did we free sufficient memory for the allocation to succeed?
1635       mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1636                                                        usable_size, bytes_tl_bulk_allocated);
1637       if (ptr != nullptr) {
1638         return ptr;
1639       }
1640     }
1641   }
1642   // Allocations have failed after GCs; this is an exceptional state.
1643   // Try harder, growing the heap if necessary.
1644   mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1645                                                   usable_size, bytes_tl_bulk_allocated);
1646   if (ptr != nullptr) {
1647     return ptr;
1648   }
1649   // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1650   // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1651   // VM spec requires that all SoftReferences have been collected and cleared before throwing
1652   // OOME.
1653   VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1654            << " allocation";
1655   // TODO: Run finalization, but this may cause more allocations to occur.
1656   // We don't need a WaitForGcToComplete here either.
1657   DCHECK(!gc_plan_.empty());
1658   CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1659   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1660       (!instrumented && EntrypointsInstrumented())) {
1661     return nullptr;
1662   }
1663   ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
1664                                   bytes_tl_bulk_allocated);
1665   if (ptr == nullptr) {
1666     const uint64_t current_time = NanoTime();
1667     switch (allocator) {
1668       case kAllocatorTypeRosAlloc:
1669         // Fall-through.
1670       case kAllocatorTypeDlMalloc: {
1671         if (use_homogeneous_space_compaction_for_oom_ &&
1672             current_time - last_time_homogeneous_space_compaction_by_oom_ >
1673             min_interval_homogeneous_space_compaction_by_oom_) {
1674           last_time_homogeneous_space_compaction_by_oom_ = current_time;
1675           HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
1676           // Thread suspension could have occurred.
1677           if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1678               (!instrumented && EntrypointsInstrumented())) {
1679             return nullptr;
1680           }
1681           switch (result) {
1682             case HomogeneousSpaceCompactResult::kSuccess:
1683               // If the allocation succeeded, we delayed an OOM.
1684               ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1685                                               usable_size, bytes_tl_bulk_allocated);
1686               if (ptr != nullptr) {
1687                 count_delayed_oom_++;
1688               }
1689               break;
1690             case HomogeneousSpaceCompactResult::kErrorReject:
1691               // Reject due to disabled moving GC.
1692               break;
1693             case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1694               // Throw OOM by default.
1695               break;
1696             default: {
1697               UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1698                   << static_cast<size_t>(result);
1699               UNREACHABLE();
1700             }
1701           }
1702           // Always print that we ran homogeneous space compaction since this can cause jank.
1703           VLOG(heap) << "Ran heap homogeneous space compaction, "
1704                     << " requested defragmentation "
1705                     << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1706                     << " performed defragmentation "
1707                     << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1708                     << " ignored homogeneous space compaction "
1709                     << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1710                     << " delayed count = "
1711                     << count_delayed_oom_.LoadSequentiallyConsistent();
1712         }
1713         break;
1714       }
1715       case kAllocatorTypeNonMoving: {
1716         if (kUseReadBarrier) {
1717           // DisableMovingGc() isn't compatible with CC.
1718           break;
1719         }
1720         // Try to transition the heap if the allocation failure was due to the space being full.
1721         if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
1722           // If we aren't out of memory then the OOM was probably from the non moving space being
1723           // full. Attempt to disable compaction and turn the main space into a non moving space.
1724           DisableMovingGc();
1725           // Thread suspension could have occurred.
1726           if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1727               (!instrumented && EntrypointsInstrumented())) {
1728             return nullptr;
1729           }
1730           // If we are still a moving GC then something must have caused the transition to fail.
1731           if (IsMovingGc(collector_type_)) {
1732             MutexLock mu(self, *gc_complete_lock_);
1733             // If we couldn't disable moving GC, just throw OOME and return null.
1734             LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1735                          << disable_moving_gc_count_;
1736           } else {
1737             LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1738             ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1739                                             usable_size, bytes_tl_bulk_allocated);
1740           }
1741         }
1742         break;
1743       }
1744       default: {
1745         // Do nothing for other allocators.
1746       }
1747     }
1748   }
1749   // If the allocation hasn't succeeded by this point, throw an OOM error.
1750   if (ptr == nullptr) {
1751     ThrowOutOfMemoryError(self, alloc_size, allocator);
1752   }
1753   return ptr;
1754 }
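// In summary, the allocation slow path above escalates as follows before giving
// up: (1) wait for any in-progress GC and retry; (2) run the next planned GC
// type and retry; (3) try every remaining GC type in gc_plan_, retrying after
// each; (4) retry with heap growth allowed; (5) run the most thorough GC,
// clearing SoftReferences, and retry with growth; (6) fall back to
// allocator-specific last resorts (homogeneous space compaction, or disabling
// the moving GC); (7) throw OutOfMemoryError.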
1755 
1756 void Heap::SetTargetHeapUtilization(float target) {
1757   DCHECK_GT(target, 0.0f);  // asserted in Java code
1758   DCHECK_LT(target, 1.0f);
1759   target_utilization_ = target;
1760 }
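// For example (illustrative numbers; the precise growth policy lives in
// GrowForUtilization): a target utilization of 0.75 with 30 MB live after a GC
// implies an ideal footprint of about 30 / 0.75 = 40 MB.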
1761 
1762 size_t Heap::GetObjectsAllocated() const {
1763   Thread* const self = Thread::Current();
1764   ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
1765   // Prevent GC from running during GetObjectsAllocated since we may get a checkpoint request that
1766   // tells us to suspend while we are doing SuspendAll. b/35232978
1767   gc::ScopedGCCriticalSection gcs(Thread::Current(),
1768                                   gc::kGcCauseGetObjectsAllocated,
1769                                   gc::kCollectorTypeGetObjectsAllocated);
1770   // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
1771   ScopedSuspendAll ssa(__FUNCTION__);
1772   ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1773   size_t total = 0;
1774   for (space::AllocSpace* space : alloc_spaces_) {
1775     total += space->GetObjectsAllocated();
1776   }
1777   return total;
1778 }
1779 
1780 uint64_t Heap::GetObjectsAllocatedEver() const {
1781   uint64_t total = GetObjectsFreedEver();
1782   // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
1783   if (Thread::Current() != nullptr) {
1784     total += GetObjectsAllocated();
1785   }
1786   return total;
1787 }
1788 
1789 uint64_t Heap::GetBytesAllocatedEver() const {
1790   return GetBytesFreedEver() + GetBytesAllocated();
1791 }
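// The identity above: bytes allocated ever = bytes freed ever + bytes currently
// allocated. With (illustrative numbers) 900 MB freed over the process lifetime
// and 100 MB currently live, the total ever allocated is 1000 MB.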
1792 
1793 void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
1794                           bool use_is_assignable_from,
1795                           uint64_t* counts) {
1796   auto instance_counter = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
1797     mirror::Class* instance_class = obj->GetClass();
1798     CHECK(instance_class != nullptr);
1799     for (size_t i = 0; i < classes.size(); ++i) {
1800       ObjPtr<mirror::Class> klass = classes[i].Get();
1801       if (use_is_assignable_from) {
1802         if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
1803           ++counts[i];
1804         }
1805       } else if (instance_class == klass) {
1806         ++counts[i];
1807       }
1808     }
1809   };
1810   VisitObjects(instance_counter);
1811 }
1812 
1813 void Heap::GetInstances(VariableSizedHandleScope& scope,
1814                         Handle<mirror::Class> h_class,
1815                         int32_t max_count,
1816                         std::vector<Handle<mirror::Object>>& instances) {
1817   DCHECK_GE(max_count, 0);
1818   auto instance_collector = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
1819     if (obj->GetClass() == h_class.Get()) {
1820       if (max_count == 0 || instances.size() < static_cast<size_t>(max_count)) {
1821         instances.push_back(scope.NewHandle(obj));
1822       }
1823     }
1824   };
1825   VisitObjects(instance_collector);
1826 }
1827 
1828 void Heap::GetReferringObjects(VariableSizedHandleScope& scope,
1829                                Handle<mirror::Object> o,
1830                                int32_t max_count,
1831                                std::vector<Handle<mirror::Object>>& referring_objects) {
1832   class ReferringObjectsFinder {
1833    public:
1834     ReferringObjectsFinder(VariableSizedHandleScope& scope_in,
1835                            Handle<mirror::Object> object_in,
1836                            int32_t max_count_in,
1837                            std::vector<Handle<mirror::Object>>& referring_objects_in)
1838         REQUIRES_SHARED(Locks::mutator_lock_)
1839         : scope_(scope_in),
1840           object_(object_in),
1841           max_count_(max_count_in),
1842           referring_objects_(referring_objects_in) {}
1843 
1844     // For Object::VisitReferences.
1845     void operator()(ObjPtr<mirror::Object> obj,
1846                     MemberOffset offset,
1847                     bool is_static ATTRIBUTE_UNUSED) const
1848         REQUIRES_SHARED(Locks::mutator_lock_) {
1849       mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
1850       if (ref == object_.Get() && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1851         referring_objects_.push_back(scope_.NewHandle(obj));
1852       }
1853     }
1854 
1855     void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
1856         const {}
1857     void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
1858 
1859    private:
1860     VariableSizedHandleScope& scope_;
1861     Handle<mirror::Object> const object_;
1862     const uint32_t max_count_;
1863     std::vector<Handle<mirror::Object>>& referring_objects_;
1864     DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1865   };
1866   ReferringObjectsFinder finder(scope, o, max_count, referring_objects);
1867   auto referring_objects_finder = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
1868     obj->VisitReferences(finder, VoidFunctor());
1869   };
1870   VisitObjects(referring_objects_finder);
1871 }
1872 
1873 void Heap::CollectGarbage(bool clear_soft_references) {
1874   // Even if we waited for a GC we still need to do another GC since weaks allocated during the
1875   // last GC will not necessarily have been cleared.
1876   CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
1877 }
1878 
1879 bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
1880   return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
1881       foreground_collector_type_ == kCollectorTypeCMS;
1882 }
1883 
1884 HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
1885   Thread* self = Thread::Current();
1886   // Increment the count of requested homogeneous space compactions.
1887   count_requested_homogeneous_space_compaction_++;
1888   // Store performed homogeneous space compaction at a new request arrival.
1889   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1890   Locks::mutator_lock_->AssertNotHeld(self);
1891   {
1892     ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
1893     MutexLock mu(self, *gc_complete_lock_);
1894     // Ensure there is only one GC at a time.
1895     WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
1896     // Homogeneous space compaction is a copying transition, so we can't run it if the moving GC
1897     // disable count is non-zero.
1898     // If the collector type changed to something which doesn't benefit from homogeneous space
1899     // compaction, exit.
1900     if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
1901         !main_space_->CanMoveObjects()) {
1902       return kErrorReject;
1903     }
1904     if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
1905       return kErrorUnsupported;
1906     }
1907     collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
1908   }
1909   if (Runtime::Current()->IsShuttingDown(self)) {
1910     // Don't allow heap transitions to happen if the runtime is shutting down since these can
1911     // cause objects to get finalized.
1912     FinishGC(self, collector::kGcTypeNone);
1913     return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
1914   }
1915   collector::GarbageCollector* collector;
1916   {
1917     ScopedSuspendAll ssa(__FUNCTION__);
1918     uint64_t start_time = NanoTime();
1919     // Launch compaction.
1920     space::MallocSpace* to_space = main_space_backup_.release();
1921     space::MallocSpace* from_space = main_space_;
1922     to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1923     const uint64_t space_size_before_compaction = from_space->Size();
1924     AddSpace(to_space);
1925     // Make sure that we will have enough room to copy.
1926     CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
1927     collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
1928     const uint64_t space_size_after_compaction = to_space->Size();
1929     main_space_ = to_space;
1930     main_space_backup_.reset(from_space);
1931     RemoveSpace(from_space);
1932     SetSpaceAsDefault(main_space_);  // Set as default to reset the proper dlmalloc space.
1933     // Update performed homogeneous space compaction count.
1934     count_performed_homogeneous_space_compaction_++;
1935     // Print the statistics log and resume all threads.
1936     uint64_t duration = NanoTime() - start_time;
1937     VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
1938                << PrettySize(space_size_before_compaction) << " -> "
1939                << PrettySize(space_size_after_compaction) << " compact-ratio: "
1940                << std::fixed << static_cast<double>(space_size_after_compaction) /
1941                static_cast<double>(space_size_before_compaction);
1942   }
1943   // Finish GC.
1944   reference_processor_->EnqueueClearedReferences(self);
1945   GrowForUtilization(semi_space_collector_);
1946   LogGC(kGcCauseHomogeneousSpaceCompact, collector);
1947   FinishGC(self, collector::kGcTypeFull);
1948   {
1949     ScopedObjectAccess soa(self);
1950     soa.Vm()->UnloadNativeLibraries();
1951   }
1952   return HomogeneousSpaceCompactResult::kSuccess;
1953 }
1954 
1955 void Heap::TransitionCollector(CollectorType collector_type) {
1956   if (collector_type == collector_type_) {
1957     return;
1958   }
1959   // Collector transition must not happen with CC.
1960   CHECK(!kUseReadBarrier);
1961   VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
1962              << " -> " << static_cast<int>(collector_type);
1963   uint64_t start_time = NanoTime();
1964   uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
1965   Runtime* const runtime = Runtime::Current();
1966   Thread* const self = Thread::Current();
1967   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1968   Locks::mutator_lock_->AssertNotHeld(self);
1969   // Busy wait until we can GC (StartGC can fail if we have a non-zero
1970   // compacting_gc_disable_count_; this should rarely occur).
1971   for (;;) {
1972     {
1973       ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
1974       MutexLock mu(self, *gc_complete_lock_);
1975       // Ensure there is only one GC at a time.
1976       WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
1977       // Currently we only need a heap transition if we switch from a moving collector to a
1978       // non-moving one, or vice versa.
1979       const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
1980       // If someone else beat us to it and changed the collector before we could, exit.
1981       // This is safe to do before the suspend all since we set the collector_type_running_ before
1982       // we exit the loop. If another thread attempts to do the heap transition before we exit,
1983       // then it would get blocked on WaitForGcToCompleteLocked.
1984       if (collector_type == collector_type_) {
1985         return;
1986       }
1987       // GC can be disabled if someone has used GetPrimitiveArrayCritical but has not yet released it.
1988       if (!copying_transition || disable_moving_gc_count_ == 0) {
1989         // TODO: Not hard code in semi-space collector?
1990         collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
1991         break;
1992       }
1993     }
1994     usleep(1000);
1995   }
1996   if (runtime->IsShuttingDown(self)) {
1997     // Don't allow heap transitions to happen if the runtime is shutting down since these can
1998     // cause objects to get finalized.
1999     FinishGC(self, collector::kGcTypeNone);
2000     return;
2001   }
2002   collector::GarbageCollector* collector = nullptr;
2003   {
2004     ScopedSuspendAll ssa(__FUNCTION__);
2005     switch (collector_type) {
2006       case kCollectorTypeSS: {
2007         if (!IsMovingGc(collector_type_)) {
2008           // Create the bump pointer space from the backup space.
2009           CHECK(main_space_backup_ != nullptr);
2010           std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
2011           // We are transitioning from a non-moving GC to a moving GC; since we copied from the
2012           // bump pointer space during the last transition, it will be protected.
2013           CHECK(mem_map != nullptr);
2014           mem_map->Protect(PROT_READ | PROT_WRITE);
2015           bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
2016                                                                           mem_map.release());
2017           AddSpace(bump_pointer_space_);
2018           collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
2019           // Use the now empty main space mem map for the bump pointer temp space.
2020           mem_map.reset(main_space_->ReleaseMemMap());
2021           // Unset the pointers just in case.
2022           if (dlmalloc_space_ == main_space_) {
2023             dlmalloc_space_ = nullptr;
2024           } else if (rosalloc_space_ == main_space_) {
2025             rosalloc_space_ = nullptr;
2026           }
2027           // Remove the main space so that we don't try to trim it; this doesn't work for debug
2028           // builds since RosAlloc attempts to read the magic number from a protected page.
2029           RemoveSpace(main_space_);
2030           RemoveRememberedSet(main_space_);
2031           delete main_space_;  // Delete the space since it has been removed.
2032           main_space_ = nullptr;
2033           RemoveRememberedSet(main_space_backup_.get());
2034           main_space_backup_.reset(nullptr);  // Deletes the space.
2035           temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
2036                                                                   mem_map.release());
2037           AddSpace(temp_space_);
2038         }
2039         break;
2040       }
2041       case kCollectorTypeMS:
2042         // Fall through.
2043       case kCollectorTypeCMS: {
2044         if (IsMovingGc(collector_type_)) {
2045           CHECK(temp_space_ != nullptr);
2046           std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
2047           RemoveSpace(temp_space_);
2048           temp_space_ = nullptr;
2049           mem_map->Protect(PROT_READ | PROT_WRITE);
2050           CreateMainMallocSpace(mem_map.get(),
2051                                 kDefaultInitialSize,
2052                                 std::min(mem_map->Size(), growth_limit_),
2053                                 mem_map->Size());
2054           mem_map.release();
2055           // Compact to the main space from the bump pointer space; we don't need to swap semispaces.
2056           AddSpace(main_space_);
2057           collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
2058           mem_map.reset(bump_pointer_space_->ReleaseMemMap());
2059           RemoveSpace(bump_pointer_space_);
2060           bump_pointer_space_ = nullptr;
2061           const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
2062           // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
2063           if (kIsDebugBuild && kUseRosAlloc) {
2064             mem_map->Protect(PROT_READ | PROT_WRITE);
2065           }
2066           main_space_backup_.reset(CreateMallocSpaceFromMemMap(
2067               mem_map.get(),
2068               kDefaultInitialSize,
2069               std::min(mem_map->Size(), growth_limit_),
2070               mem_map->Size(),
2071               name,
2072               true));
2073           if (kIsDebugBuild && kUseRosAlloc) {
2074             mem_map->Protect(PROT_NONE);
2075           }
2076           mem_map.release();
2077         }
2078         break;
2079       }
2080       default: {
2081         LOG(FATAL) << "Attempted to transition to invalid collector type "
2082                    << static_cast<size_t>(collector_type);
2083         break;
2084       }
2085     }
2086     ChangeCollector(collector_type);
2087   }
2088   // Can't call into Java code with all threads suspended.
2089   reference_processor_->EnqueueClearedReferences(self);
2090   uint64_t duration = NanoTime() - start_time;
2091   GrowForUtilization(semi_space_collector_);
2092   DCHECK(collector != nullptr);
2093   LogGC(kGcCauseCollectorTransition, collector);
2094   FinishGC(self, collector::kGcTypeFull);
2095   {
2096     ScopedObjectAccess soa(self);
2097     soa.Vm()->UnloadNativeLibraries();
2098   }
2099   int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
2100   int32_t delta_allocated = before_allocated - after_allocated;
2101   std::string saved_str;
2102   if (delta_allocated >= 0) {
2103     saved_str = " saved at least " + PrettySize(delta_allocated);
2104   } else {
2105     saved_str = " expanded " + PrettySize(-delta_allocated);
2106   }
2107   VLOG(heap) << "Collector transition to " << collector_type << " took "
2108              << PrettyDuration(duration) << saved_str;
2109 }
2110 
2111 void Heap::ChangeCollector(CollectorType collector_type) {
2112   // TODO: Only do this with all mutators suspended to avoid races.
2113   if (collector_type != collector_type_) {
2114     if (collector_type == kCollectorTypeMC) {
2115       // Don't allow mark compact unless support is compiled in.
2116       CHECK(kMarkCompactSupport);
2117     }
2118     collector_type_ = collector_type;
2119     gc_plan_.clear();
2120     switch (collector_type_) {
2121       case kCollectorTypeCC: {
2122         gc_plan_.push_back(collector::kGcTypeFull);
2123         if (use_tlab_) {
2124           ChangeAllocator(kAllocatorTypeRegionTLAB);
2125         } else {
2126           ChangeAllocator(kAllocatorTypeRegion);
2127         }
2128         break;
2129       }
2130       case kCollectorTypeMC:  // Fall-through.
2131       case kCollectorTypeSS:  // Fall-through.
2132       case kCollectorTypeGSS: {
2133         gc_plan_.push_back(collector::kGcTypeFull);
2134         if (use_tlab_) {
2135           ChangeAllocator(kAllocatorTypeTLAB);
2136         } else {
2137           ChangeAllocator(kAllocatorTypeBumpPointer);
2138         }
2139         break;
2140       }
2141       case kCollectorTypeMS: {
2142         gc_plan_.push_back(collector::kGcTypeSticky);
2143         gc_plan_.push_back(collector::kGcTypePartial);
2144         gc_plan_.push_back(collector::kGcTypeFull);
2145         ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2146         break;
2147       }
2148       case kCollectorTypeCMS: {
2149         gc_plan_.push_back(collector::kGcTypeSticky);
2150         gc_plan_.push_back(collector::kGcTypePartial);
2151         gc_plan_.push_back(collector::kGcTypeFull);
2152         ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2153         break;
2154       }
2155       default: {
2156         UNIMPLEMENTED(FATAL);
2157         UNREACHABLE();
2158       }
2159     }
2160     if (IsGcConcurrent()) {
2161       concurrent_start_bytes_ =
2162           std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
2163     } else {
2164       concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2165     }
2166   }
2167 }
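// The concurrent-start computation above leaves kMinConcurrentRemainingBytes of
// headroom: with (illustrative values) max_allowed_footprint_ = 64 MB and a
// reserve of 512 KB, a concurrent GC is requested once allocation passes
// 64 MB - 512 KB. The std::max guards against underflow when the footprint is
// smaller than the reserve.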
2168 
2169 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
2170 class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
2171  public:
2172   ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
2173       : SemiSpace(heap, false, "zygote collector"),
2174         bin_live_bitmap_(nullptr),
2175         bin_mark_bitmap_(nullptr),
2176         is_running_on_memory_tool_(is_running_on_memory_tool) {}
2177 
2178   void BuildBins(space::ContinuousSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
2179     bin_live_bitmap_ = space->GetLiveBitmap();
2180     bin_mark_bitmap_ = space->GetMarkBitmap();
2181     uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin());
2182     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2183     // Note: This requires traversing the space in increasing order of object addresses.
2184     auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2185       uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
2186       size_t bin_size = object_addr - prev;
2187       // Add the bin spanning from the end of the previous object to the start of the current object.
2188       AddBin(bin_size, prev);
2189       prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
2190     };
2191     bin_live_bitmap_->Walk(visitor);
2192     // Add the last bin, which spans from the end of the last object to the end of the space.
2193     AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev);
2194   }
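  // Worked example of the walk above (illustrative addresses): with live objects
  // at [0x1000, 0x1040) and [0x1100, 0x1150) in a space ending at 0x1200, the
  // visitor records a bin of 0xC0 bytes at 0x1040, and the final AddBin records
  // a bin of 0xB0 bytes at 0x1150.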
2195 
2196  private:
2197   // Maps from bin sizes to locations.
2198   std::multimap<size_t, uintptr_t> bins_;
2199   // Live bitmap of the space which contains the bins.
2200   accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
2201   // Mark bitmap of the space which contains the bins.
2202   accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
2203   const bool is_running_on_memory_tool_;
2204 
2205   void AddBin(size_t size, uintptr_t position) {
2206     if (is_running_on_memory_tool_) {
2207       MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
2208     }
2209     if (size != 0) {
2210       bins_.insert(std::make_pair(size, position));
2211     }
2212   }
2213 
2214   virtual bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const {
2215     // Don't sweep any spaces since we probably blasted the internal accounting of the free list
2216     // allocator.
2217     return false;
2218   }
2219 
2220   virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
2221       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
2222     size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
2223     size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
2224     mirror::Object* forward_address;
2225     // Find the smallest bin which we can move obj in.
2226     auto it = bins_.lower_bound(alloc_size);
2227     if (it == bins_.end()) {
2228       // No available space in the bins, place it in the target space instead (grows the zygote
2229       // space).
2230       size_t bytes_allocated, dummy;
2231       forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy);
2232       if (to_space_live_bitmap_ != nullptr) {
2233         to_space_live_bitmap_->Set(forward_address);
2234       } else {
2235         GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2236         GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
2237       }
2238     } else {
2239       size_t size = it->first;
2240       uintptr_t pos = it->second;
2241       bins_.erase(it);  // Erase the old bin which we replace with the new smaller bin.
2242       forward_address = reinterpret_cast<mirror::Object*>(pos);
2243       // Set the live and mark bits so that sweeping system weaks works properly.
2244       bin_live_bitmap_->Set(forward_address);
2245       bin_mark_bitmap_->Set(forward_address);
2246       DCHECK_GE(size, alloc_size);
2247       // Add a new bin with the remaining space.
2248       AddBin(size - alloc_size, pos + alloc_size);
2249     }
2250     // Copy the object over to its new location. Copy obj_size rather than alloc_size to avoid a valgrind error.
2251     memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
2252     if (kUseBakerReadBarrier) {
2253       obj->AssertReadBarrierState();
2254       forward_address->AssertReadBarrierState();
2255     }
2256     return forward_address;
2257   }
2258 };
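
// Illustrative sketch (not part of the runtime; names, sizes and addresses are hypothetical):
// the best-fit lookup that MarkNonForwardedObject performs on bins_. A std::multimap keyed by
// bin size keeps the free gaps sorted, lower_bound(alloc_size) finds the smallest gap that
// fits, and the leftover space is re-inserted as a smaller bin.
//
//   #include <cstdint>
//   #include <map>
//
//   // Returns the chosen address, or 0 if no bin fits (the caller then grows the target space).
//   uintptr_t BestFit(std::multimap<size_t, uintptr_t>& bins, size_t alloc_size) {
//     auto it = bins.lower_bound(alloc_size);  // Smallest bin with size >= alloc_size.
//     if (it == bins.end()) {
//       return 0;
//     }
//     size_t size = it->first;
//     uintptr_t pos = it->second;
//     bins.erase(it);  // Consume the bin...
//     if (size > alloc_size) {
//       bins.insert({size - alloc_size, pos + alloc_size});  // ...and re-add the remainder.
//     }
//     return pos;
//   }
//
//   // With bins {24 -> 0x1000, 64 -> 0x2000}, BestFit(bins, 32) returns 0x2000 and leaves a
//   // 32-byte bin at 0x2020.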
2259 
2260 void Heap::UnBindBitmaps() {
2261   TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
2262   for (const auto& space : GetContinuousSpaces()) {
2263     if (space->IsContinuousMemMapAllocSpace()) {
2264       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2265       if (alloc_space->HasBoundBitmaps()) {
2266         alloc_space->UnBindBitmaps();
2267       }
2268     }
2269   }
2270 }
2271 
2272 void Heap::PreZygoteFork() {
2273   if (!HasZygoteSpace()) {
2274     // We still want to GC in case there are unreachable non-moving objects that could cause
2275     // suboptimal bin packing when we compact the zygote space.
2276     CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
2277     // Trim the pages at the end of the non-moving space. Trim while not holding the zygote lock since
2278     // the trim process may require locking the mutator lock.
2279     non_moving_space_->Trim();
2280   }
2281   Thread* self = Thread::Current();
2282   MutexLock mu(self, zygote_creation_lock_);
2283   // Check again under the lock in case another thread already created the zygote space.
2284   if (HasZygoteSpace()) {
2285     return;
2286   }
2287   Runtime::Current()->GetInternTable()->AddNewTable();
2288   Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
2289   VLOG(heap) << "Starting PreZygoteFork";
2290   // The end of the non-moving space may be protected; unprotect it so that we can copy the zygote
2291   // there.
2292   non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2293   const bool same_space = non_moving_space_ == main_space_;
2294   if (kCompactZygote) {
2295     // Temporarily disable rosalloc verification because the zygote
2296     // compaction will mess up the rosalloc internal metadata.
2297     ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
2298     ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
2299     zygote_collector.BuildBins(non_moving_space_);
2300     // Create a new bump pointer space which we will compact into.
2301     space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2302                                          non_moving_space_->Limit());
2303     // Compact the bump pointer space to a new zygote bump pointer space.
2304     bool reset_main_space = false;
2305     if (IsMovingGc(collector_type_)) {
2306       if (collector_type_ == kCollectorTypeCC) {
2307         zygote_collector.SetFromSpace(region_space_);
2308       } else {
2309         zygote_collector.SetFromSpace(bump_pointer_space_);
2310       }
2311     } else {
2312       CHECK(main_space_ != nullptr);
2313       CHECK_NE(main_space_, non_moving_space_)
2314           << "Does not make sense to compact within the same space";
2315       // Copy from the main space.
2316       zygote_collector.SetFromSpace(main_space_);
2317       reset_main_space = true;
2318     }
2319     zygote_collector.SetToSpace(&target_space);
2320     zygote_collector.SetSwapSemiSpaces(false);
2321     zygote_collector.Run(kGcCauseCollectorTransition, false);
2322     if (reset_main_space) {
2323       main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2324       madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
2325       MemMap* mem_map = main_space_->ReleaseMemMap();
2326       RemoveSpace(main_space_);
2327       space::Space* old_main_space = main_space_;
2328       CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
2329                             mem_map->Size());
2330       delete old_main_space;
2331       AddSpace(main_space_);
2332     } else {
2333       if (collector_type_ == kCollectorTypeCC) {
2334         region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2335         // We evacuated everything out of the region space; clear the mark bitmap.
2336         region_space_->GetMarkBitmap()->Clear();
2337       } else {
2338         bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2339       }
2340     }
2341     if (temp_space_ != nullptr) {
2342       CHECK(temp_space_->IsEmpty());
2343     }
2344     total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2345     total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2346     // Update the end and write out image.
2347     non_moving_space_->SetEnd(target_space.End());
2348     non_moving_space_->SetLimit(target_space.Limit());
2349     VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
2350   }
2351   // Change the collector to the post zygote one.
2352   ChangeCollector(foreground_collector_type_);
2353   // Save the old space so that we can remove it after we complete creating the zygote space.
2354   space::MallocSpace* old_alloc_space = non_moving_space_;
2355   // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
2356   // the remaining available space.
2357   // Remove the old space before creating the zygote space since creating the zygote space sets
2358   // the old alloc space's bitmaps to null.
2359   RemoveSpace(old_alloc_space);
2360   if (collector::SemiSpace::kUseRememberedSet) {
2361     // Sanity bound check.
2362     FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2363     // Remove the remembered set for the now-zygote space (the old
2364     // non-moving space). Now that we have compacted objects into
2365     // the zygote space, the data in the remembered set is no longer
2366     // needed. The zygote space will instead have a mod-union table
2367     // from this point on.
2368     RemoveRememberedSet(old_alloc_space);
2369   }
2370   // The remaining space becomes the new non-moving space.
2371   zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
2372                                                      &non_moving_space_);
2373   CHECK(!non_moving_space_->CanMoveObjects());
2374   if (same_space) {
2375     main_space_ = non_moving_space_;
2376     SetSpaceAsDefault(main_space_);
2377   }
2378   delete old_alloc_space;
2379   CHECK(HasZygoteSpace()) << "Failed creating zygote space";
2380   AddSpace(zygote_space_);
2381   non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2382   AddSpace(non_moving_space_);
2383   if (kUseBakerReadBarrier && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects) {
2384     // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is
2385     // safe since we mark all of the objects that may reference non-immune objects as gray.
2386     zygote_space_->GetLiveBitmap()->VisitMarkedRange(
2387         reinterpret_cast<uintptr_t>(zygote_space_->Begin()),
2388         reinterpret_cast<uintptr_t>(zygote_space_->Limit()),
2389         [](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2390       CHECK(obj->AtomicSetMarkBit(0, 1));
2391     });
2392   }
2393 
2394   // Create the zygote space mod union table.
2395   accounting::ModUnionTable* mod_union_table =
2396       new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space_);
2397   CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
2398 
2399   if (collector_type_ != kCollectorTypeCC) {
2400     // Set all the cards in the mod-union table since we don't know which objects contain references
2401     // to large objects.
2402     mod_union_table->SetCards();
2403   } else {
2404     // Make sure to clear the zygote space cards so that we don't dirty pages in the next GC. There
2405     // may be dirty cards from the zygote compaction or reference processing. These cards are not
2406     // necessary to have marked since the zygote space may not refer to any objects not in the
2407     // zygote or image spaces at this point.
2408     mod_union_table->ProcessCards();
2409     mod_union_table->ClearTable();
2410 
2411     // For CC we never collect zygote large objects. This means we do not need to set the cards for
2412     // the zygote mod-union table and we can also clear all of the existing image mod-union tables.
2413     // The existing mod-union tables are only for image spaces and may only reference zygote and
2414     // image objects.
2415     for (auto& pair : mod_union_tables_) {
2416       CHECK(pair.first->IsImageSpace());
2417       CHECK(!pair.first->AsImageSpace()->GetImageHeader().IsAppImage());
2418       accounting::ModUnionTable* table = pair.second;
2419       table->ClearTable();
2420     }
2421   }
2422   AddModUnionTable(mod_union_table);
2423   large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
2424   if (collector::SemiSpace::kUseRememberedSet) {
2425     // Add a new remembered set for the post-zygote non-moving space.
2426     accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2427         new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2428                                       non_moving_space_);
2429     CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2430         << "Failed to create post-zygote non-moving space remembered set";
2431     AddRememberedSet(post_zygote_non_moving_space_rem_set);
2432   }
2433 }
2434 
2435 void Heap::FlushAllocStack() {
2436   MarkAllocStackAsLive(allocation_stack_.get());
2437   allocation_stack_->Reset();
2438 }
2439 
2440 void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2441                           accounting::ContinuousSpaceBitmap* bitmap2,
2442                           accounting::LargeObjectBitmap* large_objects,
2443                           accounting::ObjectStack* stack) {
2444   DCHECK(bitmap1 != nullptr);
2445   DCHECK(bitmap2 != nullptr);
2446   const auto* limit = stack->End();
2447   for (auto* it = stack->Begin(); it != limit; ++it) {
2448     const mirror::Object* obj = it->AsMirrorPtr();
2449     if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2450       if (bitmap1->HasAddress(obj)) {
2451         bitmap1->Set(obj);
2452       } else if (bitmap2->HasAddress(obj)) {
2453         bitmap2->Set(obj);
2454       } else {
2455         DCHECK(large_objects != nullptr);
2456         large_objects->Set(obj);
2457       }
2458     }
2459   }
2460 }
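
// Illustrative sketch (hypothetical types; not part of the runtime): the HasAddress dispatch in
// MarkAllocStack is a plain range check. A continuous-space bitmap covers [begin, end) of its
// space, so choosing a bitmap is interval membership; an address outside both continuous spaces
// must belong to a large object.
//
//   #include <cstdint>
//
//   struct BitmapRange {
//     uintptr_t begin;
//     uintptr_t end;
//     bool HasAddress(uintptr_t addr) const { return begin <= addr && addr < end; }
//   };
//
//   // Returns 0 or 1 for the continuous bitmaps, 2 for the large-object fallback.
//   int PickBitmap(const BitmapRange& b1, const BitmapRange& b2, uintptr_t addr) {
//     if (b1.HasAddress(addr)) return 0;
//     if (b2.HasAddress(addr)) return 1;
//     return 2;
//   }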
2461 
2462 void Heap::SwapSemiSpaces() {
2463   CHECK(bump_pointer_space_ != nullptr);
2464   CHECK(temp_space_ != nullptr);
2465   std::swap(bump_pointer_space_, temp_space_);
2466 }
2467 
2468 collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2469                                            space::ContinuousMemMapAllocSpace* source_space,
2470                                            GcCause gc_cause) {
2471   CHECK(kMovingCollector);
2472   if (target_space != source_space) {
2473     // Don't swap spaces since this isn't a typical semi space collection.
2474     semi_space_collector_->SetSwapSemiSpaces(false);
2475     semi_space_collector_->SetFromSpace(source_space);
2476     semi_space_collector_->SetToSpace(target_space);
2477     semi_space_collector_->Run(gc_cause, false);
2478     return semi_space_collector_;
2479   } else {
2480     CHECK(target_space->IsBumpPointerSpace())
2481         << "In-place compaction is only supported for bump pointer spaces";
2482     mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
2483     mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
2484     return mark_compact_collector_;
2485   }
2486 }
2487 
2488 void Heap::TraceHeapSize(size_t heap_size) {
2489   ATRACE_INT("Heap size (KB)", heap_size / KB);
2490 }
2491 
2492 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
2493                                                GcCause gc_cause,
2494                                                bool clear_soft_references) {
2495   Thread* self = Thread::Current();
2496   Runtime* runtime = Runtime::Current();
2497   // If the heap can't run the GC, silently fail and return that no GC was run.
2498   switch (gc_type) {
2499     case collector::kGcTypePartial: {
2500       if (!HasZygoteSpace()) {
2501         return collector::kGcTypeNone;
2502       }
2503       break;
2504     }
2505     default: {
2506       // Other GC types don't have any special cases that make them unrunnable. The main case
2507       // here is full GC.
2508     }
2509   }
2510   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2511   Locks::mutator_lock_->AssertNotHeld(self);
2512   if (self->IsHandlingStackOverflow()) {
2513     // If we are throwing a stack overflow error we probably don't have enough remaining stack
2514     // space to run the GC.
2515     return collector::kGcTypeNone;
2516   }
2517   bool compacting_gc;
2518   {
2519     gc_complete_lock_->AssertNotHeld(self);
2520     ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2521     MutexLock mu(self, *gc_complete_lock_);
2522     // Ensure there is only one GC at a time.
2523     WaitForGcToCompleteLocked(gc_cause, self);
2524     compacting_gc = IsMovingGc(collector_type_);
2525     // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2526     if (compacting_gc && disable_moving_gc_count_ != 0) {
2527       LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2528       return collector::kGcTypeNone;
2529     }
2530     if (gc_disabled_for_shutdown_) {
2531       return collector::kGcTypeNone;
2532     }
2533     collector_type_running_ = collector_type_;
2534   }
2535   if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2536     ++runtime->GetStats()->gc_for_alloc_count;
2537     ++self->GetStats()->gc_for_alloc_count;
2538   }
2539   const uint64_t bytes_allocated_before_gc = GetBytesAllocated();
2540 
2541   if (gc_type == NonStickyGcType()) {
2542     // Move all bytes from new_native_bytes_allocated_ to
2543     // old_native_bytes_allocated_ now that GC has been triggered, resetting
2544     // new_native_bytes_allocated_ to zero in the process.
2545     old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
2546     if (gc_cause == kGcCauseForNativeAllocBlocking) {
2547       MutexLock mu(self, *native_blocking_gc_lock_);
2548       native_blocking_gc_in_progress_ = true;
2549     }
2550   }
2551 
2552   DCHECK_LT(gc_type, collector::kGcTypeMax);
2553   DCHECK_NE(gc_type, collector::kGcTypeNone);
2554 
2555   collector::GarbageCollector* collector = nullptr;
2556   // TODO: Clean this up.
2557   if (compacting_gc) {
2558     DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2559            current_allocator_ == kAllocatorTypeTLAB ||
2560            current_allocator_ == kAllocatorTypeRegion ||
2561            current_allocator_ == kAllocatorTypeRegionTLAB);
2562     switch (collector_type_) {
2563       case kCollectorTypeSS:
2564         // Fall-through.
2565       case kCollectorTypeGSS:
2566         semi_space_collector_->SetFromSpace(bump_pointer_space_);
2567         semi_space_collector_->SetToSpace(temp_space_);
2568         semi_space_collector_->SetSwapSemiSpaces(true);
2569         collector = semi_space_collector_;
2570         break;
2571       case kCollectorTypeCC:
2572         collector = concurrent_copying_collector_;
2573         break;
2574       case kCollectorTypeMC:
2575         mark_compact_collector_->SetSpace(bump_pointer_space_);
2576         collector = mark_compact_collector_;
2577         break;
2578       default:
2579         LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2580     }
2581     if (collector != mark_compact_collector_ && collector != concurrent_copying_collector_) {
2582       temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2583       if (kIsDebugBuild) {
2584         // Try to read each page of the memory map in case mprotect didn't work properly b/19894268.
2585         temp_space_->GetMemMap()->TryReadable();
2586       }
2587       CHECK(temp_space_->IsEmpty());
2588     }
2589     gc_type = collector::kGcTypeFull;  // TODO: Not hard code this in.
2590   } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2591       current_allocator_ == kAllocatorTypeDlMalloc) {
2592     collector = FindCollectorByGcType(gc_type);
2593   } else {
2594     LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2595   }
2596   if (IsGcConcurrent()) {
2597     // Disable concurrent GC check so that we don't have spammy JNI requests.
2598     // This gets recalculated in GrowForUtilization. It is important that it is disabled /
2599     // calculated in the same thread so that there aren't any races that can cause it to become
2600     // permanently disabled. b/17942071
2601     concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2602   }
2603 
2604   CHECK(collector != nullptr)
2605       << "Could not find garbage collector with collector_type="
2606       << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
2607   collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2608   total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2609   total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2610   RequestTrim(self);
2611   // Enqueue cleared references.
2612   reference_processor_->EnqueueClearedReferences(self);
2613   // Grow the heap so that we know when to perform the next GC.
2614   GrowForUtilization(collector, bytes_allocated_before_gc);
2615   LogGC(gc_cause, collector);
2616   FinishGC(self, gc_type);
2617   // Inform DDMS that a GC completed.
2618   Dbg::GcDidFinish();
2619   // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
2620   // deadlocks in case the JNI_OnUnload function does allocations.
2621   {
2622     ScopedObjectAccess soa(self);
2623     soa.Vm()->UnloadNativeLibraries();
2624   }
2625   return gc_type;
2626 }
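
// Illustrative sketch (hypothetical names; standard-library types only): the
// concurrent_start_bytes_ handling above. Storing the maximum size_t makes every "should we
// request a concurrent GC?" check fail until GrowForUtilization recomputes a real threshold;
// doing the disable and the recomputation on the same thread avoids the race that could
// otherwise leave concurrent GC permanently disabled (b/17942071).
//
//   #include <atomic>
//   #include <cstddef>
//   #include <limits>
//
//   std::atomic<size_t> concurrent_start_bytes{std::numeric_limits<size_t>::max()};
//
//   bool ShouldRequestConcurrentGc(size_t bytes_allocated) {
//     // Always false while the sentinel is in place; GrowForUtilization later stores a
//     // threshold derived from the new target footprint.
//     return bytes_allocated >= concurrent_start_bytes.load(std::memory_order_relaxed);
//   }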
2627 
2628 void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
2629   const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2630   const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2631   // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2632   // (mutator time blocked >= long_pause_log_threshold_).
2633   bool log_gc = kLogAllGCs || gc_cause == kGcCauseExplicit;
2634   if (!log_gc && CareAboutPauseTimes()) {
2635     // GC for alloc pauses the allocating thread, so consider it as a pause.
2636     log_gc = duration > long_gc_log_threshold_ ||
2637         (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2638     for (uint64_t pause : pause_times) {
2639       log_gc = log_gc || pause >= long_pause_log_threshold_;
2640     }
2641   }
2642   if (log_gc) {
2643     const size_t percent_free = GetPercentFree();
2644     const size_t current_heap_size = GetBytesAllocated();
2645     const size_t total_memory = GetTotalMemory();
2646     std::ostringstream pause_string;
2647     for (size_t i = 0; i < pause_times.size(); ++i) {
2648       pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2649                    << ((i != pause_times.size() - 1) ? "," : "");
2650     }
2651     LOG(INFO) << gc_cause << " " << collector->GetName()
2652               << " GC freed "  << current_gc_iteration_.GetFreedObjects() << "("
2653               << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2654               << current_gc_iteration_.GetFreedLargeObjects() << "("
2655               << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2656               << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2657               << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2658               << " total " << PrettyDuration((duration / 1000) * 1000);
2659     VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2660   }
2661 }
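
// Illustrative sketch: the (x / 1000) * 1000 in LogGC truncates nanosecond durations to whole
// microseconds so the printed values do not carry meaningless sub-microsecond digits.
//
//   #include <cstdint>
//
//   uint64_t TruncateToMicros(uint64_t duration_ns) {
//     return (duration_ns / 1000) * 1000;  // e.g. 1234567 ns -> 1234000 ns.
//   }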
2662 
2663 void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2664   MutexLock mu(self, *gc_complete_lock_);
2665   collector_type_running_ = kCollectorTypeNone;
2666   if (gc_type != collector::kGcTypeNone) {
2667     last_gc_type_ = gc_type;
2668 
2669     // Update stats.
2670     ++gc_count_last_window_;
2671     if (running_collection_is_blocking_) {
2672       // If the currently running collection was a blocking one,
2673       // increment the counters and reset the flag.
2674       ++blocking_gc_count_;
2675       blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
2676       ++blocking_gc_count_last_window_;
2677     }
2678     // Update the gc count rate histograms if due.
2679     UpdateGcCountRateHistograms();
2680   }
2681   // Reset.
2682   running_collection_is_blocking_ = false;
2683   thread_running_gc_ = nullptr;
2684   // Wake anyone who may have been waiting for the GC to complete.
2685   gc_complete_cond_->Broadcast(self);
2686 }
2687 
2688 void Heap::UpdateGcCountRateHistograms() {
2689   // Invariant: if the time since the last update spans more than
2690   // one window, all the GC runs (if > 0) must have happened in the
2691   // first window, because otherwise the update would already have
2692   // taken place at an earlier GC run. So we report the non-first
2693   // windows with zero counts to the histograms.
2694   DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2695   uint64_t now = NanoTime();
2696   DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
2697   uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
2698   uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
2699   if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
2700     // Record the first window.
2701     gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1);  // Exclude the current run.
2702     blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
2703         blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
2704     // Record the other windows (with zero counts).
2705     for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
2706       gc_count_rate_histogram_.AddValue(0);
2707       blocking_gc_count_rate_histogram_.AddValue(0);
2708     }
2709     // Update the last update time and reset the counters.
2710     last_update_time_gc_count_rate_histograms_ =
2711         (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
2712     gc_count_last_window_ = 1;  // Include the current run.
2713     blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
2714   }
2715   DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2716 }
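
// Illustrative sketch (hypothetical names and window width): the windowing arithmetic above.
// Counts are bucketed into fixed-width windows; when an update spans several windows, the first
// window receives the accumulated count, the intervening windows are reported as zeros, and the
// last-update time is snapped back onto a window boundary.
//
//   #include <cstdint>
//   #include <vector>
//
//   constexpr uint64_t kWindowNs = 10ull * 1000 * 1000 * 1000;  // 10s, for example.
//
//   void RecordWindows(uint64_t now, uint64_t& last_update, uint64_t count,
//                      std::vector<uint64_t>& histogram) {
//     uint64_t elapsed = now - last_update;
//     if (elapsed < kWindowNs) {
//       return;  // Still inside the current window; nothing to record yet.
//     }
//     histogram.push_back(count);  // All runs must have fallen in the first window.
//     for (uint64_t i = 0; i < elapsed / kWindowNs - 1; ++i) {
//       histogram.push_back(0);  // Empty intervening windows.
//     }
//     last_update = (now / kWindowNs) * kWindowNs;  // Snap to a window boundary.
//   }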
2717 
2718 class RootMatchesObjectVisitor : public SingleRootVisitor {
2719  public:
2720   explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
2721 
2722   void VisitRoot(mirror::Object* root, const RootInfo& info)
2723       OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
2724     if (root == obj_) {
2725       LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
2726     }
2727   }
2728 
2729  private:
2730   const mirror::Object* const obj_;
2731 };
2732 
2733 
2734 class ScanVisitor {
2735  public:
2736   void operator()(const mirror::Object* obj) const {
2737     LOG(ERROR) << "Would have rescanned object " << obj;
2738   }
2739 };
2740 
2741 // Verify a reference from an object.
2742 class VerifyReferenceVisitor : public SingleRootVisitor {
2743  public:
2744   VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2745       REQUIRES_SHARED(Locks::mutator_lock_)
2746       : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
2747 
2748   size_t GetFailureCount() const {
2749     return fail_count_->LoadSequentiallyConsistent();
2750   }
2751 
2752   void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const
2753       REQUIRES_SHARED(Locks::mutator_lock_) {
2754     if (verify_referent_) {
2755       VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
2756     }
2757   }
2758 
2759   void operator()(ObjPtr<mirror::Object> obj,
2760                   MemberOffset offset,
2761                   bool is_static ATTRIBUTE_UNUSED) const
2762       REQUIRES_SHARED(Locks::mutator_lock_) {
2763     VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
2764   }
2765 
2766   bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS {
2767     return heap_->IsLiveObjectLocked(obj, true, false, true);
2768   }
2769 
2770   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
2771       REQUIRES_SHARED(Locks::mutator_lock_) {
2772     if (!root->IsNull()) {
2773       VisitRoot(root);
2774     }
2775   }
2776   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
2777       REQUIRES_SHARED(Locks::mutator_lock_) {
2778     const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
2779         root->AsMirrorPtr(), RootInfo(kRootVMInternal));
2780   }
2781 
2782   virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
2783       REQUIRES_SHARED(Locks::mutator_lock_) {
2784     if (root == nullptr) {
2785       LOG(ERROR) << "Root is null with info " << root_info.GetType();
2786     } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
2787       LOG(ERROR) << "Root " << root << " is dead with type " << mirror::Object::PrettyTypeOf(root)
2788           << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
2789     }
2790   }
2791 
2792  private:
2793   // TODO: Fix the no thread safety analysis.
2794   // Returns false on failure.
2795   bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
2796       NO_THREAD_SAFETY_ANALYSIS {
2797     if (ref == nullptr || IsLive(ref)) {
2798       // Verify that the reference is live.
2799       return true;
2800     }
2801     if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
2802       // Print the message only on the first failure to prevent spam.
2803       LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
2804     }
2805     if (obj != nullptr) {
2806       // Only do this part for non roots.
2807       accounting::CardTable* card_table = heap_->GetCardTable();
2808       accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2809       accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2810       uint8_t* card_addr = card_table->CardFromAddr(obj);
2811       LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2812                  << offset << "\n card value = " << static_cast<int>(*card_addr);
2813       if (heap_->IsValidObjectAddress(obj->GetClass())) {
2814         LOG(ERROR) << "Obj type " << obj->PrettyTypeOf();
2815       } else {
2816         LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
2817       }
2818 
2819       // Attempt to find the class inside of the recently freed objects.
2820       space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2821       if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2822         space::MallocSpace* space = ref_space->AsMallocSpace();
2823         mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2824         if (ref_class != nullptr) {
2825           LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2826                      << ref_class->PrettyClass();
2827         } else {
2828           LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
2829         }
2830       }
2831 
2832       if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2833           ref->GetClass()->IsClass()) {
2834         LOG(ERROR) << "Ref type " << ref->PrettyTypeOf();
2835       } else {
2836         LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2837                    << ") is not a valid heap address";
2838       }
2839 
2840       card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
2841       void* cover_begin = card_table->AddrFromCard(card_addr);
2842       void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2843           accounting::CardTable::kCardSize);
2844       LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2845           << "-" << cover_end;
2846       accounting::ContinuousSpaceBitmap* bitmap =
2847           heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
2848 
2849       if (bitmap == nullptr) {
2850         LOG(ERROR) << "Object " << obj << " has no bitmap";
2851         if (!VerifyClassClass(obj->GetClass())) {
2852           LOG(ERROR) << "Object " << obj << " failed class verification!";
2853         }
2854       } else {
2855         // Print out how the object is live.
2856         if (bitmap->Test(obj)) {
2857           LOG(ERROR) << "Object " << obj << " found in live bitmap";
2858         }
2859         if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
2860           LOG(ERROR) << "Object " << obj << " found in allocation stack";
2861         }
2862         if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
2863           LOG(ERROR) << "Object " << obj << " found in live stack";
2864         }
2865         if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2866           LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2867         }
2868         if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2869           LOG(ERROR) << "Ref " << ref << " found in live stack";
2870         }
2871         // Attempt to see if the card table missed the reference.
2872         ScanVisitor scan_visitor;
2873         uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
2874         card_table->Scan<false>(bitmap, byte_cover_begin,
2875                                 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
2876       }
2877 
2878       // Search to see if any of the roots reference our object.
2879       RootMatchesObjectVisitor visitor1(obj);
2880       Runtime::Current()->VisitRoots(&visitor1);
2881       // Search to see if any of the roots reference our reference.
2882       RootMatchesObjectVisitor visitor2(ref);
2883       Runtime::Current()->VisitRoots(&visitor2);
2884     }
2885     return false;
2886   }
2887 
2888   Heap* const heap_;
2889   Atomic<size_t>* const fail_count_;
2890   const bool verify_referent_;
2891 };
2892 
2893 // Verify all references within an object, for use with HeapBitmap::Visit.
2894 class VerifyObjectVisitor {
2895  public:
2896   VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2897       : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
2898 
2899   void operator()(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2900     // Note: we are verifying the references in obj but not obj itself; obj must be live,
2901     // or else we could not have found it in the live bitmap.
2902     VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
2903     // The class doesn't count as a reference, but we should verify it anyway.
2904     obj->VisitReferences(visitor, visitor);
2905   }
2906 
2907   void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
2908     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2909     VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
2910     Runtime::Current()->VisitRoots(&visitor);
2911   }
2912 
2913   size_t GetFailureCount() const {
2914     return fail_count_->LoadSequentiallyConsistent();
2915   }
2916 
2917  private:
2918   Heap* const heap_;
2919   Atomic<size_t>* const fail_count_;
2920   const bool verify_referent_;
2921 };
2922 
2923 void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) {
2924   // Slow path, the allocation stack push back must have already failed.
2925   DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr()));
2926   do {
2927     // TODO: Add handle VerifyObject.
2928     StackHandleScope<1> hs(self);
2929     HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2930     // Push our object into the reserve region of the allocation stack. This is only required due
2931     // to heap verification requiring that roots are live (either in the live bitmap or in the
2932     // allocation stack).
2933     CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
2934     CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2935   } while (!allocation_stack_->AtomicPushBack(obj->Ptr()));
2936 }
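
// Illustrative sketch (hypothetical Stack interface): the push / GC / retry loop above. The
// normal push fails when the allocation stack is full, so the object is parked in the reserve
// region (which ignores the growth limit) to keep it visible as a root during heap verification,
// a sticky GC flushes the stack, and the push is retried. The HandleWrapper in the real code
// additionally keeps obj up to date if the GC moves it.
//
//   template <typename Stack, typename GcFn, typename T>
//   void PushWithInternalGc(Stack& stack, GcFn run_sticky_gc, T* obj) {
//     if (stack.TryPush(obj)) {
//       return;  // Fast path: there was room.
//     }
//     do {
//       stack.PushToReserve(obj);  // Must succeed; keeps obj live for verification.
//       run_sticky_gc();           // Drains the stack, making room.
//     } while (!stack.TryPush(obj));
//   }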
2937 
2938 void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self,
2939                                                           ObjPtr<mirror::Object>* obj) {
2940   // Slow path, the allocation stack push back must have already failed.
2941   DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr()));
2942   StackReference<mirror::Object>* start_address;
2943   StackReference<mirror::Object>* end_address;
2944   while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
2945                                             &end_address)) {
2946     // TODO: Add handle VerifyObject.
2947     StackHandleScope<1> hs(self);
2948     HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2949     // Push our object into the reserve region of the allocation stack. This is only required due
2950     // to heap verification requiring that roots are live (either in the live bitmap or in the
2951     // allocation stack).
2952     CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
2953     // Push into the reserve allocation stack.
2954     CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2955   }
2956   self->SetThreadLocalAllocationStack(start_address, end_address);
2957   // Retry on the new thread-local allocation stack.
2958   CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr()));  // Must succeed.
2959 }
2960 
2961 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
2962 size_t Heap::VerifyHeapReferences(bool verify_referents) {
2963   Thread* self = Thread::Current();
2964   Locks::mutator_lock_->AssertExclusiveHeld(self);
2965   // Let's sort our allocation stacks so that we can efficiently binary search them.
2966   allocation_stack_->Sort();
2967   live_stack_->Sort();
2968   // Since we sorted the allocation stack content, we need to revoke all
2969   // thread-local allocation stacks.
2970   RevokeAllThreadLocalAllocationStacks(self);
2971   Atomic<size_t> fail_count_(0);
2972   VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
2973   // Verify objects in the allocation stack since these will be objects which were:
2974   // 1. Allocated prior to the GC (pre GC verification).
2975   // 2. Allocated during the GC (pre sweep GC verification).
2976   // We don't want to verify the objects in the live stack since they themselves may be
2977   // pointing to dead objects if they are not reachable.
2978   VisitObjectsPaused(visitor);
2979   // Verify the roots:
2980   visitor.VerifyRoots();
2981   if (visitor.GetFailureCount() > 0) {
2982     // Dump mod-union tables.
2983     for (const auto& table_pair : mod_union_tables_) {
2984       accounting::ModUnionTable* mod_union_table = table_pair.second;
2985       mod_union_table->Dump(LOG_STREAM(ERROR) << mod_union_table->GetName() << ": ");
2986     }
2987     // Dump remembered sets.
2988     for (const auto& table_pair : remembered_sets_) {
2989       accounting::RememberedSet* remembered_set = table_pair.second;
2990       remembered_set->Dump(LOG_STREAM(ERROR) << remembered_set->GetName() << ": ");
2991     }
2992     DumpSpaces(LOG_STREAM(ERROR));
2993   }
2994   return visitor.GetFailureCount();
2995 }
2996 
2997 class VerifyReferenceCardVisitor {
2998  public:
2999   VerifyReferenceCardVisitor(Heap* heap, bool* failed)
3000       REQUIRES_SHARED(Locks::mutator_lock_,
3001                             Locks::heap_bitmap_lock_)
3002       : heap_(heap), failed_(failed) {
3003   }
3004 
3005   // There are no card marks for native roots on a class.
3006   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
3007       const {}
3008   void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
3009 
3010   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS; requires support for
3011   // annotalysis on visitors.
3012   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
3013       NO_THREAD_SAFETY_ANALYSIS {
3014     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
3015     // Filter out class references since changing an object's class does not mark the card as dirty.
3016     // Also handles large objects, since the only reference they hold is a class reference.
3017     if (ref != nullptr && !ref->IsClass()) {
3018       accounting::CardTable* card_table = heap_->GetCardTable();
3019       // If the object is not dirty but is referencing something in the live stack (other than a
3020       // class), then it must be on a dirty card.
3021       if (!card_table->AddrIsInCardTable(obj)) {
3022         LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
3023         *failed_ = true;
3024       } else if (!card_table->IsDirty(obj)) {
3025         // TODO: Check mod-union tables.
3026         // Card should be either kCardDirty if it got re-dirtied after we aged it, or
3027       // kCardDirty - 1 if it didn't get touched since we aged it.
3028         accounting::ObjectStack* live_stack = heap_->live_stack_.get();
3029         if (live_stack->ContainsSorted(ref)) {
3030           if (live_stack->ContainsSorted(obj)) {
3031             LOG(ERROR) << "Object " << obj << " found in live stack";
3032           }
3033           if (heap_->GetLiveBitmap()->Test(obj)) {
3034             LOG(ERROR) << "Object " << obj << " found in live bitmap";
3035           }
3036           LOG(ERROR) << "Object " << obj << " " << mirror::Object::PrettyTypeOf(obj)
3037                     << " references " << ref << " " << mirror::Object::PrettyTypeOf(ref)
3038                     << " in live stack";
3039 
3040           // Print which field of the object is dead.
3041           if (!obj->IsObjectArray()) {
3042             mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
3043             CHECK(klass != nullptr);
3044             for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
3045               if (field.GetOffset().Int32Value() == offset.Int32Value()) {
3046                 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
3047                            << field.PrettyField();
3048                 break;
3049               }
3050             }
3051           } else {
3052             mirror::ObjectArray<mirror::Object>* object_array =
3053                 obj->AsObjectArray<mirror::Object>();
3054             for (int32_t i = 0; i < object_array->GetLength(); ++i) {
3055               if (object_array->Get(i) == ref) {
3056                 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
3057               }
3058             }
3059           }
3060 
3061           *failed_ = true;
3062         }
3063       }
3064     }
3065   }
3066 
3067  private:
3068   Heap* const heap_;
3069   bool* const failed_;
3070 };
3071 
3072 class VerifyLiveStackReferences {
3073  public:
3074   explicit VerifyLiveStackReferences(Heap* heap)
3075       : heap_(heap),
3076         failed_(false) {}
3077 
3078   void operator()(mirror::Object* obj) const
3079       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3080     VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
3081     obj->VisitReferences(visitor, VoidFunctor());
3082   }
3083 
3084   bool Failed() const {
3085     return failed_;
3086   }
3087 
3088  private:
3089   Heap* const heap_;
3090   bool failed_;
3091 };
3092 
3093 bool Heap::VerifyMissingCardMarks() {
3094   Thread* self = Thread::Current();
3095   Locks::mutator_lock_->AssertExclusiveHeld(self);
3096   // We need to sort the live stack since we binary search it.
3097   live_stack_->Sort();
3098   // Since we sorted the live stack content, we need to revoke all
3099   // thread-local allocation stacks.
3100   RevokeAllThreadLocalAllocationStacks(self);
3101   VerifyLiveStackReferences visitor(this);
3102   GetLiveBitmap()->Visit(visitor);
3103   // We can verify objects in the live stack since none of these should reference dead objects.
3104   for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
3105     if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
3106       visitor(it->AsMirrorPtr());
3107     }
3108   }
3109   return !visitor.Failed();
3110 }
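
// Illustrative sketch: why the stacks are sorted before verification. The ContainsSorted-style
// membership checks above are binary searches, which require sorted input; sorting once up front
// turns each of the many lookups from O(n) into O(log n).
//
//   #include <algorithm>
//   #include <vector>
//
//   bool ContainsSorted(const std::vector<const void*>& sorted, const void* ptr) {
//     return std::binary_search(sorted.begin(), sorted.end(), ptr);
//   }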
3111 
3112 void Heap::SwapStacks() {
3113   if (kUseThreadLocalAllocationStack) {
3114     live_stack_->AssertAllZero();
3115   }
3116   allocation_stack_.swap(live_stack_);
3117 }
3118 
3119 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
3120   // This must be called only during the pause.
3121   DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
3122   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
3123   MutexLock mu2(self, *Locks::thread_list_lock_);
3124   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
3125   for (Thread* t : thread_list) {
3126     t->RevokeThreadLocalAllocationStack();
3127   }
3128 }
3129 
3130 void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
3131   if (kIsDebugBuild) {
3132     if (rosalloc_space_ != nullptr) {
3133       rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
3134     }
3135     if (bump_pointer_space_ != nullptr) {
3136       bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
3137     }
3138   }
3139 }
3140 
3141 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
3142   if (kIsDebugBuild) {
3143     if (bump_pointer_space_ != nullptr) {
3144       bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
3145     }
3146   }
3147 }
3148 
3149 accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
3150   auto it = mod_union_tables_.find(space);
3151   if (it == mod_union_tables_.end()) {
3152     return nullptr;
3153   }
3154   return it->second;
3155 }
3156 
3157 accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3158   auto it = remembered_sets_.find(space);
3159   if (it == remembered_sets_.end()) {
3160     return nullptr;
3161   }
3162   return it->second;
3163 }
3164 
3165 void Heap::ProcessCards(TimingLogger* timings,
3166                         bool use_rem_sets,
3167                         bool process_alloc_space_cards,
3168                         bool clear_alloc_space_cards) {
3169   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3170   // Clear cards and keep track of cards cleared in the mod-union table.
3171   for (const auto& space : continuous_spaces_) {
3172     accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
3173     accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
3174     if (table != nullptr) {
3175       const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3176           "ImageModUnionClearCards";
3177       TimingLogger::ScopedTiming t2(name, timings);
3178       table->ProcessCards();
3179     } else if (use_rem_sets && rem_set != nullptr) {
3180       DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
3181           << static_cast<int>(collector_type_);
3182       TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
3183       rem_set->ClearCards();
3184     } else if (process_alloc_space_cards) {
3185       TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
3186       if (clear_alloc_space_cards) {
3187         uint8_t* end = space->End();
3188         if (space->IsImageSpace()) {
3189           // Image space end is the end of the mirror objects; it is not necessarily page or card
3190           // aligned. Align up so that the check in ClearCardRange does not fail.
3191           end = AlignUp(end, accounting::CardTable::kCardSize);
3192         }
3193         card_table_->ClearCardRange(space->Begin(), end);
3194       } else {
3195         // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3196         // cards were dirty before the GC started.
3197         // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3198         // -> clean(cleaning thread).
3199         // The race leaves us with either an aged card or an unaged card. Since we checkpoint the
3200         // roots first and then scan / update the mod-union tables, we will always scan either
3201         // card. If we end up with the unaged card, we scan it in the pause.
3202         card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3203                                        VoidFunctor());
3204       }
3205     }
3206   }
3207 }
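
// Illustrative sketch (a simplification; the card values are illustrative, not the exact
// CardTable constants): the card aging used above. A dirty card is demoted one step instead of
// being cleared, so the GC can still distinguish "dirtied before the GC started" from "clean",
// while a card dirtied during the GC shows up as fully dirty again.
//
//   #include <cstdint>
//
//   constexpr uint8_t kCardClean = 0;
//   constexpr uint8_t kCardDirty = 0x70;
//
//   uint8_t AgeCard(uint8_t card) {
//     // Dirty -> aged (kCardDirty - 1); everything else -> clean.
//     return (card == kCardDirty) ? static_cast<uint8_t>(card - 1) : kCardClean;
//   }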
3208 
3209 struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
3210   virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
3211     return obj;
3212   }
3213   virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {
3214   }
3215 };
3216 
3217 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3218   Thread* const self = Thread::Current();
3219   TimingLogger* const timings = current_gc_iteration_.GetTimings();
3220   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3221   if (verify_pre_gc_heap_) {
3222     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
3223     size_t failures = VerifyHeapReferences();
3224     if (failures > 0) {
3225       LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3226           << " failures";
3227     }
3228   }
3229   // Check that all objects which reference things in the live stack are on dirty cards.
3230   if (verify_missing_card_marks_) {
3231     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
3232     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
3233     SwapStacks();
3234     // Sort the live stack so that we can quickly binary search it later.
3235     CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3236                                     << " missing card mark verification failed\n" << DumpSpaces();
3237     SwapStacks();
3238   }
3239   if (verify_mod_union_table_) {
3240     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
3241     ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
3242     for (const auto& table_pair : mod_union_tables_) {
3243       accounting::ModUnionTable* mod_union_table = table_pair.second;
3244       IdentityMarkHeapReferenceVisitor visitor;
3245       mod_union_table->UpdateAndMarkReferences(&visitor);
3246       mod_union_table->Verify();
3247     }
3248   }
3249 }
3250 
3251 void Heap::PreGcVerification(collector::GarbageCollector* gc) {
3252   if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
3253     collector::GarbageCollector::ScopedPause pause(gc, false);
3254     PreGcVerificationPaused(gc);
3255   }
3256 }
3257 
3258 void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) {
3259   // TODO: Add a new runtime option for this?
3260   if (verify_pre_gc_rosalloc_) {
3261     RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
3262   }
3263 }
3264 
3265 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
3266   Thread* const self = Thread::Current();
3267   TimingLogger* const timings = current_gc_iteration_.GetTimings();
3268   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3269   // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3270   // reachable objects.
3271   if (verify_pre_sweeping_heap_) {
3272     TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
3273     CHECK_NE(self->GetState(), kRunnable);
3274     {
3275       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3276       // Swapping bound bitmaps does nothing.
3277       gc->SwapBitmaps();
3278     }
3279     // Pass in false since concurrent reference processing can mean that the reference referents
3280     // may point to dead objects at the point at which PreSweepingGcVerification is called.
3281     size_t failures = VerifyHeapReferences(false);
3282     if (failures > 0) {
3283       LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3284           << " failures";
3285     }
3286     {
3287       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3288       gc->SwapBitmaps();
3289     }
3290   }
3291   if (verify_pre_sweeping_rosalloc_) {
3292     RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3293   }
3294 }
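
// Illustrative sketch (hypothetical helper): the swap / verify / swap pattern above. Before
// sweeping, the mark bitmap holds the post-GC notion of liveness, so the bitmaps are swapped to
// make verification consult it as the "live" bitmap, then swapped back so sweeping proceeds
// against the original bitmaps.
//
//   #include <cstddef>
//   #include <utility>
//
//   template <typename Bitmap, typename VerifyFn>
//   size_t VerifyAgainstMarkBitmap(Bitmap*& live, Bitmap*& mark, VerifyFn verify) {
//     std::swap(live, mark);  // Verification now treats marked objects as live.
//     size_t failures = verify();
//     std::swap(live, mark);  // Restore for the actual sweep.
//     return failures;
//   }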
3295 
3296 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3297   // Only pause if we have to do some verification.
3298   Thread* const self = Thread::Current();
3299   TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
3300   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3301   if (verify_system_weaks_) {
3302     ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3303     collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3304     mark_sweep->VerifySystemWeaks();
3305   }
3306   if (verify_post_gc_rosalloc_) {
3307     RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
3308   }
3309   if (verify_post_gc_heap_) {
3310     TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
3311     size_t failures = VerifyHeapReferences();
3312     if (failures > 0) {
3313       LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3314           << " failures";
3315     }
3316   }
3317 }
3318 
3319 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
3320   if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
3321     collector::GarbageCollector::ScopedPause pause(gc, false);
3322     PostGcVerificationPaused(gc);
3323   }
3324 }
3325 
3326 void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
3327   TimingLogger::ScopedTiming t(name, timings);
3328   for (const auto& space : continuous_spaces_) {
3329     if (space->IsRosAllocSpace()) {
3330       VLOG(heap) << name << " : " << space->GetName();
3331       space->AsRosAllocSpace()->Verify();
3332     }
3333   }
3334 }
3335 
3336 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
3337   ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
3338   MutexLock mu(self, *gc_complete_lock_);
3339   return WaitForGcToCompleteLocked(cause, self);
3340 }
3341 
3342 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
3343   collector::GcType last_gc_type = collector::kGcTypeNone;
3344   GcCause last_gc_cause = kGcCauseNone;
3345   uint64_t wait_start = NanoTime();
3346   while (collector_type_running_ != kCollectorTypeNone) {
3347     if (self != task_processor_->GetRunningThread()) {
3348       // The current thread is about to wait for a currently running
3349       // collection to finish. If the waiting thread is not the heap
3350       // task daemon thread, the currently running collection is
3351       // considered as a blocking GC.
3352       running_collection_is_blocking_ = true;
3353       VLOG(gc) << "Waiting for a blocking GC " << cause;
3354     }
3355     ScopedTrace trace("GC: Wait For Completion");
3356     // We must wait: change the thread state, then sleep on gc_complete_cond_.
3357     gc_complete_cond_->Wait(self);
3358     last_gc_type = last_gc_type_;
3359     last_gc_cause = last_gc_cause_;
3360   }
3361   uint64_t wait_time = NanoTime() - wait_start;
3362   total_wait_time_ += wait_time;
3363   if (wait_time > long_pause_log_threshold_) {
3364     LOG(INFO) << "WaitForGcToComplete blocked " << cause << " on " << last_gc_cause << " for "
3365               << PrettyDuration(wait_time);
3366   }
3367   if (self != task_processor_->GetRunningThread()) {
3368     // The current thread is about to run a collection. If the thread
3369     // is not the heap task daemon thread, the collection is considered
3370     // a blocking GC (i.e., the thread blocks itself).
3371     running_collection_is_blocking_ = true;
3372     // Don't log fake "GC" types that are only used for debugger or hidden APIs. If we log these,
3373     // it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
3374     if (cause == kGcCauseForAlloc ||
3375         cause == kGcCauseForNativeAlloc ||
3376         cause == kGcCauseForNativeAllocBlocking ||
3377         cause == kGcCauseDisableMovingGc) {
3378       VLOG(gc) << "Starting a blocking GC " << cause;
3379     }
3380   }
3381   return last_gc_type;
3382 }
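// Illustrative caller sketch (editorial, not part of the original source; the GC cause shown is
// just an example): a thread that must not race an in-flight collection blocks first, then
// inspects what ran while it waited.
//
//   collector::GcType last = heap->WaitForGcToComplete(kGcCauseForAlloc, Thread::Current());
//   if (last == collector::kGcTypeNone) {
//     // No collection completed while we were waiting.
//   }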
3383 
3384 void Heap::DumpForSigQuit(std::ostream& os) {
3385   os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
3386      << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
3387   DumpGcPerformanceInfo(os);
3388 }
3389 
3390 size_t Heap::GetPercentFree() {
3391   return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
3392 }
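// For example (editorial, illustrative numbers): with 32MB free against a 128MB allowed
// footprint, this returns 25.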
3393 
3394 void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
3395   if (max_allowed_footprint > GetMaxMemory()) {
3396     VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
3397              << PrettySize(GetMaxMemory());
3398     max_allowed_footprint = GetMaxMemory();
3399   }
3400   max_allowed_footprint_ = max_allowed_footprint;
3401 }
3402 
3403 bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
3404   if (kMovingCollector) {
3405     space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true);
3406     if (space != nullptr) {
3407       // TODO: Check large object?
3408       return space->CanMoveObjects();
3409     }
3410   }
3411   return false;
3412 }
3413 
3414 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
3415   for (const auto& collector : garbage_collectors_) {
3416     if (collector->GetCollectorType() == collector_type_ &&
3417         collector->GetGcType() == gc_type) {
3418       return collector;
3419     }
3420   }
3421   return nullptr;
3422 }
3423 
3424 double Heap::HeapGrowthMultiplier() const {
3425   // If we don't care about pause times, we are in the background, so return 1.0.
3426   if (!CareAboutPauseTimes()) {
3427     return 1.0;
3428   }
3429   return foreground_heap_growth_multiplier_;
3430 }
3431 
3432 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
3433                               uint64_t bytes_allocated_before_gc) {
3434   // We know what our utilization is at this moment.
3435   // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
3436   const uint64_t bytes_allocated = GetBytesAllocated();
3437   // Trace the new heap size after the GC is finished.
3438   TraceHeapSize(bytes_allocated);
3439   uint64_t target_size;
3440   collector::GcType gc_type = collector_ran->GetGcType();
3441   const double multiplier = HeapGrowthMultiplier();  // Use the multiplier to grow more for
3442   // foreground.
3443   const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
3444   const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
3445   if (gc_type != collector::kGcTypeSticky) {
3446     // Grow the heap for non sticky GC.
3447     ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
3448     CHECK_GE(delta, 0);
3449     target_size = bytes_allocated + delta * multiplier;
3450     target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
3451     target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
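    // Worked example (editorial; the 0.75 target utilization and 2.0 foreground multiplier are
    // assumed values): with 60MB allocated, delta = 60MB / 0.75 - 60MB = 20MB, so the raw target
    // is 60MB + 20MB * 2.0 = 100MB, which the two lines above clamp into
    // [bytes_allocated + adjusted_min_free, bytes_allocated + adjusted_max_free].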
3452     next_gc_type_ = collector::kGcTypeSticky;
3453   } else {
3454     collector::GcType non_sticky_gc_type = NonStickyGcType();
3455     // Find what the next non sticky collector will be.
3456     collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
3457     // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3458     // do another sticky collection next.
3459     // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
3460     // pathological case where dead objects that aren't reclaimed by the sticky GC could accumulate
3461     // if the sticky GC throughput always remained >= the full/partial throughput.
3462     if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
3463         non_sticky_collector->GetEstimatedMeanThroughput() &&
3464         non_sticky_collector->NumberOfIterations() > 0 &&
3465         bytes_allocated <= max_allowed_footprint_) {
3466       next_gc_type_ = collector::kGcTypeSticky;
3467     } else {
3468       next_gc_type_ = non_sticky_gc_type;
3469     }
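    // Illustrative decision (editorial; assume kStickyGcThroughputAdjustment == 1.0 for the
    // example): if the sticky GC just achieved 300MB/s while the non-sticky collector's mean is
    // 250MB/s and we are under the footprint limit, the next GC stays sticky; at 200MB/s it would
    // fall back to the non-sticky type.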
3470     // If we have freed enough memory, shrink the heap back down.
3471     if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
3472       target_size = bytes_allocated + adjusted_max_free;
3473     } else {
3474       target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
3475     }
3476   }
3477   if (!ignore_max_footprint_) {
3478     SetIdealFootprint(target_size);
3479     if (IsGcConcurrent()) {
3480       const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
3481           current_gc_iteration_.GetFreedLargeObjectBytes() +
3482           current_gc_iteration_.GetFreedRevokeBytes();
3483       // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3484       // how many bytes were allocated during the GC we need to add freed_bytes back on.
3485       CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
3486       const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
3487           bytes_allocated_before_gc;
3488       // Calculate when to perform the next ConcurrentGC.
3489       // Calculate the estimated GC duration.
3490       const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
3491       // Estimate how many remaining bytes we will have when we need to start the next GC.
3492       size_t remaining_bytes = bytes_allocated_during_gc * gc_duration_seconds;
3493       remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
3494       remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
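      // Worked example (editorial, illustrative numbers): if 2MB were allocated during a GC that
      // took 0.1s, remaining_bytes = 2MB * 0.1 ~= 205KB, already inside the
      // [kMinConcurrentRemainingBytes, kMaxConcurrentRemainingBytes] = [128KB, 512KB] clamp, so
      // the next concurrent GC starts ~205KB before the footprint limit would be hit.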
3495       if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
3496         // A situation that should never happen: from the estimated allocation rate we would
3497         // exceed the application's entire footprint. Schedule another GC nearly straight
3498         // away.
3499         remaining_bytes = kMinConcurrentRemainingBytes;
3500       }
3501       DCHECK_LE(remaining_bytes, max_allowed_footprint_);
3502       DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
3503       // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3504       // allocation rate is very high, remaining_bytes could tell us that we should start a GC
3505       // right away.
3506       concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
3507                                          static_cast<size_t>(bytes_allocated));
3508     }
3509   }
3510 }
3511 
3512 void Heap::ClampGrowthLimit() {
3513   // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
3514   ScopedObjectAccess soa(Thread::Current());
3515   WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
3516   capacity_ = growth_limit_;
3517   for (const auto& space : continuous_spaces_) {
3518     if (space->IsMallocSpace()) {
3519       gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3520       malloc_space->ClampGrowthLimit();
3521     }
3522   }
3523   // The backup space isn't added to continuous_spaces_ for performance reasons, so clamp it here.
3524   if (main_space_backup_.get() != nullptr) {
3525     main_space_backup_->ClampGrowthLimit();
3526   }
3527 }
3528 
3529 void Heap::ClearGrowthLimit() {
3530   growth_limit_ = capacity_;
3531   ScopedObjectAccess soa(Thread::Current());
3532   for (const auto& space : continuous_spaces_) {
3533     if (space->IsMallocSpace()) {
3534       gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3535       malloc_space->ClearGrowthLimit();
3536       malloc_space->SetFootprintLimit(malloc_space->Capacity());
3537     }
3538   }
3539   // The backup space isn't added to continuous_spaces_ for performance reasons, so clear it here.
3540   if (main_space_backup_.get() != nullptr) {
3541     main_space_backup_->ClearGrowthLimit();
3542     main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3543   }
3544 }
3545 
3546 void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
3547   ScopedObjectAccess soa(self);
3548   ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
3549   jvalue args[1];
3550   args[0].l = arg.get();
3551   InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
3552   // Restore object in case it gets moved.
3553   *object = soa.Decode<mirror::Object>(arg.get());
3554 }
3555 
3556 void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
3557                                             bool force_full,
3558                                             ObjPtr<mirror::Object>* obj) {
3559   StackHandleScope<1> hs(self);
3560   HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3561   RequestConcurrentGC(self, kGcCauseBackground, force_full);
3562 }
3563 
3564 class Heap::ConcurrentGCTask : public HeapTask {
3565  public:
3566   ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
3567       : HeapTask(target_time), cause_(cause), force_full_(force_full) {}
3568   virtual void Run(Thread* self) OVERRIDE {
3569     gc::Heap* heap = Runtime::Current()->GetHeap();
3570     heap->ConcurrentGC(self, cause_, force_full_);
3571     heap->ClearConcurrentGCRequest();
3572   }
3573 
3574  private:
3575   const GcCause cause_;
3576   const bool force_full_;  // If true, force full (or partial) collection.
3577 };
3578 
3579 static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
3580   Runtime* runtime = Runtime::Current();
3581   return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3582       !self->IsHandlingStackOverflow();
3583 }
3584 
3585 void Heap::ClearConcurrentGCRequest() {
3586   concurrent_gc_pending_.StoreRelaxed(false);
3587 }
3588 
3589 void Heap::RequestConcurrentGC(Thread* self, GcCause cause, bool force_full) {
3590   if (CanAddHeapTask(self) &&
3591       concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
3592     task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(),  // Start straight away.
3593                                                         cause,
3594                                                         force_full));
3595   }
3596 }
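// Editorial note on the gating above: the strong compare-exchange of concurrent_gc_pending_ from
// false to true guarantees at most one queued ConcurrentGCTask; requesters that lose the race
// return without queueing, and ClearConcurrentGCRequest() resets the flag once the task has run.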
3597 
3598 void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full) {
3599   if (!Runtime::Current()->IsShuttingDown(self)) {
3600     // Wait for any GCs currently running to finish.
3601     if (WaitForGcToComplete(cause, self) == collector::kGcTypeNone) {
3602       // If we can't run the GC type we wanted to run, find the next appropriate one and try
3603       // that instead. E.g. can't do partial, so do full instead.
3604       collector::GcType next_gc_type = next_gc_type_;
3605       // If forcing full and next gc type is sticky, override with a non-sticky type.
3606       if (force_full && next_gc_type == collector::kGcTypeSticky) {
3607         next_gc_type = NonStickyGcType();
3608       }
3609       if (CollectGarbageInternal(next_gc_type, cause, false) == collector::kGcTypeNone) {
3610         for (collector::GcType gc_type : gc_plan_) {
3611           // Attempt to run the collector, if we succeed, we are done.
3612           if (gc_type > next_gc_type &&
3613               CollectGarbageInternal(gc_type, cause, false) != collector::kGcTypeNone) {
3614             break;
3615           }
3616         }
3617       }
3618     }
3619   }
3620 }
3621 
3622 class Heap::CollectorTransitionTask : public HeapTask {
3623  public:
3624   explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
3625 
3626   virtual void Run(Thread* self) OVERRIDE {
3627     gc::Heap* heap = Runtime::Current()->GetHeap();
3628     heap->DoPendingCollectorTransition();
3629     heap->ClearPendingCollectorTransition(self);
3630   }
3631 };
3632 
3633 void Heap::ClearPendingCollectorTransition(Thread* self) {
3634   MutexLock mu(self, *pending_task_lock_);
3635   pending_collector_transition_ = nullptr;
3636 }
3637 
3638 void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
3639   Thread* self = Thread::Current();
3640   desired_collector_type_ = desired_collector_type;
3641   if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
3642     return;
3643   }
3644   if (collector_type_ == kCollectorTypeCC) {
3645     // For CC, we invoke a full compaction when going to the background, but the collector type
3646     // doesn't change.
3647     DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground);
3648   }
3649   DCHECK_NE(collector_type_, kCollectorTypeCCBackground);
3650   CollectorTransitionTask* added_task = nullptr;
3651   const uint64_t target_time = NanoTime() + delta_time;
3652   {
3653     MutexLock mu(self, *pending_task_lock_);
3654     // If we have an existing collector transition, update the target time to be the new target.
3655     if (pending_collector_transition_ != nullptr) {
3656       task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
3657       return;
3658     }
3659     added_task = new CollectorTransitionTask(target_time);
3660     pending_collector_transition_ = added_task;
3661   }
3662   task_processor_->AddTask(self, added_task);
3663 }
3664 
3665 class Heap::HeapTrimTask : public HeapTask {
3666  public:
3667   explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
3668   virtual void Run(Thread* self) OVERRIDE {
3669     gc::Heap* heap = Runtime::Current()->GetHeap();
3670     heap->Trim(self);
3671     heap->ClearPendingTrim(self);
3672   }
3673 };
3674 
3675 void Heap::ClearPendingTrim(Thread* self) {
3676   MutexLock mu(self, *pending_task_lock_);
3677   pending_heap_trim_ = nullptr;
3678 }
3679 
3680 void Heap::RequestTrim(Thread* self) {
3681   if (!CanAddHeapTask(self)) {
3682     return;
3683   }
3684   // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3685   // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3686   // a space it will hold its lock and can become a cause of jank.
3687   // Note, the large object space trims itself and the Zygote space was trimmed at fork time
3688   // and is unchanging since.
3689 
3690   // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3691   // because that only marks object heads, so a large array looks like lots of empty space. We
3692   // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
3693   // to utilization (which is probably inversely proportional to how much benefit we can expect).
3694   // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3695   // not how much use we're making of those pages.
3696   HeapTrimTask* added_task = nullptr;
3697   {
3698     MutexLock mu(self, *pending_task_lock_);
3699     if (pending_heap_trim_ != nullptr) {
3700       // Already have a heap trim request in task processor, ignore this request.
3701       return;
3702     }
3703     added_task = new HeapTrimTask(kHeapTrimWait);
3704     pending_heap_trim_ = added_task;
3705   }
3706   task_processor_->AddTask(self, added_task);
3707 }
3708 
3709 void Heap::RevokeThreadLocalBuffers(Thread* thread) {
3710   if (rosalloc_space_ != nullptr) {
3711     size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3712     if (freed_bytes_revoke > 0U) {
3713       num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3714       CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3715     }
3716   }
3717   if (bump_pointer_space_ != nullptr) {
3718     CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
3719   }
3720   if (region_space_ != nullptr) {
3721     CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
3722   }
3723 }
3724 
3725 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3726   if (rosalloc_space_ != nullptr) {
3727     size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3728     if (freed_bytes_revoke > 0U) {
3729       num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3730       CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3731     }
3732   }
3733 }
3734 
3735 void Heap::RevokeAllThreadLocalBuffers() {
3736   if (rosalloc_space_ != nullptr) {
3737     size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
3738     if (freed_bytes_revoke > 0U) {
3739       num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3740       CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3741     }
3742   }
3743   if (bump_pointer_space_ != nullptr) {
3744     CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
3745   }
3746   if (region_space_ != nullptr) {
3747     CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
3748   }
3749 }
3750 
3751 bool Heap::IsGCRequestPending() const {
3752   return concurrent_gc_pending_.LoadRelaxed();
3753 }
3754 
3755 void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
3756   env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
3757                             WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
3758                             static_cast<jlong>(timeout));
3759 }
3760 
3761 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
3762   // See the REDESIGN section of go/understanding-register-native-allocation
3763   // for an explanation of how RegisterNativeAllocation works.
3764   size_t new_value = bytes + new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
3765   if (new_value > NativeAllocationBlockingGcWatermark()) {
3766     // Wait for a new GC to finish and finalizers to run, because the
3767     // allocation rate is too high.
3768     Thread* self = ThreadForEnv(env);
3769 
3770     bool run_gc = false;
3771     {
3772       MutexLock mu(self, *native_blocking_gc_lock_);
3773       uint32_t initial_gcs_finished = native_blocking_gcs_finished_;
3774       if (native_blocking_gc_in_progress_) {
3775         // A native blocking GC is in progress from the last time the native
3776         // allocation blocking GC watermark was exceeded. Wait for that GC to
3777         // finish before addressing the fact that we exceeded the blocking
3778         // watermark again.
3779         do {
3780           ScopedTrace trace("RegisterNativeAllocation: Wait For Prior Blocking GC Completion");
3781           native_blocking_gc_cond_->Wait(self);
3782         } while (native_blocking_gcs_finished_ == initial_gcs_finished);
3783         initial_gcs_finished++;
3784       }
3785 
3786       // It's possible multiple threads have seen that we exceeded the
3787       // blocking watermark. Ensure that only one of those threads is assigned
3788       // to run the blocking GC. The rest of the threads should instead wait
3789       // for the blocking GC to complete.
3790       if (native_blocking_gcs_finished_ == initial_gcs_finished) {
3791         if (native_blocking_gc_is_assigned_) {
3792           do {
3793             ScopedTrace trace("RegisterNativeAllocation: Wait For Blocking GC Completion");
3794             native_blocking_gc_cond_->Wait(self);
3795           } while (native_blocking_gcs_finished_ == initial_gcs_finished);
3796         } else {
3797           native_blocking_gc_is_assigned_ = true;
3798           run_gc = true;
3799         }
3800       }
3801     }
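    // Illustrative timeline (editorial): if threads A, B and C all cross the blocking watermark
    // while no GC is assigned, A wins the assignment (run_gc == true) and B and C wait on
    // native_blocking_gc_cond_. Once A finishes the collection and finalization below, it bumps
    // native_blocking_gcs_finished_ and broadcasts, releasing B and C.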
3802 
3803     if (run_gc) {
3804       CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAllocBlocking, false);
3805       RunFinalization(env, kNativeAllocationFinalizeTimeout);
3806       CHECK(!env->ExceptionCheck());
3807 
3808       MutexLock mu(self, *native_blocking_gc_lock_);
3809       native_blocking_gc_is_assigned_ = false;
3810       native_blocking_gc_in_progress_ = false;
3811       native_blocking_gcs_finished_++;
3812       native_blocking_gc_cond_->Broadcast(self);
3813     }
3814   } else if (new_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
3815              !IsGCRequestPending()) {
3816     // Trigger another GC because there have been enough native bytes
3817     // allocated since the last GC.
3818     if (IsGcConcurrent()) {
3819       RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full*/true);
3820     } else {
3821       CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
3822     }
3823   }
3824 }
3825 
3826 void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
3827   // Take the bytes freed out of new_native_bytes_allocated_ first. If
3828   // new_native_bytes_allocated_ reaches zero, take the remaining bytes freed
3829   // out of old_native_bytes_allocated_ to ensure all freed bytes are
3830   // accounted for.
3831   size_t allocated;
3832   size_t new_freed_bytes;
3833   do {
3834     allocated = new_native_bytes_allocated_.LoadRelaxed();
3835     new_freed_bytes = std::min(allocated, bytes);
3836   } while (!new_native_bytes_allocated_.CompareExchangeWeakRelaxed(allocated,
3837                                                                    allocated - new_freed_bytes));
3838   if (new_freed_bytes < bytes) {
3839     old_native_bytes_allocated_.FetchAndSubRelaxed(bytes - new_freed_bytes);
3840   }
3841 }
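// Worked example (editorial, illustrative numbers): with new_native_bytes_allocated_ == 100KB,
// freeing 150KB drains the new counter to zero (new_freed_bytes == 100KB) and subtracts the
// remaining 50KB from old_native_bytes_allocated_, so every freed byte lands in exactly one of
// the two counters.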
3842 
3843 size_t Heap::GetTotalMemory() const {
3844   return std::max(max_allowed_footprint_, GetBytesAllocated());
3845 }
3846 
3847 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
3848   DCHECK(mod_union_table != nullptr);
3849   mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
3850 }
3851 
3852 void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
3853   CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
3854         (c->IsVariableSize() || c->GetObjectSize() == byte_count))
3855       << "ClassFlags=" << c->GetClassFlags()
3856       << " IsClassClass=" << c->IsClassClass()
3857       << " byte_count=" << byte_count
3858       << " IsVariableSize=" << c->IsVariableSize()
3859       << " ObjectSize=" << c->GetObjectSize()
3860       << " sizeof(Class)=" << sizeof(mirror::Class)
3861       << verification_->DumpObjectInfo(c.Ptr(), /*tag*/ "klass");
3862   CHECK_GE(byte_count, sizeof(mirror::Object));
3863 }
3864 
3865 void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
3866   CHECK(remembered_set != nullptr);
3867   space::Space* space = remembered_set->GetSpace();
3868   CHECK(space != nullptr);
3869   CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
3870   remembered_sets_.Put(space, remembered_set);
3871   CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
3872 }
3873 
3874 void Heap::RemoveRememberedSet(space::Space* space) {
3875   CHECK(space != nullptr);
3876   auto it = remembered_sets_.find(space);
3877   CHECK(it != remembered_sets_.end());
3878   delete it->second;
3879   remembered_sets_.erase(it);
3880   CHECK(remembered_sets_.find(space) == remembered_sets_.end());
3881 }
3882 
3883 void Heap::ClearMarkedObjects() {
3884   // Clear all of the spaces' mark bitmaps.
3885   for (const auto& space : GetContinuousSpaces()) {
3886     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
3887     if (space->GetLiveBitmap() != mark_bitmap) {
3888       mark_bitmap->Clear();
3889     }
3890   }
3891   // Clear the marked objects in the discontinuous space object sets.
3892   for (const auto& space : GetDiscontinuousSpaces()) {
3893     space->GetMarkBitmap()->Clear();
3894   }
3895 }
3896 
3897 void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
3898   allocation_records_.reset(records);
3899 }
3900 
3901 void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
3902   if (IsAllocTrackingEnabled()) {
3903     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3904     if (IsAllocTrackingEnabled()) {
3905       GetAllocationRecords()->VisitRoots(visitor);
3906     }
3907   }
3908 }
3909 
3910 void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
3911   if (IsAllocTrackingEnabled()) {
3912     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3913     if (IsAllocTrackingEnabled()) {
3914       GetAllocationRecords()->SweepAllocationRecords(visitor);
3915     }
3916   }
3917 }
3918 
3919 void Heap::AllowNewAllocationRecords() const {
3920   CHECK(!kUseReadBarrier);
3921   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3922   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
3923   if (allocation_records != nullptr) {
3924     allocation_records->AllowNewAllocationRecords();
3925   }
3926 }
3927 
3928 void Heap::DisallowNewAllocationRecords() const {
3929   CHECK(!kUseReadBarrier);
3930   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3931   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
3932   if (allocation_records != nullptr) {
3933     allocation_records->DisallowNewAllocationRecords();
3934   }
3935 }
3936 
3937 void Heap::BroadcastForNewAllocationRecords() const {
3938   // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
3939   // be set to false while some threads are waiting for system weak access in
3940   // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
3941   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3942   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
3943   if (allocation_records != nullptr) {
3944     allocation_records->BroadcastForNewAllocationRecords();
3945   }
3946 }
3947 
3948 void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
3949   auto* const runtime = Runtime::Current();
3950   if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
3951       !runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
3952     // Check if we should GC.
3953     bool new_backtrace = false;
3954     {
3955       static constexpr size_t kMaxFrames = 16u;
3956       FixedSizeBacktrace<kMaxFrames> backtrace;
3957       backtrace.Collect(/* skip_frames */ 2);
3958       uint64_t hash = backtrace.Hash();
3959       MutexLock mu(self, *backtrace_lock_);
3960       new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
3961       if (new_backtrace) {
3962         seen_backtraces_.insert(hash);
3963       }
3964     }
3965     if (new_backtrace) {
3966       StackHandleScope<1> hs(self);
3967       auto h = hs.NewHandleWrapper(obj);
3968       CollectGarbage(false);
3969       unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
3970     } else {
3971       seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
3972     }
3973   }
3974 }
3975 
3976 void Heap::DisableGCForShutdown() {
3977   Thread* const self = Thread::Current();
3978   CHECK(Runtime::Current()->IsShuttingDown(self));
3979   MutexLock mu(self, *gc_complete_lock_);
3980   gc_disabled_for_shutdown_ = true;
3981 }
3982 
3983 bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
3984   for (gc::space::ImageSpace* space : boot_image_spaces_) {
3985     if (space->HasAddress(obj.Ptr())) {
3986       return true;
3987     }
3988   }
3989   return false;
3990 }
3991 
3992 bool Heap::IsInBootImageOatFile(const void* p) const {
3993   for (gc::space::ImageSpace* space : boot_image_spaces_) {
3994     if (space->GetOatFile()->Contains(p)) {
3995       return true;
3996     }
3997   }
3998   return false;
3999 }
4000 
4001 void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
4002                              uint32_t* boot_image_end,
4003                              uint32_t* boot_oat_begin,
4004                              uint32_t* boot_oat_end) {
4005   DCHECK(boot_image_begin != nullptr);
4006   DCHECK(boot_image_end != nullptr);
4007   DCHECK(boot_oat_begin != nullptr);
4008   DCHECK(boot_oat_end != nullptr);
4009   *boot_image_begin = 0u;
4010   *boot_image_end = 0u;
4011   *boot_oat_begin = 0u;
4012   *boot_oat_end = 0u;
4013   for (gc::space::ImageSpace* space_ : GetBootImageSpaces()) {
4014     const uint32_t image_begin = PointerToLowMemUInt32(space_->Begin());
4015     const uint32_t image_size = space_->GetImageHeader().GetImageSize();
4016     if (*boot_image_begin == 0 || image_begin < *boot_image_begin) {
4017       *boot_image_begin = image_begin;
4018     }
4019     *boot_image_end = std::max(*boot_image_end, image_begin + image_size);
4020     const OatFile* boot_oat_file = space_->GetOatFile();
4021     const uint32_t oat_begin = PointerToLowMemUInt32(boot_oat_file->Begin());
4022     const uint32_t oat_size = boot_oat_file->Size();
4023     if (*boot_oat_begin == 0 || oat_begin < *boot_oat_begin) {
4024       *boot_oat_begin = oat_begin;
4025     }
4026     *boot_oat_end = std::max(*boot_oat_end, oat_begin + oat_size);
4027   }
4028 }
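// Worked example (editorial, illustrative addresses): with image spaces at 0x70000000 (image
// size 0x400000) and 0x70400000 (image size 0x200000), the loop above yields
// *boot_image_begin == 0x70000000 and *boot_image_end == 0x70600000; the oat range is unioned
// the same way.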
4029 
4030 void Heap::SetAllocationListener(AllocationListener* l) {
4031   AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, l);
4032 
4033   if (old == nullptr) {
4034     Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4035   }
4036 }
4037 
4038 void Heap::RemoveAllocationListener() {
4039   AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr);
4040 
4041   if (old != nullptr) {
4042     Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
4043   }
4044 }
4045 
4046 void Heap::SetGcPauseListener(GcPauseListener* l) {
4047   gc_pause_listener_.StoreRelaxed(l);
4048 }
4049 
4050 void Heap::RemoveGcPauseListener() {
4051   gc_pause_listener_.StoreRelaxed(nullptr);
4052 }
4053 
4054 mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
4055                                        size_t alloc_size,
4056                                        bool grow,
4057                                        size_t* bytes_allocated,
4058                                        size_t* usable_size,
4059                                        size_t* bytes_tl_bulk_allocated) {
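  // Editorial overview of the branches below: (1) with partial TLABs, grow the current TLAB in
  // place when it still has capacity; (2) for the bump-pointer TLAB allocator, refill a whole new
  // TLAB; (3) for the region-space TLAB allocator, refill a TLAB for non-large allocations and
  // fall back to a direct non-TLAB region allocation otherwise.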
4060   const AllocatorType allocator_type = GetCurrentAllocator();
4061   if (kUsePartialTlabs && alloc_size <= self->TlabRemainingCapacity()) {
4062     DCHECK_GT(alloc_size, self->TlabSize());
4063     // There is enough space if we grow the TLAB. Let's do that. This increases the
4064     // TLAB bytes.
4065     const size_t min_expand_size = alloc_size - self->TlabSize();
4066     const size_t expand_bytes = std::max(
4067         min_expand_size,
4068         std::min(self->TlabRemainingCapacity() - self->TlabSize(), kPartialTlabSize));
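    // Worked example (editorial; kPartialTlabSize is assumed to be 16KB here): with
    // alloc_size = 40KB, TlabSize() = 8KB and TlabRemainingCapacity() = 256KB,
    // min_expand_size = 32KB and expand_bytes = max(32KB, min(248KB, 16KB)) = 32KB, i.e. the
    // TLAB grows just enough to fit the allocation.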
4069     if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, expand_bytes, grow))) {
4070       return nullptr;
4071     }
4072     *bytes_tl_bulk_allocated = expand_bytes;
4073     self->ExpandTlab(expand_bytes);
4074     DCHECK_LE(alloc_size, self->TlabSize());
4075   } else if (allocator_type == kAllocatorTypeTLAB) {
4076     DCHECK(bump_pointer_space_ != nullptr);
4077     const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
4078     if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow))) {
4079       return nullptr;
4080     }
4081     // Try allocating a new thread local buffer; if the allocation fails, the space must be
4082     // full, so return null.
4083     if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
4084       return nullptr;
4085     }
4086     *bytes_tl_bulk_allocated = new_tlab_size;
4087   } else {
4088     DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
4089     DCHECK(region_space_ != nullptr);
4090     if (space::RegionSpace::kRegionSize >= alloc_size) {
4091       // Non-large. Check OOME for a tlab.
4092       if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
4093                                             space::RegionSpace::kRegionSize,
4094                                             grow))) {
4095         const size_t new_tlab_size = kUsePartialTlabs
4096             ? std::max(alloc_size, kPartialTlabSize)
4097             : gc::space::RegionSpace::kRegionSize;
4098         // Try to allocate a tlab.
4099         if (!region_space_->AllocNewTlab(self, new_tlab_size)) {
4100           // Failed to allocate a tlab. Try non-tlab.
4101           return region_space_->AllocNonvirtual<false>(alloc_size,
4102                                                        bytes_allocated,
4103                                                        usable_size,
4104                                                        bytes_tl_bulk_allocated);
4105         }
4106         *bytes_tl_bulk_allocated = new_tlab_size;
4107         // Fall-through to using the TLAB below.
4108       } else {
4109         // Check OOME for a non-tlab allocation.
4110         if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
4111           return region_space_->AllocNonvirtual<false>(alloc_size,
4112                                                        bytes_allocated,
4113                                                        usable_size,
4114                                                        bytes_tl_bulk_allocated);
4115         }
4116         // Neither tlab nor non-tlab works. Give up.
4117         return nullptr;
4118       }
4119     } else {
4120       // Large. Check OOME.
4121       if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
4122         return region_space_->AllocNonvirtual<false>(alloc_size,
4123                                                      bytes_allocated,
4124                                                      usable_size,
4125                                                      bytes_tl_bulk_allocated);
4126       }
4127       return nullptr;
4128     }
4129   }
4130   // Refilled TLAB, return.
4131   mirror::Object* ret = self->AllocTlab(alloc_size);
4132   DCHECK(ret != nullptr);
4133   *bytes_allocated = alloc_size;
4134   *usable_size = alloc_size;
4135   return ret;
4136 }
4137 
4138 const Verification* Heap::GetVerification() const {
4139   return verification_.get();
4140 }
4141 
4142 }  // namespace gc
4143 }  // namespace art
4144