1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "heap.h"
18 
19 #define ATRACE_TAG ATRACE_TAG_DALVIK
20 #include <cutils/trace.h>
21 
22 #include <limits>
23 #include <memory>
24 #include <vector>
25 
26 #include "base/allocator.h"
27 #include "base/histogram-inl.h"
28 #include "base/stl_util.h"
29 #include "common_throws.h"
30 #include "cutils/sched_policy.h"
31 #include "debugger.h"
32 #include "gc/accounting/atomic_stack.h"
33 #include "gc/accounting/card_table-inl.h"
34 #include "gc/accounting/heap_bitmap-inl.h"
35 #include "gc/accounting/mod_union_table.h"
36 #include "gc/accounting/mod_union_table-inl.h"
37 #include "gc/accounting/remembered_set.h"
38 #include "gc/accounting/space_bitmap-inl.h"
39 #include "gc/collector/concurrent_copying.h"
40 #include "gc/collector/mark_compact.h"
41 #include "gc/collector/mark_sweep-inl.h"
42 #include "gc/collector/partial_mark_sweep.h"
43 #include "gc/collector/semi_space.h"
44 #include "gc/collector/sticky_mark_sweep.h"
45 #include "gc/reference_processor.h"
46 #include "gc/space/bump_pointer_space.h"
47 #include "gc/space/dlmalloc_space-inl.h"
48 #include "gc/space/image_space.h"
49 #include "gc/space/large_object_space.h"
50 #include "gc/space/rosalloc_space-inl.h"
51 #include "gc/space/space-inl.h"
52 #include "gc/space/zygote_space.h"
53 #include "entrypoints/quick/quick_alloc_entrypoints.h"
54 #include "heap-inl.h"
55 #include "image.h"
56 #include "mirror/art_field-inl.h"
57 #include "mirror/class-inl.h"
58 #include "mirror/object.h"
59 #include "mirror/object-inl.h"
60 #include "mirror/object_array-inl.h"
61 #include "mirror/reference-inl.h"
62 #include "os.h"
63 #include "reflection.h"
64 #include "runtime.h"
65 #include "ScopedLocalRef.h"
66 #include "scoped_thread_state_change.h"
67 #include "handle_scope-inl.h"
68 #include "thread_list.h"
69 #include "well_known_classes.h"
70 
71 namespace art {
72 
73 namespace gc {
74 
75 static constexpr size_t kCollectorTransitionStressIterations = 0;
76 static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds
77 static constexpr bool kGCALotMode = false;
78 static constexpr size_t kGcAlotInterval = KB;
79 // Minimum amount of remaining bytes before a concurrent GC is triggered.
80 static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
81 static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
82 // Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more often
83 // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
84 // threads (lower pauses, use less memory bandwidth).
85 static constexpr double kStickyGcThroughputAdjustment = 1.0;
86 // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
87 // is set, since that configuration forces MemMap::MapAnonymous to use the slow msync loop.
88 #if USE_ART_LOW_4G_ALLOCATOR
89 static constexpr bool kUseFreeListSpaceForLOS = true;
90 #else
91 static constexpr bool kUseFreeListSpaceForLOS = false;
92 #endif
93 // Whether or not we compact the zygote in PreZygoteFork.
94 static constexpr bool kCompactZygote = kMovingCollector;
95 // How many reserve entries are at the end of the allocation stack; these are only needed if the
96 // allocation stack overflows.
97 static constexpr size_t kAllocationStackReserveSize = 1024;
98 // Default mark stack size in bytes.
99 static const size_t kDefaultMarkStackSize = 64 * KB;
100 // Define space name.
101 static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
102 static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
103 static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
104 static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
105 
106 Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
107            double target_utilization, double foreground_heap_growth_multiplier,
108            size_t capacity, size_t non_moving_space_capacity, const std::string& image_file_name,
109            const InstructionSet image_instruction_set, CollectorType foreground_collector_type,
110            CollectorType background_collector_type, size_t parallel_gc_threads,
111            size_t conc_gc_threads, bool low_memory_mode,
112            size_t long_pause_log_threshold, size_t long_gc_log_threshold,
113            bool ignore_max_footprint, bool use_tlab,
114            bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
115            bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
116            bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction_for_oom,
117            uint64_t min_interval_homogeneous_space_compaction_by_oom)
118     : non_moving_space_(nullptr),
119       rosalloc_space_(nullptr),
120       dlmalloc_space_(nullptr),
121       main_space_(nullptr),
122       collector_type_(kCollectorTypeNone),
123       foreground_collector_type_(foreground_collector_type),
124       background_collector_type_(background_collector_type),
125       desired_collector_type_(foreground_collector_type_),
126       heap_trim_request_lock_(nullptr),
127       last_trim_time_(0),
128       heap_transition_or_trim_target_time_(0),
129       heap_trim_request_pending_(false),
130       parallel_gc_threads_(parallel_gc_threads),
131       conc_gc_threads_(conc_gc_threads),
132       low_memory_mode_(low_memory_mode),
133       long_pause_log_threshold_(long_pause_log_threshold),
134       long_gc_log_threshold_(long_gc_log_threshold),
135       ignore_max_footprint_(ignore_max_footprint),
136       zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
137       have_zygote_space_(false),
138       large_object_threshold_(std::numeric_limits<size_t>::max()),  // Starts out disabled.
139       collector_type_running_(kCollectorTypeNone),
140       last_gc_type_(collector::kGcTypeNone),
141       next_gc_type_(collector::kGcTypePartial),
142       capacity_(capacity),
143       growth_limit_(growth_limit),
144       max_allowed_footprint_(initial_size),
145       native_footprint_gc_watermark_(initial_size),
146       native_need_to_run_finalization_(false),
147       // Initially assume we perceive jank in case the process state is never updated.
148       process_state_(kProcessStateJankPerceptible),
149       concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
150       total_bytes_freed_ever_(0),
151       total_objects_freed_ever_(0),
152       num_bytes_allocated_(0),
153       native_bytes_allocated_(0),
154       verify_missing_card_marks_(false),
155       verify_system_weaks_(false),
156       verify_pre_gc_heap_(verify_pre_gc_heap),
157       verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
158       verify_post_gc_heap_(verify_post_gc_heap),
159       verify_mod_union_table_(false),
160       verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
161       verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
162       verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
163       last_gc_time_ns_(NanoTime()),
164       allocation_rate_(0),
165       /* For GC a lot mode, we limit the allocations stacks to be kGcAlotInterval allocations. This
166        * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
167        * verification is enabled, we limit the size of allocation stacks to speed up their
168        * searching.
169        */
170       max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
171           : (kVerifyObjectSupport > kVerifyObjectModeFast) ? KB : MB),
172       current_allocator_(kAllocatorTypeDlMalloc),
173       current_non_moving_allocator_(kAllocatorTypeNonMoving),
174       bump_pointer_space_(nullptr),
175       temp_space_(nullptr),
176       min_free_(min_free),
177       max_free_(max_free),
178       target_utilization_(target_utilization),
179       foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
180       total_wait_time_(0),
181       total_allocation_time_(0),
182       verify_object_mode_(kVerifyObjectModeDisabled),
183       disable_moving_gc_count_(0),
184       running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
185       use_tlab_(use_tlab),
186       main_space_backup_(nullptr),
187       min_interval_homogeneous_space_compaction_by_oom_(
188           min_interval_homogeneous_space_compaction_by_oom),
189       last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
190       use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom) {
191   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
192     LOG(INFO) << "Heap() entering";
193   }
194   // If we aren't the zygote, switch to the default non zygote allocator. This may update the
195   // entrypoints.
196   const bool is_zygote = Runtime::Current()->IsZygote();
197   if (!is_zygote) {
198     large_object_threshold_ = kDefaultLargeObjectThreshold;
199     // Background compaction is currently not supported for command line runs.
200     if (background_collector_type_ != foreground_collector_type_) {
201       VLOG(heap) << "Disabling background compaction for non zygote";
202       background_collector_type_ = foreground_collector_type_;
203     }
204   }
205   ChangeCollector(desired_collector_type_);
206   live_bitmap_.reset(new accounting::HeapBitmap(this));
207   mark_bitmap_.reset(new accounting::HeapBitmap(this));
208   // Requested begin for the alloc space, to follow the mapped image and oat files
209   byte* requested_alloc_space_begin = nullptr;
210   if (!image_file_name.empty()) {
211     std::string error_msg;
212     space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str(),
213                                                                image_instruction_set,
214                                                                &error_msg);
215     if (image_space != nullptr) {
216       AddSpace(image_space);
217     // Oat files referenced by image files immediately follow them in memory; ensure the alloc space
218     // isn't going to end up in the middle.
219       byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
220       CHECK_GT(oat_file_end_addr, image_space->End());
221       requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
222     } else {
223       LOG(WARNING) << "Could not create image space with image file '" << image_file_name << "'. "
224                    << "Attempting to fall back to imageless running. Error was: " << error_msg;
225     }
226   }
227   /*
228   requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
229                                      +-  nonmoving space (non_moving_space_capacity)+-
230                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
231                                      +-????????????????????????????????????????????+-
232                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
233                                      +-main alloc space / bump space 1 (capacity_) +-
234                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
235                                      +-????????????????????????????????????????????+-
236                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
237                                      +-main alloc space2 / bump space 2 (capacity_)+-
238                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
239   */
240   bool support_homogeneous_space_compaction =
241       background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
242       use_homogeneous_space_compaction_for_oom;
243   // We may use the same space as the main space for the non moving space if we don't need to compact
244   // from the main space.
245   // This is not the case if we support homogeneous compaction or have a moving background
246   // collector type.
247   bool separate_non_moving_space = is_zygote ||
248       support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
249       IsMovingGc(background_collector_type_);
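  // kCollectorTypeGSS uses the main space together with its own bump pointer spaces (created
  // below), so it does not need a separate non moving space.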
250   if (foreground_collector_type == kCollectorTypeGSS) {
251     separate_non_moving_space = false;
252   }
253   std::unique_ptr<MemMap> main_mem_map_1;
254   std::unique_ptr<MemMap> main_mem_map_2;
255   byte* request_begin = requested_alloc_space_begin;
256   if (request_begin != nullptr && separate_non_moving_space) {
257     request_begin += non_moving_space_capacity;
258   }
259   std::string error_str;
260   std::unique_ptr<MemMap> non_moving_space_mem_map;
261   if (separate_non_moving_space) {
262     // Reserve the non moving mem map before the other two since it needs to be at a specific
263     // address.
264     non_moving_space_mem_map.reset(
265         MemMap::MapAnonymous("non moving space", requested_alloc_space_begin,
266                              non_moving_space_capacity, PROT_READ | PROT_WRITE, true, &error_str));
267     CHECK(non_moving_space_mem_map != nullptr) << error_str;
268     // Try to reserve virtual memory at a lower address if we have a separate non moving space.
269     request_begin = reinterpret_cast<byte*>(300 * MB);
270   }
271   // Attempt to create 2 mem maps at or after the requested begin.
272   main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
273                                                     PROT_READ | PROT_WRITE, &error_str));
274   CHECK(main_mem_map_1.get() != nullptr) << error_str;
275   if (support_homogeneous_space_compaction ||
276       background_collector_type_ == kCollectorTypeSS ||
277       foreground_collector_type_ == kCollectorTypeSS) {
278     main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
279                                                       capacity_, PROT_READ | PROT_WRITE,
280                                                       &error_str));
281     CHECK(main_mem_map_2.get() != nullptr) << error_str;
282   }
283   // Create the non moving space first so that bitmaps don't take up the address range.
284   if (separate_non_moving_space) {
285     // Non moving space is always dlmalloc since we currently don't have support for multiple
286     // active rosalloc spaces.
287     const size_t size = non_moving_space_mem_map->Size();
288     non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
289         non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
290         initial_size, size, size, false);
291     non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
292     CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
293         << requested_alloc_space_begin;
294     AddSpace(non_moving_space_);
295   }
296   // Create other spaces based on whether or not we have a moving GC.
297   if (IsMovingGc(foreground_collector_type_) && foreground_collector_type_ != kCollectorTypeGSS) {
298     // Create bump pointer spaces.
299     // We only need to create the bump pointer spaces if the foreground collector is a compacting GC.
300     // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
301     bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
302                                                                     main_mem_map_1.release());
303     CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
304     AddSpace(bump_pointer_space_);
305     temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
306                                                             main_mem_map_2.release());
307     CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
308     AddSpace(temp_space_);
309     CHECK(separate_non_moving_space);
310   } else {
311     CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
312     CHECK(main_space_ != nullptr);
313     AddSpace(main_space_);
314     if (!separate_non_moving_space) {
315       non_moving_space_ = main_space_;
316       CHECK(!non_moving_space_->CanMoveObjects());
317     }
318     if (foreground_collector_type_ == kCollectorTypeGSS) {
319       CHECK_EQ(foreground_collector_type_, background_collector_type_);
320       // Create bump pointer spaces instead of a backup space.
321       main_mem_map_2.release();
322       bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
323                                                             kGSSBumpPointerSpaceCapacity, nullptr);
324       CHECK(bump_pointer_space_ != nullptr);
325       AddSpace(bump_pointer_space_);
326       temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
327                                                     kGSSBumpPointerSpaceCapacity, nullptr);
328       CHECK(temp_space_ != nullptr);
329       AddSpace(temp_space_);
330     } else if (main_mem_map_2.get() != nullptr) {
331       const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
332       main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
333                                                            growth_limit_, capacity_, name, true));
334       CHECK(main_space_backup_.get() != nullptr);
335       // Add the space so it's accounted for in the heap_begin and heap_end.
336       AddSpace(main_space_backup_.get());
337     }
338   }
339   CHECK(non_moving_space_ != nullptr);
340   CHECK(!non_moving_space_->CanMoveObjects());
341   // Allocate the large object space.
342   if (kUseFreeListSpaceForLOS) {
343     large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity_);
344   } else {
345     large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
346   }
347   CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
348   AddSpace(large_object_space_);
349   // Compute heap capacity. Continuous spaces are sorted in order of Begin().
350   CHECK(!continuous_spaces_.empty());
351   // Relies on the spaces being sorted.
352   byte* heap_begin = continuous_spaces_.front()->Begin();
353   byte* heap_end = continuous_spaces_.back()->Limit();
354   size_t heap_capacity = heap_end - heap_begin;
355   // Remove the main backup space since it slows down the GC to have unused extra spaces.
356   if (main_space_backup_.get() != nullptr) {
357     RemoveSpace(main_space_backup_.get());
358   }
359   // Allocate the card table.
360   card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
361   CHECK(card_table_.get() != NULL) << "Failed to create card table";
362   // Card cache for now since it makes it easier for us to update the references to the copying
363   // spaces.
364   accounting::ModUnionTable* mod_union_table =
365       new accounting::ModUnionTableToZygoteAllocspace("Image mod-union table", this,
366                                                       GetImageSpace());
367   CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
368   AddModUnionTable(mod_union_table);
369   if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
370     accounting::RememberedSet* non_moving_space_rem_set =
371         new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
372     CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
373     AddRememberedSet(non_moving_space_rem_set);
374   }
375   // TODO: Count objects in the image space here?
376   num_bytes_allocated_.StoreRelaxed(0);
377   mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
378                                                     kDefaultMarkStackSize));
379   const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
380   allocation_stack_.reset(accounting::ObjectStack::Create(
381       "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
382   live_stack_.reset(accounting::ObjectStack::Create(
383       "live stack", max_allocation_stack_size_, alloc_stack_capacity));
384   // It's still too early to take a lock because there are no threads yet, but we can create locks
385   // now. We don't create them earlier to make it clear that you can't use locks during heap
386   // initialization.
387   gc_complete_lock_ = new Mutex("GC complete lock");
388   gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
389                                                 *gc_complete_lock_));
390   heap_trim_request_lock_ = new Mutex("Heap trim request lock");
391   last_gc_size_ = GetBytesAllocated();
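  // When ignoring the max footprint, raise the footprint limit and the concurrent GC start
  // watermark to their maximum values.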
392   if (ignore_max_footprint_) {
393     SetIdealFootprint(std::numeric_limits<size_t>::max());
394     concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
395   }
396   CHECK_NE(max_allowed_footprint_, 0U);
397   // Create our garbage collectors.
398   for (size_t i = 0; i < 2; ++i) {
399     const bool concurrent = i != 0;
400     garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
401     garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
402     garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
403   }
404   if (kMovingCollector) {
405     // TODO: Clean this up.
406     const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
407     semi_space_collector_ = new collector::SemiSpace(this, generational,
408                                                      generational ? "generational" : "");
409     garbage_collectors_.push_back(semi_space_collector_);
410     concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
411     garbage_collectors_.push_back(concurrent_copying_collector_);
412     mark_compact_collector_ = new collector::MarkCompact(this);
413     garbage_collectors_.push_back(mark_compact_collector_);
414   }
415   if (GetImageSpace() != nullptr && non_moving_space_ != nullptr) {
416     // Check that there's no gap between the image space and the non moving space so that the
417     // immune region won't break (e.g. due to a large object allocated in the gap).
418     bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
419                                       non_moving_space_->GetMemMap());
420     if (!no_gap) {
421       MemMap::DumpMaps(LOG(ERROR));
422       LOG(FATAL) << "There's a gap between the image space and the main space";
423     }
424   }
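  // When running on Valgrind, switch to the instrumented allocation entrypoints instead of the
  // inlined fast paths.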
425   if (running_on_valgrind_) {
426     Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
427   }
428   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
429     LOG(INFO) << "Heap() exiting";
430   }
431 }
432 
433 MemMap* Heap::MapAnonymousPreferredAddress(const char* name, byte* request_begin, size_t capacity,
434                                            int prot_flags, std::string* out_error_str) {
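  // Try to map at the requested address first; if that fails, retry once letting the kernel
  // choose the address.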
435   while (true) {
436     MemMap* map = MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity,
437                                        PROT_READ | PROT_WRITE, true, out_error_str);
438     if (map != nullptr || request_begin == nullptr) {
439       return map;
440     }
441     // Retry a second time with no specified request begin.
442     request_begin = nullptr;
443   }
444   return nullptr;
445 }
446 
447 space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
448                                                       size_t growth_limit, size_t capacity,
449                                                       const char* name, bool can_move_objects) {
450   space::MallocSpace* malloc_space = nullptr;
451   if (kUseRosAlloc) {
452     // Create rosalloc space.
453     malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
454                                                           initial_size, growth_limit, capacity,
455                                                           low_memory_mode_, can_move_objects);
456   } else {
457     malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
458                                                           initial_size, growth_limit, capacity,
459                                                           can_move_objects);
460   }
461   if (collector::SemiSpace::kUseRememberedSet) {
462     accounting::RememberedSet* rem_set  =
463         new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
464     CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
465     AddRememberedSet(rem_set);
466   }
467   CHECK(malloc_space != nullptr) << "Failed to create " << name;
468   malloc_space->SetFootprintLimit(malloc_space->Capacity());
469   return malloc_space;
470 }
471 
472 void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
473                                  size_t capacity) {
474   // Is background compaction enabled?
475   bool can_move_objects = IsMovingGc(background_collector_type_) !=
476       IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
477   // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
478   // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
479   // from the main space to the zygote space. If background compaction is enabled, always pass in
480   // that we can move objects.
481   if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
482     // After the zygote we want this to be false if we don't have background compaction enabled so
483     // that getting primitive array elements is faster.
484     // We never have homogeneous compaction with GSS and don't need a space with movable objects.
485     can_move_objects = !have_zygote_space_ && foreground_collector_type_ != kCollectorTypeGSS;
486   }
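  // Drop the remembered set of any previous main space; CreateMallocSpaceFromMemMap installs a
  // fresh one for the new space.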
487   if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
488     RemoveRememberedSet(main_space_);
489   }
490   const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
491   main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
492                                             can_move_objects);
493   SetSpaceAsDefault(main_space_);
494   VLOG(heap) << "Created main space " << main_space_;
495 }
496 
497 void Heap::ChangeAllocator(AllocatorType allocator) {
498   if (current_allocator_ != allocator) {
499     // These two allocators are only used internally and don't have any entrypoints.
500     CHECK_NE(allocator, kAllocatorTypeLOS);
501     CHECK_NE(allocator, kAllocatorTypeNonMoving);
502     current_allocator_ = allocator;
503     MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
504     SetQuickAllocEntryPointsAllocator(current_allocator_);
505     Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
506   }
507 }
508 
509 void Heap::DisableMovingGc() {
510   if (IsMovingGc(foreground_collector_type_)) {
511     foreground_collector_type_ = kCollectorTypeCMS;
512   }
513   if (IsMovingGc(background_collector_type_)) {
514     background_collector_type_ = foreground_collector_type_;
515   }
516   TransitionCollector(foreground_collector_type_);
517   ThreadList* tl = Runtime::Current()->GetThreadList();
518   Thread* self = Thread::Current();
519   ScopedThreadStateChange tsc(self, kSuspended);
520   tl->SuspendAll();
521   // Something may have caused the transition to fail.
522   if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
523     CHECK(main_space_ != nullptr);
524     // The allocation stack may have non movable objects in it. We need to flush it since the GC
525     // can't only handle marking allocation stack objects of one non moving space and one main
526     // space.
527     {
528       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
529       FlushAllocStack();
530     }
531     main_space_->DisableMovingObjects();
532     non_moving_space_ = main_space_;
533     CHECK(!non_moving_space_->CanMoveObjects());
534   }
535   tl->ResumeAll();
536 }
537 
538 std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
539   if (!IsValidContinuousSpaceObjectAddress(klass)) {
540     return StringPrintf("<non heap address klass %p>", klass);
541   }
542   mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
543   if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
544     std::string result("[");
545     result += SafeGetClassDescriptor(component_type);
546     return result;
547   } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
548     return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
549   } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
550     return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
551   } else {
552     mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
553     if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
554       return StringPrintf("<non heap address dex_cache %p>", dex_cache);
555     }
556     const DexFile* dex_file = dex_cache->GetDexFile();
557     uint16_t class_def_idx = klass->GetDexClassDefIndex();
558     if (class_def_idx == DexFile::kDexNoIndex16) {
559       return "<class def not found>";
560     }
561     const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
562     const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
563     return dex_file->GetTypeDescriptor(type_id);
564   }
565 }
566 
567 std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
568   if (obj == nullptr) {
569     return "null";
570   }
571   mirror::Class* klass = obj->GetClass<kVerifyNone>();
572   if (klass == nullptr) {
573     return "(class=null)";
574   }
575   std::string result(SafeGetClassDescriptor(klass));
576   if (obj->IsClass()) {
577     result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
578   }
579   return result;
580 }
581 
582 void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
583   if (obj == nullptr) {
584     stream << "(obj=null)";
585     return;
586   }
587   if (IsAligned<kObjectAlignment>(obj)) {
588     space::Space* space = nullptr;
589     // Don't use find space since it only finds spaces which actually contain objects instead of
590     // spaces which may contain objects (e.g. cleared bump pointer spaces).
591     for (const auto& cur_space : continuous_spaces_) {
592       if (cur_space->HasAddress(obj)) {
593         space = cur_space;
594         break;
595       }
596     }
597     // Unprotect all the spaces.
598     for (const auto& space : continuous_spaces_) {
599       mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
600     }
601     stream << "Object " << obj;
602     if (space != nullptr) {
603       stream << " in space " << *space;
604     }
605     mirror::Class* klass = obj->GetClass<kVerifyNone>();
606     stream << "\nclass=" << klass;
607     if (klass != nullptr) {
608       stream << " type= " << SafePrettyTypeOf(obj);
609     }
610     // Re-protect the address we faulted on.
611     mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
612   }
613 }
614 
615 bool Heap::IsCompilingBoot() const {
616   if (!Runtime::Current()->IsCompiler()) {
617     return false;
618   }
619   for (const auto& space : continuous_spaces_) {
620     if (space->IsImageSpace() || space->IsZygoteSpace()) {
621       return false;
622     }
623   }
624   return true;
625 }
626 
627 bool Heap::HasImageSpace() const {
628   for (const auto& space : continuous_spaces_) {
629     if (space->IsImageSpace()) {
630       return true;
631     }
632   }
633   return false;
634 }
635 
636 void Heap::IncrementDisableMovingGC(Thread* self) {
637   // Need to do this holding the lock to prevent races where the GC is about to run / running when
638   // we attempt to disable it.
639   ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
640   MutexLock mu(self, *gc_complete_lock_);
641   ++disable_moving_gc_count_;
642   if (IsMovingGc(collector_type_running_)) {
643     WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
644   }
645 }
646 
647 void Heap::DecrementDisableMovingGC(Thread* self) {
648   MutexLock mu(self, *gc_complete_lock_);
649   CHECK_GE(disable_moving_gc_count_, 0U);
650   --disable_moving_gc_count_;
651 }
652 
653 void Heap::UpdateProcessState(ProcessState process_state) {
654   if (process_state_ != process_state) {
655     process_state_ = process_state;
656     for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
657       // Start at index 1 to avoid "is always false" warning.
658       // Have iteration 1 always transition the collector.
659       TransitionCollector((((i & 1) == 1) == (process_state_ == kProcessStateJankPerceptible))
660                           ? foreground_collector_type_ : background_collector_type_);
661       usleep(kCollectorTransitionStressWait);
662     }
663     if (process_state_ == kProcessStateJankPerceptible) {
664       // Transition back to foreground right away to prevent jank.
665       RequestCollectorTransition(foreground_collector_type_, 0);
666     } else {
667       // Don't delay for debug builds since we may want to stress test the GC.
668       // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
669   // special handling which does a homogeneous space compaction once but then doesn't transition
670       // the collector.
671       RequestCollectorTransition(background_collector_type_,
672                                  kIsDebugBuild ? 0 : kCollectorTransitionWait);
673     }
674   }
675 }
676 
677 void Heap::CreateThreadPool() {
678   const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
679   if (num_threads != 0) {
680     thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
681   }
682 }
683 
684 void Heap::VisitObjects(ObjectCallback callback, void* arg) {
685   Thread* self = Thread::Current();
686   // GCs can move objects, so don't allow this.
687   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting objects");
688   if (bump_pointer_space_ != nullptr) {
689     // Visit objects in bump pointer space.
690     bump_pointer_space_->Walk(callback, arg);
691   }
692   // TODO: Switch to standard begin and end to use a range-based loop.
693   for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
694       it < end; ++it) {
695     mirror::Object* obj = *it;
696     if (obj != nullptr && obj->GetClass() != nullptr) {
697       // Avoid the race condition caused by the object not yet being written into the allocation
698       // stack or the class not yet being written in the object. Or, if kUseThreadLocalAllocationStack,
699       // there can be nulls on the allocation stack.
700       callback(obj, arg);
701     }
702   }
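  // Visit the objects in the remaining spaces through the live bitmap.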
703   GetLiveBitmap()->Walk(callback, arg);
704   self->EndAssertNoThreadSuspension(old_cause);
705 }
706 
707 void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
708   space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
709   space::ContinuousSpace* space2 = non_moving_space_;
710   // TODO: Generalize this to n bitmaps?
711   CHECK(space1 != nullptr);
712   CHECK(space2 != nullptr);
713   MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
714                  large_object_space_->GetLiveBitmap(), stack);
715 }
716 
717 void Heap::DeleteThreadPool() {
718   thread_pool_.reset(nullptr);
719 }
720 
721 void Heap::AddSpace(space::Space* space) {
722   CHECK(space != nullptr);
723   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
724   if (space->IsContinuousSpace()) {
725     DCHECK(!space->IsDiscontinuousSpace());
726     space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
727     // Continuous spaces don't necessarily have bitmaps.
728     accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
729     accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
730     if (live_bitmap != nullptr) {
731       CHECK(mark_bitmap != nullptr);
732       live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
733       mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
734     }
735     continuous_spaces_.push_back(continuous_space);
736     // Ensure that spaces remain sorted in increasing order of start address.
737     std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
738               [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
739       return a->Begin() < b->Begin();
740     });
741   } else {
742     CHECK(space->IsDiscontinuousSpace());
743     space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
744     live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
745     mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
746     discontinuous_spaces_.push_back(discontinuous_space);
747   }
748   if (space->IsAllocSpace()) {
749     alloc_spaces_.push_back(space->AsAllocSpace());
750   }
751 }
752 
753 void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
754   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
755   if (continuous_space->IsDlMallocSpace()) {
756     dlmalloc_space_ = continuous_space->AsDlMallocSpace();
757   } else if (continuous_space->IsRosAllocSpace()) {
758     rosalloc_space_ = continuous_space->AsRosAllocSpace();
759   }
760 }
761 
762 void Heap::RemoveSpace(space::Space* space) {
763   DCHECK(space != nullptr);
764   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
765   if (space->IsContinuousSpace()) {
766     DCHECK(!space->IsDiscontinuousSpace());
767     space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
768     // Continuous spaces don't necessarily have bitmaps.
769     accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
770     accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
771     if (live_bitmap != nullptr) {
772       DCHECK(mark_bitmap != nullptr);
773       live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
774       mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
775     }
776     auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
777     DCHECK(it != continuous_spaces_.end());
778     continuous_spaces_.erase(it);
779   } else {
780     DCHECK(space->IsDiscontinuousSpace());
781     space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
782     live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
783     mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
784     auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
785                         discontinuous_space);
786     DCHECK(it != discontinuous_spaces_.end());
787     discontinuous_spaces_.erase(it);
788   }
789   if (space->IsAllocSpace()) {
790     auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
791     DCHECK(it != alloc_spaces_.end());
792     alloc_spaces_.erase(it);
793   }
794 }
795 
796 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
797   // Dump cumulative timings.
798   os << "Dumping cumulative Gc timings\n";
799   uint64_t total_duration = 0;
800   // Dump cumulative loggers for each GC type.
801   uint64_t total_paused_time = 0;
802   for (auto& collector : garbage_collectors_) {
803     const CumulativeLogger& logger = collector->GetCumulativeTimings();
804     const size_t iterations = logger.GetIterations();
805     const Histogram<uint64_t>& pause_histogram = collector->GetPauseHistogram();
806     if (iterations != 0 && pause_histogram.SampleSize() != 0) {
807       os << ConstDumpable<CumulativeLogger>(logger);
808       const uint64_t total_ns = logger.GetTotalNs();
809       const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
810       double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
811       const uint64_t freed_bytes = collector->GetTotalFreedBytes();
812       const uint64_t freed_objects = collector->GetTotalFreedObjects();
813       Histogram<uint64_t>::CumulativeData cumulative_data;
814       pause_histogram.CreateHistogram(&cumulative_data);
815       pause_histogram.PrintConfidenceIntervals(os, 0.99, cumulative_data);
816       os << collector->GetName() << " total time: " << PrettyDuration(total_ns)
817          << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
818          << collector->GetName() << " freed: " << freed_objects
819          << " objects with total size " << PrettySize(freed_bytes) << "\n"
820          << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
821          << PrettySize(freed_bytes / seconds) << "/s\n";
822       total_duration += total_ns;
823       total_paused_time += total_pause_ns;
824     }
825     collector->ResetMeasurements();
826   }
827   uint64_t allocation_time =
828       static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
829   if (total_duration != 0) {
830     const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
831     os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
832     os << "Mean GC size throughput: "
833        << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
834     os << "Mean GC object throughput: "
835        << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
836   }
837   uint64_t total_objects_allocated = GetObjectsAllocatedEver();
838   os << "Total number of allocations " << total_objects_allocated << "\n";
839   uint64_t total_bytes_allocated = GetBytesAllocatedEver();
840   os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
841   os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
842   os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
843   os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
844   os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
845   os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
846   if (kMeasureAllocationTime) {
847     os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
848     os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
849        << "\n";
850   }
851   os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
852   os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
853   BaseMutex::DumpAll(os);
854 }
855 
856 Heap::~Heap() {
857   VLOG(heap) << "Starting ~Heap()";
858   STLDeleteElements(&garbage_collectors_);
859   // If we don't reset then the mark stack complains in its destructor.
860   allocation_stack_->Reset();
861   live_stack_->Reset();
862   STLDeleteValues(&mod_union_tables_);
863   STLDeleteValues(&remembered_sets_);
864   STLDeleteElements(&continuous_spaces_);
865   STLDeleteElements(&discontinuous_spaces_);
866   delete gc_complete_lock_;
867   delete heap_trim_request_lock_;
868   VLOG(heap) << "Finished ~Heap()";
869 }
870 
871 space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
872                                                             bool fail_ok) const {
873   for (const auto& space : continuous_spaces_) {
874     if (space->Contains(obj)) {
875       return space;
876     }
877   }
878   if (!fail_ok) {
879     LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
880   }
881   return NULL;
882 }
883 
884 space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
885                                                                   bool fail_ok) const {
886   for (const auto& space : discontinuous_spaces_) {
887     if (space->Contains(obj)) {
888       return space;
889     }
890   }
891   if (!fail_ok) {
892     LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
893   }
894   return NULL;
895 }
896 
897 space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
898   space::Space* result = FindContinuousSpaceFromObject(obj, true);
899   if (result != NULL) {
900     return result;
901   }
902   return FindDiscontinuousSpaceFromObject(obj, true);
903 }
904 
905 space::ImageSpace* Heap::GetImageSpace() const {
906   for (const auto& space : continuous_spaces_) {
907     if (space->IsImageSpace()) {
908       return space->AsImageSpace();
909     }
910   }
911   return NULL;
912 }
913 
914 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
915   std::ostringstream oss;
916   size_t total_bytes_free = GetFreeMemory();
917   oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
918       << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM";
919   // If the allocation failed due to fragmentation, print out the largest continuous allocation.
920   if (total_bytes_free >= byte_count) {
921     space::AllocSpace* space = nullptr;
922     if (allocator_type == kAllocatorTypeNonMoving) {
923       space = non_moving_space_;
924     } else if (allocator_type == kAllocatorTypeRosAlloc ||
925                allocator_type == kAllocatorTypeDlMalloc) {
926       space = main_space_;
927     } else if (allocator_type == kAllocatorTypeBumpPointer ||
928                allocator_type == kAllocatorTypeTLAB) {
929       space = bump_pointer_space_;
930     }
931     if (space != nullptr) {
932       space->LogFragmentationAllocFailure(oss, byte_count);
933     }
934   }
935   self->ThrowOutOfMemoryError(oss.str().c_str());
936 }
937 
938 void Heap::DoPendingTransitionOrTrim() {
939   Thread* self = Thread::Current();
940   CollectorType desired_collector_type;
941   // Wait until we reach the desired transition time.
942   while (true) {
943     uint64_t wait_time;
944     {
945       MutexLock mu(self, *heap_trim_request_lock_);
946       desired_collector_type = desired_collector_type_;
947       uint64_t current_time = NanoTime();
948       if (current_time >= heap_transition_or_trim_target_time_) {
949         break;
950       }
951       wait_time = heap_transition_or_trim_target_time_ - current_time;
952     }
953     ScopedThreadStateChange tsc(self, kSleeping);
954     usleep(wait_time / 1000);  // Usleep takes microseconds.
955   }
956   // Launch homogeneous space compaction if it is desired.
957   if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
958     if (!CareAboutPauseTimes()) {
959       PerformHomogeneousSpaceCompact();
960     }
961     // No need to Trim(). Homogeneous space compaction may free more virtual and physical memory.
962     desired_collector_type = collector_type_;
963     return;
964   }
965   // Transition the collector if the desired collector type is not the same as the current
966   // collector type.
967   TransitionCollector(desired_collector_type);
968   if (!CareAboutPauseTimes()) {
969     // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
970     // about pauses.
971     Runtime* runtime = Runtime::Current();
972     runtime->GetThreadList()->SuspendAll();
973     uint64_t start_time = NanoTime();
974     size_t count = runtime->GetMonitorList()->DeflateMonitors();
975     VLOG(heap) << "Deflating " << count << " monitors took "
976         << PrettyDuration(NanoTime() - start_time);
977     runtime->GetThreadList()->ResumeAll();
978   }
979   // Do a heap trim if it is needed.
980   Trim();
981 }
982 
983 void Heap::Trim() {
984   Thread* self = Thread::Current();
985   {
986     MutexLock mu(self, *heap_trim_request_lock_);
987     if (!heap_trim_request_pending_ || last_trim_time_ + kHeapTrimWait >= NanoTime()) {
988       return;
989     }
990     last_trim_time_ = NanoTime();
991     heap_trim_request_pending_ = false;
992   }
993   {
994     // Need to do this before acquiring the locks since we don't want to get suspended while
995     // holding any locks.
996     ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
997     // Pretend we are doing a GC to prevent background compaction from deleting the space we are
998     // trimming.
999     MutexLock mu(self, *gc_complete_lock_);
1000     // Ensure there is only one GC at a time.
1001     WaitForGcToCompleteLocked(kGcCauseTrim, self);
1002     collector_type_running_ = kCollectorTypeHeapTrim;
1003   }
1004   uint64_t start_ns = NanoTime();
1005   // Trim the managed spaces.
1006   uint64_t total_alloc_space_allocated = 0;
1007   uint64_t total_alloc_space_size = 0;
1008   uint64_t managed_reclaimed = 0;
1009   for (const auto& space : continuous_spaces_) {
1010     if (space->IsMallocSpace()) {
1011       gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1012       if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1013         // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1014         // for a long period of time.
1015         managed_reclaimed += malloc_space->Trim();
1016       }
1017       total_alloc_space_size += malloc_space->Size();
1018     }
1019   }
1020   total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated();
1021   if (bump_pointer_space_ != nullptr) {
1022     total_alloc_space_allocated -= bump_pointer_space_->Size();
1023   }
1024   const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1025       static_cast<float>(total_alloc_space_size);
1026   uint64_t gc_heap_end_ns = NanoTime();
1027   // We never move things in the native heap, so we can finish the GC at this point.
1028   FinishGC(self, collector::kGcTypeNone);
1029   size_t native_reclaimed = 0;
1030   // Only trim the native heap if we don't care about pauses.
1031   if (!CareAboutPauseTimes()) {
1032 #if defined(USE_DLMALLOC)
1033     // Trim the native heap.
1034     dlmalloc_trim(0);
1035     dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
1036 #elif defined(USE_JEMALLOC)
1037     // Jemalloc does its own internal trimming.
1038 #else
1039     UNIMPLEMENTED(WARNING) << "Add trimming support";
1040 #endif
1041   }
1042   uint64_t end_ns = NanoTime();
1043   VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1044       << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
1045       << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
1046       << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
1047       << "%.";
1048 }
1049 
1050 bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
1051   // Note: we deliberately don't take the lock here, and mustn't test anything that would require
1052   // taking the lock.
1053   if (obj == nullptr) {
1054     return true;
1055   }
1056   return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
1057 }
1058 
1059 bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
1060   return FindContinuousSpaceFromObject(obj, true) != nullptr;
1061 }
1062 
1063 bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
1064   if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
1065     return false;
1066   }
1067   for (const auto& space : continuous_spaces_) {
1068     if (space->HasAddress(obj)) {
1069       return true;
1070     }
1071   }
1072   return false;
1073 }
1074 
1075 bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
1076                               bool search_live_stack, bool sorted) {
1077   if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
1078     return false;
1079   }
1080   if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
1081     mirror::Class* klass = obj->GetClass<kVerifyNone>();
1082     if (obj == klass) {
1083       // This case happens for java.lang.Class.
1084       return true;
1085     }
1086     return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1087   } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
1088     // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1089     // a GC). When a GC isn't running End() - Begin() is 0 which means no objects are contained.
1090     return temp_space_->Contains(obj);
1091   }
1092   space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
1093   space::DiscontinuousSpace* d_space = nullptr;
1094   if (c_space != nullptr) {
1095     if (c_space->GetLiveBitmap()->Test(obj)) {
1096       return true;
1097     }
1098   } else {
1099     d_space = FindDiscontinuousSpaceFromObject(obj, true);
1100     if (d_space != nullptr) {
1101       if (d_space->GetLiveBitmap()->Test(obj)) {
1102         return true;
1103       }
1104     }
1105   }
1106   // This covers the allocation/live stack swapping that is done without mutators suspended.
1107   for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1108     if (i > 0) {
1109       NanoSleep(MsToNs(10));
1110     }
1111     if (search_allocation_stack) {
1112       if (sorted) {
1113         if (allocation_stack_->ContainsSorted(obj)) {
1114           return true;
1115         }
1116       } else if (allocation_stack_->Contains(obj)) {
1117         return true;
1118       }
1119     }
1120 
1121     if (search_live_stack) {
1122       if (sorted) {
1123         if (live_stack_->ContainsSorted(obj)) {
1124           return true;
1125         }
1126       } else if (live_stack_->Contains(obj)) {
1127         return true;
1128       }
1129     }
1130   }
1131   // We need to check the bitmaps again since there is a race where we mark something as live and
1132   // then clear the stack containing it.
1133   if (c_space != nullptr) {
1134     if (c_space->GetLiveBitmap()->Test(obj)) {
1135       return true;
1136     }
1137   } else {
1138     d_space = FindDiscontinuousSpaceFromObject(obj, true);
1139     if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
1140       return true;
1141     }
1142   }
1143   return false;
1144 }
1145 
1146 std::string Heap::DumpSpaces() const {
1147   std::ostringstream oss;
1148   DumpSpaces(oss);
1149   return oss.str();
1150 }
1151 
1152 void Heap::DumpSpaces(std::ostream& stream) const {
1153   for (const auto& space : continuous_spaces_) {
1154     accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1155     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1156     stream << space << " " << *space << "\n";
1157     if (live_bitmap != nullptr) {
1158       stream << live_bitmap << " " << *live_bitmap << "\n";
1159     }
1160     if (mark_bitmap != nullptr) {
1161       stream << mark_bitmap << " " << *mark_bitmap << "\n";
1162     }
1163   }
1164   for (const auto& space : discontinuous_spaces_) {
1165     stream << space << " " << *space << "\n";
1166   }
1167 }
1168 
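// Sanity checks a single object: alignment, a non-null and aligned class pointer, and the
// class-of-class invariant. When the verification mode is stricter than "fast" it also does a
// racy liveness check against the heap bitmaps and stacks.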
1169 void Heap::VerifyObjectBody(mirror::Object* obj) {
1170   if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1171     return;
1172   }
1173 
1174   // Ignore early dawn of the universe verifications.
1175   if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
1176     return;
1177   }
1178   CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
1179   mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1180   CHECK(c != nullptr) << "Null class in object " << obj;
1181   CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj;
1182   CHECK(VerifyClassClass(c));
1183 
1184   if (verify_object_mode_ > kVerifyObjectModeFast) {
1185     // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1186     CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1187   }
1188 }
1189 
1190 void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
1191   reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
1192 }
1193 
1194 void Heap::VerifyHeap() {
1195   ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1196   GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
1197 }
1198 
1199 void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
1200   // Use signed comparison since freed bytes can be negative when a background compaction to
1201   // foreground transition occurs. This is caused by moving objects from a bump pointer space to a
1202   // free list backed space, which typically increases the memory footprint due to padding and binning.
1203   DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
1204   // Note: This relies on two's complement for handling negative freed_bytes.
1205   num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
1206   if (Runtime::Current()->HasStatsEnabled()) {
1207     RuntimeStats* thread_stats = Thread::Current()->GetStats();
1208     thread_stats->freed_objects += freed_objects;
1209     thread_stats->freed_bytes += freed_bytes;
1210     // TODO: Do this concurrently.
1211     RuntimeStats* global_stats = Runtime::Current()->GetStats();
1212     global_stats->freed_objects += freed_objects;
1213     global_stats->freed_bytes += freed_bytes;
1214   }
1215 }
1216 
1217 space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1218   for (const auto& space : continuous_spaces_) {
1219     if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1220       if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1221         return space->AsContinuousSpace()->AsRosAllocSpace();
1222       }
1223     }
1224   }
1225   return nullptr;
1226 }
1227 
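// Slow path taken when TryToAllocate fails. The strategy escalates: wait for any in-progress
// GC and retry, run the next planned GC type and retry, walk the rest of the GC plan, retry
// with heap growth allowed, force a GC that clears SoftReferences (required before throwing
// OOME), and finally attempt homogeneous space compaction or disabling the moving GC before
// giving up and throwing OutOfMemoryError.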
1228 mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
1229                                              size_t alloc_size, size_t* bytes_allocated,
1230                                              size_t* usable_size,
1231                                              mirror::Class** klass) {
1232   bool was_default_allocator = allocator == GetCurrentAllocator();
1233   // Make sure there is no pending exception since we may need to throw an OOME.
1234   self->AssertNoPendingException();
1235   DCHECK(klass != nullptr);
1236   StackHandleScope<1> hs(self);
1237   HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
1238   klass = nullptr;  // Invalidate for safety.
1239   // The allocation failed. If the GC is running, block until it completes, and then retry the
1240   // allocation.
1241   collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
1242   if (last_gc != collector::kGcTypeNone) {
1243     // If we were the default allocator but the allocator changed while we were suspended,
1244     // abort the allocation.
1245     if (was_default_allocator && allocator != GetCurrentAllocator()) {
1246       return nullptr;
1247     }
1248     // A GC was in progress and we blocked, retry allocation now that memory has been freed.
1249     mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1250                                                      usable_size);
1251     if (ptr != nullptr) {
1252       return ptr;
1253     }
1254   }
1255 
1256   collector::GcType tried_type = next_gc_type_;
1257   const bool gc_ran =
1258       CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1259   if (was_default_allocator && allocator != GetCurrentAllocator()) {
1260     return nullptr;
1261   }
1262   if (gc_ran) {
1263     mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1264                                                      usable_size);
1265     if (ptr != nullptr) {
1266       return ptr;
1267     }
1268   }
1269 
1270   // Loop through our different GC types and try to GC until we get enough free memory.
1271   for (collector::GcType gc_type : gc_plan_) {
1272     if (gc_type == tried_type) {
1273       continue;
1274     }
1275     // Attempt to run the collector, if we succeed, re-try the allocation.
1276     const bool gc_ran =
1277         CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1278     if (was_default_allocator && allocator != GetCurrentAllocator()) {
1279       return nullptr;
1280     }
1281     if (gc_ran) {
1282       // Did we free sufficient memory for the allocation to succeed?
1283       mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1284                                                        usable_size);
1285       if (ptr != nullptr) {
1286         return ptr;
1287       }
1288     }
1289   }
1290   // Allocations have failed after GCs;  this is an exceptional state.
1291   // Try harder, growing the heap if necessary.
1292   mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1293                                                   usable_size);
1294   if (ptr != nullptr) {
1295     return ptr;
1296   }
1297   // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1298   // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1299   // VM spec requires that all SoftReferences have been collected and cleared before throwing
1300   // OOME.
1301   VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1302            << " allocation";
1303   // TODO: Run finalization, but this may cause more allocations to occur.
1304   // We don't need a WaitForGcToComplete here either.
1305   DCHECK(!gc_plan_.empty());
1306   CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1307   if (was_default_allocator && allocator != GetCurrentAllocator()) {
1308     return nullptr;
1309   }
1310   ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
1311   if (ptr == nullptr) {
1312     const uint64_t current_time = NanoTime();
1313     switch (allocator) {
1314       case kAllocatorTypeRosAlloc:
1315         // Fall-through.
1316       case kAllocatorTypeDlMalloc: {
1317         if (use_homogeneous_space_compaction_for_oom_ &&
1318             current_time - last_time_homogeneous_space_compaction_by_oom_ >
1319             min_interval_homogeneous_space_compaction_by_oom_) {
1320           last_time_homogeneous_space_compaction_by_oom_ = current_time;
1321           HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
1322           switch (result) {
1323             case HomogeneousSpaceCompactResult::kSuccess:
1324               // If the allocation succeeded, we delayed an oom.
1325               ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1326                                               usable_size);
1327               if (ptr != nullptr) {
1328                 count_delayed_oom_++;
1329               }
1330               break;
1331             case HomogeneousSpaceCompactResult::kErrorReject:
1332               // Reject due to disabled moving GC.
1333               break;
1334             case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1335               // Throw OOM by default.
1336               break;
1337             default: {
1338               LOG(FATAL) << "Unimplemented homogeneous space compaction result "
1339                          << static_cast<size_t>(result);
1340             }
1341           }
1342           // Always print that we ran homogeneous space compaction since this can cause jank.
1343           VLOG(heap) << "Ran heap homogeneous space compaction, "
1344                     << " requested defragmentation "
1345                     << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1346                     << " performed defragmentation "
1347                     << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1348                     << " ignored homogeneous space compaction "
1349                     << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1350                     << " delayed count = "
1351                     << count_delayed_oom_.LoadSequentiallyConsistent();
1352         }
1353         break;
1354       }
1355       case kAllocatorTypeNonMoving: {
1356         // Try to transition the heap if the allocation failure was due to the space being full.
1357         if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
1358           // If we aren't out of memory then the OOM was probably from the non moving space being
1359           // full. Attempt to disable compaction and turn the main space into a non moving space.
1360           DisableMovingGc();
1361           // If we are still a moving GC then something must have caused the transition to fail.
1362           if (IsMovingGc(collector_type_)) {
1363             MutexLock mu(self, *gc_complete_lock_);
1364             // If we couldn't disable moving GC, just throw OOME and return null.
1365             LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1366                          << disable_moving_gc_count_;
1367           } else {
1368             LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1369             ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1370                                             usable_size);
1371           }
1372         }
1373         break;
1374       }
1375       default: {
1376         // Do nothing for other allocators.
1377       }
1378     }
1379   }
1380   // If the allocation hasn't succeeded by this point, throw an OOM error.
1381   if (ptr == nullptr) {
1382     ThrowOutOfMemoryError(self, alloc_size, allocator);
1383   }
1384   return ptr;
1385 }
1386 
1387 void Heap::SetTargetHeapUtilization(float target) {
1388   DCHECK_GT(target, 0.0f);  // asserted in Java code
1389   DCHECK_LT(target, 1.0f);
1390   target_utilization_ = target;
1391 }
1392 
1393 size_t Heap::GetObjectsAllocated() const {
1394   size_t total = 0;
1395   for (space::AllocSpace* space : alloc_spaces_) {
1396     total += space->GetObjectsAllocated();
1397   }
1398   return total;
1399 }
1400 
1401 uint64_t Heap::GetObjectsAllocatedEver() const {
1402   return GetObjectsFreedEver() + GetObjectsAllocated();
1403 }
1404 
1405 uint64_t Heap::GetBytesAllocatedEver() const {
1406   return GetBytesFreedEver() + GetBytesAllocated();
1407 }
1408 
1409 class InstanceCounter {
1410  public:
1411   InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
1412       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1413       : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
1414   }
1415   static void Callback(mirror::Object* obj, void* arg)
1416       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1417     InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
1418     mirror::Class* instance_class = obj->GetClass();
1419     CHECK(instance_class != nullptr);
1420     for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
1421       if (instance_counter->use_is_assignable_from_) {
1422         if (instance_counter->classes_[i]->IsAssignableFrom(instance_class)) {
1423           ++instance_counter->counts_[i];
1424         }
1425       } else if (instance_class == instance_counter->classes_[i]) {
1426         ++instance_counter->counts_[i];
1427       }
1428     }
1429   }
1430 
1431  private:
1432   const std::vector<mirror::Class*>& classes_;
1433   bool use_is_assignable_from_;
1434   uint64_t* const counts_;
1435   DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
1436 };
1437 
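// Counts, for each class in |classes|, how many heap objects are instances of it (exact class
// match, or IsAssignableFrom when |use_is_assignable_from| is true). A hypothetical caller
// counting instances of two classes might look like the following sketch (the class variables
// are illustrative, not names used elsewhere in this file):
//
//   std::vector<mirror::Class*> classes = {string_class, byte_array_class};
//   uint64_t counts[2] = {0, 0};
//   heap->CountInstances(classes, /* use_is_assignable_from */ false, counts);
//   // counts[0] and counts[1] now hold the per-class instance counts.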
1438 void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
1439                           uint64_t* counts) {
1440   // Can't do any GC in this function since this may move classes.
1441   Thread* self = Thread::Current();
1442   auto* old_cause = self->StartAssertNoThreadSuspension("CountInstances");
1443   InstanceCounter counter(classes, use_is_assignable_from, counts);
1444   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1445   VisitObjects(InstanceCounter::Callback, &counter);
1446   self->EndAssertNoThreadSuspension(old_cause);
1447 }
1448 
1449 class InstanceCollector {
1450  public:
1451   InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
1452       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1453       : class_(c), max_count_(max_count), instances_(instances) {
1454   }
1455   static void Callback(mirror::Object* obj, void* arg)
1456       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1457     DCHECK(arg != nullptr);
1458     InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
1459     mirror::Class* instance_class = obj->GetClass();
1460     if (instance_class == instance_collector->class_) {
1461       if (instance_collector->max_count_ == 0 ||
1462           instance_collector->instances_.size() < instance_collector->max_count_) {
1463         instance_collector->instances_.push_back(obj);
1464       }
1465     }
1466   }
1467 
1468  private:
1469   mirror::Class* class_;
1470   uint32_t max_count_;
1471   std::vector<mirror::Object*>& instances_;
1472   DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1473 };
1474 
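// Collects up to |max_count| instances of class |c| into |instances| (a max_count of 0 means
// no limit). Thread suspension is disallowed for the duration of the visit, since a GC could
// otherwise move the class and the objects being collected.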
1475 void Heap::GetInstances(mirror::Class* c, int32_t max_count,
1476                         std::vector<mirror::Object*>& instances) {
1477   // Can't do any GC in this function since this may move classes.
1478   Thread* self = Thread::Current();
1479   auto* old_cause = self->StartAssertNoThreadSuspension("GetInstances");
1480   InstanceCollector collector(c, max_count, instances);
1481   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1482   VisitObjects(&InstanceCollector::Callback, &collector);
1483   self->EndAssertNoThreadSuspension(old_cause);
1484 }
1485 
1486 class ReferringObjectsFinder {
1487  public:
1488   ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
1489                          std::vector<mirror::Object*>& referring_objects)
1490       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1491       : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1492   }
1493 
1494   static void Callback(mirror::Object* obj, void* arg)
1495       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1496     reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
1497   }
1498 
1499   // For bitmap Visit.
1500   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1501   // annotalysis on visitors.
1502   void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
1503     o->VisitReferences<true>(*this, VoidFunctor());
1504   }
1505 
1506   // For Object::VisitReferences.
1507   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
1508       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1509     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
1510     if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1511       referring_objects_.push_back(obj);
1512     }
1513   }
1514 
1515  private:
1516   mirror::Object* object_;
1517   uint32_t max_count_;
1518   std::vector<mirror::Object*>& referring_objects_;
1519   DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1520 };
1521 
1522 void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
1523                                std::vector<mirror::Object*>& referring_objects) {
1524   // Can't do any GC in this function since this may move the object o.
1525   Thread* self = Thread::Current();
1526   auto* old_cause = self->StartAssertNoThreadSuspension("GetReferringObjects");
1527   ReferringObjectsFinder finder(o, max_count, referring_objects);
1528   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1529   VisitObjects(&ReferringObjectsFinder::Callback, &finder);
1530   self->EndAssertNoThreadSuspension(old_cause);
1531 }
1532 
1533 void Heap::CollectGarbage(bool clear_soft_references) {
1534   // Even if we waited for a GC we still need to do another GC since weaks allocated during the
1535   // last GC will not necessarily have been cleared.
1536   CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
1537 }
1538 
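// Compacts the main malloc space into its backup space to reduce fragmentation, typically in
// response to an allocation failure. All threads are suspended, the semi space collector copies
// live objects from the current main space into the backup space, the two spaces are swapped,
// and the old space is left read-only so RosAlloc verification can still inspect it.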
1539 HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
1540   Thread* self = Thread::Current();
1541   // Inc requested homogeneous space compaction.
1542   count_requested_homogeneous_space_compaction_++;
1543   // Store performed homogeneous space compaction at a new request arrival.
1544   ThreadList* tl = Runtime::Current()->GetThreadList();
1545   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1546   Locks::mutator_lock_->AssertNotHeld(self);
1547   {
1548     ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1549     MutexLock mu(self, *gc_complete_lock_);
1550     // Ensure there is only one GC at a time.
1551     WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
1552     // Homogeneous space compaction is a copying transition, can't run it if the moving GC disable count
1553     // is non zero.
1554     // If the collector type changed to something which doesn't benefit from homogeneous space compaction,
1555     // exit.
1556     if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
1557         !main_space_->CanMoveObjects()) {
1558       return HomogeneousSpaceCompactResult::kErrorReject;
1559     }
1560     collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
1561   }
1562   if (Runtime::Current()->IsShuttingDown(self)) {
1563     // Don't allow heap transitions to happen if the runtime is shutting down since these can
1564     // cause objects to get finalized.
1565     FinishGC(self, collector::kGcTypeNone);
1566     return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
1567   }
1568   // Suspend all threads.
1569   tl->SuspendAll();
1570   uint64_t start_time = NanoTime();
1571   // Launch compaction.
1572   space::MallocSpace* to_space = main_space_backup_.release();
1573   space::MallocSpace* from_space = main_space_;
1574   to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1575   const uint64_t space_size_before_compaction = from_space->Size();
1576   AddSpace(to_space);
1577   Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
1578   // Leave as prot read so that we can still run ROSAlloc verification on this space.
1579   from_space->GetMemMap()->Protect(PROT_READ);
1580   const uint64_t space_size_after_compaction = to_space->Size();
1581   main_space_ = to_space;
1582   main_space_backup_.reset(from_space);
1583   RemoveSpace(from_space);
1584   SetSpaceAsDefault(main_space_);  // Set as default to reset the proper dlmalloc space.
1585   // Update performed homogeneous space compaction count.
1586   count_performed_homogeneous_space_compaction_++;
1587   // Print statistics log and resume all threads.
1588   uint64_t duration = NanoTime() - start_time;
1589   VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
1590              << PrettySize(space_size_before_compaction) << " -> "
1591              << PrettySize(space_size_after_compaction) << " compact-ratio: "
1592              << std::fixed << static_cast<double>(space_size_after_compaction) /
1593              static_cast<double>(space_size_before_compaction);
1594   tl->ResumeAll();
1595   // Finish GC.
1596   reference_processor_.EnqueueClearedReferences(self);
1597   GrowForUtilization(semi_space_collector_);
1598   FinishGC(self, collector::kGcTypeFull);
1599   return HomogeneousSpaceCompactResult::kSuccess;
1600 }
1601 
1602 
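// Switches the heap between collector types at runtime. If the transition crosses the
// moving/non-moving boundary, live objects are compacted between the main malloc space and the
// bump pointer spaces while all threads are suspended; otherwise only the collector and
// allocator bookkeeping is changed via ChangeCollector.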
1603 void Heap::TransitionCollector(CollectorType collector_type) {
1604   if (collector_type == collector_type_) {
1605     return;
1606   }
1607   VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
1608              << " -> " << static_cast<int>(collector_type);
1609   uint64_t start_time = NanoTime();
1610   uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
1611   Runtime* const runtime = Runtime::Current();
1612   ThreadList* const tl = runtime->GetThreadList();
1613   Thread* const self = Thread::Current();
1614   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1615   Locks::mutator_lock_->AssertNotHeld(self);
1616   // Busy wait until we can GC (StartGC can fail if we have a non-zero
1617   // compacting_gc_disable_count_, this should rarely occur).
1618   for (;;) {
1619     {
1620       ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1621       MutexLock mu(self, *gc_complete_lock_);
1622       // Ensure there is only one GC at a time.
1623       WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
1624       // Currently we only need a heap transition if we switch from a moving collector to a
1625       // non-moving one, or vice versa.
1626       const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
1627       // If someone else beat us to it and changed the collector before we could, exit.
1628       // This is safe to do before the suspend all since we set the collector_type_running_ before
1629       // we exit the loop. If another thread attempts to do the heap transition before we exit,
1630       // then it would get blocked on WaitForGcToCompleteLocked.
1631       if (collector_type == collector_type_) {
1632         return;
1633       }
1634       // GC can be disabled if someone has used GetPrimitiveArrayCritical but not yet released it.
1635       if (!copying_transition || disable_moving_gc_count_ == 0) {
1636         // TODO: Not hard code in semi-space collector?
1637         collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
1638         break;
1639       }
1640     }
1641     usleep(1000);
1642   }
1643   if (runtime->IsShuttingDown(self)) {
1644     // Don't allow heap transitions to happen if the runtime is shutting down since these can
1645     // cause objects to get finalized.
1646     FinishGC(self, collector::kGcTypeNone);
1647     return;
1648   }
1649   tl->SuspendAll();
1650   switch (collector_type) {
1651     case kCollectorTypeSS: {
1652       if (!IsMovingGc(collector_type_)) {
1653         // Create the bump pointer space from the backup space.
1654         CHECK(main_space_backup_ != nullptr);
1655         std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
1656         // We are transitioning from non moving GC -> moving GC; since we copied from the bump
1657         // pointer space last transition it will be protected.
1658         CHECK(mem_map != nullptr);
1659         mem_map->Protect(PROT_READ | PROT_WRITE);
1660         bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
1661                                                                         mem_map.release());
1662         AddSpace(bump_pointer_space_);
1663         Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
1664         // Use the now empty main space mem map for the bump pointer temp space.
1665         mem_map.reset(main_space_->ReleaseMemMap());
1666         // Unset the pointers just in case.
1667         if (dlmalloc_space_ == main_space_) {
1668           dlmalloc_space_ = nullptr;
1669         } else if (rosalloc_space_ == main_space_) {
1670           rosalloc_space_ = nullptr;
1671         }
1672         // Remove the main space so that we don't try to trim it; this doesn't work for debug
1673         // builds since RosAlloc attempts to read the magic number from a protected page.
1674         RemoveSpace(main_space_);
1675         RemoveRememberedSet(main_space_);
1676         delete main_space_;  // Delete the space since it has been removed.
1677         main_space_ = nullptr;
1678         RemoveRememberedSet(main_space_backup_.get());
1679         main_space_backup_.reset(nullptr);  // Deletes the space.
1680         temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
1681                                                                 mem_map.release());
1682         AddSpace(temp_space_);
1683       }
1684       break;
1685     }
1686     case kCollectorTypeMS:
1687       // Fall through.
1688     case kCollectorTypeCMS: {
1689       if (IsMovingGc(collector_type_)) {
1690         CHECK(temp_space_ != nullptr);
1691         std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
1692         RemoveSpace(temp_space_);
1693         temp_space_ = nullptr;
1694         mem_map->Protect(PROT_READ | PROT_WRITE);
1695         CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize, mem_map->Size(),
1696                               mem_map->Size());
1697         mem_map.release();
1698         // Compact to the main space from the bump pointer space, don't need to swap semispaces.
1699         AddSpace(main_space_);
1700         Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
1701         mem_map.reset(bump_pointer_space_->ReleaseMemMap());
1702         RemoveSpace(bump_pointer_space_);
1703         bump_pointer_space_ = nullptr;
1704         const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
1705         // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
1706         if (kIsDebugBuild && kUseRosAlloc) {
1707           mem_map->Protect(PROT_READ | PROT_WRITE);
1708         }
1709         main_space_backup_.reset(CreateMallocSpaceFromMemMap(mem_map.get(), kDefaultInitialSize,
1710                                                              mem_map->Size(), mem_map->Size(),
1711                                                              name, true));
1712         if (kIsDebugBuild && kUseRosAlloc) {
1713           mem_map->Protect(PROT_NONE);
1714         }
1715         mem_map.release();
1716       }
1717       break;
1718     }
1719     default: {
1720       LOG(FATAL) << "Attempted to transition to invalid collector type "
1721                  << static_cast<size_t>(collector_type);
1722       break;
1723     }
1724   }
1725   ChangeCollector(collector_type);
1726   tl->ResumeAll();
1727   // Can't call into java code with all threads suspended.
1728   reference_processor_.EnqueueClearedReferences(self);
1729   uint64_t duration = NanoTime() - start_time;
1730   GrowForUtilization(semi_space_collector_);
1731   FinishGC(self, collector::kGcTypeFull);
1732   int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
1733   int32_t delta_allocated = before_allocated - after_allocated;
1734   std::string saved_str;
1735   if (delta_allocated >= 0) {
1736     saved_str = " saved at least " + PrettySize(delta_allocated);
1737   } else {
1738     saved_str = " expanded " + PrettySize(-delta_allocated);
1739   }
1740   VLOG(heap) << "Heap transition to " << process_state_ << " took "
1741       << PrettyDuration(duration) << saved_str;
1742 }
1743 
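// Updates collector_type_, rebuilds gc_plan_ (the ordered list of GC types to attempt), and
// switches the allocator to match: bump pointer / TLAB allocation for the moving collectors,
// RosAlloc or DlMalloc for the mark sweep family. Also recomputes the threshold at which a
// concurrent GC is started.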
1744 void Heap::ChangeCollector(CollectorType collector_type) {
1745   // TODO: Only do this with all mutators suspended to avoid races.
1746   if (collector_type != collector_type_) {
1747     if (collector_type == kCollectorTypeMC) {
1748       // Don't allow mark compact unless support is compiled in.
1749       CHECK(kMarkCompactSupport);
1750     }
1751     collector_type_ = collector_type;
1752     gc_plan_.clear();
1753     switch (collector_type_) {
1754       case kCollectorTypeCC:  // Fall-through.
1755       case kCollectorTypeMC:  // Fall-through.
1756       case kCollectorTypeSS:  // Fall-through.
1757       case kCollectorTypeGSS: {
1758         gc_plan_.push_back(collector::kGcTypeFull);
1759         if (use_tlab_) {
1760           ChangeAllocator(kAllocatorTypeTLAB);
1761         } else {
1762           ChangeAllocator(kAllocatorTypeBumpPointer);
1763         }
1764         break;
1765       }
1766       case kCollectorTypeMS: {
1767         gc_plan_.push_back(collector::kGcTypeSticky);
1768         gc_plan_.push_back(collector::kGcTypePartial);
1769         gc_plan_.push_back(collector::kGcTypeFull);
1770         ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
1771         break;
1772       }
1773       case kCollectorTypeCMS: {
1774         gc_plan_.push_back(collector::kGcTypeSticky);
1775         gc_plan_.push_back(collector::kGcTypePartial);
1776         gc_plan_.push_back(collector::kGcTypeFull);
1777         ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
1778         break;
1779       }
1780       default: {
1781         LOG(FATAL) << "Unimplemented";
1782       }
1783     }
1784     if (IsGcConcurrent()) {
1785       concurrent_start_bytes_ =
1786           std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
1787     } else {
1788       concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
1789     }
1790   }
1791 }
1792 
1793 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
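// The "bins" are the gaps between live objects in the existing space, keyed by size in a
// multimap. Each surviving object is moved into the smallest bin that can hold it (a first-fit
// by size via lower_bound), and whatever remains of that bin is re-inserted as a smaller bin.
// Objects that fit in no bin are allocated at the end of the target space, growing it.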
1794 class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
1795  public:
1796   explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, false, "zygote collector"),
1797       bin_live_bitmap_(nullptr), bin_mark_bitmap_(nullptr) {
1798   }
1799 
1800   void BuildBins(space::ContinuousSpace* space) {
1801     bin_live_bitmap_ = space->GetLiveBitmap();
1802     bin_mark_bitmap_ = space->GetMarkBitmap();
1803     BinContext context;
1804     context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
1805     context.collector_ = this;
1806     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1807     // Note: This requires traversing the space in increasing order of object addresses.
1808     bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
1809     // Add the last bin which spans after the last object to the end of the space.
1810     AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
1811   }
1812 
1813  private:
1814   struct BinContext {
1815     uintptr_t prev_;  // The end of the previous object.
1816     ZygoteCompactingCollector* collector_;
1817   };
1818   // Maps from bin sizes to locations.
1819   std::multimap<size_t, uintptr_t> bins_;
1820   // Live bitmap of the space which contains the bins.
1821   accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
1822   // Mark bitmap of the space which contains the bins.
1823   accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
1824 
1825   static void Callback(mirror::Object* obj, void* arg)
1826       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1827     DCHECK(arg != nullptr);
1828     BinContext* context = reinterpret_cast<BinContext*>(arg);
1829     ZygoteCompactingCollector* collector = context->collector_;
1830     uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
1831     size_t bin_size = object_addr - context->prev_;
1832     // Add the bin consisting of the end of the previous object to the start of the current object.
1833     collector->AddBin(bin_size, context->prev_);
1834     context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
1835   }
1836 
1837   void AddBin(size_t size, uintptr_t position) {
1838     if (size != 0) {
1839       bins_.insert(std::make_pair(size, position));
1840     }
1841   }
1842 
1843   virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
1844     // Don't sweep any spaces since we probably blasted the internal accounting of the free list
1845     // allocator.
1846     return false;
1847   }
1848 
1849   virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
1850       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
1851     size_t object_size = RoundUp(obj->SizeOf(), kObjectAlignment);
1852     mirror::Object* forward_address;
1853     // Find the smallest bin which we can move obj in.
1854     auto it = bins_.lower_bound(object_size);
1855     if (it == bins_.end()) {
1856       // No available space in the bins, place it in the target space instead (grows the zygote
1857       // space).
1858       size_t bytes_allocated;
1859       forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
1860       if (to_space_live_bitmap_ != nullptr) {
1861         to_space_live_bitmap_->Set(forward_address);
1862       } else {
1863         GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
1864         GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
1865       }
1866     } else {
1867       size_t size = it->first;
1868       uintptr_t pos = it->second;
1869       bins_.erase(it);  // Erase the old bin which we replace with the new smaller bin.
1870       forward_address = reinterpret_cast<mirror::Object*>(pos);
1871       // Set the live and mark bits so that sweeping system weaks works properly.
1872       bin_live_bitmap_->Set(forward_address);
1873       bin_mark_bitmap_->Set(forward_address);
1874       DCHECK_GE(size, object_size);
1875       AddBin(size - object_size, pos + object_size);  // Add a new bin with the remaining space.
1876     }
1877     // Copy the object over to its new location.
1878     memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
1879     if (kUseBakerOrBrooksReadBarrier) {
1880       obj->AssertReadBarrierPointer();
1881       if (kUseBrooksReadBarrier) {
1882         DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
1883         forward_address->SetReadBarrierPointer(forward_address);
1884       }
1885       forward_address->AssertReadBarrierPointer();
1886     }
1887     return forward_address;
1888   }
1889 };
1890 
1891 void Heap::UnBindBitmaps() {
1892   TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
1893   for (const auto& space : GetContinuousSpaces()) {
1894     if (space->IsContinuousMemMapAllocSpace()) {
1895       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1896       if (alloc_space->HasBoundBitmaps()) {
1897         alloc_space->UnBindBitmaps();
1898       }
1899     }
1900   }
1901 }
1902 
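// Called in the zygote process before forking the first application. Runs a full GC, compacts
// surviving objects into the non moving space (via ZygoteCompactingCollector), turns that space
// into a zygote space that is not collected by normal GCs, and installs a mod-union table (and
// optionally a remembered set) so later collections can track references out of the zygote
// space without scanning all of it.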
1903 void Heap::PreZygoteFork() {
1904   CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
1905   Thread* self = Thread::Current();
1906   MutexLock mu(self, zygote_creation_lock_);
1907   // Try to see if we have any Zygote spaces.
1908   if (have_zygote_space_) {
1909     return;
1910   }
1911   VLOG(heap) << "Starting PreZygoteFork";
1912   // Trim the pages at the end of the non moving space.
1913   non_moving_space_->Trim();
1914   // The end of the non-moving space may be protected, unprotect it so that we can copy the zygote
1915   // there.
1916   non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1917   const bool same_space = non_moving_space_ == main_space_;
1918   if (kCompactZygote) {
1919     // Can't compact if the non moving space is the same as the main space.
1920     DCHECK(semi_space_collector_ != nullptr);
1921     // Temporarily disable rosalloc verification because the zygote
1922     // compaction will mess up the rosalloc internal metadata.
1923     ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
1924     ZygoteCompactingCollector zygote_collector(this);
1925     zygote_collector.BuildBins(non_moving_space_);
1926     // Create a new bump pointer space which we will compact into.
1927     space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
1928                                          non_moving_space_->Limit());
1929     // Compact the bump pointer space to a new zygote bump pointer space.
1930     bool reset_main_space = false;
1931     if (IsMovingGc(collector_type_)) {
1932       zygote_collector.SetFromSpace(bump_pointer_space_);
1933     } else {
1934       CHECK(main_space_ != nullptr);
1935       // Copy from the main space.
1936       zygote_collector.SetFromSpace(main_space_);
1937       reset_main_space = true;
1938     }
1939     zygote_collector.SetToSpace(&target_space);
1940     zygote_collector.SetSwapSemiSpaces(false);
1941     zygote_collector.Run(kGcCauseCollectorTransition, false);
1942     if (reset_main_space) {
1943       main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1944       madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
1945       MemMap* mem_map = main_space_->ReleaseMemMap();
1946       RemoveSpace(main_space_);
1947       space::Space* old_main_space = main_space_;
1948       CreateMainMallocSpace(mem_map, kDefaultInitialSize, mem_map->Size(), mem_map->Size());
1949       delete old_main_space;
1950       AddSpace(main_space_);
1951     } else {
1952       bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1953     }
1954     if (temp_space_ != nullptr) {
1955       CHECK(temp_space_->IsEmpty());
1956     }
1957     total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
1958     total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
1959     // Update the end and write out image.
1960     non_moving_space_->SetEnd(target_space.End());
1961     non_moving_space_->SetLimit(target_space.Limit());
1962     VLOG(heap) << "Zygote space size " << non_moving_space_->Size() << " bytes";
1963   }
1964   // Change the collector to the post zygote one.
1965   ChangeCollector(foreground_collector_type_);
1966   // Save the old space so that we can remove it after we complete creating the zygote space.
1967   space::MallocSpace* old_alloc_space = non_moving_space_;
1968   // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
1969   // the remaining available space.
1970   // Remove the old space before creating the zygote space since creating the zygote space sets
1971   // the old alloc space's bitmaps to nullptr.
1972   RemoveSpace(old_alloc_space);
1973   if (collector::SemiSpace::kUseRememberedSet) {
1974     // Sanity bound check.
1975     FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
1976     // Remove the remembered set for the now zygote space (the old
1977     // non-moving space). Note now that we have compacted objects into
1978     // the zygote space, the data in the remembered set is no longer
1979     // needed. The zygote space will instead have a mod-union table
1980     // from this point on.
1981     RemoveRememberedSet(old_alloc_space);
1982   }
1983   space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space",
1984                                                                         low_memory_mode_,
1985                                                                         &non_moving_space_);
1986   CHECK(!non_moving_space_->CanMoveObjects());
1987   if (same_space) {
1988     main_space_ = non_moving_space_;
1989     SetSpaceAsDefault(main_space_);
1990   }
1991   delete old_alloc_space;
1992   CHECK(zygote_space != nullptr) << "Failed creating zygote space";
1993   AddSpace(zygote_space);
1994   non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
1995   AddSpace(non_moving_space_);
1996   have_zygote_space_ = true;
1997   // Enable large object space allocations.
1998   large_object_threshold_ = kDefaultLargeObjectThreshold;
1999   // Create the zygote space mod union table.
2000   accounting::ModUnionTable* mod_union_table =
2001       new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
2002   CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
2003   AddModUnionTable(mod_union_table);
2004   if (collector::SemiSpace::kUseRememberedSet) {
2005     // Add a new remembered set for the post-zygote non-moving space.
2006     accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2007         new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2008                                       non_moving_space_);
2009     CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2010         << "Failed to create post-zygote non-moving space remembered set";
2011     AddRememberedSet(post_zygote_non_moving_space_rem_set);
2012   }
2013 }
2014 
2015 void Heap::FlushAllocStack() {
2016   MarkAllocStackAsLive(allocation_stack_.get());
2017   allocation_stack_->Reset();
2018 }
2019 
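// Marks every object currently in |stack| in whichever of the two given continuous space
// bitmaps contains its address, falling back to the large object bitmap when neither does.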
2020 void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2021                           accounting::ContinuousSpaceBitmap* bitmap2,
2022                           accounting::LargeObjectBitmap* large_objects,
2023                           accounting::ObjectStack* stack) {
2024   DCHECK(bitmap1 != nullptr);
2025   DCHECK(bitmap2 != nullptr);
2026   mirror::Object** limit = stack->End();
2027   for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
2028     const mirror::Object* obj = *it;
2029     if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2030       if (bitmap1->HasAddress(obj)) {
2031         bitmap1->Set(obj);
2032       } else if (bitmap2->HasAddress(obj)) {
2033         bitmap2->Set(obj);
2034       } else {
2035         large_objects->Set(obj);
2036       }
2037     }
2038   }
2039 }
2040 
2041 void Heap::SwapSemiSpaces() {
2042   CHECK(bump_pointer_space_ != nullptr);
2043   CHECK(temp_space_ != nullptr);
2044   std::swap(bump_pointer_space_, temp_space_);
2045 }
2046 
2047 void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2048                    space::ContinuousMemMapAllocSpace* source_space,
2049                    GcCause gc_cause) {
2050   CHECK(kMovingCollector);
2051   if (target_space != source_space) {
2052     // Don't swap spaces since this isn't a typical semi space collection.
2053     semi_space_collector_->SetSwapSemiSpaces(false);
2054     semi_space_collector_->SetFromSpace(source_space);
2055     semi_space_collector_->SetToSpace(target_space);
2056     semi_space_collector_->Run(gc_cause, false);
2057   } else {
2058     CHECK(target_space->IsBumpPointerSpace())
2059         << "In-place compaction is only supported for bump pointer spaces";
2060     mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
2061     mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
2062   }
2063 }
2064 
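// Central entry point for running a garbage collection. Ensures only one GC runs at a time,
// selects a collector based on the current collector/allocator configuration (semi space,
// concurrent copying, mark compact, or one of the mark sweep variants), runs it, enqueues
// cleared references, grows the heap target for the next GC, and logs the results.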
2065 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
2066                                                bool clear_soft_references) {
2067   Thread* self = Thread::Current();
2068   Runtime* runtime = Runtime::Current();
2069   // If the heap can't run the GC, silently fail and return that no GC was run.
2070   switch (gc_type) {
2071     case collector::kGcTypePartial: {
2072       if (!have_zygote_space_) {
2073         return collector::kGcTypeNone;
2074       }
2075       break;
2076     }
2077     default: {
2078       // Other GC types don't have any special cases which make them not runnable. The main case
2079       // here is full GC.
2080     }
2081   }
2082   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2083   Locks::mutator_lock_->AssertNotHeld(self);
2084   if (self->IsHandlingStackOverflow()) {
2085     LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
2086   }
2087   bool compacting_gc;
2088   {
2089     gc_complete_lock_->AssertNotHeld(self);
2090     ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
2091     MutexLock mu(self, *gc_complete_lock_);
2092     // Ensure there is only one GC at a time.
2093     WaitForGcToCompleteLocked(gc_cause, self);
2094     compacting_gc = IsMovingGc(collector_type_);
2095     // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2096     if (compacting_gc && disable_moving_gc_count_ != 0) {
2097       LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2098       return collector::kGcTypeNone;
2099     }
2100     collector_type_running_ = collector_type_;
2101   }
2102 
2103   if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2104     ++runtime->GetStats()->gc_for_alloc_count;
2105     ++self->GetStats()->gc_for_alloc_count;
2106   }
2107   uint64_t gc_start_time_ns = NanoTime();
2108   uint64_t gc_start_size = GetBytesAllocated();
2109   // Approximate allocation rate in bytes / second.
2110   uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
2111   // Back to back GCs can cause 0 ms of wait time in between GC invocations.
2112   if (LIKELY(ms_delta != 0)) {
2113     allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
2114     ATRACE_INT("Allocation rate KB/s", allocation_rate_ / KB);
2115     VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
2116   }
2117 
2118   DCHECK_LT(gc_type, collector::kGcTypeMax);
2119   DCHECK_NE(gc_type, collector::kGcTypeNone);
2120 
2121   collector::GarbageCollector* collector = nullptr;
2122   // TODO: Clean this up.
2123   if (compacting_gc) {
2124     DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2125            current_allocator_ == kAllocatorTypeTLAB);
2126     switch (collector_type_) {
2127       case kCollectorTypeSS:
2128         // Fall-through.
2129       case kCollectorTypeGSS:
2130         semi_space_collector_->SetFromSpace(bump_pointer_space_);
2131         semi_space_collector_->SetToSpace(temp_space_);
2132         semi_space_collector_->SetSwapSemiSpaces(true);
2133         collector = semi_space_collector_;
2134         break;
2135       case kCollectorTypeCC:
2136         collector = concurrent_copying_collector_;
2137         break;
2138       case kCollectorTypeMC:
2139         mark_compact_collector_->SetSpace(bump_pointer_space_);
2140         collector = mark_compact_collector_;
2141         break;
2142       default:
2143         LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2144     }
2145     if (collector != mark_compact_collector_) {
2146       temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2147       CHECK(temp_space_->IsEmpty());
2148     }
2149     gc_type = collector::kGcTypeFull;  // TODO: Not hard code this in.
2150   } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2151       current_allocator_ == kAllocatorTypeDlMalloc) {
2152     collector = FindCollectorByGcType(gc_type);
2153   } else {
2154     LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2155   }
2156   CHECK(collector != nullptr)
2157       << "Could not find garbage collector with collector_type="
2158       << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
2159   collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2160   total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2161   total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2162   RequestHeapTrim();
2163   // Enqueue cleared references.
2164   reference_processor_.EnqueueClearedReferences(self);
2165   // Grow the heap so that we know when to perform the next GC.
2166   GrowForUtilization(collector);
2167   const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2168   const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2169   // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2170   // (mutator time blocked >= long_pause_log_threshold_).
2171   bool log_gc = gc_cause == kGcCauseExplicit;
2172   if (!log_gc && CareAboutPauseTimes()) {
2173     // GC for alloc pauses the allocating thread, so consider it as a pause.
2174     log_gc = duration > long_gc_log_threshold_ ||
2175         (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2176     for (uint64_t pause : pause_times) {
2177       log_gc = log_gc || pause >= long_pause_log_threshold_;
2178     }
2179   }
2180   if (log_gc) {
2181     const size_t percent_free = GetPercentFree();
2182     const size_t current_heap_size = GetBytesAllocated();
2183     const size_t total_memory = GetTotalMemory();
2184     std::ostringstream pause_string;
2185     for (size_t i = 0; i < pause_times.size(); ++i) {
2186         pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2187                      << ((i != pause_times.size() - 1) ? "," : "");
2188     }
2189     LOG(INFO) << gc_cause << " " << collector->GetName()
2190               << " GC freed "  << current_gc_iteration_.GetFreedObjects() << "("
2191               << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2192               << current_gc_iteration_.GetFreedLargeObjects() << "("
2193               << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2194               << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2195               << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2196               << " total " << PrettyDuration((duration / 1000) * 1000);
2197     VLOG(heap) << ConstDumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2198   }
2199   FinishGC(self, gc_type);
2200   // Inform DDMS that a GC completed.
2201   Dbg::GcDidFinish();
2202   return gc_type;
2203 }
2204 
2205 void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2206   MutexLock mu(self, *gc_complete_lock_);
2207   collector_type_running_ = kCollectorTypeNone;
2208   if (gc_type != collector::kGcTypeNone) {
2209     last_gc_type_ = gc_type;
2210   }
2211   // Wake anyone who may have been waiting for the GC to complete.
2212   gc_complete_cond_->Broadcast(self);
2213 }
2214 
2215 static void RootMatchesObjectVisitor(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
2216                                      RootType /*root_type*/) {
2217   mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
2218   if (*root == obj) {
2219     LOG(INFO) << "Object " << obj << " is a root";
2220   }
2221 }
2222 
2223 class ScanVisitor {
2224  public:
2225   void operator()(const mirror::Object* obj) const {
2226     LOG(ERROR) << "Would have rescanned object " << obj;
2227   }
2228 };
2229 
2230 // Verify a reference from an object.
2231 class VerifyReferenceVisitor {
2232  public:
2233   explicit VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2234       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
2235       : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
2236 
2237   size_t GetFailureCount() const {
2238     return fail_count_->LoadSequentiallyConsistent();
2239   }
2240 
2241   void operator()(mirror::Class* klass, mirror::Reference* ref) const
2242       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2243     if (verify_referent_) {
2244       VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
2245     }
2246   }
2247 
2248   void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
2249       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2250     VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
2251   }
2252 
2253   bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
2254     return heap_->IsLiveObjectLocked(obj, true, false, true);
2255   }
2256 
2257   static void VerifyRootCallback(mirror::Object** root, void* arg, uint32_t thread_id,
2258                                  RootType root_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2259     VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
2260     if (!visitor->VerifyReference(nullptr, *root, MemberOffset(0))) {
2261       LOG(ERROR) << "Root " << *root << " is dead with type " << PrettyTypeOf(*root)
2262           << " thread_id= " << thread_id << " root_type= " << root_type;
2263     }
2264   }
2265 
2266  private:
2267   // TODO: Fix the no thread safety analysis.
2268   // Returns false on failure.
2269   bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
2270       NO_THREAD_SAFETY_ANALYSIS {
2271     if (ref == nullptr || IsLive(ref)) {
2272       // Verify that the reference is live.
2273       return true;
2274     }
2275     if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
2276       // Print the message only on the first failure to prevent spam.
2277       LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
2278     }
2279     if (obj != nullptr) {
2280       // Only do this part for non roots.
2281       accounting::CardTable* card_table = heap_->GetCardTable();
2282       accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2283       accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2284       byte* card_addr = card_table->CardFromAddr(obj);
2285       LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2286                  << offset << "\n card value = " << static_cast<int>(*card_addr);
2287       if (heap_->IsValidObjectAddress(obj->GetClass())) {
2288         LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
2289       } else {
2290         LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
2291       }
2292 
2293       // Attempt to find the class inside of the recently freed objects.
2294       space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2295       if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2296         space::MallocSpace* space = ref_space->AsMallocSpace();
2297         mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2298         if (ref_class != nullptr) {
2299           LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2300                      << PrettyClass(ref_class);
2301         } else {
2302           LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
2303         }
2304       }
2305 
2306       if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2307           ref->GetClass()->IsClass()) {
2308         LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
2309       } else {
2310         LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2311                    << ") is not a valid heap address";
2312       }
2313 
2314       card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
2315       void* cover_begin = card_table->AddrFromCard(card_addr);
2316       void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2317           accounting::CardTable::kCardSize);
2318       LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2319           << "-" << cover_end;
2320       accounting::ContinuousSpaceBitmap* bitmap =
2321           heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
2322 
2323       if (bitmap == nullptr) {
2324         LOG(ERROR) << "Object " << obj << " has no bitmap";
2325         if (!VerifyClassClass(obj->GetClass())) {
2326           LOG(ERROR) << "Object " << obj << " failed class verification!";
2327         }
2328       } else {
2329         // Print out how the object is live.
2330         if (bitmap->Test(obj)) {
2331           LOG(ERROR) << "Object " << obj << " found in live bitmap";
2332         }
2333         if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
2334           LOG(ERROR) << "Object " << obj << " found in allocation stack";
2335         }
2336         if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
2337           LOG(ERROR) << "Object " << obj << " found in live stack";
2338         }
2339         if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2340           LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2341         }
2342         if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2343           LOG(ERROR) << "Ref " << ref << " found in live stack";
2344         }
2345         // Attempt to see if the card table missed the reference.
2346         ScanVisitor scan_visitor;
2347         byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
2348         card_table->Scan(bitmap, byte_cover_begin,
2349                          byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
2350       }
2351 
2352       // Search to see if any of the roots reference our object.
2353       void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
2354       Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
2355 
2356       // Search to see if any of the roots reference our reference.
2357       arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
2358       Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
2359     }
2360     return false;
2361   }
2362 
2363   Heap* const heap_;
2364   Atomic<size_t>* const fail_count_;
2365   const bool verify_referent_;
2366 };
2367 
2368 // Verify all references within an object, for use with HeapBitmap::Visit.
2369 class VerifyObjectVisitor {
2370  public:
2371   explicit VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2372       : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
2373   }
2374 
2375   void operator()(mirror::Object* obj) const
2376       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2377     // Note: we verify the references in obj but not obj itself, since obj must be live or we
2378     // would not have found it in the live bitmap.
2379     VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
2380     // The class doesn't count as a reference but we should verify it anyway.
2381     obj->VisitReferences<true>(visitor, visitor);
2382   }
2383 
2384   static void VisitCallback(mirror::Object* obj, void* arg)
2385       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2386     VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
2387     visitor->operator()(obj);
2388   }
2389 
2390   size_t GetFailureCount() const {
2391     return fail_count_->LoadSequentiallyConsistent();
2392   }
2393 
2394  private:
2395   Heap* const heap_;
2396   Atomic<size_t>* const fail_count_;
2397   const bool verify_referent_;
2398 };
2399 
2400 void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2401   // Slow path, the allocation stack push back must have already failed.
2402   DCHECK(!allocation_stack_->AtomicPushBack(*obj));
2403   do {
2404     // TODO: Add handle VerifyObject.
2405     StackHandleScope<1> hs(self);
2406     HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2407     // Push our object into the reserve region of the allocation stack. This is only required
2408     // because heap verification requires that roots are live (either in the live bitmap or in
2409     // the allocation stack).
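    // (Editorial note: if even the reserve push fails, the CHECK below aborts; otherwise the
    // sticky GC should free up room in the allocation stack so that the AtomicPushBack retry in
    // the loop condition can succeed.)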
2410     CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2411     CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2412   } while (!allocation_stack_->AtomicPushBack(*obj));
2413 }
2414 
2415 void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2416   // Slow path, the allocation stack push back must have already failed.
2417   DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
2418   mirror::Object** start_address;
2419   mirror::Object** end_address;
2420   while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
2421                                             &end_address)) {
2422     // TODO: Add handle VerifyObject.
2423     StackHandleScope<1> hs(self);
2424     HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2425     // Push our object into the reserve region of the allocation stack. This is only required
2426     // because heap verification requires that roots are live (either in the live bitmap or in
2427     // the allocation stack).
2428     CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2429     // Push into the reserve allocation stack.
2430     CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2431   }
2432   self->SetThreadLocalAllocationStack(start_address, end_address);
2433   // Retry on the new thread-local allocation stack.
2434   CHECK(self->PushOnThreadLocalAllocationStack(*obj));  // Must succeed.
2435 }
2436 
2437 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
2438 size_t Heap::VerifyHeapReferences(bool verify_referents) {
2439   Thread* self = Thread::Current();
2440   Locks::mutator_lock_->AssertExclusiveHeld(self);
2441   // Let's sort our allocation stacks so that we can binary search them efficiently.
2442   allocation_stack_->Sort();
2443   live_stack_->Sort();
2444   // Since we sorted the allocation stack contents, we need to revoke all
2445   // thread-local allocation stacks.
2446   RevokeAllThreadLocalAllocationStacks(self);
2447   Atomic<size_t> fail_count_(0);
2448   VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
2449   // Verify objects in the allocation stack since these will be objects which were:
2450   // 1. Allocated prior to the GC (pre GC verification).
2451   // 2. Allocated during the GC (pre sweep GC verification).
2452   // We don't want to verify the objects in the live stack since they themselves may be
2453   // pointing to dead objects if they are not reachable.
2454   VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
2455   // Verify the roots:
2456   Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRootCallback, &visitor);
2457   if (visitor.GetFailureCount() > 0) {
2458     // Dump mod-union tables.
2459     for (const auto& table_pair : mod_union_tables_) {
2460       accounting::ModUnionTable* mod_union_table = table_pair.second;
2461       mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
2462     }
2463     // Dump remembered sets.
2464     for (const auto& table_pair : remembered_sets_) {
2465       accounting::RememberedSet* remembered_set = table_pair.second;
2466       remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
2467     }
2468     DumpSpaces(LOG(ERROR));
2469   }
2470   return visitor.GetFailureCount();
2471 }
2472 
2473 class VerifyReferenceCardVisitor {
2474  public:
2475   VerifyReferenceCardVisitor(Heap* heap, bool* failed)
2476       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
2477                             Locks::heap_bitmap_lock_)
2478       : heap_(heap), failed_(failed) {
2479   }
2480 
2481   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
2482   // annotalysis on visitors.
2483   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
2484       NO_THREAD_SAFETY_ANALYSIS {
2485     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
2486     // Filter out class references since changing an object's class does not mark the card as dirty.
2487     // Also handles large objects, since the only reference they hold is a class reference.
2488     if (ref != nullptr && !ref->IsClass()) {
2489       accounting::CardTable* card_table = heap_->GetCardTable();
2490       // If the object references something in the live stack (other than a class), then the
2491       // object must be on a dirty card; otherwise a card mark is missing.
2492       if (!card_table->AddrIsInCardTable(obj)) {
2493         LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
2494         *failed_ = true;
2495       } else if (!card_table->IsDirty(obj)) {
2496         // TODO: Check mod-union tables.
2497         // Card should be either kCardDirty if it got re-dirtied after we aged it, or
2498         // kCardDirty - 1 if it didn't get touched since we aged it.
2499         accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2500         if (live_stack->ContainsSorted(ref)) {
2501           if (live_stack->ContainsSorted(obj)) {
2502             LOG(ERROR) << "Object " << obj << " found in live stack";
2503           }
2504           if (heap_->GetLiveBitmap()->Test(obj)) {
2505             LOG(ERROR) << "Object " << obj << " found in live bitmap";
2506           }
2507           LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
2508                     << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
2509 
2510           // Print which field of the object is dead.
2511           if (!obj->IsObjectArray()) {
2512             mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
2513             CHECK(klass != NULL);
2514             mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
2515                                                                       : klass->GetIFields();
2516             CHECK(fields != NULL);
2517             for (int32_t i = 0; i < fields->GetLength(); ++i) {
2518               mirror::ArtField* cur = fields->Get(i);
2519               if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
2520                 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
2521                           << PrettyField(cur);
2522                 break;
2523               }
2524             }
2525           } else {
2526             mirror::ObjectArray<mirror::Object>* object_array =
2527                 obj->AsObjectArray<mirror::Object>();
2528             for (int32_t i = 0; i < object_array->GetLength(); ++i) {
2529               if (object_array->Get(i) == ref) {
2530                 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
2531               }
2532             }
2533           }
2534 
2535           *failed_ = true;
2536         }
2537       }
2538     }
2539   }
2540 
2541  private:
2542   Heap* const heap_;
2543   bool* const failed_;
2544 };
2545 
2546 class VerifyLiveStackReferences {
2547  public:
2548   explicit VerifyLiveStackReferences(Heap* heap)
2549       : heap_(heap),
2550         failed_(false) {}
2551 
2552   void operator()(mirror::Object* obj) const
2553       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2554     VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
2555     obj->VisitReferences<true>(visitor, VoidFunctor());
2556   }
2557 
2558   bool Failed() const {
2559     return failed_;
2560   }
2561 
2562  private:
2563   Heap* const heap_;
2564   bool failed_;
2565 };
2566 
2567 bool Heap::VerifyMissingCardMarks() {
2568   Thread* self = Thread::Current();
2569   Locks::mutator_lock_->AssertExclusiveHeld(self);
2570   // We need to sort the live stack since we binary search it.
2571   live_stack_->Sort();
2572   // Since we sorted the stack contents, we need to revoke all
2573   // thread-local allocation stacks.
2574   RevokeAllThreadLocalAllocationStacks(self);
2575   VerifyLiveStackReferences visitor(this);
2576   GetLiveBitmap()->Visit(visitor);
2577   // We can verify objects in the live stack since none of these should reference dead objects.
2578   for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
2579     if (!kUseThreadLocalAllocationStack || *it != nullptr) {
2580       visitor(*it);
2581     }
2582   }
2583   return !visitor.Failed();
2584 }
2585 
2586 void Heap::SwapStacks(Thread* self) {
2587   if (kUseThreadLocalAllocationStack) {
2588     live_stack_->AssertAllZero();
2589   }
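  // Editorial note: swapping makes the just-filled allocation stack the live stack for the GC to
  // process, while allocation continues into the previously empty stack.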
2590   allocation_stack_.swap(live_stack_);
2591 }
2592 
2593 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
2594   // This must be called only during the pause.
2595   CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
2596   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
2597   MutexLock mu2(self, *Locks::thread_list_lock_);
2598   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
2599   for (Thread* t : thread_list) {
2600     t->RevokeThreadLocalAllocationStack();
2601   }
2602 }
2603 
2604 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
2605   if (kIsDebugBuild) {
2606     if (bump_pointer_space_ != nullptr) {
2607       bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
2608     }
2609   }
2610 }
2611 
2612 accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
2613   auto it = mod_union_tables_.find(space);
2614   if (it == mod_union_tables_.end()) {
2615     return nullptr;
2616   }
2617   return it->second;
2618 }
2619 
2620 accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
2621   auto it = remembered_sets_.find(space);
2622   if (it == remembered_sets_.end()) {
2623     return nullptr;
2624   }
2625   return it->second;
2626 }
2627 
2628 void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets) {
2629   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
2630   // Clear cards and keep track of cards cleared in the mod-union table.
2631   for (const auto& space : continuous_spaces_) {
2632     accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
2633     accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
2634     if (table != nullptr) {
2635       const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
2636           "ImageModUnionClearCards";
2637       TimingLogger::ScopedTiming t(name, timings);
2638       table->ClearCards();
2639     } else if (use_rem_sets && rem_set != nullptr) {
2640       DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
2641           << static_cast<int>(collector_type_);
2642       TimingLogger::ScopedTiming t("AllocSpaceRemSetClearCards", timings);
2643       rem_set->ClearCards();
2644     } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
2645       TimingLogger::ScopedTiming t("AllocSpaceClearCards", timings);
2646       // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
2647       // were dirty before the GC started.
2648       // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
2649       // -> clean(cleaning thread).
2650       // The races mean we end up with either an aged card or an unaged card. Since we checkpoint
2651       // the roots and then scan / update the mod-union tables afterwards, we will always scan one
2652       // of the two. If we end up with the unaged card, we scan it in the pause.
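      // (Editorial note, assuming the usual AgeCardVisitor behavior: dirty cards are aged to
      // kCardDirty - 1 and other values are cleared, so cards dirtied before the GC stay
      // distinguishable from cards dirtied by mutators while the GC runs.)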
2653       card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
2654                                      VoidFunctor());
2655     }
2656   }
2657 }
2658 
2659 static void IdentityMarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*, void*) {
2660 }
2661 
2662 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
2663   Thread* const self = Thread::Current();
2664   TimingLogger* const timings = current_gc_iteration_.GetTimings();
2665   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
2666   if (verify_pre_gc_heap_) {
2667     TimingLogger::ScopedTiming t("(Paused)PreGcVerifyHeapReferences", timings);
2668     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2669     size_t failures = VerifyHeapReferences();
2670     if (failures > 0) {
2671       LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
2672           << " failures";
2673     }
2674   }
2675   // Check that all objects which reference things in the live stack are on dirty cards.
2676   if (verify_missing_card_marks_) {
2677     TimingLogger::ScopedTiming t("(Paused)PreGcVerifyMissingCardMarks", timings);
2678     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2679     SwapStacks(self);
2680     // Sort the live stack so that we can quickly binary search it later.
2681     CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
2682                                     << " missing card mark verification failed\n" << DumpSpaces();
2683     SwapStacks(self);
2684   }
2685   if (verify_mod_union_table_) {
2686     TimingLogger::ScopedTiming t("(Paused)PreGcVerifyModUnionTables", timings);
2687     ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
2688     for (const auto& table_pair : mod_union_tables_) {
2689       accounting::ModUnionTable* mod_union_table = table_pair.second;
2690       mod_union_table->UpdateAndMarkReferences(IdentityMarkHeapReferenceCallback, nullptr);
2691       mod_union_table->Verify();
2692     }
2693   }
2694 }
2695 
2696 void Heap::PreGcVerification(collector::GarbageCollector* gc) {
2697   if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
2698     collector::GarbageCollector::ScopedPause pause(gc);
2699     PreGcVerificationPaused(gc);
2700   }
2701 }
2702 
2703 void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
2704   // TODO: Add a new runtime option for this?
2705   if (verify_pre_gc_rosalloc_) {
2706     RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
2707   }
2708 }
2709 
2710 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
2711   Thread* const self = Thread::Current();
2712   TimingLogger* const timings = current_gc_iteration_.GetTimings();
2713   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
2714   // Called before sweeping occurs since we want to make sure we are not going to reclaim any
2715   // reachable objects.
2716   if (verify_pre_sweeping_heap_) {
2717     TimingLogger::ScopedTiming t("(Paused)PostSweepingVerifyHeapReferences", timings);
2718     CHECK_NE(self->GetState(), kRunnable);
2719     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2720     // Swapping bound bitmaps does nothing.
2721     gc->SwapBitmaps();
2722     // Pass in false since concurrent reference processing can mean that the reference referents
2723     // may point to dead objects at the point which PreSweepingGcVerification is called.
2724     size_t failures = VerifyHeapReferences(false);
2725     if (failures > 0) {
2726       LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
2727           << " failures";
2728     }
2729     gc->SwapBitmaps();
2730   }
2731   if (verify_pre_sweeping_rosalloc_) {
2732     RosAllocVerification(timings, "PreSweepingRosAllocVerification");
2733   }
2734 }
2735 
2736 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
2737   // Only pause if we have to do some verification.
2738   Thread* const self = Thread::Current();
2739   TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
2740   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
2741   if (verify_system_weaks_) {
2742     ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
2743     collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
2744     mark_sweep->VerifySystemWeaks();
2745   }
2746   if (verify_post_gc_rosalloc_) {
2747     RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
2748   }
2749   if (verify_post_gc_heap_) {
2750     TimingLogger::ScopedTiming t("(Paused)PostGcVerifyHeapReferences", timings);
2751     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2752     size_t failures = VerifyHeapReferences();
2753     if (failures > 0) {
2754       LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
2755           << " failures";
2756     }
2757   }
2758 }
2759 
2760 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
2761   if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
2762     collector::GarbageCollector::ScopedPause pause(gc);
2763     PostGcVerificationPaused(gc);
2764   }
2765 }
2766 
2767 void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
2768   TimingLogger::ScopedTiming t(name, timings);
2769   for (const auto& space : continuous_spaces_) {
2770     if (space->IsRosAllocSpace()) {
2771       VLOG(heap) << name << " : " << space->GetName();
2772       space->AsRosAllocSpace()->Verify();
2773     }
2774   }
2775 }
2776 
2777 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
2778   ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
2779   MutexLock mu(self, *gc_complete_lock_);
2780   return WaitForGcToCompleteLocked(cause, self);
2781 }
2782 
2783 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
2784   collector::GcType last_gc_type = collector::kGcTypeNone;
2785   uint64_t wait_start = NanoTime();
2786   while (collector_type_running_ != kCollectorTypeNone) {
2787     ATRACE_BEGIN("GC: Wait For Completion");
2788     // We must wait, so change thread state and then sleep on gc_complete_cond_.
2789     gc_complete_cond_->Wait(self);
2790     last_gc_type = last_gc_type_;
2791     ATRACE_END();
2792   }
2793   uint64_t wait_time = NanoTime() - wait_start;
2794   total_wait_time_ += wait_time;
2795   if (wait_time > long_pause_log_threshold_) {
2796     LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
2797         << " for cause " << cause;
2798   }
2799   return last_gc_type;
2800 }
2801 
2802 void Heap::DumpForSigQuit(std::ostream& os) {
2803   os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
2804      << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
2805   DumpGcPerformanceInfo(os);
2806 }
2807 
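// Editorial note: the percentage below is measured against the current target footprint
// (max_allowed_footprint_), not the hard capacity, so it reflects headroom before the next growth
// decision rather than absolute free RAM.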
2808 size_t Heap::GetPercentFree() {
2809   return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
2810 }
2811 
2812 void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
2813   if (max_allowed_footprint > GetMaxMemory()) {
2814     VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
2815              << PrettySize(GetMaxMemory());
2816     max_allowed_footprint = GetMaxMemory();
2817   }
2818   max_allowed_footprint_ = max_allowed_footprint;
2819 }
2820 
2821 bool Heap::IsMovableObject(const mirror::Object* obj) const {
2822   if (kMovingCollector) {
2823     space::Space* space = FindContinuousSpaceFromObject(obj, true);
2824     if (space != nullptr) {
2825       // TODO: Check large object?
2826       return space->CanMoveObjects();
2827     }
2828   }
2829   return false;
2830 }
2831 
2832 void Heap::UpdateMaxNativeFootprint() {
2833   size_t native_size = native_bytes_allocated_.LoadRelaxed();
2834   // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
2835   size_t target_size = native_size / GetTargetHeapUtilization();
2836   if (target_size > native_size + max_free_) {
2837     target_size = native_size + max_free_;
2838   } else if (target_size < native_size + min_free_) {
2839     target_size = native_size + min_free_;
2840   }
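  // Editorial note (illustrative numbers only): with 20 MB of native allocations and a target
  // utilization of 0.5, the raw target is 40 MB, which the clamps above keep within
  // [native_size + min_free_, native_size + max_free_] before the growth_limit_ cap below.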
2841   native_footprint_gc_watermark_ = std::min(growth_limit_, target_size);
2842 }
2843 
2844 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
2845   for (const auto& collector : garbage_collectors_) {
2846     if (collector->GetCollectorType() == collector_type_ &&
2847         collector->GetGcType() == gc_type) {
2848       return collector;
2849     }
2850   }
2851   return nullptr;
2852 }
2853 
2854 double Heap::HeapGrowthMultiplier() const {
2855   // Return 1.0 if we don't care about pause times (we are in the background) or are in low memory mode.
2856   if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
2857     return 1.0;
2858   }
2859   return foreground_heap_growth_multiplier_;
2860 }
2861 
2862 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
2863   // We know what our utilization is at this moment.
2864   // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
2865   const uint64_t bytes_allocated = GetBytesAllocated();
2866   last_gc_size_ = bytes_allocated;
2867   last_gc_time_ns_ = NanoTime();
2868   uint64_t target_size;
2869   collector::GcType gc_type = collector_ran->GetGcType();
2870   if (gc_type != collector::kGcTypeSticky) {
2871     // Grow the heap for non sticky GC.
2872     const float multiplier = HeapGrowthMultiplier();  // Use the multiplier to grow more for
2873     // foreground.
2874     intptr_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
2875     CHECK_GE(delta, 0);
2876     target_size = bytes_allocated + delta * multiplier;
2877     target_size = std::min(target_size,
2878                            bytes_allocated + static_cast<uint64_t>(max_free_ * multiplier));
2879     target_size = std::max(target_size,
2880                            bytes_allocated + static_cast<uint64_t>(min_free_ * multiplier));
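    // Editorial note (illustrative numbers only): with bytes_allocated = 40MB, a target utilization
    // of 0.75 and a foreground multiplier of 2.0, delta is ~13MB, so the unclamped target is
    // ~40MB + 2.0 * 13MB ~= 67MB before the max_free_/min_free_ clamps above.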
2881     native_need_to_run_finalization_ = true;
2882     next_gc_type_ = collector::kGcTypeSticky;
2883   } else {
2884     collector::GcType non_sticky_gc_type =
2885         have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
2886     // Find what the next non sticky collector will be.
2887     collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
2888     // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
2889     // do another sticky collection next.
2890     // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
2891     // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
2892     // if the sticky GC throughput always remained >= the full/partial throughput.
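    // Editorial note (illustrative numbers only): if the sticky GC just achieved ~300MB/s while the
    // partial/full collector's historical mean is ~250MB/s, we schedule another sticky GC, provided
    // bytes_allocated is still within the footprint limit.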
2893     if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
2894         non_sticky_collector->GetEstimatedMeanThroughput() &&
2895         non_sticky_collector->NumberOfIterations() > 0 &&
2896         bytes_allocated <= max_allowed_footprint_) {
2897       next_gc_type_ = collector::kGcTypeSticky;
2898     } else {
2899       next_gc_type_ = non_sticky_gc_type;
2900     }
2901     // If we have freed enough memory, shrink the heap back down.
2902     if (bytes_allocated + max_free_ < max_allowed_footprint_) {
2903       target_size = bytes_allocated + max_free_;
2904     } else {
2905       target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
2906     }
2907   }
2908   if (!ignore_max_footprint_) {
2909     SetIdealFootprint(target_size);
2910     if (IsGcConcurrent()) {
2911       // Calculate when to perform the next ConcurrentGC.
2912       // Calculate the estimated GC duration.
2913       const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
2914       // Estimate how many remaining bytes we will have when we need to start the next GC.
2915       size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
2916       remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
2917       remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
2918       if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
2919         // This should never happen: it would mean that, at the estimated allocation rate, we
2920         // would exceed the application's entire footprint before the GC could finish. Schedule
2921         // another GC nearly straight away.
2922         remaining_bytes = kMinConcurrentRemainingBytes;
2923       }
2924       DCHECK_LE(remaining_bytes, max_allowed_footprint_);
2925       DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
2926       // Start a concurrent GC when we get close to the estimated remaining bytes. When the
2927       // allocation rate is very high, remaining_bytes could tell us that we should start a GC
2928       // right away.
2929       concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
2930                                          static_cast<size_t>(bytes_allocated));
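      // Editorial note: once bytes_allocated crosses concurrent_start_bytes_, a background GC is
      // requested (see RequestConcurrentGC), leaving roughly remaining_bytes of headroom for the
      // mutators to allocate into while that GC runs.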
2931     }
2932   }
2933 }
2934 
2935 void Heap::ClearGrowthLimit() {
2936   growth_limit_ = capacity_;
2937   non_moving_space_->ClearGrowthLimit();
2938 }
2939 
2940 void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
2941   ScopedObjectAccess soa(self);
2942   ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
2943   jvalue args[1];
2944   args[0].l = arg.get();
2945   InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
2946   // Restore object in case it gets moved.
2947   *object = soa.Decode<mirror::Object*>(arg.get());
2948 }
2949 
2950 void Heap::RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj) {
2951   StackHandleScope<1> hs(self);
2952   HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2953   RequestConcurrentGC(self);
2954 }
2955 
2956 void Heap::RequestConcurrentGC(Thread* self) {
2957   // Make sure that we can do a concurrent GC.
2958   Runtime* runtime = Runtime::Current();
2959   if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
2960       self->IsHandlingStackOverflow()) {
2961     return;
2962   }
2963   // We already have a request pending, no reason to start more until we update
2964   // concurrent_start_bytes_.
2965   concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
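  // (Editorial note: the Java GC daemon eventually calls back into Heap::ConcurrentGC, and the
  // resulting collection recomputes concurrent_start_bytes_ in GrowForUtilization.)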
2966   JNIEnv* env = self->GetJniEnv();
2967   DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
2968   DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
2969   env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
2970                             WellKnownClasses::java_lang_Daemons_requestGC);
2971   CHECK(!env->ExceptionCheck());
2972 }
2973 
2974 void Heap::ConcurrentGC(Thread* self) {
2975   if (Runtime::Current()->IsShuttingDown(self)) {
2976     return;
2977   }
2978   // Wait for any GCs currently running to finish.
2979   if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
2980     // If we can't run the GC type we wanted to run, find the next appropriate one and try that
2981     // instead. E.g. can't do partial, so do full instead.
2982     if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
2983         collector::kGcTypeNone) {
2984       for (collector::GcType gc_type : gc_plan_) {
2985         // Attempt to run the collector, if we succeed, we are done.
2986         if (gc_type > next_gc_type_ &&
2987             CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) {
2988           break;
2989         }
2990       }
2991     }
2992   }
2993 }
2994 
2995 void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
2996   Thread* self = Thread::Current();
2997   {
2998     MutexLock mu(self, *heap_trim_request_lock_);
2999     if (desired_collector_type_ == desired_collector_type) {
3000       return;
3001     }
3002     heap_transition_or_trim_target_time_ =
3003         std::max(heap_transition_or_trim_target_time_, NanoTime() + delta_time);
3004     desired_collector_type_ = desired_collector_type;
3005   }
3006   SignalHeapTrimDaemon(self);
3007 }
3008 
3009 void Heap::RequestHeapTrim() {
3010   // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3011   // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3012   // a space it will hold its lock and can become a cause of jank.
3013   // Note: the large object space trims itself, and the zygote space was trimmed at fork time and
3014   // has been unchanging since.
3015 
3016   // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3017   // because that only marks object heads, so a large array looks like lots of empty space. We
3018   // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
3019   // to utilization (which is probably inversely proportional to how much benefit we can expect).
3020   // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3021   // not how much use we're making of those pages.
3022 
3023   Thread* self = Thread::Current();
3024   Runtime* runtime = Runtime::Current();
3025   if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
3026       runtime->IsZygote()) {
3027     // Ignore the request if we are the zygote to prevent app launching lag due to sleep in heap
3028     // trimmer daemon. b/17310019
3029     // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time)
3030     // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
3031     // as we don't hold the lock while requesting the trim).
3032     return;
3033   }
3034   {
3035     MutexLock mu(self, *heap_trim_request_lock_);
3036     if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
3037       // We have done a heap trim in the last kHeapTrimWait nanosecs, don't request another one
3038       // just yet.
3039       return;
3040     }
3041     heap_trim_request_pending_ = true;
3042     uint64_t current_time = NanoTime();
3043     if (heap_transition_or_trim_target_time_ < current_time) {
3044       heap_transition_or_trim_target_time_ = current_time + kHeapTrimWait;
3045     }
3046   }
3047   // Notify the daemon thread which will actually do the heap trim.
3048   SignalHeapTrimDaemon(self);
3049 }
3050 
3051 void Heap::SignalHeapTrimDaemon(Thread* self) {
3052   JNIEnv* env = self->GetJniEnv();
3053   DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
3054   DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != nullptr);
3055   env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
3056                             WellKnownClasses::java_lang_Daemons_requestHeapTrim);
3057   CHECK(!env->ExceptionCheck());
3058 }
3059 
3060 void Heap::RevokeThreadLocalBuffers(Thread* thread) {
3061   if (rosalloc_space_ != nullptr) {
3062     rosalloc_space_->RevokeThreadLocalBuffers(thread);
3063   }
3064   if (bump_pointer_space_ != nullptr) {
3065     bump_pointer_space_->RevokeThreadLocalBuffers(thread);
3066   }
3067 }
3068 
3069 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3070   if (rosalloc_space_ != nullptr) {
3071     rosalloc_space_->RevokeThreadLocalBuffers(thread);
3072   }
3073 }
3074 
3075 void Heap::RevokeAllThreadLocalBuffers() {
3076   if (rosalloc_space_ != nullptr) {
3077     rosalloc_space_->RevokeAllThreadLocalBuffers();
3078   }
3079   if (bump_pointer_space_ != nullptr) {
3080     bump_pointer_space_->RevokeAllThreadLocalBuffers();
3081   }
3082 }
3083 
3084 bool Heap::IsGCRequestPending() const {
3085   return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
3086 }
3087 
3088 void Heap::RunFinalization(JNIEnv* env) {
3089   // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
3090   if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
3091     CHECK(WellKnownClasses::java_lang_System != nullptr);
3092     WellKnownClasses::java_lang_System_runFinalization =
3093         CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
3094     CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
3095   }
3096   env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
3097                             WellKnownClasses::java_lang_System_runFinalization);
3098   env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
3099                             WellKnownClasses::java_lang_System_runFinalization);
3100 }
3101 
3102 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
3103   Thread* self = ThreadForEnv(env);
3104   if (native_need_to_run_finalization_) {
3105     RunFinalization(env);
3106     UpdateMaxNativeFootprint();
3107     native_need_to_run_finalization_ = false;
3108   }
3109   // Total native bytes allocated; FetchAndAdd returns the pre-add value, so add bytes for the new total.
3110   size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
3111   new_native_bytes_allocated += bytes;
3112   if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
3113     collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
3114         collector::kGcTypeFull;
3115 
3116     // The second watermark is higher than the gc watermark. If you hit this it means you are
3117     // allocating native objects faster than the GC can keep up with.
3118     if (new_native_bytes_allocated > growth_limit_) {
3119       if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
3120         // Just finished a GC, attempt to run finalizers.
3121         RunFinalization(env);
3122         CHECK(!env->ExceptionCheck());
3123       }
3124       // If we still are over the watermark, attempt a GC for alloc and run finalizers.
3125       if (new_native_bytes_allocated > growth_limit_) {
3126         CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
3127         RunFinalization(env);
3128         native_need_to_run_finalization_ = false;
3129         CHECK(!env->ExceptionCheck());
3130       }
3131       // We have just run finalizers, update the native watermark since it is very likely that
3132       // finalizers released native managed allocations.
3133       UpdateMaxNativeFootprint();
3134     } else if (!IsGCRequestPending()) {
3135       if (IsGcConcurrent()) {
3136         RequestConcurrentGC(self);
3137       } else {
3138         CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
3139       }
3140     }
3141   }
3142 }
3143 
3144 void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
3145   size_t expected_size;
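  // Editorial note: the compare-exchange loop below retries until the decrement is applied against
  // a consistent snapshot, so concurrent RegisterNativeAllocation calls are not lost; freeing more
  // than was registered throws a RuntimeException instead.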
3146   do {
3147     expected_size = native_bytes_allocated_.LoadRelaxed();
3148     if (UNLIKELY(bytes > expected_size)) {
3149       ScopedObjectAccess soa(env);
3150       env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
3151                     StringPrintf("Attempted to free %zu native bytes with only %zu native bytes "
3152                                  "registered as allocated", bytes, expected_size).c_str());
3153       break;
3154     }
3155   } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size,
3156                                                                expected_size - bytes));
3157 }
3158 
3159 size_t Heap::GetTotalMemory() const {
3160   return std::max(max_allowed_footprint_, GetBytesAllocated());
3161 }
3162 
3163 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
3164   DCHECK(mod_union_table != nullptr);
3165   mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
3166 }
3167 
3168 void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
3169   CHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
3170         (c->IsVariableSize() || c->GetObjectSize() == byte_count));
3171   CHECK_GE(byte_count, sizeof(mirror::Object));
3172 }
3173 
3174 void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
3175   CHECK(remembered_set != nullptr);
3176   space::Space* space = remembered_set->GetSpace();
3177   CHECK(space != nullptr);
3178   CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
3179   remembered_sets_.Put(space, remembered_set);
3180   CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
3181 }
3182 
3183 void Heap::RemoveRememberedSet(space::Space* space) {
3184   CHECK(space != nullptr);
3185   auto it = remembered_sets_.find(space);
3186   CHECK(it != remembered_sets_.end());
3187   delete it->second;
3188   remembered_sets_.erase(it);
3189   CHECK(remembered_sets_.find(space) == remembered_sets_.end());
3190 }
3191 
3192 void Heap::ClearMarkedObjects() {
3193   // Clear all of the spaces' mark bitmaps.
3194   for (const auto& space : GetContinuousSpaces()) {
3195     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
3196     if (space->GetLiveBitmap() != mark_bitmap) {
3197       mark_bitmap->Clear();
3198     }
3199   }
3200   // Clear the marked objects in the discontinuous space object sets.
3201   for (const auto& space : GetDiscontinuousSpaces()) {
3202     space->GetMarkBitmap()->Clear();
3203   }
3204 }
3205 
3206 }  // namespace gc
3207 }  // namespace art
3208