/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_

#include "base/mutex.h"
#include "space.h"

#include <deque>

namespace art {

namespace mirror {
class Object;
}

namespace gc {

namespace collector {
class MarkCompact;
class MarkSweep;
}  // namespace collector

namespace space {

// A bump pointer space allocates by incrementing a pointer; it doesn't provide a free
// implementation, as it's intended to be evacuated.
class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
 public:
  using WalkCallback = void (*)(void *, void *, int, void *);

  SpaceType GetType() const override {
    return kSpaceTypeBumpPointerSpace;
  }

  // Create a bump pointer space with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm the request was granted.
  static BumpPointerSpace* Create(const std::string& name, size_t capacity);
  static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap&& mem_map);

  // Allocate num_bytes; returns null if the space is full.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      override REQUIRES(Locks::mutator_lock_);

  mirror::Object* AllocNonvirtual(size_t num_bytes);
  mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);

  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }

  // NOPs unless we support free lists.
  size_t Free(Thread*, mirror::Object*) override {
    return 0;
  }

  size_t FreeList(Thread*, size_t, mirror::Object**) override {
    return 0;
  }

  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
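
  // Illustrative sketch (not part of the original header) of how a caller might use the
  // allocation entry points above. The variable names are placeholders; Alloc returns null
  // when the space is full, as documented above.
  //
  //   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
  //   mirror::Object* obj = space->Alloc(self, num_bytes, &bytes_allocated,
  //                                      &usable_size, &bytes_tl_bulk_allocated);
  //   if (obj == nullptr) {
  //     // The space is full; the caller must reclaim or grow memory before retrying.
  //   }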

  // Removes the fork-time growth limit on capacity, allowing the application to allocate up to
  // the maximum reserved size of the heap.
  void ClearGrowthLimit() {
    growth_end_ = Limit();
  }

  // Override capacity so that we only return the possibly limited capacity.
  size_t Capacity() const override {
    return growth_end_ - begin_;
  }

  // The total amount of memory reserved for the space.
  size_t NonGrowthLimitCapacity() const override {
    return GetMemMap()->Size();
  }

  accounting::ContinuousSpaceBitmap* GetLiveBitmap() override {
    return nullptr;
  }

  // Reset the space to empty.
  void Clear() override REQUIRES(!block_lock_);

  void Dump(std::ostream& os) const override;

  size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!block_lock_);
  size_t RevokeAllThreadLocalBuffers() override
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!block_lock_);
  void AssertAllThreadLocalBuffersAreRevoked()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);

  uint64_t GetBytesAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
  uint64_t GetObjectsAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
  // Return the pre-determined allocated object count. This is useful when we know that all the
  // TLABs have been revoked.
  int32_t GetAccumulatedObjectsAllocated() REQUIRES_SHARED(Locks::mutator_lock_) {
    return objects_allocated_.load(std::memory_order_relaxed);
  }

  bool IsEmpty() const {
    return Begin() == End();
  }

  bool CanMoveObjects() const override {
    return true;
  }

  // TODO: Change this? Mainly used for compacting to a particular region of memory.
  BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit);

  // Allocate a new TLAB; returns false if the allocation failed.
  bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);

  BumpPointerSpace* AsBumpPointerSpace() override {
    return this;
  }

  // Go through all of the blocks and visit the continuous objects.
  template <typename Visitor>
  ALWAYS_INLINE void Walk(Visitor&& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!block_lock_);

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override;

  // Record objects / bytes freed.
  void RecordFree(int32_t objects, int32_t bytes) {
    objects_allocated_.fetch_sub(objects, std::memory_order_relaxed);
    bytes_allocated_.fetch_sub(bytes, std::memory_order_relaxed);
  }

  bool LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = kObjectAlignment;
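
  // Illustrative sketch (not part of the original header) of visiting the space with Walk()
  // above. It assumes the visitor is invoked with each mirror::Object*, which this header does
  // not spell out, and that the mutator lock is held:
  //
  //   space->Walk([](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
  //     // Examine each visited object here.
  //   });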

 protected:
  BumpPointerSpace(const std::string& name, MemMap&& mem_map);

  // Allocate a raw block of bytes.
  uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(block_lock_);

  // The main block is an unbounded block where objects go when there are no other blocks. This
  // enables us to maintain tightly packed objects when we are not using thread-local buffers for
  // allocation. The main block starts at the space Begin().
  void UpdateMainBlock() REQUIRES(block_lock_);

  // End of the space for capacity purposes: Capacity() returns growth_end_ - begin_, and
  // ClearGrowthLimit() sets it to Limit().
  uint8_t* growth_end_;
  AtomicInteger objects_allocated_;  // Accumulated from revoked thread local regions.
  AtomicInteger bytes_allocated_;  // Accumulated from revoked thread local regions.
  Mutex block_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // The objects at the start of the space are stored in the main block.
  size_t main_block_size_ GUARDED_BY(block_lock_);
  // List of block sizes (in bytes) after the main block. Needed for Walk().
  // If empty then the space has only one long continuous block. Each TLAB
  // allocation has one entry in this deque.
  // Keeping block sizes off-heap simplifies sliding compaction algorithms.
  // The compaction algorithm should ideally compact all objects into the main
  // block, thereby enabling the corresponding entries here to be erased.
  std::deque<size_t> block_sizes_ GUARDED_BY(block_lock_);

 private:
  // Return the object which comes after obj, while ensuring alignment.
  static mirror::Object* GetNextObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return a vector of block sizes on the space. Required by MarkCompact GC for
  // walking black objects allocated after the marking phase.
  std::vector<size_t>* GetBlockSizes(Thread* self, size_t* main_block_size) REQUIRES(!block_lock_);

  // Once MarkCompact decides the post-compact layout of the space in the
  // pre-compaction pause, it calls this function to update the block sizes. It is
  // done by passing the new main-block size, which consumes a bunch of blocks
  // into itself, and the index of the first unconsumed block. This works because all the
  // block sizes are ordered. Also updates 'end_' to reflect the change.
  void SetBlockSizes(Thread* self, const size_t main_block_size, const size_t first_valid_idx)
      REQUIRES(!block_lock_, Locks::mutator_lock_);

  // Align the end of the space to the given alignment. This is done in MarkCompact GC when
  // mutators are suspended so that upcoming TLAB allocations start on a new
  // page. Returns the pre-alignment end.
  uint8_t* AlignEnd(Thread* self, size_t alignment) REQUIRES(Locks::mutator_lock_);

  friend class collector::MarkSweep;
  friend class collector::MarkCompact;
  DISALLOW_COPY_AND_ASSIGN(BumpPointerSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_