/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_

#include "base/mutex.h"
#include "space.h"

#include <deque>

namespace art HIDDEN {

namespace mirror {
class Object;
}

namespace gc {

namespace collector {
class MarkCompact;
class MarkSweep;
}  // namespace collector

namespace space {

// A bump pointer space allocates by incrementing a pointer. It doesn't provide a free
// implementation, as it's intended to be evacuated.
class EXPORT BumpPointerSpace final : public ContinuousMemMapAllocSpace {
 public:
  using WalkCallback = void (*)(void *, void *, int, void *);

  SpaceType GetType() const override {
    return kSpaceTypeBumpPointerSpace;
  }

  // Create a bump pointer space with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm the request was granted.
  static BumpPointerSpace* Create(const std::string& name, size_t capacity);
  static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap&& mem_map);

  // Allocate num_bytes; returns null if the space is full.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      override REQUIRES(Locks::mutator_lock_);

  mirror::Object* AllocNonvirtual(size_t num_bytes);
  mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);

  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }

  // No-ops unless we support free lists.
  size_t Free(Thread*, mirror::Object*) override {
    return 0;
  }

  size_t FreeList(Thread*, size_t, mirror::Object**) override {
    return 0;
  }

  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes the fork-time growth limit on capacity, allowing the application to allocate up to
  // the maximum reserved size of the heap.
  void ClearGrowthLimit() {
    growth_end_ = Limit();
  }

  // Attempts to clamp the space limit to 'new_capacity'. If not possible, clamps to whatever is
  // possible. Returns the new capacity. 'lock_' is used to ensure that TLAB allocations, which
  // are the only allocations that may happen concurrently with this function, are synchronized.
  // The other Alloc* functions are either used in single-threaded mode, or, when used in
  // multi-threaded mode, the space is used by GCs (like SS) which don't have clamping
  // implemented.
  size_t ClampGrowthLimit(size_t new_capacity) REQUIRES(!lock_);

  // Override capacity so that we only return the possibly limited capacity.
  size_t Capacity() const override {
    return growth_end_ - begin_;
  }

  // The total amount of memory reserved for the space.
  size_t NonGrowthLimitCapacity() const override {
    return GetMemMap()->Size();
  }

  accounting::ContinuousSpaceBitmap* GetLiveBitmap() override {
    return nullptr;
  }

  // Reset the space to empty.
  void Clear() override REQUIRES(!lock_);

  void Dump(std::ostream& os) const override;

  size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!lock_);
  size_t RevokeAllThreadLocalBuffers() override
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!lock_);
  void AssertAllThreadLocalBuffersAreRevoked()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !lock_);

  uint64_t GetBytesAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !lock_);
  uint64_t GetObjectsAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !lock_);
  // Return the pre-determined allocated object count. This could be beneficial
  // when we know that all the TLABs are revoked.
  int32_t GetAccumulatedObjectsAllocated() REQUIRES_SHARED(Locks::mutator_lock_) {
    return objects_allocated_.load(std::memory_order_relaxed);
  }

  bool IsEmpty() const {
    return Begin() == End();
  }

  bool CanMoveObjects() const override {
    return true;
  }

  // TODO: Change this? Mainly used for compacting to a particular region of memory.
  BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit);

  // Allocate a new TLAB and update bytes_tl_bulk_allocated with the allocation size; returns
  // false if the allocation failed.
  bool AllocNewTlab(Thread* self, size_t bytes, size_t* bytes_tl_bulk_allocated) REQUIRES(!lock_);

  BumpPointerSpace* AsBumpPointerSpace() override {
    return this;
  }

  // Go through all of the blocks and visit the continuous objects.
  template <typename Visitor>
  ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override;

  // Record objects / bytes freed.
  void RecordFree(int32_t objects, int32_t bytes) {
    objects_allocated_.fetch_sub(objects, std::memory_order_relaxed);
    bytes_allocated_.fetch_sub(bytes, std::memory_order_relaxed);
  }

  bool LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = kObjectAlignment;

 protected:
  BumpPointerSpace(const std::string& name, MemMap&& mem_map);

  // Allocate a raw block of bytes.
  uint8_t* AllocBlock(size_t bytes) REQUIRES(lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(lock_);

  // The main block is an unbounded block where objects go when there are no other blocks. This
  // enables us to maintain tightly packed objects when thread-local buffers are not being used
  // for allocation. The main block starts at the space Begin().
  void UpdateMainBlock() REQUIRES(lock_);

  uint8_t* growth_end_;
  AtomicInteger objects_allocated_;  // Accumulated from revoked thread local regions.
  AtomicInteger bytes_allocated_;  // Accumulated from revoked thread local regions.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // The objects at the start of the space are stored in the main block.
  size_t main_block_size_ GUARDED_BY(lock_);
  // List of block sizes (in bytes) after the main block. Needed for Walk().
  // If empty, then the space has only one long continuous block. Each TLAB
  // allocation has one entry in this deque.
  // Keeping block sizes off-heap simplifies sliding compaction algorithms.
  // The compaction algorithm should ideally compact all objects into the main
  // block, thereby enabling erasing corresponding entries from here.
  std::deque<size_t> block_sizes_ GUARDED_BY(lock_);

 private:
  // Return the object which comes after obj, while ensuring alignment.
  static mirror::Object* GetNextObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return a vector of block sizes on the space. Required by MarkCompact GC for
  // walking black objects allocated after the marking phase.
  std::vector<size_t>* GetBlockSizes(Thread* self, size_t* main_block_size) REQUIRES(!lock_);

  // Once MarkCompact decides the post-compact layout of the space in the
  // pre-compaction pause, it calls this function to update the block sizes. It is
  // done by passing the new main-block size, which consumes a bunch of blocks
  // into itself, and the index of the first unconsumed block. This works as all
  // the block sizes are ordered. Also updates 'end_' to reflect the change.
  void SetBlockSizes(Thread* self, const size_t main_block_size, const size_t first_valid_idx)
      REQUIRES(!lock_, Locks::mutator_lock_);

  // Align end to the given alignment. This is done in MarkCompact GC when
  // mutators are suspended so that upcoming TLAB allocations start on a new
  // page. Adjusts the heap's bytes_allocated accordingly. Returns the aligned end.
  uint8_t* AlignEnd(Thread* self, size_t alignment, Heap* heap) REQUIRES(Locks::mutator_lock_);

  friend class collector::MarkSweep;
  friend class collector::MarkCompact;
  DISALLOW_COPY_AND_ASSIGN(BumpPointerSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
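// Usage sketch (illustrative only, kept as a comment so it does not affect the header): a minimal
// example of how runtime-internal code might create a bump pointer space and allocate from it.
// The 16 MiB capacity, the space name, and the 64-byte request are arbitrary illustration values,
// and a valid attached Thread is assumed; real allocations normally go through gc::Heap, which
// also handles TLABs and accounting.
//
//   BumpPointerSpace* space = BumpPointerSpace::Create("example bump pointer space",
//                                                      16 * 1024 * 1024);
//   size_t bytes_allocated = 0;
//   size_t usable_size = 0;
//   size_t bytes_tl_bulk_allocated = 0;
//   mirror::Object* obj = space->Alloc(Thread::Current(),
//                                      /*num_bytes=*/ 64,
//                                      &bytes_allocated,
//                                      &usable_size,
//                                      &bytes_tl_bulk_allocated);
//   // Alloc bumps the space's end pointer by the aligned request size; obj is null if the
//   // request does not fit below the current growth limit.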