/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_
#define ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_

#include "malloc_space.h"
#include "space.h"

namespace art {
namespace gc {

namespace collector {
class MarkSweep;
}  // namespace collector

namespace space {

// An alloc space is a space where objects may be allocated and garbage collected. Not final, as it
// may be overridden by a MemoryToolMallocSpace.
class DlMallocSpace : public MallocSpace {
 public:
  // Create a DlMallocSpace from an existing mem_map.
  static DlMallocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                         size_t starting_size, size_t initial_size,
                                         size_t growth_limit, size_t capacity,
                                         bool can_move_objects);

  // Create a DlMallocSpace with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the
  // returned space to confirm the request was granted.
  static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
                               size_t capacity, uint8_t* requested_begin, bool can_move_objects);

  // Virtual to allow MemoryToolMallocSpace to intercept.
  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                          size_t* usable_size,
                                          size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(!lock_);
  // Virtual to allow MemoryToolMallocSpace to intercept.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(!lock_) {
    return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
                           bytes_tl_bulk_allocated);
  }
  // Virtual to allow MemoryToolMallocSpace to intercept.
  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  // Virtual to allow MemoryToolMallocSpace to intercept.
  virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
      REQUIRES(!lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);
  // Virtual to allow MemoryToolMallocSpace to intercept.
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
      REQUIRES(!lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
    return num_bytes;
  }

  // DlMallocSpaces don't have thread local state.
  size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
    return 0U;
  }
  size_t RevokeAllThreadLocalBuffers() OVERRIDE {
    return 0U;
  }

  // Faster non-virtual allocation path.
  mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                  size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);

  // Faster non-virtual allocation size path.
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size);

#ifndef NDEBUG
  // Override only in the debug build.
  void CheckMoreCoreForPrecondition();
#endif

  void* GetMspace() const {
    return mspace_;
  }

  size_t Trim() OVERRIDE;

  // Perform an mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
  // in use, indicated by num_bytes equaling zero.
  void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);

  // Returns the number of bytes that the space has currently obtained from the system. This is
  // greater than or equal to the amount of live data in the space.
  size_t GetFootprint() OVERRIDE;

  // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
  size_t GetFootprintLimit() OVERRIDE;

  // Set the maximum number of bytes that the heap is allowed to obtain from the system via
  // MoreCore. Note this is used to stop the mspace from growing beyond the limit to Capacity. When
  // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
  void SetFootprintLimit(size_t limit) OVERRIDE;

  MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
                              uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                              bool can_move_objects);

  uint64_t GetBytesAllocated() OVERRIDE;
  uint64_t GetObjectsAllocated() OVERRIDE;

  virtual void Clear() OVERRIDE;

  bool IsDlMallocSpace() const OVERRIDE {
    return true;
  }

  DlMallocSpace* AsDlMallocSpace() OVERRIDE {
    return this;
  }

  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_);

 protected:
  DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, void* mspace,
                uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                bool can_move_objects, size_t starting_size);

 private:
  mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated)
      REQUIRES(lock_);

  void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                        size_t /*maximum_size*/, bool /*low_memory_mode*/) OVERRIDE {
    return CreateMspace(base, morecore_start, initial_size);
  }
  static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size);

  // The boundary tag overhead.
  static const size_t kChunkOverhead = sizeof(intptr_t);

  // Underlying malloc space.
  void* mspace_;

  friend class collector::MarkSweep;

  DISALLOW_COPY_AND_ASSIGN(DlMallocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_
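
// Usage sketch (illustrative only, not part of the original header): how a DlMallocSpace
// might be created and used for a single allocation and free. The space name and the size
// values below are arbitrary placeholders, and the snippet assumes a running ART runtime
// with an attached thread so that Thread::Current() is valid. Per the annotations above,
// Free must additionally be called with the mutator lock held.
//
//   using art::gc::space::DlMallocSpace;
//
//   DlMallocSpace* space = DlMallocSpace::Create("example dlmalloc space",
//                                                /* initial_size */ 4 * 1024 * 1024,
//                                                /* growth_limit */ 64 * 1024 * 1024,
//                                                /* capacity */ 64 * 1024 * 1024,
//                                                /* requested_begin */ nullptr,
//                                                /* can_move_objects */ false);
//   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
//   art::mirror::Object* obj = space->Alloc(art::Thread::Current(),
//                                           /* num_bytes */ 64,
//                                           &bytes_allocated,
//                                           &usable_size,
//                                           &bytes_tl_bulk_allocated);
//   if (obj != nullptr) {
//     space->Free(art::Thread::Current(), obj);  // Requires the mutator lock.
//   }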