/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_LINEAR_ALLOC_INL_H_
#define ART_RUNTIME_LINEAR_ALLOC_INL_H_

#include "linear_alloc.h"

#include "base/gc_visited_arena_pool.h"
#include "thread-current-inl.h"

namespace art HIDDEN {

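// Records the extent of the most recent allocation with the owning
// TrackedArena (see base/gc_visited_arena_pool.h), presumably so the GC can
// locate the first object in the arena's page range. Under a memory tool
// (e.g. ASan) each allocation carries an extra red zone, which must be
// included in the tracked extent.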
inline void LinearAlloc::SetFirstObject(void* begin, size_t bytes) const {
  DCHECK(track_allocations_);
  if (ArenaAllocator::IsRunningOnMemoryTool()) {
    bytes += ArenaAllocator::kMemoryToolRedZoneBytes;
  }
  uint8_t* end = static_cast<uint8_t*>(begin) + bytes;
  Arena* arena = allocator_.GetHeadArena();
  DCHECK_NE(arena, nullptr);
  // The object would either be in the head arena or the next one.
  if (UNLIKELY(begin < arena->Begin() || begin >= arena->End())) {
    arena = arena->Next();
  }
  DCHECK(begin >= arena->Begin() && end <= arena->End());
  down_cast<TrackedArena*>(arena)->SetFirstObject(static_cast<uint8_t*>(begin), end);
}

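// With tracking enabled every allocation is laid out as
//
//   [TrackingHeader][payload ...]
//                   ^-- pointer handed to the caller
//
// so stepping one TrackingHeader back from a caller's pointer reaches the
// header. ConvertToNoGcRoots retags an existing allocation as kNoGCRoots,
// presumably so the GC no longer scans it for roots.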
inline void LinearAlloc::ConvertToNoGcRoots(void* ptr, LinearAllocKind orig_kind) {
  if (track_allocations_ && ptr != nullptr) {
    TrackingHeader* header = static_cast<TrackingHeader*>(ptr);
    header--;
    DCHECK_EQ(header->GetKind(), orig_kind);
    DCHECK_GT(header->GetSize(), 0u);
    // 16-byte aligned allocations are not supported yet.
    DCHECK(!header->Is16Aligned());
    header->SetKind(LinearAllocKind::kNoGCRoots);
  }
}

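// Invoked in the child after the zygote forks. ResetCurrentArena() makes the
// allocator start a new arena for subsequent allocations; a plausible
// motivation is keeping post-fork allocations out of pages shared
// copy-on-write with the zygote, though the fork protocol itself lives in
// the callers.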
inline void LinearAlloc::SetupForPostZygoteFork(Thread* self) {
  MutexLock mu(self, lock_);
  DCHECK(track_allocations_);
  allocator_.ResetCurrentArena();
}

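// Grows or shrinks an existing allocation. With tracking enabled the stored
// size includes the TrackingHeader, so both sizes are adjusted before
// delegating to the arena allocator. A usage sketch (the `linear_alloc`
// instance, sizes, and kind are hypothetical):
//
//   void* p = linear_alloc->Alloc(self, 64u, LinearAllocKind::kNoGCRoots);
//   p = linear_alloc->Realloc(self, p, /*old_size=*/64u, /*new_size=*/128u,
//                             LinearAllocKind::kNoGCRoots);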
inline void* LinearAlloc::Realloc(Thread* self,
                                  void* ptr,
                                  size_t old_size,
                                  size_t new_size,
                                  LinearAllocKind kind) {
  MutexLock mu(self, lock_);
  if (track_allocations_) {
    if (ptr != nullptr) {
      // Realloc cannot be called on 16-byte aligned allocations, as Realloc
      // doesn't guarantee that alignment. So the header must be immediately
      // prior to ptr.
      TrackingHeader* header = reinterpret_cast<TrackingHeader*>(ptr) - 1;
      DCHECK_EQ(header->GetKind(), kind);
      old_size += sizeof(TrackingHeader);
      DCHECK_EQ(header->GetSize(), old_size);
      ptr = header;
    } else {
      DCHECK_EQ(old_size, 0u);
    }
    new_size += sizeof(TrackingHeader);
    void* ret = allocator_.Realloc(ptr, old_size, new_size);
    new (ret) TrackingHeader(new_size, kind);
    SetFirstObject(ret, new_size);
    return static_cast<TrackingHeader*>(ret) + 1;
  } else {
    return allocator_.Realloc(ptr, old_size, new_size);
  }
}

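// The tracked path mirrors Realloc: reserve room for the header,
// placement-new a TrackingHeader at the start of the storage, and hand the
// caller the address one header past it.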
inline void* LinearAlloc::Alloc(Thread* self, size_t size, LinearAllocKind kind) {
  MutexLock mu(self, lock_);
  if (track_allocations_) {
    size += sizeof(TrackingHeader);
    TrackingHeader* storage = new (allocator_.Alloc(size)) TrackingHeader(size, kind);
    SetFirstObject(storage, size);
    return storage + 1;
  } else {
    return allocator_.Alloc(size);
  }
}

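// Returns a 16-byte aligned block. With tracking enabled the TrackingHeader
// stays at the start of the allocation, and the gap needed to bring the
// payload onto a 16-byte boundary is folded into the allocation size:
//
//   [TrackingHeader][padding < 16][payload ...]
//                                 ^-- 16-byte aligned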
inline void* LinearAlloc::AllocAlign16(Thread* self, size_t size, LinearAllocKind kind) {
  MutexLock mu(self, lock_);
  DCHECK_ALIGNED(size, 16);
  if (track_allocations_) {
    size_t mem_tool_bytes = ArenaAllocator::IsRunningOnMemoryTool()
                            ? ArenaAllocator::kMemoryToolRedZoneBytes : 0;
    uint8_t* ptr = allocator_.CurrentPtr() + sizeof(TrackingHeader);
    uintptr_t padding =
        RoundUp(reinterpret_cast<uintptr_t>(ptr), 16) - reinterpret_cast<uintptr_t>(ptr);
    DCHECK_LT(padding, 16u);
    size_t required_size = size + sizeof(TrackingHeader) + padding;

    if (allocator_.CurrentArenaUnusedBytes() < required_size + mem_tool_bytes) {
      // The allocator will require a new arena, which is expected to be
      // 16-byte aligned.
      static_assert(ArenaAllocator::kArenaAlignment >= 16,
                    "Expecting sufficient alignment for new Arena.");
      required_size = size + RoundUp(sizeof(TrackingHeader), 16);
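      // In a fresh arena the current pointer is 16-byte aligned, so the
      // padding computed above against the old arena no longer applies;
      // header plus padding collapse to RoundUp(sizeof(TrackingHeader), 16).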
    }
    // Using ArenaAllocator's AllocAlign16 here would disturb the alignment by
    // trying to make the header 16-byte aligned. The alignment requirements
    // are already addressed above, so we just want the allocator to bump the
    // pointer.
    ptr = static_cast<uint8_t*>(allocator_.Alloc(required_size));
    new (ptr) TrackingHeader(required_size, kind, /*is_16_aligned=*/true);
    SetFirstObject(ptr, required_size);
    return AlignUp(ptr + sizeof(TrackingHeader), 16);
  } else {
    return allocator_.AllocAlign16(size);
  }
}

inline size_t LinearAlloc::GetUsedMemory() const {
  MutexLock mu(Thread::Current(), lock_);
  return allocator_.BytesUsed();
}

inline ArenaPool* LinearAlloc::GetArenaPool() {
  MutexLock mu(Thread::Current(), lock_);
  return allocator_.GetArenaPool();
}

inline bool LinearAlloc::Contains(void* ptr) const {
  MutexLock mu(Thread::Current(), lock_);
  return allocator_.Contains(ptr);
}

}  // namespace art

#endif  // ART_RUNTIME_LINEAR_ALLOC_INL_H_