/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/trace_processor/util/bump_allocator.h"

#include <cstddef>
#include <cstdint>
#include <limits>
#include <optional>
#include <utility>

#include "perfetto/base/compiler.h"
#include "perfetto/base/logging.h"
#include "perfetto/ext/base/utils.h"

namespace perfetto::trace_processor {
namespace {

// TODO(b/266983484): consider using base::PagedMemory unless a) we are on a
// platform where that doesn't make sense (WASM) or b) we are trying to do
// heap profiling.
base::AlignedUniquePtr<uint8_t[]> Allocate(uint32_t size) {
  uint8_t* ptr = static_cast<uint8_t*>(base::AlignedAlloc(8, size));
  // Poison the region to try to catch out-of-bounds accesses.
  PERFETTO_ASAN_POISON(ptr, size);
  return base::AlignedUniquePtr<uint8_t[]>(ptr);
}
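
// Note on the poisoning discipline: a freshly allocated chunk is poisoned
// wholesale here, then unpoisoned one allocation at a time by
// TryAllocInLastChunk() below. A minimal sketch of the pairing (illustrative
// values, not real code from this file):
//
//   uint8_t* chunk = ...;                     // start of a chunk
//   PERFETTO_ASAN_POISON(chunk, kChunkSize);  // whole chunk off-limits
//   ...
//   PERFETTO_ASAN_UNPOISON(chunk + alloc_offset, size);  // one range usable
//
// On non-ASAN builds these macros are expected to compile to no-ops, so the
// discipline costs nothing in production.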

}  // namespace

BumpAllocator::BumpAllocator() = default;

BumpAllocator::~BumpAllocator() {
  for (const auto& chunk : chunks_) {
    PERFETTO_CHECK(chunk.unfreed_allocations == 0);
  }
}
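
// The CHECK in the destructor above means every Alloc() must be paired with
// a Free() before the allocator is destroyed. A minimal usage sketch built
// from the public methods defined in this file (illustrative only):
//
//   BumpAllocator allocator;
//   BumpAllocator::AllocId id = allocator.Alloc(16);  // multiple of 8 bytes
//   void* ptr = allocator.GetPointer(id);
//   // ... use up to 16 bytes behind ptr ...
//   allocator.Free(id);
//   allocator.EraseFrontFreeChunks();  // release fully-freed front chunks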

BumpAllocator::AllocId BumpAllocator::Alloc(uint32_t size) {
  // Size is required to be a multiple of 8 to avoid needing to deal with
  // alignment. It must also be at most kChunkSize, as we do not support
  // allocations which span chunks.
  PERFETTO_DCHECK(size % 8 == 0);
  PERFETTO_DCHECK(size <= kChunkSize);

  // Fast path: check if we have enough space to service this allocation in
  // the last chunk.
  std::optional<AllocId> alloc_id = TryAllocInLastChunk(size);
  if (alloc_id) {
    return *alloc_id;
  }

  // Slow path: we don't have enough space in the last chunk, so create a new
  // one.
  Chunk chunk;
  chunk.allocation = Allocate(kChunkSize);
  chunks_.emplace_back(std::move(chunk));

  // Ensure that we haven't exceeded the maximum number of chunks.
  PERFETTO_CHECK(LastChunkIndex() < kMaxChunkCount);

  // This time the allocation should definitely succeed in the last chunk
  // (which we just added).
  alloc_id = TryAllocInLastChunk(size);
  PERFETTO_CHECK(alloc_id);
  return *alloc_id;
}
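
// To make the fast/slow split above concrete: assuming kChunkSize is 64KiB
// (its actual value lives in the header and may differ), a fresh allocator
// services its first Alloc(8) via the slow path, then the following 8191
// Alloc(8) calls via the fast path in the same chunk before a second chunk
// has to be created. The numbers are illustrative; only the two-path
// structure is guaranteed by the code above.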

void BumpAllocator::Free(AllocId id) {
  uint64_t queue_index = ChunkIndexToQueueIndex(id.chunk_index);
  PERFETTO_DCHECK(queue_index <= std::numeric_limits<size_t>::max());
  Chunk& chunk = chunks_.at(static_cast<size_t>(queue_index));
  PERFETTO_DCHECK(chunk.unfreed_allocations > 0);
  chunk.unfreed_allocations--;
}
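
// Note that Free() only decrements a per-chunk counter: the memory itself is
// neither reclaimed nor re-poisoned until EraseFrontFreeChunks() erases
// chunks whose counters have dropped to zero. For example (illustrative):
//
//   auto a = allocator.Alloc(8);       // chunk 0: unfreed_allocations == 1
//   auto b = allocator.Alloc(8);       // chunk 0: unfreed_allocations == 2
//   allocator.Free(a);                 // chunk 0: unfreed_allocations == 1
//   allocator.Free(b);                 // chunk 0: unfreed_allocations == 0
//   allocator.EraseFrontFreeChunks();  // chunk 0 is actually released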

void* BumpAllocator::GetPointer(AllocId id) {
  uint64_t queue_index = ChunkIndexToQueueIndex(id.chunk_index);
  PERFETTO_CHECK(queue_index <= std::numeric_limits<size_t>::max());
  return chunks_.at(static_cast<size_t>(queue_index)).allocation.get() +
         id.chunk_offset;
}

uint64_t BumpAllocator::EraseFrontFreeChunks() {
  size_t to_erase_chunks = 0;
  for (; to_erase_chunks < chunks_.size(); ++to_erase_chunks) {
    // Break on the first chunk which still has unfreed allocations.
    if (chunks_.at(to_erase_chunks).unfreed_allocations > 0) {
      break;
    }
  }
  chunks_.erase_front(to_erase_chunks);
  erased_front_chunks_count_ += to_erase_chunks;
  return to_erase_chunks;
}
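
// A worked example of the index bookkeeping above: an AllocId carries a
// monotonic chunk_index that never shifts, while chunks_ only holds the
// non-erased tail. After EraseFrontFreeChunks() has removed, say, two
// chunks, erased_front_chunks_count_ == 2 and ChunkIndexToQueueIndex() maps
// chunk_index 2 to queue index 0; judging by PastTheEndId() below, the
// mapping is queue_index = chunk_index - erased_front_chunks_count_.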

BumpAllocator::AllocId BumpAllocator::PastTheEndId() {
  if (chunks_.empty()) {
    return AllocId{erased_front_chunks_count_, 0};
  }
  if (chunks_.back().bump_offset == kChunkSize) {
    return AllocId{LastChunkIndex() + 1, 0};
  }
  return AllocId{LastChunkIndex(), chunks_.back().bump_offset};
}

std::optional<BumpAllocator::AllocId> BumpAllocator::TryAllocInLastChunk(
    uint32_t size) {
  if (chunks_.empty()) {
    return std::nullopt;
  }

  // TODO(b/266983484): consider switching this to bump downwards instead of
  // upwards for more efficient code generation.
  Chunk& chunk = chunks_.back();

  // Verify some invariants:
  // 1) The allocation must exist.
  // 2) The bump offset must be within the bounds of the chunk.
  PERFETTO_DCHECK(chunk.allocation);
  PERFETTO_DCHECK(chunk.bump_offset <= kChunkSize);

  // If the allocation would end past the end of this chunk, we cannot
  // service it here.
  uint32_t alloc_offset = chunk.bump_offset;
  uint32_t new_bump_offset = chunk.bump_offset + size;
  if (new_bump_offset > kChunkSize) {
    return std::nullopt;
  }

  // Set the new offset equal to the end of this allocation and increment the
  // unfreed allocation counter.
  chunk.bump_offset = new_bump_offset;
  chunk.unfreed_allocations++;

  // Unpoison the allocated range so it can be accessed in ASAN builds.
  PERFETTO_ASAN_UNPOISON(chunk.allocation.get() + alloc_offset, size);

  return AllocId{LastChunkIndex(), alloc_offset};
}
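
// The bump arithmetic above in numbers (illustrative): with bump_offset ==
// 24 and size == 16, the allocation lands at offset 24 and bump_offset
// advances to 40; had 24 + size exceeded kChunkSize, the function would
// return std::nullopt and Alloc() would start a fresh chunk instead. Since
// size and bump_offset are always multiples of 8, every returned offset is
// 8-byte aligned, which is what allows Allocate() to use a fixed 8-byte
// alignment.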

}  // namespace perfetto::trace_processor