1 // Copyright 2021 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_RESERVATION_OFFSET_TABLE_H_
6 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_RESERVATION_OFFSET_TABLE_H_
7
8 #include <cstddef>
9 #include <cstdint>
10 #include <limits>
11 #include <tuple>
12
13 #include "build/build_config.h"
14 #include "partition_alloc/address_pool_manager.h"
15 #include "partition_alloc/partition_address_space.h"
16 #include "partition_alloc/partition_alloc_base/compiler_specific.h"
17 #include "partition_alloc/partition_alloc_base/component_export.h"
18 #include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
19 #include "partition_alloc/partition_alloc_buildflags.h"
20 #include "partition_alloc/partition_alloc_check.h"
21 #include "partition_alloc/partition_alloc_constants.h"
22 #include "partition_alloc/tagging.h"
23 #include "partition_alloc/thread_isolation/alignment.h"
24
25 namespace partition_alloc::internal {
26
// Tag stored in a table entry for a super page that is not allocated by
// PartitionAlloc at all.
static constexpr uint16_t kOffsetTagNotAllocated =
    std::numeric_limits<uint16_t>::max();
// Tag stored in a table entry for a super page used by normal-bucket
// allocations (as opposed to a direct-map reservation).
static constexpr uint16_t kOffsetTagNormalBuckets =
    std::numeric_limits<uint16_t>::max() - 1;
31
32 // The main purpose of the reservation offset table is to easily locate the
33 // direct map reservation start address for any given address. There is one
34 // entry in the table for each super page.
35 //
36 // When PartitionAlloc reserves an address region it is always aligned to
// super page boundary. However, in 32-bit mode, the size may not be
// super-page-aligned, so the reservation may look like this:
39 // |<--------- actual reservation size --------->|
40 // +----------+----------+-----+-----------+-----+ - - - +
41 // |SuperPage0|SuperPage1| ... |SuperPage K|SuperPage K+1|
// +----------+----------+-----+-----------+-----+ - - - +
43 // |<-X->|<-Y*)->|
44 //
// The table entries for reserved super pages say how many super pages away
// from the start of the reservation each super page is:
47 // +----------+----------+-----+-----------+-------------+
48 // |Entry for |Entry for | ... |Entry for |Entry for |
49 // |SuperPage0|SuperPage1| |SuperPage K|SuperPage K+1|
50 // +----------+----------+-----+-----------+-------------+
51 // | 0 | 1 | ... | K | K + 1 |
52 // +----------+----------+-----+-----------+-------------+
53 //
54 // For an address Z, the reservation start can be found using this formula:
55 // ((Z >> kSuperPageShift) - (the entry for Z)) << kSuperPageShift
56 //
57 // kOffsetTagNotAllocated is a special tag denoting that the super page isn't
58 // allocated by PartitionAlloc and kOffsetTagNormalBuckets denotes that it is
59 // used for a normal-bucket allocation, not for a direct-map allocation.
60 //
61 // *) In 32-bit mode, Y is not used by PartitionAlloc, and cannot be used
62 // until X is unreserved, because PartitionAlloc always uses kSuperPageSize
// alignment when reserving address spaces. One can use an "is in pool?" check
64 // to further determine which part of the super page is used by
65 // PartitionAlloc. This isn't a problem in 64-bit mode, where allocation
66 // granularity is kSuperPageSize.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
    PA_THREAD_ISOLATED_ALIGN ReservationOffsetTable {
 public:
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  // There is one reservation offset table per Pool in 64-bit mode.
  static constexpr size_t kReservationOffsetTableCoverage = kPoolMaxSize;
  // One uint16_t entry per super page covered by a pool.
  static constexpr size_t kReservationOffsetTableLength =
      kReservationOffsetTableCoverage >> kSuperPageShift;
#else
  // The size of the reservation offset table should cover the entire 32-bit
  // address space, one element per super page.
  static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
  static constexpr size_t kReservationOffsetTableLength =
      4 * kGiB / kSuperPageSize;
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
  // Real offsets must never collide with the special tag values.
  static_assert(kReservationOffsetTableLength < kOffsetTagNormalBuckets,
                "Offsets should be smaller than kOffsetTagNormalBuckets.");

  struct _ReservationOffsetTable {
    // The number of table elements is less than MAX_UINT16, so the element type
    // can be uint16_t.
    static_assert(
        kReservationOffsetTableLength <= std::numeric_limits<uint16_t>::max(),
        "Length of the reservation offset table must be less than MAX_UINT16");
    uint16_t offsets[kReservationOffsetTableLength] = {};

    // Every entry starts out tagged "not allocated by PartitionAlloc".
    constexpr _ReservationOffsetTable() {
      for (uint16_t& offset : offsets) {
        offset = kOffsetTagNotAllocated;
      }
    }
  };
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  // If thread isolation support is enabled, we need to write-protect the tables
  // of the thread isolated pool. For this, we need to pad the tables so that
  // the thread isolated ones start on a page boundary.
  char pad_[PA_THREAD_ISOLATED_ARRAY_PAD_SZ(_ReservationOffsetTable,
                                            kNumPools)] = {};
  // One table per pool; indexed by (pool_handle - 1).
  struct _ReservationOffsetTable tables[kNumPools];
  static PA_CONSTINIT ReservationOffsetTable singleton_;
#else
  // A single table for the entire 32-bit address space.
  static PA_CONSTINIT struct _ReservationOffsetTable reservation_offset_table_;
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
};
112
113 #if BUILDFLAG(HAS_64_BIT_POINTERS)
GetReservationOffsetTable(pool_handle handle)114 PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(pool_handle handle) {
115 PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
116 return ReservationOffsetTable::singleton_.tables[handle - 1].offsets;
117 }
118
GetReservationOffsetTableEnd(pool_handle handle)119 PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
120 pool_handle handle) {
121 return GetReservationOffsetTable(handle) +
122 ReservationOffsetTable::kReservationOffsetTableLength;
123 }
124
GetReservationOffsetTable(uintptr_t address)125 PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) {
126 pool_handle handle = GetPool(address);
127 return GetReservationOffsetTable(handle);
128 }
129
GetReservationOffsetTableEnd(uintptr_t address)130 PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
131 uintptr_t address) {
132 pool_handle handle = GetPool(address);
133 return GetReservationOffsetTableEnd(handle);
134 }
135
ReservationOffsetPointer(pool_handle pool,uintptr_t offset_in_pool)136 PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(pool_handle pool,
137 uintptr_t offset_in_pool) {
138 size_t table_index = offset_in_pool >> kSuperPageShift;
139 PA_DCHECK(table_index <
140 ReservationOffsetTable::kReservationOffsetTableLength);
141 return GetReservationOffsetTable(pool) + table_index;
142 }
143 #else // BUILDFLAG(HAS_64_BIT_POINTERS)
GetReservationOffsetTable(uintptr_t address)144 PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) {
145 return ReservationOffsetTable::reservation_offset_table_.offsets;
146 }
147
GetReservationOffsetTableEnd(uintptr_t address)148 PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
149 uintptr_t address) {
150 return ReservationOffsetTable::reservation_offset_table_.offsets +
151 ReservationOffsetTable::kReservationOffsetTableLength;
152 }
153 #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
154
// Returns the table slot for the super page containing `address`.
PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(uintptr_t address) {
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  // In 64-bit mode, find the owning Pool and compute the offset from its base.
  auto [pool, unused_base, offset] = GetPoolInfo(address);
  return ReservationOffsetPointer(pool, offset);
#else
  // In 32-bit mode, the single table is indexed by the absolute super-page
  // number of `address`.
  size_t table_index = address >> kSuperPageShift;
  PA_DCHECK(table_index <
            ReservationOffsetTable::kReservationOffsetTableLength);
  return GetReservationOffsetTable(address) + table_index;
#endif
}
167
ComputeReservationStart(uintptr_t address,uint16_t * offset_ptr)168 PA_ALWAYS_INLINE uintptr_t ComputeReservationStart(uintptr_t address,
169 uint16_t* offset_ptr) {
170 return (address & kSuperPageBaseMask) -
171 (static_cast<size_t>(*offset_ptr) << kSuperPageShift);
172 }
173
174 // If the given address doesn't point to direct-map allocated memory,
175 // returns 0.
PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // Remember which pool(s) contain `address`, so we can verify below that the
  // computed reservation start lands in the same pool.
  bool is_in_brp_pool = IsManagedByPartitionAllocBRPPool(address);
  bool is_in_regular_pool = IsManagedByPartitionAllocRegularPool(address);
  bool is_in_configurable_pool =
      IsManagedByPartitionAllocConfigurablePool(address);
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  bool is_in_thread_isolated_pool =
      IsManagedByPartitionAllocThreadIsolatedPool(address);
#endif

  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  PA_DCHECK(!is_in_brp_pool);
#endif
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
  uint16_t* offset_ptr = ReservationOffsetPointer(address);
  // Callers must only pass addresses inside super pages allocated by
  // PartitionAlloc.
  PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
  // Normal-bucket super pages are not direct-map memory.
  if (*offset_ptr == kOffsetTagNormalBuckets) {
    return 0;
  }
  uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // MSVC workaround: the preprocessor seems to choke on an `#if` embedded
  // inside another macro (PA_DCHECK).
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
  constexpr size_t kBRPOffset =
      AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
      AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap;
#else
  constexpr size_t kBRPOffset = 0ull;
#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
  // Make sure the reservation start is in the same pool as |address|.
  // In the 32-bit mode, the beginning of a reservation may be excluded
  // from the BRP pool, so shift the pointer. The other pools don't have
  // this logic.
  PA_DCHECK(is_in_brp_pool ==
            IsManagedByPartitionAllocBRPPool(reservation_start + kBRPOffset));
  PA_DCHECK(is_in_regular_pool ==
            IsManagedByPartitionAllocRegularPool(reservation_start));
  PA_DCHECK(is_in_configurable_pool ==
            IsManagedByPartitionAllocConfigurablePool(reservation_start));
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  PA_DCHECK(is_in_thread_isolated_pool ==
            IsManagedByPartitionAllocThreadIsolatedPool(reservation_start));
#endif
  // The first super page of a reservation stores offset 0.
  PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)

  return reservation_start;
}
227
228 #if BUILDFLAG(HAS_64_BIT_POINTERS)
229 // If the given address doesn't point to direct-map allocated memory,
230 // returns 0.
231 // This variant has better performance than the regular one on 64-bit builds if
232 // the Pool that an allocation belongs to is known.
PA_ALWAYS_INLINE uintptr_t
GetDirectMapReservationStart(uintptr_t address,
                             pool_handle pool,
                             uintptr_t offset_in_pool) {
  // The (pool, offset) pair must be consistent with `address`.
  PA_DCHECK(AddressPoolManager::GetInstance().GetPoolBaseAddress(pool) +
                offset_in_pool ==
            address);
  uint16_t* offset_ptr = ReservationOffsetPointer(pool, offset_in_pool);
  // Callers must only pass addresses inside allocated super pages.
  PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
  // Normal-bucket super pages are not part of a direct-map reservation.
  if (*offset_ptr == kOffsetTagNormalBuckets) {
    return 0;
  }
  uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
  // The first super page of a reservation stores offset 0.
  PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
  return reservation_start;
}
249 #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
250
251 // Returns true if |address| is the beginning of the first super page of a
252 // reservation, i.e. either a normal bucket super page, or the first super page
253 // of direct map.
254 // |address| must belong to an allocated super page.
IsReservationStart(uintptr_t address)255 PA_ALWAYS_INLINE bool IsReservationStart(uintptr_t address) {
256 uint16_t* offset_ptr = ReservationOffsetPointer(address);
257 PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
258 return ((*offset_ptr == kOffsetTagNormalBuckets) || (*offset_ptr == 0)) &&
259 (address % kSuperPageSize == 0);
260 }
261
262 // Returns true if |address| belongs to a normal bucket super page.
IsManagedByNormalBuckets(uintptr_t address)263 PA_ALWAYS_INLINE bool IsManagedByNormalBuckets(uintptr_t address) {
264 uint16_t* offset_ptr = ReservationOffsetPointer(address);
265 return *offset_ptr == kOffsetTagNormalBuckets;
266 }
267
268 // Returns true if |address| belongs to a direct map region.
IsManagedByDirectMap(uintptr_t address)269 PA_ALWAYS_INLINE bool IsManagedByDirectMap(uintptr_t address) {
270 uint16_t* offset_ptr = ReservationOffsetPointer(address);
271 return *offset_ptr != kOffsetTagNormalBuckets &&
272 *offset_ptr != kOffsetTagNotAllocated;
273 }
274
275 // Returns true if |address| belongs to a normal bucket super page or a direct
276 // map region, i.e. belongs to an allocated super page.
IsManagedByNormalBucketsOrDirectMap(uintptr_t address)277 PA_ALWAYS_INLINE bool IsManagedByNormalBucketsOrDirectMap(uintptr_t address) {
278 uint16_t* offset_ptr = ReservationOffsetPointer(address);
279 return *offset_ptr != kOffsetTagNotAllocated;
280 }
281
282 } // namespace partition_alloc::internal
283
284 #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_RESERVATION_OFFSET_TABLE_H_
285