// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_RESERVATION_OFFSET_TABLE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_RESERVATION_OFFSET_TABLE_H_

#include <cstddef>
#include <cstdint>
#include <limits>
#include <tuple>

#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"

namespace partition_alloc::internal {

static constexpr uint16_t kOffsetTagNotAllocated =
    std::numeric_limits<uint16_t>::max();
static constexpr uint16_t kOffsetTagNormalBuckets =
    std::numeric_limits<uint16_t>::max() - 1;

// The main purpose of the reservation offset table is to easily locate the
// direct map reservation start address for any given address. There is one
// entry in the table for each super page.
//
// When PartitionAlloc reserves an address region, it is always aligned to a
// super page boundary. However, in 32-bit mode, the size may not be
// super-page-aligned, so the region may look like this:
//   |<--------- actual reservation size --------->|
//   +----------+----------+-----+-----------+-----+ - - - +
//   |SuperPage0|SuperPage1| ... |SuperPage K|SuperPage K+1|
//   +----------+----------+-----+-----------+-----+ - - - +
//                                           |<-X->|<-Y*)->|
//
// The table entries for reserved super pages say how many super pages away
// from the reservation start the super page is:
//   +----------+----------+-----+-----------+-------------+
//   |Entry for |Entry for | ... |Entry for  |Entry for    |
//   |SuperPage0|SuperPage1|     |SuperPage K|SuperPage K+1|
//   +----------+----------+-----+-----------+-------------+
//   |     0    |    1     | ... |     K     |   K + 1     |
//   +----------+----------+-----+-----------+-------------+
//
// For an address Z, the reservation start can be found using this formula:
//   ((Z >> kSuperPageShift) - (the entry for Z)) << kSuperPageShift
//
// kOffsetTagNotAllocated is a special tag denoting that the super page isn't
// allocated by PartitionAlloc and kOffsetTagNormalBuckets denotes that it is
// used for a normal-bucket allocation, not for a direct-map allocation.
//
// *) In 32-bit mode, Y is not used by PartitionAlloc, and cannot be used
//    until X is unreserved, because PartitionAlloc always uses kSuperPageSize
//    alignment when reserving address spaces. One can use an "is in pool?"
//    check to further determine which part of the super page is used by
//    PartitionAlloc. This isn't a problem in 64-bit mode, where the
//    allocation granularity is kSuperPageSize.
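//
// Worked example (illustrative only, assuming kSuperPageShift == 21, i.e.
// 2 MiB super pages): for a direct-map reservation starting at 0x1000000, an
// address Z = 0x1401234 lies in the reservation's third super page, so the
// table entry for Z's super page is 2. The formula above then yields
//   ((Z >> 21) - 2) << 21 == (10 - 2) << 21 == 0x1000000,
// which is the reservation start.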
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
 public:
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  // There is one reservation offset table per Pool in 64-bit mode.
  static constexpr size_t kReservationOffsetTableCoverage = kPoolMaxSize;
  static constexpr size_t kReservationOffsetTableLength =
      kReservationOffsetTableCoverage >> kSuperPageShift;
#else
  // The size of the reservation offset table should cover the entire 32-bit
  // address space, one element per super page.
  static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
  static constexpr size_t kReservationOffsetTableLength =
      4 * kGiB / kSuperPageSize;
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
  static_assert(kReservationOffsetTableLength < kOffsetTagNormalBuckets,
                "Offsets should be smaller than kOffsetTagNormalBuckets.");

  struct _ReservationOffsetTable {
    // The number of table elements is less than MAX_UINT16, so the element
    // type can be uint16_t.
    static_assert(
        kReservationOffsetTableLength <= std::numeric_limits<uint16_t>::max(),
        "Length of the reservation offset table must be less than MAX_UINT16");
    uint16_t offsets[kReservationOffsetTableLength] = {};

    constexpr _ReservationOffsetTable() {
      for (uint16_t& offset : offsets) {
        offset = kOffsetTagNotAllocated;
      }
    }
  };
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  // If pkey support is enabled, we need to pkey-tag the tables of the pkey
  // pool. For this, we need to pad the tables so that the pkey ones start on a
  // page boundary.
  struct _PaddedReservationOffsetTables {
    char pad_[PA_PKEY_ARRAY_PAD_SZ(_ReservationOffsetTable, kNumPools)] = {};
    struct _ReservationOffsetTable tables[kNumPools];
    char pad_after_[PA_PKEY_FILL_PAGE_SZ(sizeof(_ReservationOffsetTable))] = {};
  };
  static PA_CONSTINIT _PaddedReservationOffsetTables
      padded_reservation_offset_tables_ PA_PKEY_ALIGN;
#else
  // A single table for the entire 32-bit address space.
  static PA_CONSTINIT struct _ReservationOffsetTable reservation_offset_table_;
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
};

#if BUILDFLAG(HAS_64_BIT_POINTERS)
PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(pool_handle handle) {
  PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
  return ReservationOffsetTable::padded_reservation_offset_tables_
      .tables[handle - 1]
      .offsets;
}

PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
    pool_handle handle) {
  return GetReservationOffsetTable(handle) +
         ReservationOffsetTable::kReservationOffsetTableLength;
}

PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) {
  pool_handle handle = GetPool(address);
  return GetReservationOffsetTable(handle);
}

PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
    uintptr_t address) {
  pool_handle handle = GetPool(address);
  return GetReservationOffsetTableEnd(handle);
}

PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(pool_handle pool,
                                                    uintptr_t offset_in_pool) {
  size_t table_index = offset_in_pool >> kSuperPageShift;
  PA_DCHECK(table_index <
            ReservationOffsetTable::kReservationOffsetTableLength);
  return GetReservationOffsetTable(pool) + table_index;
}
#else   // BUILDFLAG(HAS_64_BIT_POINTERS)
PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) {
  return ReservationOffsetTable::reservation_offset_table_.offsets;
}

PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
    uintptr_t address) {
  return ReservationOffsetTable::reservation_offset_table_.offsets +
         ReservationOffsetTable::kReservationOffsetTableLength;
}
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(uintptr_t address) {
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  // In 64-bit mode, find the owning Pool and compute the offset from its base.
  auto [pool, offset] = GetPoolAndOffset(address);
  return ReservationOffsetPointer(pool, offset);
#else
  size_t table_index = address >> kSuperPageShift;
  PA_DCHECK(table_index <
            ReservationOffsetTable::kReservationOffsetTableLength);
  return GetReservationOffsetTable(address) + table_index;
#endif
}

PA_ALWAYS_INLINE uintptr_t ComputeReservationStart(uintptr_t address,
                                                   uint16_t* offset_ptr) {
  return (address & kSuperPageBaseMask) -
         (static_cast<size_t>(*offset_ptr) << kSuperPageShift);
}

// If the given address doesn't point to direct-map allocated memory,
// returns 0.
PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  bool is_in_brp_pool = IsManagedByPartitionAllocBRPPool(address);
  bool is_in_regular_pool = IsManagedByPartitionAllocRegularPool(address);
  bool is_in_configurable_pool =
      IsManagedByPartitionAllocConfigurablePool(address);
#if BUILDFLAG(ENABLE_PKEYS)
  bool is_in_pkey_pool = IsManagedByPartitionAllocPkeyPool(address);
#endif

  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  PA_DCHECK(!is_in_brp_pool);
#endif
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
  uint16_t* offset_ptr = ReservationOffsetPointer(address);
  PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
  if (*offset_ptr == kOffsetTagNormalBuckets) {
    return 0;
  }
  uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // MSVC workaround: the preprocessor seems to choke on an `#if` embedded
  // inside another macro (PA_DCHECK).
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
  constexpr size_t kBRPOffset =
      AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
      AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap;
#else
  constexpr size_t kBRPOffset = 0ull;
#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
  // Make sure the reservation start is in the same pool as |address|.
  // In 32-bit mode, the beginning of a reservation may be excluded from the
  // BRP pool, so shift the pointer by |kBRPOffset|. The other pools don't
  // have this logic.
  PA_DCHECK(is_in_brp_pool ==
            IsManagedByPartitionAllocBRPPool(reservation_start + kBRPOffset));
  PA_DCHECK(is_in_regular_pool ==
            IsManagedByPartitionAllocRegularPool(reservation_start));
  PA_DCHECK(is_in_configurable_pool ==
            IsManagedByPartitionAllocConfigurablePool(reservation_start));
#if BUILDFLAG(ENABLE_PKEYS)
  PA_DCHECK(is_in_pkey_pool ==
            IsManagedByPartitionAllocPkeyPool(reservation_start));
#endif
  PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)

  return reservation_start;
}

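// Usage sketch (illustrative only, not part of the API above): a caller that
// holds an address inside an allocated super page can recover the direct-map
// reservation base, or detect that the address is normal-bucket-managed:
//   uintptr_t reservation_start = GetDirectMapReservationStart(address);
//   if (reservation_start) {
//     // |address| lies inside a direct-map reservation starting there.
//   } else {
//     // |address| belongs to a normal-bucket super page.
//   }
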
#if BUILDFLAG(HAS_64_BIT_POINTERS)
// If the given address doesn't point to direct-map allocated memory,
// returns 0.
// This variant has better performance than the regular one on 64-bit builds
// if the Pool that an allocation belongs to is known.
PA_ALWAYS_INLINE uintptr_t
GetDirectMapReservationStart(uintptr_t address,
                             pool_handle pool,
                             uintptr_t offset_in_pool) {
  PA_DCHECK(AddressPoolManager::GetInstance().GetPoolBaseAddress(pool) +
                offset_in_pool ==
            address);
  uint16_t* offset_ptr = ReservationOffsetPointer(pool, offset_in_pool);
  PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
  if (*offset_ptr == kOffsetTagNormalBuckets) {
    return 0;
  }
  uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
  PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
  return reservation_start;
}
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

// Returns true if |address| is the beginning of the first super page of a
// reservation, i.e. either a normal bucket super page, or the first super
// page of a direct map.
// |address| must belong to an allocated super page.
PA_ALWAYS_INLINE bool IsReservationStart(uintptr_t address) {
  uint16_t* offset_ptr = ReservationOffsetPointer(address);
  PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
  return ((*offset_ptr == kOffsetTagNormalBuckets) || (*offset_ptr == 0)) &&
         (address % kSuperPageSize == 0);
}

// Returns true if |address| belongs to a normal bucket super page.
PA_ALWAYS_INLINE bool IsManagedByNormalBuckets(uintptr_t address) {
  uint16_t* offset_ptr = ReservationOffsetPointer(address);
  return *offset_ptr == kOffsetTagNormalBuckets;
}

// Returns true if |address| belongs to a direct map region.
PA_ALWAYS_INLINE bool IsManagedByDirectMap(uintptr_t address) {
  uint16_t* offset_ptr = ReservationOffsetPointer(address);
  return *offset_ptr != kOffsetTagNormalBuckets &&
         *offset_ptr != kOffsetTagNotAllocated;
}

// Returns true if |address| belongs to a normal bucket super page or a direct
// map region, i.e. belongs to an allocated super page.
PA_ALWAYS_INLINE bool IsManagedByNormalBucketsOrDirectMap(uintptr_t address) {
  uint16_t* offset_ptr = ReservationOffsetPointer(address);
  return *offset_ptr != kOffsetTagNotAllocated;
}

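// Classification sketch (illustrative only): taken together, the predicates
// above give a three-way classification of an address within a PartitionAlloc
// pool:
//   if (IsManagedByNormalBucketsOrDirectMap(address)) {
//     if (IsManagedByDirectMap(address)) {
//       // Direct-map region: GetDirectMapReservationStart(address) != 0.
//     } else {
//       // Normal-bucket super page: IsManagedByNormalBuckets(address).
//     }
//   } else {
//     // The super page is not allocated by PartitionAlloc.
//   }
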
}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_RESERVATION_OFFSET_TABLE_H_