// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_

#include <cstddef>
#include <cstdint>

#include "base/allocator/partition_allocator/freeslot_bitmap.h"
#include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "build/build_config.h"

#if !defined(ARCH_CPU_BIG_ENDIAN)
#include "base/allocator/partition_allocator/reverse_bytes.h"
#endif  // !defined(ARCH_CPU_BIG_ENDIAN)

namespace partition_alloc::internal {

namespace {

[[noreturn]] PA_NOINLINE void FreelistCorruptionDetected(size_t extra) {
  // Make it visible in minidumps.
  PA_DEBUG_DATA_ON_STACK("extra", extra);
  PA_IMMEDIATE_CRASH();
}

}  // namespace

class PartitionFreelistEntry;

class EncodedPartitionFreelistEntryPtr {
 private:
  PA_ALWAYS_INLINE constexpr explicit EncodedPartitionFreelistEntryPtr(
      std::nullptr_t)
      : encoded_(Transform(0)) {}
  PA_ALWAYS_INLINE explicit EncodedPartitionFreelistEntryPtr(void* ptr)
      // The encoded pointer stays MTE-tagged.
      : encoded_(Transform(reinterpret_cast<uintptr_t>(ptr))) {}

  PA_ALWAYS_INLINE PartitionFreelistEntry* Decode() const {
    return reinterpret_cast<PartitionFreelistEntry*>(Transform(encoded_));
  }

  PA_ALWAYS_INLINE constexpr uintptr_t Inverted() const { return ~encoded_; }

  PA_ALWAYS_INLINE constexpr void Override(uintptr_t encoded) {
    encoded_ = encoded;
  }

  PA_ALWAYS_INLINE constexpr explicit operator bool() const { return encoded_; }

  // Transform() works the same in both directions, so can be used for
  // encoding and decoding.
  PA_ALWAYS_INLINE static constexpr uintptr_t Transform(uintptr_t address) {
    // We use bswap on little endian as a fast transformation for two reasons:
    // 1) On 64 bit architectures, the pointer is very unlikely to be a
    //    canonical address. Therefore, if an object is freed and its vtable is
    //    used where the attacker doesn't get the chance to run allocations
    //    between the free and use, the vtable dereference is likely to fault.
    // 2) If the attacker has a linear buffer overflow and elects to try and
    //    corrupt a freelist pointer, partial pointer overwrite attacks are
    //    thwarted.
    // For big endian, similar guarantees are arrived at with a negation.
#if defined(ARCH_CPU_BIG_ENDIAN)
    uintptr_t transformed = ~address;
#else
    uintptr_t transformed = ReverseBytes(address);
#endif
    return transformed;
  }
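
  // For illustration of the little-endian transform above, using a
  // hypothetical freelist pointer:
  //   Transform(0x00007ffdeadbeef0) == 0xf0eedbeafd7f0000
  // The byte-swapped value is not a canonical user-space address on typical
  // 64-bit platforms, so dereferencing a stale or corrupted copy of it is
  // likely to fault. A linear overflow that clobbers the first stored bytes
  // corrupts the high-order bytes of the decoded pointer, which defeats
  // partial pointer overwrites.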

  uintptr_t encoded_;

  friend PartitionFreelistEntry;
};

// Freelist entries are encoded for security reasons. See
// //base/allocator/partition_allocator/PartitionAlloc.md and |Transform()| for
// the rationale and mechanism, respectively.
class PartitionFreelistEntry {
 private:
  constexpr explicit PartitionFreelistEntry(std::nullptr_t)
      : encoded_next_(EncodedPartitionFreelistEntryPtr(nullptr))
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(encoded_next_.Inverted())
#endif
  {
  }
  explicit PartitionFreelistEntry(PartitionFreelistEntry* next)
      : encoded_next_(EncodedPartitionFreelistEntryPtr(next))
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(encoded_next_.Inverted())
#endif
  {
  }
  // For testing only.
  PartitionFreelistEntry(void* next, bool make_shadow_match)
      : encoded_next_(EncodedPartitionFreelistEntryPtr(next))
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(make_shadow_match ? encoded_next_.Inverted() : 12345)
#endif
  {
  }

 public:
  ~PartitionFreelistEntry() = delete;

  // Emplaces the freelist entry at the beginning of the given slot span, and
  // initializes it as null-terminated.
  PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitNull(
      void* slot_start_tagged) {
    // |slot_start_tagged| is MTE-tagged.
    auto* entry = new (slot_start_tagged) PartitionFreelistEntry(nullptr);
    return entry;
  }
  PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitNull(
      uintptr_t slot_start) {
    return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start));
  }

  // Emplaces the freelist entry at the beginning of the given slot span, and
  // initializes it with the given |next| pointer, but encoded.
  //
  // This freelist is built for the purpose of thread-cache. This means that we
  // can't perform a check that this and the next pointer belong to the same
  // super page, as thread-cache spans may chain slots across super pages.
  PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitForThreadCache(
      uintptr_t slot_start,
      PartitionFreelistEntry* next) {
    auto* entry =
        new (SlotStartAddr2Ptr(slot_start)) PartitionFreelistEntry(next);
    return entry;
  }

  // Emplaces the freelist entry at the beginning of the given slot span, and
  // initializes it with the given |next| pointer.
  //
  // This is for testing purposes only! |make_shadow_match| allows you to choose
  // if the shadow matches the next pointer properly or is trash.
  PA_ALWAYS_INLINE static void EmplaceAndInitForTest(uintptr_t slot_start,
                                                     void* next,
                                                     bool make_shadow_match) {
    new (SlotStartAddr2Ptr(slot_start))
        PartitionFreelistEntry(next, make_shadow_match);
  }

  void CorruptNextForTesting(uintptr_t v) {
    // We just need a value that can never be a valid pointer here.
    encoded_next_.Override(EncodedPartitionFreelistEntryPtr::Transform(v));
  }

  // Puts |extra| on the stack before crashing in case of memory
  // corruption. Meant to be used to report the failed allocation size.
  template <bool crash_on_corruption>
  PA_ALWAYS_INLINE PartitionFreelistEntry* GetNextForThreadCache(
      size_t extra) const;
  PA_ALWAYS_INLINE PartitionFreelistEntry* GetNext(size_t extra) const;

  PA_NOINLINE void CheckFreeList(size_t extra) const {
    for (auto* entry = this; entry; entry = entry->GetNext(extra)) {
      // |GetNext()| checks freelist integrity.
    }
  }

  PA_NOINLINE void CheckFreeListForThreadCache(size_t extra) const {
    for (auto* entry = this; entry;
         entry = entry->GetNextForThreadCache<true>(extra)) {
      // |GetNextForThreadCache()| checks freelist integrity.
    }
  }

  PA_ALWAYS_INLINE void SetNext(PartitionFreelistEntry* entry) {
    // SetNext() is called either on the freelist head (when provisioning new
    // slots) or after GetNext() has already been called, so there is no need
    // to pass the size.
#if BUILDFLAG(PA_DCHECK_IS_ON)
    // Regular freelists always point to an entry within the same super page.
    //
    // This is most likely a PartitionAlloc bug if this triggers.
    if (PA_UNLIKELY(entry &&
                    (SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
                        (SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) {
      FreelistCorruptionDetected(0);
    }
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)

    encoded_next_ = EncodedPartitionFreelistEntryPtr(entry);
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
    shadow_ = encoded_next_.Inverted();
#endif
  }

  // Zeroes out |this| before returning the slot. The pointer to this memory
  // will be returned to the user (caller of Alloc()), so it must not contain
  // internal data.
  PA_ALWAYS_INLINE uintptr_t ClearForAllocation() {
    encoded_next_.Override(0);
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
    shadow_ = 0;
#endif
    return SlotStartPtr2Addr(this);
  }

  PA_ALWAYS_INLINE constexpr bool IsEncodedNextPtrZero() const {
    return !encoded_next_;
  }

 private:
  template <bool crash_on_corruption>
  PA_ALWAYS_INLINE PartitionFreelistEntry* GetNextInternal(
      size_t extra,
      bool for_thread_cache) const;

  PA_ALWAYS_INLINE static bool IsSane(const PartitionFreelistEntry* here,
                                      const PartitionFreelistEntry* next,
                                      bool for_thread_cache) {
    // Don't allow the freelist to be blindly followed to any location.
    // Checks two constraints:
    // - here and next must belong to the same superpage, unless this is in the
    //   thread cache (they even always belong to the same slot span).
    // - next cannot point inside the metadata area.
    //
    // Also, the lightweight UaF detection (pointer shadow) is checked.

    uintptr_t here_address = SlotStartPtr2Addr(here);
    uintptr_t next_address = SlotStartPtr2Addr(next);

#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
    bool shadow_ptr_ok = here->encoded_next_.Inverted() == here->shadow_;
#else
    bool shadow_ptr_ok = true;
#endif

    bool same_superpage = (here_address & kSuperPageBaseMask) ==
                          (next_address & kSuperPageBaseMask);
#if BUILDFLAG(USE_FREESLOT_BITMAP)
    bool marked_as_free_in_bitmap =
        for_thread_cache ? true : !FreeSlotBitmapSlotIsUsed(next_address);
#else
    bool marked_as_free_in_bitmap = true;
#endif

    // This is necessary but not sufficient when quarantine is enabled, see
    // SuperPagePayloadBegin() in partition_page.h. However we don't want to
    // fetch anything from the root in this function.
    bool not_in_metadata =
        (next_address & kSuperPageOffsetMask) >= PartitionPageSize();

    if (for_thread_cache) {
      return shadow_ptr_ok & not_in_metadata;
    } else {
      return shadow_ptr_ok & same_superpage & marked_as_free_in_bitmap &
             not_in_metadata;
    }
  }

  EncodedPartitionFreelistEntryPtr encoded_next_;
  // This is intended to detect unintentional corruptions of the freelist.
  // These can happen due to a Use-after-Free, or overflow of the previous
  // allocation in the slot span.
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
  uintptr_t shadow_;
#endif
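
  // For illustration, with the shadow enabled, SetNext() stores both the
  // transformed pointer and its bitwise complement. With a hypothetical
  // encoded value:
  //   encoded_next_ == 0x00007f00aa55aa55  =>  shadow_ == 0xffff80ff55aa55aa
  // IsSane() recomputes ~encoded_next_ and compares it against |shadow_|, so a
  // stray write that hits only one of the two fields makes the comparison
  // fail, and GetNext() then either crashes in FreelistCorruptionDetected() or
  // returns nullptr, depending on |crash_on_corruption|.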
};

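// A minimal usage sketch (illustrative only; |slot_a| and |slot_b| are
// hypothetical slot start addresses in the same slot span, and real callers
// live in the allocator itself):
//   auto* head = PartitionFreelistEntry::EmplaceAndInitNull(slot_a);
//   auto* next = PartitionFreelistEntry::EmplaceAndInitNull(slot_b);
//   head->SetNext(next);                  // Encodes |next|, refreshes shadow.
//   PartitionFreelistEntry* n = head->GetNext(0);  // Decodes, sanity-checks.
//   uintptr_t slot_start = head->ClearForAllocation();  // Pop |head|.
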
static_assert(kSmallestBucket >= sizeof(PartitionFreelistEntry),
              "Need enough space for freelist entries in the smallest slot");
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// The smallest bucket actually used. Note that the smallest request is 1 (if
// it's 0, it gets patched to 1), and ref-count gets added to it.
namespace {
constexpr size_t kSmallestUsedBucket =
    base::bits::AlignUp(1 + sizeof(PartitionRefCount), kSmallestBucket);
}
static_assert(kSmallestUsedBucket >=
                  sizeof(PartitionFreelistEntry) + sizeof(PartitionRefCount),
              "Need enough space for freelist entries and the ref-count in the "
              "smallest *used* slot");
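// For instance, with a hypothetical 8-byte ref-count and a 16-byte smallest
// bucket, the smallest request of 1 byte becomes 1 + 8 = 9 bytes, and
// AlignUp(9, 16) == 16, so kSmallestUsedBucket would be 16 bytes.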
#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)

template <bool crash_on_corruption>
PA_ALWAYS_INLINE PartitionFreelistEntry*
PartitionFreelistEntry::GetNextInternal(size_t extra,
                                        bool for_thread_cache) const {
  // GetNext() can be called on discarded memory, in which case |encoded_next_|
  // is 0, and none of the checks apply. Don't prefetch nullptr either.
  if (IsEncodedNextPtrZero()) {
    return nullptr;
  }

  auto* ret = encoded_next_.Decode();
  // We rely on constant propagation to remove the branches coming from
  // |for_thread_cache|, since the argument is always a compile-time constant.
  if (PA_UNLIKELY(!IsSane(this, ret, for_thread_cache))) {
    if constexpr (crash_on_corruption) {
      // Put the corrupted data on the stack; it may give us more information
      // about the kind of corruption.
      PA_DEBUG_DATA_ON_STACK("first",
                             static_cast<size_t>(encoded_next_.encoded_));
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
      PA_DEBUG_DATA_ON_STACK("second", static_cast<size_t>(shadow_));
#endif
      FreelistCorruptionDetected(extra);
    } else {
      return nullptr;
    }
  }

  // In real-world profiles, the load of |encoded_next_| above is responsible
  // for a large fraction of the allocation cost. However, it cannot be
  // prefetched far enough in advance, since its address is only known right
  // before the load.
  //
  // For repeated allocations though, the memory that the *next* allocation
  // will touch is *ret, so prefetch it now.
  PA_PREFETCH(ret);

  return ret;
}

template <bool crash_on_corruption>
PA_ALWAYS_INLINE PartitionFreelistEntry*
PartitionFreelistEntry::GetNextForThreadCache(size_t extra) const {
  return GetNextInternal<crash_on_corruption>(extra, true);
}

PA_ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNext(
    size_t extra) const {
  return GetNextInternal<true>(extra, false);
}

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_