// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_H_

#include <stddef.h>
#include <stdint.h>

#include "base/allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/base_export.h"
#include "base/types/strong_alias.h"
#include "build/build_config.h"

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#endif

namespace allocator_shim {

// Allocator Shim API. It allows the caller to:
// - Configure the behavior of the allocator (what to do on OOM failures).
// - Install new hooks (AllocatorDispatch) in the allocator chain.

// When this shim layer is enabled, an allocation is routed as follows:
//
// [allocator_shim_override_*.h] Intercept malloc() / operator new calls:
// The override_* headers define the symbols required to intercept calls to
// malloc() and operator new (if not overridden by specific C++ classes).
//
// [allocator_shim.cc] Route allocation calls to the shim:
// The headers above route the calls to the internal ShimMalloc(), ShimFree(),
// ShimCppNew() etc. methods defined in allocator_shim.cc.
// These methods will (1) forward the allocation call to the front of the
// AllocatorDispatch chain and (2) perform security hardening (e.g., they
// might call std::new_handler on OOM failure).
//
// [allocator_shim_default_dispatch_to_*.cc] The AllocatorDispatch chain:
// This is a singly linked list where each element is a struct with function
// pointers (|malloc_function|, |free_function|, etc.). Normally the chain
// consists of a single AllocatorDispatch element, herein called the
// "default dispatch", which is statically defined at build time and
// ultimately routes the calls to the actual allocator defined by the build
// config (glibc, ...).
//
// It is possible to dynamically insert further AllocatorDispatch stages at
// the front of the chain, for debugging / profiling purposes.
//
// All the functions must be thread-safe; the shim does not enforce any
// serialization. This makes it possible to route to thread-aware allocators
// without introducing unnecessary performance hits.

struct AllocatorDispatch {
  using AllocFn = void*(const AllocatorDispatch* self,
                        size_t size,
                        void* context);
  using AllocUncheckedFn = void*(const AllocatorDispatch* self,
                                 size_t size,
                                 void* context);
  using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
                                       size_t n,
                                       size_t size,
                                       void* context);
  using AllocAlignedFn = void*(const AllocatorDispatch* self,
                               size_t alignment,
                               size_t size,
                               void* context);
  using ReallocFn = void*(const AllocatorDispatch* self,
                          void* address,
                          size_t size,
                          void* context);
  using FreeFn = void(const AllocatorDispatch* self,
                      void* address,
                      void* context);
  // Returns the allocated size of user data (not including heap overhead).
  // Can be larger than the requested size.
  using GetSizeEstimateFn = size_t(const AllocatorDispatch* self,
                                   void* address,
                                   void* context);
  using ClaimedAddressFn = bool(const AllocatorDispatch* self,
                                void* address,
                                void* context);
  using BatchMallocFn = unsigned(const AllocatorDispatch* self,
                                 size_t size,
                                 void** results,
                                 unsigned num_requested,
                                 void* context);
  using BatchFreeFn = void(const AllocatorDispatch* self,
                           void** to_be_freed,
                           unsigned num_to_be_freed,
                           void* context);
  using FreeDefiniteSizeFn = void(const AllocatorDispatch* self,
                                  void* ptr,
                                  size_t size,
                                  void* context);
  using TryFreeDefaultFn = void(const AllocatorDispatch* self,
                                void* ptr,
                                void* context);
  using AlignedMallocFn = void*(const AllocatorDispatch* self,
                                size_t size,
                                size_t alignment,
                                void* context);
  using AlignedReallocFn = void*(const AllocatorDispatch* self,
                                 void* address,
                                 size_t size,
                                 size_t alignment,
                                 void* context);
  using AlignedFreeFn = void(const AllocatorDispatch* self,
                             void* address,
                             void* context);

  AllocFn* const alloc_function;
  AllocUncheckedFn* const alloc_unchecked_function;
  AllocZeroInitializedFn* const alloc_zero_initialized_function;
  AllocAlignedFn* const alloc_aligned_function;
  ReallocFn* const realloc_function;
  FreeFn* const free_function;
  GetSizeEstimateFn* const get_size_estimate_function;
  // claimed_address, batch_malloc, batch_free, free_definite_size and
  // try_free_default are specific to the macOS and iOS allocators.
  ClaimedAddressFn* const claimed_address_function;
  BatchMallocFn* const batch_malloc_function;
  BatchFreeFn* const batch_free_function;
  FreeDefiniteSizeFn* const free_definite_size_function;
  TryFreeDefaultFn* const try_free_default_function;
  // _aligned_malloc, _aligned_realloc, and _aligned_free are specific to the
  // Windows allocator.
  AlignedMallocFn* const aligned_malloc_function;
  AlignedReallocFn* const aligned_realloc_function;
  AlignedFreeFn* const aligned_free_function;

  const AllocatorDispatch* next;

  // |default_dispatch| is statically defined by one (and only one) of the
  // allocator_shim_default_dispatch_to_*.cc files, depending on the build
  // configuration.
  static const AllocatorDispatch default_dispatch;
};
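// Usage sketch (illustrative only, not part of this API): a dispatch that
// counts allocations and forwards every call to the next element in the
// chain. |CountingAlloc|, |CountingFree|, |g_alloc_count| and
// |g_counting_dispatch| are hypothetical names; a real dispatch must supply
// a forwarding wrapper for each remaining entry of the struct in the same
// way, and is installed with InsertAllocatorDispatch() (declared below).
//
//   #include <atomic>
//
//   std::atomic<size_t> g_alloc_count{0};
//
//   void* CountingAlloc(const AllocatorDispatch* self,
//                       size_t size,
//                       void* context) {
//     g_alloc_count.fetch_add(1, std::memory_order_relaxed);
//     // Forward to the next stage (ultimately the default dispatch).
//     return self->next->alloc_function(self->next, size, context);
//   }
//
//   void CountingFree(const AllocatorDispatch* self,
//                     void* address,
//                     void* context) {
//     self->next->free_function(self->next, address, context);
//   }
//
//   AllocatorDispatch g_counting_dispatch = {
//       &CountingAlloc,  // alloc_function
//       /* ...one forwarding wrapper per remaining entry, ending with... */
//       &CountingFree,   // free_function
//       /* ... */
//       nullptr,         // next: linked by the shim on insertion.
//   };
//
//   allocator_shim::InsertAllocatorDispatch(&g_counting_dispatch);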
// When |value| is true, makes malloc behave like new with respect to calling
// the new_handler if the allocation fails (see set_new_mode() on Windows).
BASE_EXPORT void SetCallNewHandlerOnMallocFailure(bool value);

// Allocates |size| bytes or returns nullptr. It does NOT call the
// new_handler, regardless of SetCallNewHandlerOnMallocFailure().
BASE_EXPORT void* UncheckedAlloc(size_t size);

// Frees memory allocated with UncheckedAlloc().
BASE_EXPORT void UncheckedFree(void* ptr);
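// Usage sketch (illustrative only): an allocation that must degrade
// gracefully on OOM rather than invoke the new_handler. |buffer| and the
// size below are hypothetical.
//
//   void* buffer = allocator_shim::UncheckedAlloc(4096);
//   if (!buffer) {
//     // Allocation failed; no new_handler was called. Recover gracefully.
//     return;
//   }
//   // ... use |buffer| ...
//   allocator_shim::UncheckedFree(buffer);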
// Inserts |dispatch| in front of the allocator chain. This function is
// thread-safe with respect to concurrent invocations of
// InsertAllocatorDispatch(). Callers are responsible for not inserting the
// same dispatch more than once.
BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);

// Test-only. Rationale: (1) there are no production use cases; (2) safely
// removing arbitrary elements from a singly linked list would require taking
// a lock in malloc(), which we really don't want.
BASE_EXPORT void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);

#if BUILDFLAG(IS_APPLE)
// The fallback function to be called when try_free_default_function receives
// a pointer that doesn't belong to the allocator.
BASE_EXPORT void TryFreeDefaultFallbackToFindZoneAndFree(void* ptr);

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT void InitializeDefaultAllocatorPartitionRoot();
bool IsDefaultAllocatorPartitionRootInitialized();
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

// On macOS, the allocator shim needs to be turned on at runtime.
BASE_EXPORT void InitializeAllocatorShim();
#endif  // BUILDFLAG(IS_APPLE)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT void EnablePartitionAllocMemoryReclaimer();

using EnableBrp = base::StrongAlias<class EnableBrpTag, bool>;
using EnableBrpZapping = base::StrongAlias<class EnableBrpZappingTag, bool>;
using EnableBrpPartitionMemoryReclaimer =
    base::StrongAlias<class EnableBrpPartitionMemoryReclaimerTag, bool>;
using SplitMainPartition = base::StrongAlias<class SplitMainPartitionTag, bool>;
using UseDedicatedAlignedPartition =
    base::StrongAlias<class UseDedicatedAlignedPartitionTag, bool>;
using AddDummyRefCount = base::StrongAlias<class AddDummyRefCountTag, bool>;
using AlternateBucketDistribution =
    base::features::AlternateBucketDistributionMode;

// If |thread_cache_on_non_quarantinable_partition| is specified, the
// thread-cache will be enabled on the non-quarantinable partition. The
// thread-cache on the main (malloc) partition will be disabled.
BASE_EXPORT void ConfigurePartitions(
    EnableBrp enable_brp,
    EnableBrpZapping enable_brp_zapping,
    EnableBrpPartitionMemoryReclaimer enable_brp_memory_reclaimer,
    SplitMainPartition split_main_partition,
    UseDedicatedAlignedPartition use_dedicated_aligned_partition,
    AddDummyRefCount add_dummy_ref_count,
    AlternateBucketDistribution use_alternate_bucket_distribution);

#if BUILDFLAG(USE_STARSCAN)
BASE_EXPORT void EnablePCScan(partition_alloc::internal::PCScan::InitConfig);
#endif
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

}  // namespace allocator_shim

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_H_