//===-- tsd_shared.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_SHARED_H_
#define SCUDO_TSD_SHARED_H_

#include "linux.h" // for getAndroidTlsPtr()
#include "tsd.h"

namespace scudo {

template <class Allocator, u32 TSDsArraySize, u32 DefaultTSDCount>
struct TSDRegistrySharedT {
  void initLinkerInitialized(Allocator *Instance) {
    Instance->initLinkerInitialized();
    CHECK_EQ(pthread_key_create(&PThreadKey, nullptr), 0); // For non-TLS
    for (u32 I = 0; I < TSDsArraySize; I++)
      TSDs[I].initLinkerInitialized(Instance);
    const u32 NumberOfCPUs = getNumberOfCPUs();
    setNumberOfTSDs((NumberOfCPUs == 0) ? DefaultTSDCount
                                        : Min(NumberOfCPUs, DefaultTSDCount));
    Initialized = true;
  }
  void init(Allocator *Instance) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(Instance);
  }

  void unmapTestOnly() {
    setCurrentTSD(nullptr);
    pthread_key_delete(PThreadKey);
  }

  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
                                     UNUSED bool MinimalInit) {
    if (LIKELY(getCurrentTSD()))
      return;
    initThread(Instance);
  }

  ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
    TSD<Allocator> *TSD = getCurrentTSD();
    DCHECK(TSD);
    *UnlockRequired = true;
    // Try to lock the currently associated context.
    if (TSD->tryLock())
      return TSD;
    // If that fails, go down the slow path.
    if (TSDsArraySize == 1U) {
      // Only 1 TSD, no need to go any further.
      // The compiler will optimize this one way or the other.
      TSD->lock();
      return TSD;
    }
    return getTSDAndLockSlow(TSD);
  }

  void disable() {
    Mutex.lock();
    for (u32 I = 0; I < TSDsArraySize; I++)
      TSDs[I].lock();
  }

  void enable() {
    for (s32 I = static_cast<s32>(TSDsArraySize - 1); I >= 0; I--)
      TSDs[I].unlock();
    Mutex.unlock();
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::MaxTSDsCount)
      return setNumberOfTSDs(static_cast<u32>(Value));
    // Not supported by the TSD Registry, but not an error either.
    return true;
  }

private:
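  // The pointer to the current thread's TSD is kept in platform-specific
  // thread-local storage: the slot returned by getAndroidTlsPtr() on Bionic,
  // the static THREADLOCAL ThreadTSD variable on other Linux configurations,
  // and the PThreadKey pthread key everywhere else.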
  ALWAYS_INLINE void setCurrentTSD(TSD<Allocator> *CurrentTSD) {
#if _BIONIC
    *getAndroidTlsPtr() = reinterpret_cast<uptr>(CurrentTSD);
#elif SCUDO_LINUX
    ThreadTSD = CurrentTSD;
#else
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(CurrentTSD)),
        0);
#endif
  }

  ALWAYS_INLINE TSD<Allocator> *getCurrentTSD() {
#if _BIONIC
    return reinterpret_cast<TSD<Allocator> *>(*getAndroidTlsPtr());
#elif SCUDO_LINUX
    return ThreadTSD;
#else
    return reinterpret_cast<TSD<Allocator> *>(pthread_getspecific(PThreadKey));
#endif
  }

  bool setNumberOfTSDs(u32 N) {
    ScopedLock L(MutexTSDs);
    if (N < NumberOfTSDs)
      return false;
    if (N > TSDsArraySize)
      N = TSDsArraySize;
    NumberOfTSDs = N;
    NumberOfCoPrimes = 0;
    // Compute all the coprimes of NumberOfTSDs. This will be used to walk the
    // array of TSDs in a random order. For details, see:
    // https://lemire.me/blog/2017/09/18/visiting-all-values-in-an-array-exactly-once-in-random-order/
    for (u32 I = 0; I < N; I++) {
      u32 A = I + 1;
      u32 B = N;
      // Find the GCD between I + 1 and N. If 1, they are coprime.
      while (B != 0) {
        const u32 T = A;
        A = B;
        B = T % B;
      }
      if (A == 1)
        CoPrimes[NumberOfCoPrimes++] = I + 1;
    }
    return true;
  }

  void initOnceMaybe(Allocator *Instance) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    initLinkerInitialized(Instance); // Sets Initialized.
  }

  NOINLINE void initThread(Allocator *Instance) {
    initOnceMaybe(Instance);
    // Initial context assignment is done in a plain round-robin fashion.
    const u32 Index = atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
    setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
    Instance->callPostInitCallback();
  }

  NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD) {
    // Use the Precedence of the current TSD as our random seed. Since we are
    // in the slow path, it means that tryLock failed, and as a result it's
    // very likely that said Precedence is non-zero.
    const u32 R = static_cast<u32>(CurrentTSD->getPrecedence());
    u32 N, Inc;
    {
      ScopedLock L(MutexTSDs);
      N = NumberOfTSDs;
      DCHECK_NE(NumberOfCoPrimes, 0U);
      Inc = CoPrimes[R % NumberOfCoPrimes];
    }
    if (N > 1U) {
      u32 Index = R % N;
      uptr LowestPrecedence = UINTPTR_MAX;
      TSD<Allocator> *CandidateTSD = nullptr;
      // Go randomly through at most 4 contexts and find a candidate.
      for (u32 I = 0; I < Min(4U, N); I++) {
        if (TSDs[Index].tryLock()) {
          setCurrentTSD(&TSDs[Index]);
          return &TSDs[Index];
        }
        const uptr Precedence = TSDs[Index].getPrecedence();
        // A 0 precedence here means another thread just locked this TSD.
        if (Precedence && Precedence < LowestPrecedence) {
          CandidateTSD = &TSDs[Index];
          LowestPrecedence = Precedence;
        }
        Index += Inc;
        if (Index >= N)
          Index -= N;
      }
      if (CandidateTSD) {
        CandidateTSD->lock();
        setCurrentTSD(CandidateTSD);
        return CandidateTSD;
      }
    }
    // Last resort, stick with the current one.
    CurrentTSD->lock();
    return CurrentTSD;
  }

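  // TSDs[] holds the contexts shared by all threads. CurrentIndex drives the
  // round-robin assignment in initThread(), and CoPrimes[] holds the
  // increments used by getTSDAndLockSlow() to walk TSDs[] in a pseudo-random
  // order. Mutex guards initialization and disable()/enable(), while
  // MutexTSDs guards NumberOfTSDs and the coprime table.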
  pthread_key_t PThreadKey;
  atomic_u32 CurrentIndex;
  u32 NumberOfTSDs;
  u32 NumberOfCoPrimes;
  u32 CoPrimes[TSDsArraySize];
  bool Initialized;
  HybridMutex Mutex;
  HybridMutex MutexTSDs;
  TSD<Allocator> TSDs[TSDsArraySize];
#if SCUDO_LINUX && !_BIONIC
  static THREADLOCAL TSD<Allocator> *ThreadTSD;
#endif
};

#if SCUDO_LINUX && !_BIONIC
template <class Allocator, u32 TSDsArraySize, u32 DefaultTSDCount>
THREADLOCAL TSD<Allocator>
    *TSDRegistrySharedT<Allocator, TSDsArraySize, DefaultTSDCount>::ThreadTSD;
#endif

} // namespace scudo

#endif // SCUDO_TSD_SHARED_H_