• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
8 
9 #ifndef SCUDO_TSD_EXCLUSIVE_H_
10 #define SCUDO_TSD_EXCLUSIVE_H_
11 
12 #include "tsd.h"
13 
14 namespace scudo {
15 
// Per-thread bookkeeping kept in a static thread_local (see
// TSDRegistryExT::State) so it remains readable even after the thread's TSD
// has been torn down.
struct ThreadState {
  // Consulted via getDisableMemInit(); set through
  // Option::ThreadDisableMemInit — presumably gates per-thread memory
  // initialization of allocations (TODO confirm against the allocator).
  bool DisableMemInit : 1;
  // Lifecycle of the thread's exclusive TSD: NotInitialized until
  // initThread() runs, Initialized while usable, TornDown once the pthread
  // destructor has drained it (after which only the fallback TSD is used).
  enum {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};
24 
25 template <class Allocator> void teardownThread(void *Ptr);
26 
27 template <class Allocator> struct TSDRegistryExT {
initLinkerInitializedTSDRegistryExT28   void initLinkerInitialized(Allocator *Instance) {
29     Instance->initLinkerInitialized();
30     CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
31     FallbackTSD.initLinkerInitialized(Instance);
32     Initialized = true;
33   }
initTSDRegistryExT34   void init(Allocator *Instance) {
35     memset(this, 0, sizeof(*this));
36     initLinkerInitialized(Instance);
37   }
38 
initOnceMaybeTSDRegistryExT39   void initOnceMaybe(Allocator *Instance) {
40     ScopedLock L(Mutex);
41     if (LIKELY(Initialized))
42       return;
43     initLinkerInitialized(Instance); // Sets Initialized.
44   }
45 
unmapTestOnlyTSDRegistryExT46   void unmapTestOnly() {
47     Allocator *Instance =
48         reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey));
49     if (!Instance)
50       return;
51     ThreadTSD.commitBack(Instance);
52     State = {};
53   }
54 
initThreadMaybeTSDRegistryExT55   ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
56     if (LIKELY(State.InitState != ThreadState::NotInitialized))
57       return;
58     initThread(Instance, MinimalInit);
59   }
60 
getTSDAndLockTSDRegistryExT61   ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
62     if (LIKELY(State.InitState == ThreadState::Initialized &&
63                !atomic_load(&Disabled, memory_order_acquire))) {
64       *UnlockRequired = false;
65       return &ThreadTSD;
66     }
67     FallbackTSD.lock();
68     *UnlockRequired = true;
69     return &FallbackTSD;
70   }
71 
72   // To disable the exclusive TSD registry, we effectively lock the fallback TSD
73   // and force all threads to attempt to use it instead of their local one.
disableTSDRegistryExT74   void disable() {
75     Mutex.lock();
76     FallbackTSD.lock();
77     atomic_store(&Disabled, 1U, memory_order_release);
78   }
79 
enableTSDRegistryExT80   void enable() {
81     atomic_store(&Disabled, 0U, memory_order_release);
82     FallbackTSD.unlock();
83     Mutex.unlock();
84   }
85 
setOptionTSDRegistryExT86   bool setOption(Option O, UNUSED sptr Value) {
87     if (O == Option::ThreadDisableMemInit)
88       State.DisableMemInit = Value;
89     if (O == Option::MaxTSDsCount)
90       return false;
91     return true;
92   }
93 
getDisableMemInitTSDRegistryExT94   bool getDisableMemInit() { return State.DisableMemInit; }
95 
96 private:
97   // Using minimal initialization allows for global initialization while keeping
98   // the thread specific structure untouched. The fallback structure will be
99   // used instead.
initThreadTSDRegistryExT100   NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
101     initOnceMaybe(Instance);
102     if (UNLIKELY(MinimalInit))
103       return;
104     CHECK_EQ(
105         pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
106     ThreadTSD.initLinkerInitialized(Instance);
107     State.InitState = ThreadState::Initialized;
108     Instance->callPostInitCallback();
109   }
110 
111   pthread_key_t PThreadKey = {};
112   bool Initialized = false;
113   atomic_u8 Disabled = {};
114   TSD<Allocator> FallbackTSD;
115   HybridMutex Mutex;
116   static thread_local ThreadState State;
117   static thread_local TSD<Allocator> ThreadTSD;
118 
119   friend void teardownThread<Allocator>(void *Ptr);
120 };
121 
// Out-of-line definitions for the static thread_local members declared in
// TSDRegistryExT.
template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;
126 
teardownThread(void * Ptr)127 template <class Allocator> void teardownThread(void *Ptr) {
128   typedef TSDRegistryExT<Allocator> TSDRegistryT;
129   Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
130   // The glibc POSIX thread-local-storage deallocation routine calls user
131   // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
132   // We want to be called last since other destructors might call free and the
133   // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
134   // quarantine and swallowing the cache.
135   if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
136     TSDRegistryT::ThreadTSD.DestructorIterations--;
137     // If pthread_setspecific fails, we will go ahead with the teardown.
138     if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
139                                    Ptr) == 0))
140       return;
141   }
142   TSDRegistryT::ThreadTSD.commitBack(Instance);
143   TSDRegistryT::State.InitState = ThreadState::TornDown;
144 }
145 
146 } // namespace scudo
147 
148 #endif // SCUDO_TSD_EXCLUSIVE_H_
149