//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"

namespace scudo {

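// Per-thread state, packed into bit-fields. InitState moves from
// NotInitialized to Initialized on a thread's first use of the allocator, and
// to TornDown once the thread's pthread key destructor has run.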
struct ThreadState {
  bool DisableMemInit : 1;
  enum {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};

template <class Allocator> void teardownThread(void *Ptr);

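// The exclusive TSD registry gives each thread its own TSD, stored in a
// `thread_local` variable, so the common path requires no locking. Threads
// that are only minimally initialized (or all threads while the registry is
// disabled) fall back to a single shared TSD protected by its own lock.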
27 template <class Allocator> struct TSDRegistryExT {
initLinkerInitializedTSDRegistryExT28 void initLinkerInitialized(Allocator *Instance) {
29 Instance->initLinkerInitialized();
30 CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
31 FallbackTSD.initLinkerInitialized(Instance);
32 Initialized = true;
33 }
  void init(Allocator *Instance) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(Instance);
  }

  void unmapTestOnly() {}

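  // Fast-path initialization check: a single branch on the thread-local
  // InitState. The NOINLINE slow path (initThread) is kept out of line so the
  // common case stays cheap.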
  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State.InitState != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

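  // Grabs the TSD to use for an operation: lock-free when the calling thread
  // is fully initialized and the registry is not disabled, otherwise the
  // shared fallback TSD is locked and returned. *UnlockRequired tells the
  // caller whether unlock() must be called on the returned TSD afterwards. A
  // sketch of the expected caller-side pattern, with illustrative names (the
  // real callers live in the combined allocator):
  //
  //   bool UnlockRequired;
  //   TSD<Allocator> *TSD = Registry.getTSDAndLock(&UnlockRequired);
  //   // ... use TSD->Cache ...
  //   if (UnlockRequired)
  //     TSD->unlock();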
  ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
    if (LIKELY(State.InitState == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }

  // To disable the exclusive TSD registry, we effectively lock the fallback
  // TSD and force all threads to attempt to use it instead of their local one.
  void disable() {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

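  // Re-enabling mirrors disable(): the locks are released in the reverse
  // order of their acquisition.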
  void enable() {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }

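  // MaxTSDsCount is reported as unsupported: with the exclusive model there
  // is exactly one TSD per thread, so there is no TSD pool to resize.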
  bool setOption(Option O, sptr Value) {
    if (O == Option::ThreadDisableMemInit)
      State.DisableMemInit = Value;
    if (O == Option::MaxTSDsCount)
      return false;
    return true;
  }

  bool getDisableMemInit() { return State.DisableMemInit; }

private:
  void initOnceMaybe(Allocator *Instance) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    initLinkerInitialized(Instance); // Sets Initialized.
  }

  // Using minimal initialization allows for global initialization while
  // keeping the thread-specific structure untouched. The fallback structure
  // will be used instead.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)),
        0);
    ThreadTSD.initLinkerInitialized(Instance);
    State.InitState = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }

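  // PThreadKey, Initialized and FallbackTSD are per-registry; State and
  // ThreadTSD are `static thread_local`, so each thread gets its own copy.
  // Mutex serializes initOnceMaybe() as well as disable()/enable().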
  pthread_key_t PThreadKey;
  bool Initialized;
  atomic_u8 Disabled;
  TSD<Allocator> FallbackTSD;
  HybridMutex Mutex;
  static thread_local ThreadState State;
  static thread_local TSD<Allocator> ThreadTSD;

  friend void teardownThread<Allocator>(void *Ptr);
};

template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;

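// Thread teardown routine, registered as the pthread key destructor in
// initLinkerInitialized() and invoked by the C library when a thread exits.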
template <class Allocator> void teardownThread(void *Ptr) {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls
  // user-provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and swallowing the cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we will go ahead with the teardown.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State.InitState = ThreadState::TornDown;
}

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_