// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif

#include "base/threading/thread_local_storage.h"

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstring>

#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "base/notreached.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_MAC) && defined(ARCH_CPU_X86_64)
#include <pthread.h>
#include <type_traits>
#endif

using base::internal::PlatformThreadLocalStorage;

// Chrome Thread Local Storage (TLS)
//
// This TLS system allows Chrome to use a single OS level TLS slot process-wide,
// and allows us to control the slot limits instead of being at the mercy of the
// platform. To do this, Chrome TLS replicates an array commonly found in the OS
// thread metadata.
//
// Overview:
//
// OS TLS Slots       Per-Thread                 Per-Process Global
//     ...
//     []             Chrome TLS Array           Chrome TLS Metadata
//     [] ----------> [][][][][ ][][][][]        [][][][][ ][][][][]
//     []                      |                          |
//     ...                     V                          V
//                      Metadata Version           Slot Information
//                         Your Data!
//
// Using a single OS TLS slot, Chrome TLS allocates an array on demand for the
// lifetime of each thread that requests Chrome TLS data. Each per-thread TLS
// array matches the length of the per-process global metadata array.
//
// A per-process global TLS metadata array tracks information about each item in
// the per-thread array:
//   * Status: Tracks if the slot is allocated or free to assign.
//   * Destructor: An optional destructor to call on thread destruction for that
//                 specific slot.
//   * Version: Tracks the current version of the TLS slot. Each TLS slot
//              allocation is associated with a unique version number.
//
//              Most OS TLS APIs guarantee that a newly allocated TLS slot is
//              initialized to 0 for all threads. The Chrome TLS system provides
//              this guarantee by tracking the version for each TLS slot here
//              on each per-thread Chrome TLS array entry. Threads that access
//              a slot with a mismatched version will receive 0 as their value.
//              The metadata version is incremented when the client frees a
//              slot. The per-thread metadata version is updated when a client
//              writes to the slot. This scheme allows for constant time
//              invalidation and avoids the need to iterate through each Chrome
//              TLS array to mark the slot as zero.
//
// Just like an OS TLS API, clients of the Chrome TLS are responsible for
// managing any necessary lifetime of the data in their slots. The only
// convenience provided is automatic destruction when a thread ends. If a client
// frees a slot, that client is responsible for destroying the data in the slot.

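// A minimal usage sketch (illustrative only, not part of this file): `Foo`,
// `GetFooSlot()`, and the use of base::NoDestructor are assumptions made for
// the example. A Slot behaves like an OS TLS key whose destructor runs
// automatically on thread exit:
//
//   struct Foo { int value = 0; };
//
//   base::ThreadLocalStorage::Slot& GetFooSlot() {
//     static base::NoDestructor<base::ThreadLocalStorage::Slot> slot(
//         [](void* foo) { delete static_cast<Foo*>(foo); });
//     return *slot;
//   }
//
//   Foo* GetThreadFoo() {
//     Foo* foo = static_cast<Foo*>(GetFooSlot().Get());
//     if (!foo) {
//       foo = new Foo;
//       GetFooSlot().Set(foo);  // Stamps the slot's current version.
//     }
//     return foo;
//   }
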
namespace {
// In order to make TLS destructors work, we need to keep around a function
// pointer to the destructor for each slot. We keep this array of pointers in a
// global (static) array.
// We use the single OS-level TLS slot (giving us one pointer per thread) to
// hold a pointer to a per-thread array (table) of slots that we allocate to
// Chromium consumers.

// g_native_tls_key is the one native TLS that we use. It stores our table.

std::atomic<PlatformThreadLocalStorage::TLSKey> g_native_tls_key{
    PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES};

// The OS TLS slot has the following states. The TLS slot's lower 2 bits contain
// the state, the upper bits the TlsVectorEntry*.
//   * kUninitialized: Any call to Slot::Get()/Set() will create the base
//     per-thread TLS state. kUninitialized must be null.
//   * kInUse: value has been created and is in use.
//   * kDestroying: Set when the thread is exiting prior to deleting any of the
//     values stored in the TlsVectorEntry*. This state is necessary so that
//     sequence/task checks won't be done while in the process of deleting the
//     tls entries (see comments in SequenceCheckerImpl for more details).
//   * kDestroyed: All of the values in the vector have been deallocated and
//     the TlsVectorEntry has been deleted.
//
// Final States:
//   * Windows: kDestroyed. Windows does not iterate through the OS TLS to clean
//     up the values.
//   * POSIX: kUninitialized. POSIX iterates through TLS until all slots contain
//     nullptr.
//
// More details on this design:
//   We need some type of thread-local state to indicate that the TLS system has
//   been destroyed. To do so, we leverage the multi-pass nature of destruction
//   of pthread_key.
//
//    a) After destruction of the TLS system, we set the pthread_key to the
//       sentinel kDestroyed.
//    b) All calls to Slot::Get() DCHECK that the state is not kDestroyed, and
//       any system which might potentially invoke Slot::Get() after destruction
//       of TLS must check ThreadLocalStorage::ThreadIsBeingDestroyed().
//    c) After a full pass of the pthread_keys, on the next invocation of
//       ConstructTlsVector(), we'll then set the key to nullptr.
//    d) At this stage, the TLS system is back in its uninitialized state.
//    e) If in the second pass of destruction of pthread_keys something were to
//       re-initialize TLS [this should never happen! Since the only code which
//       uses Chrome TLS is Chrome controlled, we should really be striving for
//       single-pass destruction], then TLS will be re-initialized and then go
//       through the 2-pass destruction system again. Everything should just
//       work (TM).

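// Summarizing the transitions above, a thread's slot value moves through the
// following states on POSIX, where the destruction passes eventually reset the
// slot:
//
//   kUninitialized -> kInUse -> kDestroying -> kDestroyed -> kUninitialized
//
// On Windows the value simply ends at kDestroyed.
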
// The state of the tls-entry.
enum class TlsVectorState {
  kUninitialized = 0,

  // In the process of destroying the entries in the vector.
  kDestroying,

  // All of the entries and the vector itself have been destroyed.
  kDestroyed,

  // The vector has been initialized and is in use.
  kInUse,

  kMaxValue = kInUse
};

// Bit-mask used to store TlsVectorState.
constexpr uintptr_t kVectorStateBitMask = 3;
static_assert(static_cast<int>(TlsVectorState::kMaxValue) <=
                  kVectorStateBitMask,
              "number of states must fit in header");
static_assert(static_cast<int>(TlsVectorState::kUninitialized) == 0,
              "kUninitialized must be null");

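// Worked example of the packing (the pointer value is hypothetical): a
// TlsVectorEntry* is at least 4-byte aligned, so its low two bits are always
// zero and can carry the state. Packing a vector at 0x7f0000001230 with
// TlsVectorState::kInUse (3) stores 0x7f0000001233 in the OS slot; masking
// with ~kVectorStateBitMask recovers the pointer, and masking with
// kVectorStateBitMask recovers the state.
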
// The maximum number of slots in our thread local storage stack.
constexpr size_t kThreadLocalStorageSize = 256;

enum TlsStatus {
  FREE,
  IN_USE,
};

struct TlsMetadata {
  TlsStatus status;
  base::ThreadLocalStorage::TLSDestructorFunc destructor;
  // Incremented every time a slot is reused. Used to detect reuse of slots.
  uint32_t version;
  // Tracks slot creation order. Used to destroy slots in the reverse order:
  // from last created to first created.
  uint32_t sequence_num;
};

struct TlsVectorEntry {
  // `data` is not a raw_ptr<...> for performance reasons (based on analysis of
  // sampling profiler data and tab_search:top100:2020).
  RAW_PTR_EXCLUSION void* data;

  uint32_t version;
};

// This lock isn't needed until after we've constructed the per-thread TLS
// vector, so it's safe to use.
base::Lock* GetTLSMetadataLock() {
  static auto* lock = new base::Lock();
  return lock;
}
TlsMetadata g_tls_metadata[kThreadLocalStorageSize];
size_t g_last_assigned_slot = 0;
uint32_t g_sequence_num = 0;

// The maximum number of times to try to clear slots by calling destructors.
// Use pthread naming convention for clarity.
constexpr size_t kMaxDestructorIterations = kThreadLocalStorageSize;

// Sets the value and state of the vector.
void SetTlsVectorValue(PlatformThreadLocalStorage::TLSKey key,
                       TlsVectorEntry* tls_data,
                       TlsVectorState state) {
  DCHECK(tls_data || (state == TlsVectorState::kUninitialized) ||
         (state == TlsVectorState::kDestroyed));
  PlatformThreadLocalStorage::SetTLSValue(
      key, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(tls_data) |
                                   static_cast<uintptr_t>(state)));
}

// Returns the tls vector and current state from the raw tls value.
TlsVectorState GetTlsVectorStateAndValue(void* tls_value,
                                         TlsVectorEntry** entry = nullptr) {
  if (entry) {
    *entry = reinterpret_cast<TlsVectorEntry*>(
        reinterpret_cast<uintptr_t>(tls_value) & ~kVectorStateBitMask);
  }
  return static_cast<TlsVectorState>(reinterpret_cast<uintptr_t>(tls_value) &
                                     kVectorStateBitMask);
}

// Returns the tls vector and state using the tls key.
TlsVectorState GetTlsVectorStateAndValue(PlatformThreadLocalStorage::TLSKey key,
                                         TlsVectorEntry** entry = nullptr) {
// Only on x86_64: the implementation is not stable on ARM64. For instance, in
// macOS 11 the TPIDRRO_EL0 register holds the CPU index in the low bits,
// which is not the case in macOS 12. See libsyscall/os/tsd.h in XNU
// (_os_tsd_get_direct() is used by pthread_getspecific() internally).
#if BUILDFLAG(IS_MAC) && defined(ARCH_CPU_X86_64)
  // On macOS, pthread_getspecific() is in libSystem, so a call to it has to go
  // through PLT. However, and contrary to some other platforms, *all* TLS keys
  // are in a static array in the thread structure. So they are *always* at a
  // fixed offset from the segment register holding the thread structure
  // address.
  //
  // We could use _pthread_getspecific_direct(), but it is not
  // exported. However, on all macOS versions we support, the TLS array is at
  // %gs. This is used in V8 and PartitionAlloc, and can also be seen by looking
  // at pthread_getspecific() disassembly:
  //
  // libsystem_pthread.dylib`pthread_getspecific:
  // libsystem_pthread.dylib[0x7ff800316099] <+0>: movq   %gs:(,%rdi,8), %rax
  // libsystem_pthread.dylib[0x7ff8003160a2] <+9>: retq
  //
  // This function essentially inlines the content of pthread_getspecific()
  // here.
  //
  // Note that this likely ends up being even faster than thread_local for
  // typical Chromium builds where the code is in a dynamic library. For the
  // static executable case, this is likely equivalent.
  static_assert(
      std::is_same_v<PlatformThreadLocalStorage::TLSKey, pthread_key_t>,
      "The special-case below assumes that the platform TLS implementation is "
      "pthread.");

  intptr_t platform_tls_value;
  asm("movq %%gs:(,%1,8), %0;" : "=r"(platform_tls_value) : "r"(key));

  return GetTlsVectorStateAndValue(reinterpret_cast<void*>(platform_tls_value),
                                   entry);
#else
  return GetTlsVectorStateAndValue(PlatformThreadLocalStorage::GetTLSValue(key),
                                   entry);
#endif
}

// This function is called to initialize our entire Chromium TLS system.
// It may be called very early, and we need to complete almost all of the setup
// (initialization) before calling *any* memory allocator functions, which may
// recursively depend on this initialization.
// As a result, we use Atomics, and avoid anything (like a singleton) that might
// require memory allocations.
TlsVectorEntry* ConstructTlsVector() {
  PlatformThreadLocalStorage::TLSKey key =
      g_native_tls_key.load(std::memory_order_relaxed);
  if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) {
    CHECK(PlatformThreadLocalStorage::AllocTLS(&key));

    // TLS_KEY_OUT_OF_INDEXES is used to find out whether the key has been set
    // in the compare-and-swap below, but POSIX has no reserved invalid key, so
    // we define an almost impossible value to serve as one.
    // If we really do get TLS_KEY_OUT_OF_INDEXES as the value of the key, just
    // allocate another TLS slot.
    if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) {
      PlatformThreadLocalStorage::TLSKey tmp = key;
      CHECK(PlatformThreadLocalStorage::AllocTLS(&key) &&
            key != PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES);
      PlatformThreadLocalStorage::FreeTLS(tmp);
    }
    // Atomically test-and-set the tls_key. If the key is
    // TLS_KEY_OUT_OF_INDEXES, go ahead and set it. Otherwise, do nothing, as
    // another thread already did our dirty work.
    PlatformThreadLocalStorage::TLSKey old_key =
        PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES;
    if (!g_native_tls_key.compare_exchange_strong(old_key, key,
                                                  std::memory_order_relaxed,
                                                  std::memory_order_relaxed)) {
      // We've been shortcut. Another thread replaced g_native_tls_key first so
      // we need to destroy our index and use the one the other thread got
      // first.
      PlatformThreadLocalStorage::FreeTLS(key);
      key = g_native_tls_key.load(std::memory_order_relaxed);
    }
  }
  CHECK_EQ(GetTlsVectorStateAndValue(key), TlsVectorState::kUninitialized);

  // Some allocators, such as TCMalloc, make use of thread local storage. As a
  // result, any attempt to call new (or malloc) will lazily cause such a system
  // to initialize, which will include registering for a TLS key. If we are not
  // careful here, then that request to create a key will call new back, and
  // we'll have an infinite loop. We avoid that as follows: Use a stack
  // allocated vector, so that we don't have a dependence on our allocator until
  // our service is in place. (i.e., don't even call new until after we're
  // set up.)
  TlsVectorEntry stack_allocated_tls_data[kThreadLocalStorageSize];
  memset(stack_allocated_tls_data, 0, sizeof(stack_allocated_tls_data));
  // Ensure that any re-entrant calls change the temp version.
  SetTlsVectorValue(key, stack_allocated_tls_data, TlsVectorState::kInUse);

  // Allocate an array to store our data.
  TlsVectorEntry* tls_data = new TlsVectorEntry[kThreadLocalStorageSize];
  memcpy(tls_data, stack_allocated_tls_data, sizeof(stack_allocated_tls_data));
  SetTlsVectorValue(key, tls_data, TlsVectorState::kInUse);
  return tls_data;
}
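
// Illustrative sequence of the bootstrap above (the re-entrant allocator is an
// assumption for the example, not something this file guarantees happens):
//
//   ConstructTlsVector()
//     SetTlsVectorValue(key, stack_allocated_tls_data, kInUse)
//     new TlsVectorEntry[...]   // If the allocator itself calls Slot::Get()
//                               // or Slot::Set() here, it finds the stack
//                               // array and does not recurse into
//                               // ConstructTlsVector().
//     memcpy(heap <- stack)     // Captures any re-entrant writes.
//     SetTlsVectorValue(key, tls_data, kInUse)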

void OnThreadExitInternal(TlsVectorEntry* tls_data) {
  DCHECK(tls_data);
  // Some allocators, such as TCMalloc, use TLS. As a result, when a thread
  // terminates, one of the destructor calls we make may be to shut down an
  // allocator. We have to be careful that after we've shut down all of the
  // known destructors (perchance including an allocator), we don't call the
  // allocator and cause it to resurrect itself (with no possible destructor
  // call to follow). We handle this problem as follows: Switch to using a stack
  // allocated vector, so that we don't have a dependence on our allocator after
  // we have called all g_tls_metadata destructors. (i.e., don't even call
  // delete[] after we're done with destructors.)
  TlsVectorEntry stack_allocated_tls_data[kThreadLocalStorageSize];
  memcpy(stack_allocated_tls_data, tls_data, sizeof(stack_allocated_tls_data));
  // Ensure that any re-entrant calls change the temp version.
  PlatformThreadLocalStorage::TLSKey key =
      g_native_tls_key.load(std::memory_order_relaxed);
  SetTlsVectorValue(key, stack_allocated_tls_data, TlsVectorState::kDestroying);
  delete[] tls_data;  // Our last dependence on an allocator.

  size_t remaining_attempts = kMaxDestructorIterations + 1;
  bool need_to_scan_destructors = true;
  while (need_to_scan_destructors) {
    need_to_scan_destructors = false;

    // Snapshot the TLS Metadata so we don't have to lock on every access.
    TlsMetadata tls_metadata[kThreadLocalStorageSize];
    {
      base::AutoLock auto_lock(*GetTLSMetadataLock());
      memcpy(tls_metadata, g_tls_metadata, sizeof(g_tls_metadata));
    }

    // We destroy slots in reverse order (i.e. destroy the first-created slot
    // last), for the following reasons:
    // 1) Slots that are created early belong to basic services (like an
    // allocator) and might have to be recreated by destructors of other
    // services. So we save iterations here by destroying them last.
    // 2) The Perfetto tracing service allocates a slot early and relies on it
    // to keep emitting trace events while destructors of other slots are
    // called, so it's important to keep it alive to avoid use-after-free
    // errors.
    // To achieve this, we sort all slots in order of decreasing sequence
    // numbers.
    struct OrderedSlot {
      uint32_t sequence_num;
      uint16_t slot;
    } slot_destruction_order[kThreadLocalStorageSize];
    for (uint16_t i = 0; i < kThreadLocalStorageSize; ++i) {
      slot_destruction_order[i].sequence_num = tls_metadata[i].sequence_num;
      slot_destruction_order[i].slot = i;
    }
    std::sort(std::begin(slot_destruction_order),
              std::end(slot_destruction_order),
              [](const OrderedSlot& s1, const OrderedSlot& s2) {
                return s1.sequence_num > s2.sequence_num;
              });

    for (const auto& ordered_slot : slot_destruction_order) {
      size_t slot = ordered_slot.slot;
      void* tls_value = stack_allocated_tls_data[slot].data;
      if (!tls_value || tls_metadata[slot].status == TlsStatus::FREE ||
          stack_allocated_tls_data[slot].version != tls_metadata[slot].version)
        continue;

      base::ThreadLocalStorage::TLSDestructorFunc destructor =
          tls_metadata[slot].destructor;
      if (!destructor)
        continue;
      stack_allocated_tls_data[slot].data = nullptr;  // pre-clear the slot.
      destructor(tls_value);
      // Any destructor might have called a different service, which then set a
      // different slot to a non-null value. Hence we need to check the whole
      // vector again. This mirrors the re-scan behavior that the pthread
      // standard requires of destructor passes.
      need_to_scan_destructors = true;
    }

    if (--remaining_attempts == 0) {
      NOTREACHED();  // Destructors might not have been called.
    }
  }

  // Remove our stack allocated vector.
  SetTlsVectorValue(key, nullptr, TlsVectorState::kDestroyed);
}
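
// Worked example of the ordering (slot numbers and services are hypothetical):
// if an allocator grabbed slot 3 first (sequence_num 1) and a tracing service
// grabbed slot 7 next (sequence_num 2), the sort above yields the destruction
// order {7, 3}, so the tracing slot's destructor runs while the allocator's
// slot is still intact, and the allocator is torn down last.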

}  // namespace

namespace base {

namespace internal {

#if BUILDFLAG(IS_WIN)
void PlatformThreadLocalStorage::OnThreadExit() {
  PlatformThreadLocalStorage::TLSKey key =
      g_native_tls_key.load(std::memory_order_relaxed);
  if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES)
    return;
  TlsVectorEntry* tls_vector = nullptr;
  const TlsVectorState state = GetTlsVectorStateAndValue(key, &tls_vector);

  // On Windows, thread destruction callbacks are only invoked once per module,
  // so there should be no way that this could be invoked twice.
  DCHECK_NE(state, TlsVectorState::kDestroyed);

  // Maybe we have never initialized TLS for this thread.
  if (state == TlsVectorState::kUninitialized)
    return;
  OnThreadExitInternal(tls_vector);
}
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
void PlatformThreadLocalStorage::OnThreadExit(void* value) {
  // On POSIX this function may be called twice. The first pass calls dtors and
  // sets the state to kDestroyed. The second pass resets kDestroyed to
  // kUninitialized.
  TlsVectorEntry* tls_vector = nullptr;
  const TlsVectorState state = GetTlsVectorStateAndValue(value, &tls_vector);
  if (state == TlsVectorState::kDestroyed) {
    PlatformThreadLocalStorage::TLSKey key =
        g_native_tls_key.load(std::memory_order_relaxed);
    SetTlsVectorValue(key, nullptr, TlsVectorState::kUninitialized);
    return;
  }

  OnThreadExitInternal(tls_vector);
}
#endif  // BUILDFLAG(IS_WIN)

}  // namespace internal

// static
bool ThreadLocalStorage::HasBeenDestroyed() {
  PlatformThreadLocalStorage::TLSKey key =
      g_native_tls_key.load(std::memory_order_relaxed);
  if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES)
    return false;
  const TlsVectorState state = GetTlsVectorStateAndValue(key);
  return state == TlsVectorState::kDestroying ||
         state == TlsVectorState::kDestroyed;
}

void ThreadLocalStorage::Slot::Initialize(TLSDestructorFunc destructor) {
  PlatformThreadLocalStorage::TLSKey key =
      g_native_tls_key.load(std::memory_order_relaxed);
  if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES ||
      GetTlsVectorStateAndValue(key) == TlsVectorState::kUninitialized) {
    ConstructTlsVector();
  }

  // Grab a new slot.
  {
    base::AutoLock auto_lock(*GetTLSMetadataLock());
    for (size_t i = 0; i < kThreadLocalStorageSize; ++i) {
      // Tracking the last assigned slot is an attempt to find the next
      // available slot within one iteration. Under normal usage, slots remain
      // in use for the lifetime of the process (otherwise before we reclaimed
      // slots, we would have run out of slots). This makes it highly likely the
      // next slot is going to be a free slot.
      size_t slot_candidate =
          (g_last_assigned_slot + 1 + i) % kThreadLocalStorageSize;
      if (g_tls_metadata[slot_candidate].status == TlsStatus::FREE) {
        g_tls_metadata[slot_candidate].status = TlsStatus::IN_USE;
        g_tls_metadata[slot_candidate].destructor = destructor;
        g_tls_metadata[slot_candidate].sequence_num = ++g_sequence_num;
        g_last_assigned_slot = slot_candidate;
        DCHECK_EQ(kInvalidSlotValue, slot_);
        slot_ = slot_candidate;
        version_ = g_tls_metadata[slot_candidate].version;
        break;
      }
    }
  }
  CHECK_LT(slot_, kThreadLocalStorageSize);
}
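
// Worked example of the probe order (the starting value is hypothetical): with
// kThreadLocalStorageSize == 256 and g_last_assigned_slot == 254, Initialize()
// above probes slots 255, 0, 1, ..., 254, visiting each slot exactly once; the
// CHECK_LT fires only if all 256 slots are already IN_USE.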

void ThreadLocalStorage::Slot::Free() {
  DCHECK_LT(slot_, kThreadLocalStorageSize);
  {
    base::AutoLock auto_lock(*GetTLSMetadataLock());
    g_tls_metadata[slot_].status = TlsStatus::FREE;
    g_tls_metadata[slot_].destructor = nullptr;
    ++(g_tls_metadata[slot_].version);
  }
  slot_ = kInvalidSlotValue;
}

void* ThreadLocalStorage::Slot::Get() const {
  TlsVectorEntry* tls_data = nullptr;
  const TlsVectorState state = GetTlsVectorStateAndValue(
      g_native_tls_key.load(std::memory_order_relaxed), &tls_data);
  DCHECK_NE(state, TlsVectorState::kDestroyed);
  if (!tls_data)
    return nullptr;
  DCHECK_LT(slot_, kThreadLocalStorageSize);
  // A version mismatch means this slot was previously freed.
  if (tls_data[slot_].version != version_)
    return nullptr;
  return tls_data[slot_].data;
}

void ThreadLocalStorage::Slot::Set(void* value) {
  TlsVectorEntry* tls_data = nullptr;
  const TlsVectorState state = GetTlsVectorStateAndValue(
      g_native_tls_key.load(std::memory_order_relaxed), &tls_data);
  DCHECK_NE(state, TlsVectorState::kDestroyed);
  if (!tls_data) [[unlikely]] {
    if (!value)
      return;
    tls_data = ConstructTlsVector();
  }
  DCHECK_LT(slot_, kThreadLocalStorageSize);
  tls_data[slot_].data = value;
  tls_data[slot_].version = version_;
}

ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
  Initialize(destructor);
}

ThreadLocalStorage::Slot::~Slot() {
  Free();
}

}  // namespace base