/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>

#include "private/bionic_tls.h"
#include "pthread_internal.h"

typedef void (*key_destructor_t)(void*);

#define SEQ_KEY_IN_USE_BIT 0

#define SEQ_INCREMENT_STEP (1 << SEQ_KEY_IN_USE_BIT)

// pthread_key_internal_t records the use of each pthread key slot:
//   seq records the state of the slot.
//     bit 0 is 1 when the key is in use, 0 when it is unused. Each time we create or delete the
//     pthread key in the slot, we increase the seq by 1 (which inverts bit 0). The reason to use
//     a sequence number instead of a boolean value here is that when the key slot is deleted and
//     reused for a new key, pthread_getspecific will not return stale data.
//   key_destructor records the destructor called at thread exit.
struct pthread_key_internal_t {
  atomic_uintptr_t seq;
  atomic_uintptr_t key_destructor;
};
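
// For example, a slot's seq evolves as:
//   0 (unused) -> 1 (key A created) -> 2 (key A deleted) -> 3 (key B created) -> ...
// A thread that stored a value under key A recorded seq == 1 in its key_data entry; once
// the slot is reused for key B (seq == 3), that stale entry no longer matches, so
// pthread_getspecific() returns NULL rather than key A's old value.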

static pthread_key_internal_t key_map[BIONIC_PTHREAD_KEY_COUNT];

static inline bool SeqOfKeyInUse(uintptr_t seq) {
  return seq & (1 << SEQ_KEY_IN_USE_BIT);
}

#define KEY_VALID_FLAG (1 << 31)

static_assert(sizeof(pthread_key_t) == sizeof(int) && static_cast<pthread_key_t>(-1) < 0,
              "pthread_key_t should be typedef to int");

static inline bool KeyInValidRange(pthread_key_t key) {
  // key < 0 means bit 31 is set.
  // Then key < (2^31 | BIONIC_PTHREAD_KEY_COUNT) means the index part of key < BIONIC_PTHREAD_KEY_COUNT.
  return (key < (KEY_VALID_FLAG | BIONIC_PTHREAD_KEY_COUNT));
}
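
// For example, slot index 5 is handed back to the caller as key = KEY_VALID_FLAG | 5
// (0x80000005), which is negative as a signed int. A value without the flag, such as a
// zero-initialized pthread_key_t that was never created, therefore fails this check.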

// Called from pthread_exit() to remove all pthread keys. This must call the destructor of
// all keys that have a non-NULL data value and a non-NULL destructor.
__LIBC_HIDDEN__ void pthread_key_clean_all() {
  // Because destructors can do funky things like deleting/creating other keys,
  // we need to implement this in a loop.
  pthread_key_data_t* key_data = __get_thread()->key_data;
  for (size_t rounds = PTHREAD_DESTRUCTOR_ITERATIONS; rounds > 0; --rounds) {
    size_t called_destructor_count = 0;
    for (size_t i = 0; i < BIONIC_PTHREAD_KEY_COUNT; ++i) {
      uintptr_t seq = atomic_load_explicit(&key_map[i].seq, memory_order_relaxed);
      if (SeqOfKeyInUse(seq) && seq == key_data[i].seq && key_data[i].data != NULL) {
        // Other threads may be calling pthread_key_delete/pthread_key_create while the current
        // thread is exiting, so we need to ensure we read the right key_destructor.
        // We can rely on a user-established happens-before relationship between the creation and
        // use of a pthread key to ensure that we're not getting an earlier key_destructor.
        // To avoid using the key_destructor of a newly created key in the same slot, we need to
        // recheck the sequence number after reading key_destructor. As a result, we either see
        // the right key_destructor, or the sequence number must have changed when we reread it
        // below.
        key_destructor_t key_destructor = reinterpret_cast<key_destructor_t>(
            atomic_load_explicit(&key_map[i].key_destructor, memory_order_relaxed));
        if (key_destructor == NULL) {
          continue;
        }
        atomic_thread_fence(memory_order_acquire);
        if (atomic_load_explicit(&key_map[i].seq, memory_order_relaxed) != seq) {
          continue;
        }

        // We need to clear the key data now; this will prevent the destructor (or a later one)
        // from seeing the old value if it calls pthread_getspecific().
        // We don't do this if 'key_destructor == NULL', just in case another destructor
        // function is responsible for manually releasing the corresponding data.
        void* data = key_data[i].data;
        key_data[i].data = NULL;

        (*key_destructor)(data);
        ++called_destructor_count;
      }
    }

    // If we didn't call any destructors, there is no need to check the pthread keys again.
    if (called_destructor_count == 0) {
      break;
    }
  }
}
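
// Note that a destructor can legitimately re-arm its own slot by calling
// pthread_setspecific(): the value becomes non-NULL again and the next round of the loop
// above calls the destructor once more. After PTHREAD_DESTRUCTOR_ITERATIONS rounds we give
// up rather than loop forever.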

int pthread_key_create(pthread_key_t* key, void (*key_destructor)(void*)) {
  for (size_t i = 0; i < BIONIC_PTHREAD_KEY_COUNT; ++i) {
    uintptr_t seq = atomic_load_explicit(&key_map[i].seq, memory_order_relaxed);
    while (!SeqOfKeyInUse(seq)) {
      if (atomic_compare_exchange_weak(&key_map[i].seq, &seq, seq + SEQ_INCREMENT_STEP)) {
        atomic_store(&key_map[i].key_destructor, reinterpret_cast<uintptr_t>(key_destructor));
        *key = i | KEY_VALID_FLAG;
        return 0;
      }
    }
  }
  return EAGAIN;
}
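
// The compare-exchange both claims a free slot and flips its in-use bit in one atomic step.
// On failure the CAS reloads 'seq' with the current value, so the inner loop exits as soon
// as the slot is observed to be in use and the search moves on to the next slot.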

// Deletes a pthread_key_t. Note that the standard mandates that this does
// not call the destructors for non-NULL key values. Instead, it is the
// responsibility of the caller to properly dispose of the corresponding data
// and resources, using any means it finds suitable.
int pthread_key_delete(pthread_key_t key) {
  if (__predict_false(!KeyInValidRange(key))) {
    return EINVAL;
  }
  key &= ~KEY_VALID_FLAG;
  // Increase seq to invalidate values in all threads.
  uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
  if (SeqOfKeyInUse(seq)) {
    if (atomic_compare_exchange_strong(&key_map[key].seq, &seq, seq + SEQ_INCREMENT_STEP)) {
      return 0;
    }
  }
  return EINVAL;
}
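
// Per-thread values stored under the deleted key are not cleared here; each thread lazily
// discards its stale entry when pthread_getspecific() sees the sequence number mismatch.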

void* pthread_getspecific(pthread_key_t key) {
  if (__predict_false(!KeyInValidRange(key))) {
    return NULL;
  }
  key &= ~KEY_VALID_FLAG;
  uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
  pthread_key_data_t* data = &(__get_thread()->key_data[key]);
  // It is the user's responsibility to synchronize between the creation and use of pthread
  // keys, so we use memory_order_relaxed when checking the sequence number.
  if (__predict_true(SeqOfKeyInUse(seq) && data->seq == seq)) {
    return data->data;
  }
  // We arrive here when the current thread holds the seq of a deleted pthread key, so the
  // data belongs to the deleted key and should be cleared.
  data->data = NULL;
  return NULL;
}

int pthread_setspecific(pthread_key_t key, const void* ptr) {
  if (__predict_false(!KeyInValidRange(key))) {
    return EINVAL;
  }
  key &= ~KEY_VALID_FLAG;
  uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
  if (__predict_true(SeqOfKeyInUse(seq))) {
    pthread_key_data_t* data = &(__get_thread()->key_data[key]);
    data->seq = seq;
    data->data = const_cast<void*>(ptr);
    return 0;
  }
  return EINVAL;
}
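
// A minimal usage sketch (illustrative only; 'buffer_key' and the helpers are hypothetical):
//
//   #include <pthread.h>
//   #include <stdlib.h>
//
//   static pthread_key_t buffer_key;
//
//   static void create_buffer_key() {
//     // free() is registered as the destructor, so each thread's buffer is released
//     // automatically via pthread_key_clean_all() when that thread exits.
//     pthread_key_create(&buffer_key, free);
//   }
//
//   static char* get_thread_buffer() {
//     char* buf = static_cast<char*>(pthread_getspecific(buffer_key));
//     if (buf == NULL) {
//       buf = static_cast<char*>(calloc(1, 128));
//       pthread_setspecific(buffer_key, buf);
//     }
//     return buf;
//   }
//
// In real code, create_buffer_key() would typically be guarded by pthread_once() so the key
// is created exactly once before any thread uses it.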