/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>

#include "private/bionic_defs.h"
#include "private/bionic_tls.h"
#include "pthread_internal.h"

typedef void (*key_destructor_t)(void*);

#define SEQ_KEY_IN_USE_BIT 0

#define SEQ_INCREMENT_STEP (1 << SEQ_KEY_IN_USE_BIT)

// pthread_key_internal_t records the use of each pthread key slot:
//   seq records the state of the slot.
//     bit 0 is 1 when the key is in use and 0 when it is unused. Each time we create or delete
//     the pthread key in the slot, we increase seq by 1 (which inverts bit 0). The reason to use
//     a sequence number instead of a boolean value here is that when a key slot is deleted and
//     reused for a new key, pthread_getspecific will not return stale data.
//   key_destructor records the destructor called at thread exit.
struct pthread_key_internal_t {
  atomic_uintptr_t seq;
  atomic_uintptr_t key_destructor;
};

static pthread_key_internal_t key_map[BIONIC_PTHREAD_KEY_COUNT];
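
// For example (a sketch of the scheme above, not extra state): a slot's seq
// moves 0 (unused) -> 1 (in use) -> 2 (deleted) -> 3 (reused), and so on.
// A thread whose key_data still caches seq == 1 can never have its stale value
// returned for the new key occupying the same slot, because that key's seq is 3.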

static inline bool SeqOfKeyInUse(uintptr_t seq) {
  return seq & (1 << SEQ_KEY_IN_USE_BIT);
}

#define KEY_VALID_FLAG (1 << 31)

static_assert(sizeof(pthread_key_t) == sizeof(int) && static_cast<pthread_key_t>(-1) < 0,
              "pthread_key_t should be typedef to int");

static inline bool KeyInValidRange(pthread_key_t key) {
  // key < 0 means bit 31 is set.
  // Then key < (2^31 | BIONIC_PTHREAD_KEY_COUNT) means the index part of key
  // is less than BIONIC_PTHREAD_KEY_COUNT.
  return (key < (KEY_VALID_FLAG | BIONIC_PTHREAD_KEY_COUNT));
}
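
// Illustrative numbers (assuming the 32-bit signed int that the static_assert
// above guarantees): slot 5 is handed out as key = 5 | KEY_VALID_FLAG = 0x80000005,
// which is negative as a signed int and therefore less than
// (KEY_VALID_FLAG | BIONIC_PTHREAD_KEY_COUNT), so it passes KeyInValidRange().
// A forged key such as 5 has bit 31 clear, is positive, and is rejected.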

static inline pthread_key_data_t* get_thread_key_data() {
  return __get_bionic_tls().key_data;
}

// Called from pthread_exit() to remove all pthread keys. This must call the destructor of
// all keys that have a non-NULL data value and a non-NULL destructor.
__LIBC_HIDDEN__ void pthread_key_clean_all() {
  // Because destructors can do funky things like deleting/creating other keys,
  // we need to implement this in a loop.
  pthread_key_data_t* key_data = get_thread_key_data();
  for (size_t rounds = PTHREAD_DESTRUCTOR_ITERATIONS; rounds > 0; --rounds) {
    size_t called_destructor_count = 0;
    for (size_t i = 0; i < BIONIC_PTHREAD_KEY_COUNT; ++i) {
      uintptr_t seq = atomic_load_explicit(&key_map[i].seq, memory_order_relaxed);
      if (SeqOfKeyInUse(seq) && seq == key_data[i].seq && key_data[i].data != nullptr) {
        // Other threads may be calling pthread_key_delete/pthread_key_create while the current
        // thread is exiting, so we need to ensure we read the right key_destructor.
        // We can rely on a user-established happens-before relationship between the creation and
        // use of a pthread key to ensure that we're not getting an earlier key_destructor.
        // To avoid using the key_destructor of a newly created key in the same slot, we recheck
        // the sequence number after reading key_destructor. As a result, we either see the
        // right key_destructor, or the sequence number must have changed when we reread it below.
        key_destructor_t key_destructor = reinterpret_cast<key_destructor_t>(
            atomic_load_explicit(&key_map[i].key_destructor, memory_order_relaxed));
        if (key_destructor == nullptr) {
          continue;
        }
        atomic_thread_fence(memory_order_acquire);
        if (atomic_load_explicit(&key_map[i].seq, memory_order_relaxed) != seq) {
          continue;
        }

        // We need to clear the key data now; this prevents the destructor (or a later one)
        // from seeing the old value if it calls pthread_getspecific().
        // We don't do this if 'key_destructor == NULL' just in case another destructor
        // function is responsible for manually releasing the corresponding data.
        void* data = key_data[i].data;
        key_data[i].data = nullptr;

        (*key_destructor)(data);
        ++called_destructor_count;
      }
    }

    // If we didn't call any destructors, there is no need to check the pthread keys again.
    if (called_destructor_count == 0) {
      break;
    }
  }
}
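
// A minimal sketch of why the rounds loop above is needed (hypothetical user
// code, not part of bionic): POSIX allows a destructor to store fresh values,
// re-arming a slot that this pass already cleaned, so a single pass could
// leak data.
//
//   static pthread_key_t key_a, key_b;  // hypothetical keys
//   static void destroy_a(void* data) {
//     // Stashing a value under key_b here forces pthread_key_clean_all() to
//     // run another round so that key_b's destructor sees this value too.
//     pthread_setspecific(key_b, strdup(static_cast<char*>(data)));
//     free(data);
//   }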

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_key_create(pthread_key_t* key, void (*key_destructor)(void*)) {
  for (size_t i = 0; i < BIONIC_PTHREAD_KEY_COUNT; ++i) {
    uintptr_t seq = atomic_load_explicit(&key_map[i].seq, memory_order_relaxed);
    while (!SeqOfKeyInUse(seq)) {
      if (atomic_compare_exchange_weak(&key_map[i].seq, &seq, seq + SEQ_INCREMENT_STEP)) {
        atomic_store(&key_map[i].key_destructor, reinterpret_cast<uintptr_t>(key_destructor));
        *key = i | KEY_VALID_FLAG;
        return 0;
      }
    }
  }
  return EAGAIN;
}
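
// Typical caller-side usage, for reference (hypothetical user code, not part
// of bionic):
//
//   static pthread_key_t key;
//   static void destructor(void* data) { free(data); }  // runs at thread exit
//   ...
//   if (pthread_key_create(&key, destructor) == 0) {
//     pthread_setspecific(key, malloc(16));   // value is per-thread
//     void* data = pthread_getspecific(key);  // visible to this thread only
//   }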

// Deletes a pthread_key_t. Note that the standard mandates that this does
// not call the destructors for non-NULL key values. Instead, it is the
// responsibility of the caller to properly dispose of the corresponding data
// and resources, using any means it finds suitable.
__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_key_delete(pthread_key_t key) {
  if (__predict_false(!KeyInValidRange(key))) {
    return EINVAL;
  }
  key &= ~KEY_VALID_FLAG;
  // Increase seq to invalidate values in all threads.
  uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
  if (SeqOfKeyInUse(seq)) {
    if (atomic_compare_exchange_strong(&key_map[key].seq, &seq, seq + SEQ_INCREMENT_STEP)) {
      return 0;
    }
  }
  return EINVAL;
}
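
// A worked example of the invalidation above (a sketch, not extra behavior):
// suppose thread T stored a value while the slot's seq was 3. Deleting the key
// bumps seq to 4, so T's cached key_data[key].seq (3) no longer matches, and
// pthread_getspecific() returns nullptr instead of the stale pointer, even if
// the slot is later reused (seq 5) for a brand-new key.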

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
void* pthread_getspecific(pthread_key_t key) {
  if (__predict_false(!KeyInValidRange(key))) {
    return nullptr;
  }
  key &= ~KEY_VALID_FLAG;
  uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
  pthread_key_data_t* data = &get_thread_key_data()[key];
  // It is the user's responsibility to synchronize between the creation and use of pthread
  // keys, so we use memory_order_relaxed when checking the sequence number.
  if (__predict_true(SeqOfKeyInUse(seq) && data->seq == seq)) {
    return data->data;
  }
  // We arrive here when the current thread holds the seq of a deleted pthread key. So the
  // data belongs to the deleted key and should be cleared.
  data->data = nullptr;
  return nullptr;
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_setspecific(pthread_key_t key, const void* ptr) {
  if (__predict_false(!KeyInValidRange(key))) {
    return EINVAL;
  }
  key &= ~KEY_VALID_FLAG;
  uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
  if (__predict_true(SeqOfKeyInUse(seq))) {
    pthread_key_data_t* data = &get_thread_key_data()[key];
    data->seq = seq;
    data->data = const_cast<void*>(ptr);
    return 0;
  }
  return EINVAL;
}