#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/spin.h"

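/*
 * Spin count used for pre-Vista Windows critical sections when the CRT
 * headers do not define a default.
 */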
#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif

/******************************************************************************/
/*
 * We intercept pthread_create() calls in order to toggle isthreaded if the
 * process goes multi-threaded.
 */

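/*
 * pthread_create_wrapper() (defined elsewhere in jemalloc) sets isthreaded
 * before deferring to the real pthread_create().
 */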
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
    void *__restrict arg) {
	return pthread_create_wrapper(thread, attr, start_routine, arg);
}
#endif

/******************************************************************************/

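/*
 * On systems that provide it (e.g. FreeBSD), _pthread_mutex_init_calloc_cb()
 * initializes a mutex using a caller-supplied calloc, so jemalloc can create
 * mutexes before its own allocator is bootstrapped.
 */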
#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t));
#endif

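/*
 * Slow path taken once an initial trylock has failed: spin briefly in case
 * the holder releases the mutex soon, then block, recording contention
 * statistics when stats are enabled.
 */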
void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
	mutex_prof_data_t *data = &mutex->prof_data;
	UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER;

	if (ncpus == 1) {
		goto label_spin_done;
	}

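	/* Spin for a bounded number of iterations before blocking. */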
	int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
	do {
		spin_cpu_spinwait();
		if (!malloc_mutex_trylock_final(mutex)) {
			data->n_spin_acquired++;
			return;
		}
	} while (cnt++ < max_cnt);

	if (!config_stats) {
		/* Only spinning is useful when stats are off; block now. */
		malloc_mutex_lock_final(mutex);
		return;
	}
label_spin_done:
	nstime_update(&before);
	/*
	 * Copy before into after, so that a clock moving backwards cannot
	 * produce a negative delta.
	 */
	nstime_t after;
	nstime_copy(&after, &before);
	uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
	    ATOMIC_RELAXED) + 1;
	/* One last try, since the two calls above may take quite a few cycles. */
	if (!malloc_mutex_trylock_final(mutex)) {
		atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
		data->n_spin_acquired++;
		return;
	}

	/* True slow path. */
	malloc_mutex_lock_final(mutex);
	/* Update the remaining slow-path-only counters. */
	atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
	nstime_update(&after);

	nstime_t delta;
	nstime_copy(&delta, &after);
	nstime_subtract(&delta, &before);

	data->n_wait_times++;
	nstime_add(&data->tot_wait_time, &delta);
	if (nstime_compare(&data->max_wait_time, &delta) < 0) {
		nstime_copy(&data->max_wait_time, &delta);
	}
	if (n_thds > data->max_n_thds) {
		data->max_n_thds = n_thds;
	}
}

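/* Reset contention profiling data to a pristine state. */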
static void
mutex_prof_data_init(mutex_prof_data_t *data) {
	memset(data, 0, sizeof(mutex_prof_data_t));
	nstime_init(&data->max_wait_time, 0);
	nstime_init(&data->tot_wait_time, 0);
	data->prev_owner = NULL;
}

void
malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_assert_owner(tsdn, mutex);
	mutex_prof_data_init(&mutex->prof_data);
}

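/*
 * Witness comparison hook: order same-rank mutexes by address so that
 * lock-order checking sees a deterministic total order.
 */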
static int
mutex_addr_comp(const witness_t *witness1, void *mutex1,
    const witness_t *witness2, void *mutex2) {
	assert(mutex1 != NULL);
	assert(mutex2 != NULL);
	uintptr_t mu1int = (uintptr_t)mutex1;
	uintptr_t mu2int = (uintptr_t)mutex2;
	if (mu1int < mu2int) {
		return -1;
	} else if (mu1int == mu2int) {
		return 0;
	} else {
		return 1;
	}
}

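/*
 * Initialize a mutex using the platform-appropriate primitive. Returns true
 * on error and false on success, per jemalloc's internal convention.
 */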
bool
malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
	mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
	InitializeSRWLock(&mutex->lock);
#  else
	if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
	    _CRT_SPINCOUNT)) {
		return true;
	}
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
	mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_OSSPIN))
	mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
	if (postpone_init) {
		mutex->postponed_next = postponed_mutexes;
		postponed_mutexes = mutex;
	} else {
		if (_pthread_mutex_init_calloc_cb(&mutex->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
	}
#else
	pthread_mutexattr_t attr;

	if (pthread_mutexattr_init(&attr) != 0) {
		return true;
	}
	pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
	if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
		pthread_mutexattr_destroy(&attr);
		return true;
	}
	pthread_mutexattr_destroy(&attr);
#endif
	if (config_debug) {
		mutex->lock_order = lock_order;
		if (lock_order == malloc_mutex_address_ordered) {
			witness_init(&mutex->witness, name, rank,
			    mutex_addr_comp, mutex);
		} else {
			witness_init(&mutex->witness, name, rank, NULL, NULL);
		}
	}
	return false;
}

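/*
 * Fork protocol: the parent acquires every mutex before fork() so that none
 * is held by a thread that will not exist in the child, then both processes
 * release (or, in the child, re-create) the mutexes afterwards.
 */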
void
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_lock(tsdn, mutex);
}

void
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_unlock(tsdn, mutex);
}

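/*
 * fork() can leave a pthread mutex in an unusable state in the child, so the
 * mutex is re-created from scratch rather than merely unlocked (except under
 * the init-callback scheme, where unlocking suffices).
 */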
void
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	malloc_mutex_unlock(tsdn, mutex);
#else
	if (malloc_mutex_init(mutex, mutex->witness.name,
	    mutex->witness.rank, mutex->lock_order)) {
		malloc_printf("<jemalloc>: Error re-initializing mutex in "
		    "child\n");
		if (opt_abort) {
			abort();
		}
	}
#endif
}

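/*
 * Initialize any mutexes whose setup was postponed until bootstrap_calloc
 * became available; a no-op on other configurations.
 */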
bool
malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	postpone_init = false;
	while (postponed_mutexes != NULL) {
		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
		postponed_mutexes = postponed_mutexes->postponed_next;
	}
#endif
	return false;
}