/*
 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"

#ifndef _EVENT_DISABLE_THREAD_SUPPORT

#include "event2/thread.h"

#include <stdlib.h>
#include <string.h>

#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"

#ifdef EVTHREAD_EXPOSE_STRUCTS
#define GLOBAL
#else
#define GLOBAL static
#endif
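
/* When EVTHREAD_EXPOSE_STRUCTS is defined, the globals below are visible to
 * the rest of the library directly; otherwise they stay static to this file
 * and are reached only through the _evthreadimpl_* wrappers at the end. */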

/* globals */
GLOBAL int _evthread_lock_debugging_enabled = 0;
GLOBAL struct evthread_lock_callbacks _evthread_lock_fns = {
	0, 0, NULL, NULL, NULL, NULL
};
GLOBAL unsigned long (*_evthread_id_fn)(void) = NULL;
GLOBAL struct evthread_condition_callbacks _evthread_cond_fns = {
	0, NULL, NULL, NULL, NULL
};

/* Used for debugging */
static struct evthread_lock_callbacks _original_lock_fns = {
	0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks _original_cond_fns = {
	0, NULL, NULL, NULL, NULL
};

void
evthread_set_id_callback(unsigned long (*id_fn)(void))
{
	_evthread_id_fn = id_fn;
}

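/*
 * Illustrative sketch (not part of the original file): applications normally
 * install these callbacks indirectly, through evthread_use_pthreads() or
 * evthread_use_windows_threads().  Doing it by hand would look roughly like
 * this, assuming hypothetical wrappers my_lock_alloc(), my_lock_free(),
 * my_lock() and my_unlock() built on the platform's threading library:
 *
 *	struct evthread_lock_callbacks cbs = {
 *		EVTHREAD_LOCK_API_VERSION,
 *		EVTHREAD_LOCKTYPE_RECURSIVE,
 *		my_lock_alloc, my_lock_free,
 *		my_lock, my_unlock
 *	};
 *	evthread_set_lock_callbacks(&cbs);
 *
 * The call below returns 0 on success and -1 if a different set of callbacks
 * had already been installed.
 */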
int
evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
{
	struct evthread_lock_callbacks *target =
	    _evthread_lock_debugging_enabled
	    ? &_original_lock_fns : &_evthread_lock_fns;

	if (!cbs) {
		if (target->alloc)
			event_warnx("Trying to disable lock functions after "
			    "they have been set up will probably not work.");
		memset(target, 0, sizeof(_evthread_lock_fns));
		return 0;
	}
	if (target->alloc) {
		/* Uh oh; we already had locking callbacks set up. */
		if (target->lock_api_version == cbs->lock_api_version &&
			target->supported_locktypes == cbs->supported_locktypes &&
			target->alloc == cbs->alloc &&
			target->free == cbs->free &&
			target->lock == cbs->lock &&
			target->unlock == cbs->unlock) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change lock callbacks once they have been "
		    "initialized.");
		return -1;
	}
	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
		memcpy(target, cbs, sizeof(_evthread_lock_fns));
		return event_global_setup_locks_(1);
	} else {
		return -1;
	}
}

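/* The condition-variable callbacks are installed exactly like the lock
 * callbacks above: once a non-empty set has been configured it can only be
 * replaced by an identical one, and clearing it afterwards is unlikely to
 * work. */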
int
evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
{
	struct evthread_condition_callbacks *target =
	    _evthread_lock_debugging_enabled
	    ? &_original_cond_fns : &_evthread_cond_fns;

	if (!cbs) {
		if (target->alloc_condition)
			event_warnx("Trying to disable condition functions "
			    "after they have been set up will probably not "
			    "work.");
		memset(target, 0, sizeof(_evthread_cond_fns));
		return 0;
	}
	if (target->alloc_condition) {
		/* Uh oh; we already had condition callbacks set up. */
		if (target->condition_api_version == cbs->condition_api_version &&
			target->alloc_condition == cbs->alloc_condition &&
			target->free_condition == cbs->free_condition &&
			target->signal_condition == cbs->signal_condition &&
			target->wait_condition == cbs->wait_condition) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change condition callbacks once they "
		    "have been initialized.");
		return -1;
	}
	if (cbs->alloc_condition && cbs->free_condition &&
	    cbs->signal_condition && cbs->wait_condition) {
		memcpy(target, cbs, sizeof(_evthread_cond_fns));
	}
	if (_evthread_lock_debugging_enabled) {
		_evthread_cond_fns.alloc_condition = cbs->alloc_condition;
		_evthread_cond_fns.free_condition = cbs->free_condition;
		_evthread_cond_fns.signal_condition = cbs->signal_condition;
	}
	return 0;
}

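/* Wrapper that debugging mode puts around every lock.  It records the lock's
 * type, the thread currently holding it, and a recursion count, so that
 * misuse (unlocking a lock you don't hold, double-locking a non-recursive
 * lock, freeing a held lock) can be caught by the assertions below. */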
struct debug_lock {
	unsigned locktype;
	unsigned long held_by;
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;
	void *lock;
};

static void *
debug_lock_alloc(unsigned locktype)
{
	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
	if (!result)
		return NULL;
	if (_original_lock_fns.alloc) {
		if (!(result->lock = _original_lock_fns.alloc(
				locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
			mm_free(result);
			return NULL;
		}
	} else {
		result->lock = NULL;
	}
	result->locktype = locktype;
	result->count = 0;
	result->held_by = 0;
	return result;
}

static void
debug_lock_free(void *lock_, unsigned locktype)
{
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock->count == 0);
	EVUTIL_ASSERT(locktype == lock->locktype);
	if (_original_lock_fns.free) {
		_original_lock_fns.free(lock->lock,
		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
	}
	lock->lock = NULL;
	lock->count = -100;
	mm_free(lock);
}

static void
evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
{
	++lock->count;
	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
		EVUTIL_ASSERT(lock->count == 1);
	if (_evthread_id_fn) {
		unsigned long me;
		me = _evthread_id_fn();
		if (lock->count > 1)
			EVUTIL_ASSERT(lock->held_by == me);
		lock->held_by = me;
	}
}

static int
debug_lock_lock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (_original_lock_fns.lock)
		res = _original_lock_fns.lock(mode, lock->lock);
	if (!res) {
		evthread_debug_lock_mark_locked(mode, lock);
	}
	return res;
}

static void
evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
{
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (_evthread_id_fn) {
		EVUTIL_ASSERT(lock->held_by == _evthread_id_fn());
		if (lock->count == 1)
			lock->held_by = 0;
	}
	--lock->count;
	EVUTIL_ASSERT(lock->count >= 0);
}

static int
debug_lock_unlock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	evthread_debug_lock_mark_unlocked(mode, lock);
	if (_original_lock_fns.unlock)
		res = _original_lock_fns.unlock(mode, lock->lock);
	return res;
}

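/* Waiting on a condition releases the lock inside the real wait
 * implementation, so the debug bookkeeping is dropped before the wait and
 * re-established afterwards; otherwise the ownership checks above would fire
 * spuriously. */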
static int
debug_cond_wait(void *_cond, void *_lock, const struct timeval *tv)
{
	int r;
	struct debug_lock *lock = _lock;
	EVUTIL_ASSERT(lock);
	EVLOCK_ASSERT_LOCKED(_lock);
	evthread_debug_lock_mark_unlocked(0, lock);
	r = _original_cond_fns.wait_condition(_cond, lock->lock, tv);
	evthread_debug_lock_mark_locked(0, lock);
	return r;
}

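/*
 * Illustrative sketch (not part of the original file): lock debugging is
 * meant to be switched on once, before any event_base or lock has been
 * created, and there is no way to switch it off again.  A debug build might
 * therefore do something like:
 *
 *	evthread_use_pthreads();
 *	evthread_enable_lock_debuging();
 *	struct event_base *base = event_base_new();
 *
 * Note that the misspelled "debuging" is the function's actual public name.
 */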
void
evthread_enable_lock_debuging(void)
{
	struct evthread_lock_callbacks cbs = {
		EVTHREAD_LOCK_API_VERSION,
		EVTHREAD_LOCKTYPE_RECURSIVE,
		debug_lock_alloc,
		debug_lock_free,
		debug_lock_lock,
		debug_lock_unlock
	};
	if (_evthread_lock_debugging_enabled)
		return;
	memcpy(&_original_lock_fns, &_evthread_lock_fns,
	    sizeof(struct evthread_lock_callbacks));
	memcpy(&_evthread_lock_fns, &cbs,
	    sizeof(struct evthread_lock_callbacks));

	memcpy(&_original_cond_fns, &_evthread_cond_fns,
	    sizeof(struct evthread_condition_callbacks));
	_evthread_cond_fns.wait_condition = debug_cond_wait;
	_evthread_lock_debugging_enabled = 1;

	/* XXX return value should get checked. */
	event_global_setup_locks_(0);
}

int
_evthread_is_debug_lock_held(void *lock_)
{
	struct debug_lock *lock = lock_;
	if (! lock->count)
		return 0;
	if (_evthread_id_fn) {
		unsigned long me = _evthread_id_fn();
		if (lock->held_by != me)
			return 0;
	}
	return 1;
}

void *
_evthread_debug_get_real_lock(void *lock_)
{
	struct debug_lock *lock = lock_;
	return lock->lock;
}

void *
evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
{
	/* there are four cases here:
	   1) we're turning on debugging; locking is not on.
	   2) we're turning on debugging; locking is on.
	   3) we're turning on locking; debugging is not on.
	   4) we're turning on locking; debugging is on. */

	if (!enable_locks && _original_lock_fns.alloc == NULL) {
		/* Case 1: allocate a debug lock. */
		EVUTIL_ASSERT(lock_ == NULL);
		return debug_lock_alloc(locktype);
	} else if (!enable_locks && _original_lock_fns.alloc != NULL) {
		/* Case 2: wrap the lock in a debug lock. */
		struct debug_lock *lock;
		EVUTIL_ASSERT(lock_ != NULL);

		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
			/* We can't wrap it: We need a recursive lock */
			_original_lock_fns.free(lock_, locktype);
			return debug_lock_alloc(locktype);
		}
		lock = mm_malloc(sizeof(struct debug_lock));
		if (!lock) {
			_original_lock_fns.free(lock_, locktype);
			return NULL;
		}
		lock->lock = lock_;
		lock->locktype = locktype;
		lock->count = 0;
		lock->held_by = 0;
		return lock;
	} else if (enable_locks && ! _evthread_lock_debugging_enabled) {
		/* Case 3: allocate a regular lock */
		EVUTIL_ASSERT(lock_ == NULL);
		return _evthread_lock_fns.alloc(locktype);
	} else {
		/* Case 4: Fill in a debug lock with a real lock */
		struct debug_lock *lock = lock_;
		EVUTIL_ASSERT(enable_locks &&
		              _evthread_lock_debugging_enabled);
		EVUTIL_ASSERT(lock->locktype == locktype);
		EVUTIL_ASSERT(lock->lock == NULL);
		lock->lock = _original_lock_fns.alloc(
			locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock->lock) {
			lock->count = -200;
			mm_free(lock);
			return NULL;
		}
		return lock;
	}
}

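/* When EVTHREAD_EXPOSE_STRUCTS is not defined, the callback structures above
 * stay static to this file, and the rest of the library reaches them through
 * these out-of-line wrappers instead. */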
#ifndef EVTHREAD_EXPOSE_STRUCTS
unsigned long
_evthreadimpl_get_id()
{
	return _evthread_id_fn ? _evthread_id_fn() : 1;
}
void *
_evthreadimpl_lock_alloc(unsigned locktype)
{
	return _evthread_lock_fns.alloc ?
	    _evthread_lock_fns.alloc(locktype) : NULL;
}
void
_evthreadimpl_lock_free(void *lock, unsigned locktype)
{
	if (_evthread_lock_fns.free)
		_evthread_lock_fns.free(lock, locktype);
}
int
_evthreadimpl_lock_lock(unsigned mode, void *lock)
{
	if (_evthread_lock_fns.lock)
		return _evthread_lock_fns.lock(mode, lock);
	else
		return 0;
}
int
_evthreadimpl_lock_unlock(unsigned mode, void *lock)
{
	if (_evthread_lock_fns.unlock)
		return _evthread_lock_fns.unlock(mode, lock);
	else
		return 0;
}
void *
_evthreadimpl_cond_alloc(unsigned condtype)
{
	return _evthread_cond_fns.alloc_condition ?
	    _evthread_cond_fns.alloc_condition(condtype) : NULL;
}
void
_evthreadimpl_cond_free(void *cond)
{
	if (_evthread_cond_fns.free_condition)
		_evthread_cond_fns.free_condition(cond);
}
int
_evthreadimpl_cond_signal(void *cond, int broadcast)
{
	if (_evthread_cond_fns.signal_condition)
		return _evthread_cond_fns.signal_condition(cond, broadcast);
	else
		return 0;
}
int
_evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv)
{
	if (_evthread_cond_fns.wait_condition)
		return _evthread_cond_fns.wait_condition(cond, lock, tv);
	else
		return 0;
}
int
_evthreadimpl_is_lock_debugging_enabled(void)
{
	return _evthread_lock_debugging_enabled;
}

int
_evthreadimpl_locking_enabled(void)
{
	return _evthread_lock_fns.lock != NULL;
}
#endif

#endif