/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 * @key: the lock_class_key for the class; used by mutex lock debugging
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
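
/*
 * Usage sketch (editorial, hedged): callers do not normally invoke
 * __mutex_init() directly; they use the mutex_init() macro or the
 * DEFINE_MUTEX() static initializer from <linux/mutex.h>, both of which
 * supply the name and lock class key. The identifiers below are
 * hypothetical and not part of this file.
 */
static DEFINE_MUTEX(example_static_lock);	/* statically initialized, unlocked */

static void __maybe_unused example_dynamic_init(struct mutex *lock)
{
	mutex_init(lock);			/* dynamically initialized, unlocked */
}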

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void inline __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
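
/*
 * Usage sketch (editorial, hedged): the canonical mutex_lock()/mutex_unlock()
 * critical section. The example_data structure and its fields are
 * hypothetical, not part of this file.
 */
struct example_data {
	struct mutex	lock;
	int		count;
};

static void __maybe_unused example_update(struct example_data *d)
{
	mutex_lock(&d->lock);		/* may sleep until the mutex is available */
	d->count++;			/* exclusive access to d->count here */
	mutex_unlock(&d->lock);		/* wakes up the first waiter, if any */
}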

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	mutex_acquire(&lock->dep_map, subclass, 0, ip);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	old_val = atomic_xchg(&lock->count, -1);
	if (old_val == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	debug_mutex_set_owner(lock, task_thread_info(task));

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif
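
/*
 * Usage sketch (editorial, hedged): the _nested variants exist so that
 * lockdep does not report a false deadlock when two mutexes of the same
 * lock class are legitimately held at once (e.g. a parent and a child
 * object). SINGLE_DEPTH_NESTING comes from <linux/lockdep.h>; the function
 * and parameter names below are hypothetical.
 */
static void __maybe_unused example_lock_pair(struct mutex *parent, struct mutex *child)
{
	mutex_lock(parent);
	mutex_lock_nested(child, SINGLE_DEPTH_NESTING);	/* same class, one level deeper */

	/* ... both objects are now protected ... */

	mutex_unlock(child);
	mutex_unlock(parent);
}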

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	debug_mutex_clear_owner(lock);

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
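
/*
 * Usage sketch (editorial, hedged): a caller of mutex_lock_interruptible()
 * must check the return value and back out cleanly when a signal interrupted
 * the wait. The function name and parameters below are hypothetical.
 */
static int __maybe_unused example_interruptible_update(struct mutex *lock, int *val)
{
	int ret;

	ret = mutex_lock_interruptible(lock);
	if (ret)
		return ret;		/* -EINTR: a signal arrived while sleeping */

	(*val)++;			/* protected update */
	mutex_unlock(lock);
	return 0;
}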

int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock: we take the spinlock and check whether we
 * can get the lock.
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		debug_mutex_set_owner(lock, current_thread_info());
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}
	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return value is the opposite of down_trylock()'s! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count,
					__mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
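
/*
 * Usage sketch (editorial, hedged): because mutex_trylock() returns 1 on
 * success (the spin_trylock() convention), the natural pattern is a plain
 * "if" around the critical section. The function name and parameters below
 * are hypothetical.
 */
static int __maybe_unused example_opportunistic_update(struct mutex *lock, int *val)
{
	if (!mutex_trylock(lock))
		return 0;		/* contended: caller retries later */

	(*val)++;			/* protected update */
	mutex_unlock(lock);
	return 1;			/* note: 1 == success, unlike down_trylock() */
}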