/*
 * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
 */
#ifndef _ASM_POWERPC_MUTEX_H
#define _ASM_POWERPC_MUTEX_H

static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
{
	int t;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%1		# mutex trylock\n\
	cmpw	0,%0,%2\n\
	bne-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%3,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	"\n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (old), "r" (new)
	: "cc", "memory");

	return t;
}
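
/*
 * Illustrative sketch only (an editorial gloss, not part of the original
 * file): at the C level, the lwarx/stwcx. loop above behaves like a
 * compare-and-swap that returns the value it observed, roughly:
 *
 *	int t = atomic_read(v);		// lwarx: load with reservation
 *	if (t == old)			// cmpw / bne- 2f
 *		atomic_set(v, new);	// stwcx.: store only if the
 *					// reservation held, else retry at 1:
 *	return t;			// caller treats t == old as success
 *
 * The C version is not atomic; only the reservation protocol makes the
 * compare and the store one indivisible step. PPC_ACQUIRE_BARRIER runs
 * only on the successful-store path (the failed compare branches past
 * it to 2:), ordering the critical section after a successful acquire.
 */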

static inline int __mutex_dec_return_lock(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# mutex lock\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
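
/*
 * Illustrative equivalence (an editorial gloss, not part of the original
 * file): ignoring ordering, the loop above computes
 *
 *	t = atomic_dec_return(v);	// retried until the decrement
 *					// commits with no intervening
 *					// store to v->counter
 *
 * followed by an acquire barrier. t == 0 means an uncontended acquire;
 * t < 0 means the lock was already held and the caller must take the
 * slowpath.
 */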

static inline int __mutex_inc_return_unlock(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# mutex unlock\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
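
/*
 * Illustrative equivalence (an editorial gloss, not part of the original
 * file): a release barrier followed by
 *
 *	t = atomic_inc_return(v);	// what the loop computes atomically
 *
 * The barrier sits *before* the increment, the mirror image of the lock
 * path: stores made inside the critical section must be visible before
 * the lock is seen as free. t <= 0 after the increment means there are
 * waiters and the unlock slowpath must run.
 */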

/**
 *  __mutex_fastpath_lock - try to take the lock by moving the count
 *                          from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function MUST leave the value lower than
 * 1 even when the "1" assertion wasn't true.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(__mutex_dec_return_lock(count) < 0))
		fail_fn(count);
}
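
/*
 * Usage sketch (a hypothetical caller, modelled on kernel/mutex.c of
 * the same era; not part of this file): the generic mutex code passes
 * its slowpath as fail_fn, so the uncontended case stays a single
 * atomic decrement.
 *
 *	void example_mutex_lock(struct mutex *lock)
 *	{
 *		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *	}
 */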

/**
 *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                 from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *
 * Change the count from 1 to a value lower than 1. This function returns 0
 * if the fastpath succeeds, or -1 otherwise.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
	if (unlikely(__mutex_dec_return_lock(count) < 0))
		return -1;
	return 0;
}
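
/*
 * Usage sketch (an assumption, loosely modelled on
 * mutex_lock_interruptible() in kernel/mutex.c; names and signatures
 * are illustrative): the retval variant lets the generic code choose a
 * slowpath instead of baking a fail_fn into the fastpath.
 *
 *	int example_lock_interruptible(struct mutex *lock)
 *	{
 *		if (likely(!__mutex_fastpath_lock_retval(&lock->count)))
 *			return 0;		// fastpath got the lock
 *		return example_lock_interruptible_slowpath(lock);
 *	}
 */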

/**
 *  __mutex_fastpath_unlock - try to promote the count from 0 to 1
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value to
 * 1, or to set it to a value lower than 1.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(__mutex_inc_return_unlock(count) <= 0))
		fail_fn(count);
}
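
/*
 * Usage sketch (an assumption, modelled on mutex_unlock() in
 * kernel/mutex.c; not part of this file):
 *
 *	void example_mutex_unlock(struct mutex *lock)
 *	{
 *		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 *	}
 *
 * Because the failed fastpath may leave the count at a value lower than
 * 1, __mutex_slowpath_needs_to_unlock() below returns 1 so the generic
 * unlock slowpath knows it must still set the count back to 1 itself.
 */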

#define __mutex_slowpath_needs_to_unlock()		1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 *  @count: pointer of type atomic_t
 *  @fail_fn: fallback function
 *
 * Change the count from 1 to 0, and return 1 (success), or if the count
 * was not 1, then return 0 (failure).
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (likely(atomic_read(count) == 1 && __mutex_cmpxchg_lock(count, 1, 0) == 1))
		return 1;
	return 0;
}
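
/*
 * Usage sketch (an assumption, modelled on mutex_trylock() in
 * kernel/mutex.c; not part of this file). Note that fail_fn is never
 * called in the body above: the cmpxchg alone decides success.
 *
 *	int example_mutex_trylock(struct mutex *lock)
 *	{
 *		return __mutex_fastpath_trylock(&lock->count,
 *						__mutex_trylock_slowpath);
 *	}
 */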

#endif /* _ASM_POWERPC_MUTEX_H */