#ifndef _ASM_X86_LOCAL_H
#define _ASM_X86_LOCAL_H

#include <linux/percpu.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/asm.h>

typedef struct {
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l, i)	atomic_long_set(&(l)->a, (i))

/**
 * local_inc - increment local variable
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1.
 */
static inline void local_inc(local_t *l)
{
	asm volatile(_ASM_INC "%0"
		     : "+m" (l->a.counter));
}

/**
 * local_dec - decrement local variable
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1.
 */
static inline void local_dec(local_t *l)
{
	asm volatile(_ASM_DEC "%0"
		     : "+m" (l->a.counter));
}

/**
 * local_add - add long to local variable
 * @i: long value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l.
 */
static inline void local_add(long i, local_t *l)
{
	asm volatile(_ASM_ADD "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

/**
 * local_sub - subtract long from local variable
 * @i: long value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l.
 */
static inline void local_sub(long i, local_t *l)
{
	asm volatile(_ASM_SUB "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int local_sub_and_test(long i, local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_SUB "%2,%0; sete %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}

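/*
 * Illustrative sketch, not part of the original header: one way a caller
 * might use local_sub_and_test() to drain a budget held in a local_t and
 * notice the moment it is exhausted.  The helper name and its parameters
 * are hypothetical.
 */
static inline int example_consume_budget(local_t *budget, long cost)
{
	/* True only for the subtraction that brings the budget to zero. */
	return local_sub_and_test(cost, budget);
}
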
/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int local_dec_and_test(local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_DEC "%0; sete %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}

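/*
 * Illustrative sketch, not part of the original header: the usual
 * "drop a reference, run cleanup only on the final put" pattern built on
 * local_dec_and_test().  The helper name and the release callback are
 * hypothetical.
 */
static inline void example_put_ref(local_t *refcount, void (*release)(void))
{
	/* Exactly one caller sees the 1 -> 0 transition and runs release(). */
	if (local_dec_and_test(refcount))
		release();
}
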
/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int local_inc_and_test(local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_INC "%0; sete %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}

/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
static inline int local_add_negative(long i, local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_ADD "%2,%0; sets %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}

/**
 * local_add_return - add and return
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns @i + @l
 */
static inline long local_add_return(long i, local_t *l)
{
	long __i;
#ifdef CONFIG_M386
	unsigned long flags;
	if (unlikely(boot_cpu_data.x86 <= 3))
		goto no_xadd;
#endif
	/* Modern 486+ processor */
	__i = i;
	asm volatile(_ASM_XADD "%0, %1;"
		     : "+r" (i), "+m" (l->a.counter)
		     : : "memory");
	return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
	local_irq_save(flags);
	__i = local_read(l);
	local_set(l, i + __i);
	local_irq_restore(flags);
	return i + __i;
#endif
}

static inline long local_sub_return(long i, local_t *l)
{
	return local_add_return(-i, l);
}

#define local_inc_return(l)  (local_add_return(1, l))
#define local_dec_return(l)  (local_sub_return(1, l))

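/*
 * Illustrative sketch, not part of the original header: using the
 * *_return variants when the caller needs the updated value, e.g. to
 * hand out increasing sequence numbers.  The helper name is hypothetical.
 */
static inline long example_next_seq(local_t *seq)
{
	/* Returns the post-increment value, unlike plain local_inc(). */
	return local_inc_return(seq);
}
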
#define local_cmpxchg(l, o, n) \
	(cmpxchg_local(&((l)->a.counter), (o), (n)))
/* Always has a lock prefix */
#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))

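/*
 * Illustrative sketch, not part of the original header: local_cmpxchg()
 * used as a one-shot "claim" so that only the first caller wins.  The
 * helper name is hypothetical.
 */
static inline int example_claim_once(local_t *flag)
{
	/* Succeeds only for the caller that observes the old value 0. */
	return local_cmpxchg(flag, 0, 1) == 0;
}
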
/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */
#define local_add_unless(l, a, u)				\
({								\
	long c, old;						\
	c = local_read((l));					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = local_cmpxchg((l), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)

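/*
 * Illustrative sketch, not part of the original header: taking a reference
 * only while the object is still live, the usual job of
 * local_inc_not_zero().  The helper name is hypothetical.
 */
static inline int example_get_ref_if_live(local_t *refcount)
{
	/* Fails (returns 0) once the count has already dropped to zero. */
	return local_inc_not_zero(refcount);
}
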
/* On x86_32, these are no better than the atomic variants.
 * On x86-64 these are better than the atomic variants on SMP kernels
 * because they don't use a lock prefix.
 */
#define __local_inc(l)		local_inc(l)
#define __local_dec(l)		local_dec(l)
#define __local_add(i, l)	local_add((i), (l))
#define __local_sub(i, l)	local_sub((i), (l))

/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable, not an address.
 *
 * X86_64: This could be done better if we moved the per cpu data directly
 * after GS.
 */

/* Need to disable preemption for the cpu local counters otherwise we could
   still access a variable of a previous CPU in a non-atomic way. */
#define cpu_local_wrap_v(l)		\
({					\
	local_t res__;			\
	preempt_disable();		\
	res__ = (l);			\
	preempt_enable();		\
	res__;				\
})
#define cpu_local_wrap(l)		\
({					\
	preempt_disable();		\
	(l);				\
	preempt_enable();		\
})

#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var((l))))
#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var((l))))
#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))

#define __cpu_local_inc(l)	cpu_local_inc((l))
#define __cpu_local_dec(l)	cpu_local_dec((l))
#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))

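/*
 * Illustrative sketch, not part of the original header: a per-CPU event
 * counter bumped through the cpu_local_*() wrappers, which handle the
 * preemption disable/enable noted above.  The counter and helper names
 * are hypothetical; the matching DEFINE_PER_CPU(local_t, example_event_count)
 * would live in some .c file.
 */
DECLARE_PER_CPU(local_t, example_event_count);

static inline void example_count_event(void)
{
	/*
	 * Safe to call from preemptible context; cpu_local_wrap() keeps
	 * the increment on one CPU's copy of the counter.
	 */
	cpu_local_inc(example_event_count);
}
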
#endif /* _ASM_X86_LOCAL_H */