/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

/* Tile-specific routines to support <linux/atomic.h>. */
int _atomic_xchg(atomic_t *v, int n);
int _atomic_xchg_add(atomic_t *v, int i);
int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
int _atomic_cmpxchg(atomic_t *v, int o, int n);

/**
 * atomic_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline int atomic_xchg(atomic_t *v, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg(v, n);
}

/**
 * atomic_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_cmpxchg(v, o, n);
}
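
/*
 * Example (illustrative only, not part of this header): the usual
 * compare-and-swap retry loop built on atomic_cmpxchg().  The
 * atomic_add_clamped() helper is hypothetical and exists only to show
 * the pattern; atomic_read() comes from <linux/atomic.h>.
 *
 *	static inline int atomic_add_clamped(atomic_t *v, int i, int max)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			new = old + i;
 *			if (new > max)
 *				new = max;
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *		return new;
 *	}
 */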

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(v, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(v, i) + i;
}

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(v, a, u);
}
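
/*
 * Example (illustrative, not part of this header): __atomic_add_unless()
 * returns the old value of @v, so callers compare that value against @u
 * to learn whether the add happened.  This is how the generic
 * atomic_add_unless() and atomic_inc_not_zero() wrappers in
 * <linux/atomic.h> use it, e.g. to take a reference only while a count
 * is still non-zero.  The try_get() helper below is hypothetical.
 *
 *	static inline int try_get(atomic_t *refcount)
 *	{
 *		return __atomic_add_unless(refcount, 1, 0) != 0;
 *	}
 */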

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(v, n);
}

/* A 64-bit atomic type */

typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

u64 _atomic64_xchg(atomic64_t *v, u64 n);
u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline u64 atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((atomic64_t *)v, 0);
}
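
/*
 * Example of the hazard the atomic read avoids (illustrative only): with
 * two separate 32-bit loads, a reader can see half of an update.  If the
 * counter holds 0x00000000ffffffffULL and another cpu atomically adds 1,
 * making it 0x0000000100000000ULL, a non-atomic reader could combine one
 * half from before the update with the other half from after it and
 * observe 0x0000000000000000ULL or 0x00000001ffffffffULL, values the
 * counter never held.
 */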

/**
 * atomic64_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic64_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg(v, n);
}

/**
 * atomic64_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic64_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_cmpxchg(v, o, n);
}

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	_atomic64_xchg_add(v, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(v, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(v, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, u64 n)
{
	_atomic64_xchg(v, n);
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
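
/*
 * Example (illustrative, not part of this header): atomic64_dec_and_test()
 * is the usual way to drop a reference, since exactly one cpu sees the
 * count reach zero and can free the object.  The struct blob and
 * blob_put() below are hypothetical.
 *
 *	struct blob {
 *		atomic64_t refs;
 *	};
 *
 *	static void blob_put(struct blob *b)
 *	{
 *		if (atomic64_dec_and_test(&b->refs))
 *			kfree(b);
 *	}
 */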

/*
 * We need a barrier before modifying the word, since the _atomic_xxx()
 * routines simply tns the lock and then do a read/modify/write of the
 * word.  But after the word is updated, the routine issues an "mf"
 * before returning, and since it's a function call, we don't even need a
 * compiler barrier.
 */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_dec()	do { } while (0)
#define smp_mb__after_atomic_inc()	do { } while (0)
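
/*
 * Example of intended use (illustrative; the fields are hypothetical):
 * a store that must be visible to other cpus before a following
 * atomic_dec() needs the "before" barrier, while nothing extra is
 * required afterwards on this architecture.
 *
 *	obj->state = STATE_DONE;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */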

#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
  (!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))

#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

/* Number of entries in atomic_lock_ptr[]. */
#define ATOMIC_HASH_L1_SHIFT 6
#define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT)

/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
#define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2)
#define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT)

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
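
/*
 * Sketch (an assumption, not the actual implementation): in the
 * non-table configuration, a lock is conceptually chosen by hashing the
 * address of the atomic word into atomic_locks[], e.g. by taking the
 * address bits just above the low word-offset bits and masking with
 * ATOMIC_HASH_SIZE - 1.  The real selection lives in the supporting lib
 * code and may differ in detail; the example_lock_for() helper below is
 * purely hypothetical.
 *
 *	static inline int *example_lock_for(volatile void *v)
 *	{
 *		unsigned long idx =
 *			((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1);
 *		return &atomic_locks[idx];
 *	}
 */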

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to
 * keep assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Private helper routines in lib/atomic_asm_32.S */
extern struct __get_user __atomic_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
				      int *lock, u64 o, u64 n);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */