/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(&v->counter, i);
}

#define ATOMIC_OPS(op)							\
unsigned long _atomic_fetch_##op(volatile unsigned long *p, unsigned long mask); \
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	_atomic_fetch_##op((unsigned long *)&v->counter, i);		\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	smp_mb();							\
	return _atomic_fetch_##op((unsigned long *)&v->counter, i);	\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
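
/*
 * For reference, ATOMIC_OPS(and) above expands (modulo whitespace) to:
 *
 *   unsigned long _atomic_fetch_and(volatile unsigned long *p,
 *                                   unsigned long mask);
 *   static inline void atomic_and(int i, atomic_t *v)
 *   {
 *           _atomic_fetch_and((unsigned long *)&v->counter, i);
 *   }
 *   static inline int atomic_fetch_and(int i, atomic_t *v)
 *   {
 *           smp_mb();
 *           return _atomic_fetch_and((unsigned long *)&v->counter, i);
 *   }
 *
 * i.e. a declaration for the out-of-line helper plus a void wrapper
 * and a value-returning wrapper for each bitwise op.
 */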

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	smp_mb();
	return _atomic_xchg_add(&v->counter, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(&v->counter, i) + i;
}
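
/*
 * Note the pairing: atomic_fetch_add() above returns the value @v held
 * *before* the add, while atomic_add_return() returns the value *after*
 * it.  Schematically (a sketch, not compiled here):
 *
 *   atomic_t v = ATOMIC_INIT(1);
 *   int old = atomic_fetch_add(2, &v);    // old == 1, v is now 3
 *   int sum = atomic_add_return(2, &v);   // sum == 5, v is now 5
 */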

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(&v->counter, a, u);
}
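
/*
 * Since __atomic_add_unless() returns the old value, the generic
 * atomic_add_unless()/atomic_inc_not_zero() wrappers compare it
 * against @u.  A typical "take a reference only if still live" use
 * (illustrative sketch; "obj" and its refcount field are hypothetical):
 *
 *   if (__atomic_add_unless(&obj->refcount, 1, 0) == 0)
 *           return NULL;    // was already 0: no reference taken
 *   return obj;             // old value nonzero: refcount bumped
 */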

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(&v->counter, n);
}
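
/*
 * The race described above, spelled out for this lock-based scheme
 * (schematic interleaving, not code):
 *
 *   CPU 0: atomic_add()             CPU 1: hypothetical raw store
 *   -------------------             -----------------------------
 *   acquire hashed lock
 *   load v->counter (== 5)
 *                                   v->counter = 100
 *   store v->counter = 5 + i
 *   release hashed lock
 *
 * CPU 1's store is silently overwritten, which is why atomic_set()
 * funnels through the same locking protocol via _atomic_xchg().
 */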

#define atomic_set_release(v, i)	atomic_set((v), (i))

/* A 64-bit atomic type */

typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline long long atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((long long *)&v->counter, 0);
}
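
/*
 * Why not a plain load?  A 64-bit read on this 32-bit core takes two
 * 32-bit loads, so a concurrent atomic64_add() could be observed
 * half-complete.  Schematic example, assuming @v holds
 * 0x00000000ffffffff and another CPU adds 1 mid-read:
 *
 *   load low word  -> 0xffffffff
 *           (other CPU: @v becomes 0x0000000100000000)
 *   load high word -> 0x00000001
 *   torn result: 0x00000001ffffffff, a value @v never held
 *
 * Going through _atomic64_xchg_add(..., 0) takes the hashed lock and
 * rules this out.
 */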

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(long long i, atomic64_t *v)
{
	_atomic64_xchg_add(&v->counter, i);
}

#define ATOMIC64_OPS(op)					\
long long _atomic64_fetch_##op(long long *v, long long n);	\
static inline void atomic64_##op(long long i, atomic64_t *v)	\
{								\
	_atomic64_fetch_##op(&v->counter, i);			\
}								\
static inline long long atomic64_fetch_##op(long long i, atomic64_t *v)	\
{								\
	smp_mb();						\
	return _atomic64_fetch_##op(&v->counter, i);		\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
{
	smp_mb();
	return _atomic64_xchg_add(&v->counter, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(&v->counter, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
					long long u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, long long n)
{
	_atomic64_xchg(&v->counter, n);
}
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
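
/*
 * Together these give the usual reference-counting idiom (illustrative
 * sketch; "obj", "use" and "free_obj" are hypothetical):
 *
 *   if (atomic64_inc_not_zero(&obj->refs))      // get: only if live
 *           use(obj);
 *   ...
 *   if (atomic64_dec_and_test(&obj->refs))      // put: last reference?
 *           free_obj(obj);
 */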

#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)
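
/*
 * Worked example: with 64 KB pages (PAGE_SHIFT == 16),
 * ATOMIC_HASH_SHIFT == 13, so atomic_locks[] has 1 << 13 == 8192
 * entries of 4 bytes each == 32 KB, i.e. exactly the PAGE_SIZE / 8
 * upper bound argued for above and well under the PAGE_SIZE / 4
 * single-page limit.
 */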

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm_32.S" to
 * keep assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock for the given address. */
int *__atomic_hashed_lock(volatile void *v);
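
/*
 * A minimal sketch of the sort of computation the out-of-line
 * implementation performs (the real routine lives under arch/tile/lib/;
 * the exact shift and masking here are an assumption, not the
 * definitive code):
 *
 *   int *__atomic_hashed_lock(volatile void *v)
 *   {
 *           unsigned long ptr = (unsigned long)v;
 *
 *           // discard low alignment bits, then index the
 *           // power-of-two atomic_locks[] hash table
 *           return &atomic_locks[(ptr >> 2) & (ATOMIC_HASH_SIZE - 1)];
 *   }
 */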

/* Private helper routines in lib/atomic_asm_32.S */
struct __get_user {
	unsigned long val;
	int err;
};
extern struct __get_user __atomic32_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
					long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
					long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
					int *lock, long long o, long long n);
extern long long __atomic64_fetch_and(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_or(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_xor(volatile long long *p, int *lock, long long n);

/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */