/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(&v->counter, i);
}
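
/*
 * Illustrative use, a minimal sketch with a hypothetical counter
 * (ATOMIC_INIT comes from the generic atomic headers):
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_add(3, &nr_events);	-> nr_events.counter is now 3
 */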

#define ATOMIC_OP(op)							\
unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	_atomic_##op((unsigned long *)&v->counter, i);			\
}

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OP
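
/*
 * For reference, ATOMIC_OP(and) above expands mechanically to:
 *
 *	unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask);
 *	static inline void atomic_and(int i, atomic_t *v)
 *	{
 *		_atomic_and((unsigned long *)&v->counter, i);
 *	}
 */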

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(&v->counter, i) + i;
}
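
/*
 * Illustrative semantics, with hypothetical values:
 *
 *	atomic_t v = ATOMIC_INIT(5);
 *	int r = atomic_add_return(3, &v);	-> v is now 8, r == 8
 */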

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(&v->counter, a, u);
}
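
/*
 * Illustrative semantics, with hypothetical values; note that the
 * return is the old value, not a success flag:
 *
 *	atomic_t v = ATOMIC_INIT(1);
 *	int old = __atomic_add_unless(&v, 1, 0);	-> v becomes 2, old == 1
 *
 * Had v held 0 (i.e. matched @u), it would have been left untouched
 * and old would be 0.
 */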

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(&v->counter, n);
}
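
/*
 * The race described above, were atomic_set() a plain store (these
 * atomics are built on load/store sequences guarded by the hashed
 * locks declared in the internal section of this header):
 *
 *	CPU 0: atomic_add		CPU 1: plain store
 *	load v->counter
 *					v->counter = n	(would be lost)
 *	store counter + i
 *
 * Routing the store through _atomic_xchg() takes the same lock as the
 * other ops, so it serializes correctly with them.
 */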

/* A 64-bit atomic type */

typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline long long atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((long long *)&v->counter, 0);
}
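
/*
 * Why two plain 32-bit loads would not do: another CPU's update could
 * land between them and the caller would see a torn value. A
 * hypothetical interleaving:
 *
 *	counter holds 0x00000000ffffffffLL
 *	CPU 0 reads the low half	-> 0xffffffff
 *	CPU 1 adds 1; counter is now 0x0000000100000000LL
 *	CPU 0 reads the high half	-> 0x00000001
 *
 * CPU 0 would reconstruct 0x00000001ffffffff, a value that was never
 * stored. Exchanging-in an addend of zero instead reads both halves
 * under the lock and returns the old (i.e. current) value unchanged.
 */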

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(long long i, atomic64_t *v)
{
	_atomic64_xchg_add(&v->counter, i);
}

#define ATOMIC64_OP(op)						\
long long _atomic64_##op(long long *v, long long n);		\
static inline void atomic64_##op(long long i, atomic64_t *v)	\
{								\
	_atomic64_##op(&v->counter, i);				\
}

ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)

#undef ATOMIC64_OP

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(&v->counter, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
					long long u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
}
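
/*
 * Note the contrast with the 32-bit __atomic_add_unless() above: this
 * returns a success flag, not the old value. With hypothetical values:
 *
 *	atomic64_t v = ATOMIC64_INIT(0);
 *	atomic64_add_unless(&v, 1LL, 0LL);	-> v stays 0, returns 0
 *	atomic64_set(&v, 5LL);
 *	atomic64_add_unless(&v, 1LL, 0LL);	-> v becomes 6, returns non-zero
 */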

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, long long n)
{
	_atomic64_xchg(&v->counter, n);
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
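
/*
 * Illustrative use of the derived helpers, sketching a hypothetical
 * refcounted object (use_object/free_object are not real functions):
 *
 *	static atomic64_t refs = ATOMIC64_INIT(1);
 *
 *	if (atomic64_inc_not_zero(&refs)) {
 *		use_object();
 *		if (atomic64_dec_and_test(&refs))
 *			free_object();
 *	}
 */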


#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)
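
/*
 * Worked example, assuming the common tile configuration of 64 KB
 * pages (PAGE_SHIFT == 16):
 *
 *	ATOMIC_HASH_SHIFT = 16 - 3 = 13
 *	ATOMIC_HASH_SIZE  = 1 << 13 = 8192 entries
 *	8192 entries * 4 bytes = 32 KB, i.e. PAGE_SIZE / 2, within the
 *	one-page limit described above.
 */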

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to keep
 * assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock for the given address. */
int *__atomic_hashed_lock(volatile void *v);
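
/*
 * A minimal sketch of the kind of hashing this implies; the real
 * routine lives in the support code and may differ in detail:
 *
 *	unsigned long ptr = (unsigned long)v;
 *	return &atomic_locks[(ptr >> 2) & (ATOMIC_HASH_SIZE - 1)];
 *
 * i.e. drop the low alignment bits of the address and mask the rest
 * down to an index into the lock table.
 */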
239 
240 /* Private helper routines in lib/atomic_asm_32.S */
241 struct __get_user {
242 	unsigned long val;
243 	int err;
244 };
245 extern struct __get_user __atomic_cmpxchg(volatile int *p,
246 					  int *lock, int o, int n);
247 extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
248 extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
249 extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
250 						  int *lock, int o, int n);
251 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
252 extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
253 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
254 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
255 extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
256 					long long o, long long n);
257 extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
258 extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
259 					long long n);
260 extern long long __atomic64_xchg_add_unless(volatile long long *p,
261 					int *lock, long long o, long long n);
262 extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
263 extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
264 extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
265 
266 /* Return failure from the atomic wrappers. */
267 struct __get_user __atomic_bad_address(int __user *addr);
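
/*
 * Illustrative calling pattern for the struct __get_user helpers
 * (hypothetical; err is nonzero if the access faulted, val holds the
 * old value at *p):
 *
 *	struct __get_user gu = __atomic_xchg_add(p, lock, n);
 *	if (gu.err)
 *		return gu.err;
 *	return gu.val;
 */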

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */