#ifndef __ASM_METAG_ATOMIC_LOCK1_H
#define __ASM_METAG_ATOMIC_LOCK1_H

#define ATOMIC_INIT(i)	{ (i) }

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/global_lock.h>

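/*
 * No lock is needed for a plain read: this relies on a single aligned
 * word load being atomic by itself, and READ_ONCE() prevents the
 * compiler from tearing or re-reading the access.
 */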
static inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/*
 * atomic_set needs to take the lock to protect atomic_add_unless from a
 * possible race, as atomic_add_unless reads the counter twice:
 *
 *  CPU0                               CPU1
 *  atomic_add_unless(1, 0)
 *    ret = v->counter (non-zero)
 *    if (ret != u)                    v->counter = 0
 *      v->counter += 1 (counter set to 1)
 *
 * Making atomic_set take the lock ensures that ordering and logical
 * consistency are preserved.
 */
static inline int atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	__global_lock1(flags);
	fence();
	v->counter = i;
	__global_unlock1(flags);
	return i;
}

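/*
 * The read-modify-write ops are generated from the three templates
 * below.  Each one serialises on the same global lock (__global_lock1)
 * as atomic_set() above, so all atomics in the system exclude one
 * another.
 */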
#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	fence();							\
	v->counter c_op i;						\
	__global_unlock1(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long result;						\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	result = v->counter;						\
	result c_op i;							\
	fence();							\
	v->counter = result;						\
	__global_unlock1(flags);					\
									\
	return result;							\
}

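/*
 * ATOMIC_FETCH_OP differs from ATOMIC_OP_RETURN only in what it
 * returns: the counter is sampled before c_op is applied, so callers
 * get the old value rather than the new one.
 */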
#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long result;						\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	result = v->counter;						\
	fence();							\
	v->counter c_op i;						\
	__global_unlock1(flags);					\
									\
	return result;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

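/*
 * For illustration, ATOMIC_OPS(add, +=) above defines atomic_add(),
 * atomic_add_return() and atomic_fetch_add().  atomic_add() expands to
 * roughly:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		__global_lock1(flags);
 *		fence();
 *		v->counter += i;
 *		__global_unlock1(flags);
 *	}
 *
 * The bitwise ops below reuse the same templates but, as in the
 * generic atomic API, have no _return variants.
 */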
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

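/*
 * atomic_cmpxchg() returns the value it found, whether or not the store
 * happened.  A typical caller retries until the compare succeeds, e.g.
 * (sketch):
 *
 *	int old;
 *	do {
 *		old = atomic_read(v);
 *	} while (atomic_cmpxchg(v, old, old + 1) != old);
 */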
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	ret = v->counter;
	if (ret == old) {
		fence();
		v->counter = new;
	}
	__global_unlock1(flags);

	return ret;
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

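/*
 * __atomic_add_unless() adds a to v unless the counter equals u.  It
 * returns the old value in either case, so the caller can compare
 * against u to tell whether the add happened; the generic
 * atomic_add_unless() wrapper is (__atomic_add_unless(v, a, u) != u).
 */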
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	ret = v->counter;
	if (ret != u) {
		fence();
		v->counter += a;
	}
	__global_unlock1(flags);

	return ret;
}

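/*
 * Subtract i from v only if the result stays non-negative.  The
 * (possibly negative) result of the subtraction is returned either way;
 * atomic_dec_if_positive() in asm/atomic.h is built on top of this.
 */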
static inline int atomic_sub_if_positive(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	ret = v->counter - i;
	if (ret >= 0) {
		fence();
		v->counter = ret;
	}
	__global_unlock1(flags);

	return ret;
}

#endif /* __ASM_METAG_ATOMIC_LOCK1_H */