/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_METAG_ATOMIC_LOCK1_H
#define __ASM_METAG_ATOMIC_LOCK1_H

#define ATOMIC_INIT(i)	{ (i) }

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/global_lock.h>

static inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/*
 * atomic_set needs to take the lock to protect atomic_add_unless from a
 * possible race, as it reads the counter twice:
 *
 *  CPU0                               CPU1
 *  atomic_add_unless(1, 0)
 *    ret = v->counter (non-zero)
 *    if (ret != u)                    v->counter = 0
 *      v->counter += 1 (counter set to 1)
 *
 * Making atomic_set take the lock ensures that ordering and logical
 * consistency is preserved.
 */
static inline int atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	__global_lock1(flags);
	fence();
	v->counter = i;
	__global_unlock1(flags);
	return i;
}
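
/*
 * Illustrative sketch (not part of the original header): how the race
 * described above resolves once atomic_set() takes lock1.  Assume two
 * CPUs and a counter that starts at 1:
 *
 *	atomic_t v = ATOMIC_INIT(1);
 *
 *	CPU0                                CPU1
 *	__atomic_add_unless(&v, 1, 0)
 *	  __global_lock1(flags)
 *	  ret = v.counter (1)               atomic_set(&v, 0)
 *	  ret != 0, so v.counter += 1         spins in __global_lock1()
 *	  __global_unlock1(flags)             v.counter = 0
 *
 * Because both sides now hold lock1 around their read-modify-write,
 * CPU1's store can no longer land between CPU0's read and write.
 */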

#define atomic_set_release(v, i) atomic_set((v), (i))

#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	fence();							\
	v->counter c_op i;						\
	__global_unlock1(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long result;						\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	result = v->counter;						\
	result c_op i;							\
	fence();							\
	v->counter = result;						\
	__global_unlock1(flags);					\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long result;						\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	result = v->counter;						\
	fence();							\
	v->counter c_op i;						\
	__global_unlock1(flags);					\
									\
	return result;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
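
/*
 * Expansion sketch (for reference, not in the original source): the two
 * ATOMIC_OPS() lines above generate six functions.  ATOMIC_OPS(add, +=),
 * for example, produces:
 *
 *	static inline void atomic_add(int i, atomic_t *v);
 *	static inline int atomic_add_return(int i, atomic_t *v);
 *	static inline int atomic_fetch_add(int i, atomic_t *v);
 *
 * each performing "v->counter += i" under lock1.  atomic_add_return()
 * hands back the new value, atomic_fetch_add() the old one.
 */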

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	ret = v->counter;
	if (ret == old) {
		fence();
		v->counter = new;
	}
	__global_unlock1(flags);

	return ret;
}
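
/*
 * Usage sketch (illustrative only): atomic_cmpxchg() returns the value it
 * observed, so callers can build further read-modify-write operations as
 * a retry loop.  A hypothetical saturating increment might look like:
 *
 *	static inline void atomic_inc_saturating(atomic_t *v)
 *	{
 *		int old, c = atomic_read(v);
 *
 *		while (c != INT_MAX) {
 *			old = atomic_cmpxchg(v, c, c + 1);
 *			if (old == c)
 *				break;
 *			c = old;
 *		}
 *	}
 *
 * When the compare fails, "old" already carries the fresh counter value
 * for the next attempt, so no extra atomic_read() is needed.
 */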

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	ret = v->counter;
	if (ret != u) {
		fence();
		v->counter += a;
	}
	__global_unlock1(flags);

	return ret;
}
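
/*
 * Context note (based on the generic include/linux/atomic.h of this era):
 * callers normally reach this through the atomic_add_unless() wrapper,
 * which turns the returned old value into a success flag:
 *
 *	atomic_add_unless(v, a, u)  =>  __atomic_add_unless(v, a, u) != u
 *	atomic_inc_not_zero(v)      =>  atomic_add_unless((v), 1, 0)
 *
 * i.e. the add happens, and "true" comes back, only when the counter was
 * not already equal to u.
 */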

static inline int atomic_sub_if_positive(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	ret = v->counter - i;
	if (ret >= 0) {
		/* only write back when the result stays non-negative */
		fence();
		v->counter = ret;
	}
	__global_unlock1(flags);

	return ret;
}
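
/*
 * Usage sketch (assumption based on the metag tree; see asm/atomic.h,
 * which wraps this as atomic_dec_if_positive(v) == atomic_sub_if_positive(1, v)):
 * a caller can rely on the counter never dropping below zero, e.g. with a
 * hypothetical "sem_count":
 *
 *	if (atomic_dec_if_positive(&sem_count) >= 0)
 *		... acquired a slot ...
 *	else
 *		... counter was already 0 and was left untouched ...
 */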

#endif /* __ASM_METAG_ATOMIC_LOCK1_H */