/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

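/*
 * atomic_read() - read the 32-bit counter.
 * A single "l" (load) instruction is used, so the read of the aligned
 * counter is atomic; no memory barrier is implied.
 */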
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

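/*
 * atomic_set() - set the 32-bit counter.
 * A single "st" (store) instruction is used; like atomic_read() this is
 * atomic for the aligned counter but implies no barrier.
 */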
static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

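/*
 * atomic_add_return() adds @i and returns the *new* counter value,
 * while atomic_fetch_add() adds @i and returns the *old* value.  Both
 * are built on __atomic_add_barrier(), which returns the previous
 * counter value and provides full memory-barrier semantics.
 */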
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter) + i;
}

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter);
}

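/*
 * atomic_add() - add @i to the counter without returning a value.
 * On z196 and newer machines a compile-time constant in the range
 * -128..127 is applied via __atomic_add_const(), which can use an
 * add-immediate instruction instead of loading the addend into a
 * register first.
 */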
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/*
	 * Order of conditions is important to circumvent gcc 10 bug:
	 * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
	 */
	if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
		__atomic_add_const(i, &v->counter);
		return;
	}
#endif
	__atomic_add(i, &v->counter);
}

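/* Subtraction is implemented as addition of the negated value. */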
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)

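/*
 * ATOMIC_OPS() generates the bitwise operations: atomic_<op>() applies
 * the operation without returning a value, atomic_fetch_<op>() returns
 * the old counter value and uses the barrier variant of the helper.
 */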
#define ATOMIC_OPS(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__atomic_##op(i, &v->counter);					\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	return __atomic_##op##_barrier(i, &v->counter);		\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

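/*
 * atomic_xchg() unconditionally replaces the counter and returns the
 * old value; atomic_cmpxchg() replaces it only if it still contains
 * @old and returns the value found, so the caller can detect whether
 * the exchange happened.
 */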
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __atomic_cmpxchg(&v->counter, old, new);
}

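/*
 * Illustrative use only (not part of this header): a typical
 * compare-and-swap retry loop built on atomic_read()/atomic_cmpxchg(),
 * e.g. "increment the counter only if it is non-zero":
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&v);
 *		if (!old)
 *			break;
 *		new = old + 1;
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */
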
#define ATOMIC64_INIT(i)  { (i) }

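/*
 * The atomic64_* operations mirror the 32-bit API above, using 64-bit
 * loads/stores ("lg"/"stg") and s64 values.  The counter is passed to
 * the __atomic64_* helpers through (long *) casts because those
 * helpers operate on long.
 */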
static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}

static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter);
}

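/*
 * atomic64_add() - 64-bit counterpart of atomic_add(), including the
 * add-immediate shortcut for compile-time constants in -128..127.
 */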
static inline void atomic64_add(s64 i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/*
	 * Order of conditions is important to circumvent gcc 10 bug:
	 * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
	 */
	if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
		__atomic64_add_const(i, (long *)&v->counter);
		return;
	}
#endif
	__atomic64_add(i, (long *)&v->counter);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return __atomic64_cmpxchg((long *)&v->counter, old, new);
}

#define ATOMIC64_OPS(op)						\
static inline void atomic64_##op(s64 i, atomic64_t *v)			\
{									\
	__atomic64_##op(i, (long *)&v->counter);			\
}									\
static inline long atomic64_fetch_##op(s64 i, atomic64_t *v)		\
{									\
	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

#define atomic64_sub_return(_i, _v) atomic64_add_return(-(s64)(_i), _v)
#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(s64)(_i), _v)
#define atomic64_sub(_i, _v) atomic64_add(-(s64)(_i), _v)

#endif /* __ARCH_S390_ATOMIC__ */