/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low level functions for atomic operations
 *
 * Copyright IBM Corp. 1999, 2016
 */

#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

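/*
 * With the interlocked-access facility (z196 and newer), the LOAD AND
 * ADD/AND/OR/EXCLUSIVE OR instructions (laa/lan/lao/lax, plus the 64-bit
 * laag/lang/laog/laxg forms) perform the whole read-modify-write
 * atomically in a single instruction and return the old value.
 */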
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier)	\
static inline op_type op_name(op_type val, op_type *ptr)	\
{								\
	op_type old;						\
								\
	asm volatile(						\
		op_string " %[old],%[val],%[ptr]\n"		\
		op_barrier					\
		: [old] "=d" (old), [ptr] "+Q" (*ptr)		\
		: [val] "d" (val) : "cc", "memory");		\
	return old;						\
}								\

#define __ATOMIC_OPS(op_name, op_type, op_string)		\
	__ATOMIC_OP(op_name, op_type, op_string, "\n")		\
	__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
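/*
 * "bcr 14,0" is the fast-BCR-serialization idiom and acts as a full
 * memory barrier on machines that have the fast-BCR-serialization
 * facility (z196 and newer). The non-_barrier variants emit no
 * serialization at all.
 */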

__ATOMIC_OPS(__atomic_add, int, "laa")
__ATOMIC_OPS(__atomic_and, int, "lan")
__ATOMIC_OPS(__atomic_or, int, "lao")
__ATOMIC_OPS(__atomic_xor, int, "lax")

__ATOMIC_OPS(__atomic64_add, long, "laag")
__ATOMIC_OPS(__atomic64_and, long, "lang")
__ATOMIC_OPS(__atomic64_or, long, "laog")
__ATOMIC_OPS(__atomic64_xor, long, "laxg")
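/*
 * Example (illustrative, not part of this header): each expansion yields
 * a fetch-style primitive that returns the previous value:
 *
 *	int cnt = 0;
 *	int old = __atomic_add(1, &cnt);  // old == 0, cnt == 1
 */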

#undef __ATOMIC_OPS
#undef __ATOMIC_OP

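/*
 * ASI/AGSI add a signed 8-bit immediate directly to the int/long in
 * storage. With the interlocked-access facility the update is performed
 * atomically; no old value is returned, so these suit counters whose
 * previous value is not needed.
 */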
static inline void __atomic_add_const(int val, int *ptr)
{
	asm volatile(
		" asi %[ptr],%[val]\n"
		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
}

static inline void __atomic64_add_const(long val, long *ptr)
{
	asm volatile(
		" agsi %[ptr],%[val]\n"
		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

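/*
 * Pre-z196 fallback: build each atomic op from a COMPARE AND SWAP loop.
 * The "0" (*ptr) input ties the initial load of *ptr to %[old]; the loop
 * copies old into new, applies the operation, and retries the CS until
 * no other CPU has changed *ptr in between. CS is itself serializing,
 * so the _barrier variants expand to the same code.
 */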
#define __ATOMIC_OP(op_name, op_string)				\
static inline int op_name(int val, int *ptr)			\
{								\
	int old, new;						\
								\
	asm volatile(						\
		"0: lr %[new],%[old]\n"				\
		op_string " %[new],%[val]\n"			\
		" cs %[old],%[new],%[ptr]\n"			\
		" jl 0b"					\
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
		: [val] "d" (val), "0" (*ptr) : "cc", "memory");\
	return old;						\
}

#define __ATOMIC_OPS(op_name, op_string)			\
	__ATOMIC_OP(op_name, op_string)				\
	__ATOMIC_OP(op_name##_barrier, op_string)

__ATOMIC_OPS(__atomic_add, "ar")
__ATOMIC_OPS(__atomic_and, "nr")
__ATOMIC_OPS(__atomic_or, "or")
__ATOMIC_OPS(__atomic_xor, "xr")

#undef __ATOMIC_OPS

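/* 64-bit counterparts of the loop above, using LGR and CSG. */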
#define __ATOMIC64_OP(op_name, op_string)			\
static inline long op_name(long val, long *ptr)			\
{								\
	long old, new;						\
								\
	asm volatile(						\
		"0: lgr %[new],%[old]\n"			\
		op_string " %[new],%[val]\n"			\
		" csg %[old],%[new],%[ptr]\n"			\
		" jl 0b"					\
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
		: [val] "d" (val), "0" (*ptr) : "cc", "memory");\
	return old;						\
}

#define __ATOMIC64_OPS(op_name, op_string)			\
	__ATOMIC64_OP(op_name, op_string)			\
	__ATOMIC64_OP(op_name##_barrier, op_string)

__ATOMIC64_OPS(__atomic64_add, "agr")
__ATOMIC64_OPS(__atomic64_and, "ngr")
__ATOMIC64_OPS(__atomic64_or, "ogr")
__ATOMIC64_OPS(__atomic64_xor, "xgr")

#undef __ATOMIC64_OPS

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

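/*
 * The cmpxchg helpers rely on the GCC __sync builtins, which the
 * compiler expands to CS/CSG on s390 with full-barrier semantics. The
 * _val variants return the previous contents of *ptr; the _bool
 * variants return whether the swap succeeded.
 */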
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
	return __sync_val_compare_and_swap(ptr, old, new);
}

static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
	return __sync_bool_compare_and_swap(ptr, old, new);
}

static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
	return __sync_val_compare_and_swap(ptr, old, new);
}

static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new)
{
	return __sync_bool_compare_and_swap(ptr, old, new);
}
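/*
 * Usage sketch (illustrative, not part of this header): a hypothetical
 * atomic_max built on the cmpxchg-bool helper. The loop re-reads the
 * current value after a failed swap and stops once the stored value is
 * already large enough or the swap succeeds.
 *
 *	static inline void atomic_max(int *ptr, int val)
 *	{
 *		int old = *ptr;
 *
 *		while (old < val && !__atomic_cmpxchg_bool(ptr, old, val))
 *			old = *ptr;
 *	}
 */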

#endif /* __ARCH_S390_ATOMIC_OPS__ */