/*
 * Copyright (c) 2016 Cyril Hrubis <chrubis@suse.cz>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TST_ATOMIC_H__
#define TST_ATOMIC_H__

#include "config.h"
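
/*
 * tst_atomic_add_return() atomically adds i to *v and returns the
 * resulting value of *v; tst_atomic_inc() and tst_atomic_dec() at the
 * end of this header are convenience wrappers around it.
 */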

#if HAVE_SYNC_ADD_AND_FETCH == 1
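/*
 * __sync_add_and_fetch() is the GCC/Clang __sync builtin; it performs
 * the addition atomically, acts as a full barrier and returns the new
 * value, which matches the semantics expected here.
 */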
static inline int tst_atomic_add_return(int i, int *v)
{
	return __sync_add_and_fetch(v, i);
}

#elif defined(__i386__) || defined(__x86_64__)
static inline int tst_atomic_add_return(int i, int *v)
{
	int __ret = i;

	/*
	 * taken from arch/x86/include/asm/cmpxchg.h
	 * Since we always pass an int-sized parameter, we can simplify it
	 * and cherry-pick only that specific case.
	 *
	switch (sizeof(*v)) {
	case 1:
		asm volatile ("lock; xaddb %b0, %1\n"
			: "+q" (__ret), "+m" (*v) : : "memory", "cc");
		break;
	case 2:
		asm volatile ("lock; xaddw %w0, %1\n"
			: "+r" (__ret), "+m" (*v) : : "memory", "cc");
		break;
	case 4:
		asm volatile ("lock; xaddl %0, %1\n"
			: "+r" (__ret), "+m" (*v) : : "memory", "cc");
		break;
	case 8:
		asm volatile ("lock; xaddq %q0, %1\n"
			: "+r" (__ret), "+m" (*v) : : "memory", "cc");
		break;
	default:
		__xadd_wrong_size();
	}
	*/
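	/*
	 * lock xadd exchanges __ret with *v and stores the sum, so __ret
	 * ends up holding the old value of *v while *v holds old + i;
	 * adding i back below therefore yields the new value.
	 */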
	asm volatile ("lock; xaddl %0, %1\n"
		: "+r" (__ret), "+m" (*v) : : "memory", "cc");

	return i + __ret;
}

#elif defined(__powerpc__) || defined(__powerpc64__)
static inline int tst_atomic_add_return(int i, int *v)
{
	int t;

	/* taken from arch/powerpc/include/asm/atomic.h */
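	/*
	 * lwarx/stwcx. are a load-reserve/store-conditional pair: the loop
	 * retries until the conditional store succeeds, and the two sync
	 * instructions act as the memory barriers around the update.
	 */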
	asm volatile(
		"	sync\n"
		"1:	lwarx	%0,0,%2		# atomic_add_return\n"
		"	add	%0,%1,%0\n"
		"	stwcx.	%0,0,%2\n"
		"	bne-	1b\n"
		"	sync\n"
		: "=&r" (t)
		: "r" (i), "r" (v)
		: "cc", "memory");

	return t;
}

#elif defined(__s390__) || defined(__s390x__)
static inline int tst_atomic_add_return(int i, int *v)
{
	int old_val, new_val;

	/* taken from arch/s390/include/asm/atomic.h */
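	/*
	 * cs (compare and swap) stores new_val only if *v still equals
	 * old_val; on failure old_val is reloaded from *v and jl retries
	 * the loop.
	 */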
	asm volatile(
		"	l	%0,%2\n"
		"0:	lr	%1,%0\n"
		"	ar	%1,%3\n"
		"	cs	%0,%1,%2\n"
		"	jl	0b"
		: "=&d" (old_val), "=&d" (new_val), "+Q" (*v)
		: "d" (i)
		: "cc", "memory");

	return old_val + i;
}

#elif defined(__arc__)

/* Only ARCv2 defines the SMP barriers; on ARC700 smp_mb() expands to nothing */
#ifdef __ARC700__
#define smp_mb()
#else
#define smp_mb()	asm volatile("dmb 3\n" : : : "memory")
#endif

static inline int tst_atomic_add_return(int i, int *v)
{
	unsigned int val;

	smp_mb();

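	/*
	 * llock/scond are a load-locked/store-conditional pair; bnz
	 * retries the loop until the conditional store succeeds.
	 */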
	asm volatile(
		"1:	llock   %[val], [%[ctr]]	\n"
		"	add     %[val], %[val], %[i]	\n"
		"	scond   %[val], [%[ctr]]	\n"
		"	bnz     1b			\n"
		: [val] "=&r" (val)
		: [ctr] "r" (v),
		  [i] "ir" (i)
		: "cc", "memory");

	smp_mb();

	return val;
}

#elif defined(__aarch64__)
static inline int tst_atomic_add_return(int i, int *v)
{
	unsigned long tmp;
	int result;

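	/*
	 * ldxr/stlxr are an exclusive load/store-release pair; cbnz
	 * retries until the exclusive store succeeds and the trailing
	 * dmb ish orders the update against later accesses.
	 */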
	__asm__ __volatile__(
		"	prfm	pstl1strm, %2\n"
		"1:	ldxr	%w0, %2\n"
		"	add	%w0, %w0, %w3\n"
		"	stlxr	%w1, %w0, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish\n"
		: "=&r" (result), "=&r" (tmp), "+Q" (*v)
		: "Ir" (i)
		: "memory");

	return result;
}

#else /* HAVE_SYNC_ADD_AND_FETCH == 1 */
# error Your compiler does not provide __sync_add_and_fetch and LTP\
	implementation is missing for your architecture.
#endif

static inline int tst_atomic_inc(int *v)
{
	return tst_atomic_add_return(1, v);
}

static inline int tst_atomic_dec(int *v)
{
	return tst_atomic_add_return(-1, v);
}
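
/*
 * Example usage, a minimal sketch only; the counter, NUM_WORKERS and
 * the helpers below are hypothetical and not part of this header:
 *
 *	static int workers_done;
 *
 *	static void worker_finished(void)
 *	{
 *		int done = tst_atomic_inc(&workers_done);
 *
 *		if (done == NUM_WORKERS)
 *			all_workers_finished();
 *	}
 *
 * tst_atomic_inc() returns the value after the increment, so the last
 * worker to finish sees done == NUM_WORKERS exactly once.
 */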

#endif /* TST_ATOMIC_H__ */