/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline unsigned long __xchg_case_##name(unsigned long x,	\
					       volatile void *ptr)	\
{									\
	unsigned long ret, tmp;						\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ld" #acq "xr" #sz "\t%" #w "0, %2\n"			\
	"	st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n"		\
	"	cbnz	%w1, 1b\n"					\
	"	" #mb,							\
	/* LSE atomics */						\
	"	nop\n"							\
	"	nop\n"							\
	"	swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n"	\
	"	nop\n"							\
	"	" #nop_lse)						\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr)	\
	: "r" (x)							\
	: cl);								\
									\
	return ret;							\
}

__XCHG_CASE(w, b,     1,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     2,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     4,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_1,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_2,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_4,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_1,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_2,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_4,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_1, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_2, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_4, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE

#define __XCHG_GEN(sfx)							\
static inline unsigned long __xchg##sfx(unsigned long x,		\
					volatile void *ptr,		\
					int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __xchg_case##sfx##_1(x, ptr);			\
	case 2:								\
		return __xchg_case##sfx##_2(x, ptr);			\
	case 4:								\
		return __xchg_case##sfx##_4(x, ptr);			\
	case 8:								\
		return __xchg_case##sfx##_8(x, ptr);			\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})

/* xchg */
#define xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
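
/*
 * Illustrative note (not part of the original header): the wrappers above
 * dispatch on operand size at compile time.  For a hypothetical caller
 *
 *	u32 v = 0;
 *	u32 old = xchg(&v, 1);
 *
 * xchg() expands to __xchg_wrapper( _mb, &v, 1), which calls
 * __xchg_mb(1, &v, sizeof(*(&v))) and so ends up in __xchg_case_mb_4():
 * a full-barrier exchange, emitted as swpal when LSE atomics are patched
 * in, or as an ldxr/stlxr loop followed by dmb ish in the LL/SC fallback.
 * Any unsupported operand size trips BUILD_BUG() at compile time.
 */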

#define __CMPXCHG_GEN(sfx)						\
static inline unsigned long __cmpxchg##sfx(volatile void *ptr,		\
					   unsigned long old,		\
					   unsigned long new,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_1(ptr, (u8)old, new);	\
	case 2:								\
		return __cmpxchg_case##sfx##_2(ptr, (u16)old, new);	\
	case 4:								\
		return __cmpxchg_case##sfx##_4(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})

/* cmpxchg */
#define cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define cmpxchg_local		cmpxchg_relaxed

/* cmpxchg64 */
#define cmpxchg64_relaxed	cmpxchg_relaxed
#define cmpxchg64_acquire	cmpxchg_acquire
#define cmpxchg64_release	cmpxchg_release
#define cmpxchg64		cmpxchg
#define cmpxchg64_local		cmpxchg_local

/* cmpxchg_double */
#define system_has_cmpxchg_double()	1

#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})

#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)				\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),	\
				     (unsigned long)(n1), (unsigned long)(n2),	\
				     ptr1);					\
	__ret;									\
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)			\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),	\
				  (unsigned long)(n1), (unsigned long)(n2),	\
				  ptr1);					\
	__ret;									\
})

/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable();					\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable();					\
	__ret;							\
})

#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable();						\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable();						\
	__ret;								\
})

#endif	/* __ASM_CMPXCHG_H */