/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/* Normal writes in our arch don't clear lock reservations */

static inline void atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!p0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}
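
/*
 * Note on the loop above: since an ordinary store does not clear an
 * outstanding lock reservation on this arch (per the comment above),
 * atomic_set() does a dummy locked load into r6 and then a conditional
 * locked store, retrying until the store wins the reservation.
 */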

/**
 * atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_xchg - atomically exchange a value
 * @v: pointer to memory to change
 * @new: new value (technically passed in a register -- see xchg)
 *
 * Returns the old value.
 */
#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))
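
/*
 * Usage sketch (hypothetical caller, not part of this header): xchg
 * makes a race-free "claim it exactly once" flag.
 *
 *	static atomic_t claimed = ATOMIC_INIT(0);
 *
 *	if (atomic_xchg(&claimed, 1) == 0)
 *		do_one_time_setup();	// only one CPU gets here
 */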

/**
 * atomic_cmpxchg - atomic compare-and-exchange values
 * @v: pointer to value to change
 * @old: expected old value to match
 * @new: new value to put in
 *
 * Parameters are then pointer, value-in-register, value-in-register,
 * and the output is the old value.
 *
 * Archs without a load-locked/store-conditional pair like memw_locked
 * have a harder time here; for us it maps straight onto the hardware.
 *
 * This is the linchpin of the rest of the generically defined routines.
 * Remember that V2 had a bug with the dotnew predicate set by memw_locked.
 *
 * "old" is the expected old value; __oldval is the value actually read.
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ p0 = cmp.eq(%0,%2);\n"
		"	  if (!p0.new) jump:nt 2f; }\n"
		"	memw_locked(%1,p0) = %3;\n"
		"	if (!p0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);

	return __oldval;
}
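
/*
 * Usage sketch (hypothetical helper, not part of this header): the
 * usual cmpxchg retry loop, here computing a running maximum without
 * a lock.
 *
 *	static inline void atomic_max(atomic_t *v, int i)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < i) {
 *			int seen = atomic_cmpxchg(v, old, i);
 *			if (seen == old)
 *				break;		// our value went in
 *			old = seen;		// lost the race; retry
 *		}
 *	}
 */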

#define ATOMIC_OP(op)						\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	int output;						\
								\
	__asm__ __volatile__ (					\
		"1:	%0 = memw_locked(%1);\n"		\
		"	%0 = "#op "(%0,%2);\n"			\
		"	memw_locked(%1,p3) = %0;\n"		\
		"	if (!p3) jump 1b;\n"			\
		: "=&r" (output)				\
		: "r" (&v->counter), "r" (i)			\
		: "memory", "p3"				\
	);							\
}

#define ATOMIC_OP_RETURN(op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{								\
	int output;						\
								\
	__asm__ __volatile__ (					\
		"1:	%0 = memw_locked(%1);\n"		\
		"	%0 = "#op "(%0,%2);\n"			\
		"	memw_locked(%1,p3) = %0;\n"		\
		"	if (!p3) jump 1b;\n"			\
		: "=&r" (output)				\
		: "r" (&v->counter), "r" (i)			\
		: "memory", "p3"				\
	);							\
	return output;						\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
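
/*
 * For reference, ATOMIC_OPS(add) above expands to:
 *
 *	static inline void atomic_add(int i, atomic_t *v);
 *	static inline int atomic_add_return(int i, atomic_t *v);
 *
 * and ATOMIC_OPS(sub) likewise provides atomic_sub()/atomic_sub_return().
 * The *_return variants hand back the post-operation value.
 */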

/**
 * __atomic_add_unless - add unless the value matches a given number
 * @v: pointer to value
 * @a: amount to add
 * @u: don't add if the value is already equal to this
 *
 * Returns the old value.
 */

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if (!p3) jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (&v->counter), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
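
/*
 * Usage sketch (hypothetical caller and refcnt field, not part of this
 * header): the classic "take a reference unless the object is already
 * dying" pattern, which atomic_inc_not_zero() below builds on via
 * atomic_add_unless():
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;	// refcount already hit zero
 */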

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)

#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
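
/*
 * Usage sketch (hypothetical object and free_obj(), not part of this
 * header): the usual reference-put idiom built on the helpers above.
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_obj(obj);	// we dropped the last reference
 */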

#endif /* _ASM_ATOMIC_H */