#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 * linux/include/asm-m32r/bitops.h
 *
 * Copyright 1992, Linus Torvalds.
 *
 * M32R version:
 * Copyright (C) 2001, 2002  Hitoshi Yamamoto
 * Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/assembler.h>
#include <asm/byteorder.h>
#include <asm/dcache_clear.h>
#include <asm/types.h>
#include <asm/barrier.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
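
/*
 * For illustration only (not part of this header's API): with the
 * numbering above, bit 35 of a bitmap lands in bit position 3 of its
 * second 32-bit word, since 35 >> 5 == 1 and 35 & 0x1F == 3:
 *
 *	unsigned long map[2] = { 0, 0 };
 *	set_bit(35, map);
 *
 * leaves map[1] == 0x00000008.
 */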

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);			/* step to the word holding the bit */
	mask = (1 << (nr & 0x1F));	/* bit's position within that word */

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"or %0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
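
/*
 * Illustrative use (hypothetical 'dev->dma_in_use' bitmap, not defined
 * here): atomically claiming a flag that other CPUs or interrupt
 * handlers may touch concurrently:
 *
 *	set_bit(channel, dev->dma_in_use);
 *
 * When the bitmap is only ever accessed under a lock the caller already
 * holds, the cheaper non-atomic __set_bit() is sufficient.
 */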

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);

	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"and %0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (~mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
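
/*
 * A minimal sketch of the unlock pattern described above (hypothetical
 * LOCK_BIT and 'state' bitmap, not defined in this header): the barrier
 * makes the critical section's stores visible before the bit clears.
 *
 *	smp_mb__before_atomic();
 *	clear_bit(LOCK_BIT, &state);
 */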

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"xor %0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;	\n\t"
		"mv %1, %0;		\n\t"
		"and %0, %3;		\n\t"
		"or %1, %3;		\n\t"
		M32R_UNLOCK" %1, @%2;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
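
/*
 * A minimal bit-lock acquisition sketch using the barrier-implying
 * test_and_set_bit() (hypothetical LOCK_BIT and 'state'); real code
 * should normally prefer test_and_set_bit_lock()/clear_bit_unlock()
 * from <asm-generic/bitops/lock.h>, included below:
 *
 *	while (test_and_set_bit(LOCK_BIT, &state))
 *		cpu_relax();
 *
 * This spins until the bit's old value was 0, i.e. the lock was free.
 */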

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);

	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%3")
		M32R_LOCK" %0, @%3;	\n\t"
		"mv %1, %0;		\n\t"
		"and %0, %2;		\n\t"
		"not %2, %2;		\n\t"
		"and %1, %2;		\n\t"
		M32R_UNLOCK" %1, @%3;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
		: "r" (a)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;	\n\t"
		"mv %1, %0;		\n\t"
		"and %0, %3;		\n\t"
		"xor %1, %3;		\n\t"
		M32R_UNLOCK" %1, @%2;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */