#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 *  linux/include/asm-m32r/bitops.h
 *
 *  Copyright 1992, Linus Torvalds.
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/assembler.h>
#include <asm/byteorder.h>
#include <asm/dcache_clear.h>
#include <asm/types.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. The test_and_*() operations return 0 if
 * the bit was clear before the operation and != 0 if it was not; the
 * plain set/clear/change operations return nothing.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
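
/*
 * Illustrative sketch of the numbering above (the bitmap array is
 * hypothetical): with 32-bit words, bit @nr lives in word (nr >> 5) at
 * position (nr & 0x1F), which is exactly the arithmetic every operation
 * below performs.
 *
 *	__u32 bitmap[2] = { 0, 0 };
 *	set_bit(37, bitmap);	// sets bit 5 of bitmap[1]: now 0x20
 */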

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;		\n\t"
		"or	%0, %2;			\n\t"
		M32R_UNLOCK" %0, @%1;		\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
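
/*
 * Usage sketch (the @word variable is hypothetical): set_bit() works on
 * any naturally aligned bitmap; it masks local interrupts around the
 * locked read-modify-write, so the caller needs no extra locking for
 * the bit update itself.
 *
 *	static unsigned long word;
 *	set_bit(3, &word);	// word |= 0x8, atomically
 */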

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);

	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;		\n\t"
		"and	%0, %2;			\n\t"
		M32R_UNLOCK" %0, @%1;		\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (~mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
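
/*
 * Usage sketch (lock_word is hypothetical): when clear_bit() releases a
 * bit used as a lock, pair it with the barrier macros above, since
 * clear_bit() itself contains no memory barrier.
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(0, &lock_word);	// release the lock bit
 */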

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;		\n\t"
		"xor	%0, %2;			\n\t"
		M32R_UNLOCK" %0, @%1;		\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
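
/*
 * Usage sketch (status is hypothetical): toggle a bit atomically.
 *
 *	static unsigned long status;
 *	change_bit(7, &status);	// status ^= 0x80, atomically
 */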

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;		\n\t"
		"mv	%1, %0;			\n\t"
		"and	%0, %3;			\n\t"
		"or	%1, %3;			\n\t"
		M32R_UNLOCK" %1, @%2;		\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
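
/*
 * Usage sketch (lock_word is hypothetical): a minimal try-lock.  The
 * return value reports the bit's previous state, so zero means the
 * caller just acquired the bit.
 *
 *	if (!test_and_set_bit(0, &lock_word)) {
 *		// bit was clear: we now own it
 *	}
 */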

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);

	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%3")
		M32R_LOCK" %0, @%3;		\n\t"
		"mv	%1, %0;			\n\t"
		"and	%0, %2;			\n\t"
		"not	%2, %2;			\n\t"
		"and	%1, %2;			\n\t"
		M32R_UNLOCK" %1, @%3;		\n\t"
		: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
		: "r" (a)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
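
/*
 * Usage sketch (PENDING_BIT, flags_word and handle_pending() are all
 * hypothetical): consume a pending flag exactly once, even if several
 * paths race to clear it.
 *
 *	if (test_and_clear_bit(PENDING_BIT, &flags_word))
 *		handle_pending();	// runs for exactly one winner
 */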

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;		\n\t"
		"mv	%1, %0;			\n\t"
		"and	%0, %3;			\n\t"
		"xor	%1, %3;			\n\t"
		M32R_UNLOCK" %1, @%2;		\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
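
/*
 * Usage sketch (parity_word is hypothetical): flip a bit and observe
 * its previous value in one atomic step.
 *
 *	int was_set = test_and_change_bit(1, &parity_word);
 */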

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */