#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#if defined (__mcfisaaplus__) || defined (__mcfisac__)
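/*
 * ColdFire ISA A+ and ISA C cores provide the bitrev and ff1
 * instructions: bitrev reverses the bit order of a data register,
 * and ff1 returns the number of leading zero bits (counting down
 * from bit 31).  Together they locate the least significant set
 * bit in constant time.
 */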
static inline int ffs(unsigned int val)
{
	if (!val)
		return 0;

	asm volatile(
		"bitrev %0\n\t"
		"ff1 %0\n\t"
		: "=d" (val)
		: "0" (val));
	val++;
	return val;
}

static inline int __ffs(unsigned int val)
{
	asm volatile(
		"bitrev %0\n\t"
		"ff1 %0\n\t"
		: "=d" (val)
		: "0" (val));
	return val;
}

#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

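/*
 * The bset/bclr/bchg instructions operate on one byte of memory,
 * addressing the bit as (nr & 7) within it.  Bit numbers within a
 * long are little-endian while the CPU is big-endian, so
 * "(nr ^ 31) >> 3" selects the byte that actually holds bit nr.
 * These uniprocessor parts execute bset/bclr/bchg as a single
 * read-modify-write instruction, which is why the "non-atomic"
 * double-underscore variants below simply alias the atomic ones.
 */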
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     : "cc");
#endif
}

#define __set_bit(nr, addr) set_bit(nr, addr)

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     : "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     : "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)

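/*
 * The test_and_* routines rely on bset/bclr/bchg setting the Z
 * condition code from the old value of the addressed bit; "sne %0"
 * then fills retval with 0xff if the bit was previously set and
 * with 0x00 if it was clear.
 */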
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
	int	*a = (int *) addr;
	int	mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))

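/*
 * Usage example for the bit operations above: with
 *
 *	unsigned long map[2] = { 0, 0 };
 *
 * set_bit(33, map) sets bit 1 of map[1]; test_bit(33, map) then
 * returns nonzero, and test_and_clear_bit(33, map) returns nonzero
 * exactly once.
 */
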
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

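/*
 * The ext2_* operations below use little-endian bit numbering
 * regardless of host byte order, matching the on-disk format of
 * ext2 bitmaps, so the byte holding bit nr is simply
 * addr[nr >> 3] with no ^31 fixup.
 */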
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

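/*
 * btst only reads the tested byte, so ext2_test_bit below passes
 * the memory operand as a plain input ("m") instead of "+m".
 */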
static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
	     : "=d" (retval)
	     : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
	     : "=d" (retval)
	     : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}

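/*
 * Example of the byte-order fixup above: a little-endian bitmap
 * whose only set bit is bit 0 has byte 0 == 0x01, which this
 * big-endian CPU loads as the word 0x01000000; __swab32 restores
 * 0x00000001, so ffz sees the bits in little-endian order.
 */
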
#define ext2_find_next_bit(addr, size, off) \
	generic_find_next_le_bit((unsigned long *)(addr), (size), (off))
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */