#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
 * Copyright 1992, Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>

/*
 * Require 68020 or better.
 *
 * They use the standard big-endian m680x0 bit ordering.
 */

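/*
 * Usage sketch (illustrative, not from the original header): bit
 * numbering follows the generic Linux convention, i.e. bit 0 is the
 * least significant bit of the first longword.  On this big-endian CPU
 * that bit lives in the last byte of the 32-bit word, which is why the
 * constant-nr helpers below address it via (nr ^ 31) / 8:
 *
 *	unsigned long map[2] = { 0, 0 };	// map is a placeholder
 *
 *	set_bit(0, map);	// map[0] == 0x00000001
 *	set_bit(33, map);	// map[1] == 0x00000002
 *	test_bit(33, map);	// returns 1
 */
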
#define test_and_set_bit(nr,vaddr) \
  (__builtin_constant_p(nr) ? \
   __constant_test_and_set_bit(nr, vaddr) : \
   __generic_test_and_set_bit(nr, vaddr))

#define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr)

static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}

static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}

#define set_bit(nr,vaddr) \
  (__builtin_constant_p(nr) ? \
   __constant_set_bit(nr, vaddr) : \
   __generic_set_bit(nr, vaddr))

#define __set_bit(nr,vaddr) set_bit(nr,vaddr)

static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	__asm__ __volatile__ ("bset %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}

static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfset %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}

#define test_and_clear_bit(nr,vaddr) \
  (__builtin_constant_p(nr) ? \
   __constant_test_and_clear_bit(nr, vaddr) : \
   __generic_test_and_clear_bit(nr, vaddr))

#define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr)

static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}

static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
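
/*
 * Usage sketch (illustrative, not from the original header): when a
 * cleared bit must be ordered against surrounding memory accesses on
 * SMP, wrap clear_bit() with the barrier macros above (SOME_FLAG and
 * flags are placeholders):
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(SOME_FLAG, &flags);
 *	smp_mb__after_clear_bit();
 */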

#define clear_bit(nr,vaddr) \
  (__builtin_constant_p(nr) ? \
   __constant_clear_bit(nr, vaddr) : \
   __generic_clear_bit(nr, vaddr))
#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)

static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	__asm__ __volatile__ ("bclr %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}

static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}

#define test_and_change_bit(nr,vaddr) \
  (__builtin_constant_p(nr) ? \
   __constant_test_and_change_bit(nr, vaddr) : \
   __generic_test_and_change_bit(nr, vaddr))

#define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr)
#define __change_bit(nr,vaddr) change_bit(nr,vaddr)

static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}

static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}

#define change_bit(nr,vaddr) \
  (__builtin_constant_p(nr) ? \
   __constant_change_bit(nr, vaddr) : \
   __generic_change_bit(nr, vaddr))

static inline void __constant_change_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	__asm__ __volatile__ ("bchg %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}

static inline void __generic_change_bit(int nr, unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}

static inline int test_bit(int nr, const unsigned long *vaddr)
{
	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}

static inline int find_first_zero_bit(const unsigned long *vaddr,
				      unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned long num;

	if (!size)
		return 0;

	size = (size + 31) >> 5;
	while (!(num = ~*p++)) {
		if (!--size)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	return ((long)p - (long)vaddr - 4) * 8 + res;
}

static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
				     int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = ~*p++ & (~0UL << bit);
		offset -= bit;

		/* Look for zero in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32)
			return offset + (res ^ 31);
		offset += 32;
	}
	/* No zero yet, search remaining full longwords for a zero */
	res = find_first_zero_bit(p, size - ((long)p - (long)vaddr) * 8);
	return offset + res;
}

static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned long num;

	if (!size)
		return 0;

	size = (size + 31) >> 5;
	while (!(num = *p++)) {
		if (!--size)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	return ((long)p - (long)vaddr - 4) * 8 + res;
}

static inline int find_next_bit(const unsigned long *vaddr, int size,
				int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = *p++ & (~0UL << bit);
		offset -= bit;

		/* Look for one in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32)
			return offset + (res ^ 31);
		offset += 32;
	}
	/* No set bit yet, search remaining full longwords for a set bit */
	res = find_first_bit(p, size - ((long)p - (long)vaddr) * 8);
	return offset + res;
}
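
/*
 * Usage sketch (illustrative, not from the original header): iterating
 * over the set bits of a bitmap with the find helpers above (map,
 * nbits, i and do_something() are placeholders):
 *
 *	for (i = find_first_bit(map, nbits); i < nbits;
 *	     i = find_next_bit(map, nbits, i + 1))
 *		do_something(i);
 */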

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	int res;

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (~word & -~word));
	return res ^ 31;
}
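
/*
 * Example (illustrative, not from the original header): per the comment
 * above, guard against the all-ones case before calling ffz()
 * (word and bit are placeholders):
 *
 *	if (word != ~0UL)
 *		bit = ffz(word);	// e.g. ffz(0x0000ffff) == 16
 */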

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

static inline int ffs(int x)
{
	int cnt;

	asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x));

	return 32 - cnt;
}
#define __ffs(x) (ffs(x) - 1)
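
/*
 * Examples (illustrative, not from the original header): ffs() is
 * 1-based and returns 0 when no bit is set, while __ffs() is 0-based
 * and undefined for x == 0:
 *
 *	ffs(0)      == 0
 *	ffs(1)      == 1
 *	ffs(0x10)   == 5
 *	__ffs(0x10) == 4
 */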

/*
 * fls: find last bit set.
 */

static inline int fls(int x)
{
	int cnt;

	asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x));

	return 32 - cnt;
}

static inline int __fls(int x)
{
	return fls(x) - 1;
}
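
/*
 * Examples (illustrative, not from the original header): fls() returns
 * the 1-based position of the most significant set bit, or 0 when no
 * bit is set; __fls() is 0-based and undefined for x == 0:
 *
 *	fls(0)      == 0
 *	fls(1)      == 1
 *	fls(0x18)   == 5
 *	__fls(0x18) == 4
 */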

#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

/* Bitmap functions for the minix filesystem */

static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size)
{
	const unsigned short *p = vaddr, *addr = vaddr;
	int res;
	unsigned short num;

	if (!size)
		return 0;

	size = (size >> 4) + ((size & 15) > 0);
	while (*p++ == 0xffff)
	{
		if (--size == 0)
			return (p - addr) << 4;
	}

	num = ~*--p;
	__asm__ __volatile__ ("bfffo %1{#16,#16},%0"
			      : "=d" (res) : "d" (num & -num));
	return ((p - addr) << 4) + (res ^ 31);
}

#define minix_test_and_set_bit(nr, addr)	__test_and_set_bit((nr) ^ 16, (unsigned long *)(addr))
#define minix_set_bit(nr,addr)			__set_bit((nr) ^ 16, (unsigned long *)(addr))
#define minix_test_and_clear_bit(nr, addr)	__test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr))
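
/*
 * Note (illustrative, not from the original header): the minix helpers
 * treat the bitmap as an array of 16-bit words (see minix_test_bit()
 * below).  XOR-ing the bit number with 16 swaps the two halfwords of
 * each 32-bit longword, so the long-based helpers above address the
 * same bit as minix_test_bit() on this big-endian CPU:
 *
 *	unsigned short map[2] = { 0, 0 };	// map is a placeholder
 *	minix_set_bit(0, map);			// sets bit 0 of map[0]
 *	minix_test_bit(0, map);			// returns 1
 */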

static inline int minix_test_bit(int nr, const void *vaddr)
{
	const unsigned short *p = vaddr;
	return (p[nr >> 4] & (1U << (nr & 15))) != 0;
}

/* Bitmap functions for the ext2 filesystem. */

#define ext2_set_bit(nr, addr)			__test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_set_bit_atomic(lock, nr, addr)	test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_clear_bit(nr, addr)		__test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr)	test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
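
/*
 * Note (illustrative, not from the original header): ext2 stores its
 * bitmaps in little-endian byte order on disk.  XOR-ing the bit number
 * with 24 swaps the byte order within each 32-bit word, so ext2 bit n
 * lands in byte n / 8 at bit n % 8, matching ext2_test_bit() below:
 *
 *	unsigned long word = 0;		// word is a placeholder
 *	ext2_set_bit(0, &word);		// sets bit 0 of the first byte
 *	ext2_test_bit(0, &word);	// returns 1
 */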

static inline int ext2_test_bit(int nr, const void *vaddr)
{
	const unsigned char *p = vaddr;
	return (p[nr >> 3] & (1U << (nr & 7))) != 0;
}

static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size)
{
	const unsigned long *p = vaddr, *addr = vaddr;
	int res;

	if (!size)
		return 0;

	size = (size >> 5) + ((size & 31) > 0);
	while (*p++ == ~0UL)
	{
		if (--size == 0)
			return (p - addr) << 5;
	}

	--p;
	for (res = 0; res < 32; res++)
		if (!ext2_test_bit (res, p))
			break;
	return (p - addr) * 32 + res;
}

static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size,
					  unsigned offset)
{
	const unsigned long *addr = vaddr;
	const unsigned long *p = addr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		/* Look for zero in first longword */
		for (res = bit; res < 32; res++)
			if (!ext2_test_bit (res, p))
				return (p - addr) * 32 + res;
		p++;
	}
	/* No zero yet, search remaining full longwords for a zero */
	res = ext2_find_first_zero_bit (p, size - 32 * (p - addr));
	return (p - addr) * 32 + res;
}

static inline int ext2_find_first_bit(const void *vaddr, unsigned size)
{
	const unsigned long *p = vaddr, *addr = vaddr;
	int res;

	if (!size)
		return 0;

	size = (size >> 5) + ((size & 31) > 0);
	while (*p++ == 0UL) {
		if (--size == 0)
			return (p - addr) << 5;
	}

	--p;
	for (res = 0; res < 32; res++)
		if (ext2_test_bit(res, p))
			break;
	return (p - addr) * 32 + res;
}

static inline int ext2_find_next_bit(const void *vaddr, unsigned size,
				     unsigned offset)
{
	const unsigned long *addr = vaddr;
	const unsigned long *p = addr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		/* Look for one in first longword */
		for (res = bit; res < 32; res++)
			if (ext2_test_bit(res, p))
				return (p - addr) * 32 + res;
		p++;
	}
	/* No set bit yet, search remaining full longwords for a set bit */
	res = ext2_find_first_bit(p, size - 32 * (p - addr));
	return (p - addr) * 32 + res;
}

#endif /* __KERNEL__ */

#endif /* _M68K_BITOPS_H */