1 #ifndef __ALSA_IATOMIC_H
2 #define __ALSA_IATOMIC_H
3
4 #if defined(__i386__) || defined(__x86_64__)
5
6 /*
7 * Atomic operations that C can't guarantee us. Useful for
8 * resource counting etc.
9 */
10
11 #define ATOMIC_SMP_LOCK "lock ; "
12
13 /*
14 * Make sure gcc doesn't try to be clever and move things around
15 * on us. We need to use _exactly_ the address the user gave us,
16 * not some alias that contains the same information.
17 */
18 typedef struct { volatile int counter; } atomic_t;
19
20 #define ATOMIC_INIT(i) { (i) }
21
22 /**
23 * atomic_read - read atomic variable
24 * @v: pointer of type atomic_t
25 *
26 * Atomically reads the value of @v. Note that the guaranteed
27 * useful range of an atomic_t is only 24 bits.
28 */
29 #define atomic_read(v) ((v)->counter)
30
31 /**
32 * atomic_set - set atomic variable
33 * @v: pointer of type atomic_t
34 * @i: required value
35 *
36 * Atomically sets the value of @v to @i. Note that the guaranteed
37 * useful range of an atomic_t is only 24 bits.
38 */
39 #define atomic_set(v,i) (((v)->counter) = (i))
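
/*
 * Example (illustrative only, not part of this header): a counter is
 * normally initialized statically with ATOMIC_INIT() or at run time
 * with atomic_set().  The variable name below is hypothetical.
 *
 *	static atomic_t active_streams = ATOMIC_INIT(0);
 *
 *	atomic_set(&active_streams, 10);
 *	if (atomic_read(&active_streams) > 0) {
 *		...
 *	}
 */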
40
41 /**
42 * atomic_add - add integer to atomic variable
43 * @i: integer value to add
44 * @v: pointer of type atomic_t
45 *
46 * Atomically adds @i to @v. Note that the guaranteed useful range
47 * of an atomic_t is only 24 bits.
48 */
49 static __inline__ void atomic_add(int i, atomic_t *v)
50 {
51 __asm__ __volatile__(
52 ATOMIC_SMP_LOCK "addl %1,%0"
53 :"=m" (v->counter)
54 :"ir" (i), "m" (v->counter));
55 }
56
57 /**
58 * atomic_sub - subtract integer from atomic variable
59 * @i: integer value to subtract
60 * @v: pointer of type atomic_t
61 *
62 * Atomically subtracts @i from @v. Note that the guaranteed
63 * useful range of an atomic_t is only 24 bits.
64 */
65 static __inline__ void atomic_sub(int i, atomic_t *v)
66 {
67 __asm__ __volatile__(
68 ATOMIC_SMP_LOCK "subl %1,%0"
69 :"=m" (v->counter)
70 :"ir" (i), "m" (v->counter));
71 }
72
73 /**
74 * atomic_sub_and_test - subtract value from variable and test result
75 * @i: integer value to subtract
76 * @v: pointer of type atomic_t
77 *
78 * Atomically subtracts @i from @v and returns
79 * true if the result is zero, or false for all
80 * other cases. Note that the guaranteed
81 * useful range of an atomic_t is only 24 bits.
82 */
83 static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
84 {
85 unsigned char c;
86
87 __asm__ __volatile__(
88 ATOMIC_SMP_LOCK "subl %2,%0; sete %1"
89 :"=m" (v->counter), "=qm" (c)
90 :"ir" (i), "m" (v->counter) : "memory");
91 return c;
92 }
93
94 /**
95 * atomic_inc - increment atomic variable
96 * @v: pointer of type atomic_t
97 *
98 * Atomically increments @v by 1. Note that the guaranteed
99 * useful range of an atomic_t is only 24 bits.
100 */
101 static __inline__ void atomic_inc(atomic_t *v)
102 {
103 __asm__ __volatile__(
104 ATOMIC_SMP_LOCK "incl %0"
105 :"=m" (v->counter)
106 :"m" (v->counter));
107 }
108
109 /**
110 * atomic_dec - decrement atomic variable
111 * @v: pointer of type atomic_t
112 *
113 * Atomically decrements @v by 1. Note that the guaranteed
114 * useful range of an atomic_t is only 24 bits.
115 */
116 static __inline__ void atomic_dec(atomic_t *v)
117 {
118 __asm__ __volatile__(
119 ATOMIC_SMP_LOCK "decl %0"
120 :"=m" (v->counter)
121 :"m" (v->counter));
122 }
123
124 /**
125 * atomic_dec_and_test - decrement and test
126 * @v: pointer of type atomic_t
127 *
128 * Atomically decrements @v by 1 and
129 * returns true if the result is 0, or false for all other
130 * cases. Note that the guaranteed
131 * useful range of an atomic_t is only 24 bits.
132 */
133 static __inline__ int atomic_dec_and_test(atomic_t *v)
134 {
135 unsigned char c;
136
137 __asm__ __volatile__(
138 ATOMIC_SMP_LOCK "decl %0; sete %1"
139 :"=m" (v->counter), "=qm" (c)
140 :"m" (v->counter) : "memory");
141 return c != 0;
142 }
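
/*
 * Example (illustrative sketch, not part of the ALSA API): atomic_inc()
 * and atomic_dec_and_test() are the usual building blocks for reference
 * counting; the refcount starts at ATOMIC_INIT(1) for the creating owner.
 * The structure and function names below are hypothetical.
 *
 *	struct shared_buf {
 *		atomic_t refcount;
 *		void (*release)(struct shared_buf *buf);
 *	};
 *
 *	static inline void shared_buf_get(struct shared_buf *buf)
 *	{
 *		atomic_inc(&buf->refcount);
 *	}
 *
 *	static inline void shared_buf_put(struct shared_buf *buf)
 *	{
 *		if (atomic_dec_and_test(&buf->refcount))
 *			buf->release(buf);
 *	}
 */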
143
144 /**
145 * atomic_inc_and_test - increment and test
146 * @v: pointer of type atomic_t
147 *
148 * Atomically increments @v by 1
149 * and returns true if the result is zero, or false for all
150 * other cases. Note that the guaranteed
151 * useful range of an atomic_t is only 24 bits.
152 */
153 static __inline__ int atomic_inc_and_test(atomic_t *v)
154 {
155 unsigned char c;
156
157 __asm__ __volatile__(
158 ATOMIC_SMP_LOCK "incl %0; sete %1"
159 :"=m" (v->counter), "=qm" (c)
160 :"m" (v->counter) : "memory");
161 return c != 0;
162 }
163
164 /**
165 * atomic_add_negative - add and test if negative
166 * @v: pointer of type atomic_t
167 * @i: integer value to add
168 *
169 * Atomically adds @i to @v and returns true
170 * if the result is negative, or false when
171 * result is greater than or equal to zero. Note that the guaranteed
172 * useful range of an atomic_t is only 24 bits.
173 */
174 static __inline__ int atomic_add_negative(int i, atomic_t *v)
175 {
176 unsigned char c;
177
178 __asm__ __volatile__(
179 ATOMIC_SMP_LOCK "addl %2,%0; sets %1"
180 :"=m" (v->counter), "=qm" (c)
181 :"ir" (i), "m" (v->counter) : "memory");
182 return c;
183 }
184
185 /* These are x86-specific, used by some header files */
186 #define atomic_clear_mask(mask, addr) \
187 __asm__ __volatile__(ATOMIC_SMP_LOCK "andl %0,%1" \
188 : : "r" (~(mask)),"m" (*addr) : "memory")
189
190 #define atomic_set_mask(mask, addr) \
191 __asm__ __volatile__(ATOMIC_SMP_LOCK "orl %0,%1" \
192 : : "r" (mask),"m" (*addr) : "memory")
193
194 /*
195 * Force strict CPU ordering.
196 * And yes, this is required on UP too when we're talking
197 * to devices.
198 *
199 * For now, "wmb()" doesn't actually do anything, as all
200 * Intel CPUs follow what Intel calls a *Processor Order*,
201 * in which all writes are seen in the program order even
202 * outside the CPU.
203 *
204 * I expect future Intel CPUs to have a weaker ordering,
205 * but I'd also expect them to finally get their act together
206 * and add some real memory barriers if so.
207 */
208
209 #ifdef __i386__
210 #define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
211 #define rmb() mb()
212 #define wmb() __asm__ __volatile__ ("": : :"memory")
213 #else
214 #define mb() asm volatile("mfence":::"memory")
215 #define rmb() asm volatile("lfence":::"memory")
216 #define wmb() asm volatile("sfence":::"memory")
217 #endif
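
/*
 * Example (illustrative sketch, hypothetical variables): the classic
 * pairing of wmb() in a producer with rmb() in a consumer.  The producer
 * publishes the payload before the ready flag; the consumer checks the
 * flag before reading the payload.
 *
 *	producer:
 *		shared_data = value;
 *		wmb();
 *		data_ready = 1;
 *
 *	consumer:
 *		while (!data_ready)
 *			;
 *		rmb();
 *		consume(shared_data);
 */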
218
219 #undef ATOMIC_SMP_LOCK
220
221 #define IATOMIC_DEFINED 1
222
223 #endif /* __i386__ */
224
225 #ifdef __ia64__
226
227 /*
228 * On IA-64, counter must always be volatile to ensure that the
229 * memory accesses are ordered.
230 */
231 typedef struct { volatile int counter; } atomic_t;
232
233 #define ATOMIC_INIT(i) ((atomic_t) { (i) })
234
235 #define atomic_read(v) ((v)->counter)
236 #define atomic_set(v,i) (((v)->counter) = (i))
237
238 /* stripped version - we need only the 4-byte version */
239 #define ia64_cmpxchg(sem,ptr,old,new,size) \
240 ({ \
241 __typeof__(ptr) _p_ = (ptr); \
242 __typeof__(new) _n_ = (new); \
243 unsigned long _o_, _r_; \
244 _o_ = (unsigned int) (long) (old); \
245 __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \
246 __asm__ __volatile__ ("cmpxchg4."sem" %0=[%1],%2,ar.ccv" \
247 : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
248 (__typeof__(old)) _r_; \
249 })
250
251 static __inline__ int
252 ia64_atomic_add (int i, atomic_t *v)
253 {
254 int old, new;
255 // CMPXCHG_BUGCHECK_DECL
256
257 do {
258 // CMPXCHG_BUGCHECK(v);
259 old = atomic_read(v);
260 new = old + i;
261 } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
262 return new;
263 }
264
265 static __inline__ int
266 ia64_atomic_sub (int i, atomic_t *v)
267 {
268 int old, new;
269 // CMPXCHG_BUGCHECK_DECL
270
271 do {
272 // CMPXCHG_BUGCHECK(v);
273 old = atomic_read(v);
274 new = old - i;
275 } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
276 return new;
277 }
278
279 #define IA64_FETCHADD(tmp,v,n,sz) \
280 ({ \
281 switch (sz) { \
282 case 4: \
283 __asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2" \
284 : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
285 break; \
286 \
287 case 8: \
288 __asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2" \
289 : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
290 break; \
291 } \
292 })
293
294 #define ia64_fetch_and_add(i,v) \
295 ({ \
296 unsigned long _tmp; \
297 volatile __typeof__(*(v)) *_v = (v); \
298 switch (i) { \
299 case -16: IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v))); break; \
300 case -8: IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v))); break; \
301 case -4: IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v))); break; \
302 case -1: IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v))); break; \
303 case 1: IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v))); break; \
304 case 4: IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v))); break; \
305 case 8: IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v))); break; \
306 case 16: IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v))); break; \
307 } \
308 (__typeof__(*v)) (_tmp + (i)); /* return new value */ \
309 })
310
311 /*
312 * Atomically add I to V and return TRUE if the resulting value is
313 * negative.
314 */
315 static __inline__ int
316 atomic_add_negative (int i, atomic_t *v)
317 {
318 return ia64_atomic_add(i, v) < 0;
319 }
320
321 #define atomic_add_return(i,v) \
322 ((__builtin_constant_p(i) && \
323 ( (i == 1) || (i == 4) || (i == 8) || (i == 16) \
324 || (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
325 ? ia64_fetch_and_add(i, &(v)->counter) \
326 : ia64_atomic_add(i, v))
327
328 #define atomic_sub_return(i,v) \
329 ((__builtin_constant_p(i) && \
330 ( (i == 1) || (i == 4) || (i == 8) || (i == 16) \
331 || (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
332 ? ia64_fetch_and_add(-(i), &(v)->counter) \
333 : ia64_atomic_sub(i, v))
334
335 #define atomic_dec_return(v) atomic_sub_return(1, (v))
336 #define atomic_inc_return(v) atomic_add_return(1, (v))
337
338 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
339 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
340 #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
341
342 #define atomic_add(i,v) atomic_add_return((i), (v))
343 #define atomic_sub(i,v) atomic_sub_return((i), (v))
344 #define atomic_inc(v) atomic_add(1, (v))
345 #define atomic_dec(v) atomic_sub(1, (v))
346
347 /*
348 * Macros to force memory ordering. In these descriptions, "previous"
349 * and "subsequent" refer to program order; "visible" means that all
350 * architecturally visible effects of a memory access have occurred
351 * (at a minimum, this means the memory has been read or written).
352 *
353 * wmb(): Guarantees that all preceding stores to memory-
354 * like regions are visible before any subsequent
355 * stores and that all following stores will be
356 * visible only after all previous stores.
357 * rmb(): Like wmb(), but for reads.
358 * mb(): wmb()/rmb() combo, i.e., all previous memory
359 * accesses are visible before all subsequent
360 * accesses and vice versa. This is also known as
361 * a "fence."
362 *
363 * Note: "mb()" and its variants cannot be used as a fence to order
364 * accesses to memory mapped I/O registers. For that, mf.a needs to
365 * be used. However, we don't want to always use mf.a because (a)
366 * it's (presumably) much slower than mf and (b) mf.a is supported for
367 * sequential memory pages only.
368 */
369 #define mb() __asm__ __volatile__ ("mf" ::: "memory")
370 #define rmb() mb()
371 #define wmb() mb()
372
373 #define IATOMIC_DEFINED 1
374
375 #endif /* __ia64__ */
376
377 #ifdef __alpha__
378
379 /*
380 * Atomic operations that C can't guarantee us. Useful for
381 * resource counting etc...
382 *
383 * But use these as seldom as possible since they are much slower
384 * than regular operations.
385 */
386
387
388 /*
389 * Counter is volatile to make sure gcc doesn't try to be clever
390 * and move things around on us. We need to use _exactly_ the address
391 * the user gave us, not some alias that contains the same information.
392 */
393 typedef struct { volatile int counter; } atomic_t;
394
395 #define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
396
397 #define atomic_read(v) ((v)->counter)
398 #define atomic_set(v,i) ((v)->counter = (i))
399
400 /*
401 * To get proper branch prediction for the main line, we must branch
402 * forward to code at the end of this object's .text section, then
403 * branch back to restart the operation.
404 */
405
406 static __inline__ void atomic_add(int i, atomic_t * v)
407 {
408 unsigned long temp;
409 __asm__ __volatile__(
410 "1: ldl_l %0,%1\n"
411 " addl %0,%2,%0\n"
412 " stl_c %0,%1\n"
413 " beq %0,2f\n"
414 ".subsection 2\n"
415 "2: br 1b\n"
416 ".previous"
417 :"=&r" (temp), "=m" (v->counter)
418 :"Ir" (i), "m" (v->counter));
419 }
420
421 static __inline__ void atomic_sub(int i, atomic_t * v)
422 {
423 unsigned long temp;
424 __asm__ __volatile__(
425 "1: ldl_l %0,%1\n"
426 " subl %0,%2,%0\n"
427 " stl_c %0,%1\n"
428 " beq %0,2f\n"
429 ".subsection 2\n"
430 "2: br 1b\n"
431 ".previous"
432 :"=&r" (temp), "=m" (v->counter)
433 :"Ir" (i), "m" (v->counter));
434 }
435
436 /*
437 * Same as above, but return the result value
438 */
439 static __inline__ long atomic_add_return(int i, atomic_t * v)
440 {
441 long temp, result;
442 __asm__ __volatile__(
443 "1: ldl_l %0,%1\n"
444 " addl %0,%3,%2\n"
445 " addl %0,%3,%0\n"
446 " stl_c %0,%1\n"
447 " beq %0,2f\n"
448 " mb\n"
449 ".subsection 2\n"
450 "2: br 1b\n"
451 ".previous"
452 :"=&r" (temp), "=m" (v->counter), "=&r" (result)
453 :"Ir" (i), "m" (v->counter) : "memory");
454 return result;
455 }
456
457 static __inline__ long atomic_sub_return(int i, atomic_t * v)
458 {
459 long temp, result;
460 __asm__ __volatile__(
461 "1: ldl_l %0,%1\n"
462 " subl %0,%3,%2\n"
463 " subl %0,%3,%0\n"
464 " stl_c %0,%1\n"
465 " beq %0,2f\n"
466 " mb\n"
467 ".subsection 2\n"
468 "2: br 1b\n"
469 ".previous"
470 :"=&r" (temp), "=m" (v->counter), "=&r" (result)
471 :"Ir" (i), "m" (v->counter) : "memory");
472 return result;
473 }
474
475 #define atomic_dec_return(v) atomic_sub_return(1,(v))
476 #define atomic_inc_return(v) atomic_add_return(1,(v))
477
478 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
479 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
480
481 #define atomic_inc(v) atomic_add(1,(v))
482 #define atomic_dec(v) atomic_sub(1,(v))
483
484 #define mb() \
485 __asm__ __volatile__("mb": : :"memory")
486
487 #define rmb() \
488 __asm__ __volatile__("mb": : :"memory")
489
490 #define wmb() \
491 __asm__ __volatile__("wmb": : :"memory")
492
493 #define IATOMIC_DEFINED 1
494
495 #endif /* __alpha__ */
496
497 #ifdef __powerpc__
498
499 typedef struct { volatile int counter; } atomic_t;
500
501 #define ATOMIC_INIT(i) { (i) }
502
503 #define atomic_read(v) ((v)->counter)
504 #define atomic_set(v,i) (((v)->counter) = (i))
505
506 extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
507 extern void atomic_set_mask(unsigned long mask, unsigned long *addr);
508
509 #define SMP_ISYNC "\n\tisync"
510
511 static __inline__ void atomic_add(int a, atomic_t *v)
512 {
513 int t;
514
515 __asm__ __volatile__(
516 "1: lwarx %0,0,%3 # atomic_add\n\
517 add %0,%2,%0\n\
518 stwcx. %0,0,%3\n\
519 bne- 1b"
520 : "=&r" (t), "=m" (v->counter)
521 : "r" (a), "r" (&v->counter), "m" (v->counter)
522 : "cc");
523 }
524
525 static __inline__ int atomic_add_return(int a, atomic_t *v)
526 {
527 int t;
528
529 __asm__ __volatile__(
530 "1: lwarx %0,0,%2 # atomic_add_return\n\
531 add %0,%1,%0\n\
532 stwcx. %0,0,%2\n\
533 bne- 1b"
534 SMP_ISYNC
535 : "=&r" (t)
536 : "r" (a), "r" (&v->counter)
537 : "cc", "memory");
538
539 return t;
540 }
541
542 static __inline__ void atomic_sub(int a, atomic_t *v)
543 {
544 int t;
545
546 __asm__ __volatile__(
547 "1: lwarx %0,0,%3 # atomic_sub\n\
548 subf %0,%2,%0\n\
549 stwcx. %0,0,%3\n\
550 bne- 1b"
551 : "=&r" (t), "=m" (v->counter)
552 : "r" (a), "r" (&v->counter), "m" (v->counter)
553 : "cc");
554 }
555
556 static __inline__ int atomic_sub_return(int a, atomic_t *v)
557 {
558 int t;
559
560 __asm__ __volatile__(
561 "1: lwarx %0,0,%2 # atomic_sub_return\n\
562 subf %0,%1,%0\n\
563 stwcx. %0,0,%2\n\
564 bne- 1b"
565 SMP_ISYNC
566 : "=&r" (t)
567 : "r" (a), "r" (&v->counter)
568 : "cc", "memory");
569
570 return t;
571 }
572
573 static __inline__ void atomic_inc(atomic_t *v)
574 {
575 int t;
576
577 __asm__ __volatile__(
578 "1: lwarx %0,0,%2 # atomic_inc\n\
579 addic %0,%0,1\n\
580 stwcx. %0,0,%2\n\
581 bne- 1b"
582 : "=&r" (t), "=m" (v->counter)
583 : "r" (&v->counter), "m" (v->counter)
584 : "cc");
585 }
586
587 static __inline__ int atomic_inc_return(atomic_t *v)
588 {
589 int t;
590
591 __asm__ __volatile__(
592 "1: lwarx %0,0,%1 # atomic_inc_return\n\
593 addic %0,%0,1\n\
594 stwcx. %0,0,%1\n\
595 bne- 1b"
596 SMP_ISYNC
597 : "=&r" (t)
598 : "r" (&v->counter)
599 : "cc", "memory");
600
601 return t;
602 }
603
604 static __inline__ void atomic_dec(atomic_t *v)
605 {
606 int t;
607
608 __asm__ __volatile__(
609 "1: lwarx %0,0,%2 # atomic_dec\n\
610 addic %0,%0,-1\n\
611 stwcx. %0,0,%2\n\
612 bne- 1b"
613 : "=&r" (t), "=m" (v->counter)
614 : "r" (&v->counter), "m" (v->counter)
615 : "cc");
616 }
617
618 static __inline__ int atomic_dec_return(atomic_t *v)
619 {
620 int t;
621
622 __asm__ __volatile__(
623 "1: lwarx %0,0,%1 # atomic_dec_return\n\
624 addic %0,%0,-1\n\
625 stwcx. %0,0,%1\n\
626 bne- 1b"
627 SMP_ISYNC
628 : "=&r" (t)
629 : "r" (&v->counter)
630 : "cc", "memory");
631
632 return t;
633 }
634
635 #define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
636 #define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
637
638 /*
639 * Atomically test *v and decrement if it is greater than 0.
640 * The function returns the old value of *v minus 1.
641 */
642 static __inline__ int atomic_dec_if_positive(atomic_t *v)
643 {
644 int t;
645
646 __asm__ __volatile__(
647 "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
648 addic. %0,%0,-1\n\
649 blt- 2f\n\
650 stwcx. %0,0,%1\n\
651 bne- 1b"
652 SMP_ISYNC
653 "\n\
654 2:" : "=&r" (t)
655 : "r" (&v->counter)
656 : "cc", "memory");
657
658 return t;
659 }
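
/*
 * Example (illustrative only): atomic_dec_if_positive() can claim one
 * item from a counted pool without ever driving the counter negative.
 * The variable and function names below are hypothetical.
 *
 *	static atomic_t free_slots = ATOMIC_INIT(8);
 *
 *	static inline int claim_slot(void)
 *	{
 *		return atomic_dec_if_positive(&free_slots) >= 0;
 *	}
 *
 * A negative return value means the counter was already 0 and nothing
 * was claimed.
 */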
660
661 /*
662 * Memory barrier.
663 * The sync instruction guarantees that all memory accesses initiated
664 * by this processor have been performed (with respect to all other
665 * mechanisms that access memory). The eieio instruction is a barrier
666 * providing an ordering (separately) for (a) cacheable stores and (b)
667 * loads and stores to non-cacheable memory (e.g. I/O devices).
668 *
669 * mb() prevents loads and stores being reordered across this point.
670 * rmb() prevents loads being reordered across this point.
671 * wmb() prevents stores being reordered across this point.
672 *
673 * We can use the eieio instruction for wmb, but since it doesn't
674 * give any ordering guarantees about loads, we have to use the
675 * stronger but slower sync instruction for mb and rmb.
676 */
677 #define mb() __asm__ __volatile__ ("sync" : : : "memory")
678 #define rmb() __asm__ __volatile__ ("sync" : : : "memory")
679 #define wmb() __asm__ __volatile__ ("eieio" : : : "memory")
680
681 #define IATOMIC_DEFINED 1
682
683 #endif /* __powerpc__ */
684
685 #ifdef __mips__
686
687 typedef struct { volatile int counter; } atomic_t;
688
689 #define ATOMIC_INIT(i) { (i) }
690
691 /*
692 * atomic_read - read atomic variable
693 * @v: pointer of type atomic_t
694 *
695 * Atomically reads the value of @v. Note that the guaranteed
696 * useful range of an atomic_t is only 24 bits.
697 */
698 #define atomic_read(v) ((v)->counter)
699
700 /*
701 * atomic_set - set atomic variable
702 * @v: pointer of type atomic_t
703 * @i: required value
704 *
705 * Atomically sets the value of @v to @i. Note that the guaranteed
706 * useful range of an atomic_t is only 24 bits.
707 */
708 #define atomic_set(v,i) ((v)->counter = (i))
709
710 /*
711 * For MIPS II and better we can use the ll/sc instructions; kernels 2.4.3+
712 * will emulate them on MIPS I.
713 */
714
715 /*
716 * atomic_add - add integer to atomic variable
717 * @i: integer value to add
718 * @v: pointer of type atomic_t
719 *
720 * Atomically adds @i to @v. Note that the guaranteed useful range
721 * of an atomic_t is only 24 bits.
722 */
723 extern __inline__ void atomic_add(int i, atomic_t * v)
724 {
725 unsigned long temp;
726
727 __asm__ __volatile__(
728 ".set push \n"
729 ".set mips2 \n"
730 "1: ll %0, %1 # atomic_add\n"
731 " addu %0, %2 \n"
732 " sc %0, %1 \n"
733 " beqz %0, 1b \n"
734 ".set pop \n"
735 : "=&r" (temp), "=m" (v->counter)
736 : "Ir" (i), "m" (v->counter));
737 }
738
739 /*
740 * atomic_sub - subtract integer from atomic variable
741 * @i: integer value to subtract
742 * @v: pointer of type atomic_t
743 *
744 * Atomically subtracts @i from @v. Note that the guaranteed
745 * useful range of an atomic_t is only 24 bits.
746 */
747 extern __inline__ void atomic_sub(int i, atomic_t * v)
748 {
749 unsigned long temp;
750
751 __asm__ __volatile__(
752 ".set push \n"
753 ".set mips2 \n"
754 "1: ll %0, %1 # atomic_sub\n"
755 " subu %0, %2 \n"
756 " sc %0, %1 \n"
757 " beqz %0, 1b \n"
758 ".set pop \n"
759 : "=&r" (temp), "=m" (v->counter)
760 : "Ir" (i), "m" (v->counter));
761 }
762
763 /*
764 * Same as above, but return the result value
765 */
766 extern __inline__ int atomic_add_return(int i, atomic_t * v)
767 {
768 unsigned long temp, result;
769
770 __asm__ __volatile__(
771 ".set push # atomic_add_return\n"
772 ".set noreorder \n"
773 ".set mips2 \n"
774 "1: ll %1, %2 \n"
775 " addu %0, %1, %3 \n"
776 " sc %0, %2 \n"
777 " beqz %0, 1b \n"
778 " addu %0, %1, %3 \n"
779 ".set pop \n"
780 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
781 : "Ir" (i), "m" (v->counter)
782 : "memory");
783
784 return result;
785 }
786
787 extern __inline__ int atomic_sub_return(int i, atomic_t * v)
788 {
789 unsigned long temp, result;
790
791 __asm__ __volatile__(
792 ".set push \n"
793 ".set mips2 \n"
794 ".set noreorder # atomic_sub_return\n"
795 "1: ll %1, %2 \n"
796 " subu %0, %1, %3 \n"
797 " sc %0, %2 \n"
798 " beqz %0, 1b \n"
799 " subu %0, %1, %3 \n"
800 ".set pop \n"
801 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
802 : "Ir" (i), "m" (v->counter)
803 : "memory");
804
805 return result;
806 }
807
808 #define atomic_dec_return(v) atomic_sub_return(1,(v))
809 #define atomic_inc_return(v) atomic_add_return(1,(v))
810
811 /*
812 * atomic_sub_and_test - subtract value from variable and test result
813 * @i: integer value to subtract
814 * @v: pointer of type atomic_t
815 *
816 * Atomically subtracts @i from @v and returns
817 * true if the result is zero, or false for all
818 * other cases. Note that the guaranteed
819 * useful range of an atomic_t is only 24 bits.
820 */
821 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
822
823 /*
824 * atomic_inc_and_test - increment and test
825 * @v: pointer of type atomic_t
826 *
827 * Atomically increments @v by 1
828 * and returns true if the result is zero, or false for all
829 * other cases. Note that the guaranteed
830 * useful range of an atomic_t is only 24 bits.
831 */
832 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
833
834 /*
835 * atomic_dec_and_test - decrement by 1 and test
836 * @v: pointer of type atomic_t
837 *
838 * Atomically decrements @v by 1 and
839 * returns true if the result is 0, or false for all other
840 * cases. Note that the guaranteed
841 * useful range of an atomic_t is only 24 bits.
842 */
843 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
844
845 /*
846 * atomic_inc - increment atomic variable
847 * @v: pointer of type atomic_t
848 *
849 * Atomically increments @v by 1. Note that the guaranteed
850 * useful range of an atomic_t is only 24 bits.
851 */
852 #define atomic_inc(v) atomic_add(1,(v))
853
854 /*
855 * atomic_dec - decrement atomic variable
856 * @v: pointer of type atomic_t
857 *
858 * Atomically decrements @v by 1. Note that the guaranteed
859 * useful range of an atomic_t is only 24 bits.
860 */
861 #define atomic_dec(v) atomic_sub(1,(v))
862
863 /*
864 * atomic_add_negative - add and test if negative
865 * @v: pointer of type atomic_t
866 * @i: integer value to add
867 *
868 * Atomically adds @i to @v and returns true
869 * if the result is negative, or false when
870 * result is greater than or equal to zero. Note that the guaranteed
871 * useful range of an atomic_t is only 24 bits.
872 *
873 * Currently not implemented for MIPS.
874 */
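
/*
 * A possible definition in the same spirit as the other architectures
 * (sketch only; intentionally not enabled here, since the comment above
 * states it is not implemented for MIPS):
 *
 *	#define atomic_add_negative(i,v) (atomic_add_return((i), (v)) < 0)
 */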
875
876 #define mb() \
877 __asm__ __volatile__( \
878 "# prevent instructions being moved around\n\t" \
879 ".set\tnoreorder\n\t" \
880 "# 8 nops to fool the R4400 pipeline\n\t" \
881 "nop;nop;nop;nop;nop;nop;nop;nop\n\t" \
882 ".set\treorder" \
883 : /* no output */ \
884 : /* no input */ \
885 : "memory")
886 #define rmb() mb()
887 #define wmb() mb()
888
889 #define IATOMIC_DEFINED 1
890
891 #endif /* __mips__ */
892
893 #ifdef __arm__
894
895 /*
896 * FIXME: the code below is valid only for SA11xx
897 */
898
899 /*
900 * Save the current interrupt enable state & disable IRQs
901 */
902 #define local_irq_save(x) \
903 ({ \
904 unsigned long temp; \
905 __asm__ __volatile__( \
906 "mrs %0, cpsr @ local_irq_save\n" \
907 " orr %1, %0, #128\n" \
908 " msr cpsr_c, %1" \
909 : "=r" (x), "=r" (temp) \
910 : \
911 : "memory"); \
912 })
913
914 /*
915 * restore saved IRQ & FIQ state
916 */
917 #define local_irq_restore(x) \
918 __asm__ __volatile__( \
919 "msr cpsr_c, %0 @ local_irq_restore\n" \
920 : \
921 : "r" (x) \
922 : "memory")
923
924 #define __save_flags_cli(x) local_irq_save(x)
925 #define __restore_flags(x) local_irq_restore(x)
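
/*
 * Example (illustrative only): the save/restore pair brackets a short
 * critical section; this is the pattern the atomic operations below are
 * built on.
 *
 *	unsigned long flags;
 *
 *	__save_flags_cli(flags);
 *	... read-modify-write the shared variable ...
 *	__restore_flags(flags);
 */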
926
927 typedef struct { volatile int counter; } atomic_t;
928
929 #define ATOMIC_INIT(i) { (i) }
930
931 #define atomic_read(v) ((v)->counter)
932 #define atomic_set(v,i) (((v)->counter) = (i))
933
934 static __inline__ void atomic_add(int i, volatile atomic_t *v)
935 {
936 unsigned long flags;
937
938 __save_flags_cli(flags);
939 v->counter += i;
940 __restore_flags(flags);
941 }
942
943 static __inline__ void atomic_sub(int i, volatile atomic_t *v)
944 {
945 unsigned long flags;
946
947 __save_flags_cli(flags);
948 v->counter -= i;
949 __restore_flags(flags);
950 }
951
952 static __inline__ void atomic_inc(volatile atomic_t *v)
953 {
954 unsigned long flags;
955
956 __save_flags_cli(flags);
957 v->counter += 1;
958 __restore_flags(flags);
959 }
960
961 static __inline__ void atomic_dec(volatile atomic_t *v)
962 {
963 unsigned long flags;
964
965 __save_flags_cli(flags);
966 v->counter -= 1;
967 __restore_flags(flags);
968 }
969
970 static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
971 {
972 unsigned long flags;
973 int result;
974
975 __save_flags_cli(flags);
976 v->counter -= 1;
977 result = (v->counter == 0);
978 __restore_flags(flags);
979
980 return result;
981 }
982
983 static inline int atomic_add_negative(int i, volatile atomic_t *v)
984 {
985 unsigned long flags;
986 int result;
987
988 __save_flags_cli(flags);
989 v->counter += i;
990 result = (v->counter < 0);
991 __restore_flags(flags);
992
993 return result;
994 }
995
996 static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *addr)
997 {
998 unsigned long flags;
999
1000 __save_flags_cli(flags);
1001 *addr &= ~mask;
1002 __restore_flags(flags);
1003 }
1004
1005 #define mb() __asm__ __volatile__ ("" : : : "memory")
1006 #define rmb() mb()
1007 #define wmb() mb()
1008
1009 #define IATOMIC_DEFINED 1
1010
1011 #endif /* __arm__ */
1012
1013 #ifdef __sh__
1014
1015 typedef struct { volatile int counter; } atomic_t;
1016
1017 #define ATOMIC_INIT(i) { (i) }
1018
1019 #define atomic_read(v) ((v)->counter)
1020 #define atomic_set(v,i) (((v)->counter) = (i))
1021
1022 #define atomic_dec_return(v) atomic_sub_return(1,(v))
1023 #define atomic_inc_return(v) atomic_add_return(1,(v))
1024
1025 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
1026 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
1027 #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
1028
1029 #define atomic_add(i,v) atomic_add_return((i),(v))
1030 #define atomic_sub(i,v) atomic_sub_return((i),(v))
1031 #define atomic_inc(v) atomic_add(1,(v))
1032 #define atomic_dec(v) atomic_sub(1,(v))
1033
1034 static __inline__ int atomic_add_return(int i, volatile atomic_t *v)
1035 {
1036 int result;
1037
1038 asm volatile (
1039 " .align 2\n"
1040 " mova 99f, r0\n"
1041 " mov r15, r1\n"
1042 " mov #-6, r15\n"
1043 " mov.l @%2, %0\n"
1044 " add %1, %0\n"
1045 " mov.l %0, @%2\n"
1046 "99: mov r1, r15"
1047 : "=&r"(result)
1048 : "r"(i), "r"(v)
1049 : "r0", "r1");
1050
1051 return result;
1052 }
1053
1054 static __inline__ int atomic_sub_return(int i, volatile atomic_t *v)
1055 {
1056 int result;
1057
1058 asm volatile (
1059 " .align 2\n"
1060 " mova 99f, r0\n"
1061 " mov r15, r1\n"
1062 " mov #-6, r15\n"
1063 " mov.l @%2, %0\n"
1064 " sub %1, %0\n"
1065 " mov.l %0, @%2\n"
1066 "99: mov r1, r15"
1067 : "=&r"(result)
1068 : "r"(i), "r"(v)
1069 : "r0", "r1");
1070
1071 return result;
1072 }
1073
1074 #define mb() __asm__ __volatile__ ("" : : : "memory")
1075 #define rmb() mb()
1076 #define wmb() mb()
1077
1078 #define IATOMIC_DEFINED 1
1079
1080 #endif /* __sh__ */
1081
1082 #ifdef __bfin__
1083
1084 #include <bfin_fixed_code.h>
1085
1086 typedef struct { volatile int counter; } atomic_t;
1087
1088 #define ATOMIC_INIT(i) { (i) }
1089
1090 #define atomic_read(v) ((v)->counter)
1091 #define atomic_set(v,i) (((v)->counter) = (i))
1092 #define atomic_add(i,v) bfin_atomic_add32(&(v)->counter, i)
1093 #define atomic_sub(i,v) bfin_atomic_sub32(&(v)->counter, i)
1094 #define atomic_inc(v) bfin_atomic_inc32(&(v)->counter);
1095 #define atomic_dec(v) bfin_atomic_dec32(&(v)->counter);
1096
1097 #define mb() __asm__ __volatile__ ("" : : : "memory")
1098 #define rmb() mb()
1099 #define wmb() mb()
1100
1101 #define IATOMIC_DEFINED 1
1102
1103 #endif /* __bfin__ */
1104
1105 #ifndef IATOMIC_DEFINED
1106 /*
1107 * unsupported architecture.
1108 */
1109 #warning "Atomic operations are not supported on this architecture."
1110
1111 typedef struct { volatile int counter; } atomic_t;
1112
1113 #define ATOMIC_INIT(i) { (i) }
1114
1115 #define atomic_read(v) ((v)->counter)
1116 #define atomic_set(v,i) (((v)->counter) = (i))
1117 #define atomic_add(i,v) (((v)->counter) += (i))
1118 #define atomic_sub(i,v) (((v)->counter) -= (i))
1119 #define atomic_inc(v) (((v)->counter)++)
1120 #define atomic_dec(v) (((v)->counter)--)
1121
1122 #define mb()
1123 #define rmb()
1124 #define wmb()
1125
1126 #define IATOMIC_DEFINED 1
1127
1128 #endif /* IATOMIC_DEFINED */
1129
1130 /*
1131 * Atomic read/write
1132 * Copyright (c) 2001 by Abramo Bagnara <abramo@alsa-project.org>
1133 */
1134
1135 /* Max number of times we must spin on a spin-lock calling sched_yield().
1136 After MAX_SPIN_COUNT iterations, we put the calling thread to sleep. */
1137
1138 #ifndef MAX_SPIN_COUNT
1139 #define MAX_SPIN_COUNT 50
1140 #endif
1141
1142 /* Duration of sleep (in nanoseconds) when we can't acquire a spin-lock
1143 after MAX_SPIN_COUNT iterations of sched_yield().
1144 This MUST BE > 2ms.
1145 (Otherwise the kernel does busy-waiting for real-time threads,
1146 giving other threads no chance to run.) */
1147
1148 #ifndef SPIN_SLEEP_DURATION
1149 #define SPIN_SLEEP_DURATION 2000001
1150 #endif
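
/*
 * The two constants above describe a spin-then-sleep back-off.  A minimal
 * sketch of such a wait loop (illustrative only; this is not necessarily
 * how snd_atomic_read_wait() is implemented, and condition_is_true() is a
 * hypothetical placeholder):
 *
 *	#include <sched.h>
 *	#include <time.h>
 *
 *	unsigned int spins = 0;
 *	struct timespec ts = { 0, SPIN_SLEEP_DURATION };
 *
 *	while (!condition_is_true()) {
 *		if (++spins < MAX_SPIN_COUNT)
 *			sched_yield();
 *		else
 *			nanosleep(&ts, NULL);
 *	}
 */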
1151
1152 typedef struct {
1153 unsigned int begin, end;
1154 } snd_atomic_write_t;
1155
1156 typedef struct {
1157 volatile const snd_atomic_write_t *write;
1158 unsigned int end;
1159 } snd_atomic_read_t;
1160
1161 void snd_atomic_read_wait(snd_atomic_read_t *t);
1162
1163 static inline void snd_atomic_write_init(snd_atomic_write_t *w)
1164 {
1165 w->begin = 0;
1166 w->end = 0;
1167 }
1168
1169 static inline void snd_atomic_write_begin(snd_atomic_write_t *w)
1170 {
1171 w->begin++;
1172 wmb();
1173 }
1174
1175 static inline void snd_atomic_write_end(snd_atomic_write_t *w)
1176 {
1177 wmb();
1178 w->end++;
1179 }
1180
1181 static inline void snd_atomic_read_init(snd_atomic_read_t *r, snd_atomic_write_t *w)
1182 {
1183 r->write = w;
1184 }
1185
1186 static inline void snd_atomic_read_begin(snd_atomic_read_t *r)
1187 {
1188 r->end = r->write->end;
1189 rmb();
1190 }
1191
1192 static inline int snd_atomic_read_ok(snd_atomic_read_t *r)
1193 {
1194 rmb();
1195 return r->end == r->write->begin;
1196 }
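
/*
 * Example (illustrative sketch, hypothetical structure and functions):
 * the writer brackets its updates with snd_atomic_write_begin() and
 * snd_atomic_write_end(); the reader retries until it observes a
 * consistent snapshot (a real reader would typically fall back to
 * snd_atomic_read_wait() after repeated failures).
 *
 *	struct shared_state {
 *		snd_atomic_write_t lock;
 *		int hw_ptr, appl_ptr;
 *	};
 *
 *	void update(struct shared_state *s, int hw, int appl)
 *	{
 *		snd_atomic_write_begin(&s->lock);
 *		s->hw_ptr = hw;
 *		s->appl_ptr = appl;
 *		snd_atomic_write_end(&s->lock);
 *	}
 *
 *	void snapshot(struct shared_state *s, int *hw, int *appl)
 *	{
 *		snd_atomic_read_t r;
 *
 *		snd_atomic_read_init(&r, &s->lock);
 *		do {
 *			snd_atomic_read_begin(&r);
 *			*hw = s->hw_ptr;
 *			*appl = s->appl_ptr;
 *		} while (!snd_atomic_read_ok(&r));
 *	}
 */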
1197
1198 #endif /* __ALSA_IATOMIC_H */
1199