/*
 * cmpxchg.h -- forked from asm/atomic.h with this copyright:
 *
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 */

#ifndef _ASM_TILE_CMPXCHG_H
#define _ASM_TILE_CMPXCHG_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>

/* Nonexistent functions intended to cause compile errors. */
extern void __xchg_called_with_bad_pointer(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_called_with_bad_pointer(void)
	__compiletime_error("Bad argument size for cmpxchg");
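
/*
 * The sizeof() checks below are compile-time constants, so for a
 * supported operand size the call to one of these functions is dead
 * code and is optimized away; only an unsupported size leaves a live
 * call and trips the error.
 */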

#ifndef __tilegx__

/* Note the _atomic_xxx() routines include a final mb(). */
int _atomic_xchg(int *ptr, int n);
int _atomic_xchg_add(int *v, int i);
int _atomic_xchg_add_unless(int *v, int a, int u);
int _atomic_cmpxchg(int *ptr, int o, int n);
long long _atomic64_xchg(long long *v, long long n);
long long _atomic64_xchg_add(long long *v, long long i);
long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
long long _atomic64_cmpxchg(long long *v, long long o, long long n);

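/*
 * The smp_mb() issued before each call, together with the final mb()
 * in the out-of-line routine (noted above), makes each of these
 * operations a full memory barrier.
 */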
#define xchg(ptr, n)							\
	({								\
		if (sizeof(*(ptr)) != 4)				\
			__xchg_called_with_bad_pointer();		\
		smp_mb();						\
		(typeof(*(ptr)))_atomic_xchg((int *)(ptr), (int)(n));	\
	})

#define cmpxchg(ptr, o, n)						\
	({								\
		if (sizeof(*(ptr)) != 4)				\
			__cmpxchg_called_with_bad_pointer();		\
		smp_mb();						\
		(typeof(*(ptr)))_atomic_cmpxchg((int *)(ptr), (int)(o), \
						(int)(n));		\
	})

#define xchg64(ptr, n)							\
	({								\
		if (sizeof(*(ptr)) != 8)				\
			__xchg_called_with_bad_pointer();		\
		smp_mb();						\
		(typeof(*(ptr)))_atomic64_xchg((long long *)(ptr),	\
						(long long)(n));	\
	})

#define cmpxchg64(ptr, o, n)						\
	({								\
		if (sizeof(*(ptr)) != 8)				\
			__cmpxchg_called_with_bad_pointer();		\
		smp_mb();						\
		(typeof(*(ptr)))_atomic64_cmpxchg((long long *)(ptr),	\
					(long long)(o), (long long)(n)); \
	})

#else
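
/*
 * tilegx has native exch/cmpexch instructions for 4- and 8-byte
 * operands, so these operations are implemented inline, bracketed
 * by smp_mb() on either side to preserve full-barrier semantics.
 */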

#define xchg(ptr, n)							\
	({								\
		typeof(*(ptr)) __x;					\
		smp_mb();						\
		switch (sizeof(*(ptr))) {				\
		case 4:							\
			__x = (typeof(__x))(unsigned long)		\
				__insn_exch4((ptr),			\
					(u32)(unsigned long)(n));	\
			break;						\
		case 8:							\
			__x = (typeof(__x))				\
				__insn_exch((ptr), (unsigned long)(n));	\
			break;						\
		default:						\
			__xchg_called_with_bad_pointer();		\
			break;						\
		}							\
		smp_mb();						\
		__x;							\
	})

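/*
 * The cmpexch instructions take only the pointer and the new value;
 * the "old" value to compare against is supplied out of band via the
 * SPR_CMPEXCH_VALUE special-purpose register, written just before
 * the exchange issues.
 */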
#define cmpxchg(ptr, o, n)						\
	({								\
		typeof(*(ptr)) __x;					\
		__insn_mtspr(SPR_CMPEXCH_VALUE, (unsigned long)(o));	\
		smp_mb();						\
		switch (sizeof(*(ptr))) {				\
		case 4:							\
			__x = (typeof(__x))(unsigned long)		\
				__insn_cmpexch4((ptr),			\
					(u32)(unsigned long)(n));	\
			break;						\
		case 8:							\
			__x = (typeof(__x))__insn_cmpexch((ptr),	\
						(long long)(n));	\
			break;						\
		default:						\
			__cmpxchg_called_with_bad_pointer();		\
			break;						\
		}							\
		smp_mb();						\
		__x;							\
	})

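/*
 * The sizeof() switches above already handle 8-byte operands, so the
 * 64-bit variants are simple aliases of the generic macros.
 */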
#define xchg64 xchg
#define cmpxchg64 cmpxchg

#endif /* __tilegx__ */

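/*
 * Illustrative usage (the variable "flag" is hypothetical, not part
 * of this header): atomically take a flag only if it is still clear.
 *
 *	int flag = 0;
 *	...
 *	if (cmpxchg(&flag, 0, 1) == 0)
 *		... we won the race and now own the flag ...
 */
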
#endif /* __ASSEMBLY__ */

#endif /* _ASM_TILE_CMPXCHG_H */