#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H

/*  checksum.h:  IP/UDP/TCP checksum routines on the Sparc.
 *
 *  Copyright(C) 1995 Linus Torvalds
 *  Copyright(C) 1995 Miguel de Icaza
 *  Copyright(C) 1996 David S. Miller
 *  Copyright(C) 1996 Eddie C. Dost
 *  Copyright(C) 1997 Jakub Jelinek
 *
 * derived from:
 *	Alpha checksum c-code
 *	ix86 inline assembly
 *	RFC1071 Computing the Internet Checksum
 */

#include <linux/in6.h>
#include <asm/uaccess.h>

/* computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);

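/* Illustration only, not part of the original header: a portable C
 * sketch of the arithmetic csum_partial performs, assuming a 16-bit
 * aligned buffer, an even len small enough that the 32-bit accumulator
 * cannot wrap, and ignoring the odd-trailing-byte case.  The name
 * csum_partial_ref is hypothetical; the real csum_partial is an
 * optimized Sparc assembly routine.
 */
static inline __wsum csum_partial_ref(const void *buff, int len, __wsum sum)
{
	const u16 *p = buff;
	u32 result = (__force u32)sum;

	for (; len >= 2; len -= 2)	/* sum the buffer as 16-bit words; */
		result += *p++;		/* carries collect above bit 15 */
	result = (result & 0xffff) + (result >> 16);	/* fold carries back in */
	result = (result & 0xffff) + (result >> 16);
	return (__force __wsum)result;
}
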
/* the same as csum_partial, but copies from src while it
 * checksums
 *
 * here it is even more important to align src and dst on a 32-bit (or
 * even better, 64-bit) boundary
 */

unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);

static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
	/* __csum_partial_copy_sparc_generic takes its arguments in fixed
	 * registers: src in %o0, dst in %o1, len in %g1, and the running
	 * sum in %g7 (loaded in the call's delay slot below).
	 */
	register unsigned int ret asm("o0") = (unsigned int)src;
	register char *d asm("o1") = dst;
	register int l asm("g1") = len;

	__asm__ __volatile__ (
		"call __csum_partial_copy_sparc_generic\n\t"
		" mov %6, %%g7\n"
	: "=&r" (ret), "=&r" (d), "=&r" (l)
	: "0" (ret), "1" (d), "2" (l), "r" (sum)
	: "o2", "o3", "o4", "o5", "o7",
	  "g2", "g3", "g4", "g5", "g7",
	  "memory", "cc");
	return (__force __wsum)ret;
}

static inline __wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
			    __wsum sum, int *err)
{
	register unsigned long ret asm("o0") = (unsigned long)src;
	register char *d asm("o1") = dst;
	register int l asm("g1") = len;
	register __wsum s asm("g7") = sum;

	/* The __ex_table entry marks the call so faults inside the copy
	 * are handled; the err pointer is saved at %sp + 64 in the delay
	 * slot for the fixup code to find.
	 */
	__asm__ __volatile__ (
	".section __ex_table,#alloc\n\t"
	".align 4\n\t"
	".word 1f,2\n\t"
	".previous\n"
	"1:\n\t"
	"call __csum_partial_copy_sparc_generic\n\t"
	" st %8, [%%sp + 64]\n"
	: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
	: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
	: "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
	  "cc", "memory");
	return (__force __wsum)ret;
}

static inline __wsum
csum_partial_copy_to_user(const void *src, void __user *dst, int len,
			  __wsum sum, int *err)
{
	if (!access_ok(VERIFY_WRITE, dst, len)) {
		*err = -EFAULT;
		return sum;
	} else {
		register unsigned long ret asm("o0") = (unsigned long)src;
		register char __user *d asm("o1") = dst;
		register int l asm("g1") = len;
		register __wsum s asm("g7") = sum;

		__asm__ __volatile__ (
		".section __ex_table,#alloc\n\t"
		".align 4\n\t"
		".word 1f,1\n\t"
		".previous\n"
		"1:\n\t"
		"call __csum_partial_copy_sparc_generic\n\t"
		" st %8, [%%sp + 64]\n"
		: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
		: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
		: "o2", "o3", "o4", "o5", "o7",
		  "g2", "g3", "g4", "g5",
		  "cc", "memory");
		return (__force __wsum)ret;
	}
}

#define HAVE_CSUM_COPY_USER
#define csum_and_copy_to_user csum_partial_copy_to_user

/* ihl is always 5 or greater, almost always 5, and iph is word-aligned
 * the majority of the time.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	__sum16 sum;

	/* Note: We must read %2 before we touch %0 for the first time,
	 *       because GCC can legitimately use the same register for
	 *       both operands.
	 */
	__asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
			     "ld\t[%1 + 0x00], %0\n\t"
			     "ld\t[%1 + 0x04], %%g2\n\t"
			     "ld\t[%1 + 0x08], %%g3\n\t"
			     "addcc\t%%g2, %0, %0\n\t"
			     "addxcc\t%%g3, %0, %0\n\t"
			     "ld\t[%1 + 0x0c], %%g2\n\t"
			     "ld\t[%1 + 0x10], %%g3\n\t"
			     "addxcc\t%%g2, %0, %0\n\t"
			     "addx\t%0, %%g0, %0\n"
			     "1:\taddcc\t%%g3, %0, %0\n\t"
			     "add\t%1, 4, %1\n\t"
			     "addxcc\t%0, %%g0, %0\n\t"
			     "subcc\t%%g4, 1, %%g4\n\t"
			     "be,a\t2f\n\t"
			     "sll\t%0, 16, %%g2\n\t"
			     "b\t1b\n\t"
			     "ld\t[%1 + 0x10], %%g3\n"
			     "2:\taddcc\t%0, %%g2, %%g2\n\t"
			     "srl\t%%g2, 16, %0\n\t"
			     "addx\t%0, %%g0, %0\n\t"
			     "xnor\t%%g0, %0, %0"
			     : "=r" (sum), "=&r" (iph)
			     : "r" (ihl), "1" (iph)
			     : "g2", "g3", "g4", "cc", "memory");
	return sum;
}
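
/* Illustration only: a portable C sketch of what ip_fast_csum computes,
 * assuming a 32-bit-aligned header.  ihl counts 32-bit words and is at
 * most 15, so the 64-bit accumulator cannot wrap.  ip_fast_csum_ref is
 * a hypothetical name, not a kernel interface.
 */
static inline __sum16 ip_fast_csum_ref(const void *iph, unsigned int ihl)
{
	const u32 *p = iph;
	u64 s = 0;
	unsigned int i;

	for (i = 0; i < ihl; i++)		/* ihl >= 5 header words */
		s += p[i];
	s = (s & 0xffffffff) + (s >> 32);	/* fold carries: 64 -> 32 bits */
	s = (s & 0xffffffff) + (s >> 32);
	s = (s & 0xffff) + (s >> 16);		/* then 32 -> 16 bits */
	s = (s & 0xffff) + (s >> 16);
	return (__force __sum16)(~s & 0xffff);	/* one's complement */
}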

/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	__asm__ __volatile__("addcc\t%0, %1, %1\n\t"
			     "srl\t%1, 16, %1\n\t"
			     "addx\t%1, %%g0, %1\n\t"
			     "xnor\t%%g0, %1, %0"
			     : "=&r" (sum), "=r" (tmp)
			     : "0" (sum), "1" ((__force u32)sum<<16)
			     : "cc");
	return (__force __sum16)sum;
}
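
/* Illustration only: the same fold written as portable C.  Adding the
 * high 16-bit half into the low half twice absorbs any carry from the
 * first add.  csum_fold_ref is a hypothetical name.
 */
static inline __sum16 csum_fold_ref(__wsum sum)
{
	u32 s = (__force u32)sum;

	s = (s & 0xffff) + (s >> 16);	/* fold high half into low half */
	s = (s & 0xffff) + (s >> 16);	/* absorb the carry of the fold */
	return (__force __sum16)(~s & 0xffff);	/* one's complement */
}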

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	__asm__ __volatile__("addcc\t%1, %0, %0\n\t"
			     "addxcc\t%2, %0, %0\n\t"
			     "addxcc\t%3, %0, %0\n\t"
			     "addx\t%0, %%g0, %0\n\t"
			     : "=r" (sum), "=r" (saddr)
			     : "r" (daddr), "r" (proto + len), "0" (sum),
			       "1" (saddr)
			     : "cc");
	return sum;
}
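
/* Illustration only: the IPv4 pseudo-header sum in portable C.  On the
 * big-endian Sparc the host-order value "proto + len" matches the
 * pseudo-header's wire layout (the zero pad byte contributes nothing).
 * csum_tcpudp_nofold_ref is a hypothetical name.
 */
static inline __wsum csum_tcpudp_nofold_ref(__be32 saddr, __be32 daddr,
					    unsigned short len,
					    unsigned short proto,
					    __wsum sum)
{
	u64 s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
	s += proto + len;
	s = (s & 0xffffffff) + (s >> 32);	/* end-around carry, 64 -> 32 */
	s = (s & 0xffffffff) + (s >> 32);
	return (__force __wsum)s;
}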

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
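
/* Typical use, as a hedged sketch with hypothetical locals uh, ulen,
 * saddr and daddr: checksum the transport header plus payload first,
 * then fold in the pseudo-header, e.g. for UDP:
 *
 *	__wsum psum = csum_partial(uh, ulen, 0);
 *	uh->check = csum_tcpudp_magic(saddr, daddr, ulen, IPPROTO_UDP, psum);
 */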

#define _HAVE_ARCH_IPV6_CSUM

static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, unsigned short proto,
				      __wsum sum)
{
	__asm__ __volatile__ (
		"addcc	%3, %4, %%g4\n\t"
		"addxcc	%5, %%g4, %%g4\n\t"
		"ld	[%2 + 0x0c], %%g2\n\t"
		"ld	[%2 + 0x08], %%g3\n\t"
		"addxcc	%%g2, %%g4, %%g4\n\t"
		"ld	[%2 + 0x04], %%g2\n\t"
		"addxcc	%%g3, %%g4, %%g4\n\t"
		"ld	[%2 + 0x00], %%g3\n\t"
		"addxcc	%%g2, %%g4, %%g4\n\t"
		"ld	[%1 + 0x0c], %%g2\n\t"
		"addxcc	%%g3, %%g4, %%g4\n\t"
		"ld	[%1 + 0x08], %%g3\n\t"
		"addxcc	%%g2, %%g4, %%g4\n\t"
		"ld	[%1 + 0x04], %%g2\n\t"
		"addxcc	%%g3, %%g4, %%g4\n\t"
		"ld	[%1 + 0x00], %%g3\n\t"
		"addxcc	%%g2, %%g4, %%g4\n\t"
		"addxcc	%%g3, %%g4, %0\n\t"
		"addx	0, %0, %0\n"
		: "=&r" (sum)
		: "r" (saddr), "r" (daddr),
		  "r" (htonl(len)), "r" (htonl(proto)), "r" (sum)
		: "g2", "g3", "g4", "cc");

	return csum_fold(sum);
}
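
/* Illustration only: a portable sketch of the same IPv6 pseudo-header
 * sum, treating each in6_addr as four 32-bit words and summing the
 * network-order length and protocol alongside them.  The name
 * csum_ipv6_magic_ref is hypothetical.
 */
static inline __sum16 csum_ipv6_magic_ref(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, unsigned short proto,
					  __wsum sum)
{
	u64 s = (__force u32)sum;
	int i;

	for (i = 0; i < 4; i++) {
		s += (__force u32)saddr->s6_addr32[i];
		s += (__force u32)daddr->s6_addr32[i];
	}
	s += (__force u32)htonl(len);
	s += (__force u32)htonl(proto);
	s = (s & 0xffffffff) + (s >> 32);	/* end-around carry */
	s = (s & 0xffffffff) + (s >> 32);
	return csum_fold((__force __wsum)(u32)s);
}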

/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
	__asm__ __volatile__(
		"addcc   %0, %1, %0\n"
		"addx    %0, %%g0, %0"
		: "=r" (csum)
		: "r" (addend), "0" (csum));

	return csum;
}
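
/* Illustration only: csum_add is a 32-bit add with end-around carry,
 * i.e. a one's-complement addition.  csum_add_ref is a hypothetical
 * portable equivalent.
 */
static inline __wsum csum_add_ref(__wsum csum, __wsum addend)
{
	u32 a = (__force u32)csum;
	u32 t = a + (__force u32)addend;

	if (t < a)	/* the add wrapped: fold the carry back in */
		t++;
	return (__force __wsum)t;
}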

#endif /* !(__SPARC_CHECKSUM_H) */