/*
 * csum_partial_copy - do IP checksumming and copy
 *
 * (C) Copyright 1996 Linus Torvalds
 * accelerated versions (and 21264 assembly versions) contributed by
 *	Rick Gorton	<rick.gorton@alpha-processor.com>
 *
 * Don't look at this too closely - you'll go mad. The things
 * we do for performance..
 */

#include <linux/types.h>
#include <linux/string.h>
#include <asm/uaccess.h>

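/*
 * Thin wrappers around the Alpha byte-manipulation instructions.
 * ldq_u/stq_u load and store the aligned quadword containing the given
 * address (the low three address bits are ignored), and the extq/mskq/insq
 * pairs extract, mask and insert the byte lanes needed to reassemble
 * unaligned data from those aligned quadwords.
 */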
#define ldq_u(x,y) \
__asm__ __volatile__("ldq_u %0,%1":"=r" (x):"m" (*(const unsigned long *)(y)))

#define stq_u(x,y) \
__asm__ __volatile__("stq_u %1,%0":"=m" (*(unsigned long *)(y)):"r" (x))

#define extql(x,y,z) \
__asm__ __volatile__("extql %1,%2,%0":"=r" (z):"r" (x),"r" (y))

#define extqh(x,y,z) \
__asm__ __volatile__("extqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))

#define mskql(x,y,z) \
__asm__ __volatile__("mskql %1,%2,%0":"=r" (z):"r" (x),"r" (y))

#define mskqh(x,y,z) \
__asm__ __volatile__("mskqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))

#define insql(x,y,z) \
__asm__ __volatile__("insql %1,%2,%0":"=r" (z):"r" (x),"r" (y))

#define insqh(x,y,z) \
__asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))

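/*
 * __get_user_u and __put_user_u are the unaligned ldq_u/stq_u with an
 * __ex_table entry attached, so a faulting user access is patched up
 * through the exception table and reported as an error instead of
 * oopsing.
 */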
#define __get_user_u(x,ptr)				\
({							\
	long __guu_err;					\
	__asm__ __volatile__(				\
	"1:	ldq_u %0,%2\n"				\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0,2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(x), "=r"(__guu_err)		\
		: "m"(__m(ptr)), "1"(0));		\
	__guu_err;					\
})

#define __put_user_u(x,ptr)				\
({							\
	long __puu_err;					\
	__asm__ __volatile__(				\
	"1:	stq_u %2,%1\n"				\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda $31,2b-1b(%0)\n"			\
	".previous"					\
		: "=r"(__puu_err)			\
		: "m"(__m(ptr)), "rJ"(x), "0"(0));	\
	__puu_err;					\
})

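/*
 * Fold a 64-bit partial checksum down to 16 bits: add the two 32-bit
 * halves, then add the 16-bit pieces of that sum.  The intermediate sums
 * are kept in the full 64-bit union member, so no carry is lost, e.g.
 * 0x00000002ffffffff -> 0xffffffff + 0x00000002 = 0x100000001
 *                    -> 0x0001 + 0x0000 + 0x0001 = 0x0002.
 */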
static inline unsigned short from64to16(unsigned long x)
{
	/* Using extract instructions is a bit more efficient
	   than the original shift/bitmask version.  */

	union {
		unsigned long	ul;
		unsigned int	ui[2];
		unsigned short	us[4];
	} in_v, tmp_v, out_v;

	in_v.ul = x;
	tmp_v.ul = (unsigned long) in_v.ui[0] + (unsigned long) in_v.ui[1];

	/* Since the bits of tmp_v.us[3] are going to always be zero,
	   we don't have to bother to add that in.  */
	out_v.ul = (unsigned long) tmp_v.us[0] + (unsigned long) tmp_v.us[1]
			+ (unsigned long) tmp_v.us[2];

	/* Similarly, out_v.us[2] is always zero for the final add.  */
	return out_v.us[0] + out_v.us[1];
}


/*
 * Ok. This isn't fun, but this is the EASY case.
 */
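/*
 * Both src and dst are quadword aligned: copy and sum whole quadwords,
 * then mask the trailing partial word into place.  The caller passes
 * len-8, so 0..7 tail bytes remain when the main loop falls through.
 */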
static inline unsigned long
csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
			 long len, unsigned long checksum,
			 int *errp)
{
	unsigned long carry = 0;
	int err = 0;

	while (len >= 0) {
		unsigned long word;
		err |= __get_user(word, src);
		checksum += carry;
		src++;
		checksum += word;
		len -= 8;
		carry = checksum < word;
		*dst = word;
		dst++;
	}
	len += 8;
	checksum += carry;
	if (len) {
		unsigned long word, tmp;
		err |= __get_user(word, src);
		tmp = *dst;
		mskql(word, len, word);
		checksum += word;
		mskqh(tmp, len, tmp);
		carry = checksum < word;
		*dst = word | tmp;
		checksum += carry;
	}
	if (err && errp) *errp = err;
	return checksum;
}

/*
 * This is even less fun, but this is still reasonably
 * easy.
 */
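/*
 * dst is quadword aligned but src is offset by soff bytes: each pass
 * reads two unaligned source quadwords with __get_user_u and merges
 * them with extql/extqh into one aligned word that is summed and stored.
 */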
static inline unsigned long
csum_partial_cfu_dest_aligned(const unsigned long __user *src,
			      unsigned long *dst,
			      unsigned long soff,
			      long len, unsigned long checksum,
			      int *errp)
{
	unsigned long first;
	unsigned long word, carry;
	unsigned long lastsrc = 7+len+(unsigned long)src;
	int err = 0;

	err |= __get_user_u(first,src);
	carry = 0;
	while (len >= 0) {
		unsigned long second;

		err |= __get_user_u(second, src+1);
		extql(first, soff, word);
		len -= 8;
		src++;
		extqh(second, soff, first);
		checksum += carry;
		word |= first;
		first = second;
		checksum += word;
		*dst = word;
		dst++;
		carry = checksum < word;
	}
	len += 8;
	checksum += carry;
	if (len) {
		unsigned long tmp;
		unsigned long second;
		err |= __get_user_u(second, lastsrc);
		tmp = *dst;
		extql(first, soff, word);
		extqh(second, soff, first);
		word |= first;
		mskql(word, len, word);
		checksum += word;
		mskqh(tmp, len, tmp);
		carry = checksum < word;
		*dst = word | tmp;
		checksum += carry;
	}
	if (err && errp) *errp = err;
	return checksum;
}

/*
 * This is slightly less fun than the above..
 */
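/*
 * src is quadword aligned but dst is offset by doff bytes: each source
 * word is split with insql/insqh across two destination quadwords, and
 * partial_dest carries the bytes that still belong in the next aligned
 * store - initially the preserved low bytes of the first destination
 * quadword, afterwards the spill-over from the previous source word.
 */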
static inline unsigned long
csum_partial_cfu_src_aligned(const unsigned long __user *src,
			     unsigned long *dst,
			     unsigned long doff,
			     long len, unsigned long checksum,
			     unsigned long partial_dest,
			     int *errp)
{
	unsigned long carry = 0;
	unsigned long word;
	unsigned long second_dest;
	int err = 0;

	mskql(partial_dest, doff, partial_dest);
	while (len >= 0) {
		err |= __get_user(word, src);
		len -= 8;
		insql(word, doff, second_dest);
		checksum += carry;
		stq_u(partial_dest | second_dest, dst);
		src++;
		checksum += word;
		insqh(word, doff, partial_dest);
		carry = checksum < word;
		dst++;
	}
	len += 8;
	if (len) {
		checksum += carry;
		err |= __get_user(word, src);
		mskql(word, len, word);
		len -= 8;
		checksum += word;
		insql(word, doff, second_dest);
		len += doff;
		carry = checksum < word;
		partial_dest |= second_dest;
		if (len >= 0) {
			stq_u(partial_dest, dst);
			if (!len) goto out;
			dst++;
			insqh(word, doff, partial_dest);
		}
		doff = len;
	}
	ldq_u(second_dest, dst);
	mskqh(second_dest, doff, second_dest);
	stq_u(partial_dest | second_dest, dst);
out:
	checksum += carry;
	if (err && errp) *errp = err;
	return checksum;
}

/*
 * This is so totally un-fun that it's frightening. Don't
 * look at this too closely, you'll go blind.
 */
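/*
 * Neither src nor dst is aligned: combine the two techniques above,
 * realigning the source with extql/extqh and scattering the result into
 * the destination with insql/insqh, while keeping the checksum running
 * over the assembled words.
 */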
static inline unsigned long
csum_partial_cfu_unaligned(const unsigned long __user * src,
			   unsigned long * dst,
			   unsigned long soff, unsigned long doff,
			   long len, unsigned long checksum,
			   unsigned long partial_dest,
			   int *errp)
{
	unsigned long carry = 0;
	unsigned long first;
	unsigned long lastsrc;
	int err = 0;

	err |= __get_user_u(first, src);
	lastsrc = 7+len+(unsigned long)src;
	mskql(partial_dest, doff, partial_dest);
	while (len >= 0) {
		unsigned long second, word;
		unsigned long second_dest;

		err |= __get_user_u(second, src+1);
		extql(first, soff, word);
		checksum += carry;
		len -= 8;
		extqh(second, soff, first);
		src++;
		word |= first;
		first = second;
		insql(word, doff, second_dest);
		checksum += word;
		stq_u(partial_dest | second_dest, dst);
		carry = checksum < word;
		insqh(word, doff, partial_dest);
		dst++;
	}
	len += doff;
	checksum += carry;
	if (len >= 0) {
		unsigned long second, word;
		unsigned long second_dest;

		err |= __get_user_u(second, lastsrc);
		extql(first, soff, word);
		extqh(second, soff, first);
		word |= first;
		first = second;
		mskql(word, len-doff, word);
		checksum += word;
		insql(word, doff, second_dest);
		carry = checksum < word;
		stq_u(partial_dest | second_dest, dst);
		if (len) {
			ldq_u(second_dest, dst+1);
			insqh(word, doff, partial_dest);
			mskqh(second_dest, len, second_dest);
			stq_u(partial_dest | second_dest, dst+1);
		}
		checksum += carry;
	} else {
		unsigned long second, word;
		unsigned long second_dest;

		err |= __get_user_u(second, lastsrc);
		extql(first, soff, word);
		extqh(second, soff, first);
		word |= first;
		ldq_u(second_dest, dst);
		mskql(word, len-doff, word);
		checksum += word;
		mskqh(second_dest, len, second_dest);
		carry = checksum < word;
		insql(word, doff, word);
		stq_u(partial_dest | word | second_dest, dst);
		checksum += carry;
	}
	if (err && errp) *errp = err;
	return checksum;
}

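/*
 * Copy from user space while accumulating a 32-bit partial checksum.
 * After the access_ok() check, the low three bits of src and dst pick
 * one of the four helpers above (each is handed len-8 so its main loop
 * runs a quadword ahead of the tail handling), and the 64-bit
 * accumulator is folded back down with from64to16() before returning.
 */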
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
			       __wsum sum, int *errp)
{
	unsigned long checksum = (__force u32) sum;
	unsigned long soff = 7 & (unsigned long) src;
	unsigned long doff = 7 & (unsigned long) dst;

	if (len) {
		if (!access_ok(VERIFY_READ, src, len)) {
			if (errp) *errp = -EFAULT;
			memset(dst, 0, len);
			return sum;
		}
		if (!doff) {
			if (!soff)
				checksum = csum_partial_cfu_aligned(
					(const unsigned long __user *) src,
					(unsigned long *) dst,
					len-8, checksum, errp);
			else
				checksum = csum_partial_cfu_dest_aligned(
					(const unsigned long __user *) src,
					(unsigned long *) dst,
					soff, len-8, checksum, errp);
		} else {
			unsigned long partial_dest;
			ldq_u(partial_dest, dst);
			if (!soff)
				checksum = csum_partial_cfu_src_aligned(
					(const unsigned long __user *) src,
					(unsigned long *) dst,
					doff, len-8, checksum,
					partial_dest, errp);
			else
				checksum = csum_partial_cfu_unaligned(
					(const unsigned long __user *) src,
					(unsigned long *) dst,
					soff, doff, len-8, checksum,
					partial_dest, errp);
		}
		checksum = from64to16 (checksum);
	}
	return (__force __wsum)checksum;
}
EXPORT_SYMBOL(csum_partial_copy_from_user);

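/*
 * Kernel-to-kernel variant: temporarily lift the address limit with
 * set_fs(KERNEL_DS) so the access-checked routine above can be reused
 * on kernel pointers, then restore the old limit.
 */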
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
	__wsum checksum;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	checksum = csum_partial_copy_from_user((__force const void __user *)src,
						dst, len, sum, NULL);
	set_fs(oldfs);
	return checksum;
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);