// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
 *		Fixed some nasty bugs, causing some horrible crashes.
 *		A: At some points, the sum (%0) was used as
 *		length-counter instead of the length counter
 *		(%1). Thanks to Roman Hodek for pointing this out.
 *		B: GCC seems to mess up if one uses too many
 *		data-registers to hold input values and one tries to
 *		specify d0 and d1 as scratch registers. Letting gcc
 *		choose these registers itself solves the problem.
 */

/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
 * kills, so most of the assembly has to go. */

#include <linux/export.h>
#include <net/checksum.h>

#include <asm/byteorder.h>

#ifndef do_csum
static inline unsigned short from32to16(unsigned int x)
{
	/* add up 16-bit and 16-bit for 16+c bit */
	x = (x & 0xffff) + (x >> 16);
	/* add up carry.. */
	x = (x & 0xffff) + (x >> 16);
	return x;
}
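/*
 * Example: folding x = 0xffff0003 gives 0x0003 + 0xffff = 0x10002 after
 * the first step and 0x0002 + 0x0001 = 0x0003 after the second, i.e. the
 * carries out of bit 15 are wrapped back in, as ones' complement addition
 * requires.  The second fold can never produce a carry of its own, since
 * the first one yields at most 0xffff + 0xffff = 0x1fffe.
 */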

static unsigned int do_csum(const unsigned char *buff, int len)
{
	int odd;
	unsigned int result = 0;

	if (len <= 0)
		goto out;
	odd = 1 & (unsigned long) buff;
	if (odd) {
		/* odd start address: fold in the first byte and realign */
#ifdef __LITTLE_ENDIAN
		result += (*buff << 8);
#else
		result = *buff;
#endif
		len--;
		buff++;
	}
	if (len >= 2) {
		/* a stray halfword, if needed, to reach 32-bit alignment */
		if (2 & (unsigned long) buff) {
			result += *(unsigned short *) buff;
			len -= 2;
			buff += 2;
		}
		if (len >= 4) {
			/* sum aligned 32-bit words, propagating the carries */
			const unsigned char *end = buff + ((unsigned)len & ~3);
			unsigned int carry = 0;
			do {
				unsigned int w = *(unsigned int *) buff;
				buff += 4;
				result += carry;
				result += w;
				carry = (w > result);
			} while (buff < end);
			result += carry;
			result = (result & 0xffff) + (result >> 16);
		}
		/* trailing halfword */
		if (len & 2) {
			result += *(unsigned short *) buff;
			buff += 2;
		}
	}
	/* trailing byte, if the length is odd */
	if (len & 1)
#ifdef __LITTLE_ENDIAN
		result += *buff;
#else
		result += (*buff << 8);
#endif
	result = from32to16(result);
	/* swap bytes back if the buffer started at an odd address */
	if (odd)
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
	return result;
}
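
/*
 * Note on the main loop above: "carry = (w > result)" detects 32-bit
 * wraparound.  For example, 0xffffffff + 0x00000002 wraps to 0x00000001,
 * which is smaller than the word just added, so one end-around carry is
 * recorded and folded back in on the next iteration (and once more after
 * the loop).
 */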
#endif

#ifndef ip_fast_csum
/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which always checksum on 4 octet boundaries.
 */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return (__force __sum16)~do_csum(iph, ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
#endif
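
/*
 * Illustrative use (a sketch, assuming a struct iphdr *iph and an error
 * label as in the IPv4 receive path): a header whose checksum field is
 * correct sums to all ones, so the folded and inverted result is zero:
 *
 *	if (ip_fast_csum((u8 *)iph, iph->ihl))
 *		goto csum_error;
 */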

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum wsum)
{
	unsigned int sum = (__force unsigned int)wsum;
	unsigned int result = do_csum(buff, len);

	/* add in old sum, and carry.. */
	result += sum;
	if (sum > result)
		result += 1;
	return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
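
/*
 * Illustrative sketch (hypothetical buffers frag1/frag2; len1 must be
 * even, as noted above): the 32-bit result can be fed back in to
 * checksum a message in pieces, and the csum_fold() helper reduces it
 * to the final 16-bit ones' complement form:
 *
 *	__wsum csum = csum_partial(frag1, len1, 0);
 *	csum = csum_partial(frag2, len2, csum);
 *	__sum16 check = csum_fold(csum);
 */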

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
	return (__force __sum16)~do_csum(buff, len);
}
EXPORT_SYMBOL(ip_compute_csum);
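
/*
 * Illustrative sketch (hypothetical icmph and len): the usual pattern is
 * to zero the checksum field first, then store the computed value over
 * the whole message:
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, len);
 */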

/*
 * copy from user space while checksumming, otherwise like csum_partial
 */
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
						__wsum sum, int *csum_err)
{
	int missing;

	missing = __copy_from_user(dst, src, len);
	if (missing) {
		memset(dst + len - missing, 0, missing);
		*csum_err = -EFAULT;
	} else
		*csum_err = 0;

	return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy_from_user);

/*
 * copy within kernel memory while checksumming, otherwise like csum_partial
 */
__wsum
csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
{
	memcpy(dst, src, len);
	return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy);

#ifndef csum_tcpudp_nofold
static inline u32 from64to32(u64 x)
{
	/* add up 32-bit and 32-bit for 32+c bit */
	x = (x & 0xffffffff) + (x >> 32);
	/* add up carry.. */
	x = (x & 0xffffffff) + (x >> 32);
	return (u32)x;
}

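/*
 * Note: csum_tcpudp_nofold() below adds at most a handful of 32-bit
 * quantities into a 64-bit accumulator, so the total cannot overflow
 * 64 bits and the two folds in from64to32() above are enough to wrap
 * every carry back into 32 bits.
 */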
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
			  __u32 len, __u8 proto, __wsum sum)
{
	unsigned long long s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum)from64to32(s);
}
EXPORT_SYMBOL(csum_tcpudp_nofold);
#endif
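
/*
 * Illustrative sketch (hypothetical saddr, daddr and TCP segment): the
 * unfolded pseudo-header sum is normally combined with a checksum over
 * the segment itself and then folded to 16 bits, which is what the
 * csum_tcpudp_magic() helper wraps up:
 *
 *	__wsum sum = csum_partial(segment, len, 0);
 *	__sum16 check = csum_tcpudp_magic(saddr, daddr, len,
 *					  IPPROTO_TCP, sum);
 */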