// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system. INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              IP/TCP/UDP checksumming routines
 *
 * Authors:     Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Tom May, <ftom@netcom.com>
 *              Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
 *              Lots of code moved from tcp.c and ip.c; see those files
 *              for more names.
 *
 * 03/02/96     Jes Sorensen, Andreas Schwab, Roman Hodek:
 *              Fixed some nasty bugs, causing some horrible crashes.
 *              A: At some points, the sum (%0) was used as
 *              length-counter instead of the length counter
 *              (%1). Thanks to Roman Hodek for pointing this out.
 *              B: GCC seems to mess up if one uses too many
 *              data-registers to hold input values and one tries to
 *              specify d0 and d1 as scratch registers. Letting gcc
 *              choose these registers itself solves the problem.
 */

/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
   kills, so most of the assembly has to go. */

#include <linux/export.h>
#include <net/checksum.h>

#include <asm/byteorder.h>

#ifndef do_csum
static inline unsigned short from32to16(unsigned int x)
{
        /* add up 16-bit and 16-bit for 16+c bit */
        x = (x & 0xffff) + (x >> 16);
        /* add up carry.. */
        x = (x & 0xffff) + (x >> 16);
        return x;
}
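
/*
 * Worked example (illustrative only, not part of the original code): for a
 * running sum of 0x0003fffd the two folds above proceed as
 *
 *      x = (0x0003fffd & 0xffff) + (0x0003fffd >> 16);    -> 0x00010000
 *      x = (0x00010000 & 0xffff) + (0x00010000 >> 16);    -> 0x00000001
 *
 * i.e. from32to16(0x0003fffd) == 0x0001.  The second fold absorbs the
 * end-around carry that the first fold can generate, so two passes always
 * suffice to reduce a 32-bit sum to 16 bits.
 */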

static unsigned int do_csum(const unsigned char *buff, int len)
{
        int odd;
        unsigned int result = 0;

        if (len <= 0)
                goto out;
        odd = 1 & (unsigned long) buff;
        if (odd) {
#ifdef __LITTLE_ENDIAN
                result += (*buff << 8);
#else
                result = *buff;
#endif
                len--;
                buff++;
        }
        if (len >= 2) {
                if (2 & (unsigned long) buff) {
                        result += *(unsigned short *) buff;
                        len -= 2;
                        buff += 2;
                }
                if (len >= 4) {
                        const unsigned char *end = buff + ((unsigned)len & ~3);
                        unsigned int carry = 0;
                        do {
                                unsigned int w = *(unsigned int *) buff;
                                buff += 4;
                                result += carry;
                                result += w;
                                carry = (w > result);
                        } while (buff < end);
                        result += carry;
                        result = (result & 0xffff) + (result >> 16);
                }
                if (len & 2) {
                        result += *(unsigned short *) buff;
                        buff += 2;
                }
        }
        if (len & 1)
#ifdef __LITTLE_ENDIAN
                result += *buff;
#else
                result += (*buff << 8);
#endif
        result = from32to16(result);
        if (odd)
                result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
        return result;
}
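
/*
 * Worked example (illustrative only, taken from RFC 1071): summing the
 * bytes 00 01 f2 03 f4 f5 f6 f7 as 16-bit words in network order gives
 *
 *      0x0001  + 0xf203 = 0xf204
 *      0xf204  + 0xf4f5 = 0x1e6f9
 *      0x1e6f9 + 0xf6f7 = 0x2ddf0  ->  fold: 0xddf0 + 0x2 = 0xddf2
 *
 * On a big-endian machine do_csum() returns exactly this 0xddf2; on a
 * little-endian machine the native loads above produce the byte-swapped
 * value 0xf2dd, which the callers below fold and complement into the same
 * on-the-wire checksum.
 */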
#endif

#ifndef ip_fast_csum
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
        return (__force __sum16)~do_csum(iph, ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
#endif
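
/*
 * Usage sketch (illustrative only; 'skb', the 'csum_error' label and the
 * surrounding receive path are assumed from the caller, and struct iphdr
 * needs <linux/ip.h>): a received IPv4 header is checked by summing it with
 * its checksum field still in place; a correct header sums to all-ones, so
 * the complement returned here is zero:
 *
 *      const struct iphdr *iph = ip_hdr(skb);
 *
 *      if (ip_fast_csum(iph, iph->ihl))
 *              goto csum_error;
 */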

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum wsum)
{
        unsigned int sum = (__force unsigned int)wsum;
        unsigned int result = do_csum(buff, len);

        /* add in old sum, and carry.. */
        result += sum;
        if (sum > result)
                result += 1;
        return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
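
/*
 * Usage sketch (illustrative only; 'hdr', 'hdr_len', 'payload' and
 * 'payload_len' are placeholder names): because the return value can be
 * fed back in as 'wsum', a message can be checksummed fragment by
 * fragment, with only the last fragment allowed to have an odd length:
 *
 *      __wsum sum;
 *      __sum16 check;
 *
 *      sum = csum_partial(hdr, hdr_len, 0);
 *      sum = csum_partial(payload, payload_len, sum);
 *      check = csum_fold(sum);
 */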

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
        return (__force __sum16)~do_csum(buff, len);
}
EXPORT_SYMBOL(ip_compute_csum);
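
/*
 * Usage sketch (illustrative only; 'icmph' and 'len' are placeholder names
 * and struct icmphdr needs <linux/icmp.h>): the checksum field is zeroed
 * before summing the whole message, then filled with the result; the same
 * call over a correctly checksummed message returns 0:
 *
 *      icmph->checksum = 0;
 *      icmph->checksum = ip_compute_csum(icmph, len);
 */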

#ifndef csum_tcpudp_nofold
static inline u32 from64to32(u64 x)
{
        /* add up 32-bit and 32-bit for 32+c bit */
        x = (x & 0xffffffff) + (x >> 32);
        /* add up carry.. */
        x = (x & 0xffffffff) + (x >> 32);
        return (u32)x;
}

__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                          __u32 len, __u8 proto, __wsum sum)
{
        unsigned long long s = (__force u32)sum;

        s += (__force u32)saddr;
        s += (__force u32)daddr;
#ifdef __BIG_ENDIAN
        s += proto + len;
#else
        s += (proto + len) << 8;
#endif
        return (__force __wsum)from64to32(s);
}
EXPORT_SYMBOL(csum_tcpudp_nofold);
#endif
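
/*
 * Usage sketch (illustrative only; 'th', 'iph' and 'len' are placeholder
 * names): the pseudo-header sum computed here is normally folded into the
 * final 16-bit transport checksum via csum_tcpudp_magic(), which is
 * csum_fold() applied to this return value.  For a TCP segment of 'len'
 * bytes starting at 'th':
 *
 *      th->check = 0;
 *      th->check = csum_tcpudp_magic(iph->saddr, iph->daddr, len,
 *                                    IPPROTO_TCP, csum_partial(th, len, 0));
 */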