/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Checksumming functions for IP, TCP, UDP and so on
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Borrows very liberally from tcp.c and ip.c, see those
 *		files for more names.
 */
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #ifndef _CHECKSUM_H
16*4882a593Smuzhiyun #define _CHECKSUM_H
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #include <linux/errno.h>
19*4882a593Smuzhiyun #include <asm/types.h>
20*4882a593Smuzhiyun #include <asm/byteorder.h>
21*4882a593Smuzhiyun #include <linux/uaccess.h>
22*4882a593Smuzhiyun #include <asm/checksum.h>
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
25*4882a593Smuzhiyun static __always_inline
csum_and_copy_from_user(const void __user * src,void * dst,int len)26*4882a593Smuzhiyun __wsum csum_and_copy_from_user (const void __user *src, void *dst,
27*4882a593Smuzhiyun int len)
28*4882a593Smuzhiyun {
29*4882a593Smuzhiyun if (copy_from_user(dst, src, len))
30*4882a593Smuzhiyun return 0;
31*4882a593Smuzhiyun return csum_partial(dst, len, ~0U);
32*4882a593Smuzhiyun }
33*4882a593Smuzhiyun #endif
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #ifndef HAVE_CSUM_COPY_USER
csum_and_copy_to_user(const void * src,void __user * dst,int len)36*4882a593Smuzhiyun static __always_inline __wsum csum_and_copy_to_user
37*4882a593Smuzhiyun (const void *src, void __user *dst, int len)
38*4882a593Smuzhiyun {
39*4882a593Smuzhiyun __wsum sum = csum_partial(src, len, ~0U);
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun if (copy_to_user(dst, src, len) == 0)
42*4882a593Smuzhiyun return sum;
43*4882a593Smuzhiyun return 0;
44*4882a593Smuzhiyun }
45*4882a593Smuzhiyun #endif
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun #ifndef _HAVE_ARCH_CSUM_AND_COPY
48*4882a593Smuzhiyun static __always_inline __wsum
csum_partial_copy_nocheck(const void * src,void * dst,int len)49*4882a593Smuzhiyun csum_partial_copy_nocheck(const void *src, void *dst, int len)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun memcpy(dst, src, len);
52*4882a593Smuzhiyun return csum_partial(dst, len, 0);
53*4882a593Smuzhiyun }
54*4882a593Smuzhiyun #endif
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun #ifndef HAVE_ARCH_CSUM_ADD
csum_add(__wsum csum,__wsum addend)57*4882a593Smuzhiyun static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
58*4882a593Smuzhiyun {
59*4882a593Smuzhiyun u32 res = (__force u32)csum;
60*4882a593Smuzhiyun res += (__force u32)addend;
61*4882a593Smuzhiyun return (__force __wsum)(res + (res < (__force u32)addend));
62*4882a593Smuzhiyun }
63*4882a593Smuzhiyun #endif
64*4882a593Smuzhiyun
csum_sub(__wsum csum,__wsum addend)65*4882a593Smuzhiyun static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
66*4882a593Smuzhiyun {
67*4882a593Smuzhiyun return csum_add(csum, ~addend);
68*4882a593Smuzhiyun }
69*4882a593Smuzhiyun
csum16_add(__sum16 csum,__be16 addend)70*4882a593Smuzhiyun static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun u16 res = (__force u16)csum;
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun res += (__force u16)addend;
75*4882a593Smuzhiyun return (__force __sum16)(res + (res < (__force u16)addend));
76*4882a593Smuzhiyun }
77*4882a593Smuzhiyun
csum16_sub(__sum16 csum,__be16 addend)78*4882a593Smuzhiyun static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun return csum16_add(csum, ~addend);
81*4882a593Smuzhiyun }
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun static __always_inline __wsum
csum_block_add(__wsum csum,__wsum csum2,int offset)84*4882a593Smuzhiyun csum_block_add(__wsum csum, __wsum csum2, int offset)
85*4882a593Smuzhiyun {
86*4882a593Smuzhiyun u32 sum = (__force u32)csum2;
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun /* rotate sum to align it with a 16b boundary */
89*4882a593Smuzhiyun if (offset & 1)
90*4882a593Smuzhiyun sum = ror32(sum, 8);
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun return csum_add(csum, (__force __wsum)sum);
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun
/*
 * Variant of csum_block_add() that also receives the block length.
 * @len is unused by this generic fallback; the extended signature
 * presumably exists so arch implementations can exploit the length —
 * NOTE(review): confirm against arch overrides.
 */
static __always_inline __wsum
csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
{
	return csum_block_add(csum, csum2, offset);
}
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun static __always_inline __wsum
csum_block_sub(__wsum csum,__wsum csum2,int offset)102*4882a593Smuzhiyun csum_block_sub(__wsum csum, __wsum csum2, int offset)
103*4882a593Smuzhiyun {
104*4882a593Smuzhiyun return csum_block_add(csum, ~csum2, offset);
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun
csum_unfold(__sum16 n)107*4882a593Smuzhiyun static __always_inline __wsum csum_unfold(__sum16 n)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun return (__force __wsum)n;
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun
/*
 * Extended-signature alias of csum_partial(); this generic version
 * simply forwards to csum_partial().
 */
static __always_inline
__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
{
	return csum_partial(buff, len, sum);
}
117*4882a593Smuzhiyun
/* In one's-complement arithmetic 0x0000 and 0xffff both represent zero;
 * a computed checksum of zero is transmitted as 0xffff because protocols
 * such as UDP reserve 0x0000 to mean "no checksum" (RFC 768).
 */
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)
119*4882a593Smuzhiyun
csum_replace_by_diff(__sum16 * sum,__wsum diff)120*4882a593Smuzhiyun static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
121*4882a593Smuzhiyun {
122*4882a593Smuzhiyun *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun
csum_replace4(__sum16 * sum,__be32 from,__be32 to)125*4882a593Smuzhiyun static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
126*4882a593Smuzhiyun {
127*4882a593Smuzhiyun __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
128*4882a593Smuzhiyun
129*4882a593Smuzhiyun *sum = csum_fold(csum_add(tmp, (__force __wsum)to));
130*4882a593Smuzhiyun }
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun /* Implements RFC 1624 (Incremental Internet Checksum)
133*4882a593Smuzhiyun * 3. Discussion states :
134*4882a593Smuzhiyun * HC' = ~(~HC + ~m + m')
135*4882a593Smuzhiyun * m : old value of a 16bit field
136*4882a593Smuzhiyun * m' : new value of a 16bit field
137*4882a593Smuzhiyun */
csum_replace2(__sum16 * sum,__be16 old,__be16 new)138*4882a593Smuzhiyun static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
139*4882a593Smuzhiyun {
140*4882a593Smuzhiyun *sum = ~csum16_add(csum16_sub(~(*sum), old), new);
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun
csum_replace(__wsum * csum,__wsum old,__wsum new)143*4882a593Smuzhiyun static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
144*4882a593Smuzhiyun {
145*4882a593Smuzhiyun *csum = csum_add(csum_sub(*csum, old), new);
146*4882a593Smuzhiyun }
147*4882a593Smuzhiyun
struct sk_buff;
/* Incremental checksum-replace helpers that also keep the skb's own
 * checksum state consistent; @pseudohdr indicates whether the changed
 * bytes are covered by the protocol pseudo header.  Implemented out of
 * line (not visible in this header) — NOTE(review): confirm exact
 * semantics against the definitions in net/core.
 */
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
			      __be32 from, __be32 to, bool pseudohdr);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
			       const __be32 *from, const __be32 *to,
			       bool pseudohdr);
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
				     __wsum diff, bool pseudohdr);
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun static __always_inline
inet_proto_csum_replace2(__sum16 * sum,struct sk_buff * skb,__be16 from,__be16 to,bool pseudohdr)158*4882a593Smuzhiyun void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
159*4882a593Smuzhiyun __be16 from, __be16 to, bool pseudohdr)
160*4882a593Smuzhiyun {
161*4882a593Smuzhiyun inet_proto_csum_replace4(sum, skb, (__force __be32)from,
162*4882a593Smuzhiyun (__force __be32)to, pseudohdr);
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun
remcsum_adjust(void * ptr,__wsum csum,int start,int offset)165*4882a593Smuzhiyun static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
166*4882a593Smuzhiyun int start, int offset)
167*4882a593Smuzhiyun {
168*4882a593Smuzhiyun __sum16 *psum = (__sum16 *)(ptr + offset);
169*4882a593Smuzhiyun __wsum delta;
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun /* Subtract out checksum up to start */
172*4882a593Smuzhiyun csum = csum_sub(csum, csum_partial(ptr, start, 0));
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun /* Set derived checksum in packet */
175*4882a593Smuzhiyun delta = csum_sub((__force __wsum)csum_fold(csum),
176*4882a593Smuzhiyun (__force __wsum)*psum);
177*4882a593Smuzhiyun *psum = csum_fold(csum);
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun return delta;
180*4882a593Smuzhiyun }
181*4882a593Smuzhiyun
remcsum_unadjust(__sum16 * psum,__wsum delta)182*4882a593Smuzhiyun static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
183*4882a593Smuzhiyun {
184*4882a593Smuzhiyun *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun
187*4882a593Smuzhiyun #endif
188