/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  arch/arm/include/asm/checksum.h
 *
 * IP checksum routines
 *
 * Copyright (C) Original authors of ../asm-i386/checksum.h
 * Copyright (C) 1996-1999 Russell King
 */
#ifndef __ASM_ARM_CHECKSUM_H
#define __ASM_ARM_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
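
/*
 * Example (a minimal sketch, not part of the original header): summing a
 * datagram held in two fragments and folding the result.  The fragment
 * buffers and lengths are hypothetical; per the comment above, frag0_len
 * must be even since it is not the last fragment.
 *
 *	__wsum sum;
 *	__sum16 check;
 *
 *	sum   = csum_partial(frag0, frag0_len, 0);
 *	sum   = csum_partial(frag1, frag1_len, sum);	@ feed sum back in
 *	check = csum_fold(sum);				@ final 16-bit result
 */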

/*
 * the same as csum_partial, but copies from src while it checksums,
 * and handles user-space pointer exceptions correctly, when needed.
 *
 * here it is even more important to align src and dst on a 32-bit
 * (or, better yet, 64-bit) boundary
 */

__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len);

__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
#define _HAVE_ARCH_CSUM_AND_COPY
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
	if (!access_ok(src, len))
		return 0;

	return csum_partial_copy_from_user(src, dst, len);
}
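
/*
 * Example (illustrative, not from the original header): pulling a
 * payload out of user space while checksumming it in the same pass.
 * 'ubuf', 'kbuf' and 'len' are hypothetical; a return of 0 means the
 * user-space range failed the access_ok() check.
 *
 *	__wsum sum = csum_and_copy_from_user(ubuf, kbuf, len);
 *
 *	if (sum == 0)
 *		return -EFAULT;
 */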

/*
 * 	Fold a partial checksum without adding pseudo headers
 */
static inline __sum16 csum_fold(__wsum sum)
{
	__asm__(
	"add	%0, %1, %1, ror #16	@ csum_fold"
	: "=r" (sum)
	: "r" (sum)
	: "cc");
	return (__force __sum16)(~(__force u32)sum >> 16);
}
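
/*
 * Worked example (added for illustration): with sum = 0xffff0001, the
 * rotate-and-add computes 0xffff0001 + 0x0001ffff = 0x00010000 (mod
 * 2^32, the carry out of the low halfword having been added into the
 * high one); complementing and taking the top halfword then yields
 * 0xfffe, i.e. the ones-complement fold of 0xffff + 0x0001 with
 * end-around carry.
 */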

/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which are always checksummed on 4-octet boundaries.
 */
static inline __sum16
ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int tmp1;
	__wsum sum;

	__asm__ __volatile__(
	"ldr	%0, [%1], #4		@ ip_fast_csum		\n\
	ldr	%3, [%1], #4					\n\
	sub	%2, %2, #5					\n\
	adds	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
	adcs	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
1:	adcs	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
	tst	%2, #15			@ do this carefully	\n\
	subne	%2, %2, #1		@ without destroying	\n\
	bne	1b			@ the carry flag	\n\
	adcs	%0, %0, %3					\n\
	adc	%0, %0, #0"
	: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
	: "1" (iph), "2" (ihl)
	: "cc", "memory");
	return csum_fold(sum);
}
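
/*
 * Example (a sketch; skb setup elided): verifying a received IPv4
 * header the way the IP input path does.  A header with a correct
 * checksum folds to zero.
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *
 *	if (ip_fast_csum(iph, iph->ihl))
 *		goto csum_error;
 */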

static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
		   __u8 proto, __wsum sum)
{
	u32 lenprot = len + proto;
	if (__builtin_constant_p(sum) && sum == 0) {
		__asm__(
		"adds	%0, %1, %2	@ csum_tcpudp_nofold0	\n\t"
#ifdef __ARMEB__
		"adcs	%0, %0, %3				\n\t"
#else
		"adcs	%0, %0, %3, ror #8			\n\t"
#endif
		"adc	%0, %0, #0"
		: "=&r" (sum)
		: "r" (daddr), "r" (saddr), "r" (lenprot)
		: "cc");
	} else {
		__asm__(
		"adds	%0, %1, %2	@ csum_tcpudp_nofold	\n\t"
		"adcs	%0, %0, %3				\n\t"
#ifdef __ARMEB__
		"adcs	%0, %0, %4				\n\t"
#else
		"adcs	%0, %0, %4, ror #8			\n\t"
#endif
		"adc	%0, %0, #0"
		: "=&r"(sum)
		: "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot)
		: "cc");
	}
	return sum;
}
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
		  __u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
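
/*
 * Example (illustrative, loosely following the UDP transmit path):
 * checksum the UDP header plus payload with csum_partial(), then mix
 * in the pseudo-header.  'uh', 'saddr', 'daddr' and 'ulen' are assumed
 * to have been set up by the caller.
 *
 *	__wsum csum;
 *
 *	uh->check = 0;
 *	csum = csum_partial(uh, ulen, 0);
 *	uh->check = csum_tcpudp_magic(saddr, daddr, ulen, IPPROTO_UDP, csum);
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;	@ 0 means "no checksum" on UDP
 */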

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16
ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
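
/*
 * Example (sketch): filling in the checksum of an outgoing ICMP
 * message; 'icmph' and 'icmp_len' are hypothetical and cover the ICMP
 * header plus payload.
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, icmp_len);
 */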

#define _HAVE_ARCH_IPV6_CSUM
extern __wsum
__csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __be32 len,
		__be32 proto, __wsum sum);

static inline __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
		__u32 len, __u8 proto, __wsum sum)
{
	return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
					   htonl(proto), sum));
}
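
/*
 * Example (sketch, mirroring what the TCPv6 output path does): combine
 * a payload checksum with the IPv6 pseudo-header.  'th', 'ip6h',
 * 'tcp_len' and 'csum' are assumed to be set up by the caller.
 *
 *	th->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
 *				    tcp_len, IPPROTO_TCP, csum);
 */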
#endif