xref: /OK3568_Linux_fs/kernel/net/ipv6/esp6_offload.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * IPV6 GSO/GRO offload support
4*4882a593Smuzhiyun  * Linux INET implementation
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Copyright (C) 2016 secunet Security Networks AG
7*4882a593Smuzhiyun  * Author: Steffen Klassert <steffen.klassert@secunet.com>
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * ESP GRO support
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/skbuff.h>
13*4882a593Smuzhiyun #include <linux/init.h>
14*4882a593Smuzhiyun #include <net/protocol.h>
15*4882a593Smuzhiyun #include <crypto/aead.h>
16*4882a593Smuzhiyun #include <crypto/authenc.h>
17*4882a593Smuzhiyun #include <linux/err.h>
18*4882a593Smuzhiyun #include <linux/module.h>
19*4882a593Smuzhiyun #include <net/ip.h>
20*4882a593Smuzhiyun #include <net/xfrm.h>
21*4882a593Smuzhiyun #include <net/esp.h>
22*4882a593Smuzhiyun #include <linux/scatterlist.h>
23*4882a593Smuzhiyun #include <linux/kernel.h>
24*4882a593Smuzhiyun #include <linux/slab.h>
25*4882a593Smuzhiyun #include <linux/spinlock.h>
26*4882a593Smuzhiyun #include <net/ip6_route.h>
27*4882a593Smuzhiyun #include <net/ipv6.h>
28*4882a593Smuzhiyun #include <linux/icmpv6.h>
29*4882a593Smuzhiyun 
esp6_nexthdr_esp_offset(struct ipv6hdr * ipv6_hdr,int nhlen)30*4882a593Smuzhiyun static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
31*4882a593Smuzhiyun {
32*4882a593Smuzhiyun 	int off = sizeof(struct ipv6hdr);
33*4882a593Smuzhiyun 	struct ipv6_opt_hdr *exthdr;
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun 	if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
36*4882a593Smuzhiyun 		return offsetof(struct ipv6hdr, nexthdr);
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun 	while (off < nhlen) {
39*4882a593Smuzhiyun 		exthdr = (void *)ipv6_hdr + off;
40*4882a593Smuzhiyun 		if (exthdr->nexthdr == NEXTHDR_ESP)
41*4882a593Smuzhiyun 			return off;
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun 		off += ipv6_optlen(exthdr);
44*4882a593Smuzhiyun 	}
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun 	return 0;
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun 
/* GRO receive handler for ESP over IPv6.
 *
 * Parses the SPI/sequence number, attaches (or reuses) a secpath with the
 * matching xfrm state, and hands the packet to xfrm_input() for async
 * processing.  Returns ERR_PTR(-EINPROGRESS) when the packet was consumed
 * by the xfrm layer, or NULL (with GRO flush forced) on any failure.
 */
static struct sk_buff *esp6_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int nhoff;
	int err;

	/* Make the ESP header linear and position skb->data at it. */
	if (!pskb_pull(skb, offset))
		return NULL;

	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		/* No HW decrypt happened yet: look up the state and
		 * record it on a (possibly fresh) secpath.
		 */
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET6);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		/* Reference to x is transferred to the secpath here. */
		sp->xvec[sp->len++] = x;
		sp->olen++;

		/* secpath_set() may have reallocated extensions. */
		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	/* Locate the nexthdr byte to rewrite after decapsulation. */
	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
	if (!nhoff)
		goto out;

	IP6CB(skb)->nhoff = nhoff;
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	/* Undo the pull and tell GRO not to aggregate this packet. */
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}
118*4882a593Smuzhiyun 
esp6_gso_encap(struct xfrm_state * x,struct sk_buff * skb)119*4882a593Smuzhiyun static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
120*4882a593Smuzhiyun {
121*4882a593Smuzhiyun 	struct ip_esp_hdr *esph;
122*4882a593Smuzhiyun 	struct ipv6hdr *iph = ipv6_hdr(skb);
123*4882a593Smuzhiyun 	struct xfrm_offload *xo = xfrm_offload(skb);
124*4882a593Smuzhiyun 	u8 proto = iph->nexthdr;
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun 	skb_push(skb, -skb_network_offset(skb));
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun 	if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) {
129*4882a593Smuzhiyun 		__be16 frag;
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun 		ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag);
132*4882a593Smuzhiyun 	}
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun 	esph = ip_esp_hdr(skb);
135*4882a593Smuzhiyun 	*skb_mac_header(skb) = IPPROTO_ESP;
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun 	esph->spi = x->id.spi;
138*4882a593Smuzhiyun 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	xo->proto = proto;
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun 
/* Tunnel mode: re-expose the MAC header and let the encapsulated
 * packet be segmented by its own layer-2 offload path.
 */
static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	struct sk_buff *segs;

	__skb_push(skb, skb->mac_len);
	segs = skb_mac_gso_segment(skb, features);

	return segs;
}
150*4882a593Smuzhiyun 
/* Transport mode: skip the ESP header and segment with the inner
 * protocol's registered GSO callback.
 */
static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	const struct net_offload *ops;

	/* Move past the ESP header so transport_header is correct. */
	skb->transport_header += x->props.header_len;

	ops = rcu_dereference(inet6_offloads[xo->proto]);
	if (unlikely(!ops || !ops->callbacks.gso_segment))
		return ERR_PTR(-EINVAL);

	return ops->callbacks.gso_segment(skb, features);
}
166*4882a593Smuzhiyun 
/* BEET mode: fix up the transport header offset for the (possibly
 * cross-family) inner packet, set the matching GSO type, and segment
 * with the inner protocol's offload callback.
 */
static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	/* Skip the ESP header first. */
	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		/* Inner packet is IPv4: correct for the smaller header. */
		skb->transport_header -=
			(sizeof(struct ipv6hdr) - sizeof(struct iphdr));

		if (proto == IPPROTO_BEETPH) {
			/* Variable-length BEET pseudo header precedes the
			 * payload; hdrlen is in 8-byte units.
			 */
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}

		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
	} else {
		__be16 frag;

		/* Inner packet is IPv6: step over extension headers. */
		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
	}

	if (proto == IPPROTO_IPIP)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;

	/* Position skb->data at the inner transport header. */
	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}
211*4882a593Smuzhiyun 
/* Dispatch GSO segmentation based on the state's outer encap mode. */
static struct sk_buff *xfrm6_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm6_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm6_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm6_beet_gso_segment(x, skb, features);
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}
}
227*4882a593Smuzhiyun 
/* GSO callback for ESP6: validate the packet against its xfrm state,
 * strip the ESP header + IV, downgrade the feature set when the device
 * cannot do ESP in hardware, and segment by outer mode.
 */
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
				        netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	netdev_features_t esp_features = features;
	struct crypto_aead *aead;
	struct ip_esp_hdr *esph;
	struct xfrm_state *x;
	struct sec_path *sp;
	unsigned int esp_hdr_len;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	/* The packet's SPI must match the state it was matched to. */
	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	esp_hdr_len = sizeof(*esph) + crypto_aead_ivsize(aead);
	if (!pskb_may_pull(skb, esp_hdr_len))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, esp_hdr_len);

	skb->encap_hdr_csum = 1;

	/* No HW ESP, or the state is bound to another device: fall back
	 * to software, which cannot use SG or checksum offload.
	 */
	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm6_outer_mode_gso_segment(x, skb, esp_features);
}
270*4882a593Smuzhiyun 
esp6_input_tail(struct xfrm_state * x,struct sk_buff * skb)271*4882a593Smuzhiyun static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
272*4882a593Smuzhiyun {
273*4882a593Smuzhiyun 	struct crypto_aead *aead = x->data;
274*4882a593Smuzhiyun 	struct xfrm_offload *xo = xfrm_offload(skb);
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
277*4882a593Smuzhiyun 		return -EINVAL;
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 	if (!(xo->flags & CRYPTO_DONE))
280*4882a593Smuzhiyun 		skb->ip_summed = CHECKSUM_NONE;
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 	return esp6_input_done2(skb, 0);
283*4882a593Smuzhiyun }
284*4882a593Smuzhiyun 
/* esp6_xmit - transmit-side ESP processing for offloaded packets
 *
 * Prepares an skb (pure payload at this point) for ESP transmission:
 * computes padding/trailer sizes, writes SPI and sequence numbers, and
 * either hands the packet to the device for HW encryption or performs
 * the software ESP output tail as a fallback.
 *
 * Returns 0 on success or a negative errno.
 */
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_t features)
{
	int len;
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	/* Fall back to software crypto when the device lacks HW ESP or
	 * the state is bound to a different device.
	 */
	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	/* Software path, or HW path on a non-GSO packet, needs the ESP
	 * header/trailer built here.  (Simplified from the redundant
	 * "!hw_offload || (hw_offload && !skb_is_gso(skb))".)
	 */
	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp6_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esp.esph = ip_esp_hdr(skb);
	esp.esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esp.esph->seq_no = htonl(seq);

		/* Advance the sequence number by the number of segments
		 * this GSO packet will produce.
		 */
		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	/* Detect 32-bit wraparound for extended sequence numbers. */
	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;	/* jumbogram: payload_len must be 0 */

	ipv6_hdr(skb)->payload_len = htons(len);

	if (hw_offload) {
		/* Ensure a sec path extension exists so the driver can
		 * find the state on transmit.
		 */
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		/* skb_ext_add() may have reallocated the extensions. */
		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp6_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}
377*4882a593Smuzhiyun 
/* GRO/GSO callbacks registered in the IPv6 offload table for ESP. */
static const struct net_offload esp6_offload = {
	.callbacks = {
		.gro_receive = esp6_gro_receive,
		.gso_segment = esp6_gso_segment,
	},
};
384*4882a593Smuzhiyun 
/* xfrm type offload hooks for IPPROTO_ESP over AF_INET6. */
static const struct xfrm_type_offload esp6_type_offload = {
	.description	= "ESP6 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto	     	= IPPROTO_ESP,
	.input_tail	= esp6_input_tail,
	.xmit		= esp6_xmit,
	.encap		= esp6_gso_encap,
};
393*4882a593Smuzhiyun 
esp6_offload_init(void)394*4882a593Smuzhiyun static int __init esp6_offload_init(void)
395*4882a593Smuzhiyun {
396*4882a593Smuzhiyun 	if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
397*4882a593Smuzhiyun 		pr_info("%s: can't add xfrm type offload\n", __func__);
398*4882a593Smuzhiyun 		return -EAGAIN;
399*4882a593Smuzhiyun 	}
400*4882a593Smuzhiyun 
401*4882a593Smuzhiyun 	return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
402*4882a593Smuzhiyun }
403*4882a593Smuzhiyun 
/* Module exit: unregister both offload registrations from init. */
static void __exit esp6_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6);
	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}
409*4882a593Smuzhiyun 
/* Module registration and metadata. */
module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV6 GSO/GRO offload support");
416