// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

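/* GRO receive handler for ESP. Looks up the xfrm state for the SPI
 * found in the packet, attaches it to the secpath and feeds the skb
 * to xfrm_input() for (possibly asynchronous) processing. Returning
 * ERR_PTR(-EINPROGRESS) takes the skb out of the regular GRO path.
 */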
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int err;

	if (!pskb_pull(skb, offset))
		return NULL;

	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

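/* Build the outer ESP header for GSO output: record the inner protocol
 * in the offload context and fill in the SPI and the low 32 bits of
 * the sequence number.
 */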
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

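/* Tunnel mode: the inner packet is self-contained, so segmentation can
 * be delegated to the generic MAC layer GSO path.
 */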
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__skb_push(skb, skb->mac_len);
	return skb_mac_gso_segment(skb, features);
}

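/* Transport mode: step over the ESP header and let the inner transport
 * protocol's own GSO callback do the segmentation.
 */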
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

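/* BEET mode: adjust the transport header for the optional pseudo
 * header (IPPROTO_BEETPH) or IPv6 extension headers, then hand the
 * packet to the inner protocol's GSO callback.
 */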
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	if (proto == IPPROTO_IPV6)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

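/* Dispatch GSO segmentation according to the outer encapsulation mode
 * of the xfrm state.
 */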
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

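/* GSO callback for ESP. Validates the offload state and SPI, strips
 * the ESP header and IV, masks out features the underlying device
 * cannot provide for this state and segments according to the outer
 * mode.
 */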
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

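/* Finish ESP receive processing for packets that came in through the
 * offload path; the crypto may already have been done by the device.
 */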
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

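/* ESP transmit handler for offloaded packets. Builds the ESP header
 * and trailer and, unless the device can do the crypto itself
 * (hw_offload), falls back to software encryption via
 * esp_output_tail().
 */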
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.description	= "ESP4 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");