// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
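/* Adjust skb header pointers for transport mode before layer 2 processing. */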
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header -= x->props.header_len;

	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

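/* Adjust skb header pointers for tunnel mode; hsize is the size of the
 * outer IP header.
 */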
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)

{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}

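/* Adjust skb header pointers for BEET mode, accounting for the IPv4 BEET
 * pseudo header when the selector is not IPv6.
 */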
static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int phlen = 0;

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	if (x->sel.family != AF_INET6) {
		phlen = IPV4_BEET_PHMAXLEN;
		if (x->outer_mode.family == AF_INET6)
			phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	}

	pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_BEET:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		break;
	}
}

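/* Returns true if transmitting all GSO segments of this skb would wrap the
 * 32-bit (low) output sequence number.
 */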
static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	__u32 seq = xo->seq.low;

	seq += skb_shinfo(skb)->gso_segs;
	if (unlikely(seq < xo->seq.low))
		return true;

	return false;
}

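/* Prepare an skb with an offloaded xfrm state for transmission.  Packets that
 * were rerouted away from the offloading device are segmented in software;
 * every resulting segment gets the per-mode header adjustment and is run
 * through the type offload ->xmit hook before it is handed to the driver.
 */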
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct softnet_data *sd;
	struct sk_buff *skb2, *nskb, *pskb = NULL;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct net_device *dev = skb->dev;
	struct sec_path *sp;

	if (!xo || (xo->flags & XFRM_XMIT))
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	/* This skb was already validated on the upper/virtual dev */
	if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
		return skb;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
				unlikely(xmit_xfrm_check_overflow(skb)))) {
		struct sk_buff *segs;

		/* Packet got rerouted, fixup features and segment it. */
		esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

		segs = skb_gso_segment(skb, esp_features);
		if (IS_ERR(segs)) {
			kfree_skb(skb);
			atomic_long_inc(&dev->tx_dropped);
			return NULL;
		} else {
			consume_skb(skb);
			skb = segs;
		}
	}

	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	skb_list_walk_safe(skb, skb2, nskb) {
		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;
			else
				pskb->next = nskb;

			continue;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));
		pskb = skb2;
	}

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

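/* Set up hardware offload for a new xfrm state: find the offloading device
 * (by ifindex or via a route lookup), check that it provides the required
 * xfrmdev_ops, and ask the driver to install the state.
 */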
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_state_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload)
		return -EINVAL;

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad)
		return -EINVAL;

	if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND))
		return -EINVAL;

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	xso->real_dev = dev;
	xso->num_exthdrs = 1;
	/* Don't forward bit that is not implemented */
	xso->flags = xuo->flags & ~XFRM_OFFLOAD_IPV6;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->num_exthdrs = 0;
		xso->flags = 0;
		xso->dev = NULL;
		xso->real_dev = NULL;
		dev_put(dev);

		if (err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

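/* Check whether this skb may use the hardware offload path for state x:
 * the state must be offloadable, the dst path must end at the offloading
 * device (if one is bound) with no nested xfrm, and the packet must fit the
 * state's MTU unless GSO can keep the segments within it.
 */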
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	    (!xdst->child->xfrm)) {
		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

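/* Transmit an skb whose transformation was completed asynchronously by the
 * device.  If the TX queue is frozen or stopped, queue the skb on the
 * per-CPU xfrm backlog and reschedule the TX softirq.
 */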
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

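/* Drain the per-CPU xfrm backlog from the TX softirq, retransmitting every
 * queued skb via xfrm_dev_resume().
 */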
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}

}
#endif

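/* Sanity-check a device's ESP offload feature flags against the xfrmdev_ops
 * callbacks it actually provides.
 */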
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_register(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

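/* Flush all states offloaded to a device that is going down or unregistering. */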
static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	return NOTIFY_DONE;
}

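/* Netdevice notifier: validate offload capabilities on register and feature
 * change, and flush offloaded states when the device goes away.
 */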
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_dev_register(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_dev_feat_change(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}