// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */

#include "ixgbevf.h"
#include <net/xfrm.h>
#include <crypto/aead.h>

#define IXGBE_IPSEC_KEY_BITS 160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
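/* The 160 bits of key material above are a 128-bit AES-GCM key followed
 * by a 32-bit salt, as delivered in the rfc4106 aead keymat (see
 * ixgbevf_ipsec_parse_proto_keys() below).
 */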

/**
 * ixgbevf_ipsec_set_pf_sa - ask the PF to set up an SA
 * @adapter: board private structure
 * @xs: xfrm info to be sent to the PF
 *
 * Returns: positive offload handle from the PF, or negative error code
 **/
static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
				   struct xfrm_state *xs)
{
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };
	struct ixgbe_hw *hw = &adapter->hw;
	struct sa_mbx_msg *sam;
	int ret;

	/* send the important bits to the PF */
	sam = (struct sa_mbx_msg *)(&msgbuf[1]);
	sam->flags = xs->xso.flags;
	sam->spi = xs->id.spi;
	sam->proto = xs->id.proto;
	sam->family = xs->props.family;

	if (xs->props.family == AF_INET6)
		memcpy(sam->addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6));
	else
		memcpy(sam->addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4));
	memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key));

	msgbuf[0] = IXGBE_VF_IPSEC_ADD;

	spin_lock_bh(&adapter->mbx_lock);

	ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
	if (ret)
		goto out;

	ret = hw->mbx.ops.read_posted(hw, msgbuf, 2);
	if (ret)
		goto out;

	ret = (int)msgbuf[1];
	if (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK && ret >= 0)
		ret = -1;

out:
	spin_unlock_bh(&adapter->mbx_lock);

	return ret;
}
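
/* Mailbox layout for IXGBE_VF_IPSEC_ADD as assembled above (a sketch
 * derived from the fields this function fills in; the authoritative
 * definition of struct sa_mbx_msg lives in the shared mailbox header):
 *
 *	msgbuf[0] : IXGBE_VF_IPSEC_ADD opcode
 *	msgbuf[1]+: struct sa_mbx_msg
 *			flags  - xfrm offload flags (e.g. XFRM_OFFLOAD_INBOUND)
 *			spi    - security parameter index
 *			proto  - IPPROTO_ESP or IPPROTO_AH
 *			family - AF_INET or AF_INET6
 *			addr   - destination address, IPv4 or IPv6
 *			key    - AES-GCM key material, including the salt
 *
 * The PF answers with msgbuf[0] = opcode | ACK/NACK and msgbuf[1] = its
 * SA table index, which is kept below as the pfsa handle.
 */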

/**
 * ixgbevf_ipsec_del_pf_sa - ask the PF to delete an SA
 * @adapter: board private structure
 * @pfsa: sa index returned from PF when created, -1 for all
 *
 * Returns: 0 on success, or negative error code
 **/
static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 msgbuf[2];
	int err;

	memset(msgbuf, 0, sizeof(msgbuf));
	msgbuf[0] = IXGBE_VF_IPSEC_DEL;
	msgbuf[1] = (u32)pfsa;

	spin_lock_bh(&adapter->mbx_lock);

	err = hw->mbx.ops.write_posted(hw, msgbuf, 2);
	if (err)
		goto out;

	err = hw->mbx.ops.read_posted(hw, msgbuf, 2);

out:
	spin_unlock_bh(&adapter->mbx_lock);
	return err;
}

/**
 * ixgbevf_ipsec_restore - restore the IPsec HW settings after a reset
 * @adapter: board private structure
 *
 * Reload the HW tables from the SW tables after they've been bashed
 * by a chip reset. While we're here, make sure any stale VF data is
 * removed, since we go through reset when num_vfs changes.
 **/
void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct net_device *netdev = adapter->netdev;
	int i;

	if (!(adapter->netdev->features & NETIF_F_HW_ESP))
		return;

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *r = &ipsec->rx_tbl[i];
		struct tx_sa *t = &ipsec->tx_tbl[i];
		int ret;

		if (r->used) {
			ret = ixgbevf_ipsec_set_pf_sa(adapter, r->xs);
			if (ret < 0)
				netdev_err(netdev, "reload rx_tbl[%d] failed = %d\n",
					   i, ret);
		}

		if (t->used) {
			ret = ixgbevf_ipsec_set_pf_sa(adapter, t->xs);
			if (ret < 0)
				netdev_err(netdev, "reload tx_tbl[%d] failed = %d\n",
					   i, ret);
		}
	}
}

/**
 * ixgbevf_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to IPsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/
static
int ixgbevf_ipsec_find_empty_idx(struct ixgbevf_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}

/**
 * ixgbevf_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to IPsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an IPv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static
struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
					       __be32 *daddr, u8 proto,
					       __be32 spi, bool ip4)
{
	struct xfrm_state *ret = NULL;
	struct rx_sa *rsa;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
				   (__force u32)spi) {
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/**
 * ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables. The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
					  u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.real_dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a big endian array of bytes, so
	 * we don't need to do any byte swapping.
	 * 160 accounts for 16 byte key and 4 byte salt
	 */
	if (key_len > IXGBE_IPSEC_KEY_BITS) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len == IXGBE_IPSEC_KEY_BITS) {
		*mysalt = 0;
	} else {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}
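
/* Usage sketch (illustrative, not part of the driver): a matching SA can
 * be installed from userspace with iproute2, where the aead keymat is the
 * 16-byte AES key followed by the 4-byte salt and the ICV length matches
 * IXGBE_IPSEC_AUTH_BITS; the addresses, SPI and key bytes below are made up:
 *
 *	ip xfrm state add src 192.168.1.1 dst 192.168.1.2 \
 *		proto esp spi 0x42 mode transport \
 *		aead "rfc4106(gcm(aes))" \
 *		0x00112233445566778899aabbccddeeff01020304 128 \
 *		offload dev eth0 dir in
 */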

/**
 * ixgbevf_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.real_dev;
	struct ixgbevf_adapter *adapter;
	struct ixgbevf_ipsec *ipsec;
	u16 sa_idx;
	int ret;

	adapter = netdev_priv(dev);
	ipsec = adapter->ipsec;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (xs->props.mode != XFRM_MODE_TRANSPORT) {
		netdev_err(dev, "Unsupported mode for ipsec offload\n");
		return -EINVAL;
	}

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbevf_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		ret = ixgbevf_ipsec_set_pf_sa(adapter, xs);
		if (ret < 0)
			return ret;
		rsa.pfsa = ret;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     (__force u32)rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		/* find the first unused index */
		ret = ixgbevf_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto & IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		ret = ixgbevf_ipsec_set_pf_sa(adapter, xs);
		if (ret < 0)
			return ret;
		tsa.pfsa = ret;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	return 0;
}

/**
 * ixgbevf_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.real_dev;
	struct ixgbevf_adapter *adapter;
	struct ixgbevf_ipsec *ipsec;
	u16 sa_idx;

	adapter = netdev_priv(dev);
	ipsec = adapter->ipsec;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;

		if (!ipsec->rx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbevf_ipsec_del_pf_sa(adapter, ipsec->rx_tbl[sa_idx].pfsa);
		hash_del_rcu(&ipsec->rx_tbl[sa_idx].hlist);
		memset(&ipsec->rx_tbl[sa_idx], 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbevf_ipsec_del_pf_sa(adapter, ipsec->tx_tbl[sa_idx].pfsa);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}
}

/**
 * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbevf_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbevf_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok,
};
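
/* The xfrm stack invokes these callbacks when an SA is installed with
 * hardware offload requested (see the usage sketch above): state add and
 * delete at configuration time, and xdo_dev_offload_ok per packet on the
 * Tx fast path.
 */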

/**
 * ixgbevf_ipsec_tx - setup Tx flags for IPsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
		     struct ixgbevf_tx_buffer *first,
		     struct ixgbevf_ipsec_tx_data *itd)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct sec_path *sp;
	struct tx_sa *tsa;
	u16 sa_idx;

	sp = skb_sec_path(first->skb);
	if (unlikely(!sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, sa_idx);
		return 0;
	}

	itd->pfsa = tsa->pfsa - IXGBE_IPSEC_BASE_TX_INDEX;

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CSUM;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding. This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about, we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}
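
/* For reference, the ESP trailer behind the arithmetic above (a sketch
 * following RFC 4303, not an exact on-wire dump):
 *
 *	... payload | padding (padlen bytes) | padlen | next header | ICV
 *	                                       1 byte     1 byte    authlen
 *
 * hence trailer_len = authlen + 2 + padlen, with padlen read from the
 * byte sitting authlen + 2 bytes before the end of the packet.
 */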

/**
 * ixgbevf_ipsec_rx - decode IPsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an IPsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
		      union ixgbe_adv_rx_desc *rx_desc,
		      struct sk_buff *skb)
{
	struct ixgbevf_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	struct sec_path *sp;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the IP and crypto headers in the data.
	 * We can assume no VLAN header in the way, b/c the
	 * hw won't recognize the IPsec packet and anyway the
	 * current VLAN device doesn't support xfrm offload.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbevf_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	sp = secpath_set(skb);
	if (unlikely(!sp))
		return;

	sp->xvec[sp->len++] = xs;
	sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}

/**
 * ixgbevf_init_ipsec_offload - initialize registers for IPsec operation
 * @adapter: board private structure
 **/
void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec;
	size_t size;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_14:
		break;
	default:
		return;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;

	adapter->netdev->xfrmdev_ops = &ixgbevf_xfrmdev_ops;

#define IXGBEVF_ESP_FEATURES	(NETIF_F_HW_ESP | \
				 NETIF_F_HW_ESP_TX_CSUM | \
				 NETIF_F_GSO_ESP)

	adapter->netdev->features |= IXGBEVF_ESP_FEATURES;
	adapter->netdev->hw_enc_features |= IXGBEVF_ESP_FEATURES;

	return;

err2:
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
	kfree(ipsec);
err1:
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables");
}
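
/* Once the ESP features are set, offload availability can be checked from
 * userspace, e.g. (illustrative command and output):
 *
 *	$ ethtool -k eth0 | grep esp
 *	esp-hw-offload: on
 *	esp-tx-csum-hw-offload: on
 */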

/**
 * ixgbevf_stop_ipsec_offload - tear down the IPsec offload
 * @adapter: board private structure
 **/
void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}