/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"

#define BNXT_FID_INVALID		0xffff
#define VLAN_TCI(vid, prio)	((vid) | ((prio) << VLAN_PRIO_SHIFT))

#define is_vlan_pcp_wildcarded(vlan_tci_mask)	\
	((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
#define is_vlan_pcp_exactmatch(vlan_tci_mask)	\
	((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
#define is_vlan_pcp_zero(vlan_tci)	\
	((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
#define is_vid_exactmatch(vlan_tci_mask)	\
	((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)

static bool is_wildcard(void *mask, int len);
static bool is_exactmatch(void *mask, int len);
/* Return the dst fid of the func for flow forwarding
 * For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid is the fid of the VF
 */
static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{
	struct bnxt *bp;

	/* check if dev belongs to the same switch */
	if (!netdev_port_same_parent_id(pf_bp->dev, dev)) {
		netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch\n",
			    dev->ifindex);
		return BNXT_FID_INVALID;
	}

	/* Is dev a VF-rep? */
	if (bnxt_dev_is_vf_rep(dev))
		return bnxt_vf_rep_get_fid(dev);

	bp = netdev_priv(dev);
	return bp->pf.fw_fid;
}

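/* Parse a redirect (mirred) action: record the egress netdev so that the
 * destination fid can be resolved once all actions have been parsed.
 */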
static int bnxt_tc_parse_redir(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct flow_action_entry *act)
{
	struct net_device *dev = act->dev;

	if (!dev) {
		netdev_info(bp->dev, "no dev in mirred action\n");
		return -EINVAL;
	}

	actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
	actions->dst_dev = dev;
	return 0;
}

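/* Parse a VLAN push or pop action into the actions struct; any other VLAN
 * action id (e.g. mangle) is rejected with -EOPNOTSUPP.
 */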
static int bnxt_tc_parse_vlan(struct bnxt *bp,
			      struct bnxt_tc_actions *actions,
			      const struct flow_action_entry *act)
{
	switch (act->id) {
	case FLOW_ACTION_VLAN_POP:
		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
		break;
	case FLOW_ACTION_VLAN_PUSH:
		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
		actions->push_vlan_tci = htons(act->vlan.vid);
		actions->push_vlan_tpid = act->vlan.proto;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

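/* Parse a tunnel-key set action: save the IPv4 encap key for a later
 * encap record allocation. IPv6 tunnel encap is not supported.
 */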
static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
				    struct bnxt_tc_actions *actions,
				    const struct flow_action_entry *act)
{
	const struct ip_tunnel_info *tun_info = act->tunnel;
	const struct ip_tunnel_key *tun_key = &tun_info->key;

	if (ip_tunnel_info_af(tun_info) != AF_INET) {
		netdev_info(bp->dev, "only IPv4 tunnel-encap is supported\n");
		return -EOPNOTSUPP;
	}

	actions->tun_encap_key = *tun_key;
	actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
	return 0;
}

/* Key & Mask from the stack come unaligned in multiple iterations of 4 bytes
 * each (u32).
 * This routine consolidates such multiple unaligned values into one
 * field each for Key & Mask (for src and dst macs separately)
 * For example,
 *			Mask/Key	Offset	Iteration
 *			==========	======	=========
 *	dst mac		0xffffffff	0	1
 *	dst mac		0x0000ffff	4	2
 *
 *	src mac		0xffff0000	4	1
 *	src mac		0xffffffff	8	2
 *
 * The above combination coming from the stack will be consolidated as
 *			Mask/Key
 *			==============
 *	src mac:	0xffffffffffff
 *	dst mac:	0xffffffffffff
 */
static void bnxt_set_l2_key_mask(u32 part_key, u32 part_mask,
				 u8 *actual_key, u8 *actual_mask)
{
	u32 key = get_unaligned((u32 *)actual_key);
	u32 mask = get_unaligned((u32 *)actual_mask);

	part_key &= part_mask;
	part_key |= key & ~part_mask;

	put_unaligned(mask | part_mask, (u32 *)actual_mask);
	put_unaligned(part_key, (u32 *)actual_key);
}

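/* Copy the consolidated L2 rewrite key into the firmware format: dmac and
 * smac must each be either fully wildcarded or an exact match, and are
 * stored as arrays of three big-endian u16 words.
 */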
static int
bnxt_fill_l2_rewrite_fields(struct bnxt_tc_actions *actions,
			    u16 *eth_addr, u16 *eth_addr_mask)
{
	u16 *p;
	int j;

	if (unlikely(bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask)))
		return -EINVAL;

	if (!is_wildcard(&eth_addr_mask[0], ETH_ALEN)) {
		if (!is_exactmatch(&eth_addr_mask[0], ETH_ALEN))
			return -EINVAL;
		/* FW expects dmac to be in u16 array format */
		p = eth_addr;
		for (j = 0; j < 3; j++)
			actions->l2_rewrite_dmac[j] = cpu_to_be16(*(p + j));
	}

	if (!is_wildcard(&eth_addr_mask[ETH_ALEN / 2], ETH_ALEN)) {
		if (!is_exactmatch(&eth_addr_mask[ETH_ALEN / 2], ETH_ALEN))
			return -EINVAL;
		/* FW expects smac to be in u16 array format */
		p = &eth_addr[ETH_ALEN / 2];
		for (j = 0; j < 3; j++)
			actions->l2_rewrite_smac[j] = cpu_to_be16(*(p + j));
	}

	return 0;
}

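/* Parse one pedit (packet mangle) action. Ethernet header edits are
 * accumulated into eth_addr/eth_addr_mask for an L2 rewrite; IPv4/IPv6
 * address edits become a NAT translation; TCP/UDP port edits are only
 * allowed on top of an L3 rewrite (NAPT).
 */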
static int
bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
		    struct flow_action_entry *act, int act_idx, u8 *eth_addr,
		    u8 *eth_addr_mask)
{
	size_t offset_of_ip6_daddr = offsetof(struct ipv6hdr, daddr);
	size_t offset_of_ip6_saddr = offsetof(struct ipv6hdr, saddr);
	u32 mask, val, offset, idx;
	u8 htype;

	offset = act->mangle.offset;
	htype = act->mangle.htype;
	mask = ~act->mangle.mask;
	val = act->mangle.val;

	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		if (offset > PEDIT_OFFSET_SMAC_LAST_4_BYTES) {
			netdev_err(bp->dev,
				   "%s: eth_hdr: Invalid pedit field\n",
				   __func__);
			return -EINVAL;
		}
		actions->flags |= BNXT_TC_ACTION_FLAG_L2_REWRITE;

		bnxt_set_l2_key_mask(val, mask, &eth_addr[offset],
				     &eth_addr_mask[offset]);
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE;
		actions->nat.l3_is_ipv4 = true;
		if (offset == offsetof(struct iphdr, saddr)) {
			actions->nat.src_xlate = true;
			actions->nat.l3.ipv4.saddr.s_addr = htonl(val);
		} else if (offset == offsetof(struct iphdr, daddr)) {
			actions->nat.src_xlate = false;
			actions->nat.l3.ipv4.daddr.s_addr = htonl(val);
		} else {
			netdev_err(bp->dev,
				   "%s: IPv4_hdr: Invalid pedit field\n",
				   __func__);
			return -EINVAL;
		}

		netdev_dbg(bp->dev, "nat.src_xlate = %d src IP: %pI4 dst ip : %pI4\n",
			   actions->nat.src_xlate, &actions->nat.l3.ipv4.saddr,
			   &actions->nat.l3.ipv4.daddr);
		break;

	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE;
		actions->nat.l3_is_ipv4 = false;
		if (offset >= offsetof(struct ipv6hdr, saddr) &&
		    offset < offset_of_ip6_daddr) {
			/* 16 byte IPv6 address comes in 4 iterations of
			 * 4 byte chunks each
			 */
			actions->nat.src_xlate = true;
			idx = (offset - offset_of_ip6_saddr) / 4;
			/* First 4 bytes will be copied to idx 0 and so on */
			actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
		} else if (offset >= offset_of_ip6_daddr &&
			   offset < offset_of_ip6_daddr + 16) {
			actions->nat.src_xlate = false;
			idx = (offset - offset_of_ip6_daddr) / 4;
			actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val);
		} else {
			netdev_err(bp->dev,
				   "%s: IPv6_hdr: Invalid pedit field\n",
				   __func__);
			return -EINVAL;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		/* HW does not support L4 rewrite alone without L3
		 * rewrite
		 */
		if (!(actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE)) {
			netdev_err(bp->dev,
				   "Need to specify L3 rewrite as well\n");
			return -EINVAL;
		}
		if (actions->nat.src_xlate)
			actions->nat.l4.ports.sport = htons(val);
		else
			actions->nat.l4.ports.dport = htons(val);
		netdev_dbg(bp->dev, "actions->nat.sport = %d dport = %d\n",
			   actions->nat.l4.ports.sport,
			   actions->nat.l4.ports.dport);
		break;
	default:
		netdev_err(bp->dev, "%s: Unsupported pedit hdr type\n",
			   __func__);
		return -EINVAL;
	}
	return 0;
}

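/* Walk the flow_action list and translate each entry into the driver's
 * bnxt_tc_actions representation. A drop action short-circuits all other
 * actions; L2 rewrite keys collected from pedit entries are consolidated
 * afterwards and the forwarding destination fid is resolved last.
 */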
static int bnxt_tc_parse_actions(struct bnxt *bp,
				 struct bnxt_tc_actions *actions,
				 struct flow_action *flow_action,
				 struct netlink_ext_ack *extack)
{
	/* Used to store the L2 rewrite mask for dmac (6 bytes) followed by
	 * smac (6 bytes) if rewrite of both is specified, otherwise either
	 * dmac or smac
	 */
	u16 eth_addr_mask[ETH_ALEN] = { 0 };
	/* Used to store the L2 rewrite key for dmac (6 bytes) followed by
	 * smac (6 bytes) if rewrite of both is specified, otherwise either
	 * dmac or smac
	 */
	u16 eth_addr[ETH_ALEN] = { 0 };
	struct flow_action_entry *act;
	int i, rc;

	if (!flow_action_has_entries(flow_action)) {
		netdev_info(bp->dev, "no actions\n");
		return -EINVAL;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
			return 0; /* don't bother with other actions */
		case FLOW_ACTION_REDIRECT:
			rc = bnxt_tc_parse_redir(bp, actions, act);
			if (rc)
				return rc;
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE:
			rc = bnxt_tc_parse_vlan(bp, actions, act);
			if (rc)
				return rc;
			break;
		case FLOW_ACTION_TUNNEL_ENCAP:
			rc = bnxt_tc_parse_tunnel_set(bp, actions, act);
			if (rc)
				return rc;
			break;
		case FLOW_ACTION_TUNNEL_DECAP:
			actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
			break;
		/* Packet edit: L2 rewrite, NAT, NAPT */
		case FLOW_ACTION_MANGLE:
			rc = bnxt_tc_parse_pedit(bp, actions, act, i,
						 (u8 *)eth_addr,
						 (u8 *)eth_addr_mask);
			if (rc)
				return rc;
			break;
		default:
			break;
		}
	}

	if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
		rc = bnxt_fill_l2_rewrite_fields(actions, eth_addr,
						 eth_addr_mask);
		if (rc)
			return rc;
	}

	if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
		if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
			/* dst_fid is PF's fid */
			actions->dst_fid = bp->pf.fw_fid;
		} else {
			/* find the FID from dst_dev */
			actions->dst_fid =
				bnxt_flow_get_dst_fid(bp, actions->dst_dev);
			if (actions->dst_fid == BNXT_FID_INVALID)
				return -EINVAL;
		}
	}

	return 0;
}

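/* Convert a flow_cls_offload classifier into a bnxt_tc_flow: copy the
 * supported dissector keys/masks (basic, eth, vlan, IPv4/IPv6, ports,
 * ICMP and tunnel keys) and then parse the action list.
 */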
static int bnxt_tc_parse_flow(struct bnxt *bp,
			      struct flow_cls_offload *tc_flow_cmd,
			      struct bnxt_tc_flow *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd);
	struct flow_dissector *dissector = rule->match.dissector;

	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
	    (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
		netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		flow->l2_key.ether_type = match.key->n_proto;
		flow->l2_mask.ether_type = match.mask->n_proto;

		if (match.key->n_proto == htons(ETH_P_IP) ||
		    match.key->n_proto == htons(ETH_P_IPV6)) {
			flow->l4_key.ip_proto = match.key->ip_proto;
			flow->l4_mask.ip_proto = match.mask->ip_proto;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
		ether_addr_copy(flow->l2_key.dmac, match.key->dst);
		ether_addr_copy(flow->l2_mask.dmac, match.mask->dst);
		ether_addr_copy(flow->l2_key.smac, match.key->src);
		ether_addr_copy(flow->l2_mask.smac, match.mask->src);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		flow->l2_key.inner_vlan_tci =
			cpu_to_be16(VLAN_TCI(match.key->vlan_id,
					     match.key->vlan_priority));
		flow->l2_mask.inner_vlan_tci =
			cpu_to_be16((VLAN_TCI(match.mask->vlan_id,
					      match.mask->vlan_priority)));
		flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
		flow->l2_mask.inner_vlan_tpid = htons(0xffff);
		flow->l2_key.num_vlans = 1;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
		flow->l3_key.ipv4.daddr.s_addr = match.key->dst;
		flow->l3_mask.ipv4.daddr.s_addr = match.mask->dst;
		flow->l3_key.ipv4.saddr.s_addr = match.key->src;
		flow->l3_mask.ipv4.saddr.s_addr = match.mask->src;
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
		flow->l3_key.ipv6.daddr = match.key->dst;
		flow->l3_mask.ipv6.daddr = match.mask->dst;
		flow->l3_key.ipv6.saddr = match.key->src;
		flow->l3_mask.ipv6.saddr = match.mask->src;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
		flow->l4_key.ports.dport = match.key->dst;
		flow->l4_mask.ports.dport = match.mask->dst;
		flow->l4_key.ports.sport = match.key->src;
		flow->l4_mask.ports.sport = match.mask->src;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
		flow->l4_key.icmp.type = match.key->type;
		flow->l4_key.icmp.code = match.key->code;
		flow->l4_mask.icmp.type = match.mask->type;
		flow->l4_mask.icmp.code = match.mask->code;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
		flow->tun_key.u.ipv4.dst = match.key->dst;
		flow->tun_mask.u.ipv4.dst = match.mask->dst;
		flow->tun_key.u.ipv4.src = match.key->src;
		flow->tun_mask.u.ipv4.src = match.mask->src;
	} else if (flow_rule_match_key(rule,
				       FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
		flow->tun_key.tun_id = key32_to_tunnel_id(match.key->keyid);
		flow->tun_mask.tun_id = key32_to_tunnel_id(match.mask->keyid);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_enc_ports(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
		flow->tun_key.tp_dst = match.key->dst;
		flow->tun_mask.tp_dst = match.mask->dst;
		flow->tun_key.tp_src = match.key->src;
		flow->tun_mask.tp_src = match.mask->src;
	}

	return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action,
				     tc_flow_cmd->common.extack);
}

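/* Free a flow in the hardware via HWRM_CFA_FLOW_FREE, using the 64-bit
 * extended flow handle when the firmware supports it.
 */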
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
				   struct bnxt_tc_flow_node *flow_node)
{
	struct hwrm_cfa_flow_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
		req.ext_flow_handle = flow_node->ext_flow_handle;
	else
		req.flow_handle = flow_node->flow_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);

	return rc;
}

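/* Sum of the prefix lengths of the four 32-bit words of an IPv6 mask */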
static int ipv6_mask_len(struct in6_addr *mask)
{
	int mask_len = 0, i;

	for (i = 0; i < 4; i++)
		mask_len += inet_mask_len(mask->s6_addr32[i]);

	return mask_len;
}

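/* true if every byte of the mask is zero, i.e. the field is wildcarded */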
static bool is_wildcard(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++) {
		if (p[i] != 0)
			return false;
	}
	return true;
}

static bool is_exactmatch(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0xff)
			return false;

	return true;
}

static bool is_vlan_tci_allowed(__be16 vlan_tci_mask,
				__be16 vlan_tci)
{
	/* VLAN priority must be either exactly zero or fully wildcarded and
	 * VLAN id must be exact match.
	 */
	if (is_vid_exactmatch(vlan_tci_mask) &&
	    ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
	      is_vlan_pcp_zero(vlan_tci)) ||
	     is_vlan_pcp_wildcarded(vlan_tci_mask)))
		return true;

	return false;
}

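/* true if any bit is set in the key, i.e. the field was specified at all */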
static bool bits_set(void *key, int len)
{
	const u8 *p = key;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0)
			return true;

	return false;
}

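/* Program a parsed flow into the hardware via HWRM_CFA_FLOW_ALLOC: build
 * the match key (L2/L3/L4 and tunnel fields) and the action flags (drop,
 * forward, NAT, L2 rewrite, VLAN push/pop), then store the flow handles
 * returned by the firmware in the flow node.
 */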
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    __le16 ref_flow_handle,
				    __le32 tunnel_handle,
				    struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_actions *actions = &flow->actions;
	struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
	struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
	struct hwrm_cfa_flow_alloc_input req = { 0 };
	struct hwrm_cfa_flow_alloc_output *resp;
	u16 flow_flags = 0, action_flags = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

	req.src_fid = cpu_to_le16(flow->src_fid);
	req.ref_flow_handle = ref_flow_handle;

	if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
		memcpy(req.l2_rewrite_dmac, actions->l2_rewrite_dmac,
		       ETH_ALEN);
		memcpy(req.l2_rewrite_smac, actions->l2_rewrite_smac,
		       ETH_ALEN);
		action_flags |=
			CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
	}

	if (actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE) {
		if (actions->nat.l3_is_ipv4) {
			action_flags |=
				CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS;

			if (actions->nat.src_xlate) {
				action_flags |=
					CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
				/* L3 source rewrite */
				req.nat_ip_address[0] =
					actions->nat.l3.ipv4.saddr.s_addr;
				/* L4 source port */
				if (actions->nat.l4.ports.sport)
					req.nat_port =
						actions->nat.l4.ports.sport;
			} else {
				action_flags |=
					CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
				/* L3 destination rewrite */
				req.nat_ip_address[0] =
					actions->nat.l3.ipv4.daddr.s_addr;
				/* L4 destination port */
				if (actions->nat.l4.ports.dport)
					req.nat_port =
						actions->nat.l4.ports.dport;
			}
			netdev_dbg(bp->dev,
				   "req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n",
				   req.nat_ip_address, actions->nat.src_xlate,
				   req.nat_port);
		} else {
			if (actions->nat.src_xlate) {
				action_flags |=
					CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
				/* L3 source rewrite */
				memcpy(req.nat_ip_address,
				       actions->nat.l3.ipv6.saddr.s6_addr32,
				       sizeof(req.nat_ip_address));
				/* L4 source port */
				if (actions->nat.l4.ports.sport)
					req.nat_port =
						actions->nat.l4.ports.sport;
			} else {
				action_flags |=
					CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
				/* L3 destination rewrite */
				memcpy(req.nat_ip_address,
				       actions->nat.l3.ipv6.daddr.s6_addr32,
				       sizeof(req.nat_ip_address));
				/* L4 destination port */
				if (actions->nat.l4.ports.dport)
					req.nat_port =
						actions->nat.l4.ports.dport;
			}
			netdev_dbg(bp->dev,
				   "req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n",
				   req.nat_ip_address, actions->nat.src_xlate,
				   req.nat_port);
		}
	}

	if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
	    actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
		req.tunnel_handle = tunnel_handle;
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
	}

	req.ethertype = flow->l2_key.ether_type;
	req.ip_proto = flow->l4_key.ip_proto;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
		memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
		memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
	}

	if (flow->l2_key.num_vlans > 0) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
		/* FW expects the inner_vlan_tci value to be set
		 * in outer_vlan_tci when num_vlans is 1 (which is
		 * always the case in TC.)
		 */
		req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
	}

	/* If all IP and L4 fields are wildcarded then this is an L2 flow */
	if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
	    is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
	} else {
		flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

		if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
			req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
			req.ip_dst_mask_len =
				inet_mask_len(l3_mask->ipv4.daddr.s_addr);
			req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
			req.ip_src_mask_len =
				inet_mask_len(l3_mask->ipv4.saddr.s_addr);
		} else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
			memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
			       sizeof(req.ip_dst));
			req.ip_dst_mask_len =
				ipv6_mask_len(&l3_mask->ipv6.daddr);
			memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
			       sizeof(req.ip_src));
			req.ip_src_mask_len =
				ipv6_mask_len(&l3_mask->ipv6.saddr);
		}
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
		req.l4_src_port = flow->l4_key.ports.sport;
		req.l4_src_port_mask = flow->l4_mask.ports.sport;
		req.l4_dst_port = flow->l4_key.ports.dport;
		req.l4_dst_port_mask = flow->l4_mask.ports.dport;
	} else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
		/* l4 ports serve as type/code when ip_proto is ICMP */
		req.l4_src_port = htons(flow->l4_key.icmp.type);
		req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
		req.l4_dst_port = htons(flow->l4_key.icmp.code);
		req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
	}
	req.flags = cpu_to_le16(flow_flags);

	if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
	} else {
		if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
			action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
			req.dst_fid = cpu_to_le16(actions->dst_fid);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
			action_flags |=
				CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
			req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
			action_flags |=
				CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			/* Rewrite config with tpid = 0 implies vlan pop */
			req.l2_rewrite_vlan_tpid = 0;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
	}
	req.action_flags = cpu_to_le16(action_flags);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		resp = bnxt_get_hwrm_resp_addr(bp, &req);
		/* CFA_FLOW_ALLOC response interpretation:
		 *		    fw with	     fw with
		 *		    16-bit	     64-bit
		 *		    flow handle      flow handle
		 *		    ===========      ===========
		 * flow_handle      flow handle      flow context id
		 * ext_flow_handle  INVALID	     flow handle
		 * flow_id	    INVALID	     flow counter id
		 */
		flow_node->flow_handle = resp->flow_handle;
		if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
			flow_node->ext_flow_handle = resp->ext_flow_handle;
			flow_node->flow_id = resp->flow_id;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

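/* Allocate a VXLAN decap filter in the hardware for the tunnel fields of
 * this flow, chaining it to ref_decap_handle, and return the new decap
 * filter handle.
 */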
static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
				       struct bnxt_tc_flow *flow,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 ref_decap_handle,
				       __le32 *decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
	struct hwrm_cfa_decap_filter_alloc_output *resp;
	struct ip_tunnel_key *tun_key = &flow->tun_key;
	u32 enables = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);

	req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
		   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
	req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
		/* tunnel_id is wrongly defined in hsi defn. as __le32 */
		req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
		ether_addr_copy(req.dst_macaddr, l2_info->dmac);
	}
	if (l2_info->num_vlans) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
		req.t_ivlan_vid = l2_info->inner_vlan_tci;
	}

	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
	req.ethertype = htons(ETH_P_IP);

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
		req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
		req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
		req.src_ipaddr[0] = tun_key->u.ipv4.src;
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
		req.dst_port = tun_key->tp_dst;
	}

	/* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
	 * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
	 */
	req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
	req.enables = cpu_to_le32(enables);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		resp = bnxt_get_hwrm_resp_addr(bp, &req);
		*decap_filter_handle = resp->decap_filter_id;
	} else {
		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
				      __le32 decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
	req.decap_filter_id = decap_filter_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);

	return rc;
}

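/* Allocate a VXLAN encap record in the hardware from the resolved L2
 * header and tunnel key, and return the encap record handle.
 */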
static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
				       struct ip_tunnel_key *encap_key,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 *encap_record_handle)
{
	struct hwrm_cfa_encap_record_alloc_input req = { 0 };
	struct hwrm_cfa_encap_record_alloc_output *resp;
	struct hwrm_cfa_encap_data_vxlan *encap =
		(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
	struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
		(struct hwrm_vxlan_ipv4_hdr *)encap->l3;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);

	req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;

	ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
	ether_addr_copy(encap->src_mac_addr, l2_info->smac);
	if (l2_info->num_vlans) {
		encap->num_vlan_tags = l2_info->num_vlans;
		encap->ovlan_tci = l2_info->inner_vlan_tci;
		encap->ovlan_tpid = l2_info->inner_vlan_tpid;
	}

	encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
	encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
	encap_ipv4->ttl = encap_key->ttl;

	encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
	encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
	encap_ipv4->protocol = IPPROTO_UDP;

	encap->dst_port = encap_key->tp_dst;
	encap->vni = tunnel_id_to_key32(encap_key->tun_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		resp = bnxt_get_hwrm_resp_addr(bp, &req);
		*encap_record_handle = resp->encap_record_id;
	} else {
		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static int hwrm_cfa_encap_record_free(struct bnxt *bp,
				      __le32 encap_record_handle)
{
	struct hwrm_cfa_encap_record_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
	req.encap_record_id = encap_record_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);

	return rc;
}

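/* Drop a flow's reference on its shared L2 node and free the node once the
 * last reference is gone.
 */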
static int bnxt_tc_put_l2_node(struct bnxt *bp,
			       struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* remove flow_node from the L2 shared flow list */
	list_del(&flow_node->l2_list_node);
	if (--l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
					    tc_info->l2_ht_params);
		if (rc)
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_remove_fast: %d\n",
				   __func__, rc);
		kfree_rcu(l2_node, rcu);
	}
	return 0;
}

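/* Look up (or create and insert) the L2 node for the given L2 key in the
 * supplied hash table. Returns NULL on allocation or insertion failure.
 */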
958*4882a593Smuzhiyun static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt * bp,struct rhashtable * l2_table,struct rhashtable_params ht_params,struct bnxt_tc_l2_key * l2_key)959*4882a593Smuzhiyun bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
960*4882a593Smuzhiyun struct rhashtable_params ht_params,
961*4882a593Smuzhiyun struct bnxt_tc_l2_key *l2_key)
962*4882a593Smuzhiyun {
963*4882a593Smuzhiyun struct bnxt_tc_l2_node *l2_node;
964*4882a593Smuzhiyun int rc;
965*4882a593Smuzhiyun
966*4882a593Smuzhiyun l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
967*4882a593Smuzhiyun if (!l2_node) {
968*4882a593Smuzhiyun l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
969*4882a593Smuzhiyun if (!l2_node) {
970*4882a593Smuzhiyun rc = -ENOMEM;
971*4882a593Smuzhiyun return NULL;
972*4882a593Smuzhiyun }
973*4882a593Smuzhiyun
974*4882a593Smuzhiyun l2_node->key = *l2_key;
975*4882a593Smuzhiyun rc = rhashtable_insert_fast(l2_table, &l2_node->node,
976*4882a593Smuzhiyun ht_params);
977*4882a593Smuzhiyun if (rc) {
978*4882a593Smuzhiyun kfree_rcu(l2_node, rcu);
979*4882a593Smuzhiyun netdev_err(bp->dev,
980*4882a593Smuzhiyun "Error: %s: rhashtable_insert_fast: %d\n",
981*4882a593Smuzhiyun __func__, rc);
982*4882a593Smuzhiyun return NULL;
983*4882a593Smuzhiyun }
984*4882a593Smuzhiyun INIT_LIST_HEAD(&l2_node->common_l2_flows);
985*4882a593Smuzhiyun }
986*4882a593Smuzhiyun return l2_node;
987*4882a593Smuzhiyun }
988*4882a593Smuzhiyun
989*4882a593Smuzhiyun /* Get the ref_flow_handle for a flow by checking if there are any other
990*4882a593Smuzhiyun * flows that share the same L2 key as this flow.
991*4882a593Smuzhiyun */
992*4882a593Smuzhiyun static int
bnxt_tc_get_ref_flow_handle(struct bnxt * bp,struct bnxt_tc_flow * flow,struct bnxt_tc_flow_node * flow_node,__le16 * ref_flow_handle)993*4882a593Smuzhiyun bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
994*4882a593Smuzhiyun struct bnxt_tc_flow_node *flow_node,
995*4882a593Smuzhiyun __le16 *ref_flow_handle)
996*4882a593Smuzhiyun {
997*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
998*4882a593Smuzhiyun struct bnxt_tc_flow_node *ref_flow_node;
999*4882a593Smuzhiyun struct bnxt_tc_l2_node *l2_node;
1000*4882a593Smuzhiyun
1001*4882a593Smuzhiyun l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
1002*4882a593Smuzhiyun tc_info->l2_ht_params,
1003*4882a593Smuzhiyun &flow->l2_key);
1004*4882a593Smuzhiyun if (!l2_node)
1005*4882a593Smuzhiyun return -1;
1006*4882a593Smuzhiyun
1007*4882a593Smuzhiyun /* If any other flow is using this l2_node, use it's flow_handle
1008*4882a593Smuzhiyun * as the ref_flow_handle
1009*4882a593Smuzhiyun */
1010*4882a593Smuzhiyun if (l2_node->refcount > 0) {
1011*4882a593Smuzhiyun ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
1012*4882a593Smuzhiyun struct bnxt_tc_flow_node,
1013*4882a593Smuzhiyun l2_list_node);
1014*4882a593Smuzhiyun *ref_flow_handle = ref_flow_node->flow_handle;
1015*4882a593Smuzhiyun } else {
1016*4882a593Smuzhiyun *ref_flow_handle = cpu_to_le16(0xffff);
1017*4882a593Smuzhiyun }
1018*4882a593Smuzhiyun
1019*4882a593Smuzhiyun /* Insert the l2_node into the flow_node so that subsequent flows
1020*4882a593Smuzhiyun * with a matching l2 key can use the flow_handle of this flow
1021*4882a593Smuzhiyun * as their ref_flow_handle
1022*4882a593Smuzhiyun */
1023*4882a593Smuzhiyun flow_node->l2_node = l2_node;
1024*4882a593Smuzhiyun list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
1025*4882a593Smuzhiyun l2_node->refcount++;
1026*4882a593Smuzhiyun return 0;
1027*4882a593Smuzhiyun }
1028*4882a593Smuzhiyun
1029*4882a593Smuzhiyun /* After the flow parsing is done, this routine is used for checking
1030*4882a593Smuzhiyun * if there are any aspects of the flow that prevent it from being
1031*4882a593Smuzhiyun * offloaded.
1032*4882a593Smuzhiyun */
bnxt_tc_can_offload(struct bnxt * bp,struct bnxt_tc_flow * flow)1033*4882a593Smuzhiyun static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
1034*4882a593Smuzhiyun {
1035*4882a593Smuzhiyun /* If L4 ports are specified then ip_proto must be TCP or UDP */
1036*4882a593Smuzhiyun if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
1037*4882a593Smuzhiyun (flow->l4_key.ip_proto != IPPROTO_TCP &&
1038*4882a593Smuzhiyun flow->l4_key.ip_proto != IPPROTO_UDP)) {
1039*4882a593Smuzhiyun netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports\n",
1040*4882a593Smuzhiyun flow->l4_key.ip_proto);
1041*4882a593Smuzhiyun return false;
1042*4882a593Smuzhiyun }
1043*4882a593Smuzhiyun
1044*4882a593Smuzhiyun /* Currently source/dest MAC cannot be partial wildcard */
1045*4882a593Smuzhiyun if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
1046*4882a593Smuzhiyun !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
1047*4882a593Smuzhiyun netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
1048*4882a593Smuzhiyun return false;
1049*4882a593Smuzhiyun }
1050*4882a593Smuzhiyun if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
1051*4882a593Smuzhiyun !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
1052*4882a593Smuzhiyun netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
1053*4882a593Smuzhiyun return false;
1054*4882a593Smuzhiyun }
1055*4882a593Smuzhiyun
1056*4882a593Smuzhiyun /* Currently VLAN fields cannot be partial wildcard */
1057*4882a593Smuzhiyun if (bits_set(&flow->l2_key.inner_vlan_tci,
1058*4882a593Smuzhiyun sizeof(flow->l2_key.inner_vlan_tci)) &&
1059*4882a593Smuzhiyun !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
1060*4882a593Smuzhiyun flow->l2_key.inner_vlan_tci)) {
1061*4882a593Smuzhiyun netdev_info(bp->dev, "Unsupported VLAN TCI\n");
1062*4882a593Smuzhiyun return false;
1063*4882a593Smuzhiyun }
1064*4882a593Smuzhiyun if (bits_set(&flow->l2_key.inner_vlan_tpid,
1065*4882a593Smuzhiyun sizeof(flow->l2_key.inner_vlan_tpid)) &&
1066*4882a593Smuzhiyun !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
1067*4882a593Smuzhiyun sizeof(flow->l2_mask.inner_vlan_tpid))) {
1068*4882a593Smuzhiyun netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
1069*4882a593Smuzhiyun return false;
1070*4882a593Smuzhiyun }
1071*4882a593Smuzhiyun
1072*4882a593Smuzhiyun /* Currently Ethertype must be set */
1073*4882a593Smuzhiyun if (!is_exactmatch(&flow->l2_mask.ether_type,
1074*4882a593Smuzhiyun sizeof(flow->l2_mask.ether_type))) {
1075*4882a593Smuzhiyun netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
1076*4882a593Smuzhiyun return false;
1077*4882a593Smuzhiyun }
1078*4882a593Smuzhiyun
1079*4882a593Smuzhiyun return true;
1080*4882a593Smuzhiyun }
1081*4882a593Smuzhiyun
1082*4882a593Smuzhiyun /* Returns the final refcount of the node on success
1083*4882a593Smuzhiyun * or a -ve error code on failure
1084*4882a593Smuzhiyun */
bnxt_tc_put_tunnel_node(struct bnxt * bp,struct rhashtable * tunnel_table,struct rhashtable_params * ht_params,struct bnxt_tc_tunnel_node * tunnel_node)1085*4882a593Smuzhiyun static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
1086*4882a593Smuzhiyun struct rhashtable *tunnel_table,
1087*4882a593Smuzhiyun struct rhashtable_params *ht_params,
1088*4882a593Smuzhiyun struct bnxt_tc_tunnel_node *tunnel_node)
1089*4882a593Smuzhiyun {
1090*4882a593Smuzhiyun int rc;
1091*4882a593Smuzhiyun
1092*4882a593Smuzhiyun if (--tunnel_node->refcount == 0) {
1093*4882a593Smuzhiyun rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
1094*4882a593Smuzhiyun *ht_params);
1095*4882a593Smuzhiyun if (rc) {
1096*4882a593Smuzhiyun netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc);
1097*4882a593Smuzhiyun rc = -1;
1098*4882a593Smuzhiyun }
1099*4882a593Smuzhiyun kfree_rcu(tunnel_node, rcu);
1100*4882a593Smuzhiyun return rc;
1101*4882a593Smuzhiyun } else {
1102*4882a593Smuzhiyun return tunnel_node->refcount;
1103*4882a593Smuzhiyun }
1104*4882a593Smuzhiyun }
1105*4882a593Smuzhiyun
1106*4882a593Smuzhiyun /* Get (or add) either encap or decap tunnel node from/to the supplied
1107*4882a593Smuzhiyun * hash table.
1108*4882a593Smuzhiyun */
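/* Note (descriptive): each successful lookup/insert below takes a
 * reference on the returned node; callers are expected to drop it via
 * bnxt_tc_put_tunnel_node(), which removes and frees the node once its
 * refcount falls to zero.
 */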
1109*4882a593Smuzhiyun static struct bnxt_tc_tunnel_node *
1110*4882a593Smuzhiyun bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
1111*4882a593Smuzhiyun struct rhashtable_params *ht_params,
1112*4882a593Smuzhiyun struct ip_tunnel_key *tun_key)
1113*4882a593Smuzhiyun {
1114*4882a593Smuzhiyun struct bnxt_tc_tunnel_node *tunnel_node;
1115*4882a593Smuzhiyun int rc;
1116*4882a593Smuzhiyun
1117*4882a593Smuzhiyun tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
1118*4882a593Smuzhiyun if (!tunnel_node) {
1119*4882a593Smuzhiyun tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
1120*4882a593Smuzhiyun if (!tunnel_node) {
1121*4882a593Smuzhiyun rc = -ENOMEM;
1122*4882a593Smuzhiyun goto err;
1123*4882a593Smuzhiyun }
1124*4882a593Smuzhiyun
1125*4882a593Smuzhiyun tunnel_node->key = *tun_key;
1126*4882a593Smuzhiyun tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
1127*4882a593Smuzhiyun rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
1128*4882a593Smuzhiyun *ht_params);
1129*4882a593Smuzhiyun if (rc) {
1130*4882a593Smuzhiyun kfree_rcu(tunnel_node, rcu);
1131*4882a593Smuzhiyun goto err;
1132*4882a593Smuzhiyun }
1133*4882a593Smuzhiyun }
1134*4882a593Smuzhiyun tunnel_node->refcount++;
1135*4882a593Smuzhiyun return tunnel_node;
1136*4882a593Smuzhiyun err:
1137*4882a593Smuzhiyun netdev_info(bp->dev, "error rc=%d\n", rc);
1138*4882a593Smuzhiyun return NULL;
1139*4882a593Smuzhiyun }
1140*4882a593Smuzhiyun
1141*4882a593Smuzhiyun static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
1142*4882a593Smuzhiyun struct bnxt_tc_flow *flow,
1143*4882a593Smuzhiyun struct bnxt_tc_l2_key *l2_key,
1144*4882a593Smuzhiyun struct bnxt_tc_flow_node *flow_node,
1145*4882a593Smuzhiyun __le32 *ref_decap_handle)
1146*4882a593Smuzhiyun {
1147*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
1148*4882a593Smuzhiyun struct bnxt_tc_flow_node *ref_flow_node;
1149*4882a593Smuzhiyun struct bnxt_tc_l2_node *decap_l2_node;
1150*4882a593Smuzhiyun
1151*4882a593Smuzhiyun decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
1152*4882a593Smuzhiyun tc_info->decap_l2_ht_params,
1153*4882a593Smuzhiyun l2_key);
1154*4882a593Smuzhiyun if (!decap_l2_node)
1155*4882a593Smuzhiyun return -1;
1156*4882a593Smuzhiyun
1157*4882a593Smuzhiyun /* If any other flow is using this decap_l2_node, use its decap_handle
1158*4882a593Smuzhiyun * as the ref_decap_handle
1159*4882a593Smuzhiyun */
1160*4882a593Smuzhiyun if (decap_l2_node->refcount > 0) {
1161*4882a593Smuzhiyun ref_flow_node =
1162*4882a593Smuzhiyun list_first_entry(&decap_l2_node->common_l2_flows,
1163*4882a593Smuzhiyun struct bnxt_tc_flow_node,
1164*4882a593Smuzhiyun decap_l2_list_node);
1165*4882a593Smuzhiyun *ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
1166*4882a593Smuzhiyun } else {
1167*4882a593Smuzhiyun *ref_decap_handle = INVALID_TUNNEL_HANDLE;
1168*4882a593Smuzhiyun }
1169*4882a593Smuzhiyun
1170*4882a593Smuzhiyun /* Insert the l2_node into the flow_node so that subsequent flows
1171*4882a593Smuzhiyun * with a matching decap l2 key can use the decap_filter_handle of
1172*4882a593Smuzhiyun * this flow as their ref_decap_handle
1173*4882a593Smuzhiyun */
1174*4882a593Smuzhiyun flow_node->decap_l2_node = decap_l2_node;
1175*4882a593Smuzhiyun list_add(&flow_node->decap_l2_list_node,
1176*4882a593Smuzhiyun &decap_l2_node->common_l2_flows);
1177*4882a593Smuzhiyun decap_l2_node->refcount++;
1178*4882a593Smuzhiyun return 0;
1179*4882a593Smuzhiyun }
1180*4882a593Smuzhiyun
1181*4882a593Smuzhiyun static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
1182*4882a593Smuzhiyun struct bnxt_tc_flow_node *flow_node)
1183*4882a593Smuzhiyun {
1184*4882a593Smuzhiyun struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
1185*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
1186*4882a593Smuzhiyun int rc;
1187*4882a593Smuzhiyun
1188*4882a593Smuzhiyun /* remove flow_node from the decap L2 sharing flow list */
1189*4882a593Smuzhiyun list_del(&flow_node->decap_l2_list_node);
1190*4882a593Smuzhiyun if (--decap_l2_node->refcount == 0) {
1191*4882a593Smuzhiyun rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
1192*4882a593Smuzhiyun &decap_l2_node->node,
1193*4882a593Smuzhiyun tc_info->decap_l2_ht_params);
1194*4882a593Smuzhiyun if (rc)
1195*4882a593Smuzhiyun netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc);
1196*4882a593Smuzhiyun kfree_rcu(decap_l2_node, rcu);
1197*4882a593Smuzhiyun }
1198*4882a593Smuzhiyun }
1199*4882a593Smuzhiyun
1200*4882a593Smuzhiyun static void bnxt_tc_put_decap_handle(struct bnxt *bp,
1201*4882a593Smuzhiyun struct bnxt_tc_flow_node *flow_node)
1202*4882a593Smuzhiyun {
1203*4882a593Smuzhiyun __le32 decap_handle = flow_node->decap_node->tunnel_handle;
1204*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
1205*4882a593Smuzhiyun int rc;
1206*4882a593Smuzhiyun
1207*4882a593Smuzhiyun if (flow_node->decap_l2_node)
1208*4882a593Smuzhiyun bnxt_tc_put_decap_l2_node(bp, flow_node);
1209*4882a593Smuzhiyun
1210*4882a593Smuzhiyun rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
1211*4882a593Smuzhiyun &tc_info->decap_ht_params,
1212*4882a593Smuzhiyun flow_node->decap_node);
1213*4882a593Smuzhiyun if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
1214*4882a593Smuzhiyun hwrm_cfa_decap_filter_free(bp, decap_handle);
1215*4882a593Smuzhiyun }
1216*4882a593Smuzhiyun
1217*4882a593Smuzhiyun static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
1218*4882a593Smuzhiyun struct ip_tunnel_key *tun_key,
1219*4882a593Smuzhiyun struct bnxt_tc_l2_key *l2_info)
1220*4882a593Smuzhiyun {
1221*4882a593Smuzhiyun #ifdef CONFIG_INET
1222*4882a593Smuzhiyun struct net_device *real_dst_dev = bp->dev;
1223*4882a593Smuzhiyun struct flowi4 flow = { {0} };
1224*4882a593Smuzhiyun struct net_device *dst_dev;
1225*4882a593Smuzhiyun struct neighbour *nbr;
1226*4882a593Smuzhiyun struct rtable *rt;
1227*4882a593Smuzhiyun int rc;
1228*4882a593Smuzhiyun
1229*4882a593Smuzhiyun flow.flowi4_proto = IPPROTO_UDP;
1230*4882a593Smuzhiyun flow.fl4_dport = tun_key->tp_dst;
1231*4882a593Smuzhiyun flow.daddr = tun_key->u.ipv4.dst;
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
1234*4882a593Smuzhiyun if (IS_ERR(rt)) {
1235*4882a593Smuzhiyun netdev_info(bp->dev, "no route to %pI4b\n", &flow.daddr);
1236*4882a593Smuzhiyun return -EOPNOTSUPP;
1237*4882a593Smuzhiyun }
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun /* The route must either point to the real_dst_dev or a dst_dev that
1240*4882a593Smuzhiyun * uses the real_dst_dev.
1241*4882a593Smuzhiyun */
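/* In practice the only indirection accepted below is a VLAN device
 * stacked on the PF netdev; in that case the VLAN id/protocol are
 * recorded in l2_info (presumably so the tunnel L2 header includes the
 * inner VLAN tag).
 */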
1242*4882a593Smuzhiyun dst_dev = rt->dst.dev;
1243*4882a593Smuzhiyun if (is_vlan_dev(dst_dev)) {
1244*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_VLAN_8021Q)
1245*4882a593Smuzhiyun struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun if (vlan->real_dev != real_dst_dev) {
1248*4882a593Smuzhiyun netdev_info(bp->dev,
1249*4882a593Smuzhiyun "dst_dev(%s) doesn't use PF-if(%s)\n",
1250*4882a593Smuzhiyun netdev_name(dst_dev),
1251*4882a593Smuzhiyun netdev_name(real_dst_dev));
1252*4882a593Smuzhiyun rc = -EOPNOTSUPP;
1253*4882a593Smuzhiyun goto put_rt;
1254*4882a593Smuzhiyun }
1255*4882a593Smuzhiyun l2_info->inner_vlan_tci = htons(vlan->vlan_id);
1256*4882a593Smuzhiyun l2_info->inner_vlan_tpid = vlan->vlan_proto;
1257*4882a593Smuzhiyun l2_info->num_vlans = 1;
1258*4882a593Smuzhiyun #endif
1259*4882a593Smuzhiyun } else if (dst_dev != real_dst_dev) {
1260*4882a593Smuzhiyun netdev_info(bp->dev,
1261*4882a593Smuzhiyun "dst_dev(%s) for %pI4b is not PF-if(%s)\n",
1262*4882a593Smuzhiyun netdev_name(dst_dev), &flow.daddr,
1263*4882a593Smuzhiyun netdev_name(real_dst_dev));
1264*4882a593Smuzhiyun rc = -EOPNOTSUPP;
1265*4882a593Smuzhiyun goto put_rt;
1266*4882a593Smuzhiyun }
1267*4882a593Smuzhiyun
1268*4882a593Smuzhiyun nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
1269*4882a593Smuzhiyun if (!nbr) {
1270*4882a593Smuzhiyun netdev_info(bp->dev, "can't lookup neighbor for %pI4b\n",
1271*4882a593Smuzhiyun &flow.daddr);
1272*4882a593Smuzhiyun rc = -EOPNOTSUPP;
1273*4882a593Smuzhiyun goto put_rt;
1274*4882a593Smuzhiyun }
1275*4882a593Smuzhiyun
1276*4882a593Smuzhiyun tun_key->u.ipv4.src = flow.saddr;
1277*4882a593Smuzhiyun tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
1278*4882a593Smuzhiyun neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
1279*4882a593Smuzhiyun ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
1280*4882a593Smuzhiyun neigh_release(nbr);
1281*4882a593Smuzhiyun ip_rt_put(rt);
1282*4882a593Smuzhiyun
1283*4882a593Smuzhiyun return 0;
1284*4882a593Smuzhiyun put_rt:
1285*4882a593Smuzhiyun ip_rt_put(rt);
1286*4882a593Smuzhiyun return rc;
1287*4882a593Smuzhiyun #else
1288*4882a593Smuzhiyun return -EOPNOTSUPP;
1289*4882a593Smuzhiyun #endif
1290*4882a593Smuzhiyun }
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
1293*4882a593Smuzhiyun struct bnxt_tc_flow_node *flow_node,
1294*4882a593Smuzhiyun __le32 *decap_filter_handle)
1295*4882a593Smuzhiyun {
1296*4882a593Smuzhiyun struct ip_tunnel_key *decap_key = &flow->tun_key;
1297*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
1298*4882a593Smuzhiyun struct bnxt_tc_l2_key l2_info = { {0} };
1299*4882a593Smuzhiyun struct bnxt_tc_tunnel_node *decap_node;
1300*4882a593Smuzhiyun struct ip_tunnel_key tun_key = { 0 };
1301*4882a593Smuzhiyun struct bnxt_tc_l2_key *decap_l2_info;
1302*4882a593Smuzhiyun __le32 ref_decap_handle;
1303*4882a593Smuzhiyun int rc;
1304*4882a593Smuzhiyun
1305*4882a593Smuzhiyun /* Check if there's another flow using the same tunnel decap.
1306*4882a593Smuzhiyun * If not, add this tunnel to the table and resolve the other
1307*4882a593Smuzhiyun * tunnel header fields. Ignore src_port in the tunnel_key,
1308*4882a593Smuzhiyun * since it is not required for decap filters.
1309*4882a593Smuzhiyun */
1310*4882a593Smuzhiyun decap_key->tp_src = 0;
1311*4882a593Smuzhiyun decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
1312*4882a593Smuzhiyun &tc_info->decap_ht_params,
1313*4882a593Smuzhiyun decap_key);
1314*4882a593Smuzhiyun if (!decap_node)
1315*4882a593Smuzhiyun return -ENOMEM;
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun flow_node->decap_node = decap_node;
1318*4882a593Smuzhiyun
1319*4882a593Smuzhiyun if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
1320*4882a593Smuzhiyun goto done;
1321*4882a593Smuzhiyun
1322*4882a593Smuzhiyun /* Resolve the L2 fields for tunnel decap
1323*4882a593Smuzhiyun * Resolve the route for remote vtep (saddr) of the decap key
1324*4882a593Smuzhiyun * Find its next-hop MAC address
1325*4882a593Smuzhiyun */
1326*4882a593Smuzhiyun tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
1327*4882a593Smuzhiyun tun_key.tp_dst = flow->tun_key.tp_dst;
1328*4882a593Smuzhiyun rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
1329*4882a593Smuzhiyun if (rc)
1330*4882a593Smuzhiyun goto put_decap;
1331*4882a593Smuzhiyun
1332*4882a593Smuzhiyun decap_l2_info = &decap_node->l2_info;
1333*4882a593Smuzhiyun /* decap smac is wildcarded */
1334*4882a593Smuzhiyun ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
1335*4882a593Smuzhiyun if (l2_info.num_vlans) {
1336*4882a593Smuzhiyun decap_l2_info->num_vlans = l2_info.num_vlans;
1337*4882a593Smuzhiyun decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
1338*4882a593Smuzhiyun decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
1339*4882a593Smuzhiyun }
1340*4882a593Smuzhiyun flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;
1341*4882a593Smuzhiyun
1342*4882a593Smuzhiyun /* For getting a decap_filter_handle we first need to check if
1343*4882a593Smuzhiyun * there are any other decap flows that share the same tunnel L2
1344*4882a593Smuzhiyun * key and if so, pass that flow's decap_filter_handle as the
1345*4882a593Smuzhiyun * ref_decap_handle for this flow.
1346*4882a593Smuzhiyun */
1347*4882a593Smuzhiyun rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
1348*4882a593Smuzhiyun &ref_decap_handle);
1349*4882a593Smuzhiyun if (rc)
1350*4882a593Smuzhiyun goto put_decap;
1351*4882a593Smuzhiyun
1352*4882a593Smuzhiyun /* Issue the hwrm cmd to allocate a decap filter handle */
1353*4882a593Smuzhiyun rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
1354*4882a593Smuzhiyun ref_decap_handle,
1355*4882a593Smuzhiyun &decap_node->tunnel_handle);
1356*4882a593Smuzhiyun if (rc)
1357*4882a593Smuzhiyun goto put_decap_l2;
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun done:
1360*4882a593Smuzhiyun *decap_filter_handle = decap_node->tunnel_handle;
1361*4882a593Smuzhiyun return 0;
1362*4882a593Smuzhiyun
1363*4882a593Smuzhiyun put_decap_l2:
1364*4882a593Smuzhiyun bnxt_tc_put_decap_l2_node(bp, flow_node);
1365*4882a593Smuzhiyun put_decap:
1366*4882a593Smuzhiyun bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
1367*4882a593Smuzhiyun &tc_info->decap_ht_params,
1368*4882a593Smuzhiyun flow_node->decap_node);
1369*4882a593Smuzhiyun return rc;
1370*4882a593Smuzhiyun }
1371*4882a593Smuzhiyun
1372*4882a593Smuzhiyun static void bnxt_tc_put_encap_handle(struct bnxt *bp,
1373*4882a593Smuzhiyun struct bnxt_tc_tunnel_node *encap_node)
1374*4882a593Smuzhiyun {
1375*4882a593Smuzhiyun __le32 encap_handle = encap_node->tunnel_handle;
1376*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
1377*4882a593Smuzhiyun int rc;
1378*4882a593Smuzhiyun
1379*4882a593Smuzhiyun rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
1380*4882a593Smuzhiyun &tc_info->encap_ht_params, encap_node);
1381*4882a593Smuzhiyun if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
1382*4882a593Smuzhiyun hwrm_cfa_encap_record_free(bp, encap_handle);
1383*4882a593Smuzhiyun }
1384*4882a593Smuzhiyun
1385*4882a593Smuzhiyun /* Lookup the tunnel encap table and check if there's an encap_handle
1386*4882a593Smuzhiyun * alloc'd already.
1387*4882a593Smuzhiyun * If not, query L2 info via a route lookup and issue an encap_record_alloc
1388*4882a593Smuzhiyun * cmd to FW.
1389*4882a593Smuzhiyun */
1390*4882a593Smuzhiyun static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
1391*4882a593Smuzhiyun struct bnxt_tc_flow_node *flow_node,
1392*4882a593Smuzhiyun __le32 *encap_handle)
1393*4882a593Smuzhiyun {
1394*4882a593Smuzhiyun struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
1395*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
1396*4882a593Smuzhiyun struct bnxt_tc_tunnel_node *encap_node;
1397*4882a593Smuzhiyun int rc;
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun /* Check if there's another flow using the same tunnel encap.
1400*4882a593Smuzhiyun * If not, add this tunnel to the table and resolve the other
1401*4882a593Smuzhiyun * tunnel header fields
1402*4882a593Smuzhiyun */
1403*4882a593Smuzhiyun encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
1404*4882a593Smuzhiyun &tc_info->encap_ht_params,
1405*4882a593Smuzhiyun encap_key);
1406*4882a593Smuzhiyun if (!encap_node)
1407*4882a593Smuzhiyun return -ENOMEM;
1408*4882a593Smuzhiyun
1409*4882a593Smuzhiyun flow_node->encap_node = encap_node;
1410*4882a593Smuzhiyun
1411*4882a593Smuzhiyun if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
1412*4882a593Smuzhiyun goto done;
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
1415*4882a593Smuzhiyun if (rc)
1416*4882a593Smuzhiyun goto put_encap;
1417*4882a593Smuzhiyun
1418*4882a593Smuzhiyun /* Allocate a new tunnel encap record */
1419*4882a593Smuzhiyun rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
1420*4882a593Smuzhiyun &encap_node->tunnel_handle);
1421*4882a593Smuzhiyun if (rc)
1422*4882a593Smuzhiyun goto put_encap;
1423*4882a593Smuzhiyun
1424*4882a593Smuzhiyun done:
1425*4882a593Smuzhiyun *encap_handle = encap_node->tunnel_handle;
1426*4882a593Smuzhiyun return 0;
1427*4882a593Smuzhiyun
1428*4882a593Smuzhiyun put_encap:
1429*4882a593Smuzhiyun bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
1430*4882a593Smuzhiyun &tc_info->encap_ht_params, encap_node);
1431*4882a593Smuzhiyun return rc;
1432*4882a593Smuzhiyun }
1433*4882a593Smuzhiyun
1434*4882a593Smuzhiyun static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
1435*4882a593Smuzhiyun struct bnxt_tc_flow *flow,
1436*4882a593Smuzhiyun struct bnxt_tc_flow_node *flow_node)
1437*4882a593Smuzhiyun {
1438*4882a593Smuzhiyun if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
1439*4882a593Smuzhiyun bnxt_tc_put_decap_handle(bp, flow_node);
1440*4882a593Smuzhiyun else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
1441*4882a593Smuzhiyun bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
1442*4882a593Smuzhiyun }
1443*4882a593Smuzhiyun
1444*4882a593Smuzhiyun static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
1445*4882a593Smuzhiyun struct bnxt_tc_flow *flow,
1446*4882a593Smuzhiyun struct bnxt_tc_flow_node *flow_node,
1447*4882a593Smuzhiyun __le32 *tunnel_handle)
1448*4882a593Smuzhiyun {
1449*4882a593Smuzhiyun if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
1450*4882a593Smuzhiyun return bnxt_tc_get_decap_handle(bp, flow, flow_node,
1451*4882a593Smuzhiyun tunnel_handle);
1452*4882a593Smuzhiyun else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
1453*4882a593Smuzhiyun return bnxt_tc_get_encap_handle(bp, flow, flow_node,
1454*4882a593Smuzhiyun tunnel_handle);
1455*4882a593Smuzhiyun else
1456*4882a593Smuzhiyun return 0;
1457*4882a593Smuzhiyun }
1458*4882a593Smuzhiyun static int __bnxt_tc_del_flow(struct bnxt *bp,
1459*4882a593Smuzhiyun struct bnxt_tc_flow_node *flow_node)
1460*4882a593Smuzhiyun {
1461*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
1462*4882a593Smuzhiyun int rc;
1463*4882a593Smuzhiyun
1464*4882a593Smuzhiyun /* send HWRM cmd to free the flow-id */
1465*4882a593Smuzhiyun bnxt_hwrm_cfa_flow_free(bp, flow_node);
1466*4882a593Smuzhiyun
1467*4882a593Smuzhiyun mutex_lock(&tc_info->lock);
1468*4882a593Smuzhiyun
1469*4882a593Smuzhiyun /* release references to any tunnel encap/decap nodes */
1470*4882a593Smuzhiyun bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);
1471*4882a593Smuzhiyun
1472*4882a593Smuzhiyun /* release reference to l2 node */
1473*4882a593Smuzhiyun bnxt_tc_put_l2_node(bp, flow_node);
1474*4882a593Smuzhiyun
1475*4882a593Smuzhiyun mutex_unlock(&tc_info->lock);
1476*4882a593Smuzhiyun
1477*4882a593Smuzhiyun rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
1478*4882a593Smuzhiyun tc_info->flow_ht_params);
1479*4882a593Smuzhiyun if (rc)
1480*4882a593Smuzhiyun netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d\n",
1481*4882a593Smuzhiyun __func__, rc);
1482*4882a593Smuzhiyun
1483*4882a593Smuzhiyun kfree_rcu(flow_node, rcu);
1484*4882a593Smuzhiyun return 0;
1485*4882a593Smuzhiyun }
1486*4882a593Smuzhiyun
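/* Descriptive note: a flow whose source function is the PF's own fid is
 * marked BNXT_DIR_RX, any other source (e.g. a VF fid) BNXT_DIR_TX.
 * The direction is later encoded into the stats request handle in
 * bnxt_fill_cfa_stats_req().
 */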
1487*4882a593Smuzhiyun static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
1488*4882a593Smuzhiyun u16 src_fid)
1489*4882a593Smuzhiyun {
1490*4882a593Smuzhiyun flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
1491*4882a593Smuzhiyun }
1492*4882a593Smuzhiyun
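/* Descriptive note: for tunnel-decap flows the packet actually ingresses
 * on the PF (uplink), so the PF's fw_fid is used as the flow's source
 * fid instead of the fid supplied by the caller.
 */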
1493*4882a593Smuzhiyun static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
1494*4882a593Smuzhiyun u16 src_fid)
1495*4882a593Smuzhiyun {
1496*4882a593Smuzhiyun if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
1497*4882a593Smuzhiyun flow->src_fid = bp->pf.fw_fid;
1498*4882a593Smuzhiyun else
1499*4882a593Smuzhiyun flow->src_fid = src_fid;
1500*4882a593Smuzhiyun }
1501*4882a593Smuzhiyun
1502*4882a593Smuzhiyun /* Add a new flow or replace an existing flow.
1503*4882a593Smuzhiyun * Notes on locking:
1504*4882a593Smuzhiyun * There are essentially two critical sections here.
1505*4882a593Smuzhiyun * 1. while adding a new flow
1506*4882a593Smuzhiyun * a) lookup l2-key
1507*4882a593Smuzhiyun * b) issue HWRM cmd and get flow_handle
1508*4882a593Smuzhiyun * c) link l2-key with flow
1509*4882a593Smuzhiyun * 2. while deleting a flow
1510*4882a593Smuzhiyun * a) unlinking l2-key from flow
1511*4882a593Smuzhiyun * A lock is needed to protect these two critical sections.
1512*4882a593Smuzhiyun *
1513*4882a593Smuzhiyun * The hash-tables are already protected by the rhashtable API.
1514*4882a593Smuzhiyun */
1515*4882a593Smuzhiyun static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
1516*4882a593Smuzhiyun struct flow_cls_offload *tc_flow_cmd)
1517*4882a593Smuzhiyun {
1518*4882a593Smuzhiyun struct bnxt_tc_flow_node *new_node, *old_node;
1519*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
1520*4882a593Smuzhiyun struct bnxt_tc_flow *flow;
1521*4882a593Smuzhiyun __le32 tunnel_handle = 0;
1522*4882a593Smuzhiyun __le16 ref_flow_handle;
1523*4882a593Smuzhiyun int rc;
1524*4882a593Smuzhiyun
1525*4882a593Smuzhiyun /* allocate memory for the new flow and its node */
1526*4882a593Smuzhiyun new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
1527*4882a593Smuzhiyun if (!new_node) {
1528*4882a593Smuzhiyun rc = -ENOMEM;
1529*4882a593Smuzhiyun goto done;
1530*4882a593Smuzhiyun }
1531*4882a593Smuzhiyun new_node->cookie = tc_flow_cmd->cookie;
1532*4882a593Smuzhiyun flow = &new_node->flow;
1533*4882a593Smuzhiyun
1534*4882a593Smuzhiyun rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
1535*4882a593Smuzhiyun if (rc)
1536*4882a593Smuzhiyun goto free_node;
1537*4882a593Smuzhiyun
1538*4882a593Smuzhiyun bnxt_tc_set_src_fid(bp, flow, src_fid);
1539*4882a593Smuzhiyun bnxt_tc_set_flow_dir(bp, flow, flow->src_fid);
1540*4882a593Smuzhiyun
1541*4882a593Smuzhiyun if (!bnxt_tc_can_offload(bp, flow)) {
1542*4882a593Smuzhiyun rc = -EOPNOTSUPP;
1543*4882a593Smuzhiyun kfree_rcu(new_node, rcu);
1544*4882a593Smuzhiyun return rc;
1545*4882a593Smuzhiyun }
1546*4882a593Smuzhiyun
1547*4882a593Smuzhiyun /* If a flow exists with the same cookie, delete it */
1548*4882a593Smuzhiyun old_node = rhashtable_lookup_fast(&tc_info->flow_table,
1549*4882a593Smuzhiyun &tc_flow_cmd->cookie,
1550*4882a593Smuzhiyun tc_info->flow_ht_params);
1551*4882a593Smuzhiyun if (old_node)
1552*4882a593Smuzhiyun __bnxt_tc_del_flow(bp, old_node);
1553*4882a593Smuzhiyun
1554*4882a593Smuzhiyun /* Check if the L2 part of the flow has been offloaded already.
1555*4882a593Smuzhiyun * If so, bump up its refcnt and get its reference handle.
1556*4882a593Smuzhiyun */
1557*4882a593Smuzhiyun mutex_lock(&tc_info->lock);
1558*4882a593Smuzhiyun rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
1559*4882a593Smuzhiyun if (rc)
1560*4882a593Smuzhiyun goto unlock;
1561*4882a593Smuzhiyun
1562*4882a593Smuzhiyun /* If the flow involves tunnel encap/decap, get tunnel_handle */
1563*4882a593Smuzhiyun rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
1564*4882a593Smuzhiyun if (rc)
1565*4882a593Smuzhiyun goto put_l2;
1566*4882a593Smuzhiyun
1567*4882a593Smuzhiyun /* send HWRM cmd to alloc the flow */
1568*4882a593Smuzhiyun rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
1569*4882a593Smuzhiyun tunnel_handle, new_node);
1570*4882a593Smuzhiyun if (rc)
1571*4882a593Smuzhiyun goto put_tunnel;
1572*4882a593Smuzhiyun
1573*4882a593Smuzhiyun flow->lastused = jiffies;
1574*4882a593Smuzhiyun spin_lock_init(&flow->stats_lock);
1575*4882a593Smuzhiyun /* add new flow to flow-table */
1576*4882a593Smuzhiyun rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
1577*4882a593Smuzhiyun tc_info->flow_ht_params);
1578*4882a593Smuzhiyun if (rc)
1579*4882a593Smuzhiyun goto hwrm_flow_free;
1580*4882a593Smuzhiyun
1581*4882a593Smuzhiyun mutex_unlock(&tc_info->lock);
1582*4882a593Smuzhiyun return 0;
1583*4882a593Smuzhiyun
1584*4882a593Smuzhiyun hwrm_flow_free:
1585*4882a593Smuzhiyun bnxt_hwrm_cfa_flow_free(bp, new_node);
1586*4882a593Smuzhiyun put_tunnel:
1587*4882a593Smuzhiyun bnxt_tc_put_tunnel_handle(bp, flow, new_node);
1588*4882a593Smuzhiyun put_l2:
1589*4882a593Smuzhiyun bnxt_tc_put_l2_node(bp, new_node);
1590*4882a593Smuzhiyun unlock:
1591*4882a593Smuzhiyun mutex_unlock(&tc_info->lock);
1592*4882a593Smuzhiyun free_node:
1593*4882a593Smuzhiyun kfree_rcu(new_node, rcu);
1594*4882a593Smuzhiyun done:
1595*4882a593Smuzhiyun netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d\n",
1596*4882a593Smuzhiyun __func__, tc_flow_cmd->cookie, rc);
1597*4882a593Smuzhiyun return rc;
1598*4882a593Smuzhiyun }
1599*4882a593Smuzhiyun
1600*4882a593Smuzhiyun static int bnxt_tc_del_flow(struct bnxt *bp,
1601*4882a593Smuzhiyun struct flow_cls_offload *tc_flow_cmd)
1602*4882a593Smuzhiyun {
1603*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
1604*4882a593Smuzhiyun struct bnxt_tc_flow_node *flow_node;
1605*4882a593Smuzhiyun
1606*4882a593Smuzhiyun flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
1607*4882a593Smuzhiyun &tc_flow_cmd->cookie,
1608*4882a593Smuzhiyun tc_info->flow_ht_params);
1609*4882a593Smuzhiyun if (!flow_node)
1610*4882a593Smuzhiyun return -EINVAL;
1611*4882a593Smuzhiyun
1612*4882a593Smuzhiyun return __bnxt_tc_del_flow(bp, flow_node);
1613*4882a593Smuzhiyun }
1614*4882a593Smuzhiyun
1615*4882a593Smuzhiyun static int bnxt_tc_get_flow_stats(struct bnxt *bp,
1616*4882a593Smuzhiyun struct flow_cls_offload *tc_flow_cmd)
1617*4882a593Smuzhiyun {
1618*4882a593Smuzhiyun struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
1619*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
1620*4882a593Smuzhiyun struct bnxt_tc_flow_node *flow_node;
1621*4882a593Smuzhiyun struct bnxt_tc_flow *flow;
1622*4882a593Smuzhiyun unsigned long lastused;
1623*4882a593Smuzhiyun
1624*4882a593Smuzhiyun flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
1625*4882a593Smuzhiyun &tc_flow_cmd->cookie,
1626*4882a593Smuzhiyun tc_info->flow_ht_params);
1627*4882a593Smuzhiyun if (!flow_node)
1628*4882a593Smuzhiyun return -1;
1629*4882a593Smuzhiyun
1630*4882a593Smuzhiyun flow = &flow_node->flow;
1631*4882a593Smuzhiyun curr_stats = &flow->stats;
1632*4882a593Smuzhiyun prev_stats = &flow->prev_stats;
1633*4882a593Smuzhiyun
1634*4882a593Smuzhiyun spin_lock(&flow->stats_lock);
1635*4882a593Smuzhiyun stats.packets = curr_stats->packets - prev_stats->packets;
1636*4882a593Smuzhiyun stats.bytes = curr_stats->bytes - prev_stats->bytes;
1637*4882a593Smuzhiyun *prev_stats = *curr_stats;
1638*4882a593Smuzhiyun lastused = flow->lastused;
1639*4882a593Smuzhiyun spin_unlock(&flow->stats_lock);
1640*4882a593Smuzhiyun
1641*4882a593Smuzhiyun flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, 0,
1642*4882a593Smuzhiyun lastused, FLOW_ACTION_HW_STATS_DELAYED);
1643*4882a593Smuzhiyun return 0;
1644*4882a593Smuzhiyun }
1645*4882a593Smuzhiyun
1646*4882a593Smuzhiyun static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
1647*4882a593Smuzhiyun struct bnxt_tc_flow_node *flow_node,
1648*4882a593Smuzhiyun __le16 *flow_handle, __le32 *flow_id)
1649*4882a593Smuzhiyun {
1650*4882a593Smuzhiyun u16 handle;
1651*4882a593Smuzhiyun
1652*4882a593Smuzhiyun if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
1653*4882a593Smuzhiyun *flow_id = flow_node->flow_id;
1654*4882a593Smuzhiyun
1655*4882a593Smuzhiyun /* If flow_id is used to fetch flow stats then:
1656*4882a593Smuzhiyun * 1. lower 12 bits of flow_handle must be set to all 1s.
1657*4882a593Smuzhiyun * 2. 15th bit of flow_handle must specify the flow
1658*4882a593Smuzhiyun * direction (TX/RX).
1659*4882a593Smuzhiyun */
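/* Illustrative values only, assuming the HSI definitions place
 * FLOW_HANDLE_MAX_MASK in the low 12 bits and DIR_RX at bit 15: an RX
 * flow would then use flow_handle 0x8fff and a TX flow 0x0fff.
 */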
1660*4882a593Smuzhiyun if (flow_node->flow.l2_key.dir == BNXT_DIR_RX)
1661*4882a593Smuzhiyun handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
1662*4882a593Smuzhiyun CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
1663*4882a593Smuzhiyun else
1664*4882a593Smuzhiyun handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
1665*4882a593Smuzhiyun
1666*4882a593Smuzhiyun *flow_handle = cpu_to_le16(handle);
1667*4882a593Smuzhiyun } else {
1668*4882a593Smuzhiyun *flow_handle = flow_node->flow_handle;
1669*4882a593Smuzhiyun }
1670*4882a593Smuzhiyun }
1671*4882a593Smuzhiyun
1672*4882a593Smuzhiyun static int
1673*4882a593Smuzhiyun bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
1674*4882a593Smuzhiyun struct bnxt_tc_stats_batch stats_batch[])
1675*4882a593Smuzhiyun {
1676*4882a593Smuzhiyun struct hwrm_cfa_flow_stats_input req = { 0 };
1677*4882a593Smuzhiyun struct hwrm_cfa_flow_stats_output *resp;
1678*4882a593Smuzhiyun __le16 *req_flow_handles = &req.flow_handle_0;
1679*4882a593Smuzhiyun __le32 *req_flow_ids = &req.flow_id_0;
1680*4882a593Smuzhiyun int rc, i;
1681*4882a593Smuzhiyun
1682*4882a593Smuzhiyun bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
1683*4882a593Smuzhiyun req.num_flows = cpu_to_le16(num_flows);
1684*4882a593Smuzhiyun for (i = 0; i < num_flows; i++) {
1685*4882a593Smuzhiyun struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
1686*4882a593Smuzhiyun
1687*4882a593Smuzhiyun bnxt_fill_cfa_stats_req(bp, flow_node,
1688*4882a593Smuzhiyun &req_flow_handles[i], &req_flow_ids[i]);
1689*4882a593Smuzhiyun }
1690*4882a593Smuzhiyun
1691*4882a593Smuzhiyun mutex_lock(&bp->hwrm_cmd_lock);
1692*4882a593Smuzhiyun rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1693*4882a593Smuzhiyun if (!rc) {
1694*4882a593Smuzhiyun __le64 *resp_packets;
1695*4882a593Smuzhiyun __le64 *resp_bytes;
1696*4882a593Smuzhiyun
1697*4882a593Smuzhiyun resp = bnxt_get_hwrm_resp_addr(bp, &req);
1698*4882a593Smuzhiyun resp_packets = &resp->packet_0;
1699*4882a593Smuzhiyun resp_bytes = &resp->byte_0;
1700*4882a593Smuzhiyun
1701*4882a593Smuzhiyun for (i = 0; i < num_flows; i++) {
1702*4882a593Smuzhiyun stats_batch[i].hw_stats.packets =
1703*4882a593Smuzhiyun le64_to_cpu(resp_packets[i]);
1704*4882a593Smuzhiyun stats_batch[i].hw_stats.bytes =
1705*4882a593Smuzhiyun le64_to_cpu(resp_bytes[i]);
1706*4882a593Smuzhiyun }
1707*4882a593Smuzhiyun } else {
1708*4882a593Smuzhiyun netdev_info(bp->dev, "error rc=%d\n", rc);
1709*4882a593Smuzhiyun }
1710*4882a593Smuzhiyun mutex_unlock(&bp->hwrm_cmd_lock);
1711*4882a593Smuzhiyun
1712*4882a593Smuzhiyun return rc;
1713*4882a593Smuzhiyun }
1714*4882a593Smuzhiyun
1715*4882a593Smuzhiyun /* Add val to accum while handling a possible wraparound
1716*4882a593Smuzhiyun * of val. Even though val is of type u64, its actual width
1717*4882a593Smuzhiyun * is denoted by mask and will wrap-around beyond that width.
1718*4882a593Smuzhiyun */
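/* Worked example (illustrative, using the 28-bit packet counter, i.e.
 * mask == 0x0fffffff): if *accum == 0x0ffffff0 and the HW next reports
 * val == 0x10, val is below the accumulated low bits, so the counter
 * must have wrapped; the result is 0x10 + (mask + 1) == 0x10000010.
 */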
1719*4882a593Smuzhiyun static void accumulate_val(u64 *accum, u64 val, u64 mask)
1720*4882a593Smuzhiyun {
1721*4882a593Smuzhiyun #define low_bits(x, mask) ((x) & (mask))
1722*4882a593Smuzhiyun #define high_bits(x, mask) ((x) & ~(mask))
1723*4882a593Smuzhiyun bool wrapped = val < low_bits(*accum, mask);
1724*4882a593Smuzhiyun
1725*4882a593Smuzhiyun *accum = high_bits(*accum, mask) + val;
1726*4882a593Smuzhiyun if (wrapped)
1727*4882a593Smuzhiyun *accum += (mask + 1);
1728*4882a593Smuzhiyun }
1729*4882a593Smuzhiyun
1730*4882a593Smuzhiyun /* The HW counters' width is much less than 64 bits.
1731*4882a593Smuzhiyun * Handle possible wrap-around while updating the stat counters
1732*4882a593Smuzhiyun */
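/* The masks below are set in bnxt_init_tc(): 36 bits for the byte
 * counter and 28 bits for the packet counter.
 */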
1733*4882a593Smuzhiyun static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
1734*4882a593Smuzhiyun struct bnxt_tc_flow_stats *acc_stats,
1735*4882a593Smuzhiyun struct bnxt_tc_flow_stats *hw_stats)
1736*4882a593Smuzhiyun {
1737*4882a593Smuzhiyun accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
1738*4882a593Smuzhiyun accumulate_val(&acc_stats->packets, hw_stats->packets,
1739*4882a593Smuzhiyun tc_info->packets_mask);
1740*4882a593Smuzhiyun }
1741*4882a593Smuzhiyun
1742*4882a593Smuzhiyun static int
1743*4882a593Smuzhiyun bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
1744*4882a593Smuzhiyun struct bnxt_tc_stats_batch stats_batch[])
1745*4882a593Smuzhiyun {
1746*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
1747*4882a593Smuzhiyun int rc, i;
1748*4882a593Smuzhiyun
1749*4882a593Smuzhiyun rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
1750*4882a593Smuzhiyun if (rc)
1751*4882a593Smuzhiyun return rc;
1752*4882a593Smuzhiyun
1753*4882a593Smuzhiyun for (i = 0; i < num_flows; i++) {
1754*4882a593Smuzhiyun struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
1755*4882a593Smuzhiyun struct bnxt_tc_flow *flow = &flow_node->flow;
1756*4882a593Smuzhiyun
1757*4882a593Smuzhiyun spin_lock(&flow->stats_lock);
1758*4882a593Smuzhiyun bnxt_flow_stats_accum(tc_info, &flow->stats,
1759*4882a593Smuzhiyun &stats_batch[i].hw_stats);
1760*4882a593Smuzhiyun if (flow->stats.packets != flow->prev_stats.packets)
1761*4882a593Smuzhiyun flow->lastused = jiffies;
1762*4882a593Smuzhiyun spin_unlock(&flow->stats_lock);
1763*4882a593Smuzhiyun }
1764*4882a593Smuzhiyun
1765*4882a593Smuzhiyun return 0;
1766*4882a593Smuzhiyun }
1767*4882a593Smuzhiyun
1768*4882a593Smuzhiyun static int
1769*4882a593Smuzhiyun bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
1770*4882a593Smuzhiyun struct bnxt_tc_stats_batch stats_batch[],
1771*4882a593Smuzhiyun int *num_flows)
1772*4882a593Smuzhiyun {
1773*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
1774*4882a593Smuzhiyun struct rhashtable_iter *iter = &tc_info->iter;
1775*4882a593Smuzhiyun void *flow_node;
1776*4882a593Smuzhiyun int rc, i;
1777*4882a593Smuzhiyun
1778*4882a593Smuzhiyun rhashtable_walk_start(iter);
1779*4882a593Smuzhiyun
1780*4882a593Smuzhiyun rc = 0;
1781*4882a593Smuzhiyun for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
1782*4882a593Smuzhiyun flow_node = rhashtable_walk_next(iter);
1783*4882a593Smuzhiyun if (IS_ERR(flow_node)) {
1784*4882a593Smuzhiyun i = 0;
1785*4882a593Smuzhiyun if (PTR_ERR(flow_node) == -EAGAIN) {
1786*4882a593Smuzhiyun continue;
1787*4882a593Smuzhiyun } else {
1788*4882a593Smuzhiyun rc = PTR_ERR(flow_node);
1789*4882a593Smuzhiyun goto done;
1790*4882a593Smuzhiyun }
1791*4882a593Smuzhiyun }
1792*4882a593Smuzhiyun
1793*4882a593Smuzhiyun /* No more flows */
1794*4882a593Smuzhiyun if (!flow_node)
1795*4882a593Smuzhiyun goto done;
1796*4882a593Smuzhiyun
1797*4882a593Smuzhiyun stats_batch[i].flow_node = flow_node;
1798*4882a593Smuzhiyun }
1799*4882a593Smuzhiyun done:
1800*4882a593Smuzhiyun rhashtable_walk_stop(iter);
1801*4882a593Smuzhiyun *num_flows = i;
1802*4882a593Smuzhiyun return rc;
1803*4882a593Smuzhiyun }
1804*4882a593Smuzhiyun
1805*4882a593Smuzhiyun void bnxt_tc_flow_stats_work(struct bnxt *bp)
1806*4882a593Smuzhiyun {
1807*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
1808*4882a593Smuzhiyun int num_flows, rc;
1809*4882a593Smuzhiyun
1810*4882a593Smuzhiyun num_flows = atomic_read(&tc_info->flow_table.nelems);
1811*4882a593Smuzhiyun if (!num_flows)
1812*4882a593Smuzhiyun return;
1813*4882a593Smuzhiyun
1814*4882a593Smuzhiyun rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);
1815*4882a593Smuzhiyun
1816*4882a593Smuzhiyun for (;;) {
1817*4882a593Smuzhiyun rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
1818*4882a593Smuzhiyun &num_flows);
1819*4882a593Smuzhiyun if (rc) {
1820*4882a593Smuzhiyun if (rc == -EAGAIN)
1821*4882a593Smuzhiyun continue;
1822*4882a593Smuzhiyun break;
1823*4882a593Smuzhiyun }
1824*4882a593Smuzhiyun
1825*4882a593Smuzhiyun if (!num_flows)
1826*4882a593Smuzhiyun break;
1827*4882a593Smuzhiyun
1828*4882a593Smuzhiyun bnxt_tc_flow_stats_batch_update(bp, num_flows,
1829*4882a593Smuzhiyun tc_info->stats_batch);
1830*4882a593Smuzhiyun }
1831*4882a593Smuzhiyun
1832*4882a593Smuzhiyun rhashtable_walk_exit(&tc_info->iter);
1833*4882a593Smuzhiyun }
1834*4882a593Smuzhiyun
1835*4882a593Smuzhiyun int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
1836*4882a593Smuzhiyun struct flow_cls_offload *cls_flower)
1837*4882a593Smuzhiyun {
1838*4882a593Smuzhiyun switch (cls_flower->command) {
1839*4882a593Smuzhiyun case FLOW_CLS_REPLACE:
1840*4882a593Smuzhiyun return bnxt_tc_add_flow(bp, src_fid, cls_flower);
1841*4882a593Smuzhiyun case FLOW_CLS_DESTROY:
1842*4882a593Smuzhiyun return bnxt_tc_del_flow(bp, cls_flower);
1843*4882a593Smuzhiyun case FLOW_CLS_STATS:
1844*4882a593Smuzhiyun return bnxt_tc_get_flow_stats(bp, cls_flower);
1845*4882a593Smuzhiyun default:
1846*4882a593Smuzhiyun return -EOPNOTSUPP;
1847*4882a593Smuzhiyun }
1848*4882a593Smuzhiyun }
1849*4882a593Smuzhiyun
1850*4882a593Smuzhiyun static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type,
1851*4882a593Smuzhiyun void *type_data, void *cb_priv)
1852*4882a593Smuzhiyun {
1853*4882a593Smuzhiyun struct bnxt_flower_indr_block_cb_priv *priv = cb_priv;
1854*4882a593Smuzhiyun struct flow_cls_offload *flower = type_data;
1855*4882a593Smuzhiyun struct bnxt *bp = priv->bp;
1856*4882a593Smuzhiyun
1857*4882a593Smuzhiyun if (!tc_cls_can_offload_and_chain0(bp->dev, type_data))
1858*4882a593Smuzhiyun return -EOPNOTSUPP;
1859*4882a593Smuzhiyun
1860*4882a593Smuzhiyun switch (type) {
1861*4882a593Smuzhiyun case TC_SETUP_CLSFLOWER:
1862*4882a593Smuzhiyun return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, flower);
1863*4882a593Smuzhiyun default:
1864*4882a593Smuzhiyun return -EOPNOTSUPP;
1865*4882a593Smuzhiyun }
1866*4882a593Smuzhiyun }
1867*4882a593Smuzhiyun
1868*4882a593Smuzhiyun static struct bnxt_flower_indr_block_cb_priv *
1869*4882a593Smuzhiyun bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
1870*4882a593Smuzhiyun {
1871*4882a593Smuzhiyun struct bnxt_flower_indr_block_cb_priv *cb_priv;
1872*4882a593Smuzhiyun
1873*4882a593Smuzhiyun list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
1874*4882a593Smuzhiyun if (cb_priv->tunnel_netdev == netdev)
1875*4882a593Smuzhiyun return cb_priv;
1876*4882a593Smuzhiyun
1877*4882a593Smuzhiyun return NULL;
1878*4882a593Smuzhiyun }
1879*4882a593Smuzhiyun
1880*4882a593Smuzhiyun static void bnxt_tc_setup_indr_rel(void *cb_priv)
1881*4882a593Smuzhiyun {
1882*4882a593Smuzhiyun struct bnxt_flower_indr_block_cb_priv *priv = cb_priv;
1883*4882a593Smuzhiyun
1884*4882a593Smuzhiyun list_del(&priv->list);
1885*4882a593Smuzhiyun kfree(priv);
1886*4882a593Smuzhiyun }
1887*4882a593Smuzhiyun
1888*4882a593Smuzhiyun static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct Qdisc *sch, struct bnxt *bp,
1889*4882a593Smuzhiyun struct flow_block_offload *f, void *data,
1890*4882a593Smuzhiyun void (*cleanup)(struct flow_block_cb *block_cb))
1891*4882a593Smuzhiyun {
1892*4882a593Smuzhiyun struct bnxt_flower_indr_block_cb_priv *cb_priv;
1893*4882a593Smuzhiyun struct flow_block_cb *block_cb;
1894*4882a593Smuzhiyun
1895*4882a593Smuzhiyun if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1896*4882a593Smuzhiyun return -EOPNOTSUPP;
1897*4882a593Smuzhiyun
1898*4882a593Smuzhiyun switch (f->command) {
1899*4882a593Smuzhiyun case FLOW_BLOCK_BIND:
1900*4882a593Smuzhiyun cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
1901*4882a593Smuzhiyun if (!cb_priv)
1902*4882a593Smuzhiyun return -ENOMEM;
1903*4882a593Smuzhiyun
1904*4882a593Smuzhiyun cb_priv->tunnel_netdev = netdev;
1905*4882a593Smuzhiyun cb_priv->bp = bp;
1906*4882a593Smuzhiyun list_add(&cb_priv->list, &bp->tc_indr_block_list);
1907*4882a593Smuzhiyun
1908*4882a593Smuzhiyun block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
1909*4882a593Smuzhiyun cb_priv, cb_priv,
1910*4882a593Smuzhiyun bnxt_tc_setup_indr_rel, f,
1911*4882a593Smuzhiyun netdev, sch, data, bp, cleanup);
1912*4882a593Smuzhiyun if (IS_ERR(block_cb)) {
1913*4882a593Smuzhiyun list_del(&cb_priv->list);
1914*4882a593Smuzhiyun kfree(cb_priv);
1915*4882a593Smuzhiyun return PTR_ERR(block_cb);
1916*4882a593Smuzhiyun }
1917*4882a593Smuzhiyun
1918*4882a593Smuzhiyun flow_block_cb_add(block_cb, f);
1919*4882a593Smuzhiyun list_add_tail(&block_cb->driver_list, &bnxt_block_cb_list);
1920*4882a593Smuzhiyun break;
1921*4882a593Smuzhiyun case FLOW_BLOCK_UNBIND:
1922*4882a593Smuzhiyun cb_priv = bnxt_tc_indr_block_cb_lookup(bp, netdev);
1923*4882a593Smuzhiyun if (!cb_priv)
1924*4882a593Smuzhiyun return -ENOENT;
1925*4882a593Smuzhiyun
1926*4882a593Smuzhiyun block_cb = flow_block_cb_lookup(f->block,
1927*4882a593Smuzhiyun bnxt_tc_setup_indr_block_cb,
1928*4882a593Smuzhiyun cb_priv);
1929*4882a593Smuzhiyun if (!block_cb)
1930*4882a593Smuzhiyun return -ENOENT;
1931*4882a593Smuzhiyun
1932*4882a593Smuzhiyun flow_indr_block_cb_remove(block_cb, f);
1933*4882a593Smuzhiyun list_del(&block_cb->driver_list);
1934*4882a593Smuzhiyun break;
1935*4882a593Smuzhiyun default:
1936*4882a593Smuzhiyun return -EOPNOTSUPP;
1937*4882a593Smuzhiyun }
1938*4882a593Smuzhiyun return 0;
1939*4882a593Smuzhiyun }
1940*4882a593Smuzhiyun
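/* Only VXLAN tunnel netdevs are currently accepted for indirect TC
 * block offload.
 */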
1941*4882a593Smuzhiyun static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
1942*4882a593Smuzhiyun {
1943*4882a593Smuzhiyun return netif_is_vxlan(netdev);
1944*4882a593Smuzhiyun }
1945*4882a593Smuzhiyun
1946*4882a593Smuzhiyun static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
1947*4882a593Smuzhiyun enum tc_setup_type type, void *type_data,
1948*4882a593Smuzhiyun void *data,
1949*4882a593Smuzhiyun void (*cleanup)(struct flow_block_cb *block_cb))
1950*4882a593Smuzhiyun {
1951*4882a593Smuzhiyun if (!bnxt_is_netdev_indr_offload(netdev))
1952*4882a593Smuzhiyun return -EOPNOTSUPP;
1953*4882a593Smuzhiyun
1954*4882a593Smuzhiyun switch (type) {
1955*4882a593Smuzhiyun case TC_SETUP_BLOCK:
1956*4882a593Smuzhiyun return bnxt_tc_setup_indr_block(netdev, sch, cb_priv, type_data, data, cleanup);
1957*4882a593Smuzhiyun default:
1958*4882a593Smuzhiyun break;
1959*4882a593Smuzhiyun }
1960*4882a593Smuzhiyun
1961*4882a593Smuzhiyun return -EOPNOTSUPP;
1962*4882a593Smuzhiyun }
1963*4882a593Smuzhiyun
1964*4882a593Smuzhiyun static const struct rhashtable_params bnxt_tc_flow_ht_params = {
1965*4882a593Smuzhiyun .head_offset = offsetof(struct bnxt_tc_flow_node, node),
1966*4882a593Smuzhiyun .key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
1967*4882a593Smuzhiyun .key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
1968*4882a593Smuzhiyun .automatic_shrinking = true
1969*4882a593Smuzhiyun };
1970*4882a593Smuzhiyun
1971*4882a593Smuzhiyun static const struct rhashtable_params bnxt_tc_l2_ht_params = {
1972*4882a593Smuzhiyun .head_offset = offsetof(struct bnxt_tc_l2_node, node),
1973*4882a593Smuzhiyun .key_offset = offsetof(struct bnxt_tc_l2_node, key),
1974*4882a593Smuzhiyun .key_len = BNXT_TC_L2_KEY_LEN,
1975*4882a593Smuzhiyun .automatic_shrinking = true
1976*4882a593Smuzhiyun };
1977*4882a593Smuzhiyun
1978*4882a593Smuzhiyun static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
1979*4882a593Smuzhiyun .head_offset = offsetof(struct bnxt_tc_l2_node, node),
1980*4882a593Smuzhiyun .key_offset = offsetof(struct bnxt_tc_l2_node, key),
1981*4882a593Smuzhiyun .key_len = BNXT_TC_L2_KEY_LEN,
1982*4882a593Smuzhiyun .automatic_shrinking = true
1983*4882a593Smuzhiyun };
1984*4882a593Smuzhiyun
1985*4882a593Smuzhiyun static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
1986*4882a593Smuzhiyun .head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
1987*4882a593Smuzhiyun .key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
1988*4882a593Smuzhiyun .key_len = sizeof(struct ip_tunnel_key),
1989*4882a593Smuzhiyun .automatic_shrinking = true
1990*4882a593Smuzhiyun };
1991*4882a593Smuzhiyun
1992*4882a593Smuzhiyun /* convert counter width in bits to a mask */
1993*4882a593Smuzhiyun #define mask(width) ((u64)~0 >> (64 - (width)))
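/* e.g. mask(28) == 0x0fffffff and mask(36) == 0xfffffffffULL */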
1994*4882a593Smuzhiyun
1995*4882a593Smuzhiyun int bnxt_init_tc(struct bnxt *bp)
1996*4882a593Smuzhiyun {
1997*4882a593Smuzhiyun struct bnxt_tc_info *tc_info;
1998*4882a593Smuzhiyun int rc;
1999*4882a593Smuzhiyun
2000*4882a593Smuzhiyun if (bp->hwrm_spec_code < 0x10803)
2001*4882a593Smuzhiyun return 0;
2002*4882a593Smuzhiyun
2003*4882a593Smuzhiyun tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
2004*4882a593Smuzhiyun if (!tc_info)
2005*4882a593Smuzhiyun return -ENOMEM;
2006*4882a593Smuzhiyun mutex_init(&tc_info->lock);
2007*4882a593Smuzhiyun
2008*4882a593Smuzhiyun /* Counter widths are programmed by FW */
2009*4882a593Smuzhiyun tc_info->bytes_mask = mask(36);
2010*4882a593Smuzhiyun tc_info->packets_mask = mask(28);
2011*4882a593Smuzhiyun
2012*4882a593Smuzhiyun tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
2013*4882a593Smuzhiyun rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
2014*4882a593Smuzhiyun if (rc)
2015*4882a593Smuzhiyun goto free_tc_info;
2016*4882a593Smuzhiyun
2017*4882a593Smuzhiyun tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
2018*4882a593Smuzhiyun rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
2019*4882a593Smuzhiyun if (rc)
2020*4882a593Smuzhiyun goto destroy_flow_table;
2021*4882a593Smuzhiyun
2022*4882a593Smuzhiyun tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
2023*4882a593Smuzhiyun rc = rhashtable_init(&tc_info->decap_l2_table,
2024*4882a593Smuzhiyun &tc_info->decap_l2_ht_params);
2025*4882a593Smuzhiyun if (rc)
2026*4882a593Smuzhiyun goto destroy_l2_table;
2027*4882a593Smuzhiyun
2028*4882a593Smuzhiyun tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
2029*4882a593Smuzhiyun rc = rhashtable_init(&tc_info->decap_table,
2030*4882a593Smuzhiyun &tc_info->decap_ht_params);
2031*4882a593Smuzhiyun if (rc)
2032*4882a593Smuzhiyun goto destroy_decap_l2_table;
2033*4882a593Smuzhiyun
2034*4882a593Smuzhiyun tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
2035*4882a593Smuzhiyun rc = rhashtable_init(&tc_info->encap_table,
2036*4882a593Smuzhiyun &tc_info->encap_ht_params);
2037*4882a593Smuzhiyun if (rc)
2038*4882a593Smuzhiyun goto destroy_decap_table;
2039*4882a593Smuzhiyun
2040*4882a593Smuzhiyun tc_info->enabled = true;
2041*4882a593Smuzhiyun bp->dev->hw_features |= NETIF_F_HW_TC;
2042*4882a593Smuzhiyun bp->dev->features |= NETIF_F_HW_TC;
2043*4882a593Smuzhiyun bp->tc_info = tc_info;
2044*4882a593Smuzhiyun
2045*4882a593Smuzhiyun /* init indirect block notifications */
2046*4882a593Smuzhiyun INIT_LIST_HEAD(&bp->tc_indr_block_list);
2047*4882a593Smuzhiyun
2048*4882a593Smuzhiyun rc = flow_indr_dev_register(bnxt_tc_setup_indr_cb, bp);
2049*4882a593Smuzhiyun if (!rc)
2050*4882a593Smuzhiyun return 0;
2051*4882a593Smuzhiyun
2052*4882a593Smuzhiyun rhashtable_destroy(&tc_info->encap_table);
2053*4882a593Smuzhiyun
2054*4882a593Smuzhiyun destroy_decap_table:
2055*4882a593Smuzhiyun rhashtable_destroy(&tc_info->decap_table);
2056*4882a593Smuzhiyun destroy_decap_l2_table:
2057*4882a593Smuzhiyun rhashtable_destroy(&tc_info->decap_l2_table);
2058*4882a593Smuzhiyun destroy_l2_table:
2059*4882a593Smuzhiyun rhashtable_destroy(&tc_info->l2_table);
2060*4882a593Smuzhiyun destroy_flow_table:
2061*4882a593Smuzhiyun rhashtable_destroy(&tc_info->flow_table);
2062*4882a593Smuzhiyun free_tc_info:
2063*4882a593Smuzhiyun kfree(tc_info);
2064*4882a593Smuzhiyun return rc;
2065*4882a593Smuzhiyun }
2066*4882a593Smuzhiyun
2067*4882a593Smuzhiyun void bnxt_shutdown_tc(struct bnxt *bp)
2068*4882a593Smuzhiyun {
2069*4882a593Smuzhiyun struct bnxt_tc_info *tc_info = bp->tc_info;
2070*4882a593Smuzhiyun
2071*4882a593Smuzhiyun if (!bnxt_tc_flower_enabled(bp))
2072*4882a593Smuzhiyun return;
2073*4882a593Smuzhiyun
2074*4882a593Smuzhiyun flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp,
2075*4882a593Smuzhiyun bnxt_tc_setup_indr_rel);
2076*4882a593Smuzhiyun rhashtable_destroy(&tc_info->flow_table);
2077*4882a593Smuzhiyun rhashtable_destroy(&tc_info->l2_table);
2078*4882a593Smuzhiyun rhashtable_destroy(&tc_info->decap_l2_table);
2079*4882a593Smuzhiyun rhashtable_destroy(&tc_info->decap_table);
2080*4882a593Smuzhiyun rhashtable_destroy(&tc_info->encap_table);
2081*4882a593Smuzhiyun kfree(tc_info);
2082*4882a593Smuzhiyun bp->tc_info = NULL;
2083*4882a593Smuzhiyun }