xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/intel/ice/ice_arfs.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2020, Intel Corporation. */

#include "ice.h"

/**
 * ice_is_arfs_active - helper to check if aRFS is active
 * @vsi: VSI to check
 */
static bool ice_is_arfs_active(struct ice_vsi *vsi)
{
	return !!vsi->arfs_fltr_list;
}

/**
 * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters
 * @hw: pointer to the HW structure
 * @flow_type: flow type as Flow Director understands it
 *
 * Flow Director will query this function to see if aRFS is currently using
 * the specified flow_type for perfect (4-tuple) filters.
 */
bool
ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
{
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	struct ice_pf *pf = hw->back;
	struct ice_vsi *vsi;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return false;

	arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;

	/* active counters can be updated by multiple CPUs */
	smp_mb__before_atomic();
	switch (flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0;
	default:
		return false;
	}
}

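/* Usage sketch (hypothetical, for illustration only): a Flow Director
 * configuration path could consult this helper before changing the input
 * set for a flow type, e.g.:
 *
 *	if (ice_is_arfs_using_perfect_flow(hw, ICE_FLTR_PTYPE_NONF_IPV4_TCP))
 *		return -EBUSY;
 *
 * The -EBUSY policy is an assumption of this sketch; only the helper's
 * boolean contract is defined in this file.
 */
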
/**
 * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS
 * @vsi: VSI that aRFS is active on
 * @entry: aRFS entry used to change counters
 * @add: true to increment counter, false to decrement
 */
static void
ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,
				  struct ice_arfs_entry *entry, bool add)
{
	struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;

	switch (entry->fltr_info.flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv6_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv6_cnt);
		break;
	default:
		dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
			entry->fltr_info.flow_type);
	}
}

/**
 * ice_arfs_del_flow_rules - delete the rules passed in from HW
 * @vsi: VSI for the flow rules that need to be deleted
 * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion
 *
 * Loop through the delete list passed in and remove the rules from HW. After
 * each rule is deleted, disconnect and free the ice_arfs_entry because it is no
 * longer being referenced by the aRFS hash table.
 */
static void
ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(e, n, del_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, e, false);
		else
			dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, e->fltr_state, e->fltr_info.fltr_id,
				e->flow_id, e->fltr_info.q_index);

		/* The aRFS hash table is no longer referencing this entry */
		hlist_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}

/**
 * ice_arfs_add_flow_rules - add the rules passed in to HW
 * @vsi: VSI for the flow rules that need to be added
 * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition
 *
 * Loop through the add list passed in and program the rules in HW. After each
 * rule is added, disconnect and free the ice_arfs_entry_ptr node. Don't free
 * the ice_arfs_entry(s) because they are still being referenced in the aRFS
 * hash table.
 */
static void
ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head)
{
	struct ice_arfs_entry_ptr *ep;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back,
					     &ep->arfs_entry->fltr_info, true,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
							  true);
		else
			dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, ep->arfs_entry->fltr_state,
				ep->arfs_entry->fltr_info.fltr_id,
				ep->arfs_entry->flow_id,
				ep->arfs_entry->fltr_info.q_index);

		hlist_del(&ep->list_entry);
		devm_kfree(dev, ep);
	}
}

/**
 * ice_arfs_is_flow_expired - check if the aRFS entry has expired
 * @vsi: VSI containing the aRFS entry
 * @arfs_entry: aRFS entry that's being checked for expiration
 *
 * Return true if the flow has expired, else false. This function should be
 * used to determine whether an aRFS entry should be removed from the hardware
 * and software structures.
 */
static bool
ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry)
{
#define ICE_ARFS_TIME_DELTA_EXPIRATION	msecs_to_jiffies(5000)
	if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
				arfs_entry->flow_id,
				arfs_entry->fltr_info.fltr_id))
		return true;

	/* expiration timer only used for UDP filters */
	if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP &&
	    arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		return false;

	return time_in_range64(arfs_entry->time_activated +
			       ICE_ARFS_TIME_DELTA_EXPIRATION,
			       arfs_entry->time_activated, get_jiffies_64());
}

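/* Expiration math: with t0 = time_activated and
 * d = ICE_ARFS_TIME_DELTA_EXPIRATION, time_in_range64(t0 + d, t0, now)
 * is a wrap-safe way of asking "have at least d jiffies elapsed since
 * t0?": it holds exactly when t0 + d falls within [t0, now].
 */
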
/**
 * ice_arfs_update_flow_rules - add/delete aRFS rules in HW
 * @vsi: the VSI to be forwarded to
 * @idx: index into the table of aRFS filter lists. Obtained from skb->hash
 * @add_list: list to populate with filters to be added to Flow Director
 * @del_list: list to populate with filters to be deleted from Flow Director
 *
 * Iterate over the hlist at the index given in the aRFS hash table and
 * determine if there are any aRFS entries that need to be either added or
 * deleted in the HW. If the aRFS entry is marked as ICE_ARFS_INACTIVE the
 * filter needs to be added to HW, else if it's marked as ICE_ARFS_ACTIVE and
 * the flow has expired the filter needs to be deleted from HW. The caller is
 * then expected to program the rules collected on add_list and remove the
 * rules collected on del_list.
 */
static void
ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx,
			   struct hlist_head *add_list,
			   struct hlist_head *del_list)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	/* go through the aRFS hlist at this idx and check for needed updates */
	hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
		/* check if filter needs to be added to HW */
		if (e->fltr_state == ICE_ARFS_INACTIVE) {
			enum ice_fltr_ptype flow_type = e->fltr_info.flow_type;
			struct ice_arfs_entry_ptr *ep =
				devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC);

			if (!ep)
				continue;
			INIT_HLIST_NODE(&ep->list_entry);
			/* reference aRFS entry to add HW filter */
			ep->arfs_entry = e;
			hlist_add_head(&ep->list_entry, add_list);
			e->fltr_state = ICE_ARFS_ACTIVE;
			/* expiration timer only used for UDP flows */
			if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
				e->time_activated = get_jiffies_64();
		} else if (e->fltr_state == ICE_ARFS_ACTIVE) {
			/* check if filter needs to be removed from HW */
			if (ice_arfs_is_flow_expired(vsi, e)) {
				/* remove aRFS entry from hash table for delete
				 * and to prevent referencing it the next time
				 * through this hlist index
				 */
				hlist_del(&e->list_entry);
				e->fltr_state = ICE_ARFS_TODEL;
				/* save reference to aRFS entry for delete */
				hlist_add_head(&e->list_entry, del_list);
			}
		}
}

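/* Filter lifecycle implemented above and in ice_rx_flow_steer() below:
 *
 *	ICE_ARFS_INACTIVE --(queued on add_list)--> ICE_ARFS_ACTIVE
 *	ICE_ARFS_ACTIVE --(expired)--> ICE_ARFS_TODEL --(del_list)--> freed
 *
 * ice_rx_flow_steer() can also move an ACTIVE entry back to INACTIVE when
 * the target Rx queue changes, so the HW rule gets reprogrammed.
 */
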
/**
 * ice_sync_arfs_fltrs - update all aRFS filters
 * @pf: board private structure
 */
void ice_sync_arfs_fltrs(struct ice_pf *pf)
{
	HLIST_HEAD(tmp_del_list);
	HLIST_HEAD(tmp_add_list);
	struct ice_vsi *pf_vsi;
	unsigned int i;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	if (!ice_is_arfs_active(pf_vsi))
		return;

	spin_lock_bh(&pf_vsi->arfs_lock);
	/* Once we process aRFS for the PF VSI get out */
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list,
					   &tmp_del_list);
	spin_unlock_bh(&pf_vsi->arfs_lock);

	/* use list of ice_arfs_entry(s) for delete */
	ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list);

	/* use list of ice_arfs_entry_ptr(s) for add */
	ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list);
}

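/* Scheduling note: ice_rx_flow_steer() below kicks the PF service task via
 * ice_service_task_schedule(), and the service task is expected to invoke
 * ice_sync_arfs_fltrs() so the add/del lists built under arfs_lock are
 * flushed to Flow Director outside of the Rx softirq path.
 */
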
/**
 * ice_arfs_build_entry - builds an aRFS entry based on input
 * @vsi: destination VSI for this flow
 * @fk: flow dissector keys for creating the tuple
 * @rxq_idx: Rx queue to steer this flow to
 * @flow_id: passed down from the stack and saved for flow expiration
 *
 * Returns an aRFS entry on success and NULL on failure.
 */
static struct ice_arfs_entry *
ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk,
		     u16 rxq_idx, u32 flow_id)
{
	struct ice_arfs_entry *arfs_entry;
	struct ice_fdir_fltr *fltr_info;
	u8 ip_proto;

	arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
				  sizeof(*arfs_entry),
				  GFP_ATOMIC | __GFP_NOWARN);
	if (!arfs_entry)
		return NULL;

	fltr_info = &arfs_entry->fltr_info;
	fltr_info->q_index = rxq_idx;
	fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
	fltr_info->dest_vsi = vsi->idx;
	ip_proto = fk->basic.ip_proto;

	if (fk->basic.n_proto == htons(ETH_P_IP)) {
		fltr_info->ip.v4.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV4_TCP :
			ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src;
		fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst;
		fltr_info->ip.v4.src_port = fk->ports.src;
		fltr_info->ip.v4.dst_port = fk->ports.dst;
	} else { /* ETH_P_IPV6 */
		fltr_info->ip.v6.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV6_TCP :
			ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
		fltr_info->ip.v6.src_port = fk->ports.src;
		fltr_info->ip.v6.dst_port = fk->ports.dst;
	}

	arfs_entry->flow_id = flow_id;
	fltr_info->fltr_id =
		atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;

	return arfs_entry;
}

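/* Note on fltr_id above: the ID is reduced modulo RPS_NO_FILTER because
 * RPS_NO_FILTER is the sentinel the RFS core uses for "no filter
 * installed"; keeping IDs below it ensures rps_may_expire_flow() can
 * always match the value returned from ndo_rx_flow_steer().
 */
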
/**
 * ice_arfs_is_perfect_flow_set - check to see if perfect flow is set
 * @hw: pointer to HW structure
 * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order
 * @l4_proto: IPPROTO_UDP or IPPROTO_TCP
 *
 * We only support perfect (4-tuple) filters for aRFS. This function allows aRFS
 * to check if perfect (4-tuple) flow rules are currently in place by Flow
 * Director.
 */
static bool
ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
{
	unsigned long *perfect_fltr = hw->fdir_perfect_fltr;

	/* advanced Flow Director disabled, perfect filters always supported */
	if (!perfect_fltr)
		return true;

	if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr);

	return false;
}

/**
 * ice_rx_flow_steer - steer the Rx flow to where application is being run
 * @netdev: ptr to the netdev being adjusted
 * @skb: buffer with required header information
 * @rxq_idx: queue to which the flow needs to move
 * @flow_id: flow identifier provided by the netdev
 *
 * Based on the skb, rxq_idx, and flow_id passed in, add or update an entry in
 * the aRFS hash table. Iterate over the hlist selected by the skb hash: if the
 * flow_id already exists in the hash table but the rxq_idx has changed, mark
 * the entry as ICE_ARFS_INACTIVE so it gets updated in HW; if the entry
 * already steers to rxq_idx (or is not yet active in HW), just return its
 * filter ID. If no matching entry exists, add a new one, which defaults to
 * ICE_ARFS_INACTIVE so it will be added to HW.
 */
int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
		  u16 rxq_idx, u32 flow_id)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_arfs_entry *arfs_entry;
	struct ice_vsi *vsi = np->vsi;
	struct flow_keys fk;
	struct ice_pf *pf;
	__be16 n_proto;
	u8 ip_proto;
	u16 idx;
	int ret;

	/* failed to allocate memory for aRFS so don't crash */
	if (unlikely(!vsi->arfs_fltr_list))
		return -ENODEV;

	pf = vsi->back;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	n_proto = fk.basic.n_proto;
	/* Support only non-fragmented IPv4 and IPv6 */
	if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) ||
	    n_proto == htons(ETH_P_IPV6))
		ip_proto = fk.basic.ip_proto;
	else
		return -EPROTONOSUPPORT;

	/* Support only TCP and UDP */
	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	/* only support 4-tuple filters for aRFS */
	if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto))
		return -EOPNOTSUPP;

	/* choose the aRFS list bucket based on skb hash */
	idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK;
	/* search for entry in the bucket */
	spin_lock_bh(&vsi->arfs_lock);
	hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
			     list_entry) {
		struct ice_fdir_fltr *fltr_info;

		/* keep searching for the already existing arfs_entry flow */
		if (arfs_entry->flow_id != flow_id)
			continue;

		fltr_info = &arfs_entry->fltr_info;
		ret = fltr_info->fltr_id;

		if (fltr_info->q_index == rxq_idx ||
		    arfs_entry->fltr_state != ICE_ARFS_ACTIVE)
			goto out;

		/* update the queue to forward to on an already existing flow */
		fltr_info->q_index = rxq_idx;
		arfs_entry->fltr_state = ICE_ARFS_INACTIVE;
		ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
		goto out_schedule_service_task;
	}

	arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
	if (!arfs_entry) {
		ret = -ENOMEM;
		goto out;
	}

	ret = arfs_entry->fltr_info.fltr_id;
	INIT_HLIST_NODE(&arfs_entry->list_entry);
	hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
out_schedule_service_task:
	ice_service_task_schedule(pf);
out:
	spin_unlock_bh(&vsi->arfs_lock);
	return ret;
}

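/* Hook-up sketch: this function is meant to back the .ndo_rx_flow_steer
 * callback under CONFIG_RFS_ACCEL (the upstream driver wires it up in
 * ice_main.c roughly as follows):
 *
 *	#ifdef CONFIG_RFS_ACCEL
 *		.ndo_rx_flow_steer = ice_rx_flow_steer,
 *	#endif
 *
 * The RFS core calls it from the Rx softirq path, which is why the bucket
 * walk above uses spin_lock_bh() and allocations use GFP_ATOMIC.
 */
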
/**
 * ice_init_arfs_cntrs - initialize aRFS counter values
 * @vsi: VSI that aRFS counters need to be initialized on
 */
static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
{
	if (!vsi || vsi->type != ICE_VSI_PF)
		return -EINVAL;

	vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
				       GFP_KERNEL);
	if (!vsi->arfs_fltr_cntrs)
		return -ENOMEM;

	vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
					 GFP_KERNEL);
	if (!vsi->arfs_last_fltr_id) {
		kfree(vsi->arfs_fltr_cntrs);
		vsi->arfs_fltr_cntrs = NULL;
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_init_arfs - initialize aRFS resources
 * @vsi: the VSI to be forwarded to
 */
void ice_init_arfs(struct ice_vsi *vsi)
{
	struct hlist_head *arfs_fltr_list;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return;

	arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
				 GFP_KERNEL);
	if (!arfs_fltr_list)
		return;

	if (ice_init_arfs_cntrs(vsi))
		goto free_arfs_fltr_list;

	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		INIT_HLIST_HEAD(&arfs_fltr_list[i]);

	spin_lock_init(&vsi->arfs_lock);

	vsi->arfs_fltr_list = arfs_fltr_list;

	return;

free_arfs_fltr_list:
	kfree(arfs_fltr_list);
}

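/* Usage note: ice_init_arfs() is expected to run once per PF VSI during
 * VSI setup. Until it succeeds, vsi->arfs_fltr_list stays NULL, which is
 * the exact condition ice_is_arfs_active() and ice_rx_flow_steer() check
 * before touching any aRFS state.
 */
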
/**
 * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS
 * @vsi: the VSI to be forwarded to
 */
void ice_clear_arfs(struct ice_vsi *vsi)
{
	struct device *dev;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back ||
	    !vsi->arfs_fltr_list)
		return;

	dev = ice_pf_to_dev(vsi->back);
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++) {
		struct ice_arfs_entry *r;
		struct hlist_node *n;

		spin_lock_bh(&vsi->arfs_lock);
		hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
					  list_entry) {
			hlist_del(&r->list_entry);
			devm_kfree(dev, r);
		}
		spin_unlock_bh(&vsi->arfs_lock);
	}

	kfree(vsi->arfs_fltr_list);
	vsi->arfs_fltr_list = NULL;
	kfree(vsi->arfs_last_fltr_id);
	vsi->arfs_last_fltr_id = NULL;
	kfree(vsi->arfs_fltr_cntrs);
	vsi->arfs_fltr_cntrs = NULL;
}

/**
 * ice_free_cpu_rx_rmap - free the CPU reverse map, if one was set up
 * @vsi: the VSI to be forwarded to
 */
void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;

	if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
		return;

	netdev = vsi->netdev;
	if (!netdev || !netdev->rx_cpu_rmap ||
	    netdev->reg_state != NETREG_REGISTERED)
		return;

	free_irq_cpu_rmap(netdev->rx_cpu_rmap);
	netdev->rx_cpu_rmap = NULL;
}

/**
 * ice_set_cpu_rx_rmap - set up the CPU reverse map for each queue
 * @vsi: the VSI to be forwarded to
 */
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;
	struct ice_pf *pf;
	int base_idx, i;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return -EINVAL;

	pf = vsi->back;
	netdev = vsi->netdev;
	if (!pf || !netdev || !vsi->num_q_vectors ||
	    vsi->netdev->reg_state != NETREG_REGISTERED)
		return -EINVAL;

	netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
		   vsi->type, netdev->name, vsi->num_q_vectors);

	netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
	if (unlikely(!netdev->rx_cpu_rmap))
		return -EINVAL;

	base_idx = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++)
		if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
				     pf->msix_entries[base_idx + i].vector)) {
			ice_free_cpu_rx_rmap(vsi);
			return -EINVAL;
		}

	return 0;
}

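/* Both rmap helpers above rely on the IRQ CPU affinity reverse-map API
 * from <linux/cpu_rmap.h> (alloc_irq_cpu_rmap(), irq_cpu_rmap_add(),
 * free_irq_cpu_rmap()), which is only available when CONFIG_RFS_ACCEL is
 * enabled; irq_cpu_rmap_add() also registers an affinity notifier per
 * vector so the map follows IRQs as they migrate between CPUs.
 */
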
/**
 * ice_remove_arfs - remove/clear all aRFS resources
 * @pf: device private structure
 */
void ice_remove_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_free_cpu_rx_rmap(pf_vsi);
	ice_clear_arfs(pf_vsi);
}

/**
 * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset
 * @pf: device private structure
 */
void ice_rebuild_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_remove_arfs(pf);
	if (ice_set_cpu_rx_rmap(pf_vsi)) {
		dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
		return;
	}
	ice_init_arfs(pf_vsi);
}