// SPDX-License-Identifier: GPL-2.0
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/flow_dissector.h>
#include "enic_res.h"
#include "enic_clsf.h"

13*4882a593Smuzhiyun /* enic_addfltr_5t - Add ipv4 5tuple filter
14*4882a593Smuzhiyun * @enic: enic struct of vnic
15*4882a593Smuzhiyun * @keys: flow_keys of ipv4 5tuple
16*4882a593Smuzhiyun * @rq: rq number to steer to
17*4882a593Smuzhiyun *
18*4882a593Smuzhiyun * This function returns filter_id(hardware_id) of the filter
19*4882a593Smuzhiyun * added. In case of error it returns a negative number.
20*4882a593Smuzhiyun */
enic_addfltr_5t(struct enic * enic,struct flow_keys * keys,u16 rq)21*4882a593Smuzhiyun int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
22*4882a593Smuzhiyun {
23*4882a593Smuzhiyun int res;
24*4882a593Smuzhiyun struct filter data;
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun switch (keys->basic.ip_proto) {
27*4882a593Smuzhiyun case IPPROTO_TCP:
28*4882a593Smuzhiyun data.u.ipv4.protocol = PROTO_TCP;
29*4882a593Smuzhiyun break;
30*4882a593Smuzhiyun case IPPROTO_UDP:
31*4882a593Smuzhiyun data.u.ipv4.protocol = PROTO_UDP;
32*4882a593Smuzhiyun break;
33*4882a593Smuzhiyun default:
34*4882a593Smuzhiyun return -EPROTONOSUPPORT;
35*4882a593Smuzhiyun }
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun data.type = FILTER_IPV4_5TUPLE;
38*4882a593Smuzhiyun data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src);
39*4882a593Smuzhiyun data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst);
40*4882a593Smuzhiyun data.u.ipv4.src_port = ntohs(keys->ports.src);
41*4882a593Smuzhiyun data.u.ipv4.dst_port = ntohs(keys->ports.dst);
42*4882a593Smuzhiyun data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun spin_lock_bh(&enic->devcmd_lock);
45*4882a593Smuzhiyun res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data);
46*4882a593Smuzhiyun spin_unlock_bh(&enic->devcmd_lock);
47*4882a593Smuzhiyun res = (res == 0) ? rq : res;
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun return res;
50*4882a593Smuzhiyun }
52*4882a593Smuzhiyun /* enic_delfltr - Delete clsf filter
53*4882a593Smuzhiyun * @enic: enic struct of vnic
54*4882a593Smuzhiyun * @filter_id: filter_is(hardware_id) of filter to be deleted
55*4882a593Smuzhiyun *
56*4882a593Smuzhiyun * This function returns zero in case of success, negative number incase of
57*4882a593Smuzhiyun * error.
58*4882a593Smuzhiyun */
enic_delfltr(struct enic * enic,u16 filter_id)59*4882a593Smuzhiyun int enic_delfltr(struct enic *enic, u16 filter_id)
60*4882a593Smuzhiyun {
61*4882a593Smuzhiyun int ret;
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun spin_lock_bh(&enic->devcmd_lock);
64*4882a593Smuzhiyun ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL);
65*4882a593Smuzhiyun spin_unlock_bh(&enic->devcmd_lock);
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun return ret;
68*4882a593Smuzhiyun }
70*4882a593Smuzhiyun /* enic_rfs_flw_tbl_init - initialize enic->rfs_h members
71*4882a593Smuzhiyun * @enic: enic data
72*4882a593Smuzhiyun */
enic_rfs_flw_tbl_init(struct enic * enic)73*4882a593Smuzhiyun void enic_rfs_flw_tbl_init(struct enic *enic)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun int i;
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun spin_lock_init(&enic->rfs_h.lock);
78*4882a593Smuzhiyun for (i = 0; i <= ENIC_RFS_FLW_MASK; i++)
79*4882a593Smuzhiyun INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]);
80*4882a593Smuzhiyun enic->rfs_h.max = enic->config.num_arfs;
81*4882a593Smuzhiyun enic->rfs_h.free = enic->rfs_h.max;
82*4882a593Smuzhiyun enic->rfs_h.toclean = 0;
83*4882a593Smuzhiyun }
enic_rfs_flw_tbl_free(struct enic * enic)85*4882a593Smuzhiyun void enic_rfs_flw_tbl_free(struct enic *enic)
86*4882a593Smuzhiyun {
87*4882a593Smuzhiyun int i;
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun enic_rfs_timer_stop(enic);
90*4882a593Smuzhiyun spin_lock_bh(&enic->rfs_h.lock);
91*4882a593Smuzhiyun for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
92*4882a593Smuzhiyun struct hlist_head *hhead;
93*4882a593Smuzhiyun struct hlist_node *tmp;
94*4882a593Smuzhiyun struct enic_rfs_fltr_node *n;
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun hhead = &enic->rfs_h.ht_head[i];
97*4882a593Smuzhiyun hlist_for_each_entry_safe(n, tmp, hhead, node) {
98*4882a593Smuzhiyun enic_delfltr(enic, n->fltr_id);
99*4882a593Smuzhiyun hlist_del(&n->node);
100*4882a593Smuzhiyun kfree(n);
101*4882a593Smuzhiyun enic->rfs_h.free++;
102*4882a593Smuzhiyun }
103*4882a593Smuzhiyun }
104*4882a593Smuzhiyun spin_unlock_bh(&enic->rfs_h.lock);
105*4882a593Smuzhiyun }
/* htbl_fltr_search - look up a flow-table node by hardware filter id
 * @enic: enic data
 * @fltr_id: hardware filter id to search for
 *
 * Walks every bucket of the RFS flow table and returns the node whose
 * fltr_id matches, or NULL if none is tracked.
 *
 * NOTE(review): no locking here; presumably callers hold
 * enic->rfs_h.lock — confirm against call sites.
 */
struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
{
	int i;

	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
		struct enic_rfs_fltr_node *n;

		/* nothing is removed during the walk, so the plain
		 * (non-_safe) iterator is sufficient
		 */
		hlist_for_each_entry(n, &enic->rfs_h.ht_head[i], node)
			if (n->fltr_id == fltr_id)
				return n;
	}

	return NULL;
}
124*4882a593Smuzhiyun
#ifdef CONFIG_RFS_ACCEL
/* enic_flow_may_expire - periodic timer callback that ages out RFS filters
 * @t: timer; embedded in enic->rfs_h, used to recover the enic
 *
 * Scans ENIC_CLSF_EXPIRE_COUNT buckets per run, asks the RPS core whether
 * each flow may expire, and if so deletes the hardware filter and frees
 * the node.  Re-arms itself to run again in HZ/4 jiffies.
 */
void enic_flow_may_expire(struct timer_list *t)
{
	struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
	bool res;
	int j;

	spin_lock_bh(&enic->rfs_h.lock);
	for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		/* NOTE(review): toclean is post-incremented with no masking;
		 * presumably its field type wraps at the table size
		 * (ENIC_RFS_FLW_MASK + 1 buckets) — confirm the declaration
		 * in enic.h, otherwise this indexes out of bounds.
		 */
		hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			res = rps_may_expire_flow(enic->netdev, n->rq_id,
						  n->flow_id, n->fltr_id);
			if (res) {
				res = enic_delfltr(enic, n->fltr_id);
				/* hardware delete failed: keep the node so a
				 * later scan can retry
				 */
				if (unlikely(res))
					continue;
				hlist_del(&n->node);
				kfree(n);
				enic->rfs_h.free++;
			}
		}
	}
	spin_unlock_bh(&enic->rfs_h.lock);
	mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}
htbl_key_search(struct hlist_head * h,struct flow_keys * k)156*4882a593Smuzhiyun static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
157*4882a593Smuzhiyun struct flow_keys *k)
158*4882a593Smuzhiyun {
159*4882a593Smuzhiyun struct enic_rfs_fltr_node *tpos;
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun hlist_for_each_entry(tpos, h, node)
162*4882a593Smuzhiyun if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src &&
163*4882a593Smuzhiyun tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst &&
164*4882a593Smuzhiyun tpos->keys.ports.ports == k->ports.ports &&
165*4882a593Smuzhiyun tpos->keys.basic.ip_proto == k->basic.ip_proto &&
166*4882a593Smuzhiyun tpos->keys.basic.n_proto == k->basic.n_proto)
167*4882a593Smuzhiyun return tpos;
168*4882a593Smuzhiyun return NULL;
169*4882a593Smuzhiyun }
/* enic_rx_flow_steer - ndo_rx_flow_steer callback: steer a flow to an RQ
 * @dev: net device
 * @skb: packet whose flow is being steered
 * @rxq_index: desired receive queue
 * @flow_id: RPS flow id for later rps_may_expire_flow() checks
 *
 * Only ipv4 TCP/UDP flows are supported.  Returns the hardware filter id
 * on success, a negative errno on failure.
 */
int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct flow_keys keys;
	struct enic_rfs_fltr_node *n;
	struct enic *enic;
	u16 tbl_idx;
	int res, i;

	enic = netdev_priv(dev);
	res = skb_flow_dissect_flow_keys(skb, &keys, 0);
	if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
	    (keys.basic.ip_proto != IPPROTO_TCP &&
	     keys.basic.ip_proto != IPPROTO_UDP))
		return -EPROTONOSUPPORT;

	tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
	spin_lock_bh(&enic->rfs_h.lock);
	n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);

	if (n) { /* entry already present */
		if (rxq_index == n->rq_id) {
			res = -EEXIST;
			goto ret_unlock;
		}

		/* desired rq changed for the flow, we need to delete
		 * old fltr and add new one
		 *
		 * The moment we delete the fltr, the upcoming pkts
		 * are put it default rq based on rss. When we add
		 * new filter, upcoming pkts are put in desired queue.
		 * This could cause ooo pkts.
		 *
		 * Lets 1st try adding new fltr and then del old one.
		 */
		/* tentatively reserve a slot; restored on every failure path */
		i = --enic->rfs_h.free;
		/* clsf tbl is full, we have to del old fltr first*/
		if (unlikely(i < 0)) {
			enic->rfs_h.free++;
			res = enic_delfltr(enic, n->fltr_id);
			if (unlikely(res < 0))
				goto ret_unlock;
			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				/* NOTE(review): n is unlinked here but never
				 * kfree()d on this path — looks like a leak;
				 * confirm nothing else still references it.
				 */
				hlist_del(&n->node);
				enic->rfs_h.free++;
				goto ret_unlock;
			}
			/* add new fltr 1st then del old fltr */
		} else {
			int ret;

			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				enic->rfs_h.free++;
				goto ret_unlock;
			}
			ret = enic_delfltr(enic, n->fltr_id);
			/* deleting old fltr failed. Add old fltr to list.
			 * enic_flow_may_expire() will try to delete it later.
			 */
			if (unlikely(ret < 0)) {
				struct enic_rfs_fltr_node *d;
				struct hlist_head *head;

				head = &enic->rfs_h.ht_head[tbl_idx];
				d = kmalloc(sizeof(*d), GFP_ATOMIC);
				if (d) {
					/* NOTE(review): only fltr_id is set;
					 * rq_id/flow_id/keys stay
					 * uninitialized, yet
					 * enic_flow_may_expire() reads
					 * rq_id and flow_id — verify this is
					 * harmless.
					 */
					d->fltr_id = n->fltr_id;
					INIT_HLIST_NODE(&d->node);
					hlist_add_head(&d->node, head);
				}
			} else {
				enic->rfs_h.free++;
			}
		}
		/* reuse the existing node for the new filter/queue */
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
	/* entry not present */
	} else {
		i = --enic->rfs_h.free;
		/* NOTE(review): this branch rejects at i <= 0 while the
		 * branch above allows i == 0 — the last free slot is never
		 * usable for a new flow; confirm whether this asymmetry is
		 * intentional.
		 */
		if (i <= 0) {
			enic->rfs_h.free++;
			res = -EBUSY;
			goto ret_unlock;
		}

		n = kmalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			res = -ENOMEM;
			enic->rfs_h.free++;
			goto ret_unlock;
		}

		res = enic_addfltr_5t(enic, &keys, rxq_index);
		if (res < 0) {
			kfree(n);
			enic->rfs_h.free++;
			goto ret_unlock;
		}
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
		n->keys = keys;
		INIT_HLIST_NODE(&n->node);
		hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]);
	}

ret_unlock:
	spin_unlock_bh(&enic->rfs_h.lock);
	return res;
}
#endif /* CONFIG_RFS_ACCEL */