xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/netronome/nfp/flower/metadata.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2*4882a593Smuzhiyun /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun #include <linux/hash.h>
5*4882a593Smuzhiyun #include <linux/hashtable.h>
6*4882a593Smuzhiyun #include <linux/jhash.h>
7*4882a593Smuzhiyun #include <linux/math64.h>
8*4882a593Smuzhiyun #include <linux/vmalloc.h>
9*4882a593Smuzhiyun #include <net/pkt_cls.h>
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include "cmsg.h"
12*4882a593Smuzhiyun #include "main.h"
13*4882a593Smuzhiyun #include "../nfp_app.h"
14*4882a593Smuzhiyun 
/* Entry in the mask-id hash table: maps the jhash of a mask blob to the
 * mask id currently allocated for it, with a use count so identical masks
 * are shared between flows.
 */
struct nfp_mask_id_table {
	struct hlist_node link;		/* membership in priv->mask_table */
	u32 hash_key;			/* jhash of the mask data */
	u32 ref_cnt;			/* number of flows using this mask */
	u8 mask_id;			/* allocated mask id */
};
21*4882a593Smuzhiyun 
/* Lookup key for the flow table: a flow is identified by its TC cookie
 * together with the ingress net_device it was installed on.
 */
struct nfp_fl_flow_table_cmp_arg {
	struct net_device *netdev;	/* ingress device of the flow */
	unsigned long cookie;		/* TC flower cookie */
};
26*4882a593Smuzhiyun 
/* rhashtable entry mapping a stats context id back to its flow payload,
 * so the owning flow can be found from a stats context.
 */
struct nfp_fl_stats_ctx_to_flow {
	struct rhash_head ht_node;	/* membership in priv->stats_ctx_table */
	u32 stats_cxt;			/* stats context id (hash key) */
	struct nfp_fl_payload *flow;	/* flow owning the context */
};
32*4882a593Smuzhiyun 
/* stats_ctx_table is keyed directly on the u32 stats context id. */
static const struct rhashtable_params stats_ctx_table_params = {
	.key_offset	= offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
	.head_offset	= offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
	.key_len	= sizeof(u32),
};
38*4882a593Smuzhiyun 
/* Return a stats context id to the free-list ring buffer.
 *
 * Return: 0 on success, -ENOBUFS if the ring is unexpectedly full.
 */
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	/* Check if buffer is full. */
	if (!CIRC_SPACE(ring->head, ring->tail,
			priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
			NFP_FL_STATS_ELEM_RS + 1))
		return -ENOBUFS;

	/* head/tail advance in NFP_FL_STATS_ELEM_RS byte steps and wrap at
	 * the ring size.
	 */
	memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
	ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}
57*4882a593Smuzhiyun 
/* Obtain a free stats context id for a new flow.
 *
 * Never-used ids are handed out first, striping each stat index across
 * the firmware memory units before consuming the next index; once that
 * pool is exhausted, ids are recycled from the free-list ring buffer.
 *
 * Return: 0 on success, -ENOENT if no id is available (in which case
 * *stats_context_id is set to the out-of-range sentinel value).
 */
static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	u32 freed_stats_id, temp_stats_id;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	freed_stats_id = priv->stats_ring_size;
	/* Check for unallocated entries first. */
	if (priv->stats_ids.init_unalloc > 0) {
		*stats_context_id =
			FIELD_PREP(NFP_FL_STAT_ID_STAT,
				   priv->stats_ids.init_unalloc - 1) |
			FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
				   priv->active_mem_unit);

		/* Only consume the stat index once every memory unit has
		 * been handed an id at that index.
		 */
		if (++priv->active_mem_unit == priv->total_mem_units) {
			priv->stats_ids.init_unalloc--;
			priv->active_mem_unit = 0;
		}

		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail) {
		*stats_context_id = freed_stats_id;
		return -ENOENT;
	}

	/* Pop an id off the tail, overwriting the slot with the sentinel. */
	memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
	*stats_context_id = temp_stats_id;
	memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
	ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun /* Must be called with either RTNL or rcu_read_lock */
98*4882a593Smuzhiyun struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app * app,unsigned long tc_flower_cookie,struct net_device * netdev)99*4882a593Smuzhiyun nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
100*4882a593Smuzhiyun 			   struct net_device *netdev)
101*4882a593Smuzhiyun {
102*4882a593Smuzhiyun 	struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
103*4882a593Smuzhiyun 	struct nfp_flower_priv *priv = app->priv;
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun 	flower_cmp_arg.netdev = netdev;
106*4882a593Smuzhiyun 	flower_cmp_arg.cookie = tc_flower_cookie;
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun 	return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
109*4882a593Smuzhiyun 				      nfp_flower_table_params);
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun 
/* Process a flow-stats control message from the firmware, accumulating
 * per-context packet/byte counts into priv->stats[] under stats_lock.
 */
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_stats_frame *stats;
	unsigned char *msg;
	u32 ctx_id;
	int i;

	msg = nfp_flower_cmsg_get_data(skb);

	spin_lock(&priv->stats_lock);
	/* The message body is a packed array of stats frames. */
	for (i = 0; i < msg_len / sizeof(*stats); i++) {
		stats = (struct nfp_fl_stats_frame *)msg + i;
		ctx_id = be32_to_cpu(stats->stats_con_id);
		/* NOTE(review): ctx_id comes straight from the firmware and
		 * indexes priv->stats[] unchecked — this relies on the
		 * firmware only reporting ids the driver handed out; confirm
		 * bounds if that channel is not fully trusted.
		 */
		priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
		priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
		priv->stats[ctx_id].used = jiffies;
	}
	spin_unlock(&priv->stats_lock);
}
133*4882a593Smuzhiyun 
/* Return a mask id to the free-list ring buffer and timestamp it so it is
 * not re-issued until the reuse hold-off has expired (see nfp_mask_alloc()).
 *
 * Return: 0 on success, -ENOBUFS if the ring is unexpectedly full.
 */
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->mask_ids.mask_id_free_list;
	/* Checking if buffer is full. */
	if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	/* Record the release time to enforce the reuse hold-off. */
	priv->mask_ids.last_used[mask_id] = ktime_get();

	return 0;
}
152*4882a593Smuzhiyun 
/* Allocate a free mask id.
 *
 * Never-used ids are handed out first; after that, released ids are
 * recycled from the ring buffer, but only once the per-id reuse hold-off
 * (NFP_FL_MASK_REUSE_TIME_NS since release) has elapsed.
 *
 * Return: 0 on success, -ENOENT if no id is currently available (in which
 * case *mask_id is set to the sentinel id).
 */
static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	ktime_t reuse_timeout;
	struct circ_buf *ring;
	u8 temp_id, freed_id;

	ring = &priv->mask_ids.mask_id_free_list;
	freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
	/* Checking for unallocated entries first. */
	if (priv->mask_ids.init_unallocated > 0) {
		*mask_id = priv->mask_ids.init_unallocated;
		priv->mask_ids.init_unallocated--;
		return 0;
	}

	/* Checking if buffer is empty. */
	if (ring->head == ring->tail)
		goto err_not_found;

	/* Peek at the oldest released id at the tail. */
	memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
	*mask_id = temp_id;

	reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
				     NFP_FL_MASK_REUSE_TIME_NS);

	/* Too soon to reuse the oldest released id; fail the allocation. */
	if (ktime_before(ktime_get(), reuse_timeout))
		goto err_not_found;

	/* Consume the slot: overwrite it with the sentinel id and advance. */
	memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	return 0;

err_not_found:
	*mask_id = freed_id;
	return -ENOENT;
}
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun static int
nfp_add_mask_table(struct nfp_app * app,char * mask_data,u32 mask_len)194*4882a593Smuzhiyun nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
195*4882a593Smuzhiyun {
196*4882a593Smuzhiyun 	struct nfp_flower_priv *priv = app->priv;
197*4882a593Smuzhiyun 	struct nfp_mask_id_table *mask_entry;
198*4882a593Smuzhiyun 	unsigned long hash_key;
199*4882a593Smuzhiyun 	u8 mask_id;
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun 	if (nfp_mask_alloc(app, &mask_id))
202*4882a593Smuzhiyun 		return -ENOENT;
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun 	mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
205*4882a593Smuzhiyun 	if (!mask_entry) {
206*4882a593Smuzhiyun 		nfp_release_mask_id(app, mask_id);
207*4882a593Smuzhiyun 		return -ENOMEM;
208*4882a593Smuzhiyun 	}
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun 	INIT_HLIST_NODE(&mask_entry->link);
211*4882a593Smuzhiyun 	mask_entry->mask_id = mask_id;
212*4882a593Smuzhiyun 	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
213*4882a593Smuzhiyun 	mask_entry->hash_key = hash_key;
214*4882a593Smuzhiyun 	mask_entry->ref_cnt = 1;
215*4882a593Smuzhiyun 	hash_add(priv->mask_table, &mask_entry->link, hash_key);
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 	return mask_id;
218*4882a593Smuzhiyun }
219*4882a593Smuzhiyun 
/* Find the mask table entry for @mask_data, or NULL if none exists.
 *
 * Note: entries are matched purely on the 32-bit jhash of the mask data;
 * the mask bytes themselves are not re-compared, so distinct masks whose
 * hashes collide would be treated as identical.
 */
static struct nfp_mask_id_table *
nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;

	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);

	hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
		if (mask_entry->hash_key == hash_key)
			return mask_entry;

	return NULL;
}
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun static int
nfp_find_in_mask_table(struct nfp_app * app,char * mask_data,u32 mask_len)237*4882a593Smuzhiyun nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
238*4882a593Smuzhiyun {
239*4882a593Smuzhiyun 	struct nfp_mask_id_table *mask_entry;
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun 	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
242*4882a593Smuzhiyun 	if (!mask_entry)
243*4882a593Smuzhiyun 		return -ENOENT;
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun 	mask_entry->ref_cnt++;
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun 	/* Casting u8 to int for later use. */
248*4882a593Smuzhiyun 	return mask_entry->mask_id;
249*4882a593Smuzhiyun }
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun static bool
nfp_check_mask_add(struct nfp_app * app,char * mask_data,u32 mask_len,u8 * meta_flags,u8 * mask_id)252*4882a593Smuzhiyun nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
253*4882a593Smuzhiyun 		   u8 *meta_flags, u8 *mask_id)
254*4882a593Smuzhiyun {
255*4882a593Smuzhiyun 	int id;
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	id = nfp_find_in_mask_table(app, mask_data, mask_len);
258*4882a593Smuzhiyun 	if (id < 0) {
259*4882a593Smuzhiyun 		id = nfp_add_mask_table(app, mask_data, mask_len);
260*4882a593Smuzhiyun 		if (id < 0)
261*4882a593Smuzhiyun 			return false;
262*4882a593Smuzhiyun 		*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
263*4882a593Smuzhiyun 	}
264*4882a593Smuzhiyun 	*mask_id = id;
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	return true;
267*4882a593Smuzhiyun }
268*4882a593Smuzhiyun 
/* Drop a reference on the mask entry matching @mask_data.  When the last
 * reference goes, the entry is freed, its id released, and (if @meta_flags
 * is non-NULL) NFP_FL_META_FLAG_MANAGE_MASK is set so the caller knows the
 * firmware must be told to remove the mask.
 *
 * Return: true with *mask_id set if a matching entry was found, false
 * otherwise.
 */
static bool
nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
		      u8 *meta_flags, u8 *mask_id)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return false;

	*mask_id = mask_entry->mask_id;
	mask_entry->ref_cnt--;
	if (!mask_entry->ref_cnt) {
		/* Last user: unlink, release the id, then free the entry. */
		hash_del(&mask_entry->link);
		nfp_release_mask_id(app, *mask_id);
		kfree(mask_entry);
		if (meta_flags)
			*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}

	return true;
}
291*4882a593Smuzhiyun 
nfp_compile_flow_metadata(struct nfp_app * app,struct flow_cls_offload * flow,struct nfp_fl_payload * nfp_flow,struct net_device * netdev,struct netlink_ext_ack * extack)292*4882a593Smuzhiyun int nfp_compile_flow_metadata(struct nfp_app *app,
293*4882a593Smuzhiyun 			      struct flow_cls_offload *flow,
294*4882a593Smuzhiyun 			      struct nfp_fl_payload *nfp_flow,
295*4882a593Smuzhiyun 			      struct net_device *netdev,
296*4882a593Smuzhiyun 			      struct netlink_ext_ack *extack)
297*4882a593Smuzhiyun {
298*4882a593Smuzhiyun 	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
299*4882a593Smuzhiyun 	struct nfp_flower_priv *priv = app->priv;
300*4882a593Smuzhiyun 	struct nfp_fl_payload *check_entry;
301*4882a593Smuzhiyun 	u8 new_mask_id;
302*4882a593Smuzhiyun 	u32 stats_cxt;
303*4882a593Smuzhiyun 	int err;
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 	err = nfp_get_stats_entry(app, &stats_cxt);
306*4882a593Smuzhiyun 	if (err) {
307*4882a593Smuzhiyun 		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate new stats context");
308*4882a593Smuzhiyun 		return err;
309*4882a593Smuzhiyun 	}
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun 	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
312*4882a593Smuzhiyun 	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
313*4882a593Smuzhiyun 	nfp_flow->ingress_dev = netdev;
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun 	ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
316*4882a593Smuzhiyun 	if (!ctx_entry) {
317*4882a593Smuzhiyun 		err = -ENOMEM;
318*4882a593Smuzhiyun 		goto err_release_stats;
319*4882a593Smuzhiyun 	}
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	ctx_entry->stats_cxt = stats_cxt;
322*4882a593Smuzhiyun 	ctx_entry->flow = nfp_flow;
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
325*4882a593Smuzhiyun 				   stats_ctx_table_params)) {
326*4882a593Smuzhiyun 		err = -ENOMEM;
327*4882a593Smuzhiyun 		goto err_free_ctx_entry;
328*4882a593Smuzhiyun 	}
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun 	/* Do net allocate a mask-id for pre_tun_rules. These flows are used to
331*4882a593Smuzhiyun 	 * configure the pre_tun table and are never actually send to the
332*4882a593Smuzhiyun 	 * firmware as an add-flow message. This causes the mask-id allocation
333*4882a593Smuzhiyun 	 * on the firmware to get out of sync if allocated here.
334*4882a593Smuzhiyun 	 */
335*4882a593Smuzhiyun 	new_mask_id = 0;
336*4882a593Smuzhiyun 	if (!nfp_flow->pre_tun_rule.dev &&
337*4882a593Smuzhiyun 	    !nfp_check_mask_add(app, nfp_flow->mask_data,
338*4882a593Smuzhiyun 				nfp_flow->meta.mask_len,
339*4882a593Smuzhiyun 				&nfp_flow->meta.flags, &new_mask_id)) {
340*4882a593Smuzhiyun 		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
341*4882a593Smuzhiyun 		if (nfp_release_stats_entry(app, stats_cxt)) {
342*4882a593Smuzhiyun 			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
343*4882a593Smuzhiyun 			err = -EINVAL;
344*4882a593Smuzhiyun 			goto err_remove_rhash;
345*4882a593Smuzhiyun 		}
346*4882a593Smuzhiyun 		err = -ENOENT;
347*4882a593Smuzhiyun 		goto err_remove_rhash;
348*4882a593Smuzhiyun 	}
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun 	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
351*4882a593Smuzhiyun 	priv->flower_version++;
352*4882a593Smuzhiyun 
353*4882a593Smuzhiyun 	/* Update flow payload with mask ids. */
354*4882a593Smuzhiyun 	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
355*4882a593Smuzhiyun 	priv->stats[stats_cxt].pkts = 0;
356*4882a593Smuzhiyun 	priv->stats[stats_cxt].bytes = 0;
357*4882a593Smuzhiyun 	priv->stats[stats_cxt].used = jiffies;
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
360*4882a593Smuzhiyun 	if (check_entry) {
361*4882a593Smuzhiyun 		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
362*4882a593Smuzhiyun 		if (nfp_release_stats_entry(app, stats_cxt)) {
363*4882a593Smuzhiyun 			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
364*4882a593Smuzhiyun 			err = -EINVAL;
365*4882a593Smuzhiyun 			goto err_remove_mask;
366*4882a593Smuzhiyun 		}
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun 		if (!nfp_flow->pre_tun_rule.dev &&
369*4882a593Smuzhiyun 		    !nfp_check_mask_remove(app, nfp_flow->mask_data,
370*4882a593Smuzhiyun 					   nfp_flow->meta.mask_len,
371*4882a593Smuzhiyun 					   NULL, &new_mask_id)) {
372*4882a593Smuzhiyun 			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
373*4882a593Smuzhiyun 			err = -EINVAL;
374*4882a593Smuzhiyun 			goto err_remove_mask;
375*4882a593Smuzhiyun 		}
376*4882a593Smuzhiyun 
377*4882a593Smuzhiyun 		err = -EEXIST;
378*4882a593Smuzhiyun 		goto err_remove_mask;
379*4882a593Smuzhiyun 	}
380*4882a593Smuzhiyun 
381*4882a593Smuzhiyun 	return 0;
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun err_remove_mask:
384*4882a593Smuzhiyun 	if (!nfp_flow->pre_tun_rule.dev)
385*4882a593Smuzhiyun 		nfp_check_mask_remove(app, nfp_flow->mask_data,
386*4882a593Smuzhiyun 				      nfp_flow->meta.mask_len,
387*4882a593Smuzhiyun 				      NULL, &new_mask_id);
388*4882a593Smuzhiyun err_remove_rhash:
389*4882a593Smuzhiyun 	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
390*4882a593Smuzhiyun 					    &ctx_entry->ht_node,
391*4882a593Smuzhiyun 					    stats_ctx_table_params));
392*4882a593Smuzhiyun err_free_ctx_entry:
393*4882a593Smuzhiyun 	kfree(ctx_entry);
394*4882a593Smuzhiyun err_release_stats:
395*4882a593Smuzhiyun 	nfp_release_stats_entry(app, stats_cxt);
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun 	return err;
398*4882a593Smuzhiyun }
399*4882a593Smuzhiyun 
__nfp_modify_flow_metadata(struct nfp_flower_priv * priv,struct nfp_fl_payload * nfp_flow)400*4882a593Smuzhiyun void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
401*4882a593Smuzhiyun 				struct nfp_fl_payload *nfp_flow)
402*4882a593Smuzhiyun {
403*4882a593Smuzhiyun 	nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
404*4882a593Smuzhiyun 	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
405*4882a593Smuzhiyun 	priv->flower_version++;
406*4882a593Smuzhiyun }
407*4882a593Smuzhiyun 
/* Tear down the metadata of a flow being modified or removed: bump the
 * flow version, drop the mask reference, and release the stats context
 * along with its context->flow mapping.
 *
 * Return: 0 on success or a negative errno.
 */
int nfp_modify_flow_metadata(struct nfp_app *app,
			     struct nfp_fl_payload *nfp_flow)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;
	u8 new_mask_id = 0;
	u32 temp_ctx_id;

	__nfp_modify_flow_metadata(priv, nfp_flow);

	/* pre_tun_rules never allocated a mask id (see
	 * nfp_compile_flow_metadata()), so there is nothing to drop.
	 */
	if (!nfp_flow->pre_tun_rule.dev)
		nfp_check_mask_remove(app, nfp_flow->mask_data,
				      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
				      &new_mask_id);

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;

	/* Release the stats ctx id and ctx to flow table entry. */
	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
					   stats_ctx_table_params);
	if (!ctx_entry)
		return -ENOENT;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
					    &ctx_entry->ht_node,
					    stats_ctx_table_params));
	kfree(ctx_entry);

	return nfp_release_stats_entry(app, temp_ctx_id);
}
441*4882a593Smuzhiyun 
442*4882a593Smuzhiyun struct nfp_fl_payload *
nfp_flower_get_fl_payload_from_ctx(struct nfp_app * app,u32 ctx_id)443*4882a593Smuzhiyun nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
444*4882a593Smuzhiyun {
445*4882a593Smuzhiyun 	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
446*4882a593Smuzhiyun 	struct nfp_flower_priv *priv = app->priv;
447*4882a593Smuzhiyun 
448*4882a593Smuzhiyun 	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
449*4882a593Smuzhiyun 					   stats_ctx_table_params);
450*4882a593Smuzhiyun 	if (!ctx_entry)
451*4882a593Smuzhiyun 		return NULL;
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	return ctx_entry->flow;
454*4882a593Smuzhiyun }
455*4882a593Smuzhiyun 
nfp_fl_obj_cmpfn(struct rhashtable_compare_arg * arg,const void * obj)456*4882a593Smuzhiyun static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
457*4882a593Smuzhiyun 			    const void *obj)
458*4882a593Smuzhiyun {
459*4882a593Smuzhiyun 	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
460*4882a593Smuzhiyun 	const struct nfp_fl_payload *flow_entry = obj;
461*4882a593Smuzhiyun 
462*4882a593Smuzhiyun 	if (flow_entry->ingress_dev == cmp_arg->netdev)
463*4882a593Smuzhiyun 		return flow_entry->tc_flower_cookie != cmp_arg->cookie;
464*4882a593Smuzhiyun 
465*4882a593Smuzhiyun 	return 1;
466*4882a593Smuzhiyun }
467*4882a593Smuzhiyun 
/* rhashtable object-hash callback: hash a stored flow by its TC cookie
 * only, mirroring nfp_fl_key_hashfn() so object and key hashes agree.
 */
static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_payload *entry = data;
	const u32 *cookie_words = (const u32 *)&entry->tc_flower_cookie;

	return jhash2(cookie_words,
		      sizeof(entry->tc_flower_cookie) / sizeof(u32), seed);
}
476*4882a593Smuzhiyun 
/* rhashtable key-hash callback: hash only the cookie portion of the
 * lookup key, matching nfp_fl_obj_hashfn(); the netdev is checked in the
 * compare callback instead.
 */
static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_flow_table_cmp_arg *key = data;
	const u32 *cookie_words = (const u32 *)&key->cookie;

	return jhash2(cookie_words, sizeof(key->cookie) / sizeof(u32), seed);
}
484*4882a593Smuzhiyun 
/* Flow table parameters: the key (cookie + ingress netdev) spans two
 * fields of nfp_fl_payload, so custom hash and compare callbacks are used
 * instead of a flat key_offset/key_len.
 */
const struct rhashtable_params nfp_flower_table_params = {
	.head_offset		= offsetof(struct nfp_fl_payload, fl_node),
	.hashfn			= nfp_fl_key_hashfn,
	.obj_cmpfn		= nfp_fl_obj_cmpfn,
	.obj_hashfn		= nfp_fl_obj_hashfn,
	.automatic_shrinking	= true,
};
492*4882a593Smuzhiyun 
/* Merge table is keyed on the u64 parent_ctx of a merged flow. */
const struct rhashtable_params merge_table_params = {
	.key_offset	= offsetof(struct nfp_merge_info, parent_ctx),
	.head_offset	= offsetof(struct nfp_merge_info, ht_node),
	.key_len	= sizeof(u64),
};
498*4882a593Smuzhiyun 
/* Initialise all flower metadata state: the flow, stats-context and merge
 * rhashtables, the mask-id and stats-id free-list ring buffers, the
 * mask-id reuse timestamps and the stats array.
 *
 * @host_ctx_count: stats contexts per firmware memory unit.
 * @host_num_mems: number of firmware memory units.
 *
 * Return: 0 on success, negative errno on failure; partially initialised
 * state is unwound via the goto chain.
 */
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
			     unsigned int host_num_mems)
{
	struct nfp_flower_priv *priv = app->priv;
	int err, stats_size;

	hash_init(priv->mask_table);

	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
	if (err)
		return err;

	err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
	if (err)
		goto err_free_flow_table;

	err = rhashtable_init(&priv->merge_table, &merge_table_params);
	if (err)
		goto err_free_stats_ctx_table;

	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

	/* Init ring buffer and unallocated mask_ids. */
	priv->mask_ids.mask_id_free_list.buf =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
	if (!priv->mask_ids.mask_id_free_list.buf)
		goto err_free_merge_table;

	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

	/* Init timestamps for mask id*/
	priv->mask_ids.last_used =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
	if (!priv->mask_ids.last_used)
		goto err_free_mask_id;

	/* Init ring buffer and unallocated stats_ids. */
	priv->stats_ids.free_list.buf =
		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
				   priv->stats_ring_size));
	if (!priv->stats_ids.free_list.buf)
		goto err_free_last_used;

	priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);

	/* Size the stats array to cover the largest possible id, which
	 * combines the stat index and memory-unit bit fields.
	 */
	stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
		     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
	priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
				     GFP_KERNEL);
	if (!priv->stats)
		goto err_free_ring_buf;

	spin_lock_init(&priv->stats_lock);

	return 0;

err_free_ring_buf:
	vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
	kfree(priv->mask_ids.last_used);
err_free_mask_id:
	kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_merge_table:
	rhashtable_destroy(&priv->merge_table);
err_free_stats_ctx_table:
	rhashtable_destroy(&priv->stats_ctx_table);
err_free_flow_table:
	rhashtable_destroy(&priv->flow_table);
	return -ENOMEM;
}
571*4882a593Smuzhiyun 
/* Free all metadata state created by nfp_flower_metadata_init().
 * The rhashtables are expected to be empty by this point
 * (nfp_check_rhashtable_empty warns otherwise).
 */
void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv)
		return;

	rhashtable_free_and_destroy(&priv->flow_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->stats_ctx_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->merge_table,
				    nfp_check_rhashtable_empty, NULL);
	kvfree(priv->stats);
	kfree(priv->mask_ids.mask_id_free_list.buf);
	kfree(priv->mask_ids.last_used);
	vfree(priv->stats_ids.free_list.buf);
}
590