// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */
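
/*
 * Example use (illustrative only; device names, handles and values are
 * hypothetical, not taken from this file): tcindex is typically paired
 * with a qdisc such as dsmark, which stores the DS field in
 * skb->tc_index, e.g.
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		tcindex mask 0xfc shift 2
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		handle 0x2e tcindex classid 1:1
 *
 * The first command sets mask/shift so the lookup key becomes the DSCP;
 * the second maps DSCP 0x2e (EF) to class 1:1.
 */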

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */


struct tcindex_data;

struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
	struct tcindex_data	*p;
	struct rcu_work		rwork;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_work rwork;
};

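/*
 * Per-classifier state. Small key spaces use a "perfect" hash: an array
 * of results indexed directly by the key. Larger key spaces fall back to
 * an "imperfect" hash: an array of RCU-protected bucket chains indexed by
 * key % hash.
 */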
struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	refcount_t refcnt;	/* a temporary refcnt for perfect hash */
	struct rcu_work rwork;
};

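/* A result slot counts as "set" once it carries actions or a classid. */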
static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

static void tcindex_data_get(struct tcindex_data *p)
{
	refcount_inc(&p->refcnt);
}

static void tcindex_data_put(struct tcindex_data *p)
{
	if (refcount_dec_and_test(&p->refcnt)) {
		kfree(p->perfect);
		kfree(p->h);
		kfree(p);
	}
}

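/*
 * Look up a key: direct array index for the perfect hash, otherwise walk
 * the RCU-protected chain of the bucket the key hashes into.
 */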
static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}


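/*
 * Classify on key = (skb->tc_index & mask) >> shift. With fall_through
 * set and no matching filter, the key itself is used as the minor number
 * of the returned classid.
 */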
static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}


static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;
	refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */

	rcu_assign_pointer(tp->root, p);
	return 0;
}

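/*
 * Destruction helpers. When tcf_exts_get_net() succeeds, the extensions
 * hold netns references and destruction is deferred via tcf_queue_work()
 * so it runs after an RCU grace period and under RTNL; otherwise the
 * result is destroyed immediately.
 */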
static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
	tcindex_data_put(r->p);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(to_rcu_work(work),
			 struct tcindex_filter_result,
			 rwork);
	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(to_rcu_work(work),
						struct tcindex_filter,
						rwork);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}

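/*
 * Delete one filter result. Perfect-hash entries are cleared in place;
 * imperfect-hash entries are unlinked from their bucket chain and freed
 * once readers can no longer see them.
 */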
static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
		else
			__tcindex_destroy_fexts(f);
	} else {
		tcindex_data_get(p);

		if (tcf_exts_get_net(&r->exts))
			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}

static void tcindex_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	tcindex_data_put(p);
}

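/*
 * A perfect hash is only valid if the table can index every possible
 * key, i.e. its size exceeds the largest value of
 * (tc_index & mask) >> shift.
 */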
static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

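/* Netlink attribute policy for the TCA_TCINDEX_* options. */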
static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r,
				      struct tcindex_data *p,
				      struct net *net)
{
	memset(r, 0, sizeof(*r));
	r->p = p;
	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
			     TCA_TCINDEX_POLICE);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp);

static void tcindex_partial_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	rtnl_lock();
	if (p->perfect)
		tcindex_free_perfect_hash(p);
	kfree(p);
	rtnl_unlock();
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL | __GFP_NOWARN);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts, net,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
		cp->perfect[i].p = cp;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

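/*
 * Update parameters. Lookups must see a consistent view, so a new
 * tcindex_data is built as a copy of the old one, modified, and then
 * RCU-assigned onto tp->root; the old copy is freed after a grace
 * period via tcindex_partial_destroy_work().
 */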
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_result cr = {};
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;
	refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT]) {
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
		if (cp->shift > 16) {
			err = -EINVAL;
			goto errout;
		}
	}
	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(net, cp) < 0)
			goto errout;
		cp->alloc_hash = cp->hash;
		for (i = 0; i < min(cp->hash, p->hash); i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result, cp, net);
	if (err < 0)
		goto errout_alloc;
	if (old_r)
		cr = r->res;

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;


	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(net, cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr, base);
	}

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr;
	tcf_exts_change(&r->exts, &e);

	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
				; /* nothing */

		rcu_assign_pointer(*fp, f);
	} else {
		tcf_exts_destroy(&new_filter_result.exts);
	}

	if (oldp)
		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

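/* Parse TCA_OPTIONS and apply the change via tcindex_set_parms(). */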
static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, bool ovr,
	       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
	    "p %p,r %p,*arg %p\n",
	    tp, handle, tca, arg, opt, p, r, *arg);

	if (!opt)
		return 0;

	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
					  tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr, extack);
}

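/*
 * Walk all set results, honouring walker->skip/count and stopping when
 * the callback returns a negative value.
 */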
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
			 bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

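/*
 * Tear down the whole classifier. Perfect-hash results each take a
 * temporary reference on 'p' so the tcindex_data is freed only after
 * every deferred result destruction has run.
 */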
static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	int i;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);

	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			struct tcindex_filter_result *r = p->perfect + i;

			/* tcf_queue_work() does not guarantee the ordering we
			 * want, so we have to take this refcnt temporarily to
			 * ensure 'p' is freed after all tcindex_filter_result
			 * here. Imperfect hash does not need this, because it
			 * uses linked lists rather than an array.
			 */
			tcindex_data_get(p);

			tcf_unbind_filter(tp, &r->res);
			if (tcf_exts_get_net(&r->exts))
				tcf_queue_work(&r->rwork,
					       tcindex_destroy_rexts_work);
			else
				__tcindex_destroy_rexts(r);
		}
	}

	for (i = 0; p->h && i < p->hash; i++) {
		struct tcindex_filter *f, *next;
		bool last;

		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
		}
	}

	tcf_queue_work(&p->rwork, tcindex_destroy_work);
}


static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

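/* Re-bind or unbind a result's class when its classid is (un)mapped. */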
static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &r->res, base);
		else
			__tcf_unbind_filter(q, &r->res);
	}
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		=	"tcindex",
	.classify	=	tcindex_classify,
	.init		=	tcindex_init,
	.destroy	=	tcindex_destroy,
	.get		=	tcindex_get,
	.change		=	tcindex_change,
	.delete		=	tcindex_delete,
	.walk		=	tcindex_walk,
	.dump		=	tcindex_dump,
	.bind_class	=	tcindex_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");