1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * net/sched/cls_api.c Packet classifier API.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Changes:
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
10*4882a593Smuzhiyun */
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/types.h>
14*4882a593Smuzhiyun #include <linux/kernel.h>
15*4882a593Smuzhiyun #include <linux/string.h>
16*4882a593Smuzhiyun #include <linux/errno.h>
17*4882a593Smuzhiyun #include <linux/err.h>
18*4882a593Smuzhiyun #include <linux/skbuff.h>
19*4882a593Smuzhiyun #include <linux/init.h>
20*4882a593Smuzhiyun #include <linux/kmod.h>
21*4882a593Smuzhiyun #include <linux/slab.h>
22*4882a593Smuzhiyun #include <linux/idr.h>
23*4882a593Smuzhiyun #include <linux/jhash.h>
24*4882a593Smuzhiyun #include <linux/rculist.h>
25*4882a593Smuzhiyun #include <net/net_namespace.h>
26*4882a593Smuzhiyun #include <net/sock.h>
27*4882a593Smuzhiyun #include <net/netlink.h>
28*4882a593Smuzhiyun #include <net/pkt_sched.h>
29*4882a593Smuzhiyun #include <net/pkt_cls.h>
30*4882a593Smuzhiyun #include <net/tc_act/tc_pedit.h>
31*4882a593Smuzhiyun #include <net/tc_act/tc_mirred.h>
32*4882a593Smuzhiyun #include <net/tc_act/tc_vlan.h>
33*4882a593Smuzhiyun #include <net/tc_act/tc_tunnel_key.h>
34*4882a593Smuzhiyun #include <net/tc_act/tc_csum.h>
35*4882a593Smuzhiyun #include <net/tc_act/tc_gact.h>
36*4882a593Smuzhiyun #include <net/tc_act/tc_police.h>
37*4882a593Smuzhiyun #include <net/tc_act/tc_sample.h>
38*4882a593Smuzhiyun #include <net/tc_act/tc_skbedit.h>
39*4882a593Smuzhiyun #include <net/tc_act/tc_ct.h>
40*4882a593Smuzhiyun #include <net/tc_act/tc_mpls.h>
41*4882a593Smuzhiyun #include <net/tc_act/tc_gate.h>
42*4882a593Smuzhiyun #include <net/flow_offload.h>
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun /* The list of all installed classifier types */
47*4882a593Smuzhiyun static LIST_HEAD(tcf_proto_base);
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun /* Protects list of registered TC modules. It is pure SMP lock. */
50*4882a593Smuzhiyun static DEFINE_RWLOCK(cls_mod_lock);
51*4882a593Smuzhiyun
/* Hash key for the block's proto_destroy_ht: a proto is identified by its
 * chain index, priority and protocol.
 */
static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}
57*4882a593Smuzhiyun
/* Record that @tp is being destroyed by inserting it into the block's
 * proto_destroy_ht under proto_destroy_lock, so concurrent lookups can
 * detect in-flight destruction (see tcf_proto_exists_destroying()).
 */
static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}
68*4882a593Smuzhiyun
tcf_proto_cmp(const struct tcf_proto * tp1,const struct tcf_proto * tp2)69*4882a593Smuzhiyun static bool tcf_proto_cmp(const struct tcf_proto *tp1,
70*4882a593Smuzhiyun const struct tcf_proto *tp2)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun return tp1->chain->index == tp2->chain->index &&
73*4882a593Smuzhiyun tp1->prio == tp2->prio &&
74*4882a593Smuzhiyun tp1->protocol == tp2->protocol;
75*4882a593Smuzhiyun }
76*4882a593Smuzhiyun
tcf_proto_exists_destroying(struct tcf_chain * chain,struct tcf_proto * tp)77*4882a593Smuzhiyun static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
78*4882a593Smuzhiyun struct tcf_proto *tp)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun u32 hash = destroy_obj_hashfn(tp);
81*4882a593Smuzhiyun struct tcf_proto *iter;
82*4882a593Smuzhiyun bool found = false;
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun rcu_read_lock();
85*4882a593Smuzhiyun hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
86*4882a593Smuzhiyun destroy_ht_node, hash) {
87*4882a593Smuzhiyun if (tcf_proto_cmp(tp, iter)) {
88*4882a593Smuzhiyun found = true;
89*4882a593Smuzhiyun break;
90*4882a593Smuzhiyun }
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun rcu_read_unlock();
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun return found;
95*4882a593Smuzhiyun }
96*4882a593Smuzhiyun
/* Remove @tp from the block's proto_destroy_ht once destruction has
 * completed. hash_hashed() guards against protos that were never signalled
 * as destroying.
 */
static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun /* Find classifier type by string name */
109*4882a593Smuzhiyun
__tcf_proto_lookup_ops(const char * kind)110*4882a593Smuzhiyun static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
111*4882a593Smuzhiyun {
112*4882a593Smuzhiyun const struct tcf_proto_ops *t, *res = NULL;
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun if (kind) {
115*4882a593Smuzhiyun read_lock(&cls_mod_lock);
116*4882a593Smuzhiyun list_for_each_entry(t, &tcf_proto_base, head) {
117*4882a593Smuzhiyun if (strcmp(kind, t->kind) == 0) {
118*4882a593Smuzhiyun if (try_module_get(t->owner))
119*4882a593Smuzhiyun res = t;
120*4882a593Smuzhiyun break;
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun read_unlock(&cls_mod_lock);
124*4882a593Smuzhiyun }
125*4882a593Smuzhiyun return res;
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun
/* Look up classifier ops by name, attempting a module autoload on miss.
 *
 * On a miss with CONFIG_MODULES, RTNL is dropped (if held) around
 * request_module(), then the lookup is retried. Returns:
 *   - the ops with a module reference held on direct hit,
 *   - ERR_PTR(-EAGAIN) if the module was loaded after dropping RTNL
 *     (caller must replay the whole request),
 *   - ERR_PTR(-ENOENT) if the classifier cannot be found at all.
 */
static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		/* Drop the reference taken by the successful lookup;
		 * the replayed request will take its own.
		 */
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun /* Register(unregister) new classifier type */
159*4882a593Smuzhiyun
register_tcf_proto_ops(struct tcf_proto_ops * ops)160*4882a593Smuzhiyun int register_tcf_proto_ops(struct tcf_proto_ops *ops)
161*4882a593Smuzhiyun {
162*4882a593Smuzhiyun struct tcf_proto_ops *t;
163*4882a593Smuzhiyun int rc = -EEXIST;
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun write_lock(&cls_mod_lock);
166*4882a593Smuzhiyun list_for_each_entry(t, &tcf_proto_base, head)
167*4882a593Smuzhiyun if (!strcmp(ops->kind, t->kind))
168*4882a593Smuzhiyun goto out;
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun list_add_tail(&ops->head, &tcf_proto_base);
171*4882a593Smuzhiyun rc = 0;
172*4882a593Smuzhiyun out:
173*4882a593Smuzhiyun write_unlock(&cls_mod_lock);
174*4882a593Smuzhiyun return rc;
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun EXPORT_SYMBOL(register_tcf_proto_ops);
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun static struct workqueue_struct *tc_filter_wq;
179*4882a593Smuzhiyun
/* Unregister a classifier type. Returns -ENOENT if @ops was never
 * registered. Outstanding RCU callbacks and queued filter work are flushed
 * first so no destroy path can still reference the ops afterwards.
 */
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
203*4882a593Smuzhiyun
/* Queue @func on the tc filter workqueue after an RCU grace period.
 * Returns false if the work was already queued.
 */
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun /* Select new prio value from the range, managed by kernel. */
212*4882a593Smuzhiyun
tcf_auto_prio(struct tcf_proto * tp)213*4882a593Smuzhiyun static inline u32 tcf_auto_prio(struct tcf_proto *tp)
214*4882a593Smuzhiyun {
215*4882a593Smuzhiyun u32 first = TC_H_MAKE(0xC0000000U, 0U);
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun if (tp)
218*4882a593Smuzhiyun first = tp->prio - 1;
219*4882a593Smuzhiyun
220*4882a593Smuzhiyun return TC_H_MAJ(first);
221*4882a593Smuzhiyun }
222*4882a593Smuzhiyun
tcf_proto_check_kind(struct nlattr * kind,char * name)223*4882a593Smuzhiyun static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
224*4882a593Smuzhiyun {
225*4882a593Smuzhiyun if (kind)
226*4882a593Smuzhiyun return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
227*4882a593Smuzhiyun memset(name, 0, IFNAMSIZ);
228*4882a593Smuzhiyun return false;
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun
tcf_proto_is_unlocked(const char * kind)231*4882a593Smuzhiyun static bool tcf_proto_is_unlocked(const char *kind)
232*4882a593Smuzhiyun {
233*4882a593Smuzhiyun const struct tcf_proto_ops *ops;
234*4882a593Smuzhiyun bool ret;
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun if (strlen(kind) == 0)
237*4882a593Smuzhiyun return false;
238*4882a593Smuzhiyun
239*4882a593Smuzhiyun ops = tcf_proto_lookup_ops(kind, false, NULL);
240*4882a593Smuzhiyun /* On error return false to take rtnl lock. Proto lookup/create
241*4882a593Smuzhiyun * functions will perform lookup again and properly handle errors.
242*4882a593Smuzhiyun */
243*4882a593Smuzhiyun if (IS_ERR(ops))
244*4882a593Smuzhiyun return false;
245*4882a593Smuzhiyun
246*4882a593Smuzhiyun ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
247*4882a593Smuzhiyun module_put(ops->owner);
248*4882a593Smuzhiyun return ret;
249*4882a593Smuzhiyun }
250*4882a593Smuzhiyun
/* Allocate and initialize a new proto of type @kind on @chain.
 *
 * Takes a module reference via tcf_proto_lookup_ops() (may drop RTNL when
 * @rtnl_held, returning -EAGAIN for replay). The new proto starts with
 * refcnt 1. On ops->init() failure the module reference is dropped and the
 * allocation freed. Returns the proto or an ERR_PTR.
 */
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}
286*4882a593Smuzhiyun
/* Take an additional reference on @tp. */
static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}
291*4882a593Smuzhiyun
292*4882a593Smuzhiyun static void tcf_chain_put(struct tcf_chain *chain);
293*4882a593Smuzhiyun
/* Destroy @tp: run the classifier's destroy callback, optionally remove it
 * from the destroy hashtable (@sig_destroy), drop the chain reference and
 * the ops module reference, and free after an RCU grace period.
 */
static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}
304*4882a593Smuzhiyun
/* Drop a reference on @tp; destroy it when this was the last one. */
static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}
311*4882a593Smuzhiyun
/* Decide whether @tp may be deleted. Classifiers providing delete_empty()
 * are consulted; otherwise the proto is unconditionally marked deleting and
 * true is returned.
 */
static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}
320*4882a593Smuzhiyun
/* Mark @tp as being deleted, under tp->lock. */
static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}
327*4882a593Smuzhiyun
tcf_proto_is_deleting(struct tcf_proto * tp)328*4882a593Smuzhiyun static bool tcf_proto_is_deleting(struct tcf_proto *tp)
329*4882a593Smuzhiyun {
330*4882a593Smuzhiyun bool deleting;
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun spin_lock(&tp->lock);
333*4882a593Smuzhiyun deleting = tp->deleting;
334*4882a593Smuzhiyun spin_unlock(&tp->lock);
335*4882a593Smuzhiyun
336*4882a593Smuzhiyun return deleting;
337*4882a593Smuzhiyun }
338*4882a593Smuzhiyun
/* Assert that the tcf_block's main lock is held by the caller. */
#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

/* One subscriber to chain-0 head changes; kept on
 * block->chain0.filter_chain_list.
 */
struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;	/* callback */
	void *chain_head_change_priv;			/* callback argument */
};
347*4882a593Smuzhiyun
/* Allocate chain @chain_index on @block (block->lock must be held) and link
 * it into the block's chain list. Chain 0 is additionally cached in
 * block->chain0. The new chain starts with refcnt 1. Returns NULL on
 * allocation failure.
 */
static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}
367*4882a593Smuzhiyun
/* Invoke one subscriber's head-change callback with the new head @tp_head,
 * if a callback is registered.
 */
static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}
374*4882a593Smuzhiyun
/* Notify all chain-0 subscribers that the filter chain head changed to
 * @tp_head. Only chain 0 has subscribers; other chains return immediately.
 */
static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}
389*4882a593Smuzhiyun
390*4882a593Smuzhiyun /* Returns true if block can be safely freed. */
391*4882a593Smuzhiyun
/* Returns true if block can be safely freed. */

/* Unlink @chain from its block (block->lock must be held), clearing the
 * chain-0 cache when applicable. The block can be freed once it holds no
 * chains and its refcount has dropped to zero.
 */
static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}
408*4882a593Smuzhiyun
/* Tear down a block's mutexes and free it after an RCU grace period. */
static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}
415*4882a593Smuzhiyun
/* Free @chain (RCU-deferred) and, when @free_block, its now-empty block. */
static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}
425*4882a593Smuzhiyun
/* Take a reference on @chain; caller must hold the block lock. */
static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}
432*4882a593Smuzhiyun
/* True when every reference on @chain comes from actions; caller must hold
 * the block lock.
 */
static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}
442*4882a593Smuzhiyun
/* Find the chain with @chain_index on @block, or NULL. Caller must hold
 * the block lock; no reference is taken.
 */
static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
456*4882a593Smuzhiyun
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* RCU-protected variant of tcf_chain_lookup() for the fast path; caller
 * must be in an RCU read-side section. No reference is taken.
 */
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif
470*4882a593Smuzhiyun
471*4882a593Smuzhiyun static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
472*4882a593Smuzhiyun u32 seq, u16 flags, int event, bool unicast);
473*4882a593Smuzhiyun
/* Get a reference on chain @chain_index of @block, optionally creating it.
 *
 * @create: create the chain if it does not exist yet.
 * @by_act: the reference is taken on behalf of an action (counted in
 *          action_refcnt and exempt from user notification).
 *
 * Returns the chain, or NULL if it does not exist and either @create is
 * false or allocation failed. The RTM_NEWCHAIN notification is sent outside
 * the block lock, only for the first non-action reference.
 */
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}
513*4882a593Smuzhiyun
/* Non-action chain get; see __tcf_chain_get(). */
static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}
519*4882a593Smuzhiyun
/* Get (and create if needed) a chain on behalf of an action; the reference
 * is counted in action_refcnt. See __tcf_chain_get().
 */
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
525*4882a593Smuzhiyun
526*4882a593Smuzhiyun static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
527*4882a593Smuzhiyun void *tmplt_priv);
528*4882a593Smuzhiyun static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
529*4882a593Smuzhiyun void *tmplt_priv, u32 chain_index,
530*4882a593Smuzhiyun struct tcf_block *block, struct sk_buff *oskb,
531*4882a593Smuzhiyun u32 seq, u16 flags, bool unicast);
532*4882a593Smuzhiyun
/* Drop a reference on @chain.
 *
 * @by_act: the reference being dropped was an action reference.
 * @explicitly_created: only drop the reference the user took by explicitly
 *                      creating the chain; a no-op if it was already
 *                      dropped.
 *
 * Sends the delete notification when the last non-action reference goes
 * away, and destroys the chain (and possibly its block) when the refcount
 * reaches zero. Notification and destruction happen outside the block lock,
 * using values snapshotted while it was held.
 */
static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}
579*4882a593Smuzhiyun
/* Drop an ordinary (non-action) chain reference. */
static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}
584*4882a593Smuzhiyun
/* Drop a chain reference that was taken by an action. */
void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);
590*4882a593Smuzhiyun
/* Drop the reference held because the user explicitly created the chain. */
static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}
595*4882a593Smuzhiyun
/* Remove every proto from @chain.
 *
 * Phase 1 (under filter_chain_lock): mark each proto as destroying, unhook
 * the whole list and notify chain-0 subscribers of the NULL head.
 * Phase 2 (unlocked): drop the list's references, destroying the protos.
 */
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}
619*4882a593Smuzhiyun
620*4882a593Smuzhiyun static int tcf_block_setup(struct tcf_block *block,
621*4882a593Smuzhiyun struct flow_block_offload *bo);
622*4882a593Smuzhiyun
/* Fill in a flow_block_offload descriptor for one bind/unbind request
 * towards a driver.
 */
static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->sch = sch;
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}
640*4882a593Smuzhiyun
641*4882a593Smuzhiyun static void tcf_block_unbind(struct tcf_block *block,
642*4882a593Smuzhiyun struct flow_block_offload *bo);
643*4882a593Smuzhiyun
/* Cleanup callback for an indirect block callback whose driver is going
 * away: replays an UNBIND for the single @block_cb under rtnl and the
 * block's cb_lock.
 */
static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	/* Hand just this callback to tcf_block_unbind() via bo.cb_list. */
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}
664*4882a593Smuzhiyun
tcf_block_offload_in_use(struct tcf_block * block)665*4882a593Smuzhiyun static bool tcf_block_offload_in_use(struct tcf_block *block)
666*4882a593Smuzhiyun {
667*4882a593Smuzhiyun return atomic_read(&block->offloadcnt);
668*4882a593Smuzhiyun }
669*4882a593Smuzhiyun
tcf_block_offload_cmd(struct tcf_block * block,struct net_device * dev,struct Qdisc * sch,struct tcf_block_ext_info * ei,enum flow_block_command command,struct netlink_ext_ack * extack)670*4882a593Smuzhiyun static int tcf_block_offload_cmd(struct tcf_block *block,
671*4882a593Smuzhiyun struct net_device *dev, struct Qdisc *sch,
672*4882a593Smuzhiyun struct tcf_block_ext_info *ei,
673*4882a593Smuzhiyun enum flow_block_command command,
674*4882a593Smuzhiyun struct netlink_ext_ack *extack)
675*4882a593Smuzhiyun {
676*4882a593Smuzhiyun struct flow_block_offload bo = {};
677*4882a593Smuzhiyun
678*4882a593Smuzhiyun tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
679*4882a593Smuzhiyun &block->flow_block, tcf_block_shared(block),
680*4882a593Smuzhiyun extack);
681*4882a593Smuzhiyun
682*4882a593Smuzhiyun if (dev->netdev_ops->ndo_setup_tc) {
683*4882a593Smuzhiyun int err;
684*4882a593Smuzhiyun
685*4882a593Smuzhiyun err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
686*4882a593Smuzhiyun if (err < 0) {
687*4882a593Smuzhiyun if (err != -EOPNOTSUPP)
688*4882a593Smuzhiyun NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
689*4882a593Smuzhiyun return err;
690*4882a593Smuzhiyun }
691*4882a593Smuzhiyun
692*4882a593Smuzhiyun return tcf_block_setup(block, &bo);
693*4882a593Smuzhiyun }
694*4882a593Smuzhiyun
695*4882a593Smuzhiyun flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
696*4882a593Smuzhiyun tc_block_indr_cleanup);
697*4882a593Smuzhiyun tcf_block_setup(block, &bo);
698*4882a593Smuzhiyun
699*4882a593Smuzhiyun return -EOPNOTSUPP;
700*4882a593Smuzhiyun }
701*4882a593Smuzhiyun
/* Bind @block to @q's device for hardware offload.
 *
 * Returns 0 on success.  If the driver reports -EOPNOTSUPP and the block
 * has no offloaded filters yet, the bind is allowed in software-only mode
 * and nooffloaddevcnt is bumped so later skip_sw offloads can be refused.
 */
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	/* Device cannot offload: only permit the bind if nothing on this
	 * block is already offloaded elsewhere.
	 */
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}
741*4882a593Smuzhiyun
/* Undo tcf_block_offload_bind().  When the device answered -EOPNOTSUPP it
 * was counted as a no-offload device at bind time, so drop that count
 * instead; the WARN catches an unbalanced increment/decrement.
 */
static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}
759*4882a593Smuzhiyun
/* Register a chain-0 head-change callback for @block.
 *
 * If chain 0 already exists, the current filter_chain head is replayed to
 * the new callback under chain0->filter_chain_lock, and the item is added
 * to the list while that lock is still held so no concurrent head change
 * can be missed in between.  Otherwise the item is added immediately.
 */
static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0); /* keep chain0 alive across unlock */
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}
803*4882a593Smuzhiyun
/* Unregister a head-change callback previously added by
 * tcf_chain0_head_change_cb_add().  Matching is on the (callback, priv)
 * pair; a NULL pair matches the first item.  WARN if nothing matched,
 * which would indicate an unbalanced add/del.
 */
static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			/* Tell the owner the head is gone before removal */
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}
827*4882a593Smuzhiyun
/* Per-netns state: IDR mapping block_index -> tcf_block for shared blocks. */
struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

/* pernet generic id, used with net_generic() to fetch struct tcf_net */
static unsigned int tcf_net_id;
834*4882a593Smuzhiyun
/* Insert a shared block into the per-netns IDR keyed by block->index.
 * idr_preload()/GFP_NOWAIT is used because the allocation itself runs
 * under the idr spinlock.
 */
static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}
850*4882a593Smuzhiyun
/* Remove a shared block from the per-netns IDR (reverse of
 * tcf_block_insert()).
 */
static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}
859*4882a593Smuzhiyun
/* Allocate and initialize a tcf_block with one reference held by the
 * caller.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);

	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}
888*4882a593Smuzhiyun
/* Look up a shared block by index in the per-netns IDR.  No reference is
 * taken; callers needing one use tcf_block_refcnt_get().
 */
static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	struct tcf_block *block;

	block = idr_find(&tn->idr, block_index);
	return block;
}
895*4882a593Smuzhiyun
/* Like tcf_block_lookup() but takes a reference.  refcount_inc_not_zero()
 * under rcu_read_lock guards against a block that is concurrently being
 * destroyed; NULL is returned in that case.
 */
static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}
908*4882a593Smuzhiyun
/* Return the chain following @chain on @block (or the first chain when
 * @chain is NULL), skipping chains held only by actions, with a reference
 * taken on the result.  The walk runs entirely under block->lock.
 */
static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}
931*4882a593Smuzhiyun
932*4882a593Smuzhiyun /* Function to be used by all clients that want to iterate over all chains on
933*4882a593Smuzhiyun * block. It properly obtains block->lock and takes reference to chain before
934*4882a593Smuzhiyun * returning it. Users of this function must be tolerant to concurrent chain
935*4882a593Smuzhiyun * insertion/deletion or ensure that no concurrent chain modification is
936*4882a593Smuzhiyun * possible. Note that all netlink dump callbacks cannot guarantee to provide
937*4882a593Smuzhiyun * consistent dump because rtnl lock is released each time skb is filled with
938*4882a593Smuzhiyun * data and sent to user-space.
939*4882a593Smuzhiyun */
940*4882a593Smuzhiyun
/* Iterator step: fetch the chain after @chain (holding a reference to
 * it), then drop the caller's reference on @chain.  The next chain must
 * be resolved before @chain is released so the list position stays valid.
 */
struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *next;

	next = __tcf_get_next_chain(block, chain);
	if (chain)
		tcf_chain_put(chain);

	return next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
952*4882a593Smuzhiyun
/* Return the tcf_proto following @tp on @chain (or the head when @tp is
 * NULL), taking a reference on the result.  If @tp was marked deleting
 * while filter_chain_lock was dropped, its next pointer may be stale, so
 * the search restarts from the head using tp->prio as the resume point.
 */
static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}
985*4882a593Smuzhiyun
986*4882a593Smuzhiyun /* Function to be used by all clients that want to iterate over all tp's on
987*4882a593Smuzhiyun * chain. Users of this function must be tolerant to concurrent tp
988*4882a593Smuzhiyun * insertion/deletion or ensure that no concurrent chain modification is
989*4882a593Smuzhiyun * possible. Note that all netlink dump callbacks cannot guarantee to provide
990*4882a593Smuzhiyun * consistent dump because rtnl lock is released each time skb is filled with
991*4882a593Smuzhiyun * data and sent to user-space.
992*4882a593Smuzhiyun */
993*4882a593Smuzhiyun
994*4882a593Smuzhiyun struct tcf_proto *
tcf_get_next_proto(struct tcf_chain * chain,struct tcf_proto * tp,bool rtnl_held)995*4882a593Smuzhiyun tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
996*4882a593Smuzhiyun bool rtnl_held)
997*4882a593Smuzhiyun {
998*4882a593Smuzhiyun struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
999*4882a593Smuzhiyun
1000*4882a593Smuzhiyun if (tp)
1001*4882a593Smuzhiyun tcf_proto_put(tp, rtnl_held, NULL);
1002*4882a593Smuzhiyun
1003*4882a593Smuzhiyun return tp_next;
1004*4882a593Smuzhiyun }
1005*4882a593Smuzhiyun EXPORT_SYMBOL(tcf_get_next_proto);
1006*4882a593Smuzhiyun
/* Flush every chain on a block that is being torn down.
 *
 * Only called once the last block reference is gone, so chains cannot be
 * added or removed concurrently.
 */
static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	chain = tcf_get_next_chain(block, NULL);
	while (chain) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
		chain = tcf_get_next_chain(block, chain);
	}
}
1021*4882a593Smuzhiyun
1022*4882a593Smuzhiyun /* Lookup Qdisc and increments its reference counter.
1023*4882a593Smuzhiyun * Set parent, if necessary.
1024*4882a593Smuzhiyun */
1025*4882a593Smuzhiyun
/* Resolve ifindex/parent into a qdisc with a reference taken, under RCU
 * protection only.  A no-op for the magic block ifindex.  On success the
 * caller owns a qdisc reference and must release it with qdisc_put()
 * (rtnl held) or qdisc_put_unlocked().
 */
static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	/* Only grab the qdisc if its refcount is not already zero, i.e. it
	 * is not being destroyed concurrently.
	 */
	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}
1100*4882a593Smuzhiyun
/* Resolve the class identified by @parent on @q into *cl when the filter
 * is attached to a class.  A no-op for the magic block ifindex, or when
 * TC_H_MIN(parent) is zero (filter attached directly to the qdisc).
 */
static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	if (!TC_H_MIN(parent))
		return 0;

	/* Do we search for filter, attached to class? */
	cops = q->ops->cl_ops;
	*cl = cops->find(q, parent);
	if (*cl == 0) {
		NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
		return -ENOENT;
	}

	return 0;
}
1120*4882a593Smuzhiyun
/* Find the tcf_block for a request and take a reference on it.
 *
 * For the magic block ifindex the block is looked up by @block_index;
 * otherwise it comes from the qdisc's class ops, with shared blocks
 * rejected since those must be addressed by index.
 */
static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. 'if' block
		 * of this conditional obtain reference to block by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}
1157*4882a593Smuzhiyun
/* Drop one reference to @block.  On the last reference the block is
 * removed from the shared-block IDR, unbound from offload, and either
 * freed directly (no chains) or torn down by flushing all chains, which
 * frees the block when the final chain goes away.
 */
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		/* Not the last reference: still undo this owner's offload
		 * binding.
		 */
		tcf_block_offload_unbind(block, q, ei);
	}
}
1185*4882a593Smuzhiyun
/* Drop a reference obtained with tcf_block_refcnt_get() (no qdisc/ei). */
static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}
1190*4882a593Smuzhiyun
1191*4882a593Smuzhiyun /* Find tcf block.
1192*4882a593Smuzhiyun * Set q, parent, cl when appropriate.
1193*4882a593Smuzhiyun */
1194*4882a593Smuzhiyun
/* Resolve a filter request (ifindex/parent/block_index) into a referenced
 * tcf_block, also setting *q, *parent and *cl.  Requires RTNL.  On error
 * any qdisc reference taken along the way is dropped and *q is cleared.
 */
static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}
1228*4882a593Smuzhiyun
/* Release the references taken by tcf_block_find(): the block first (if
 * one was found), then the qdisc, using the put variant that matches the
 * locking context.
 */
static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (!q)
		return;

	if (rtnl_held)
		qdisc_put(q);
	else
		qdisc_put_unlocked(q);
}
1242*4882a593Smuzhiyun
/* One qdisc that is bound to a block, and the binder type it used.
 * Entries live on block->owner_list.
 */
struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun static void
tcf_block_owner_netif_keep_dst(struct tcf_block * block,struct Qdisc * q,enum flow_block_binder_type binder_type)1250*4882a593Smuzhiyun tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1251*4882a593Smuzhiyun struct Qdisc *q,
1252*4882a593Smuzhiyun enum flow_block_binder_type binder_type)
1253*4882a593Smuzhiyun {
1254*4882a593Smuzhiyun if (block->keep_dst &&
1255*4882a593Smuzhiyun binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1256*4882a593Smuzhiyun binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1257*4882a593Smuzhiyun netif_keep_dst(qdisc_dev(q));
1258*4882a593Smuzhiyun }
1259*4882a593Smuzhiyun
/* Mark the block as needing skb dst preservation and propagate the
 * setting to every qdisc currently bound to it.
 */
void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1270*4882a593Smuzhiyun
tcf_block_owner_add(struct tcf_block * block,struct Qdisc * q,enum flow_block_binder_type binder_type)1271*4882a593Smuzhiyun static int tcf_block_owner_add(struct tcf_block *block,
1272*4882a593Smuzhiyun struct Qdisc *q,
1273*4882a593Smuzhiyun enum flow_block_binder_type binder_type)
1274*4882a593Smuzhiyun {
1275*4882a593Smuzhiyun struct tcf_block_owner_item *item;
1276*4882a593Smuzhiyun
1277*4882a593Smuzhiyun item = kmalloc(sizeof(*item), GFP_KERNEL);
1278*4882a593Smuzhiyun if (!item)
1279*4882a593Smuzhiyun return -ENOMEM;
1280*4882a593Smuzhiyun item->q = q;
1281*4882a593Smuzhiyun item->binder_type = binder_type;
1282*4882a593Smuzhiyun list_add(&item->list, &block->owner_list);
1283*4882a593Smuzhiyun return 0;
1284*4882a593Smuzhiyun }
1285*4882a593Smuzhiyun
/* Remove the owner entry matching (q, binder_type).  WARN if absent,
 * since adds and dels must be balanced.
 */
static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}
1301*4882a593Smuzhiyun
/* Attach a tcf_block to qdisc @q, either by taking a reference on an
 * existing shared block (ei->block_index != 0 and it is already
 * registered) or by creating a fresh block. On success *p_block is set
 * and 0 is returned; on failure a negative errno is returned and all
 * partially-taken state is unwound in reverse order of acquisition.
 */
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		/* Shared blocks are looked up by index, so they must be
		 * inserted into the per-netns block index.
		 */
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	/* Dropping the last reference also removes a shared block from
	 * the index, so both early-failure labels share this path.
	 */
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);
1352*4882a593Smuzhiyun
/* Default chain0 head-change callback: publish the new filter chain
 * head into the caller-provided tcf_proto pointer (priv) under RCU.
 */
static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}
1359*4882a593Smuzhiyun
/* Simple wrapper around tcf_block_get_ext() for qdiscs that just want
 * their filter-chain head pointer (@p_filter_chain) kept up to date via
 * the default head-change callback. @p_filter_chain must not be NULL.
 */
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
1373*4882a593Smuzhiyun
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
/* Detach @block from qdisc @q: undo tcf_block_get_ext() by removing the
 * chain0 callback and the owner item, then drop the block reference
 * (which offload-unbinds and destroys the block if this was the last
 * user). NULL @block is tolerated as a no-op.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);
1388*4882a593Smuzhiyun
/* Counterpart of tcf_block_get(): release @block using an empty
 * ext_info (no chain0 callback was registered beyond the default one,
 * which __tcf_block_put tears down via the block itself).
 */
void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}

EXPORT_SYMBOL(tcf_block_put);
1399*4882a593Smuzhiyun
/* Replay all existing filters of @block to a single offload callback
 * @cb, either adding (@add == true) or removing them. Iterates every
 * chain and every proto, taking/putting references so entries may not
 * disappear underneath us; block->cb_lock must be held.
 *
 * If an add fails midway (or a classifier lacks ->reoffload while
 * offloads are in use), already-added entries are rolled back by
 * recursively replaying with add == false.
 */
static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	/* get-next/put iteration: the previous entry's reference is
	 * dropped only after the next one has been acquired.
	 */
	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	/* Drop the references held for the current position, then undo
	 * whatever was already offloaded to @cb.
	 */
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}
1442*4882a593Smuzhiyun
/* Bind all callbacks on bo->cb_list to @block: replay existing filters
 * to each callback, then splice the whole list onto the block's
 * flow_block. block->cb_lock must be held.
 *
 * On failure, @i (the number of callbacks fully bound so far) bounds
 * the unroll: only those entries get their filters replayed-for-remove
 * and their lockeddevcnt contribution reverted; every entry is freed.
 */
static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		/* Callbacks that need rtnl protection are counted so the
		 * unlocked classifier path can detect them.
		 */
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}
1483*4882a593Smuzhiyun
/* Unbind all callbacks on bo->cb_list from @block: replay existing
 * filters for removal on each callback, unlink and free it, and revert
 * the lockeddevcnt accounting done at bind time. block->cb_lock must
 * be held.
 */
static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}
1502*4882a593Smuzhiyun
tcf_block_setup(struct tcf_block * block,struct flow_block_offload * bo)1503*4882a593Smuzhiyun static int tcf_block_setup(struct tcf_block *block,
1504*4882a593Smuzhiyun struct flow_block_offload *bo)
1505*4882a593Smuzhiyun {
1506*4882a593Smuzhiyun int err;
1507*4882a593Smuzhiyun
1508*4882a593Smuzhiyun switch (bo->command) {
1509*4882a593Smuzhiyun case FLOW_BLOCK_BIND:
1510*4882a593Smuzhiyun err = tcf_block_bind(block, bo);
1511*4882a593Smuzhiyun break;
1512*4882a593Smuzhiyun case FLOW_BLOCK_UNBIND:
1513*4882a593Smuzhiyun err = 0;
1514*4882a593Smuzhiyun tcf_block_unbind(block, bo);
1515*4882a593Smuzhiyun break;
1516*4882a593Smuzhiyun default:
1517*4882a593Smuzhiyun WARN_ON_ONCE(1);
1518*4882a593Smuzhiyun err = -EOPNOTSUPP;
1519*4882a593Smuzhiyun }
1520*4882a593Smuzhiyun
1521*4882a593Smuzhiyun return err;
1522*4882a593Smuzhiyun }
1523*4882a593Smuzhiyun
/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
/* Walk the tcf_proto list starting at @tp, invoking each matching
 * classifier until one returns a verdict (err >= 0). With
 * CONFIG_NET_CLS_ACT, TC_ACT_RECLASSIFY restarts from @orig_tp and
 * TC_ACT_GOTO_CHAIN restarts from the target chain's head; either way
 * *last_executed_chain records where execution resumed, and restarts
 * are capped at max_reclassify_loop iterations before TC_ACT_SHOT.
 * Returns TC_ACT_UNSPEC if no classifier matched.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err;

		/* Skip classifiers bound to a different protocol unless
		 * they match everything (ETH_P_ALL).
		 */
		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}
1581*4882a593Smuzhiyun
/* Public classify entry point for non-ingress callers: classify @skb
 * against the chain headed by @tp, discarding the last-executed-chain
 * tracking used by the ingress path.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
}
EXPORT_SYMBOL(tcf_classify);
1591*4882a593Smuzhiyun
/* Ingress classify entry point. Without CONFIG_NET_TC_SKB_EXT this is
 * identical to tcf_classify(). With it, a TC_SKB_EXT extension on the
 * skb (set by a previous pass, e.g. after hardware miss) redirects
 * classification to the recorded chain of @ingress_block; on a miss
 * past the first chain, the extension is (re)attached so a subsequent
 * pass can resume where this one left off.
 */
int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp,
			 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int ret;

	ext = skb_ext_find(skb, TC_SKB_EXT);

	if (ext && ext->chain) {
		struct tcf_chain *fchain;

		fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
		if (!fchain)
			return TC_ACT_SHOT;

		/* Consume, so cloned/redirect skbs won't inherit ext */
		skb_ext_del(skb, TC_SKB_EXT);

		tp = rcu_dereference_bh(fchain->filter_chain);
		last_executed_chain = fchain->index;
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
			     &last_executed_chain);

	/* If we missed on some chain */
	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
		ext = tc_skb_ext_alloc(skb);
		if (WARN_ON_ONCE(!ext))
			return TC_ACT_SHOT;
		ext->chain = last_executed_chain;
		ext->mru = qdisc_skb_cb(skb)->mru;
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify_ingress);
1640*4882a593Smuzhiyun
/* Cursor into a chain's filter list, captured by tcf_chain_tp_find()
 * so insert/remove can splice at the found position.
 */
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;	/* link pointing at the position */
	struct tcf_proto __rcu *next;	/* proto following the position */
};
1645*4882a593Smuzhiyun
/* Dereference the proto currently at the cursor position (i.e. what
 * *chain_info->pprev points to), under the chain's filter_chain_lock.
 */
static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}
1651*4882a593Smuzhiyun
/* Insert @tp at the cursor position recorded in @chain_info. Fails
 * with -EAGAIN if the chain is being flushed. If the insertion happens
 * at the head of the chain, the chain0 head-change callbacks are run.
 * Takes a reference on @tp before publishing it via RCU.
 */
static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}
1667*4882a593Smuzhiyun
/* Unlink @tp at the cursor position recorded in @chain_info: mark it
 * deleted, run the chain0 head-change callbacks if it was the chain
 * head, and splice its successor into its place. The caller is
 * responsible for dropping the proto's reference.
 */
static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}
1679*4882a593Smuzhiyun
1680*4882a593Smuzhiyun static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1681*4882a593Smuzhiyun struct tcf_chain_info *chain_info,
1682*4882a593Smuzhiyun u32 protocol, u32 prio,
1683*4882a593Smuzhiyun bool prio_allocate);
1684*4882a593Smuzhiyun
/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

/* Returns the proto that ends up in the chain (tp_new on success, the
 * pre-existing proto on a priority collision) or an ERR_PTR. -EAGAIN is
 * returned if an identical proto is concurrently being destroyed, so
 * the caller can retry. tp_new is always consumed on non-success paths.
 */
static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	/* A matching proto is mid-destruction; inserting now would race
	 * with its teardown, so ask the caller to retry.
	 */
	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		/* Priority already taken: hand back the existing proto
		 * (reference taken by tcf_chain_tp_find).
		 */
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}
1723*4882a593Smuzhiyun
/* Remove @tp from @chain if it has become empty. The find-and-unlink
 * is done in one critical section under filter_chain_lock so a filter
 * inserted concurrently cannot be lost: if @tp was already removed or
 * is no longer empty (tcf_proto_check_delete), this is a no-op.
 */
static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	/* Drop the chain's reference; destroys tp when it is the last. */
	tcf_proto_put(tp, rtnl_held, extack);
}
1764*4882a593Smuzhiyun
/* Find the proto with priority @prio in @chain (the list is kept sorted
 * by ascending priority) and record the cursor position in @chain_info
 * for a subsequent insert/remove. Returns:
 *   - the matching proto with a reference taken,
 *   - NULL if no proto with @prio exists (cursor points at the slot
 *     where one would be inserted),
 *   - ERR_PTR(-EINVAL) if @prio exists but was supposed to be
 *     auto-allocated, or its protocol conflicts with a non-zero
 *     @protocol.
 */
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}
1797*4882a593Smuzhiyun
/* Fill @skb with one netlink message describing filter @fh of @tp.
 * @event is the rtnetlink message type; @terse_dump selects the
 * classifier's abbreviated dump (fails if the classifier has none).
 * Returns skb->len on success, -1 on any failure (the partially
 * written message is trimmed off).
 */
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool terse_dump, bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		/* Shared blocks are addressed by block index, not ifindex. */
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		/* No specific filter: message describes the proto only. */
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
	nlmsg_trim(skb, b);
	return -1;
}
1851*4882a593Smuzhiyun
/* Build a filter event message and send it either back to the
 * requesting socket (@unicast) or to the RTNLGRP_TC multicast group,
 * echoing to the requester if NLM_F_ECHO was set. @oskb, if non-NULL,
 * is the triggering request and supplies the destination portid.
 */
static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  false, rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);

	/* Positive return values are byte counts, not errors. */
	if (err > 0)
		err = 0;
	return err;
}
1883*4882a593Smuzhiyun
/* Delete filter @fh from @tp and notify listeners. The notification
 * skb is built BEFORE calling ->delete, because the filter's dump data
 * is gone afterwards; the skb is only sent once the delete succeeded.
 * *last is set by the classifier when this was its last filter.
 */
static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  false, rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	/* Positive return values are byte counts, not errors. */
	if (err > 0)
		err = 0;
	return err;
}
1924*4882a593Smuzhiyun
/* Send one @event notification per proto on @chain (with fh == NULL,
 * i.e. describing the proto itself, not individual filters). Notify
 * failures are deliberately ignored — best effort.
 */
static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 bool rtnl_held)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
	     tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, rtnl_held);
}
1938*4882a593Smuzhiyun
tfilter_put(struct tcf_proto * tp,void * fh)1939*4882a593Smuzhiyun static void tfilter_put(struct tcf_proto *tp, void *fh)
1940*4882a593Smuzhiyun {
1941*4882a593Smuzhiyun if (tp->ops->put && fh)
1942*4882a593Smuzhiyun tp->ops->put(tp, fh);
1943*4882a593Smuzhiyun }
1944*4882a593Smuzhiyun
/* RTM_NEWTFILTER handler: create a new filter or change an existing one.
 *
 * The rtnl mutex is taken only when needed (previous replay iteration held
 * it, the qdisc/classifier does not support unlocked operation, or the
 * classifier kind is unspecified); otherwise the handler runs without rtnl
 * and relies on chain->filter_chain_lock plus reference counting.
 * -EAGAIN (concurrent chain flush) causes the whole request to be replayed
 * with rtnl held.
 */
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	/* Re-parse on every replay iteration: attributes are cheap to parse
	 * and the message is unchanged.
	 */
	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;
	q = NULL;
	chain = NULL;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}
	block->classid = parent;

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	/* create == true: the chain is created here if it does not exist. */
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			/* Chain is being flushed concurrently; bail out and
			 * replay the request (see -EAGAIN handling below).
			 */
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		/* Drop the chain lock before creating the new proto; the
		 * insertion below re-validates uniqueness.
		 */
		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(name, protocol, prio, chain,
					  rtnl_held, extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	/* Any reference obtained through ->get() is dropped via
	 * tfilter_put() on every path below.
	 */
	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	/* Delegate the actual create/replace to the classifier. */
	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			      rtnl_held, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	/* A proto created by this request but left without filters on error
	 * is removed again.
	 */
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
2174*4882a593Smuzhiyun
/* RTM_DELTFILTER handler.  Depending on the message, this either:
 *  - flushes a whole chain (prio == 0),
 *  - removes an entire tcf_proto (tcm_handle == 0), or
 *  - deletes one filter instance (via tfilter_del_notify()).
 * Like tc_new_tfilter(), rtnl is only taken when required.
 */
static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	/* prio == 0 means "flush the chain"; that request must not carry
	 * per-filter selectors.
	 */
	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
	 * found), qdisc is not unlocked, classifier type is not specified,
	 * classifier is not unlocked.
	 */
	if (!prio ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	/* create == false: a delete never instantiates a chain. */
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		/* Flush: notify for each proto first, then tear them down. */
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, rtnl_held);
		tcf_chain_flush(chain, rtnl_held);
		err = 0;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout_locked;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout_locked;
	} else if (t->tcm_handle == 0) {
		/* handle == 0: remove the whole tcf_proto.  Unlink it under
		 * the chain lock, then drop its chain reference and notify
		 * (fh is still NULL here).
		 */
		tcf_proto_signal_destroying(chain, tp);
		tcf_chain_tp_remove(chain, &chain_info, tp);
		mutex_unlock(&chain->filter_chain_lock);

		tcf_proto_put(tp, rtnl_held, NULL);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_DELTFILTER, false, rtnl_held);
		err = 0;
		goto errout;
	}
	mutex_unlock(&chain->filter_chain_lock);

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block,
					 q, parent, fh, false, &last,
					 rtnl_held, extack);

		if (err)
			goto errout;
		/* Deleting the last filter leaves the proto empty; remove it. */
		if (last)
			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
	}

errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
2334*4882a593Smuzhiyun
/* RTM_GETTFILTER handler: look up a single filter identified by
 * prio/protocol/handle and unicast it back to the requester via
 * tfilter_notify(..., unicast = true).  No capability check is needed
 * for a read-only get.
 */
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	/* A get must fully identify the filter; prio 0 is only meaningful
	 * for create/flush.
	 */
	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
	 * unlocked, classifier type is not specified, classifier is not
	 * unlocked.
	 */
	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	/* create == false: a get never instantiates a chain. */
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	/* The lookup only needs the lock for the list walk; tp holds its
	 * own reference afterwards (released in errout).
	 */
	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	mutex_unlock(&chain->filter_chain_lock);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		/* unicast == true: reply goes only to the requesting socket. */
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true, rtnl_held);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

	tfilter_put(tp, fh);
errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;
}
2456*4882a593Smuzhiyun
/* Per-dump state threaded through tp->ops->walk() to tcf_node_dump(). */
struct tcf_dump_args {
	struct tcf_walker w;	/* must stay first: tcf_node_dump() casts
				 * the walker pointer back to this struct
				 */
	struct sk_buff *skb;	/* dump skb being filled */
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;	/* NULL when dumping a shared block */
	u32 parent;
	bool terse_dump;	/* TCA_DUMP_FLAGS_TERSE was requested */
};
2466*4882a593Smuzhiyun
tcf_node_dump(struct tcf_proto * tp,void * n,struct tcf_walker * arg)2467*4882a593Smuzhiyun static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2468*4882a593Smuzhiyun {
2469*4882a593Smuzhiyun struct tcf_dump_args *a = (void *)arg;
2470*4882a593Smuzhiyun struct net *net = sock_net(a->skb->sk);
2471*4882a593Smuzhiyun
2472*4882a593Smuzhiyun return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2473*4882a593Smuzhiyun n, NETLINK_CB(a->cb->skb).portid,
2474*4882a593Smuzhiyun a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2475*4882a593Smuzhiyun RTM_NEWTFILTER, a->terse_dump, true);
2476*4882a593Smuzhiyun }
2477*4882a593Smuzhiyun
/* Dump all filters of one chain into the netlink dump skb.
 *
 * Returns true when the chain was fully dumped, false when the skb ran
 * out of space (dump resumes later from cb->args state).
 *
 * Resume state in cb->args:
 *   args[0] (via *p_index/index_start) - per-chain proto index,
 *   args[1] - 1-based position within the current proto's walk,
 *   args[2] - classifier-private walk cookie.
 */
static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index, bool terse)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_proto *tp, *tp_prev;
	struct tcf_dump_args arg;

	/* The increment expression releases the previous proto's reference
	 * only after the next one has been acquired, so iteration stays
	 * safe against concurrent removal.
	 */
	for (tp = __tcf_get_next_proto(chain, NULL);
	     tp;
	     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL),
		     (*p_index)++) {
		/* Skip protos already emitted by a previous dump pass. */
		if (*p_index < index_start)
			continue;
		/* Optional prio/protocol filtering from the request header. */
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		/* New proto: reset the per-proto resume state (args[1..]). */
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			/* First emit a node describing the proto itself. */
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER, false, true) <= 0)
				goto errout;
			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		arg.w.cookie = cb->args[2];
		arg.terse_dump = terse;
		tp->ops->walk(tp, &arg.w, true);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			goto errout;
	}
	return true;

errout:
	/* Drop the reference on the proto we stopped at. */
	tcf_proto_put(tp, true, NULL);
	return false;
}
2538*4882a593Smuzhiyun
/* Attribute policy for filter dump requests: only the TERSE bit of
 * TCA_DUMP_FLAGS is accepted.
 */
static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
};
2542*4882a593Smuzhiyun
/* called with RTNL */
/* RTM_GETTFILTER dump handler: walk every chain of the target block
 * (addressed either by block index or by ifindex/parent) and dump its
 * filters.  Resumable via cb->args; returns skb->len on progress.
 */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcf_chain *chain, *chain_prev;
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	bool terse_dump = false;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     tcf_tfilter_dump_policy, cb->extack);
	if (err)
		return err;

	if (tca[TCA_DUMP_FLAGS]) {
		struct nla_bitfield32 flags =
			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);

		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		/* Block addressed directly by index; reference is dropped
		 * after the dump loop.
		 */
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		/* Block addressed via device/qdisc/class lookup. */
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent)
			q = rtnl_dereference(dev->qdisc);
		else
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		parent = block->classid;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	/* Same acquire-next-then-put-previous pattern as tcf_chain_dump():
	 * the previous chain's reference is released only after the next
	 * one is held.
	 */
	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index, terse_dump)) {
			tcf_chain_put(chain);
			err = -EMSGSIZE;
			break;
		}
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}
2647*4882a593Smuzhiyun
/* Build a single RTM_*CHAIN netlink message describing one chain.
 *
 * Template ops/priv are passed explicitly (rather than read from a
 * struct tcf_chain) so callers can also report chains that are in the
 * process of being destroyed.  Returns skb->len on success, -EMSGSIZE
 * if the message did not fit; on failure the skb tail is restored.
 */
static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
			      void *tmplt_priv, u32 chain_index,
			      struct net *net, struct sk_buff *skb,
			      struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event)
{
	unsigned char *start_tail = skb_tail_pointer(skb);
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto nla_put_failure;

	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_handle = 0;
	if (block->q) {
		/* Block bound to a qdisc: identify it by device and parent. */
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
	} else {
		/* Shared block: identify it by its global block index. */
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}

	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
		goto nla_put_failure;

	if (tmplt_ops) {
		if (nla_put_string(skb, TCA_KIND, tmplt_ops->kind))
			goto nla_put_failure;
		if (tmplt_ops->tmplt_dump(skb, net, tmplt_priv) < 0)
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb_tail_pointer(skb) - start_tail;
	return skb->len;

nla_put_failure:
	/* Undo everything appended above; nlmsg_trim(skb, NULL) is a no-op,
	 * so this label also covers nlmsg_put() failure.
	 */
	nlmsg_trim(skb, start_tail);
	return -EMSGSIZE;
}
2697*4882a593Smuzhiyun
/* Notify about a chain event (RTM_NEWCHAIN/RTM_GETCHAIN).
 *
 * Allocates a fresh skb, fills it via tc_chain_fill_node() and sends it
 * either as a unicast reply to the requester or as an RTNLGRP_TC
 * multicast.  Returns 0 on success or a negative errno.
 */
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
			       chain->index, net, skb, block, portid,
			       seq, flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     flags & NLM_F_ECHO);

	/* netlink_unicast() returns the byte count on success; callers only
	 * care about success/failure, so normalize positive values to 0.
	 */
	return err > 0 ? 0 : err;
}
2728*4882a593Smuzhiyun
/* Notify about chain deletion (RTM_DELCHAIN).
 *
 * Unlike tc_chain_notify() this takes the template ops/priv and chain
 * index explicitly, because the chain object itself may already be gone
 * by the time the notification is sent.  Returns the raw send result
 * (positive byte count from netlink_unicast() is possible).
 */
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct net *net = block->net;
	struct sk_buff *nskb;

	nskb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!nskb)
		return -ENOBUFS;

	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, nskb,
			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
		kfree_skb(nskb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, nskb, portid, MSG_DONTWAIT);

	return rtnetlink_send(nskb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}
2753*4882a593Smuzhiyun
tc_chain_tmplt_add(struct tcf_chain * chain,struct net * net,struct nlattr ** tca,struct netlink_ext_ack * extack)2754*4882a593Smuzhiyun static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2755*4882a593Smuzhiyun struct nlattr **tca,
2756*4882a593Smuzhiyun struct netlink_ext_ack *extack)
2757*4882a593Smuzhiyun {
2758*4882a593Smuzhiyun const struct tcf_proto_ops *ops;
2759*4882a593Smuzhiyun char name[IFNAMSIZ];
2760*4882a593Smuzhiyun void *tmplt_priv;
2761*4882a593Smuzhiyun
2762*4882a593Smuzhiyun /* If kind is not set, user did not specify template. */
2763*4882a593Smuzhiyun if (!tca[TCA_KIND])
2764*4882a593Smuzhiyun return 0;
2765*4882a593Smuzhiyun
2766*4882a593Smuzhiyun if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2767*4882a593Smuzhiyun NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2768*4882a593Smuzhiyun return -EINVAL;
2769*4882a593Smuzhiyun }
2770*4882a593Smuzhiyun
2771*4882a593Smuzhiyun ops = tcf_proto_lookup_ops(name, true, extack);
2772*4882a593Smuzhiyun if (IS_ERR(ops))
2773*4882a593Smuzhiyun return PTR_ERR(ops);
2774*4882a593Smuzhiyun if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2775*4882a593Smuzhiyun NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2776*4882a593Smuzhiyun return -EOPNOTSUPP;
2777*4882a593Smuzhiyun }
2778*4882a593Smuzhiyun
2779*4882a593Smuzhiyun tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2780*4882a593Smuzhiyun if (IS_ERR(tmplt_priv)) {
2781*4882a593Smuzhiyun module_put(ops->owner);
2782*4882a593Smuzhiyun return PTR_ERR(tmplt_priv);
2783*4882a593Smuzhiyun }
2784*4882a593Smuzhiyun chain->tmplt_ops = ops;
2785*4882a593Smuzhiyun chain->tmplt_priv = tmplt_priv;
2786*4882a593Smuzhiyun return 0;
2787*4882a593Smuzhiyun }
2788*4882a593Smuzhiyun
/* Release a chain template: destroy the classifier's private template
 * state and drop the module reference taken when the template was added
 * (see tc_chain_tmplt_add()).
 */
static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv)
{
	/* If template ops are not set, the chain carries no template and
	 * there is no work to do for us.
	 */
	if (!tmplt_ops)
		return;

	tmplt_ops->tmplt_destroy(tmplt_priv);
	module_put(tmplt_ops->owner);
}
2799*4882a593Smuzhiyun
2800*4882a593Smuzhiyun /* Add/delete/get a chain */
2801*4882a593Smuzhiyun
/* tc_ctl_chain() - handle RTM_NEWCHAIN, RTM_DELCHAIN and RTM_GETCHAIN.
 *
 * Resolves the target block from the tcmsg (device/parent or shared
 * block index), looks up or creates the chain under block->lock, then
 * performs the requested operation with the lock dropped.  Chain
 * reference counting is carefully balanced: every successful lookup
 * path ends with a held reference that the common errout path puts.
 * A -EAGAIN from the operation replays the whole request (e.g. after a
 * module load).
 */
static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q;
	struct tcf_chain *chain;
	struct tcf_block *block;
	unsigned long cl;
	int err;

	/* Only the read-only RTM_GETCHAIN is allowed without CAP_NET_ADMIN. */
	if (n->nlmsg_type != RTM_GETCHAIN &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	q = NULL;
	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	parent = t->tcm_parent;
	cl = 0;

	/* Takes a reference on the block; released via tcf_block_release(). */
	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	/* Chain 0 is implied when the user gives no TCA_CHAIN attribute. */
	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout_block;
	}

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
		if (chain) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
			} else {
				NL_SET_ERR_MSG(extack, "Filter chain already exists");
				err = -EEXIST;
				goto errout_block_locked;
			}
		} else {
			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
				err = -ENOENT;
				goto errout_block_locked;
			}
			/* tcf_chain_create() returns the chain with one
			 * reference already held for this caller.
			 */
			chain = tcf_chain_create(block, chain_index);
			if (!chain) {
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
				err = -ENOMEM;
				goto errout_block_locked;
			}
		}
	} else {
		/* DELCHAIN/GETCHAIN: chains held only by actions are not
		 * user-visible, so treat them as nonexistent.
		 */
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
			err = -EINVAL;
			goto errout_block_locked;
		}
		tcf_chain_hold(chain);
	}

	if (n->nlmsg_type == RTM_NEWCHAIN) {
		/* Modifying chain requires holding parent block lock. In case
		 * the chain was successfully added, take a reference to the
		 * chain. This ensures that an empty chain does not disappear at
		 * the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
	}
	mutex_unlock(&block->lock);

	switch (n->nlmsg_type) {
	case RTM_NEWCHAIN:
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		if (err) {
			/* Template setup failed: undo the explicit-creation
			 * reference taken above.
			 */
			tcf_chain_put_explicitly_created(chain);
			goto errout;
		}

		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);
		break;
	case RTM_DELCHAIN:
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, true);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain, true);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		break;
	case RTM_GETCHAIN:
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_flags, n->nlmsg_type, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		break;
	default:
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack, "Unsupported message type");
		goto errout;
	}

errout:
	/* Drop the per-request reference taken during lookup/creation. */
	tcf_chain_put(chain);
errout_block:
	tcf_block_release(q, block, true);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;

errout_block_locked:
	mutex_unlock(&block->lock);
	goto errout_block;
}
2936*4882a593Smuzhiyun
2937*4882a593Smuzhiyun /* called with RTNL */
/* Dump all chains of a block as RTM_NEWCHAIN messages (RTM_GETCHAIN
 * with NLM_F_DUMP).  cb->args[0] stores the dump cursor (chain count
 * already emitted) across invocations so an interrupted dump resumes
 * where it left off.  Returns skb->len, or a negative error only when
 * nothing at all was written.
 */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_chain *chain;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		/* Shared block addressed by index; takes a reference that is
		 * dropped below via tcf_block_refcnt_put().
		 */
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		/* Block addressed via device + qdisc parent (no extra ref). */
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			/* Zero parent means the device's root qdisc. */
			q = rtnl_dereference(dev->qdisc);
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			/* Parent addresses a class inside the qdisc. */
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	mutex_lock(&block->lock);
	list_for_each_entry(chain, &block->chain_list, list) {
		/* Optional TCA_CHAIN attribute restricts dump to one chain. */
		if ((tca[TCA_CHAIN] &&
		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
			continue;
		/* Skip entries already emitted in a previous dump pass. */
		if (index < index_start) {
			index++;
			continue;
		}
		/* Chains kept alive only by actions are not user-visible. */
		if (tcf_chain_held_by_acts_only(chain))
			continue;
		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
					 chain->index, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 RTM_NEWCHAIN);
		if (err <= 0)
			break;
		index++;
	}
	mutex_unlock(&block->lock);

	/* Only the shared-block path above took an extra block reference. */
	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}
3040*4882a593Smuzhiyun
/* Destroy all actions attached to the extensions and free the action
 * array itself.  No-op when CONFIG_NET_CLS_ACT is disabled.
 */
void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action **actions = exts->actions;

	if (actions) {
		tcf_action_destroy(actions, TCA_ACT_UNBIND);
		kfree(actions);
	}
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);
3052*4882a593Smuzhiyun
/* Parse and instantiate the actions attached to a classifier from the
 * netlink attributes in tb[].  Two input formats are supported: the
 * legacy single "police" attribute (TCA_OLD_COMPAT) and the generic
 * nested action list.  On success exts->actions and exts->nr_actions
 * are populated.  Returns 0 or a negative errno.
 */
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		int init_res[TCA_ACT_MAX_PRIO] = {};
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			/* Legacy compat path: a single police action given in
			 * the classifier-specific "police" attribute.
			 */
			struct tc_action_ops *a_o;

			a_o = tc_action_load_ops("police", tb[exts->police], rtnl_held, extack);
			if (IS_ERR(a_o))
				return PTR_ERR(a_o);
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, a_o, init_res,
						rtnl_held, extack);
			/* tcf_action_init_1() holds its own module ref; drop
			 * the one taken by tc_action_load_ops().
			 */
			module_put(a_o->owner);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
			/* Publish the action in the idr only after it is fully
			 * set up and linked into exts->actions.
			 */
			tcf_idr_insert_many(exts->actions);
		} else if (exts->action && tb[exts->action]) {
			/* Generic path: nested list of actions; returns the
			 * number of actions initialized on success.
			 */
			int err;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      exts->actions, init_res,
					      &attr_size, rtnl_held, extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);
3104*4882a593Smuzhiyun
/* Replace dst's extensions with src's, destroying whatever dst held
 * before.  The old contents are copied aside first so the destroy runs
 * on a stable snapshot.  No-op when CONFIG_NET_CLS_ACT is disabled.
 */
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts tmp;

	tmp = *dst;
	*dst = *src;
	tcf_exts_destroy(&tmp);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);
3115*4882a593Smuzhiyun
#ifdef CONFIG_NET_CLS_ACT
/* Return the first attached action, or NULL if there are none. */
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	return exts->nr_actions ? exts->actions[0] : NULL;
}
#endif
3125*4882a593Smuzhiyun
/* Dump the classifier's actions into a netlink message, using either
 * the generic nested-action format or, for TCA_OLD_COMPAT extensions,
 * the legacy single-police format.  Returns 0 on success, -1 if the
 * message did not fit (partial attributes are cancelled).
 */
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start_noflag(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
			    < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			/* Legacy format: a single police action under the
			 * classifier-specific "police" attribute.
			 */
			struct tc_action *act = tcf_exts_first_act(exts);
			nest = nla_nest_start_noflag(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	/* nla_nest_cancel() with a NULL nest is a no-op, so this also
	 * covers nest-allocation failure.
	 */
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);
3166*4882a593Smuzhiyun
/* Dump the classifier's actions in terse form (only the minimum needed
 * to identify them).  Returns 0 on success or when there is nothing to
 * dump, -1 if the message did not fit.
 */
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *start;

	/* Nothing to emit unless actions are attached. */
	if (!exts->action || !tcf_exts_has_actions(exts))
		return 0;

	start = nla_nest_start_noflag(skb, exts->action);
	if (!start)
		goto nla_put_failure;

	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, start);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, start);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_terse_dump);
3192*4882a593Smuzhiyun
/* Dump statistics of the first attached action (legacy single-action
 * stats).  Returns 0 on success or when no action exists, -1 on dump
 * failure.
 */
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *act = tcf_exts_first_act(exts);

	if (act && tcf_action_copy_stats(skb, act, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
3203*4882a593Smuzhiyun
/* Mark a filter as offloaded and bump the block's offload counter,
 * exactly once per filter (idempotent if the flag is already set).
 */
static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW)) {
		*flags |= TCA_CLS_FLAGS_IN_HW;
		atomic_inc(&block->offloadcnt);
	}
}
3211*4882a593Smuzhiyun
/* Clear a filter's offloaded mark and drop the block's offload counter,
 * exactly once per filter (idempotent if the flag is already clear).
 */
static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW) {
		*flags &= ~TCA_CLS_FLAGS_IN_HW;
		atomic_dec(&block->offloadcnt);
	}
}
3219*4882a593Smuzhiyun
/* Adjust a filter's per-driver in-hardware counter by 'diff' and keep
 * the TCA_CLS_FLAGS_IN_HW flag and the block-wide offload count in
 * sync: the flag is set on the 0 -> nonzero transition and cleared on
 * the nonzero -> 0 transition.  Runs under tp->lock; the block cb_lock
 * must already be held by the caller.
 */
static void tc_cls_offload_cnt_update(struct tcf_block *block,
				      struct tcf_proto *tp, u32 *cnt,
				      u32 *flags, u32 diff, bool add)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	if (add) {
		/* Check BEFORE adding: a zero count means this is the first
		 * offload, so the flag/block counter must be bumped.
		 */
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		*cnt += diff;
	} else {
		/* Subtract FIRST, then check: a zero result means the last
		 * offload is gone.
		 */
		*cnt -= diff;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
	spin_unlock(&tp->lock);
}
3238*4882a593Smuzhiyun
3239*4882a593Smuzhiyun static void
tc_cls_offload_cnt_reset(struct tcf_block * block,struct tcf_proto * tp,u32 * cnt,u32 * flags)3240*4882a593Smuzhiyun tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3241*4882a593Smuzhiyun u32 *cnt, u32 *flags)
3242*4882a593Smuzhiyun {
3243*4882a593Smuzhiyun lockdep_assert_held(&block->cb_lock);
3244*4882a593Smuzhiyun
3245*4882a593Smuzhiyun spin_lock(&tp->lock);
3246*4882a593Smuzhiyun tcf_block_offload_dec(block, flags);
3247*4882a593Smuzhiyun *cnt = 0;
3248*4882a593Smuzhiyun spin_unlock(&tp->lock);
3249*4882a593Smuzhiyun }
3250*4882a593Smuzhiyun
3251*4882a593Smuzhiyun static int
__tc_setup_cb_call(struct tcf_block * block,enum tc_setup_type type,void * type_data,bool err_stop)3252*4882a593Smuzhiyun __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3253*4882a593Smuzhiyun void *type_data, bool err_stop)
3254*4882a593Smuzhiyun {
3255*4882a593Smuzhiyun struct flow_block_cb *block_cb;
3256*4882a593Smuzhiyun int ok_count = 0;
3257*4882a593Smuzhiyun int err;
3258*4882a593Smuzhiyun
3259*4882a593Smuzhiyun list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3260*4882a593Smuzhiyun err = block_cb->cb(type, type_data, block_cb->cb_priv);
3261*4882a593Smuzhiyun if (err) {
3262*4882a593Smuzhiyun if (err_stop)
3263*4882a593Smuzhiyun return err;
3264*4882a593Smuzhiyun } else {
3265*4882a593Smuzhiyun ok_count++;
3266*4882a593Smuzhiyun }
3267*4882a593Smuzhiyun }
3268*4882a593Smuzhiyun return ok_count;
3269*4882a593Smuzhiyun }
3270*4882a593Smuzhiyun
/* Call all block callbacks for the given setup type, taking the rtnl
 * lock first when any bound device requires it.  Returns the count of
 * successful callbacks or, with err_stop, the first error.
 */
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held)
{
	/* Snapshot whether any bound device needs rtnl; re-evaluated under
	 * cb_lock below since it can change concurrently.
	 */
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		/* A locked device appeared after the snapshot: restart,
		 * this time taking rtnl before cb_lock.
		 */
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);
3299*4882a593Smuzhiyun
/* Non-destructive filter add. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offloads counter. On failure,
 * previously offloaded filter is considered to be intact and offloads counter
 * is not decremented.
 *
 * Returns 0 on success or a negative errno.  Lock ordering follows the
 * block bind path: rtnl (when required) before block->cb_lock.
 */

int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	/* Let the classifier note the offload attempt before the counters
	 * are updated.
	 */
	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
					  ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_add);
3349*4882a593Smuzhiyun
/* Destructive filter replace. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offload counter. On failure,
 * previously offloaded filter is considered to be destroyed and offload counter
 * is decremented.
 *
 * The old filter's counters are reset before the callbacks run, so even a
 * failed replace leaves the old offload accounting torn down.
 * Returns 0 on success or a negative errno.
 */

int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	/* Tear down the old filter's offload state unconditionally. */
	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
					  new_flags, ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_replace);
3405*4882a593Smuzhiyun
/* Destroy filter and decrement block offload counter, if filter was previously
 * offloaded.
 *
 * The offload counters are reset regardless of callback results, since the
 * filter is going away either way.  Returns 0 on success or a negative
 * errno from the callbacks.
 */

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_destroy);
3443*4882a593Smuzhiyun
/* Replay a single filter to one newly (un)registered callback.  On success
 * the filter's in_hw accounting is adjusted in the direction given by @add.
 * A callback failure is only propagated when adding a filter that must be
 * in hardware (skip_sw); otherwise it is ignored.
 */
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (!err) {
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
					  add);
		return 0;
	}

	if (add && tc_skip_sw(*flags))
		return err;

	return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);
3462*4882a593Smuzhiyun
tcf_act_get_cookie(struct flow_action_entry * entry,const struct tc_action * act)3463*4882a593Smuzhiyun static int tcf_act_get_cookie(struct flow_action_entry *entry,
3464*4882a593Smuzhiyun const struct tc_action *act)
3465*4882a593Smuzhiyun {
3466*4882a593Smuzhiyun struct tc_cookie *cookie;
3467*4882a593Smuzhiyun int err = 0;
3468*4882a593Smuzhiyun
3469*4882a593Smuzhiyun rcu_read_lock();
3470*4882a593Smuzhiyun cookie = rcu_dereference(act->act_cookie);
3471*4882a593Smuzhiyun if (cookie) {
3472*4882a593Smuzhiyun entry->cookie = flow_action_cookie_create(cookie->data,
3473*4882a593Smuzhiyun cookie->len,
3474*4882a593Smuzhiyun GFP_ATOMIC);
3475*4882a593Smuzhiyun if (!entry->cookie)
3476*4882a593Smuzhiyun err = -ENOMEM;
3477*4882a593Smuzhiyun }
3478*4882a593Smuzhiyun rcu_read_unlock();
3479*4882a593Smuzhiyun return err;
3480*4882a593Smuzhiyun }
3481*4882a593Smuzhiyun
/* Release the cookie copy taken by tcf_act_get_cookie().
 * NOTE(review): called unconditionally from tc_cleanup_flow_action(), so
 * flow_action_cookie_destroy() is presumably NULL-safe - confirm.
 */
static void tcf_act_put_cookie(struct flow_action_entry *entry)
{
	flow_action_cookie_destroy(entry->cookie);
}
3486*4882a593Smuzhiyun
/* Release per-entry resources of @flow_action: the copied action cookies
 * and any destructor-managed objects wired up during translation (e.g.
 * mirred device refs, tunnel info copies, gate entry lists).
 */
void tc_cleanup_flow_action(struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action) {
		tcf_act_put_cookie(entry);
		if (entry->destructor)
			entry->destructor(entry->destructor_priv);
	}
}
EXPORT_SYMBOL(tc_cleanup_flow_action);
3499*4882a593Smuzhiyun
/* Resolve the mirred action's target device into @entry->dev.  The action's
 * ->get_dev() also supplies a destructor that drops the device reference;
 * it runs later from tc_cleanup_flow_action().  Without CONFIG_NET_CLS_ACT
 * this is a no-op.
 */
static void tcf_mirred_get_dev(struct flow_action_entry *entry,
			       const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
#endif
}
3510*4882a593Smuzhiyun
/* Destructor for the tunnel info copy made in tcf_tunnel_encap_get_tunnel(). */
static void tcf_tunnel_encap_put_tunnel(void *priv)
{
	struct ip_tunnel_info *tunnel = priv;

	kfree(tunnel);
}
3517*4882a593Smuzhiyun
/* Copy the tunnel_key action's metadata into the offload entry and wire up
 * a destructor to free the copy.  Returns -ENOMEM if the copy fails.
 */
static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->tunnel = tcf_tunnel_info_copy(act);
	if (!entry->tunnel)
		return -ENOMEM;
	entry->destructor = tcf_tunnel_encap_put_tunnel;
	entry->destructor_priv = entry->tunnel;
	return 0;
}
3528*4882a593Smuzhiyun
/* Fetch the sample action's psample group (and its release destructor) for
 * the offload entry.  Without CONFIG_NET_CLS_ACT this is a no-op.
 * NOTE(review): unlike tcf_mirred_get_dev(), the result is not checked for
 * NULL before being stored as destructor_priv - confirm the destructor
 * returned by ->get_psample_group() tolerates that.
 */
static void tcf_sample_get_group(struct flow_action_entry *entry,
				 const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->sample.psample_group =
		act->ops->get_psample_group(act, &entry->destructor);
	entry->destructor_priv = entry->sample.psample_group;
#endif
}
3538*4882a593Smuzhiyun
/* Destructor for the gate entry list obtained via tcf_gate_get_list(). */
static void tcf_gate_entry_destructor(void *priv)
{
	struct action_gate_entry *oe = priv;

	kfree(oe);
}
3545*4882a593Smuzhiyun
tcf_gate_get_entries(struct flow_action_entry * entry,const struct tc_action * act)3546*4882a593Smuzhiyun static int tcf_gate_get_entries(struct flow_action_entry *entry,
3547*4882a593Smuzhiyun const struct tc_action *act)
3548*4882a593Smuzhiyun {
3549*4882a593Smuzhiyun entry->gate.entries = tcf_gate_get_list(act);
3550*4882a593Smuzhiyun
3551*4882a593Smuzhiyun if (!entry->gate.entries)
3552*4882a593Smuzhiyun return -EINVAL;
3553*4882a593Smuzhiyun
3554*4882a593Smuzhiyun entry->destructor = tcf_gate_entry_destructor;
3555*4882a593Smuzhiyun entry->destructor_priv = entry->gate.entries;
3556*4882a593Smuzhiyun
3557*4882a593Smuzhiyun return 0;
3558*4882a593Smuzhiyun }
3559*4882a593Smuzhiyun
/* Map a UAPI TCA_ACT_HW_STATS_* value onto the flow offload enum.  The two
 * enums are numerically identical (see the BUILD_BUG_ONs in
 * tc_setup_flow_action()), so in-range nonzero values pass through as-is.
 */
static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
{
	/* Out-of-range input: warn once and fall back to "don't care". */
	if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
		return FLOW_ACTION_HW_STATS_DONT_CARE;

	/* Zero means no stats type was requested. */
	return hw_stats ? hw_stats : FLOW_ACTION_HW_STATS_DISABLED;
}
3569*4882a593Smuzhiyun
/* Translate a classifier's actions (tcf_exts) into the flow_action
 * representation consumed by hardware offload drivers.
 *
 * Each action is snapshotted under its tcfa_lock so its fields are read
 * consistently.  A pedit action expands into one entry per pedit key;
 * every other action maps to exactly one entry (tcf_exts_num_actions()
 * sizes the destination array the same way).
 *
 * Returns 0 on success, -EOPNOTSUPP for actions without a flow_action
 * equivalent, or -ENOMEM if copying an action cookie fails.  On error the
 * entries translated so far are released via tc_cleanup_flow_action().
 */
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts)
{
	struct tc_action *act;
	int i, j, k, err = 0;

	/* hw_stats values are passed through unchanged by tc_act_hw_stats(),
	 * so the UAPI and flow offload enums must stay numerically equal.
	 */
	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);

	if (!exts)
		return 0;

	j = 0;
	tcf_exts_for_each_action(i, act, exts) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		spin_lock_bh(&act->tcfa_lock);
		err = tcf_act_get_cookie(entry, act);
		if (err)
			goto err_out_locked;

		entry->hw_stats = tc_act_hw_stats(act->hw_stats);

		if (is_tcf_gact_ok(act)) {
			entry->id = FLOW_ACTION_ACCEPT;
		} else if (is_tcf_gact_shot(act)) {
			entry->id = FLOW_ACTION_DROP;
		} else if (is_tcf_gact_trap(act)) {
			entry->id = FLOW_ACTION_TRAP;
		} else if (is_tcf_gact_goto_chain(act)) {
			entry->id = FLOW_ACTION_GOTO;
			entry->chain_index = tcf_gact_goto_chain_index(act);
		} else if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_vlan(act)) {
			switch (tcf_vlan_action(act)) {
			case TCA_VLAN_ACT_PUSH:
				entry->id = FLOW_ACTION_VLAN_PUSH;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			case TCA_VLAN_ACT_POP:
				entry->id = FLOW_ACTION_VLAN_POP;
				break;
			case TCA_VLAN_ACT_MODIFY:
				entry->id = FLOW_ACTION_VLAN_MANGLE;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out_locked;
			}
		} else if (is_tcf_tunnel_set(act)) {
			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
			err = tcf_tunnel_encap_get_tunnel(entry, act);
			if (err)
				goto err_out_locked;
		} else if (is_tcf_tunnel_release(act)) {
			entry->id = FLOW_ACTION_TUNNEL_DECAP;
		} else if (is_tcf_pedit(act)) {
			/* pedit: one flow_action entry per pedit key. */
			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
				switch (tcf_pedit_cmd(act, k)) {
				case TCA_PEDIT_KEY_EX_CMD_SET:
					entry->id = FLOW_ACTION_MANGLE;
					break;
				case TCA_PEDIT_KEY_EX_CMD_ADD:
					entry->id = FLOW_ACTION_ADD;
					break;
				default:
					err = -EOPNOTSUPP;
					goto err_out_locked;
				}
				entry->mangle.htype = tcf_pedit_htype(act, k);
				entry->mangle.mask = tcf_pedit_mask(act, k);
				entry->mangle.val = tcf_pedit_val(act, k);
				entry->mangle.offset = tcf_pedit_offset(act, k);
				entry->hw_stats = tc_act_hw_stats(act->hw_stats);
				/* Advance to the entry for the next key. */
				entry = &flow_action->entries[++j];
			}
		} else if (is_tcf_csum(act)) {
			entry->id = FLOW_ACTION_CSUM;
			entry->csum_flags = tcf_csum_update_flags(act);
		} else if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else if (is_tcf_sample(act)) {
			entry->id = FLOW_ACTION_SAMPLE;
			entry->sample.trunc_size = tcf_sample_trunc_size(act);
			entry->sample.truncate = tcf_sample_truncate(act);
			entry->sample.rate = tcf_sample_rate(act);
			tcf_sample_get_group(entry, act);
		} else if (is_tcf_police(act)) {
			entry->id = FLOW_ACTION_POLICE;
			entry->police.burst = tcf_police_burst(act);
			entry->police.rate_bytes_ps =
				tcf_police_rate_bytes_ps(act);
			entry->police.mtu = tcf_police_tcfp_mtu(act);
			entry->police.index = act->tcfa_index;
		} else if (is_tcf_ct(act)) {
			entry->id = FLOW_ACTION_CT;
			entry->ct.action = tcf_ct_action(act);
			entry->ct.zone = tcf_ct_zone(act);
			entry->ct.flow_table = tcf_ct_ft(act);
		} else if (is_tcf_mpls(act)) {
			switch (tcf_mpls_action(act)) {
			case TCA_MPLS_ACT_PUSH:
				entry->id = FLOW_ACTION_MPLS_PUSH;
				entry->mpls_push.proto = tcf_mpls_proto(act);
				entry->mpls_push.label = tcf_mpls_label(act);
				entry->mpls_push.tc = tcf_mpls_tc(act);
				entry->mpls_push.bos = tcf_mpls_bos(act);
				entry->mpls_push.ttl = tcf_mpls_ttl(act);
				break;
			case TCA_MPLS_ACT_POP:
				entry->id = FLOW_ACTION_MPLS_POP;
				entry->mpls_pop.proto = tcf_mpls_proto(act);
				break;
			case TCA_MPLS_ACT_MODIFY:
				entry->id = FLOW_ACTION_MPLS_MANGLE;
				entry->mpls_mangle.label = tcf_mpls_label(act);
				entry->mpls_mangle.tc = tcf_mpls_tc(act);
				entry->mpls_mangle.bos = tcf_mpls_bos(act);
				entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out_locked;
			}
		} else if (is_tcf_skbedit_ptype(act)) {
			entry->id = FLOW_ACTION_PTYPE;
			entry->ptype = tcf_skbedit_ptype(act);
		} else if (is_tcf_skbedit_priority(act)) {
			entry->id = FLOW_ACTION_PRIORITY;
			entry->priority = tcf_skbedit_priority(act);
		} else if (is_tcf_gate(act)) {
			entry->id = FLOW_ACTION_GATE;
			entry->gate.index = tcf_gate_index(act);
			entry->gate.prio = tcf_gate_prio(act);
			entry->gate.basetime = tcf_gate_basetime(act);
			entry->gate.cycletime = tcf_gate_cycletime(act);
			entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
			entry->gate.num_entries = tcf_gate_num_entries(act);
			err = tcf_gate_get_entries(entry, act);
			if (err)
				goto err_out_locked;
		} else {
			err = -EOPNOTSUPP;
			goto err_out_locked;
		}
		spin_unlock_bh(&act->tcfa_lock);

		/* pedit already advanced j once per key above. */
		if (!is_tcf_pedit(act))
			j++;
	}

err_out:
	if (err)
		tc_cleanup_flow_action(flow_action);

	return err;
err_out_locked:
	/* Error taken while still holding the action's lock. */
	spin_unlock_bh(&act->tcfa_lock);
	goto err_out;
}
EXPORT_SYMBOL(tc_setup_flow_action);
3750*4882a593Smuzhiyun
tcf_exts_num_actions(struct tcf_exts * exts)3751*4882a593Smuzhiyun unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3752*4882a593Smuzhiyun {
3753*4882a593Smuzhiyun unsigned int num_acts = 0;
3754*4882a593Smuzhiyun struct tc_action *act;
3755*4882a593Smuzhiyun int i;
3756*4882a593Smuzhiyun
3757*4882a593Smuzhiyun tcf_exts_for_each_action(i, act, exts) {
3758*4882a593Smuzhiyun if (is_tcf_pedit(act))
3759*4882a593Smuzhiyun num_acts += tcf_pedit_nkeys(act);
3760*4882a593Smuzhiyun else
3761*4882a593Smuzhiyun num_acts++;
3762*4882a593Smuzhiyun }
3763*4882a593Smuzhiyun return num_acts;
3764*4882a593Smuzhiyun }
3765*4882a593Smuzhiyun EXPORT_SYMBOL(tcf_exts_num_actions);
3766*4882a593Smuzhiyun
3767*4882a593Smuzhiyun #ifdef CONFIG_NET_CLS_ACT
/* Extract a qevent block index from its netlink attribute into
 * *p_block_index.  Zero is not a valid block number; it is rejected with
 * -EINVAL and an extack message.
 */
static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
					u32 *p_block_index,
					struct netlink_ext_ack *extack)
{
	*p_block_index = nla_get_u32(block_index_attr);
	if (*p_block_index)
		return 0;

	NL_SET_ERR_MSG(extack, "Block number may not be zero");
	return -EINVAL;
}
3780*4882a593Smuzhiyun
tcf_qevent_init(struct tcf_qevent * qe,struct Qdisc * sch,enum flow_block_binder_type binder_type,struct nlattr * block_index_attr,struct netlink_ext_ack * extack)3781*4882a593Smuzhiyun int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3782*4882a593Smuzhiyun enum flow_block_binder_type binder_type,
3783*4882a593Smuzhiyun struct nlattr *block_index_attr,
3784*4882a593Smuzhiyun struct netlink_ext_ack *extack)
3785*4882a593Smuzhiyun {
3786*4882a593Smuzhiyun u32 block_index;
3787*4882a593Smuzhiyun int err;
3788*4882a593Smuzhiyun
3789*4882a593Smuzhiyun if (!block_index_attr)
3790*4882a593Smuzhiyun return 0;
3791*4882a593Smuzhiyun
3792*4882a593Smuzhiyun err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3793*4882a593Smuzhiyun if (err)
3794*4882a593Smuzhiyun return err;
3795*4882a593Smuzhiyun
3796*4882a593Smuzhiyun if (!block_index)
3797*4882a593Smuzhiyun return 0;
3798*4882a593Smuzhiyun
3799*4882a593Smuzhiyun qe->info.binder_type = binder_type;
3800*4882a593Smuzhiyun qe->info.chain_head_change = tcf_chain_head_change_dflt;
3801*4882a593Smuzhiyun qe->info.chain_head_change_priv = &qe->filter_chain;
3802*4882a593Smuzhiyun qe->info.block_index = block_index;
3803*4882a593Smuzhiyun
3804*4882a593Smuzhiyun return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3805*4882a593Smuzhiyun }
3806*4882a593Smuzhiyun EXPORT_SYMBOL(tcf_qevent_init);
3807*4882a593Smuzhiyun
/* Undo tcf_qevent_init(): release the block reference, but only if a block
 * was actually bound (block_index != 0).
 */
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
	if (qe->info.block_index)
		tcf_block_put_ext(qe->block, sch, &qe->info);
}
EXPORT_SYMBOL(tcf_qevent_destroy);
3814*4882a593Smuzhiyun
tcf_qevent_validate_change(struct tcf_qevent * qe,struct nlattr * block_index_attr,struct netlink_ext_ack * extack)3815*4882a593Smuzhiyun int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3816*4882a593Smuzhiyun struct netlink_ext_ack *extack)
3817*4882a593Smuzhiyun {
3818*4882a593Smuzhiyun u32 block_index;
3819*4882a593Smuzhiyun int err;
3820*4882a593Smuzhiyun
3821*4882a593Smuzhiyun if (!block_index_attr)
3822*4882a593Smuzhiyun return 0;
3823*4882a593Smuzhiyun
3824*4882a593Smuzhiyun err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3825*4882a593Smuzhiyun if (err)
3826*4882a593Smuzhiyun return err;
3827*4882a593Smuzhiyun
3828*4882a593Smuzhiyun /* Bounce newly-configured block or change in block. */
3829*4882a593Smuzhiyun if (block_index != qe->info.block_index) {
3830*4882a593Smuzhiyun NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3831*4882a593Smuzhiyun return -EINVAL;
3832*4882a593Smuzhiyun }
3833*4882a593Smuzhiyun
3834*4882a593Smuzhiyun return 0;
3835*4882a593Smuzhiyun }
3836*4882a593Smuzhiyun EXPORT_SYMBOL(tcf_qevent_validate_change);
3837*4882a593Smuzhiyun
/* Run the qevent's filter chain on @skb.  Returns the skb when it should
 * continue on its way, or NULL when it was consumed (dropped, stolen or
 * redirected), in which case *ret is set to the __NET_XMIT_* code for the
 * caller to propagate.  Uses rcu_dereference_bh(), so it expects to run in
 * a BH-protected (qdisc) context.
 */
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret)
{
	struct tcf_result cl_res;
	struct tcf_proto *fl;

	/* No block bound to this qevent - nothing to classify against. */
	if (!qe->info.block_index)
		return skb;

	fl = rcu_dereference_bh(qe->filter_chain);

	switch (tcf_classify(skb, fl, &cl_res, false)) {
	case TC_ACT_SHOT:
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_BYPASS;
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	case TC_ACT_REDIRECT:
		skb_do_redirect(skb);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	}

	return skb;
}
EXPORT_SYMBOL(tcf_qevent_handle);
3870*4882a593Smuzhiyun
tcf_qevent_dump(struct sk_buff * skb,int attr_name,struct tcf_qevent * qe)3871*4882a593Smuzhiyun int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3872*4882a593Smuzhiyun {
3873*4882a593Smuzhiyun if (!qe->info.block_index)
3874*4882a593Smuzhiyun return 0;
3875*4882a593Smuzhiyun return nla_put_u32(skb, attr_name, qe->info.block_index);
3876*4882a593Smuzhiyun }
3877*4882a593Smuzhiyun EXPORT_SYMBOL(tcf_qevent_dump);
3878*4882a593Smuzhiyun #endif
3879*4882a593Smuzhiyun
/* Per-netns setup: initialize the IDR (and its lock) used to track this
 * namespace's tcf state.  Cannot fail.
 */
static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}
3888*4882a593Smuzhiyun
/* Per-netns teardown: release the IDR initialized in tcf_net_init(). */
static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}
3895*4882a593Smuzhiyun
3896*4882a593Smuzhiyun static struct pernet_operations tcf_net_ops = {
3897*4882a593Smuzhiyun .init = tcf_net_init,
3898*4882a593Smuzhiyun .exit = tcf_net_exit,
3899*4882a593Smuzhiyun .id = &tcf_net_id,
3900*4882a593Smuzhiyun .size = sizeof(struct tcf_net),
3901*4882a593Smuzhiyun };
3902*4882a593Smuzhiyun
tc_filter_init(void)3903*4882a593Smuzhiyun static int __init tc_filter_init(void)
3904*4882a593Smuzhiyun {
3905*4882a593Smuzhiyun int err;
3906*4882a593Smuzhiyun
3907*4882a593Smuzhiyun tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3908*4882a593Smuzhiyun if (!tc_filter_wq)
3909*4882a593Smuzhiyun return -ENOMEM;
3910*4882a593Smuzhiyun
3911*4882a593Smuzhiyun err = register_pernet_subsys(&tcf_net_ops);
3912*4882a593Smuzhiyun if (err)
3913*4882a593Smuzhiyun goto err_register_pernet_subsys;
3914*4882a593Smuzhiyun
3915*4882a593Smuzhiyun rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3916*4882a593Smuzhiyun RTNL_FLAG_DOIT_UNLOCKED);
3917*4882a593Smuzhiyun rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3918*4882a593Smuzhiyun RTNL_FLAG_DOIT_UNLOCKED);
3919*4882a593Smuzhiyun rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3920*4882a593Smuzhiyun tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3921*4882a593Smuzhiyun rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3922*4882a593Smuzhiyun rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3923*4882a593Smuzhiyun rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3924*4882a593Smuzhiyun tc_dump_chain, 0);
3925*4882a593Smuzhiyun
3926*4882a593Smuzhiyun return 0;
3927*4882a593Smuzhiyun
3928*4882a593Smuzhiyun err_register_pernet_subsys:
3929*4882a593Smuzhiyun destroy_workqueue(tc_filter_wq);
3930*4882a593Smuzhiyun return err;
3931*4882a593Smuzhiyun }
3932*4882a593Smuzhiyun
3933*4882a593Smuzhiyun subsys_initcall(tc_filter_init);
3934