1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef __NET_PKT_CLS_H
3*4882a593Smuzhiyun #define __NET_PKT_CLS_H
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun #include <linux/pkt_cls.h>
6*4882a593Smuzhiyun #include <linux/workqueue.h>
7*4882a593Smuzhiyun #include <net/sch_generic.h>
8*4882a593Smuzhiyun #include <net/act_api.h>
9*4882a593Smuzhiyun #include <net/net_namespace.h>
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun /* TC action not accessible from user space */
12*4882a593Smuzhiyun #define TC_ACT_CONSUMED (TC_ACT_VALUE_MAX + 1)
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun /* Basic packet classifier frontend definitions. */
15*4882a593Smuzhiyun
/* Iteration state handed to a classifier's walk operation when
 * enumerating all filters of a classifier instance.
 */
struct tcf_walker {
	int	stop;		/* set non-zero (typically by ->fn) to abort the walk */
	int	skip;		/* number of leading entries to skip before calling ->fn */
	int	count;		/* entries visited so far */
	bool	nonempty;	/* set when the walk saw at least one entry */
	unsigned long cookie;
	/* Per-entry callback; a non-zero return stops the walk. */
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
24*4882a593Smuzhiyun
/* Register/unregister a classifier type ("kind") with the TC core. */
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

/* Extended binding information for tcf_block_get_ext()/tcf_block_put_ext(). */
struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	/* Invoked when the head of the filter chain changes;
	 * chain_head_change_priv is passed back verbatim.
	 */
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

/* State for a qdisc event ("qevent") binding point: the bound block,
 * its binding info and the RCU-protected filter chain head.
 */
struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};

struct tcf_block_cb;
/* Defer @func via RCU work; the bool return reports whether the work
 * was newly queued — see the implementation in cls_api.c.
 */
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
43*4882a593Smuzhiyun
#ifdef CONFIG_NET_CLS
/* Chain lookup/refcounting used from actions (e.g. goto_chain). */
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
/* Iterators over a block's chains and a chain's protos; pass the
 * previous element to continue (NULL starts the iteration).
 */
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
/* Attach/detach a filter block to/from a qdisc; the _ext variants take
 * extended binding info (see struct tcf_block_ext_info).
 */
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
tcf_block_shared(struct tcf_block * block)63*4882a593Smuzhiyun static inline bool tcf_block_shared(struct tcf_block *block)
64*4882a593Smuzhiyun {
65*4882a593Smuzhiyun return block->index;
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun
tcf_block_non_null_shared(struct tcf_block * block)68*4882a593Smuzhiyun static inline bool tcf_block_non_null_shared(struct tcf_block *block)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun return block && block->index;
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun
/* Return the qdisc owning @block. Only meaningful for non-shared
 * blocks — shared blocks have no single owning qdisc, hence the WARN.
 */
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

/* Run the classifier chain starting at @tp on @skb and return a
 * TC_ACT_* verdict; the _ingress variant additionally takes the
 * ingress block (see cls_api.c for the exact semantics).
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp, struct tcf_result *res,
			 bool compat_mode);
#else /* !CONFIG_NET_CLS */

/* Stubs for kernels built without classifier support: block attach and
 * detach are no-ops and classification always returns TC_ACT_UNSPEC
 * ("continue processing").
 */
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

static inline int tcf_classify_ingress(struct sk_buff *skb,
				       const struct tcf_block *ingress_block,
				       const struct tcf_proto *tp,
				       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif /* CONFIG_NET_CLS */
156*4882a593Smuzhiyun
/* Atomically install @cl as the class bound to *@clp and return the
 * previously bound class (0 if none was bound).
 */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}
162*4882a593Smuzhiyun
163*4882a593Smuzhiyun static inline void
__tcf_bind_filter(struct Qdisc * q,struct tcf_result * r,unsigned long base)164*4882a593Smuzhiyun __tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
165*4882a593Smuzhiyun {
166*4882a593Smuzhiyun unsigned long cl;
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
169*4882a593Smuzhiyun cl = __cls_set_class(&r->class, cl);
170*4882a593Smuzhiyun if (cl)
171*4882a593Smuzhiyun q->ops->cl_ops->unbind_tcf(q, cl);
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun static inline void
tcf_bind_filter(struct tcf_proto * tp,struct tcf_result * r,unsigned long base)175*4882a593Smuzhiyun tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
176*4882a593Smuzhiyun {
177*4882a593Smuzhiyun struct Qdisc *q = tp->chain->block->q;
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun /* Check q as it is not set for shared blocks. In that case,
180*4882a593Smuzhiyun * setting class is not supported.
181*4882a593Smuzhiyun */
182*4882a593Smuzhiyun if (!q)
183*4882a593Smuzhiyun return;
184*4882a593Smuzhiyun sch_tree_lock(q);
185*4882a593Smuzhiyun __tcf_bind_filter(q, r, base);
186*4882a593Smuzhiyun sch_tree_unlock(q);
187*4882a593Smuzhiyun }
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun static inline void
__tcf_unbind_filter(struct Qdisc * q,struct tcf_result * r)190*4882a593Smuzhiyun __tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
191*4882a593Smuzhiyun {
192*4882a593Smuzhiyun unsigned long cl;
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun if ((cl = __cls_set_class(&r->class, 0)) != 0)
195*4882a593Smuzhiyun q->ops->cl_ops->unbind_tcf(q, cl);
196*4882a593Smuzhiyun }
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun static inline void
tcf_unbind_filter(struct tcf_proto * tp,struct tcf_result * r)199*4882a593Smuzhiyun tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
200*4882a593Smuzhiyun {
201*4882a593Smuzhiyun struct Qdisc *q = tp->chain->block->q;
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun if (!q)
204*4882a593Smuzhiyun return;
205*4882a593Smuzhiyun __tcf_unbind_filter(q, r);
206*4882a593Smuzhiyun }
207*4882a593Smuzhiyun
/* Container for the actions attached to a filter plus the TLV types a
 * classifier uses to export them.
 */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;	/* netns the extensions were created in */
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

/* Initialize @exts, allocating room for up to TCA_ACT_MAX_PRIO action
 * pointers. Returns 0 on success or -ENOMEM; on failure @exts->action
 * and @exts->police are left unset.
 */
static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
238*4882a593Smuzhiyun
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	/* maybe_get_net() yields NULL when the netns refcount already
	 * dropped to zero, i.e. the namespace is going away.
	 */
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

/* Drop the netns reference taken by a successful tcf_exts_get_net(). */
static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

/* Iterate over the actions attached to @exts; stops at the first NULL
 * slot or after TCA_ACT_MAX_PRIO entries. Without CONFIG_NET_CLS_ACT
 * the loop body never executes.
 */
#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun static inline void
tcf_exts_stats_update(const struct tcf_exts * exts,u64 bytes,u64 packets,u64 drops,u64 lastuse,u8 used_hw_stats,bool used_hw_stats_valid)270*4882a593Smuzhiyun tcf_exts_stats_update(const struct tcf_exts *exts,
271*4882a593Smuzhiyun u64 bytes, u64 packets, u64 drops, u64 lastuse,
272*4882a593Smuzhiyun u8 used_hw_stats, bool used_hw_stats_valid)
273*4882a593Smuzhiyun {
274*4882a593Smuzhiyun #ifdef CONFIG_NET_CLS_ACT
275*4882a593Smuzhiyun int i;
276*4882a593Smuzhiyun
277*4882a593Smuzhiyun preempt_disable();
278*4882a593Smuzhiyun
279*4882a593Smuzhiyun for (i = 0; i < exts->nr_actions; i++) {
280*4882a593Smuzhiyun struct tc_action *a = exts->actions[i];
281*4882a593Smuzhiyun
282*4882a593Smuzhiyun tcf_action_stats_update(a, bytes, packets, drops,
283*4882a593Smuzhiyun lastuse, true);
284*4882a593Smuzhiyun a->used_hw_stats = used_hw_stats;
285*4882a593Smuzhiyun a->used_hw_stats_valid = used_hw_stats_valid;
286*4882a593Smuzhiyun }
287*4882a593Smuzhiyun
288*4882a593Smuzhiyun preempt_enable();
289*4882a593Smuzhiyun #endif
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun
/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	/* No action support compiled in, so there is never an action. */
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	/* Falls through only when CONFIG_NET_CLS_ACT is off. */
	return TC_ACT_OK;
}

/* Parse and validate action attributes into @exts, and helpers to
 * destroy, move and dump them (implemented in cls_api.c).
 */
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
337*4882a593Smuzhiyun
/**
 * struct tcf_pkt_info - packet information
 * @ptr: pointer into the packet data the ematch evaluation works from
 * @nexthdr: next-header state tracked during evaluation (see em_* modules)
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};
345*4882a593Smuzhiyun
#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace the ematch belongs to
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};
367*4882a593Smuzhiyun
/* A container ematch has no ops of its own; it only groups others. */
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

/* Non-zero when TCF_EM_SIMPLE is set (see uapi pkt_cls.h). */
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

/* Non-zero when the match result must be inverted (TCF_EM_INVERT). */
static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

/* True for the last ematch in a tree (relation field is REL_END). */
static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}
387*4882a593Smuzhiyun
tcf_em_early_end(struct tcf_ematch * em,int result)388*4882a593Smuzhiyun static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
389*4882a593Smuzhiyun {
390*4882a593Smuzhiyun if (tcf_em_last_match(em))
391*4882a593Smuzhiyun return 1;
392*4882a593Smuzhiyun
393*4882a593Smuzhiyun if (result == 0 && em->flags & TCF_EM_REL_AND)
394*4882a593Smuzhiyun return 1;
395*4882a593Smuzhiyun
396*4882a593Smuzhiyun if (result != 0 && em->flags & TCF_EM_REL_OR)
397*4882a593Smuzhiyun return 1;
398*4882a593Smuzhiyun
399*4882a593Smuzhiyun return 0;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

/* Registration and tree build/evaluate/dump entry points, implemented
 * in net/sched/ematch.c.
 */
int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);
447*4882a593Smuzhiyun
448*4882a593Smuzhiyun /**
449*4882a593Smuzhiyun * tcf_em_tree_match - evaulate an ematch tree
450*4882a593Smuzhiyun *
451*4882a593Smuzhiyun * @skb: socket buffer of the packet in question
452*4882a593Smuzhiyun * @tree: ematch tree to be used for evaluation
453*4882a593Smuzhiyun * @info: packet information examined by classifier
454*4882a593Smuzhiyun *
455*4882a593Smuzhiyun * This function matches @skb against the ematch tree in @tree by going
456*4882a593Smuzhiyun * through all ematches respecting their logic relations returning
457*4882a593Smuzhiyun * as soon as the result is obvious.
458*4882a593Smuzhiyun *
459*4882a593Smuzhiyun * Returns 1 if the ematch tree as-one matches, no ematches are configured
460*4882a593Smuzhiyun * or ematch is not enabled in the kernel, otherwise 0 is returned.
461*4882a593Smuzhiyun */
tcf_em_tree_match(struct sk_buff * skb,struct tcf_ematch_tree * tree,struct tcf_pkt_info * info)462*4882a593Smuzhiyun static inline int tcf_em_tree_match(struct sk_buff *skb,
463*4882a593Smuzhiyun struct tcf_ematch_tree *tree,
464*4882a593Smuzhiyun struct tcf_pkt_info *info)
465*4882a593Smuzhiyun {
466*4882a593Smuzhiyun if (tree->hdr.nmatches)
467*4882a593Smuzhiyun return __tcf_em_tree_match(skb, tree, info);
468*4882a593Smuzhiyun else
469*4882a593Smuzhiyun return 1;
470*4882a593Smuzhiyun }
471*4882a593Smuzhiyun
#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

/* Ematch support compiled out: trees are empty and always "match". */
struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */
485*4882a593Smuzhiyun
tcf_get_base_ptr(struct sk_buff * skb,int layer)486*4882a593Smuzhiyun static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
487*4882a593Smuzhiyun {
488*4882a593Smuzhiyun switch (layer) {
489*4882a593Smuzhiyun case TCF_LAYER_LINK:
490*4882a593Smuzhiyun return skb_mac_header(skb);
491*4882a593Smuzhiyun case TCF_LAYER_NETWORK:
492*4882a593Smuzhiyun return skb_network_header(skb);
493*4882a593Smuzhiyun case TCF_LAYER_TRANSPORT:
494*4882a593Smuzhiyun return skb_transport_header(skb);
495*4882a593Smuzhiyun }
496*4882a593Smuzhiyun
497*4882a593Smuzhiyun return NULL;
498*4882a593Smuzhiyun }
499*4882a593Smuzhiyun
/* Check that [ptr, ptr + len) lies entirely within the linear data of
 * @skb. The final ptr <= ptr + len comparison guards against @len
 * wrapping the pointer past the end of the address space.
 * NOTE(review): pointer overflow is technically undefined behaviour in
 * C; kept as-is to preserve the original semantics.
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
507*4882a593Smuzhiyun
508*4882a593Smuzhiyun static inline int
tcf_change_indev(struct net * net,struct nlattr * indev_tlv,struct netlink_ext_ack * extack)509*4882a593Smuzhiyun tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
510*4882a593Smuzhiyun struct netlink_ext_ack *extack)
511*4882a593Smuzhiyun {
512*4882a593Smuzhiyun char indev[IFNAMSIZ];
513*4882a593Smuzhiyun struct net_device *dev;
514*4882a593Smuzhiyun
515*4882a593Smuzhiyun if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
516*4882a593Smuzhiyun NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
517*4882a593Smuzhiyun "Interface name too long");
518*4882a593Smuzhiyun return -EINVAL;
519*4882a593Smuzhiyun }
520*4882a593Smuzhiyun dev = __dev_get_by_name(net, indev);
521*4882a593Smuzhiyun if (!dev) {
522*4882a593Smuzhiyun NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
523*4882a593Smuzhiyun "Network device not found");
524*4882a593Smuzhiyun return -ENODEV;
525*4882a593Smuzhiyun }
526*4882a593Smuzhiyun return dev->ifindex;
527*4882a593Smuzhiyun }
528*4882a593Smuzhiyun
529*4882a593Smuzhiyun static inline bool
tcf_match_indev(struct sk_buff * skb,int ifindex)530*4882a593Smuzhiyun tcf_match_indev(struct sk_buff *skb, int ifindex)
531*4882a593Smuzhiyun {
532*4882a593Smuzhiyun if (!ifindex)
533*4882a593Smuzhiyun return true;
534*4882a593Smuzhiyun if (!skb->skb_iif)
535*4882a593Smuzhiyun return false;
536*4882a593Smuzhiyun return ifindex == skb->skb_iif;
537*4882a593Smuzhiyun }
538*4882a593Smuzhiyun
/* Translate the actions of @exts into a flow_action representation for
 * hardware offload; tc_cleanup_flow_action() releases what the
 * translation acquired.
 */
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
void tc_cleanup_flow_action(struct flow_action *flow_action);

/* Invoke the offload callbacks registered on @block; the add/replace/
 * destroy variants also maintain the filter's in_hw count and flags
 * (see cls_api.c for the exact semantics).
 */
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
561*4882a593Smuzhiyun
#ifdef CONFIG_NET_CLS_ACT
/* Qevent management: bind/unbind a block to a qdisc event point,
 * validate netlink changes against it, run its filters on packets
 * handed to tcf_qevent_handle(), and dump the binding.
 */
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack);
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack);
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret);
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
/* No-op qevent stubs for kernels without CONFIG_NET_CLS_ACT; packets
 * pass through tcf_qevent_handle() unchanged.
 */
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
				  enum flow_block_binder_type binder_type,
				  struct nlattr *block_index_attr,
				  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
					     struct netlink_ext_ack *extack)
{
	return 0;
}

static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
		  struct sk_buff **to_free, int *ret)
{
	return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	return 0;
}
#endif
604*4882a593Smuzhiyun
/* Offload descriptor for a u32 key node (an individual filter entry). */
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

/* Offload descriptor for a u32 hash node (a hash table). */
struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

/* Payload of a TC_SETUP_CLSU32 offload request; @command selects which
 * union member is valid.
 */
struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
640*4882a593Smuzhiyun
tc_can_offload(const struct net_device * dev)641*4882a593Smuzhiyun static inline bool tc_can_offload(const struct net_device *dev)
642*4882a593Smuzhiyun {
643*4882a593Smuzhiyun return dev->features & NETIF_F_HW_TC;
644*4882a593Smuzhiyun }
645*4882a593Smuzhiyun
tc_can_offload_extack(const struct net_device * dev,struct netlink_ext_ack * extack)646*4882a593Smuzhiyun static inline bool tc_can_offload_extack(const struct net_device *dev,
647*4882a593Smuzhiyun struct netlink_ext_ack *extack)
648*4882a593Smuzhiyun {
649*4882a593Smuzhiyun bool can = tc_can_offload(dev);
650*4882a593Smuzhiyun
651*4882a593Smuzhiyun if (!can)
652*4882a593Smuzhiyun NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
653*4882a593Smuzhiyun
654*4882a593Smuzhiyun return can;
655*4882a593Smuzhiyun }
656*4882a593Smuzhiyun
657*4882a593Smuzhiyun static inline bool
tc_cls_can_offload_and_chain0(const struct net_device * dev,struct flow_cls_common_offload * common)658*4882a593Smuzhiyun tc_cls_can_offload_and_chain0(const struct net_device *dev,
659*4882a593Smuzhiyun struct flow_cls_common_offload *common)
660*4882a593Smuzhiyun {
661*4882a593Smuzhiyun if (!tc_can_offload_extack(dev, common->extack))
662*4882a593Smuzhiyun return false;
663*4882a593Smuzhiyun if (common->chain_index) {
664*4882a593Smuzhiyun NL_SET_ERR_MSG(common->extack,
665*4882a593Smuzhiyun "Driver supports only offload of chain 0");
666*4882a593Smuzhiyun return false;
667*4882a593Smuzhiyun }
668*4882a593Smuzhiyun return true;
669*4882a593Smuzhiyun }
670*4882a593Smuzhiyun
tc_skip_hw(u32 flags)671*4882a593Smuzhiyun static inline bool tc_skip_hw(u32 flags)
672*4882a593Smuzhiyun {
673*4882a593Smuzhiyun return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
674*4882a593Smuzhiyun }
675*4882a593Smuzhiyun
tc_skip_sw(u32 flags)676*4882a593Smuzhiyun static inline bool tc_skip_sw(u32 flags)
677*4882a593Smuzhiyun {
678*4882a593Smuzhiyun return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
679*4882a593Smuzhiyun }
680*4882a593Smuzhiyun
681*4882a593Smuzhiyun /* SKIP_HW and SKIP_SW are mutually exclusive flags. */
tc_flags_valid(u32 flags)682*4882a593Smuzhiyun static inline bool tc_flags_valid(u32 flags)
683*4882a593Smuzhiyun {
684*4882a593Smuzhiyun if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
685*4882a593Smuzhiyun TCA_CLS_FLAGS_VERBOSE))
686*4882a593Smuzhiyun return false;
687*4882a593Smuzhiyun
688*4882a593Smuzhiyun flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
689*4882a593Smuzhiyun if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
690*4882a593Smuzhiyun return false;
691*4882a593Smuzhiyun
692*4882a593Smuzhiyun return true;
693*4882a593Smuzhiyun }
694*4882a593Smuzhiyun
tc_in_hw(u32 flags)695*4882a593Smuzhiyun static inline bool tc_in_hw(u32 flags)
696*4882a593Smuzhiyun {
697*4882a593Smuzhiyun return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
698*4882a593Smuzhiyun }
699*4882a593Smuzhiyun
/* Fill the common part of a classifier offload request from the tcf_proto.
 * The extack is only handed to the driver when failures should be reported
 * to the user: skip_sw (hardware is mandatory) or the verbose flag.
 */
static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	/* User-visible priority lives in the upper 16 bits of tp->prio. */
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
711*4882a593Smuzhiyun
712*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
tc_skb_ext_alloc(struct sk_buff * skb)713*4882a593Smuzhiyun static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
714*4882a593Smuzhiyun {
715*4882a593Smuzhiyun struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
716*4882a593Smuzhiyun
717*4882a593Smuzhiyun if (tc_skb_ext)
718*4882a593Smuzhiyun memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
719*4882a593Smuzhiyun return tc_skb_ext;
720*4882a593Smuzhiyun }
721*4882a593Smuzhiyun #endif
722*4882a593Smuzhiyun
/* Commands for offloading the matchall classifier to drivers. */
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

/* Matchall offload request; cookie identifies the rule across calls. */
struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};
736*4882a593Smuzhiyun
/* Commands for offloading the cls_bpf classifier to drivers. */
enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

/* cls_bpf offload request; prog is the program being installed and
 * oldprog the one it replaces.
 */
struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};
751*4882a593Smuzhiyun
/* Offload description of an mqprio qdisc configuration.  min_rate and
 * max_rate are indexed per queue (up to TC_QOPT_MAX_QUEUE).
 */
struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
761*4882a593Smuzhiyun
/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
	u8 *data;		/* opaque user-supplied bytes, len bytes long */
	u32 len;
	struct rcu_head rcu;	/* for RCU-deferred freeing */
};
770*4882a593Smuzhiyun
/* Stats pointers a qdisc offload request hands to the driver to fill. */
struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

/* Commands for offloading the mq qdisc. */
enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

/* Parameters for TC_MQ_GRAFT: attach child_handle at a device queue. */
struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

/* mq qdisc offload request; the valid union member depends on command. */
struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};
796*4882a593Smuzhiyun
/* Commands for offloading the RED qdisc. */
enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

/* RED parameters handed to the driver on TC_RED_REPLACE. */
struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

/* RED qdisc offload request; the valid union member depends on command. */
struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;		/* TC_RED_REPLACE */
		struct tc_qopt_offload_stats stats;		/* TC_RED_STATS */
		struct red_stats *xstats;			/* TC_RED_XSTATS */
		u32 child_handle;				/* TC_RED_GRAFT */
	};
};
827*4882a593Smuzhiyun
/* Commands for offloading the GRED qdisc. */
enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

/* Per-virtual-queue GRED parameters (one entry per DP). */
struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

/* GRED parameters handed to the driver on TC_GRED_REPLACE. */
struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

/* Per-DP statistics filled by the driver on TC_GRED_STATS. */
struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

/* GRED qdisc offload request; the valid union member depends on command. */
struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};
871*4882a593Smuzhiyun
/* Commands for offloading the prio qdisc. */
enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

/* prio parameters handed to the driver on TC_PRIO_REPLACE. */
struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

/* Parameters for TC_PRIO_GRAFT: attach child_handle at a band. */
struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

/* prio qdisc offload request; the valid union member depends on command. */
struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};
903*4882a593Smuzhiyun
/* Commands for notifying drivers about the root qdisc. */
enum tc_root_command {
	TC_ROOT_GRAFT,
};

/* Root qdisc offload notification; ingress marks the ingress/clsact side. */
struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};
913*4882a593Smuzhiyun
/* Commands for offloading the ETS qdisc. */
enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

/* ETS parameters handed to the driver on TC_ETS_REPLACE. */
struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

/* Parameters for TC_ETS_GRAFT: attach child_handle at a band. */
struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

/* ETS qdisc offload request; the valid union member depends on command. */
struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};
944*4882a593Smuzhiyun
/* Commands for offloading the TBF qdisc. */
enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
};

/* TBF parameters handed to the driver on TC_TBF_REPLACE. */
struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

/* TBF qdisc offload request; the valid union member depends on command. */
struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
	};
};
966*4882a593Smuzhiyun
/* Commands for offloading the FIFO qdisc. */
enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

/* FIFO qdisc offload request; only stats carry a payload. */
struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;	/* TC_FIFO_STATS */
	};
};
981*4882a593Smuzhiyun
982*4882a593Smuzhiyun #endif
983