xref: /OK3568_Linux_fs/kernel/net/sched/cls_flow.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * net/sched/cls_flow.c		Generic flow classifier
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/kernel.h>
9*4882a593Smuzhiyun #include <linux/init.h>
10*4882a593Smuzhiyun #include <linux/list.h>
11*4882a593Smuzhiyun #include <linux/jhash.h>
12*4882a593Smuzhiyun #include <linux/random.h>
13*4882a593Smuzhiyun #include <linux/pkt_cls.h>
14*4882a593Smuzhiyun #include <linux/skbuff.h>
15*4882a593Smuzhiyun #include <linux/in.h>
16*4882a593Smuzhiyun #include <linux/ip.h>
17*4882a593Smuzhiyun #include <linux/ipv6.h>
18*4882a593Smuzhiyun #include <linux/if_vlan.h>
19*4882a593Smuzhiyun #include <linux/slab.h>
20*4882a593Smuzhiyun #include <linux/module.h>
21*4882a593Smuzhiyun #include <net/inet_sock.h>
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #include <net/pkt_cls.h>
24*4882a593Smuzhiyun #include <net/ip.h>
25*4882a593Smuzhiyun #include <net/route.h>
26*4882a593Smuzhiyun #include <net/flow_dissector.h>
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_NF_CONNTRACK)
29*4882a593Smuzhiyun #include <net/netfilter/nf_conntrack.h>
30*4882a593Smuzhiyun #endif
31*4882a593Smuzhiyun 
/* Per-classifier list of flow filters; freed via RCU once readers drain. */
struct flow_head {
	struct list_head	filters;	/* RCU-protected list of flow_filter */
	struct rcu_head		rcu;		/* for deferred kfree_rcu() */
};
36*4882a593Smuzhiyun 
/* One "flow" filter instance, attached to a flow_head. */
struct flow_filter {
	struct list_head	list;		/* linkage in flow_head::filters */
	struct tcf_exts		exts;		/* attached actions/policer */
	struct tcf_ematch_tree	ematches;	/* optional extended-match tree */
	struct tcf_proto	*tp;		/* owning classifier instance */
	struct timer_list	perturb_timer;	/* periodically re-seeds hashrnd */
	u32			perturb_period;	/* re-seed interval, jiffies (0 = off) */
	u32			handle;		/* user-assigned filter handle */

	u32			nkeys;		/* popcount of keymask */
	u32			keymask;	/* bitmask of (1 << FLOW_KEY_*) */
	u32			mode;		/* FLOW_MODE_HASH or FLOW_MODE_MAP */
	u32			mask;		/* MAP mode: AND mask */
	u32			xor;		/* MAP mode: XOR value */
	u32			rshift;		/* MAP mode: right shift amount */
	u32			addend;		/* MAP mode: value added after shift */
	u32			divisor;	/* optional modulus applied to classid */
	u32			baseclass;	/* base class the result is added to */
	u32			hashrnd;	/* jhash seed, refreshed by perturb_timer */
	struct rcu_work		rwork;		/* deferred destruction work */
};
58*4882a593Smuzhiyun 
addr_fold(void * addr)59*4882a593Smuzhiyun static inline u32 addr_fold(void *addr)
60*4882a593Smuzhiyun {
61*4882a593Smuzhiyun 	unsigned long a = (unsigned long)addr;
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
64*4882a593Smuzhiyun }
65*4882a593Smuzhiyun 
flow_get_src(const struct sk_buff * skb,const struct flow_keys * flow)66*4882a593Smuzhiyun static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun 	__be32 src = flow_get_u32_src(flow);
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 	if (src)
71*4882a593Smuzhiyun 		return ntohl(src);
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	return addr_fold(skb->sk);
74*4882a593Smuzhiyun }
75*4882a593Smuzhiyun 
flow_get_dst(const struct sk_buff * skb,const struct flow_keys * flow)76*4882a593Smuzhiyun static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
77*4882a593Smuzhiyun {
78*4882a593Smuzhiyun 	__be32 dst = flow_get_u32_dst(flow);
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun 	if (dst)
81*4882a593Smuzhiyun 		return ntohl(dst);
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun 
flow_get_proto(const struct sk_buff * skb,const struct flow_keys * flow)86*4882a593Smuzhiyun static u32 flow_get_proto(const struct sk_buff *skb,
87*4882a593Smuzhiyun 			  const struct flow_keys *flow)
88*4882a593Smuzhiyun {
89*4882a593Smuzhiyun 	return flow->basic.ip_proto;
90*4882a593Smuzhiyun }
91*4882a593Smuzhiyun 
flow_get_proto_src(const struct sk_buff * skb,const struct flow_keys * flow)92*4882a593Smuzhiyun static u32 flow_get_proto_src(const struct sk_buff *skb,
93*4882a593Smuzhiyun 			      const struct flow_keys *flow)
94*4882a593Smuzhiyun {
95*4882a593Smuzhiyun 	if (flow->ports.ports)
96*4882a593Smuzhiyun 		return ntohs(flow->ports.src);
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun 	return addr_fold(skb->sk);
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun 
flow_get_proto_dst(const struct sk_buff * skb,const struct flow_keys * flow)101*4882a593Smuzhiyun static u32 flow_get_proto_dst(const struct sk_buff *skb,
102*4882a593Smuzhiyun 			      const struct flow_keys *flow)
103*4882a593Smuzhiyun {
104*4882a593Smuzhiyun 	if (flow->ports.ports)
105*4882a593Smuzhiyun 		return ntohs(flow->ports.dst);
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun 	return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
108*4882a593Smuzhiyun }
109*4882a593Smuzhiyun 
flow_get_iif(const struct sk_buff * skb)110*4882a593Smuzhiyun static u32 flow_get_iif(const struct sk_buff *skb)
111*4882a593Smuzhiyun {
112*4882a593Smuzhiyun 	return skb->skb_iif;
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun 
flow_get_priority(const struct sk_buff * skb)115*4882a593Smuzhiyun static u32 flow_get_priority(const struct sk_buff *skb)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun 	return skb->priority;
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun 
flow_get_mark(const struct sk_buff * skb)120*4882a593Smuzhiyun static u32 flow_get_mark(const struct sk_buff *skb)
121*4882a593Smuzhiyun {
122*4882a593Smuzhiyun 	return skb->mark;
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun 
/* Key: folded pointer to the conntrack entry, or 0 without CONFIG_NF_CONNTRACK. */
static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return addr_fold(skb_nfct(skb));
#else
	return 0;
#endif
}
133*4882a593Smuzhiyun 
/* CTTUPLE(skb, member) evaluates to the given member of the conntrack tuple
 * for the packet's direction.  NOTE: if there is no conntrack entry (or
 * conntrack is compiled out) it does a non-local `goto fallback;` into the
 * CALLER, so every user must provide a `fallback:` label.
 */
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif
150*4882a593Smuzhiyun 
/* Key: conntrack source address (last IPv6 word for v6); falls back to the
 * plain dissected source when no conntrack entry exists (CTTUPLE jumps to
 * the fallback label below).
 */
static u32 flow_get_nfct_src(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}
163*4882a593Smuzhiyun 
/* Key: conntrack destination address (last IPv6 word for v6); falls back to
 * the plain dissected destination when no conntrack entry exists.
 */
static u32 flow_get_nfct_dst(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}
176*4882a593Smuzhiyun 
/* Key: conntrack source port; falls back to the dissected source port. */
static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}
184*4882a593Smuzhiyun 
/* Key: conntrack destination port; falls back to the dissected dest port. */
static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}
192*4882a593Smuzhiyun 
/* Key: routing realm (tclassid) of the dst entry, 0 if unavailable or the
 * kernel was built without CONFIG_IP_ROUTE_CLASSID.
 */
static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}
201*4882a593Smuzhiyun 
flow_get_skuid(const struct sk_buff * skb)202*4882a593Smuzhiyun static u32 flow_get_skuid(const struct sk_buff *skb)
203*4882a593Smuzhiyun {
204*4882a593Smuzhiyun 	struct sock *sk = skb_to_full_sk(skb);
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 	if (sk && sk->sk_socket && sk->sk_socket->file) {
207*4882a593Smuzhiyun 		kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 		return from_kuid(&init_user_ns, skuid);
210*4882a593Smuzhiyun 	}
211*4882a593Smuzhiyun 	return 0;
212*4882a593Smuzhiyun }
213*4882a593Smuzhiyun 
flow_get_skgid(const struct sk_buff * skb)214*4882a593Smuzhiyun static u32 flow_get_skgid(const struct sk_buff *skb)
215*4882a593Smuzhiyun {
216*4882a593Smuzhiyun 	struct sock *sk = skb_to_full_sk(skb);
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun 	if (sk && sk->sk_socket && sk->sk_socket->file) {
219*4882a593Smuzhiyun 		kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 		return from_kgid(&init_user_ns, skgid);
222*4882a593Smuzhiyun 	}
223*4882a593Smuzhiyun 	return 0;
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun 
flow_get_vlan_tag(const struct sk_buff * skb)226*4882a593Smuzhiyun static u32 flow_get_vlan_tag(const struct sk_buff *skb)
227*4882a593Smuzhiyun {
228*4882a593Smuzhiyun 	u16 tag;
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun 	if (vlan_get_tag(skb, &tag) < 0)
231*4882a593Smuzhiyun 		return 0;
232*4882a593Smuzhiyun 	return tag & VLAN_VID_MASK;
233*4882a593Smuzhiyun }
234*4882a593Smuzhiyun 
flow_get_rxhash(struct sk_buff * skb)235*4882a593Smuzhiyun static u32 flow_get_rxhash(struct sk_buff *skb)
236*4882a593Smuzhiyun {
237*4882a593Smuzhiyun 	return skb_get_hash(skb);
238*4882a593Smuzhiyun }
239*4882a593Smuzhiyun 
/* Dispatch: extract the value of one FLOW_KEY_* from the packet.
 * @flow must already hold dissected keys when @key is one of the
 * FLOW_KEYS_NEEDED keys (see flow_classify()).  Unknown keys should have
 * been rejected at configuration time, hence the WARN_ON.
 */
static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}
284*4882a593Smuzhiyun 
/* Keys whose extraction reads the dissected flow_keys; if any of these is
 * configured, flow_classify() must run the flow dissector first.
 */
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) | 		\
			  (1 << FLOW_KEY_DST) |			\
			  (1 << FLOW_KEY_PROTO) |		\
			  (1 << FLOW_KEY_PROTO_SRC) |		\
			  (1 << FLOW_KEY_PROTO_DST) | 		\
			  (1 << FLOW_KEY_NFCT_SRC) |		\
			  (1 << FLOW_KEY_NFCT_DST) |		\
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_DST))
294*4882a593Smuzhiyun 
/* Classify a packet.  For each filter in order: if its ematch tree matches,
 * extract the configured keys, derive a class id (jhash in HASH mode, a
 * mask/xor/shift/add pipeline in MAP mode), then run the attached actions.
 * Returns the first non-negative action verdict, or -1 if nothing matched.
 * Runs under rcu_read_lock_bh() from the qdisc layer.
 */
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		/* Only dissect when a configured key actually needs it. */
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

		/* Extract configured keys in ascending bit order. */
		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class   = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}
343*4882a593Smuzhiyun 
flow_perturbation(struct timer_list * t)344*4882a593Smuzhiyun static void flow_perturbation(struct timer_list *t)
345*4882a593Smuzhiyun {
346*4882a593Smuzhiyun 	struct flow_filter *f = from_timer(f, t, perturb_timer);
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun 	get_random_bytes(&f->hashrnd, 4);
349*4882a593Smuzhiyun 	if (f->perturb_period)
350*4882a593Smuzhiyun 		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
351*4882a593Smuzhiyun }
352*4882a593Smuzhiyun 
/* Netlink attribute policy for TCA_FLOW_* options. */
static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};
367*4882a593Smuzhiyun 
/* Final teardown of a filter.  The timer must be stopped synchronously
 * before freeing anything it could touch; exts/ematches are released next,
 * then the struct itself.
 */
static void __flow_destroy_filter(struct flow_filter *f)
{
	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}
376*4882a593Smuzhiyun 
/* Deferred destruction, run from a workqueue after the RCU grace period;
 * takes RTNL because action teardown requires it.
 */
static void flow_destroy_filter_work(struct work_struct *work)
{
	struct flow_filter *f = container_of(to_rcu_work(work),
					     struct flow_filter,
					     rwork);
	rtnl_lock();
	__flow_destroy_filter(f);
	rtnl_unlock();
}
386*4882a593Smuzhiyun 
/* Create or replace a flow filter.  Called with RTNL held.
 *
 * A new filter is always allocated; on replace (*arg != NULL) the old
 * filter's settings are copied into it first, requested attributes are
 * applied on top, and the new filter is swapped in via RCU.  The old
 * filter is then freed from a workqueue after the grace period.
 *
 * Returns 0 on success or a negative errno.
 */
static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *fold, *fnew;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_FLOW_MAX, opt, flow_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

		/* SKUID/SKGID keys are only meaningful in the initial user
		 * namespace.  keymask stores each key as (1 << FLOW_KEY_*);
		 * the historical test used the raw enum values
		 * (FLOW_KEY_SKUID | FLOW_KEY_SKGID == 0xf) and so matched
		 * the wrong bits.
		 */
		if ((keymask & ((1 << FLOW_KEY_SKUID) |
				(1 << FLOW_KEY_SKGID))) &&
		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
			return -EOPNOTSUPP;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
	if (err < 0)
		goto err1;

	err = tcf_exts_init(&fnew->exts, net, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	if (err < 0)
		goto err2;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr,
				true, extack);
	if (err < 0)
		goto err2;

	fold = *arg;
	if (fold) {
		err = -EINVAL;
		if (fold->handle != handle && handle)
			goto err2;

		/* Copy fold into fnew; attributes below override these. */
		fnew->tp = fold->tp;
		fnew->handle = fold->handle;
		fnew->nkeys = fold->nkeys;
		fnew->keymask = fold->keymask;
		fnew->mode = fold->mode;
		fnew->mask = fold->mask;
		fnew->xor = fold->xor;
		fnew->rshift = fold->rshift;
		fnew->addend = fold->addend;
		fnew->divisor = fold->divisor;
		fnew->baseclass = fold->baseclass;
		fnew->hashrnd = fold->hashrnd;

		mode = fold->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		/* MAP mode uses exactly one key (keys[0]). */
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = fold->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		/* Default the major/minor parts of the base class id. */
		if (TC_H_MAJ(baseclass) == 0) {
			struct Qdisc *q = tcf_block_q(tp->chain->block);

			baseclass = TC_H_MAKE(q->handle, baseclass);
		}
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		fnew->handle = handle;
		fnew->mask  = ~0U;
		fnew->tp = tp;
		get_random_bytes(&fnew->hashrnd, 4);
	}

	timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);

	/* Classification may read skb_dst(); keep dst entries on ingress. */
	tcf_block_netif_keep_dst(tp->chain->block);

	if (tb[TCA_FLOW_KEYS]) {
		fnew->keymask = keymask;
		fnew->nkeys   = nkeys;
	}

	fnew->mode = mode;

	if (tb[TCA_FLOW_MASK])
		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		fnew->baseclass = baseclass;

	fnew->perturb_period = perturb_period;
	if (perturb_period)
		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

	if (!*arg)
		list_add_tail_rcu(&fnew->list, &head->filters);
	else
		list_replace_rcu(&fold->list, &fnew->list);

	*arg = fnew;

	if (fold) {
		/* Free the replaced filter after the RCU grace period. */
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, flow_destroy_filter_work);
	}
	return 0;

err2:
	tcf_exts_destroy(&fnew->exts);
	tcf_em_tree_destroy(&fnew->ematches);
err1:
	kfree(fnew);
	return err;
}
565*4882a593Smuzhiyun 
/* Unlink a filter (RTNL held) and schedule its destruction after the RCU
 * grace period.  *last tells the caller whether the proto is now empty.
 */
static int flow_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f = arg;

	list_del_rcu(&f->list);
	tcf_exts_get_net(&f->exts);
	tcf_queue_work(&f->rwork, flow_destroy_filter_work);
	*last = list_empty(&head->filters);
	return 0;
}
578*4882a593Smuzhiyun 
flow_init(struct tcf_proto * tp)579*4882a593Smuzhiyun static int flow_init(struct tcf_proto *tp)
580*4882a593Smuzhiyun {
581*4882a593Smuzhiyun 	struct flow_head *head;
582*4882a593Smuzhiyun 
583*4882a593Smuzhiyun 	head = kzalloc(sizeof(*head), GFP_KERNEL);
584*4882a593Smuzhiyun 	if (head == NULL)
585*4882a593Smuzhiyun 		return -ENOBUFS;
586*4882a593Smuzhiyun 	INIT_LIST_HEAD(&head->filters);
587*4882a593Smuzhiyun 	rcu_assign_pointer(tp->root, head);
588*4882a593Smuzhiyun 	return 0;
589*4882a593Smuzhiyun }
590*4882a593Smuzhiyun 
/* Tear down the whole proto (RTNL held).  Filters whose exts still hold a
 * net reference are destroyed from the workqueue after the grace period;
 * otherwise (netns already gone) they are destroyed immediately.
 */
static void flow_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		if (tcf_exts_get_net(&f->exts))
			tcf_queue_work(&f->rwork, flow_destroy_filter_work);
		else
			__flow_destroy_filter(f);
	}
	kfree_rcu(head, rcu);
}
606*4882a593Smuzhiyun 
/* Look up a filter by handle (RTNL held); NULL if not found. */
static void *flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *filter;

	list_for_each_entry(filter, &head->filters, list) {
		if (filter->handle == handle)
			return filter;
	}
	return NULL;
}
617*4882a593Smuzhiyun 
/* Dump one filter's configuration as nested TCA_OPTIONS attributes.
 * Default-valued attributes are omitted.  Returns skb->len on success,
 * -1 (with the nest cancelled) when the skb runs out of room.
 */
static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct flow_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	/* perturb_period is stored in jiffies; userspace sees seconds. */
	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
678*4882a593Smuzhiyun 
/* Iterate over all filters for a dump/walk callback (RTNL held), honouring
 * the walker's skip count and stop flag.
 */
static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
696*4882a593Smuzhiyun 
/* Classifier operations registered with the TC core under kind "flow". */
static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};
709*4882a593Smuzhiyun 
/* Module init: register the "flow" classifier with the TC core. */
static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}
714*4882a593Smuzhiyun 
/* Module exit: unregister the classifier. */
static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}
719*4882a593Smuzhiyun 
/* Module entry points and metadata. */
module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");
726