1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /* net/sched/sch_dsmark.c - Differentiated Services field marker */
3*4882a593Smuzhiyun
4*4882a593Smuzhiyun /* Written 1998-2000 by Werner Almesberger, EPFL ICA */
5*4882a593Smuzhiyun
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #include <linux/module.h>
8*4882a593Smuzhiyun #include <linux/init.h>
9*4882a593Smuzhiyun #include <linux/slab.h>
10*4882a593Smuzhiyun #include <linux/types.h>
11*4882a593Smuzhiyun #include <linux/string.h>
12*4882a593Smuzhiyun #include <linux/errno.h>
13*4882a593Smuzhiyun #include <linux/skbuff.h>
14*4882a593Smuzhiyun #include <linux/rtnetlink.h>
15*4882a593Smuzhiyun #include <linux/bitops.h>
16*4882a593Smuzhiyun #include <net/pkt_sched.h>
17*4882a593Smuzhiyun #include <net/pkt_cls.h>
18*4882a593Smuzhiyun #include <net/dsfield.h>
19*4882a593Smuzhiyun #include <net/inet_ecn.h>
20*4882a593Smuzhiyun #include <asm/byteorder.h>
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun /*
23*4882a593Smuzhiyun * classid class marking
24*4882a593Smuzhiyun * ------- ----- -------
25*4882a593Smuzhiyun * n/a 0 n/a
26*4882a593Smuzhiyun * x:0 1 use entry [0]
27*4882a593Smuzhiyun * ... ... ...
28*4882a593Smuzhiyun * x:y y>0 y+1 use entry [y]
29*4882a593Smuzhiyun * ... ... ...
30*4882a593Smuzhiyun * x:indices-1 indices use entry [indices-1]
31*4882a593Smuzhiyun * ... ... ...
32*4882a593Smuzhiyun * x:y y+1 use entry [y & (indices-1)]
33*4882a593Smuzhiyun * ... ... ...
34*4882a593Smuzhiyun * 0xffff 0x10000 use entry [indices-1]
35*4882a593Smuzhiyun */
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun #define NO_DEFAULT_INDEX (1 << 16)
39*4882a593Smuzhiyun
/* One remarking rule: new_DS = (old_DS & mask) | value. */
struct mask_value {
	u8			mask;	/* bits of the DS byte to keep */
	u8			value;	/* bits to OR in after masking */
};
44*4882a593Smuzhiyun
/* Per-qdisc private state for dsmark. */
struct dsmark_qdisc_data {
	struct Qdisc		*q;		/* child qdisc all traffic goes through */
	struct tcf_proto __rcu	*filter_list;	/* attached classifiers */
	struct tcf_block	*block;
	struct mask_value	*mv;		/* remarking table, one entry per index */
	u16			indices;	/* table size; power of two */
	u8			set_tc_index;	/* copy DS field into skb->tc_index on enqueue */
	u32			default_index;	/* index range is 0...0xffff */
#define DSMARK_EMBEDDED_SZ	16
	/* small tables are stored inline to avoid a separate allocation */
	struct mask_value	embedded[DSMARK_EMBEDDED_SZ];
};
56*4882a593Smuzhiyun
/* A class index is valid when it lies in 1..indices; 0 means "none". */
static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
{
	return index > 0 && index <= p->indices;
}
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun /* ------------------------- Class/flow operations ------------------------- */
63*4882a593Smuzhiyun
dsmark_graft(struct Qdisc * sch,unsigned long arg,struct Qdisc * new,struct Qdisc ** old,struct netlink_ext_ack * extack)64*4882a593Smuzhiyun static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
65*4882a593Smuzhiyun struct Qdisc *new, struct Qdisc **old,
66*4882a593Smuzhiyun struct netlink_ext_ack *extack)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun struct dsmark_qdisc_data *p = qdisc_priv(sch);
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
71*4882a593Smuzhiyun __func__, sch, p, new, old);
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun if (new == NULL) {
74*4882a593Smuzhiyun new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
75*4882a593Smuzhiyun sch->handle, NULL);
76*4882a593Smuzhiyun if (new == NULL)
77*4882a593Smuzhiyun new = &noop_qdisc;
78*4882a593Smuzhiyun }
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun *old = qdisc_replace(sch, new, &p->q);
81*4882a593Smuzhiyun return 0;
82*4882a593Smuzhiyun }
83*4882a593Smuzhiyun
dsmark_leaf(struct Qdisc * sch,unsigned long arg)84*4882a593Smuzhiyun static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
85*4882a593Smuzhiyun {
86*4882a593Smuzhiyun struct dsmark_qdisc_data *p = qdisc_priv(sch);
87*4882a593Smuzhiyun return p->q;
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun
/* Map a classid to an internal class number: minor id + 1, so that 0
 * can be used as "not found" by the class API.
 */
static unsigned long dsmark_find(struct Qdisc *sch, u32 classid)
{
	return TC_H_MIN(classid) + 1;
}
94*4882a593Smuzhiyun
/* Binding a filter to a class uses the same classid mapping as lookup;
 * no reference counting is needed for these fixed table slots.
 */
static unsigned long dsmark_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
		 __func__, sch, qdisc_priv(sch), classid);

	return dsmark_find(sch, classid);
}
103*4882a593Smuzhiyun
/* Classes are preallocated table slots, so unbinding needs no cleanup. */
static void dsmark_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}
107*4882a593Smuzhiyun
/* Netlink attribute policy for TCA_DSMARK_* options. */
static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
	[TCA_DSMARK_INDICES]		= { .type = NLA_U16 },
	[TCA_DSMARK_DEFAULT_INDEX]	= { .type = NLA_U16 },
	[TCA_DSMARK_SET_TC_INDEX]	= { .type = NLA_FLAG },
	[TCA_DSMARK_MASK]		= { .type = NLA_U8 },
	[TCA_DSMARK_VALUE]		= { .type = NLA_U8 },
};
115*4882a593Smuzhiyun
/* Update a class: rewrite the (mask, value) pair of an existing table
 * slot.  Slots are preallocated, so no class is ever created here.
 */
static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg,
			 struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	struct nlattr *opt = tca[TCA_OPTIONS];
	int err;

	pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
		 __func__, sch, p, classid, parent, *arg);

	if (!dsmark_valid_index(p, *arg))
		return -ENOENT;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
					  dsmark_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_DSMARK_VALUE])
		p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);

	if (tb[TCA_DSMARK_MASK])
		p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);

	return 0;
}
152*4882a593Smuzhiyun
dsmark_delete(struct Qdisc * sch,unsigned long arg)153*4882a593Smuzhiyun static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
154*4882a593Smuzhiyun {
155*4882a593Smuzhiyun struct dsmark_qdisc_data *p = qdisc_priv(sch);
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun if (!dsmark_valid_index(p, arg))
158*4882a593Smuzhiyun return -EINVAL;
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun p->mv[arg - 1].mask = 0xff;
161*4882a593Smuzhiyun p->mv[arg - 1].value = 0;
162*4882a593Smuzhiyun
163*4882a593Smuzhiyun return 0;
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun
dsmark_walk(struct Qdisc * sch,struct qdisc_walker * walker)166*4882a593Smuzhiyun static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
167*4882a593Smuzhiyun {
168*4882a593Smuzhiyun struct dsmark_qdisc_data *p = qdisc_priv(sch);
169*4882a593Smuzhiyun int i;
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
172*4882a593Smuzhiyun __func__, sch, p, walker);
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun if (walker->stop)
175*4882a593Smuzhiyun return;
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun for (i = 0; i < p->indices; i++) {
178*4882a593Smuzhiyun if (p->mv[i].mask == 0xff && !p->mv[i].value)
179*4882a593Smuzhiyun goto ignore;
180*4882a593Smuzhiyun if (walker->count >= walker->skip) {
181*4882a593Smuzhiyun if (walker->fn(sch, i + 1, walker) < 0) {
182*4882a593Smuzhiyun walker->stop = 1;
183*4882a593Smuzhiyun break;
184*4882a593Smuzhiyun }
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun ignore:
187*4882a593Smuzhiyun walker->count++;
188*4882a593Smuzhiyun }
189*4882a593Smuzhiyun }
190*4882a593Smuzhiyun
dsmark_tcf_block(struct Qdisc * sch,unsigned long cl,struct netlink_ext_ack * extack)191*4882a593Smuzhiyun static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
192*4882a593Smuzhiyun struct netlink_ext_ack *extack)
193*4882a593Smuzhiyun {
194*4882a593Smuzhiyun struct dsmark_qdisc_data *p = qdisc_priv(sch);
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun return p->block;
197*4882a593Smuzhiyun }
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun /* --------------------------- Qdisc operations ---------------------------- */
200*4882a593Smuzhiyun
/* Enqueue: optionally copy the packet's DS field into skb->tc_index,
 * let skb->priority or the attached classifiers (re)set tc_index, then
 * hand the packet to the child qdisc.  The actual DS-field rewrite
 * happens at dequeue time (see dsmark_dequeue).
 */
static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);

	if (p->set_tc_index) {
		int wlen = skb_network_offset(skb);

		switch (skb_protocol(skb, true)) {
		case htons(ETH_P_IP):
			wlen += sizeof(struct iphdr);
			/* Header must be present and writable before the
			 * DS field is read; otherwise drop the packet.
			 */
			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				goto drop;

			/* Keep DSCP only; the ECN bits are masked out. */
			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
				& ~INET_ECN_MASK;
			break;

		case htons(ETH_P_IPV6):
			wlen += sizeof(struct ipv6hdr);
			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				goto drop;

			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
				& ~INET_ECN_MASK;
			break;
		default:
			/* non-IP traffic gets index 0 */
			skb->tc_index = 0;
			break;
		}
	}

	/* A priority addressed to this qdisc's major handle selects the
	 * index directly; otherwise consult the classifiers.
	 */
	if (TC_H_MAJ(skb->priority) == sch->handle)
		skb->tc_index = TC_H_MIN(skb->priority);
	else {
		struct tcf_result res;
		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
		int result = tcf_classify(skb, fl, &res, false);

		pr_debug("result %d class 0x%04x\n", result, res.classid);

		switch (result) {
#ifdef CONFIG_NET_CLS_ACT
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			/* skb taken over by an action: count as stolen,
			 * not as a local drop.
			 */
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

		case TC_ACT_SHOT:
			goto drop;
#endif
		case TC_ACT_OK:
			skb->tc_index = TC_H_MIN(res.classid);
			break;

		default:
			/* no match: fall back to the configured default
			 * index, if one was given
			 */
			if (p->default_index != NO_DEFAULT_INDEX)
				skb->tc_index = p->default_index;
			break;
		}
	}

	err = qdisc_enqueue(skb, p->q, to_free);
	if (err != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(err))
			qdisc_qstats_drop(sch);
		return err;
	}

	/* mirror the child's accounting on this qdisc */
	sch->qstats.backlog += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
286*4882a593Smuzhiyun
/* Dequeue from the child qdisc and rewrite the DS field of IPv4/IPv6
 * packets using the (mask, value) pair selected by skb->tc_index.
 */
static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;
	u32 index;

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	skb = qdisc_dequeue_peeked(p->q);
	if (skb == NULL)
		return NULL;

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	/* indices is a power of two, so this reduces tc_index into range */
	index = skb->tc_index & (p->indices - 1);
	pr_debug("index %d->%d\n", skb->tc_index, index);

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
				    p->mv[index].value);
			break;
	case htons(ETH_P_IPV6):
		ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
				    p->mv[index].value);
			break;
	default:
		/*
		 * Only complain if a change was actually attempted.
		 * This way, we can send non-IP traffic through dsmark
		 * and don't need yet another qdisc as a bypass.
		 */
		if (p->mv[index].mask != 0xff || p->mv[index].value)
			pr_warn("%s: unsupported protocol %d\n",
				__func__, ntohs(skb_protocol(skb, true)));
		break;
	}

	return skb;
}
329*4882a593Smuzhiyun
/* Peek delegates straight to the child qdisc without any remarking. */
static struct sk_buff *dsmark_peek(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	return p->q->ops->peek(p->q);
}
338*4882a593Smuzhiyun
/* Parse the qdisc's netlink options, set up the mask/value table (kept
 * inline for small sizes) and attach a default pfifo child qdisc.
 */
static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;
	u32 default_index = NO_DEFAULT_INDEX;
	u16 indices;
	int i;

	pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);

	if (!opt)
		goto errout;

	err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
	if (err)
		return err;

	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
					  dsmark_policy, NULL);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (!tb[TCA_DSMARK_INDICES])
		goto errout;
	indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);

	/* indices must be a non-zero power of two so that dequeue can
	 * reduce tc_index with a simple mask (see dsmark_dequeue).
	 */
	if (hweight32(indices) != 1)
		goto errout;

	if (tb[TCA_DSMARK_DEFAULT_INDEX])
		default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);

	/* Small tables live inside the priv area; larger ones are
	 * allocated separately and freed in dsmark_destroy().
	 */
	if (indices <= DSMARK_EMBEDDED_SZ)
		p->mv = p->embedded;
	else
		p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
	if (!p->mv) {
		err = -ENOMEM;
		goto errout;
	}
	/* Start every slot as the identity marking (DS byte unchanged). */
	for (i = 0; i < indices; i++) {
		p->mv[i].mask = 0xff;
		p->mv[i].value = 0;
	}
	p->indices = indices;
	p->default_index = default_index;
	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);

	/* Fall back to the noop qdisc if the default child can't be made. */
	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
				 NULL);
	if (p->q == NULL)
		p->q = &noop_qdisc;
	else
		qdisc_hash_add(p->q, true);

	pr_debug("%s: qdisc %p\n", __func__, p->q);

	err = 0;
errout:
	/* NOTE(review): on error paths after tcf_block_get() the block is
	 * left for ->destroy to release — presumably the qdisc core calls
	 * ->destroy when ->init fails; verify against qdisc_create().
	 */
	return err;
}
403*4882a593Smuzhiyun
/* Reset just forwards to the child qdisc (which may be absent if init
 * failed early).
 */
static void dsmark_reset(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
	if (p->q)
		qdisc_reset(p->q);
}
412*4882a593Smuzhiyun
/* Release everything dsmark_init() set up: the filter block, the child
 * qdisc, and the mask/value table unless it is the embedded array.
 */
static void dsmark_destroy(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	tcf_block_put(p->block);
	qdisc_put(p->q);
	if (p->mv != p->embedded)
		kfree(p->mv);
}
424*4882a593Smuzhiyun
/* Dump one class: the handle encodes the table index (cl - 1) and the
 * nested options carry the slot's current mask and value.
 */
static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);

	if (!dsmark_valid_index(p, cl))
		return -EINVAL;

	/* internal class numbers are index + 1; report the raw index */
	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
	tcm->tcm_info = p->q->handle;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
452*4882a593Smuzhiyun
/* Dump the qdisc's own options: indices, and (only when set) the
 * default index and the set_tc_index flag.
 */
static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
		goto nla_put_failure;

	if (p->default_index != NO_DEFAULT_INDEX &&
	    nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
		goto nla_put_failure;

	if (p->set_tc_index &&
	    nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
478*4882a593Smuzhiyun
/* Class operations exported to the qdisc core. */
static const struct Qdisc_class_ops dsmark_class_ops = {
	.graft		=	dsmark_graft,
	.leaf		=	dsmark_leaf,
	.find		=	dsmark_find,
	.change		=	dsmark_change,
	.delete		=	dsmark_delete,
	.walk		=	dsmark_walk,
	.tcf_block	=	dsmark_tcf_block,
	.bind_tcf	=	dsmark_bind_filter,
	.unbind_tcf	=	dsmark_unbind_filter,
	.dump		=	dsmark_dump_class,
};
491*4882a593Smuzhiyun
/* Qdisc operations; registered under the id "dsmark". */
static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&dsmark_class_ops,
	.id		=	"dsmark",
	.priv_size	=	sizeof(struct dsmark_qdisc_data),
	.enqueue	=	dsmark_enqueue,
	.dequeue	=	dsmark_dequeue,
	.peek		=	dsmark_peek,
	.init		=	dsmark_init,
	.reset		=	dsmark_reset,
	.destroy	=	dsmark_destroy,
	.change		=	NULL,
	.dump		=	dsmark_dump,
	.owner		=	THIS_MODULE,
};
507*4882a593Smuzhiyun
/* Module entry point: register the dsmark qdisc with the tc core. */
static int __init dsmark_module_init(void)
{
	return register_qdisc(&dsmark_qdisc_ops);
}
512*4882a593Smuzhiyun
/* Module exit point: unregister the dsmark qdisc. */
static void __exit dsmark_module_exit(void)
{
	unregister_qdisc(&dsmark_qdisc_ops);
}
517*4882a593Smuzhiyun
518*4882a593Smuzhiyun module_init(dsmark_module_init)
519*4882a593Smuzhiyun module_exit(dsmark_module_exit)
520*4882a593Smuzhiyun
521*4882a593Smuzhiyun MODULE_LICENSE("GPL");
522