1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */
3*4882a593Smuzhiyun
4*4882a593Smuzhiyun /* Written 1998-2000 by Werner Almesberger, EPFL ICA */
5*4882a593Smuzhiyun
6*4882a593Smuzhiyun #include <linux/module.h>
7*4882a593Smuzhiyun #include <linux/slab.h>
8*4882a593Smuzhiyun #include <linux/init.h>
9*4882a593Smuzhiyun #include <linux/interrupt.h>
10*4882a593Smuzhiyun #include <linux/string.h>
11*4882a593Smuzhiyun #include <linux/errno.h>
12*4882a593Smuzhiyun #include <linux/skbuff.h>
13*4882a593Smuzhiyun #include <linux/atmdev.h>
14*4882a593Smuzhiyun #include <linux/atmclip.h>
15*4882a593Smuzhiyun #include <linux/rtnetlink.h>
16*4882a593Smuzhiyun #include <linux/file.h> /* for fput */
17*4882a593Smuzhiyun #include <net/netlink.h>
18*4882a593Smuzhiyun #include <net/pkt_sched.h>
19*4882a593Smuzhiyun #include <net/pkt_cls.h>
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun /*
22*4882a593Smuzhiyun * The ATM queuing discipline provides a framework for invoking classifiers
23*4882a593Smuzhiyun * (aka "filters"), which in turn select classes of this queuing discipline.
24*4882a593Smuzhiyun * Each class maps the flow(s) it is handling to a given VC. Multiple classes
25*4882a593Smuzhiyun * may share the same VC.
26*4882a593Smuzhiyun *
27*4882a593Smuzhiyun * When creating a class, VCs are specified by passing the number of the open
28*4882a593Smuzhiyun * socket descriptor by which the calling process references the VC. The kernel
29*4882a593Smuzhiyun * keeps the VC open at least until all classes using it are removed.
30*4882a593Smuzhiyun *
31*4882a593Smuzhiyun * In this file, most functions are named atm_tc_* to avoid confusion with all
32*4882a593Smuzhiyun * the atm_* in net/atm. This naming convention differs from what's used in the
33*4882a593Smuzhiyun * rest of net/sched.
34*4882a593Smuzhiyun *
35*4882a593Smuzhiyun * Known bugs:
36*4882a593Smuzhiyun * - sometimes messes up the IP stack
37*4882a593Smuzhiyun * - any manipulations besides the few operations described in the README, are
38*4882a593Smuzhiyun * untested and likely to crash the system
39*4882a593Smuzhiyun * - should lock the flow while there is data in the queue (?)
40*4882a593Smuzhiyun */
41*4882a593Smuzhiyun
/* Map an ATM VCC back to the owning flow; atm_tc_change() stores the
 * flow pointer in vcc->user_back when it hooks the VC. */
#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
43*4882a593Smuzhiyun
/*
 * Per-class state ("flow"): each class maps the traffic it handles to
 * one VC.  The special "link" flow embedded in atm_qdisc_data carries
 * unclassified traffic and has no VC (vcc == NULL).
 */
struct atm_flow_data {
	struct Qdisc_class_common common;
	struct Qdisc		*q;	/* FIFO, TBF, etc. */
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;	/* filter block for this class */
	struct atm_vcc		*vcc;	/* VCC; NULL if VCC is closed */
	void (*old_pop)(struct atm_vcc *vcc,
			struct sk_buff *skb); /* chaining */
	struct atm_qdisc_data	*parent;	/* parent qdisc */
	struct socket		*sock;		/* for closing */
	int			ref;		/* reference count */
	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_queue	qstats;
	struct list_head	list;		/* on atm_qdisc_data.flows */
	struct atm_flow_data	*excess;	/* flow for excess traffic;
						   NULL to set CLP instead */
	int			hdr_len;	/* bytes in hdr[] below */
	unsigned char		hdr[];		/* header data; MUST BE LAST */
};
63*4882a593Smuzhiyun
/* Qdisc private data: the flow list plus the tasklet that pushes
 * VC-bound packets out (see sch_atm_dequeue()). */
struct atm_qdisc_data {
	struct atm_flow_data	link;		/* unclassified skbs go here */
	struct list_head	flows;		/* NB: "link" is also on this
						   list */
	struct tasklet_struct	task;		/* dequeue tasklet */
};
70*4882a593Smuzhiyun
71*4882a593Smuzhiyun /* ------------------------- Class/flow operations ------------------------- */
72*4882a593Smuzhiyun
/* Find the flow with the given classid; NULL when no flow matches.
 * The flow list is short (includes "link"), so a linear scan is fine. */
static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *walk;

	list_for_each_entry(walk, &p->flows, list)
		if (walk->common.classid == classid)
			return walk;
	return NULL;
}
84*4882a593Smuzhiyun
atm_tc_graft(struct Qdisc * sch,unsigned long arg,struct Qdisc * new,struct Qdisc ** old,struct netlink_ext_ack * extack)85*4882a593Smuzhiyun static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
86*4882a593Smuzhiyun struct Qdisc *new, struct Qdisc **old,
87*4882a593Smuzhiyun struct netlink_ext_ack *extack)
88*4882a593Smuzhiyun {
89*4882a593Smuzhiyun struct atm_qdisc_data *p = qdisc_priv(sch);
90*4882a593Smuzhiyun struct atm_flow_data *flow = (struct atm_flow_data *)arg;
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
93*4882a593Smuzhiyun sch, p, flow, new, old);
94*4882a593Smuzhiyun if (list_empty(&flow->list))
95*4882a593Smuzhiyun return -EINVAL;
96*4882a593Smuzhiyun if (!new)
97*4882a593Smuzhiyun new = &noop_qdisc;
98*4882a593Smuzhiyun *old = flow->q;
99*4882a593Smuzhiyun flow->q = new;
100*4882a593Smuzhiyun if (*old)
101*4882a593Smuzhiyun qdisc_reset(*old);
102*4882a593Smuzhiyun return 0;
103*4882a593Smuzhiyun }
104*4882a593Smuzhiyun
atm_tc_leaf(struct Qdisc * sch,unsigned long cl)105*4882a593Smuzhiyun static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
106*4882a593Smuzhiyun {
107*4882a593Smuzhiyun struct atm_flow_data *flow = (struct atm_flow_data *)cl;
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
110*4882a593Smuzhiyun return flow ? flow->q : NULL;
111*4882a593Smuzhiyun }
112*4882a593Smuzhiyun
/* Look up a class by classid; 0 when not found.  No reference is
 * taken (see atm_tc_bind_filter() for the ref-taking variant). */
static unsigned long atm_tc_find(struct Qdisc *sch, u32 classid)
{
	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
	struct atm_flow_data *found;

	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
	found = lookup_flow(sch, classid);
	pr_debug("%s: flow %p\n", __func__, found);
	return (unsigned long)found;
}
123*4882a593Smuzhiyun
/* Like atm_tc_find(), but takes a reference on the flow: a bound
 * filter pins the class until .unbind_tcf (atm_tc_put) releases it. */
static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
	struct atm_flow_data *found;

	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
	found = lookup_flow(sch, classid);
	if (found != NULL)
		found->ref++;
	pr_debug("%s: flow %p\n", __func__, found);
	return (unsigned long)found;
}
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun /*
139*4882a593Smuzhiyun * atm_tc_put handles all destructions, including the ones that are explicitly
140*4882a593Smuzhiyun * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
141*4882a593Smuzhiyun * anything that still seems to be in use.
142*4882a593Smuzhiyun */
/* Drop one reference on a flow and destroy it when the count hits 0:
 * unlink it, release child qdisc, filter block, socket/VCC hook, and
 * the reference held on the excess flow (recursing into atm_tc_put). */
static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (--flow->ref)
		return;
	pr_debug("atm_tc_put: destroying\n");
	/* list_del_init() so later list_empty() checks see "gone". */
	list_del_init(&flow->list);
	pr_debug("atm_tc_put: qdisc %p\n", flow->q);
	qdisc_put(flow->q);
	/* NULL-safe; atm_tc_destroy() may already have cleared ->block. */
	tcf_block_put(flow->block);
	if (flow->sock) {
		pr_debug("atm_tc_put: f_count %ld\n",
			 file_count(flow->sock->file));
		/* Unhook our pop handler before dropping the socket
		 * reference taken by sockfd_lookup() in atm_tc_change(). */
		flow->vcc->pop = flow->old_pop;
		sockfd_put(flow->sock);
	}
	if (flow->excess)
		atm_tc_put(sch, (unsigned long)flow->excess);
	/* The embedded "link" flow is part of the qdisc private area. */
	if (flow != &p->link)
		kfree(flow);
	/*
	 * If flow == &p->link, the qdisc no longer works at this point and
	 * needs to be removed. (By the caller of atm_tc_put.)
	 */
}
171*4882a593Smuzhiyun
sch_atm_pop(struct atm_vcc * vcc,struct sk_buff * skb)172*4882a593Smuzhiyun static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;
175*4882a593Smuzhiyun
176*4882a593Smuzhiyun pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
177*4882a593Smuzhiyun VCC2FLOW(vcc)->old_pop(vcc, skb);
178*4882a593Smuzhiyun tasklet_schedule(&p->task);
179*4882a593Smuzhiyun }
180*4882a593Smuzhiyun
/* Default RFC 1483 LLC/SNAP header selecting EtherType IPv4, used when
 * no TCA_ATM_HDR attribute is supplied in atm_tc_change(). */
static const u8 llc_oui_ip[] = {
	0xaa,			/* DSAP: non-ISO */
	0xaa,			/* SSAP: non-ISO */
	0x03,			/* Ctrl: Unnumbered Information Command PDU */
	0x00,			/* OUI: EtherType */
	0x00, 0x00,
	0x08, 0x00
};				/* Ethertype IP (0800) */
189*4882a593Smuzhiyun
/* Netlink attribute policy for class creation.  NOTE(review):
 * TCA_ATM_HDR is read by atm_tc_change() but not listed here; the
 * deprecated nested parser accepts unlisted attributes unvalidated --
 * presumably intentional (variable-length blob), confirm. */
static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
	[TCA_ATM_FD]		= { .type = NLA_U32 },
	[TCA_ATM_EXCESS]	= { .type = NLA_U32 },
};
194*4882a593Smuzhiyun
/*
 * Create a new ATM class (flow).  The VC is specified via TCA_ATM_FD,
 * the descriptor of an open ATM socket in the calling process; the
 * file reference taken here is held until the class is destroyed in
 * atm_tc_put().  Existing classes cannot be modified (-EBUSY): delete
 * and re-add instead.  On success *arg holds the new flow.
 */
static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg,
			 struct netlink_ext_ack *extack)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
	struct atm_flow_data *excess = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ATM_MAX + 1];
	struct socket *sock;
	int fd, error, hdr_len;
	void *hdr;

	pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
		 "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
	/*
	 * The concept of parents doesn't apply for this qdisc.
	 */
	if (parent && parent != TC_H_ROOT && parent != sch->handle)
		return -EINVAL;
	/*
	 * ATM classes cannot be changed. In order to change properties of the
	 * ATM connection, that socket needs to be modified directly (via the
	 * native ATM API. In order to send a flow to a different VC, the old
	 * class needs to be removed and a new one added. (This may be changed
	 * later.)
	 */
	if (flow)
		return -EBUSY;
	if (opt == NULL)
		return -EINVAL;

	error = nla_parse_nested_deprecated(tb, TCA_ATM_MAX, opt, atm_policy,
					    NULL);
	if (error < 0)
		return error;

	if (!tb[TCA_ATM_FD])
		return -EINVAL;
	fd = nla_get_u32(tb[TCA_ATM_FD]);
	pr_debug("atm_tc_change: fd %d\n", fd);
	if (tb[TCA_ATM_HDR]) {
		/* Caller-supplied link-level header, prepended on dequeue. */
		hdr_len = nla_len(tb[TCA_ATM_HDR]);
		hdr = nla_data(tb[TCA_ATM_HDR]);
	} else {
		hdr_len = RFC1483LLC_LEN;
		hdr = NULL;	/* default LLC/SNAP for IP */
	}
	if (!tb[TCA_ATM_EXCESS])
		excess = NULL;
	else {
		/* NOTE(review): atm_tc_find() takes no reference on the
		 * excess flow, yet atm_tc_put() drops one when this class
		 * dies -- verify the refcount balance. */
		excess = (struct atm_flow_data *)
			atm_tc_find(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
		if (!excess)
			return -ENOENT;
	}
	pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n",
		 opt->nla_type, nla_len(opt), hdr_len);
	/* Takes a file reference; dropped via err_out on failure, or in
	 * atm_tc_put() when the class is destroyed. */
	sock = sockfd_lookup(fd, &error);
	if (!sock)
		return error;	/* f_count++ */
	pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
	if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
		error = -EPROTOTYPE;
		goto err_out;
	}
	/* @@@ should check if the socket is really operational or we'll crash
	   on vcc->send */
	if (classid) {
		/* Explicit classid must live under this qdisc's handle. */
		if (TC_H_MAJ(classid ^ sch->handle)) {
			pr_debug("atm_tc_change: classid mismatch\n");
			error = -EINVAL;
			goto err_out;
		}
	} else {
		int i;
		unsigned long cl;

		/* Auto-allocate a free minor id in the 0x8000+ range.
		 * NOTE(review): if every id is taken the loop falls through
		 * and reuses the last (occupied) classid -- confirm this
		 * cannot matter in practice. */
		for (i = 1; i < 0x8000; i++) {
			classid = TC_H_MAKE(sch->handle, 0x8000 | i);
			cl = atm_tc_find(sch, classid);
			if (!cl)
				break;
		}
	}
	pr_debug("atm_tc_change: new id %x\n", classid);
	/* hdr[] is a flexible array member; allocate room for the header. */
	flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
	pr_debug("atm_tc_change: flow %p\n", flow);
	if (!flow) {
		error = -ENOBUFS;
		goto err_out;
	}

	error = tcf_block_get(&flow->block, &flow->filter_list, sch,
			      extack);
	if (error) {
		kfree(flow);
		goto err_out;
	}

	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
				    extack);
	if (!flow->q)
		flow->q = &noop_qdisc;	/* degrade gracefully on OOM */
	pr_debug("atm_tc_change: qdisc %p\n", flow->q);
	flow->sock = sock;
	flow->vcc = ATM_SD(sock);	/* speedup */
	/* Chain our pop handler into the VCC: sch_atm_pop() calls the
	 * original pop and then reschedules the dequeue tasklet. */
	flow->vcc->user_back = flow;
	pr_debug("atm_tc_change: vcc %p\n", flow->vcc);
	flow->old_pop = flow->vcc->pop;
	flow->parent = p;
	flow->vcc->pop = sch_atm_pop;
	flow->common.classid = classid;
	flow->ref = 1;		/* "keepalive" ref, see atm_tc_delete() */
	flow->excess = excess;
	list_add(&flow->list, &p->link.list);
	flow->hdr_len = hdr_len;
	if (hdr)
		memcpy(flow->hdr, hdr, hdr_len);
	else
		memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
	*arg = (unsigned long)flow;
	return 0;
err_out:
	sockfd_put(sock);
	return error;
}
322*4882a593Smuzhiyun
atm_tc_delete(struct Qdisc * sch,unsigned long arg)323*4882a593Smuzhiyun static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
324*4882a593Smuzhiyun {
325*4882a593Smuzhiyun struct atm_qdisc_data *p = qdisc_priv(sch);
326*4882a593Smuzhiyun struct atm_flow_data *flow = (struct atm_flow_data *)arg;
327*4882a593Smuzhiyun
328*4882a593Smuzhiyun pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
329*4882a593Smuzhiyun if (list_empty(&flow->list))
330*4882a593Smuzhiyun return -EINVAL;
331*4882a593Smuzhiyun if (rcu_access_pointer(flow->filter_list) || flow == &p->link)
332*4882a593Smuzhiyun return -EBUSY;
333*4882a593Smuzhiyun /*
334*4882a593Smuzhiyun * Reference count must be 2: one for "keepalive" (set at class
335*4882a593Smuzhiyun * creation), and one for the reference held when calling delete.
336*4882a593Smuzhiyun */
337*4882a593Smuzhiyun if (flow->ref < 2) {
338*4882a593Smuzhiyun pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
339*4882a593Smuzhiyun return -EINVAL;
340*4882a593Smuzhiyun }
341*4882a593Smuzhiyun if (flow->ref > 2)
342*4882a593Smuzhiyun return -EBUSY; /* catch references via excess, etc. */
343*4882a593Smuzhiyun atm_tc_put(sch, arg);
344*4882a593Smuzhiyun return 0;
345*4882a593Smuzhiyun }
346*4882a593Smuzhiyun
atm_tc_walk(struct Qdisc * sch,struct qdisc_walker * walker)347*4882a593Smuzhiyun static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
348*4882a593Smuzhiyun {
349*4882a593Smuzhiyun struct atm_qdisc_data *p = qdisc_priv(sch);
350*4882a593Smuzhiyun struct atm_flow_data *flow;
351*4882a593Smuzhiyun
352*4882a593Smuzhiyun pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
353*4882a593Smuzhiyun if (walker->stop)
354*4882a593Smuzhiyun return;
355*4882a593Smuzhiyun list_for_each_entry(flow, &p->flows, list) {
356*4882a593Smuzhiyun if (walker->count >= walker->skip &&
357*4882a593Smuzhiyun walker->fn(sch, (unsigned long)flow, walker) < 0) {
358*4882a593Smuzhiyun walker->stop = 1;
359*4882a593Smuzhiyun break;
360*4882a593Smuzhiyun }
361*4882a593Smuzhiyun walker->count++;
362*4882a593Smuzhiyun }
363*4882a593Smuzhiyun }
364*4882a593Smuzhiyun
atm_tc_tcf_block(struct Qdisc * sch,unsigned long cl,struct netlink_ext_ack * extack)365*4882a593Smuzhiyun static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
366*4882a593Smuzhiyun struct netlink_ext_ack *extack)
367*4882a593Smuzhiyun {
368*4882a593Smuzhiyun struct atm_qdisc_data *p = qdisc_priv(sch);
369*4882a593Smuzhiyun struct atm_flow_data *flow = (struct atm_flow_data *)cl;
370*4882a593Smuzhiyun
371*4882a593Smuzhiyun pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
372*4882a593Smuzhiyun return flow ? flow->block : p->link.block;
373*4882a593Smuzhiyun }
374*4882a593Smuzhiyun
375*4882a593Smuzhiyun /* --------------------------- Qdisc operations ---------------------------- */
376*4882a593Smuzhiyun
/*
 * Classify an skb and enqueue it on the matching flow's child qdisc.
 * Packets for the "link" flow are counted in sch->q.qlen and handed
 * out later by atm_tc_dequeue(); VC-bound packets are reported as
 * __NET_XMIT_BYPASS and sent by the dequeue tasklet instead (see the
 * long comment near the end).
 */
static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct tcf_result res;
	int result;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
	result = TC_ACT_OK;	/* be nice to gcc */
	flow = NULL;
	/* Fast path: skb->priority may carry a direct classid.  Otherwise
	 * run the first flow's non-empty filter chain that is found. */
	if (TC_H_MAJ(skb->priority) != sch->handle ||
	    !(flow = (struct atm_flow_data *)atm_tc_find(sch, skb->priority))) {
		struct tcf_proto *fl;

		list_for_each_entry(flow, &p->flows, list) {
			fl = rcu_dereference_bh(flow->filter_list);
			if (fl) {
				result = tcf_classify(skb, fl, &res, true);
				if (result < 0)
					continue;
				flow = (struct atm_flow_data *)res.class;
				if (!flow)
					flow = lookup_flow(sch, res.classid);
				goto done;
			}
		}
		/* No filter chain found; flow is the stale list cursor
		 * here, so it must be cleared before use below. */
		flow = NULL;
done:
		;
	}
	if (!flow) {
		flow = &p->link;	/* unclassified traffic */
	} else {
		if (flow->vcc)
			ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
		/*@@@ looks good ... but it's not supposed to work :-) */
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			__qdisc_drop(skb, to_free);
			goto drop;
		case TC_ACT_RECLASSIFY:
			/* Excess traffic: redirect to the excess flow if
			 * configured, else tag the cell with CLP. */
			if (flow->excess)
				flow = flow->excess;
			else
				ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
			break;
		}
#endif
	}

	ret = qdisc_enqueue(skb, flow->q, to_free);
	if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			if (flow)
				flow->qstats.drops++;
		}
		return ret;
	}
	/*
	 * Okay, this may seem weird. We pretend we've dropped the packet if
	 * it goes via ATM. The reason for this is that the outer qdisc
	 * expects to be able to q->dequeue the packet later on if we return
	 * success at this place. Also, sch->q.qdisc needs to reflect whether
	 * there is a packet egligible for dequeuing or not. Note that the
	 * statistics of the outer qdisc are necessarily wrong because of all
	 * this. There's currently no correct solution for this.
	 */
	if (flow == &p->link) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	/* VC-bound: let the tasklet push it out to the VCC. */
	tasklet_schedule(&p->task);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
461*4882a593Smuzhiyun
462*4882a593Smuzhiyun /*
463*4882a593Smuzhiyun * Dequeue packets and send them over ATM. Note that we quite deliberately
464*4882a593Smuzhiyun * avoid checking net_device's flow control here, simply because sch_atm
465*4882a593Smuzhiyun * uses its own channels, which have nothing to do with any CLIP/LANE/or
466*4882a593Smuzhiyun * non-ATM interfaces.
467*4882a593Smuzhiyun */
468*4882a593Smuzhiyun
/*
 * Tasklet body: for every VC-backed flow, drain its child qdisc and
 * hand packets to the VCC's send function, prepending the per-flow
 * link-level header and charging skb->truesize to the ATM socket.
 */
static void sch_atm_dequeue(unsigned long data)
{
	struct Qdisc *sch = (struct Qdisc *)data;
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct sk_buff *skb;

	pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		/* "link" packets are served by atm_tc_dequeue() instead. */
		if (flow == &p->link)
			continue;
		/*
		 * If traffic is properly shaped, this won't generate nasty
		 * little bursts. Otherwise, it may ... (but that's okay)
		 */
		while ((skb = flow->q->ops->peek(flow->q))) {
			/* Peek first: only commit to dequeue once the VC
			 * has room for the packet. */
			if (!atm_may_send(flow->vcc, skb->truesize))
				break;

			skb = qdisc_dequeue_peeked(flow->q);
			if (unlikely(!skb))
				break;

			qdisc_bstats_update(sch, skb);
			bstats_update(&flow->bstats, skb);
			pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
			/* remove any LL header somebody else has attached */
			skb_pull(skb, skb_network_offset(skb));
			/* Make headroom for this flow's header; the packet
			 * is silently dropped if reallocation fails. */
			if (skb_headroom(skb) < flow->hdr_len) {
				struct sk_buff *new;

				new = skb_realloc_headroom(skb, flow->hdr_len);
				dev_kfree_skb(skb);
				if (!new)
					continue;
				skb = new;
			}
			pr_debug("sch_atm_dequeue: ip %p, data %p\n",
				 skb_network_header(skb), skb->data);
			ATM_SKB(skb)->vcc = flow->vcc;
			memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
			       flow->hdr_len);
			/* Account the buffer to the VCC socket's write
			 * allocation before handing ownership over. */
			refcount_add(skb->truesize,
				     &sk_atm(flow->vcc)->sk_wmem_alloc);
			/* atm.atm_options are already set by atm_tc_enqueue */
			flow->vcc->send(flow->vcc, skb);
		}
	}
}
518*4882a593Smuzhiyun
atm_tc_dequeue(struct Qdisc * sch)519*4882a593Smuzhiyun static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
520*4882a593Smuzhiyun {
521*4882a593Smuzhiyun struct atm_qdisc_data *p = qdisc_priv(sch);
522*4882a593Smuzhiyun struct sk_buff *skb;
523*4882a593Smuzhiyun
524*4882a593Smuzhiyun pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
525*4882a593Smuzhiyun tasklet_schedule(&p->task);
526*4882a593Smuzhiyun skb = qdisc_dequeue_peeked(p->link.q);
527*4882a593Smuzhiyun if (skb)
528*4882a593Smuzhiyun sch->q.qlen--;
529*4882a593Smuzhiyun return skb;
530*4882a593Smuzhiyun }
531*4882a593Smuzhiyun
atm_tc_peek(struct Qdisc * sch)532*4882a593Smuzhiyun static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
533*4882a593Smuzhiyun {
534*4882a593Smuzhiyun struct atm_qdisc_data *p = qdisc_priv(sch);
535*4882a593Smuzhiyun
536*4882a593Smuzhiyun pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);
537*4882a593Smuzhiyun
538*4882a593Smuzhiyun return p->link.q->ops->peek(p->link.q);
539*4882a593Smuzhiyun }
540*4882a593Smuzhiyun
/*
 * Set up the qdisc: put the built-in "link" flow (for unclassified
 * traffic) on the flow list, give it a default pfifo child, get its
 * filter block, and arm the dequeue tasklet.
 * Returns 0 or a negative errno from tcf_block_get().
 */
static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
	INIT_LIST_HEAD(&p->flows);
	INIT_LIST_HEAD(&p->link.list);
	list_add(&p->link.list, &p->flows);
	p->link.q = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, sch->handle, extack);
	if (!p->link.q)
		p->link.q = &noop_qdisc;	/* degrade gracefully on OOM */
	pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
	p->link.vcc = NULL;	/* no VC: link traffic is not VC-bound */
	p->link.sock = NULL;
	p->link.common.classid = sch->handle;
	p->link.ref = 1;

	err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
			    extack);
	if (err)
		/* NOTE(review): p->link.q is left allocated here; presumably
		 * cleaned up by a subsequent ->destroy call -- confirm. */
		return err;

	tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
	return 0;
}
569*4882a593Smuzhiyun
atm_tc_reset(struct Qdisc * sch)570*4882a593Smuzhiyun static void atm_tc_reset(struct Qdisc *sch)
571*4882a593Smuzhiyun {
572*4882a593Smuzhiyun struct atm_qdisc_data *p = qdisc_priv(sch);
573*4882a593Smuzhiyun struct atm_flow_data *flow;
574*4882a593Smuzhiyun
575*4882a593Smuzhiyun pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
576*4882a593Smuzhiyun list_for_each_entry(flow, &p->flows, list)
577*4882a593Smuzhiyun qdisc_reset(flow->q);
578*4882a593Smuzhiyun }
579*4882a593Smuzhiyun
/*
 * Tear down the qdisc in two passes: first drop every flow's filter
 * block (and NULL it, so the tcf_block_put() inside atm_tc_put() is a
 * no-op), then release each flow's creation reference, which unlinks
 * and frees them -- hence the _safe iteration.
 */
static void atm_tc_destroy(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow, *tmp;

	pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		tcf_block_put(flow->block);
		flow->block = NULL;
	}

	list_for_each_entry_safe(flow, tmp, &p->flows, list) {
		/* Any remaining extra references indicate a leak; warn
		 * but destroy anyway. */
		if (flow->ref > 1)
			pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
		atm_tc_put(sch, (unsigned long)flow);
	}
	tasklet_kill(&p->task);
}
598*4882a593Smuzhiyun
/*
 * Dump one class over netlink: its link-level header bytes, and -- if
 * the VC is open -- its PVC address and state, plus the excess-flow
 * attribute.  Returns the nest length, or -1 on buffer overflow.
 */
static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
	struct nlattr *nest;

	pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
		 sch, p, flow, skb, tcm);
	if (list_empty(&flow->list))
		return -EINVAL;	/* flow already unlinked/destroyed */
	tcm->tcm_handle = flow->common.classid;
	tcm->tcm_info = flow->q->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
		goto nla_put_failure;
	if (flow->vcc) {
		struct sockaddr_atmpvc pvc;
		int state;

		memset(&pvc, 0, sizeof(pvc));
		pvc.sap_family = AF_ATMPVC;
		/* -1 while the VC has no device bound. */
		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
		pvc.sap_addr.vpi = flow->vcc->vpi;
		pvc.sap_addr.vci = flow->vcc->vci;
		if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
			goto nla_put_failure;
		state = ATM_VF2VS(flow->vcc->flags);
		if (nla_put_u32(skb, TCA_ATM_STATE, state))
			goto nla_put_failure;
	}
	if (flow->excess) {
		/* NOTE(review): this reports the flow's OWN classid rather
		 * than flow->excess->common.classid -- looks suspicious but
		 * is long-standing behavior; confirm before changing. */
		if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->common.classid))
			goto nla_put_failure;
	} else {
		if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
			goto nla_put_failure;
	}
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
647*4882a593Smuzhiyun static int
atm_tc_dump_class_stats(struct Qdisc * sch,unsigned long arg,struct gnet_dump * d)648*4882a593Smuzhiyun atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
649*4882a593Smuzhiyun struct gnet_dump *d)
650*4882a593Smuzhiyun {
651*4882a593Smuzhiyun struct atm_flow_data *flow = (struct atm_flow_data *)arg;
652*4882a593Smuzhiyun
653*4882a593Smuzhiyun if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
654*4882a593Smuzhiyun d, NULL, &flow->bstats) < 0 ||
655*4882a593Smuzhiyun gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
656*4882a593Smuzhiyun return -1;
657*4882a593Smuzhiyun
658*4882a593Smuzhiyun return 0;
659*4882a593Smuzhiyun }
660*4882a593Smuzhiyun
/* The atm qdisc itself has no netlink options to report. */
static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return 0;
}
665*4882a593Smuzhiyun
/* Class operations table.  Note .unbind_tcf reuses atm_tc_put to drop
 * the reference taken by .bind_tcf (atm_tc_bind_filter). */
static const struct Qdisc_class_ops atm_class_ops = {
	.graft		= atm_tc_graft,
	.leaf		= atm_tc_leaf,
	.find		= atm_tc_find,
	.change		= atm_tc_change,
	.delete		= atm_tc_delete,
	.walk		= atm_tc_walk,
	.tcf_block	= atm_tc_tcf_block,
	.bind_tcf	= atm_tc_bind_filter,
	.unbind_tcf	= atm_tc_put,
	.dump		= atm_tc_dump_class,
	.dump_stats	= atm_tc_dump_class_stats,
};
679*4882a593Smuzhiyun
/* Qdisc operations table for the "atm" scheduler. */
static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
	.cl_ops		= &atm_class_ops,
	.id		= "atm",
	.priv_size	= sizeof(struct atm_qdisc_data),
	.enqueue	= atm_tc_enqueue,
	.dequeue	= atm_tc_dequeue,
	.peek		= atm_tc_peek,
	.init		= atm_tc_init,
	.reset		= atm_tc_reset,
	.destroy	= atm_tc_destroy,
	.dump		= atm_tc_dump,
	.owner		= THIS_MODULE,
};
693*4882a593Smuzhiyun
/* Module load: register the "atm" qdisc with the scheduler core. */
static int __init atm_init(void)
{
	return register_qdisc(&atm_qdisc_ops);
}
698*4882a593Smuzhiyun
/* Module unload: unregister the "atm" qdisc. */
static void __exit atm_exit(void)
{
	unregister_qdisc(&atm_qdisc_ops);
}
703*4882a593Smuzhiyun
/* Module plumbing. */
module_init(atm_init)
module_exit(atm_exit)
MODULE_LICENSE("GPL");
707