1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Equalizer Load-balancer for serial network interfaces.
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
5*4882a593Smuzhiyun * NCM: Network and Communications Management, Inc.
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * (c) Copyright 2002 David S. Miller (davem@redhat.com)
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * This software may be used and distributed according to the terms
10*4882a593Smuzhiyun * of the GNU General Public License, incorporated herein by reference.
11*4882a593Smuzhiyun *
12*4882a593Smuzhiyun * The author may be reached as simon@ncm.com, or C/O
13*4882a593Smuzhiyun * NCM
14*4882a593Smuzhiyun * Attn: Simon Janes
15*4882a593Smuzhiyun * 6803 Whittier Ave
16*4882a593Smuzhiyun * McLean VA 22101
17*4882a593Smuzhiyun * Phone: 1-703-847-0040 ext 103
18*4882a593Smuzhiyun */
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun /*
21*4882a593Smuzhiyun * Sources:
22*4882a593Smuzhiyun * skeleton.c by Donald Becker.
23*4882a593Smuzhiyun * Inspirations:
24*4882a593Smuzhiyun * The Harried and Overworked Alan Cox
25*4882a593Smuzhiyun * Conspiracies:
26*4882a593Smuzhiyun * The Alan Cox and Mike McLagan plot to get someone else to do the code,
27*4882a593Smuzhiyun * which turned out to be me.
28*4882a593Smuzhiyun */
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun /*
31*4882a593Smuzhiyun * $Log: eql.c,v $
32*4882a593Smuzhiyun * Revision 1.2 1996/04/11 17:51:52 guru
33*4882a593Smuzhiyun * Added one-line eql_remove_slave patch.
34*4882a593Smuzhiyun *
35*4882a593Smuzhiyun * Revision 1.1 1996/04/11 17:44:17 guru
36*4882a593Smuzhiyun * Initial revision
37*4882a593Smuzhiyun *
38*4882a593Smuzhiyun * Revision 3.13 1996/01/21 15:17:18 alan
39*4882a593Smuzhiyun * tx_queue_len changes.
40*4882a593Smuzhiyun * reformatted.
41*4882a593Smuzhiyun *
42*4882a593Smuzhiyun * Revision 3.12 1995/03/22 21:07:51 anarchy
43*4882a593Smuzhiyun * Added capable() checks on configuration.
44*4882a593Smuzhiyun * Moved header file.
45*4882a593Smuzhiyun *
46*4882a593Smuzhiyun * Revision 3.11 1995/01/19 23:14:31 guru
47*4882a593Smuzhiyun * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
48*4882a593Smuzhiyun * (priority_Bps) + bytes_queued * 8;
49*4882a593Smuzhiyun *
50*4882a593Smuzhiyun * Revision 3.10 1995/01/19 23:07:53 guru
51*4882a593Smuzhiyun * back to
52*4882a593Smuzhiyun * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
53*4882a593Smuzhiyun * (priority_Bps) + bytes_queued;
54*4882a593Smuzhiyun *
55*4882a593Smuzhiyun * Revision 3.9 1995/01/19 22:38:20 guru
56*4882a593Smuzhiyun * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
57*4882a593Smuzhiyun * (priority_Bps) + bytes_queued * 4;
58*4882a593Smuzhiyun *
59*4882a593Smuzhiyun * Revision 3.8 1995/01/19 22:30:55 guru
60*4882a593Smuzhiyun * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
61*4882a593Smuzhiyun * (priority_Bps) + bytes_queued * 2;
62*4882a593Smuzhiyun *
63*4882a593Smuzhiyun * Revision 3.7 1995/01/19 21:52:35 guru
64*4882a593Smuzhiyun * printk's trimmed out.
65*4882a593Smuzhiyun *
66*4882a593Smuzhiyun * Revision 3.6 1995/01/19 21:49:56 guru
67*4882a593Smuzhiyun * This is working pretty well. I gained 1 K/s in speed.. now it's just
68*4882a593Smuzhiyun * robustness and printk's to be diked out.
69*4882a593Smuzhiyun *
70*4882a593Smuzhiyun * Revision 3.5 1995/01/18 22:29:59 guru
71*4882a593Smuzhiyun * still crashes the kernel when the lock_wait thing is woken up.
72*4882a593Smuzhiyun *
73*4882a593Smuzhiyun * Revision 3.4 1995/01/18 21:59:47 guru
74*4882a593Smuzhiyun * Broken set-bit locking snapshot
75*4882a593Smuzhiyun *
76*4882a593Smuzhiyun * Revision 3.3 1995/01/17 22:09:18 guru
77*4882a593Smuzhiyun * infinite sleep in a lock somewhere..
78*4882a593Smuzhiyun *
79*4882a593Smuzhiyun * Revision 3.2 1995/01/15 16:46:06 guru
80*4882a593Smuzhiyun * Log trimmed of non-pertinent 1.x branch messages
81*4882a593Smuzhiyun *
82*4882a593Smuzhiyun * Revision 3.1 1995/01/15 14:41:45 guru
83*4882a593Smuzhiyun * New Scheduler and timer stuff...
84*4882a593Smuzhiyun *
85*4882a593Smuzhiyun * Revision 1.15 1995/01/15 14:29:02 guru
86*4882a593Smuzhiyun * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one
87*4882a593Smuzhiyun * with the dumber scheduler
88*4882a593Smuzhiyun *
89*4882a593Smuzhiyun * Revision 1.14 1995/01/15 02:37:08 guru
90*4882a593Smuzhiyun * shock.. the kept-new-versions could have zonked working
91*4882a593Smuzhiyun * stuff.. shudder
92*4882a593Smuzhiyun *
93*4882a593Smuzhiyun * Revision 1.13 1995/01/15 02:36:31 guru
94*4882a593Smuzhiyun * big changes
95*4882a593Smuzhiyun *
96*4882a593Smuzhiyun * scheduler was torn out and replaced with something smarter
97*4882a593Smuzhiyun *
98*4882a593Smuzhiyun * global names not prefixed with eql_ were renamed to protect
99*4882a593Smuzhiyun * against namespace collisions
100*4882a593Smuzhiyun *
101*4882a593Smuzhiyun * a few more abstract interfaces were added to facilitate any
102*4882a593Smuzhiyun * potential change of datastructure. the driver is still using
103*4882a593Smuzhiyun * a linked list of slaves. going to a heap would be a bit of
104*4882a593Smuzhiyun * an overkill.
105*4882a593Smuzhiyun *
106*4882a593Smuzhiyun * this compiles fine with no warnings.
107*4882a593Smuzhiyun *
108*4882a593Smuzhiyun * the locking mechanism and timer stuff must be written however,
109*4882a593Smuzhiyun * this version will not work otherwise
110*4882a593Smuzhiyun *
111*4882a593Smuzhiyun * Sorry, I had to rewrite most of this for 2.5.x -DaveM
112*4882a593Smuzhiyun */
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun #include <linux/capability.h>
117*4882a593Smuzhiyun #include <linux/module.h>
118*4882a593Smuzhiyun #include <linux/kernel.h>
119*4882a593Smuzhiyun #include <linux/init.h>
120*4882a593Smuzhiyun #include <linux/slab.h>
121*4882a593Smuzhiyun #include <linux/timer.h>
122*4882a593Smuzhiyun #include <linux/netdevice.h>
123*4882a593Smuzhiyun #include <net/net_namespace.h>
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun #include <linux/if.h>
126*4882a593Smuzhiyun #include <linux/if_arp.h>
127*4882a593Smuzhiyun #include <linux/if_eql.h>
128*4882a593Smuzhiyun #include <linux/pkt_sched.h>
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun #include <linux/uaccess.h>
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun static int eql_open(struct net_device *dev);
133*4882a593Smuzhiyun static int eql_close(struct net_device *dev);
134*4882a593Smuzhiyun static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
135*4882a593Smuzhiyun static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun #define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE)
138*4882a593Smuzhiyun #define eql_is_master(dev) ((dev->flags & IFF_MASTER) == IFF_MASTER)
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
141*4882a593Smuzhiyun
eql_timer(struct timer_list * t)142*4882a593Smuzhiyun static void eql_timer(struct timer_list *t)
143*4882a593Smuzhiyun {
144*4882a593Smuzhiyun equalizer_t *eql = from_timer(eql, t, timer);
145*4882a593Smuzhiyun struct list_head *this, *tmp, *head;
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun spin_lock(&eql->queue.lock);
148*4882a593Smuzhiyun head = &eql->queue.all_slaves;
149*4882a593Smuzhiyun list_for_each_safe(this, tmp, head) {
150*4882a593Smuzhiyun slave_t *slave = list_entry(this, slave_t, list);
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun if ((slave->dev->flags & IFF_UP) == IFF_UP) {
153*4882a593Smuzhiyun slave->bytes_queued -= slave->priority_Bps;
154*4882a593Smuzhiyun if (slave->bytes_queued < 0)
155*4882a593Smuzhiyun slave->bytes_queued = 0;
156*4882a593Smuzhiyun } else {
157*4882a593Smuzhiyun eql_kill_one_slave(&eql->queue, slave);
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun spin_unlock(&eql->queue.lock);
162*4882a593Smuzhiyun
163*4882a593Smuzhiyun eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
164*4882a593Smuzhiyun add_timer(&eql->timer);
165*4882a593Smuzhiyun }
166*4882a593Smuzhiyun
/* Banner printed once at module load. */
static const char version[] __initconst =
	"Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";

/* Device callbacks for the eql master interface. */
static const struct net_device_ops eql_netdev_ops = {
	.ndo_open	= eql_open,
	.ndo_stop	= eql_close,
	.ndo_do_ioctl	= eql_ioctl,
	.ndo_start_xmit	= eql_slave_xmit,
};
176*4882a593Smuzhiyun
/*
 * One-time initialization of the eql master device: set up the
 * housekeeping timer, the (empty) slave list and its lock, and
 * override the generic netdev defaults that don't suit a
 * serial-line load balancer.
 */
static void __init eql_setup(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/* Timer is armed later, in eql_open(). */
	timer_setup(&eql->timer, eql_timer, 0);
	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;

	spin_lock_init(&eql->queue.lock);
	INIT_LIST_HEAD(&eql->queue.all_slaves);
	eql->queue.master_dev = dev;

	dev->netdev_ops = &eql_netdev_ops;

	/*
	 * Now we undo some of the things that eth_setup does
	 * that we don't like
	 */

	dev->mtu = EQL_DEFAULT_MTU;	/* set to 576 in if_eql.h */
	dev->flags = IFF_MASTER;

	dev->type = ARPHRD_SLIP;
	dev->tx_queue_len = 5;		/* Hands them off fast */
	netif_keep_dst(dev);
}
202*4882a593Smuzhiyun
/*
 * ndo_open: bring the master up.  Resets the slave limits to their
 * defaults and starts the housekeeping timer.  The slave list must be
 * empty here — eql_close() killed every slave before the last close.
 */
static int eql_open(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/* XXX We should force this off automatically for the user. */
	netdev_info(dev,
		    "remember to turn off Van-Jacobson compression on your slave devices\n");

	BUG_ON(!list_empty(&eql->queue.all_slaves));

	eql->min_slaves = 1;
	eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */

	/* NOTE(review): timer.expires was computed in eql_setup(); on a
	 * re-open long after boot the first expiry fires immediately,
	 * after which eql_timer() reschedules normally — confirm this is
	 * intended. */
	add_timer(&eql->timer);

	return 0;
}
220*4882a593Smuzhiyun
/*
 * Unlink @slave from @queue, clear its IFF_SLAVE flag, drop the device
 * reference taken at enslave time, and free the entry.  All callers
 * hold queue->lock.
 */
static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{
	list_del(&slave->list);
	queue->num_slaves--;
	slave->dev->flags &= ~IFF_SLAVE;
	dev_put(slave->dev);	/* pairs with dev_hold() in __eql_insert_slave() */
	kfree(slave);
}
229*4882a593Smuzhiyun
/*
 * Remove and free every slave attached to @queue.  Takes the queue
 * lock with BH disabled, so it is safe against the xmit path.
 */
static void eql_kill_slave_queue(slave_queue_t *queue)
{
	slave_t *slave, *next;

	spin_lock_bh(&queue->lock);
	/* _safe variant: eql_kill_one_slave() unlinks entries mid-walk. */
	list_for_each_entry_safe(slave, next, &queue->all_slaves, list)
		eql_kill_one_slave(queue, slave);
	spin_unlock_bh(&queue->lock);
}
245*4882a593Smuzhiyun
/*
 * ndo_stop: bring the master down.  Stops the housekeeping timer
 * first, then releases every slave.
 */
static int eql_close(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/*
	 *	The timer has to be stopped first before we start hacking away
	 *	at the data structure it scans every so often...
	 */

	del_timer_sync(&eql->timer);

	eql_kill_slave_queue(&eql->queue);

	return 0;
}
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
263*4882a593Smuzhiyun static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);
264*4882a593Smuzhiyun
265*4882a593Smuzhiyun static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
266*4882a593Smuzhiyun static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
267*4882a593Smuzhiyun
268*4882a593Smuzhiyun static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
269*4882a593Smuzhiyun static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);
270*4882a593Smuzhiyun
/*
 * ndo_do_ioctl: dispatch the private EQL_* ioctls.  Read-only queries
 * (get master/slave config) are open to everyone; everything that
 * mutates state requires CAP_NET_ADMIN.
 */
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
	    !capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
		case EQL_ENSLAVE:
			return eql_enslave(dev, ifr->ifr_data);
		case EQL_EMANCIPATE:
			return eql_emancipate(dev, ifr->ifr_data);
		case EQL_GETSLAVECFG:
			return eql_g_slave_cfg(dev, ifr->ifr_data);
		case EQL_SETSLAVECFG:
			return eql_s_slave_cfg(dev, ifr->ifr_data);
		case EQL_GETMASTRCFG:
			return eql_g_master_cfg(dev, ifr->ifr_data);
		case EQL_SETMASTRCFG:
			return eql_s_master_cfg(dev, ifr->ifr_data);
		default:
			return -EOPNOTSUPP;
	}
}
294*4882a593Smuzhiyun
295*4882a593Smuzhiyun /* queue->lock must be held */
/* queue->lock must be held */
/*
 * Pick the slave with the lowest load for the next transmit.  Load is
 * biased around the midpoint of the unsigned range:
 *
 *	load = (~0UL - ~0UL/2) - priority_Bps + bytes_queued * 8
 *
 * so a higher configured bandwidth (priority_Bps) lowers the load and
 * queued-but-undrained bytes raise it.  Dead slaves (device not up)
 * are reaped on the way past.  Returns NULL when no usable slave
 * remains.
 */
static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
{
	unsigned long best_load = ~0UL;
	struct list_head *this, *tmp, *head;
	slave_t *best_slave;

	best_slave = NULL;

	/* Make a pass to set the best slave. */
	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);
		unsigned long slave_load, bytes_queued, priority_Bps;

		/* Go through the slave list once, updating best_slave
		 * whenever a new best_load is found.
		 */
		bytes_queued = slave->bytes_queued;
		priority_Bps = slave->priority_Bps;
		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave_load = (~0UL - (~0UL / 2)) -
				(priority_Bps) + bytes_queued * 8;

			if (slave_load < best_load) {
				best_load = slave_load;
				best_slave = slave;
			}
		} else {
			/* We found a dead slave, kill it. */
			eql_kill_one_slave(queue, slave);
		}
	}
	return best_slave;
}
330*4882a593Smuzhiyun
/*
 * ndo_start_xmit: route the skb out through the least-loaded slave.
 * If no slave is available the packet is dropped and counted.  Always
 * consumes the skb, so always returns NETDEV_TX_OK.
 */
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;

	spin_lock(&eql->queue.lock);

	slave = __eql_schedule_slaves(&eql->queue);
	if (slave) {
		struct net_device *slave_dev = slave->dev;

		skb->dev = slave_dev;
		skb->priority = TC_PRIO_FILLER;
		/* Charged here; drained periodically by eql_timer(). */
		slave->bytes_queued += skb->len;
		dev_queue_xmit(skb);
		dev->stats.tx_packets++;
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
	}

	spin_unlock(&eql->queue.lock);

	return NETDEV_TX_OK;
}
356*4882a593Smuzhiyun
357*4882a593Smuzhiyun /*
358*4882a593Smuzhiyun * Private ioctl functions
359*4882a593Smuzhiyun */
360*4882a593Smuzhiyun
361*4882a593Smuzhiyun /* queue->lock must be held */
__eql_find_slave_dev(slave_queue_t * queue,struct net_device * dev)362*4882a593Smuzhiyun static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
363*4882a593Smuzhiyun {
364*4882a593Smuzhiyun struct list_head *this, *head;
365*4882a593Smuzhiyun
366*4882a593Smuzhiyun head = &queue->all_slaves;
367*4882a593Smuzhiyun list_for_each(this, head) {
368*4882a593Smuzhiyun slave_t *slave = list_entry(this, slave_t, list);
369*4882a593Smuzhiyun
370*4882a593Smuzhiyun if (slave->dev == dev)
371*4882a593Smuzhiyun return slave;
372*4882a593Smuzhiyun }
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun return NULL;
375*4882a593Smuzhiyun }
376*4882a593Smuzhiyun
/*
 * Nonzero when @queue already holds the master's configured maximum
 * number of slaves.
 */
static inline int eql_is_full(slave_queue_t *queue)
{
	equalizer_t *eql = netdev_priv(queue->master_dev);

	return queue->num_slaves >= eql->max_slaves;
}
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun /* queue->lock must be held */
/* queue->lock must be held */
/*
 * Link @slave into @queue, replacing any stale entry for the same
 * device.  Takes a reference on the slave device and marks it
 * IFF_SLAVE.  Returns 0 on success, -ENOSPC when the queue is full.
 */
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
	slave_t *duplicate;

	if (eql_is_full(queue))
		return -ENOSPC;

	/* Re-enslaving an already-enslaved device replaces the old entry. */
	duplicate = __eql_find_slave_dev(queue, slave->dev);
	if (duplicate)
		eql_kill_one_slave(queue, duplicate);

	dev_hold(slave->dev);	/* released in eql_kill_one_slave() */
	list_add(&slave->list, &queue->all_slaves);
	queue->num_slaves++;
	slave->dev->flags |= IFF_SLAVE;

	return 0;
}
406*4882a593Smuzhiyun
/*
 * EQL_ENSLAVE: attach the device named in @srqp to @master_dev with
 * the requested priority (bits/sec; priority_Bps is the bytes/sec
 * form used by the scheduler).  The master must be up, and the target
 * must be neither a master nor already someone's slave.
 */
static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	struct net_device *slave_dev;
	slaving_request_t srq;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
	if (!slave_dev)
		return -ENODEV;

	if ((master_dev->flags & IFF_UP) == IFF_UP) {
		/* slave is not a master & not already a slave: */
		if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
			/* kzalloc() replaces the old kmalloc()+memset() pair. */
			slave_t *s = kzalloc(sizeof(*s), GFP_KERNEL);
			equalizer_t *eql = netdev_priv(master_dev);
			int ret;

			if (!s)
				return -ENOMEM;

			s->dev = slave_dev;
			s->priority = srq.priority;
			s->priority_bps = srq.priority;
			s->priority_Bps = srq.priority / 8;

			spin_lock_bh(&eql->queue.lock);
			ret = __eql_insert_slave(&eql->queue, s);
			if (ret)
				kfree(s);

			spin_unlock_bh(&eql->queue.lock);

			return ret;
		}
	}

	return -EINVAL;
}
448*4882a593Smuzhiyun
/*
 * EQL_EMANCIPATE: detach the device named in @srqp from @master_dev.
 * Returns -EINVAL if the device is not currently a slave of this
 * master, -ENODEV if it does not exist at all.
 */
static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	equalizer_t *eql = netdev_priv(master_dev);
	struct net_device *slave_dev;
	slaving_request_t srq;
	int ret;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			eql_kill_one_slave(&eql->queue, slave);
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	return ret;
}
476*4882a593Smuzhiyun
/*
 * EQL_GETSLAVECFG: report the configured priority of the slave named
 * in @scp back to userspace.  The copy_to_user happens outside the
 * queue lock, from the on-stack snapshot.
 */
static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			sc.priority = slave->priority;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
		ret = -EFAULT;

	return ret;
}
509*4882a593Smuzhiyun
/*
 * EQL_SETSLAVECFG: update the priority of the slave named in @scp.
 * Priority arrives in bits/sec; priority_Bps keeps the bytes/sec form
 * the scheduler and timer work with.
 */
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	slave_t *slave;
	equalizer_t *eql;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	eql = netdev_priv(dev);
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			slave->priority = sc.priority;
			slave->priority_bps = sc.priority;
			slave->priority_Bps = sc.priority / 8;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	return ret;
}
542*4882a593Smuzhiyun
/*
 * EQL_GETMASTRCFG: report the master's min/max slave limits to
 * userspace.  -EINVAL when @dev is not an eql master.
 */
static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	master_config_t mc;
	equalizer_t *eql;

	if (!eql_is_master(dev))
		return -EINVAL;

	memset(&mc, 0, sizeof(master_config_t));
	eql = netdev_priv(dev);
	mc.max_slaves = eql->max_slaves;
	mc.min_slaves = eql->min_slaves;
	if (copy_to_user(mcp, &mc, sizeof (master_config_t)))
		return -EFAULT;
	return 0;
}
560*4882a593Smuzhiyun
/*
 * EQL_SETMASTRCFG: set the master's min/max slave limits from
 * userspace.  -EINVAL when @dev is not an eql master.
 */
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	master_config_t mc;
	equalizer_t *eql;

	if (copy_from_user(&mc, mcp, sizeof (master_config_t)))
		return -EFAULT;

	if (!eql_is_master(dev))
		return -EINVAL;

	eql = netdev_priv(dev);
	eql->max_slaves = mc.max_slaves;
	eql->min_slaves = mc.min_slaves;
	return 0;
}
577*4882a593Smuzhiyun
/* The single "eql" master device this module registers. */
static struct net_device *dev_eql;

/*
 * Module init: allocate and register the "eql" master netdev.
 * On registration failure the allocated device is freed again.
 */
static int __init eql_init_module(void)
{
	int err;

	pr_info("%s\n", version);

	dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", NET_NAME_UNKNOWN,
			       eql_setup);
	if (!dev_eql)
		return -ENOMEM;

	err = register_netdev(dev_eql);
	if (err)
		free_netdev(dev_eql);
	return err;
}
596*4882a593Smuzhiyun
/*
 * Module exit: unregister (which closes the device and thereby frees
 * all slaves via eql_close()) and release the netdev.
 */
static void __exit eql_cleanup_module(void)
{
	unregister_netdev(dev_eql);
	free_netdev(dev_eql);
}
602*4882a593Smuzhiyun
603*4882a593Smuzhiyun module_init(eql_init_module);
604*4882a593Smuzhiyun module_exit(eql_cleanup_module);
605*4882a593Smuzhiyun MODULE_LICENSE("GPL");
606