xref: /OK3568_Linux_fs/kernel/drivers/net/plip/plip.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
/* PLIP: A parallel port "network" driver for Linux. */
/* This driver is for a parallel port with a 5-bit cable (LapLink (R) cable). */
/*
 * Authors:	Donald Becker <becker@scyld.com>
 *		Tommy Thorn <thorn@daimi.aau.dk>
 *		Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		Peter Bauer <100136.3530@compuserve.com>
 *		Niibe Yutaka <gniibe@mri.co.jp>
 *		Nimrod Zimerman <zimerman@mailandnews.com>
 *
 * Enhancements:
 *		Modularization and ifreq/ifmap support by Alan Cox.
 *		Rewritten by Niibe Yutaka.
 *		parport-sharing awareness code by Philip Blundell.
 *		SMP locking by Niibe Yutaka.
 *		Support for parallel ports with no IRQ (poll mode),
 *		Modifications to use the parallel port API
 *		by Nimrod Zimerman.
 *
 * Fixes:
 *		Niibe Yutaka
 *		  - Module initialization.
 *		  - MTU fix.
 *		  - Make sure other end is OK, before sending a packet.
 *		  - Fix immediate timer problem.
 *
 *		Al Viro
 *		  - Changed {enable,disable}_irq handling to make it work
 *		    with new ("stack") semantics.
 */

/*
 * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
 * inspired by Russ Nelson's parallel port packet driver.
 *
 * NOTE:
 *     Tanabe Hiroyasu changed the protocol, and that version shipped in
 *     Linux v1.0.  To stay able to communicate with DOS machines running
 *     the Crynwr packet driver, Peter Bauer later changed the protocol
 *     back to the original one.
 *
 *     This version follows the original PLIP protocol, so it cannot
 *     communicate with the PLIP of Linux v1.0.
 */

/*
 *     To use this driver with a DOS box, turn on the ARP switch:
 *	# ifconfig plip[0-2] arp
 */
static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";

/*
  Sources:
	Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
	"parallel.asm" parallel port packet driver.

  The "Crynwr" parallel port standard specifies the following protocol:
    Trigger by sending nibble '0x8' (this causes interrupt on other end)
    count-low octet
    count-high octet
    ... data octets
    checksum octet
  Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
			<wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>

  The packet is encapsulated as if it were ethernet.

  The cable used is a de facto standard parallel null cable -- sold as
  a "LapLink" cable by various places.  You'll need a 12-conductor cable to
  make one yourself.  The wiring is:
    SLCTIN	17 - 17
    GROUND	25 - 25
    D0->ERROR	2 - 15		15 - 2
    D1->SLCT	3 - 13		13 - 3
    D2->PAPOUT	4 - 12		12 - 4
    D3->ACK	5 - 10		10 - 5
    D4->BUSY	6 - 11		11 - 6
  Do not connect the other pins.  They are
    D5,D6,D7 are 7,8,9
    STROBE is 1, FEED is 14, INIT is 16
    extra grounds are 18,19,20,21,22,23,24
*/
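
/*
  A worked example of the octet encoding above (the value 0xA5 is arbitrary,
  chosen only for illustration): transferring one data octet takes two
  nibble cycles.  The sender waits for the receiver's status to read '0x1?',
  then drives 0x10 + (0xA5 & 0x0F) = 0x15 on the data lines; it then waits
  for status '0x0?' and drives 0x00 + ((0xA5 >> 4) & 0x0F) = 0x0A.  The
  receiver reassembles the octet as 0x05 | (0x0A << 4) = 0xA5 and adds it to
  its running checksum, which is finally compared against the checksum octet
  that ends the frame.
*/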

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/if_plip.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/parport.h>
#include <linux/bitops.h>

#include <net/neighbour.h>

#include <asm/irq.h>
#include <asm/byteorder.h>

/* Maximum number of devices to support. */
#define PLIP_MAX  8

/* Use 0 for production, 1 for verification, >2 for debug */
#ifndef NET_DEBUG
#define NET_DEBUG 1
#endif
static const unsigned int net_debug = NET_DEBUG;

#define ENABLE(irq)  if (irq != -1) enable_irq(irq)
#define DISABLE(irq) if (irq != -1) disable_irq(irq)

/* In microseconds */
#define PLIP_DELAY_UNIT		   1

/* Connection timeout = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_TRIGGER_WAIT	 500

/* Nibble timeout = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_NIBBLE_WAIT        3000
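
/* With the defaults above, the handshake trigger is retried for about
   PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT = 500 usec and each nibble for about
   3000 usec (3 ms).  Both values are copied into per-device fields
   (nl->trigger, nl->nibble) and can be changed at run time through the
   SIOCDEVPLIP ioctl handled in plip_ioctl() below. */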

/* Bottom halves */
static void plip_kick_bh(struct work_struct *work);
static void plip_bh(struct work_struct *work);
static void plip_timer_bh(struct work_struct *work);

/* Interrupt handler */
static void plip_interrupt(void *dev_id);

/* Functions for DEV methods */
static netdev_tx_t plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
                            unsigned short type, const void *daddr,
			    const void *saddr, unsigned len);
static int plip_hard_header_cache(const struct neighbour *neigh,
                                  struct hh_cache *hh, __be16 type);
static int plip_open(struct net_device *dev);
static int plip_close(struct net_device *dev);
static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int plip_preempt(void *handle);
static void plip_wakeup(void *handle);

enum plip_connection_state {
	PLIP_CN_NONE=0,
	PLIP_CN_RECEIVE,
	PLIP_CN_SEND,
	PLIP_CN_CLOSING,
	PLIP_CN_ERROR
};

enum plip_packet_state {
	PLIP_PK_DONE=0,
	PLIP_PK_TRIGGER,
	PLIP_PK_LENGTH_LSB,
	PLIP_PK_LENGTH_MSB,
	PLIP_PK_DATA,
	PLIP_PK_CHECKSUM
};

enum plip_nibble_state {
	PLIP_NB_BEGIN,
	PLIP_NB_1,
	PLIP_NB_2,
};

struct plip_local {
	enum plip_packet_state state;
	enum plip_nibble_state nibble;
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;
	unsigned short byte;
	unsigned char  checksum;
	unsigned char  data;
	struct sk_buff *skb;
};

struct net_local {
	struct net_device *dev;
	struct work_struct immediate;
	struct delayed_work deferred;
	struct delayed_work timer;
	struct plip_local snd_data;
	struct plip_local rcv_data;
	struct pardevice *pardev;
	unsigned long  trigger;
	unsigned long  nibble;
	enum plip_connection_state connection;
	unsigned short timeout_count;
	int is_deferred;
	int port_owner;
	int should_relinquish;
	spinlock_t lock;
	atomic_t kill_timer;
	struct completion killed_timer_cmp;
};

static inline void enable_parport_interrupts (struct net_device *dev)
{
	if (dev->irq != -1)
	{
		struct parport *port =
		   ((struct net_local *)netdev_priv(dev))->pardev->port;
		port->ops->enable_irq (port);
	}
}

static inline void disable_parport_interrupts (struct net_device *dev)
{
	if (dev->irq != -1)
	{
		struct parport *port =
		   ((struct net_local *)netdev_priv(dev))->pardev->port;
		port->ops->disable_irq (port);
	}
}

static inline void write_data (struct net_device *dev, unsigned char data)
{
	struct parport *port =
	   ((struct net_local *)netdev_priv(dev))->pardev->port;

	port->ops->write_data (port, data);
}

static inline unsigned char read_status (struct net_device *dev)
{
	struct parport *port =
	   ((struct net_local *)netdev_priv(dev))->pardev->port;

	return port->ops->read_status (port);
}

static const struct header_ops plip_header_ops = {
	.create	= plip_hard_header,
	.cache  = plip_hard_header_cache,
};

static const struct net_device_ops plip_netdev_ops = {
	.ndo_open		 = plip_open,
	.ndo_stop		 = plip_close,
	.ndo_start_xmit		 = plip_tx_packet,
	.ndo_do_ioctl		 = plip_ioctl,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_validate_addr	 = eth_validate_addr,
};

/* Entry point of PLIP driver.
   Probe the hardware, and register/initialize the driver.

   PLIP is rather weird, because of the way it interacts with the parport
   system.  It is _not_ initialised from Space.c.  Instead, plip_init()
   is called, and that function makes up a "struct net_device" for each port, and
   then calls us here.

   */
static void
plip_init_netdev(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	/* Then, override parts of it */
	dev->tx_queue_len 	 = 10;
	dev->flags	         = IFF_POINTOPOINT|IFF_NOARP;
	memset(dev->dev_addr, 0xfc, ETH_ALEN);

	dev->netdev_ops		 = &plip_netdev_ops;
	dev->header_ops          = &plip_header_ops;


	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger	= PLIP_TRIGGER_WAIT;
	nl->nibble	= PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, plip_bh);
	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);

	if (dev->irq == -1)
		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

	spin_lock_init(&nl->lock);
}

/* Bottom half handler for the delayed request.
   This routine is kicked by do_timer().
   Request `plip_bh' to be invoked. */
static void
plip_kick_bh(struct work_struct *work)
{
	struct net_local *nl =
		container_of(work, struct net_local, deferred.work);

	if (nl->is_deferred)
		schedule_work(&nl->immediate);
}

/* Forward declarations of internal routines */
static int plip_none(struct net_device *, struct net_local *,
		     struct plip_local *, struct plip_local *);
static int plip_receive_packet(struct net_device *, struct net_local *,
			       struct plip_local *, struct plip_local *);
static int plip_send_packet(struct net_device *, struct net_local *,
			    struct plip_local *, struct plip_local *);
static int plip_connection_close(struct net_device *, struct net_local *,
				 struct plip_local *, struct plip_local *);
static int plip_error(struct net_device *, struct net_local *,
		      struct plip_local *, struct plip_local *);
static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
				 struct plip_local *snd,
				 struct plip_local *rcv,
				 int error);

#define OK        0
#define TIMEOUT   1
#define ERROR     2
#define HS_TIMEOUT	3

typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
			 struct plip_local *snd, struct plip_local *rcv);

static const plip_func connection_state_table[] =
{
	plip_none,
	plip_receive_packet,
	plip_send_packet,
	plip_connection_close,
	plip_error
};
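
/* Note: plip_bh() below indexes this table directly with nl->connection,
   so the entries must stay in the same order as enum plip_connection_state. */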

/* Bottom half handler of PLIP. */
static void
plip_bh(struct work_struct *work)
{
	struct net_local *nl = container_of(work, struct net_local, immediate);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;
	plip_func f;
	int r;

	nl->is_deferred = 0;
	f = connection_state_table[nl->connection];
	if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
	    (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}
}

static void
plip_timer_bh(struct work_struct *work)
{
	struct net_local *nl =
		container_of(work, struct net_local, timer.work);

	if (!(atomic_read (&nl->kill_timer))) {
		plip_interrupt (nl->dev);

		schedule_delayed_work(&nl->timer, 1);
	}
	else {
		complete(&nl->killed_timer_cmp);
	}
}

static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;
	/*
	 * This is tricky. If we got here from the beginning of send (either
	 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
	 * already disabled. With the old variant of {enable,disable}_irq()
	 * extra disable_irq() was a no-op. Now it became mortal - it's
	 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
	 * that is). So we have to treat HS_TIMEOUT and ERROR from send
	 * in a special way.
	 */

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
			    nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			error = HS_TIMEOUT;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		dev->stats.rx_dropped++;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		dev_kfree_skb_irq(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_consume_skb_irq(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}

static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}

/* PLIP_RECEIVE --- receive a byte (two nibbles)
   Returns OK on success, TIMEOUT on timeout */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p = (c0 >> 3) & 0x0f;
		write_data (dev, 0x10); /* send ACK */
		*ns_p = PLIP_NB_1;
		fallthrough;

	case PLIP_NB_1:
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;
		write_data (dev, 0x00); /* send ACK */
		*ns_p = PLIP_NB_BEGIN;
	case PLIP_NB_2:
		break;
	}
	return OK;
}
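
/* The four data bits of each incoming nibble arrive on the ERROR, SLCT,
   PAPOUT and ACK status lines (status bits 3-6, per the cable wiring in the
   header comment), while BUSY (status bit 7) carries the sender's strobe;
   that is what the (c0 >> 3) & 0x0f and (c0 << 1) & 0xf0 extractions above
   decode. */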

/*
 *	Determine the packet's protocol ID. The rule here is that we
 *	assume 802.3 if the type field is short enough to be a length.
 *	This is normal practice and works for any 'now in use' protocol.
 *
 *	PLIP is ethernet-ish but the daddr might not be valid if unicast.
 *	PLIP fortunately has no bus architecture (it's point-to-point).
 *
 *	We can't fix the daddr thing as that quirk (more bug) is embedded
 *	in far too many old systems not all even running Linux.
 */

static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	if(is_multicast_ether_addr(eth->h_dest))
	{
		if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 *	This ALLMULTI check should be redundant by 1.4
	 *	so don't forget to remove it.
	 */

	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 *	This is a magic hack to spot IPX packets. Older Novell breaks
	 *	the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 *	layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 *	won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/*
	 *	Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}

/* PLIP_RECEIVE_PACKET --- receive a packet */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01); /* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		fallthrough;

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision, here dev->tbusy == 1 */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		fallthrough;

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		if (rcv->length.h > dev->mtu + dev->hard_header_len ||
		    rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		fallthrough;

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		} while (++rcv->byte < rcv->length.h);
		do {
			rcv->checksum += lbuf[--rcv->byte];
		} while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		fallthrough;

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			dev->stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		fallthrough;

	case PLIP_PK_DONE:
		/* Inform the upper layer for the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx_ni(rcv->skb);
		dev->stats.rx_bytes += rcv->length.h;
		dev->stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}

/* PLIP_SEND --- send a byte (two nibbles)
   Returns OK on success, TIMEOUT on timeout */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		write_data (dev, data & 0x0f);
		*ns_p = PLIP_NB_1;
		fallthrough;

	case PLIP_NB_1:
		write_data (dev, 0x10 | (data & 0x0f));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		write_data (dev, 0x10 | (data >> 4));
		*ns_p = PLIP_NB_2;
		fallthrough;

	case PLIP_NB_2:
		write_data (dev, (data >> 4));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
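
/* plip_send() is the mirror image of plip_receive() above: each nibble is
   presented on data lines D0-D3 and the 0x10 bit (D4, wired to the peer's
   BUSY input) is toggled as the strobe, while the peer's acknowledgement is
   polled on bit 7 of our own status register. */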

/* PLIP_SEND_PACKET --- send a packet */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger remote rx interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted. */
				dev->stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Interrupted.
					   We don't need to enable irq,
					   as it is soon disabled.    */
					/* Yes, we do. New variant of
					   {enable,disable}_irq *counts*
					   them.  -- AV  */
					ENABLE(dev->irq);
					dev->stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}

	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		fallthrough;

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		fallthrough;

	case PLIP_PK_DATA:
		do {
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		} while (++snd->byte < snd->length.h);
		do {
			snd->checksum += lbuf[--snd->byte];
		} while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		fallthrough;

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		dev->stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		dev->stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		fallthrough;

	case PLIP_PK_DONE:
		/* Close the connection */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}

static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv)
{
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_CLOSING) {
		nl->connection = PLIP_CN_NONE;
		netif_wake_queue (dev);
	}
	spin_unlock_irq(&nl->lock);
	if (nl->should_relinquish) {
		nl->should_relinquish = nl->port_owner = 0;
		parport_release(nl->pardev);
	}
	return OK;
}

/* PLIP_ERROR --- wait till other end settled */
static int
plip_error(struct net_device *dev, struct net_local *nl,
	   struct plip_local *snd, struct plip_local *rcv)
{
	unsigned char status;

	status = read_status(dev);
	if ((status & 0xf8) == 0x80) {
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
		nl->connection = PLIP_CN_NONE;
		nl->should_relinquish = 0;
		netif_start_queue (dev);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		netif_wake_queue (dev);
	} else {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}

	return OK;
}

/* Handle the parallel port interrupts. */
static void
plip_interrupt(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;
	unsigned long flags;

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irqsave (&nl->lock, flags);

	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irqrestore (&nl->lock, flags);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		fallthrough;
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* May occur because there is a race condition around
		   the test and set of dev->interrupt.
		   Ignore this interrupt. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irqrestore(&nl->lock, flags);
}

static netdev_tx_t
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;

	if (netif_queue_stopped(dev))
		return NETDEV_TX_BUSY;

	/* We may need to grab the bus */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev))
			return NETDEV_TX_BUSY;
		nl->port_owner = 1;
	}

	netif_stop_queue (dev);

	if (skb->len > dev->mtu + dev->hard_header_len) {
		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
		netif_start_queue (dev);
		return NETDEV_TX_BUSY;
	}

	if (net_debug > 2)
		printk(KERN_DEBUG "%s: send request\n", dev->name);

	spin_lock_irq(&nl->lock);
	snd->skb = skb;
	snd->length.h = skb->len;
	snd->state = PLIP_PK_TRIGGER;
	if (nl->connection == PLIP_CN_NONE) {
		nl->connection = PLIP_CN_SEND;
		nl->timeout_count = 0;
	}
	schedule_work(&nl->immediate);
	spin_unlock_irq(&nl->lock);

	return NETDEV_TX_OK;
}

static void
plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
{
	const struct in_device *in_dev;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		/* Any address will do - we take the first */
		const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
		if (ifa) {
			memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
			memset(eth->h_dest, 0xfc, 2);
			memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
		}
	}
	rcu_read_unlock();
}

static int
plip_hard_header(struct sk_buff *skb, struct net_device *dev,
		 unsigned short type, const void *daddr,
		 const void *saddr, unsigned len)
{
	int ret;

	ret = eth_header(skb, dev, type, daddr, saddr, len);
	if (ret >= 0)
		plip_rewrite_address (dev, (struct ethhdr *)skb->data);

	return ret;
}

static int plip_hard_header_cache(const struct neighbour *neigh,
				  struct hh_cache *hh, __be16 type)
{
	int ret;

	ret = eth_header_cache(neigh, hh, type);
	if (ret == 0) {
		struct ethhdr *eth;

		eth = (struct ethhdr*)(((u8*)hh->hh_data) +
				       HH_DATA_OFF(sizeof(*eth)));
		plip_rewrite_address (neigh->dev, eth);
	}

	return ret;
}

/* Open/initialize the board.  This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.

   This routine gets exclusive access to the parallel port by allocating
   its IRQ line.
 */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* Fill in the MAC-level header.
	   We used to abuse dev->broadcast to store the point-to-point
	   MAC address, but we no longer do it. Instead, we fetch the
	   interface address whenever it is needed, which is cheap enough
	   because we use the hh_cache. Actually, abusing dev->broadcast
	   didn't work, because when using plip_open the point-to-point
	   address isn't yet known.
	   PLIP doesn't have a real MAC address, but we need it to be
	   DOS compatible, and to properly support taps (otherwise,
	   when the device address isn't identical to the address of a
	   received frame, the kernel incorrectly drops it).             */

	in_dev=__in_dev_get_rtnl(dev);
	if (in_dev) {
		/* Any address will do - we take the first. We already
		   have the first two bytes filled with 0xfc, from
		   plip_init_dev(). */
		const struct in_ifaddr *ifa = rtnl_dereference(in_dev->ifa_list);
		if (ifa != NULL) {
			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}

/* The inverse routine to plip_open (). */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	DISABLE(dev->irq);
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		init_completion(&nl->killed_timer_cmp);
		atomic_set (&nl->kill_timer, 1);
		wait_for_completion(&nl->killed_timer_cmp);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	/* Reset. */
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
1163*4882a593Smuzhiyun 
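/* parport sharing callbacks: parport calls plip_preempt() when another
 * driver wants the port; we refuse (return 1) while a transfer is in
 * flight and otherwise note that we gave the port up.  plip_wakeup() is
 * called when the port becomes free again and re-claims it if the
 * interface is still up. */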
1164*4882a593Smuzhiyun static int
1165*4882a593Smuzhiyun plip_preempt(void *handle)
1166*4882a593Smuzhiyun {
1167*4882a593Smuzhiyun 	struct net_device *dev = (struct net_device *)handle;
1168*4882a593Smuzhiyun 	struct net_local *nl = netdev_priv(dev);
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	/* Stand our ground if a datagram is on the wire */
1171*4882a593Smuzhiyun 	if (nl->connection != PLIP_CN_NONE) {
1172*4882a593Smuzhiyun 		nl->should_relinquish = 1;
1173*4882a593Smuzhiyun 		return 1;
1174*4882a593Smuzhiyun 	}
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	nl->port_owner = 0;	/* Remember that we released the bus */
1177*4882a593Smuzhiyun 	return 0;
1178*4882a593Smuzhiyun }
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun static void
1181*4882a593Smuzhiyun plip_wakeup(void *handle)
1182*4882a593Smuzhiyun {
1183*4882a593Smuzhiyun 	struct net_device *dev = (struct net_device *)handle;
1184*4882a593Smuzhiyun 	struct net_local *nl = netdev_priv(dev);
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 	if (nl->port_owner) {
1187*4882a593Smuzhiyun 		/* Why are we being woken up? */
1188*4882a593Smuzhiyun 		printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1189*4882a593Smuzhiyun 		if (!parport_claim(nl->pardev))
1190*4882a593Smuzhiyun 			/* bus_owner is already set (but why?) */
1191*4882a593Smuzhiyun 			printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1192*4882a593Smuzhiyun 		else
1193*4882a593Smuzhiyun 			return;
1194*4882a593Smuzhiyun 	}
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	if (!(dev->flags & IFF_UP))
1197*4882a593Smuzhiyun 		/* Don't need the port when the interface is down */
1198*4882a593Smuzhiyun 		return;
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	if (!parport_claim(nl->pardev)) {
1201*4882a593Smuzhiyun 		nl->port_owner = 1;
1202*4882a593Smuzhiyun 		/* Clear the data port. */
1203*4882a593Smuzhiyun 		write_data (dev, 0x00);
1204*4882a593Smuzhiyun 	}
1205*4882a593Smuzhiyun }
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun static int
1208*4882a593Smuzhiyun plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1209*4882a593Smuzhiyun {
1210*4882a593Smuzhiyun 	struct net_local *nl = netdev_priv(dev);
1211*4882a593Smuzhiyun 	struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 	if (cmd != SIOCDEVPLIP)
1214*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	switch(pc->pcmd) {
1217*4882a593Smuzhiyun 	case PLIP_GET_TIMEOUT:
1218*4882a593Smuzhiyun 		pc->trigger = nl->trigger;
1219*4882a593Smuzhiyun 		pc->nibble  = nl->nibble;
1220*4882a593Smuzhiyun 		break;
1221*4882a593Smuzhiyun 	case PLIP_SET_TIMEOUT:
1222*4882a593Smuzhiyun 		if(!capable(CAP_NET_ADMIN))
1223*4882a593Smuzhiyun 			return -EPERM;
1224*4882a593Smuzhiyun 		nl->trigger = pc->trigger;
1225*4882a593Smuzhiyun 		nl->nibble  = pc->nibble;
1226*4882a593Smuzhiyun 		break;
1227*4882a593Smuzhiyun 	default:
1228*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1229*4882a593Smuzhiyun 	}
1230*4882a593Smuzhiyun 	return 0;
1231*4882a593Smuzhiyun }
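/* Example (editor's sketch, not part of the driver): roughly how user space
 * (e.g. the plipconfig(8) utility) can read the timeouts via SIOCDEVPLIP.
 * The "plip0" name is an assumption; constants come from <linux/if_plip.h>.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/if_plip.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		struct plipconf *pc = (struct plipconf *)&ifr.ifr_ifru;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "plip0", IFNAMSIZ - 1);
 *		pc->pcmd = PLIP_GET_TIMEOUT;
 *		if (ioctl(fd, SIOCDEVPLIP, &ifr) == 0)
 *			printf("trigger=%lu nibble=%lu\n",
 *			       pc->trigger, pc->nibble);
 *		close(fd);
 *		return 0;
 *	}
 */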
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1234*4882a593Smuzhiyun static int timid;
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun module_param_array(parport, int, NULL, 0);
1237*4882a593Smuzhiyun module_param(timid, int, 0);
1238*4882a593Smuzhiyun MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");
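/* Example (editor's note): typical module invocations, assuming the parport
 * driver has already registered parport0/parport1:
 *
 *	modprobe plip			attach to every suitable port
 *	modprobe plip parport=0,1	attach only to parport0 and parport1
 *	modprobe plip timid=1		skip ports that already have devices
 */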
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1241*4882a593Smuzhiyun 
1242*4882a593Smuzhiyun static inline int
1243*4882a593Smuzhiyun plip_searchfor(int list[], int a)
1244*4882a593Smuzhiyun {
1245*4882a593Smuzhiyun 	int i;
1246*4882a593Smuzhiyun 	for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1247*4882a593Smuzhiyun 		if (list[i] == a) return 1;
1248*4882a593Smuzhiyun 	}
1249*4882a593Smuzhiyun 	return 0;
1250*4882a593Smuzhiyun }
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun /* plip_attach() is called (by the parport code) when a port is
1253*4882a593Smuzhiyun  * available to use. */
1254*4882a593Smuzhiyun static void plip_attach (struct parport *port)
1255*4882a593Smuzhiyun {
1256*4882a593Smuzhiyun 	static int unit;
1257*4882a593Smuzhiyun 	struct net_device *dev;
1258*4882a593Smuzhiyun 	struct net_local *nl;
1259*4882a593Smuzhiyun 	char name[IFNAMSIZ];
1260*4882a593Smuzhiyun 	struct pardev_cb plip_cb;
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	if ((parport[0] == -1 && (!timid || !port->devices)) ||
1263*4882a593Smuzhiyun 	    plip_searchfor(parport, port->number)) {
1264*4882a593Smuzhiyun 		if (unit == PLIP_MAX) {
1265*4882a593Smuzhiyun 			printk(KERN_ERR "plip: too many devices\n");
1266*4882a593Smuzhiyun 			return;
1267*4882a593Smuzhiyun 		}
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun 		sprintf(name, "plip%d", unit);
1270*4882a593Smuzhiyun 		dev = alloc_etherdev(sizeof(struct net_local));
1271*4882a593Smuzhiyun 		if (!dev)
1272*4882a593Smuzhiyun 			return;
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 		strcpy(dev->name, name);
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 		dev->irq = port->irq;
1277*4882a593Smuzhiyun 		dev->base_addr = port->base;
1278*4882a593Smuzhiyun 		if (port->irq == -1) {
1279*4882a593Smuzhiyun 			printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode, "
1280*4882a593Smuzhiyun 		                 "which is fairly inefficient!\n", port->name);
1281*4882a593Smuzhiyun 		}
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 		nl = netdev_priv(dev);
1284*4882a593Smuzhiyun 		nl->dev = dev;
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 		memset(&plip_cb, 0, sizeof(plip_cb));
1287*4882a593Smuzhiyun 		plip_cb.private = dev;
1288*4882a593Smuzhiyun 		plip_cb.preempt = plip_preempt;
1289*4882a593Smuzhiyun 		plip_cb.wakeup = plip_wakeup;
1290*4882a593Smuzhiyun 		plip_cb.irq_func = plip_interrupt;
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 		nl->pardev = parport_register_dev_model(port, dev->name,
1293*4882a593Smuzhiyun 							&plip_cb, unit);
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 		if (!nl->pardev) {
1296*4882a593Smuzhiyun 			printk(KERN_ERR "%s: parport_register failed\n", name);
1297*4882a593Smuzhiyun 			goto err_free_dev;
1298*4882a593Smuzhiyun 		}
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 		plip_init_netdev(dev);
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 		if (register_netdev(dev)) {
1303*4882a593Smuzhiyun 			printk(KERN_ERR "%s: network register failed\n", name);
1304*4882a593Smuzhiyun 			goto err_parport_unregister;
1305*4882a593Smuzhiyun 		}
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 		printk(KERN_INFO "%s", version);
1308*4882a593Smuzhiyun 		if (dev->irq != -1)
1309*4882a593Smuzhiyun 			printk(KERN_INFO "%s: Parallel port at %#3lx, "
1310*4882a593Smuzhiyun 					 "using IRQ %d.\n",
1311*4882a593Smuzhiyun 				         dev->name, dev->base_addr, dev->irq);
1312*4882a593Smuzhiyun 		else
1313*4882a593Smuzhiyun 			printk(KERN_INFO "%s: Parallel port at %#3lx, "
1314*4882a593Smuzhiyun 					 "not using IRQ.\n",
1315*4882a593Smuzhiyun 					 dev->name, dev->base_addr);
1316*4882a593Smuzhiyun 		dev_plip[unit++] = dev;
1317*4882a593Smuzhiyun 	}
1318*4882a593Smuzhiyun 	return;
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun err_parport_unregister:
1321*4882a593Smuzhiyun 	parport_unregister_device(nl->pardev);
1322*4882a593Smuzhiyun err_free_dev:
1323*4882a593Smuzhiyun 	free_netdev(dev);
1324*4882a593Smuzhiyun }
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun /* plip_detach() is called (by the parport code) when a port is
1327*4882a593Smuzhiyun  * no longer available to use. */
1328*4882a593Smuzhiyun static void plip_detach (struct parport *port)
1329*4882a593Smuzhiyun {
1330*4882a593Smuzhiyun 	/* Nothing to do */
1331*4882a593Smuzhiyun }
1332*4882a593Smuzhiyun 
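/* Device-model probe: accept only pardevices whose names were derived from
 * this driver's name, i.e. the ones registered by plip_attach() above. */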
1333*4882a593Smuzhiyun static int plip_probe(struct pardevice *par_dev)
1334*4882a593Smuzhiyun {
1335*4882a593Smuzhiyun 	struct device_driver *drv = par_dev->dev.driver;
1336*4882a593Smuzhiyun 	int len = strlen(drv->name);
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 	if (strncmp(par_dev->name, drv->name, len))
1339*4882a593Smuzhiyun 		return -ENODEV;
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun 	return 0;
1342*4882a593Smuzhiyun }
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun static struct parport_driver plip_driver = {
1345*4882a593Smuzhiyun 	.name		= "plip",
1346*4882a593Smuzhiyun 	.probe		= plip_probe,
1347*4882a593Smuzhiyun 	.match_port	= plip_attach,
1348*4882a593Smuzhiyun 	.detach		= plip_detach,
1349*4882a593Smuzhiyun 	.devmodel	= true,
1350*4882a593Smuzhiyun };
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun static void __exit plip_cleanup_module (void)
1353*4882a593Smuzhiyun {
1354*4882a593Smuzhiyun 	struct net_device *dev;
1355*4882a593Smuzhiyun 	int i;
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	for (i=0; i < PLIP_MAX; i++) {
1358*4882a593Smuzhiyun 		if ((dev = dev_plip[i])) {
1359*4882a593Smuzhiyun 			struct net_local *nl = netdev_priv(dev);
1360*4882a593Smuzhiyun 			unregister_netdev(dev);
1361*4882a593Smuzhiyun 			if (nl->port_owner)
1362*4882a593Smuzhiyun 				parport_release(nl->pardev);
1363*4882a593Smuzhiyun 			parport_unregister_device(nl->pardev);
1364*4882a593Smuzhiyun 			free_netdev(dev);
1365*4882a593Smuzhiyun 			dev_plip[i] = NULL;
1366*4882a593Smuzhiyun 		}
1367*4882a593Smuzhiyun 	}
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 	parport_unregister_driver(&plip_driver);
1370*4882a593Smuzhiyun }
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun #ifndef MODULE
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun static int parport_ptr;
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun static int __init plip_setup(char *str)
1377*4882a593Smuzhiyun {
1378*4882a593Smuzhiyun 	int ints[4];
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun 	str = get_options(str, ARRAY_SIZE(ints), ints);
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 	/* Ugh. */
1383*4882a593Smuzhiyun 	if (!strncmp(str, "parport", 7)) {
1384*4882a593Smuzhiyun 		int n = simple_strtoul(str+7, NULL, 10);
1385*4882a593Smuzhiyun 		if (parport_ptr < PLIP_MAX)
1386*4882a593Smuzhiyun 			parport[parport_ptr++] = n;
1387*4882a593Smuzhiyun 		else
1388*4882a593Smuzhiyun 			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
1389*4882a593Smuzhiyun 			       str);
1390*4882a593Smuzhiyun 	} else if (!strcmp(str, "timid")) {
1391*4882a593Smuzhiyun 		timid = 1;
1392*4882a593Smuzhiyun 	} else {
1393*4882a593Smuzhiyun 		if (ints[0] == 0 || ints[1] == 0) {
1394*4882a593Smuzhiyun 			/* disable driver on "plip=" or "plip=0" */
1395*4882a593Smuzhiyun 			parport[0] = -2;
1396*4882a593Smuzhiyun 		} else {
1397*4882a593Smuzhiyun 			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
1398*4882a593Smuzhiyun 			       ints[1]);
1399*4882a593Smuzhiyun 		}
1400*4882a593Smuzhiyun 	}
1401*4882a593Smuzhiyun 	return 1;
1402*4882a593Smuzhiyun }
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun __setup("plip=", plip_setup);
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun #endif /* !MODULE */
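/* Example (editor's note): when built in, the same choices are made on the
 * kernel command line and parsed by plip_setup() above:
 *
 *	plip=parport1		use parport1 only (may be given repeatedly)
 *	plip=timid		leave ports with existing devices alone
 *	plip=0			disable the driver
 */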
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun static int __init plip_init (void)
1409*4882a593Smuzhiyun {
1410*4882a593Smuzhiyun 	if (parport[0] == -2)
1411*4882a593Smuzhiyun 		return 0;
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	if (parport[0] != -1 && timid) {
1414*4882a593Smuzhiyun 		printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1415*4882a593Smuzhiyun 		timid = 0;
1416*4882a593Smuzhiyun 	}
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun 	if (parport_register_driver (&plip_driver)) {
1419*4882a593Smuzhiyun 		printk (KERN_WARNING "plip: couldn't register driver\n");
1420*4882a593Smuzhiyun 		return 1;
1421*4882a593Smuzhiyun 	}
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 	return 0;
1424*4882a593Smuzhiyun }
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun module_init(plip_init);
1427*4882a593Smuzhiyun module_exit(plip_cleanup_module);
1428*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1429