/* include/linux/parport.h
 *
 * Any part of this program may be used in documents licensed under
 * the GNU Free Documentation License, Version 1.1 or any later version
 * published by the Free Software Foundation.
 */
#ifndef _PARPORT_H_
#define _PARPORT_H_


#include <linux/jiffies.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/irqreturn.h>
#include <linux/semaphore.h>
#include <linux/device.h>
#include <asm/ptrace.h>
#include <uapi/linux/parport.h>

/* Define this later. */
struct parport;
struct pardevice;

struct pc_parport_state {
	unsigned int ctr;
	unsigned int ecr;
};

struct ax_parport_state {
	unsigned int ctr;
	unsigned int ecr;
	unsigned int dcsr;
};

/* used by both parport_amiga and parport_mfc3 */
struct amiga_parport_state {
       unsigned char data;     /* ciaa.prb */
       unsigned char datadir;  /* ciaa.ddrb */
       unsigned char status;   /* ciab.pra & 7 */
       unsigned char statusdir;/* ciab.ddrb & 7 */
};

struct ax88796_parport_state {
	unsigned char cpr;
};

struct ip32_parport_state {
	unsigned int dcr;
	unsigned int ecr;
};

struct parport_state {
	union {
		struct pc_parport_state pc;
		/* ARC has no state. */
		struct ax_parport_state ax;
		struct amiga_parport_state amiga;
		struct ax88796_parport_state ax88796;
		/* Atari has no state. */
		struct ip32_parport_state ip32;
		void *misc;
	} u;
};

struct parport_operations {
	/* IBM PC-style virtual registers. */
	void (*write_data)(struct parport *, unsigned char);
	unsigned char (*read_data)(struct parport *);

	void (*write_control)(struct parport *, unsigned char);
	unsigned char (*read_control)(struct parport *);
	unsigned char (*frob_control)(struct parport *, unsigned char mask,
				      unsigned char val);

	unsigned char (*read_status)(struct parport *);

	/* IRQs. */
	void (*enable_irq)(struct parport *);
	void (*disable_irq)(struct parport *);

	/* Data direction. */
	void (*data_forward) (struct parport *);
	void (*data_reverse) (struct parport *);

	/* For core parport code. */
	void (*init_state)(struct pardevice *, struct parport_state *);
	void (*save_state)(struct parport *, struct parport_state *);
	void (*restore_state)(struct parport *, struct parport_state *);

	/* Block read/write */
	size_t (*epp_write_data) (struct parport *port, const void *buf,
				  size_t len, int flags);
	size_t (*epp_read_data) (struct parport *port, void *buf, size_t len,
				 int flags);
	size_t (*epp_write_addr) (struct parport *port, const void *buf,
				  size_t len, int flags);
	size_t (*epp_read_addr) (struct parport *port, void *buf, size_t len,
				 int flags);

	size_t (*ecp_write_data) (struct parport *port, const void *buf,
				  size_t len, int flags);
	size_t (*ecp_read_data) (struct parport *port, void *buf, size_t len,
				 int flags);
	size_t (*ecp_write_addr) (struct parport *port, const void *buf,
				  size_t len, int flags);

	size_t (*compat_write_data) (struct parport *port, const void *buf,
				     size_t len, int flags);
	size_t (*nibble_read_data) (struct parport *port, void *buf,
				    size_t len, int flags);
	size_t (*byte_read_data) (struct parport *port, void *buf,
				  size_t len, int flags);
	struct module *owner;
};

struct parport_device_info {
	parport_device_class class;
	const char *class_name;
	const char *mfr;
	const char *model;
	const char *cmdset;
	const char *description;
};

/* Each device can have two callback functions:
 *  1) a preemption function, called by the resource manager to request
 *     that the driver relinquish control of the port.  The driver should
 *     return zero if it agrees to release the port, and nonzero if it
 *     refuses.  Do not call parport_release() - the kernel will do this
 *     implicitly.
 *
 *  2) a wake-up function, called by the resource manager to tell drivers
 *     that the port is available to be claimed.  If a driver wants to use
 *     the port, it should call parport_claim() here.
 */
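/*
 * Illustrative sketch (not part of the original header): one way a driver
 * might implement the two callbacks, assuming a hypothetical private
 * structure "struct foo_device" with "busy" and "pardev" members:
 *
 *	static int foo_preempt(void *handle)
 *	{
 *		struct foo_device *foo = handle;
 *
 *		// Refuse to give up the port while a transfer is in flight.
 *		return foo->busy ? 1 : 0;
 *	}
 *
 *	static void foo_wakeup(void *handle)
 *	{
 *		struct foo_device *foo = handle;
 *
 *		// The port is free; try to claim it for our device again.
 *		parport_claim(foo->pardev);
 *	}
 */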

/* A parallel port device */
struct pardevice {
	const char *name;
	struct parport *port;
	int daisy;
	int (*preempt)(void *);
	void (*wakeup)(void *);
	void *private;
	void (*irq_func)(void *);
	unsigned int flags;
	struct pardevice *next;
	struct pardevice *prev;
	struct device dev;
	bool devmodel;
	struct parport_state *state;     /* saved status over preemption */
	wait_queue_head_t wait_q;
	unsigned long int time;
	unsigned long int timeslice;
	volatile long int timeout;
	unsigned long waiting;		 /* long req'd for set_bit --RR */
	struct pardevice *waitprev;
	struct pardevice *waitnext;
	void * sysctl_table;
};

#define to_pardevice(n) container_of(n, struct pardevice, dev)

/* IEEE1284 information */

/* IEEE1284 phases. These are exposed to userland through ppdev IOCTL
 * PP[GS]ETPHASE, so do not change existing values. */
enum ieee1284_phase {
	IEEE1284_PH_FWD_DATA,
	IEEE1284_PH_FWD_IDLE,
	IEEE1284_PH_TERMINATE,
	IEEE1284_PH_NEGOTIATION,
	IEEE1284_PH_HBUSY_DNA,
	IEEE1284_PH_REV_IDLE,
	IEEE1284_PH_HBUSY_DAVAIL,
	IEEE1284_PH_REV_DATA,
	IEEE1284_PH_ECP_SETUP,
	IEEE1284_PH_ECP_FWD_TO_REV,
	IEEE1284_PH_ECP_REV_TO_FWD,
	IEEE1284_PH_ECP_DIR_UNKNOWN,
};
struct ieee1284_info {
	int mode;
	volatile enum ieee1284_phase phase;
	struct semaphore irq;
};

/* A parallel port */
struct parport {
	unsigned long base;	/* base address */
	unsigned long base_hi;  /* base address (hi - ECR) */
	unsigned int size;	/* IO extent */
	const char *name;
	unsigned int modes;
	int irq;		/* interrupt (or -1 for none) */
	int dma;
	int muxport;		/* which muxport (if any) this is */
	int portnum;		/* which physical parallel port (not mux) */
	struct device *dev;	/* Physical device associated with IO/DMA.
				 * This may unfortunately be null if the
				 * port has a legacy driver.
				 */
	struct device bus_dev;	/* to link with the bus */
	struct parport *physport;
				/* If this is a non-default mux
				   parport, i.e. we're a clone of a real
				   physical port, this is a pointer to that
				   port. The locking is only done in the
				   real port.  For a clone port, the
				   following structure members are
				   meaningless: devices, cad, muxsel,
				   waithead, waittail, flags, pdir,
				   dev, ieee1284, *_lock.

				   If this is a default mux parport, or
				   there is no mux involved, this points to
				   ourself. */

	struct pardevice *devices;
	struct pardevice *cad;	/* port owner */
	int daisy;		/* currently selected daisy addr */
	int muxsel;		/* currently selected mux port */

	struct pardevice *waithead;
	struct pardevice *waittail;

	struct list_head list;
	struct timer_list timer;
	unsigned int flags;

	void *sysctl_table;
	struct parport_device_info probe_info[5]; /* 0-3 + non-IEEE1284.3 */
	struct ieee1284_info ieee1284;

	struct parport_operations *ops;
	void *private_data;     /* for lowlevel driver */

	int number;		/* port index - the `n' in `parportn' */
	spinlock_t pardevice_lock;
	spinlock_t waitlist_lock;
	rwlock_t cad_lock;

	int spintime;
	atomic_t ref_count;

	unsigned long devflags;
#define PARPORT_DEVPROC_REGISTERED	0
	struct pardevice *proc_device;	/* Currently registered proc device */

	struct list_head full_list;
	struct parport *slaves[3];
};

#define to_parport_dev(n) container_of(n, struct parport, bus_dev)

#define DEFAULT_SPIN_TIME 500 /* us */

struct parport_driver {
	const char *name;
	void (*attach) (struct parport *);
	void (*detach) (struct parport *);
	void (*match_port)(struct parport *);
	int (*probe)(struct pardevice *);
	struct device_driver driver;
	bool devmodel;
	struct list_head list;
};

#define to_parport_driver(n) container_of(n, struct parport_driver, driver)

int parport_bus_init(void);
void parport_bus_exit(void);

/* parport_register_port registers a new parallel port at the given
   address (if one does not already exist) and returns a pointer to it.
   This entails claiming the I/O region, IRQ and DMA.  NULL is returned
   if initialisation fails. */
struct parport *parport_register_port(unsigned long base, int irq, int dma,
				      struct parport_operations *ops);

/* Once a registered port is ready for high-level drivers to use, the
   low-level driver that registered it should announce it.  This will
   call the high-level drivers' attach() functions (after things like
   determining the IEEE 1284.3 topology of the port and collecting
   DeviceIDs). */
void parport_announce_port (struct parport *port);
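/*
 * Illustrative sketch (not part of the original header): the usual sequence
 * for a low-level driver, assuming it has already filled in a hypothetical
 * parport_operations table "my_ops" and knows its I/O resources:
 *
 *	struct parport *port;
 *
 *	port = parport_register_port(0x378, PARPORT_IRQ_NONE,
 *				     PARPORT_DMA_NONE, &my_ops);
 *	if (!port)
 *		return -ENODEV;
 *	// ...program the hardware, set port->modes, private_data, etc...
 *	parport_announce_port(port);
 */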

/* Unregister a port. */
extern void parport_remove_port(struct parport *port);

/* Register a new high-level driver. */

int __must_check __parport_register_driver(struct parport_driver *,
					   struct module *,
					   const char *mod_name);
/*
 * parport_register_driver must be a macro so that KBUILD_MODNAME can
 * be expanded
 */
#define parport_register_driver(driver)             \
	__parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
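/*
 * Illustrative sketch (not part of the original header): a device-model
 * style high-level driver registration, using hypothetical mydrv_match_port()
 * and mydrv_detach() callbacks:
 *
 *	static struct parport_driver mydrv_driver = {
 *		.name		= "mydrv",
 *		.match_port	= mydrv_match_port,
 *		.detach		= mydrv_detach,
 *		.devmodel	= true,
 *	};
 *
 *	static int __init mydrv_init(void)
 *	{
 *		return parport_register_driver(&mydrv_driver);
 *	}
 *
 *	static void __exit mydrv_exit(void)
 *	{
 *		parport_unregister_driver(&mydrv_driver);
 *	}
 */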

/* Unregister a high-level driver. */
extern void parport_unregister_driver (struct parport_driver *);

/* If parport_register_driver doesn't fit your needs, perhaps
 * parport_find_xxx does. */
extern struct parport *parport_find_number (int);
extern struct parport *parport_find_base (unsigned long);

/* generic irq handler, if it suits your needs */
extern irqreturn_t parport_irq_handler(int irq, void *dev_id);
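/*
 * Illustrative sketch (not part of the original header): a low-level driver
 * can hand parport_irq_handler() straight to request_irq() (declared in
 * <linux/interrupt.h>) with the port as dev_id; the handler then dispatches
 * through parport_generic_irq() defined later in this file:
 *
 *	if (port->irq != PARPORT_IRQ_NONE &&
 *	    request_irq(port->irq, parport_irq_handler, IRQF_SHARED,
 *			port->name, port))
 *		port->irq = PARPORT_IRQ_NONE;	// could not get it; poll instead
 */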

/* Reference counting for ports. */
extern struct parport *parport_get_port (struct parport *);
extern void parport_put_port (struct parport *);
void parport_del_port(struct parport *);

struct pardev_cb {
	int (*preempt)(void *);
	void (*wakeup)(void *);
	void *private;
	void (*irq_func)(void *);
	unsigned int flags;
};

/*
 * parport_register_dev_model declares that a device is connected to a
 * port, and tells the kernel all it needs to know.
 */
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
			   const struct pardev_cb *par_dev_cb, int cnt);
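/*
 * Illustrative sketch (not part of the original header): registering a
 * device from a driver's match_port() callback, reusing the hypothetical
 * foo_preempt()/foo_wakeup() callbacks sketched earlier ("foo" points to
 * the driver's private foo_device):
 *
 *	struct pardev_cb cb = {
 *		.preempt = foo_preempt,
 *		.wakeup  = foo_wakeup,
 *		.private = foo,
 *	};
 *	struct pardevice *pdev;
 *
 *	pdev = parport_register_dev_model(port, "foo", &cb, 0);
 *	if (!pdev)
 *		return;		// registration failed
 */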

/* parport_unregister unlinks a device from the chain. */
extern void parport_unregister_device(struct pardevice *dev);

/* parport_claim tries to gain ownership of the port for a particular
   driver.  This may fail (return non-zero) if another driver is busy.
   If this driver has registered an interrupt handler, it will be
   enabled.  */
extern int parport_claim(struct pardevice *dev);

/* parport_claim_or_block is the same, but sleeps if the port cannot
   be claimed.  Return value is 1 if it slept, 0 normally and -errno
   on error.  */
extern int parport_claim_or_block(struct pardevice *dev);

/* parport_release reverses a previous parport_claim.  This can never
   fail, though the effects are undefined (except that they are bad)
   if you didn't previously own the port.  Once you have released the
   port you should make sure that neither your code nor the hardware
   on the port tries to initiate any communication without first
   re-claiming the port.  If you mess with the port state (enabling
   ECP for example) you should clean up before releasing the port. */

extern void parport_release(struct pardevice *dev);
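/*
 * Illustrative sketch (not part of the original header): the typical
 * claim/use/release cycle around port I/O, here writing a single data
 * byte through the generic register accessors defined at the end of
 * this file:
 *
 *	int err = parport_claim_or_block(pdev);
 *
 *	if (err < 0)
 *		return err;
 *
 *	parport_data_forward(pdev->port);
 *	parport_write_data(pdev->port, 0x55);
 *
 *	parport_release(pdev);
 */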

/**
 * parport_yield - relinquish a parallel port temporarily
 * @dev: a device on the parallel port
 *
 * This function relinquishes the port if it would be helpful to other
 * drivers to do so.  Afterwards it tries to reclaim the port using
 * parport_claim(), and the return value is the same as for
 * parport_claim().  If it fails, the port is left unclaimed and it is
 * the driver's responsibility to reclaim the port.
 *
 * The parport_yield() and parport_yield_blocking() functions are for
 * marking points in the driver at which other drivers may claim the
 * port and use their devices.  Yielding the port is similar to
 * releasing it and reclaiming it, but is more efficient because no
 * action is taken if there are no other devices needing the port.  In
 * fact, nothing is done even if there are other devices waiting but
 * the current device is still within its "timeslice".  The default
 * timeslice is half a second, but it can be adjusted via the /proc
 * interface.
 **/
static __inline__ int parport_yield(struct pardevice *dev)
{
	unsigned long int timeslip = (jiffies - dev->time);
	if ((dev->port->waithead == NULL) || (timeslip < dev->timeslice))
		return 0;
	parport_release(dev);
	return parport_claim(dev);
}

/**
 * parport_yield_blocking - relinquish a parallel port temporarily
 * @dev: a device on the parallel port
 *
 * This function relinquishes the port if it would be helpful to other
 * drivers to do so.  Afterwards it tries to reclaim the port using
 * parport_claim_or_block(), and the return value is the same as for
 * parport_claim_or_block().
 **/
static __inline__ int parport_yield_blocking(struct pardevice *dev)
{
	unsigned long int timeslip = (jiffies - dev->time);
	if ((dev->port->waithead == NULL) || (timeslip < dev->timeslice))
		return 0;
	parport_release(dev);
	return parport_claim_or_block(dev);
}

/* Flags used to identify what a device does. */
#define PARPORT_DEV_TRAN		0	/* WARNING !! DEPRECATED !! */
#define PARPORT_DEV_LURK		(1<<0)	/* WARNING !! DEPRECATED !! */
#define PARPORT_DEV_EXCL		(1<<1)	/* Need exclusive access. */

#define PARPORT_FLAG_EXCL		(1<<1)	/* EXCL driver registered. */

/* IEEE1284 functions */
extern void parport_ieee1284_interrupt (void *);
extern int parport_negotiate (struct parport *, int mode);
extern ssize_t parport_write (struct parport *, const void *buf, size_t len);
extern ssize_t parport_read (struct parport *, void *buf, size_t len);
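/*
 * Illustrative sketch (not part of the original header): negotiating an
 * IEEE 1284 transfer mode and then writing a buffer "buf" of "len" bytes
 * while the port is claimed (the IEEE1284_MODE_* constants come from
 * <uapi/linux/parport.h>):
 *
 *	ssize_t written;
 *
 *	// If the peripheral refuses ECP, the port stays in compatibility
 *	// mode and parport_write() still works, just more slowly.
 *	if (parport_negotiate(port, IEEE1284_MODE_ECP) != 0)
 *		pr_debug("peripheral refused ECP negotiation\n");
 *
 *	written = parport_write(port, buf, len);
 */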

#define PARPORT_INACTIVITY_O_NONBLOCK 1
extern long parport_set_timeout (struct pardevice *, long inactivity);

extern int parport_wait_event (struct parport *, long timeout);
extern int parport_wait_peripheral (struct parport *port,
				    unsigned char mask,
				    unsigned char val);
extern int parport_poll_peripheral (struct parport *port,
				    unsigned char mask,
				    unsigned char val,
				    int usec);
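/*
 * Illustrative sketch (not part of the original header): waiting for the
 * peripheral to become ready, using the status masks from
 * <uapi/linux/parport.h>.  A zero return means the masked status lines
 * matched the requested value before the device's timeout expired:
 *
 *	// PC hardware inverts the Busy wire in the status register, so the
 *	// bit reading back as 1 means the peripheral is no longer busy.
 *	if (parport_wait_peripheral(port, PARPORT_STATUS_BUSY,
 *				    PARPORT_STATUS_BUSY))
 *		return -ETIMEDOUT;
 */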

/* For architectural drivers */
extern size_t parport_ieee1284_write_compat (struct parport *,
					     const void *, size_t, int);
extern size_t parport_ieee1284_read_nibble (struct parport *,
					    void *, size_t, int);
extern size_t parport_ieee1284_read_byte (struct parport *,
					  void *, size_t, int);
extern size_t parport_ieee1284_ecp_read_data (struct parport *,
					      void *, size_t, int);
extern size_t parport_ieee1284_ecp_write_data (struct parport *,
					       const void *, size_t, int);
extern size_t parport_ieee1284_ecp_write_addr (struct parport *,
					       const void *, size_t, int);
extern size_t parport_ieee1284_epp_write_data (struct parport *,
					       const void *, size_t, int);
extern size_t parport_ieee1284_epp_read_data (struct parport *,
					      void *, size_t, int);
extern size_t parport_ieee1284_epp_write_addr (struct parport *,
					       const void *, size_t, int);
extern size_t parport_ieee1284_epp_read_addr (struct parport *,
					      void *, size_t, int);

/* IEEE1284.3 functions */
#define daisy_dev_name "Device ID probe"
extern int parport_daisy_init (struct parport *port);
extern void parport_daisy_fini (struct parport *port);
extern struct pardevice *parport_open (int devnum, const char *name);
extern void parport_close (struct pardevice *dev);
extern ssize_t parport_device_id (int devnum, char *buffer, size_t len);
extern void parport_daisy_deselect_all (struct parport *port);
extern int parport_daisy_select (struct parport *port, int daisy, int mode);

/* Lowlevel drivers _can_ call this support function to handle irqs.  */
static inline void parport_generic_irq(struct parport *port)
{
	parport_ieee1284_interrupt (port);
	read_lock(&port->cad_lock);
	if (port->cad && port->cad->irq_func)
		port->cad->irq_func(port->cad->private);
	read_unlock(&port->cad_lock);
}

/* Prototypes from parport_procfs */
extern int parport_proc_register(struct parport *pp);
extern int parport_proc_unregister(struct parport *pp);
extern int parport_device_proc_register(struct pardevice *device);
extern int parport_device_proc_unregister(struct pardevice *device);

/* If PC hardware is the only type supported, we can optimise a bit.  */
#if !defined(CONFIG_PARPORT_NOT_PC)

#include <linux/parport_pc.h>
#define parport_write_data(p,x)            parport_pc_write_data(p,x)
#define parport_read_data(p)               parport_pc_read_data(p)
#define parport_write_control(p,x)         parport_pc_write_control(p,x)
#define parport_read_control(p)            parport_pc_read_control(p)
#define parport_frob_control(p,m,v)        parport_pc_frob_control(p,m,v)
#define parport_read_status(p)             parport_pc_read_status(p)
#define parport_enable_irq(p)              parport_pc_enable_irq(p)
#define parport_disable_irq(p)             parport_pc_disable_irq(p)
#define parport_data_forward(p)            parport_pc_data_forward(p)
#define parport_data_reverse(p)            parport_pc_data_reverse(p)

#else  /*  !CONFIG_PARPORT_NOT_PC  */

/* Generic operations vector through the dispatch table. */
#define parport_write_data(p,x)            (p)->ops->write_data(p,x)
#define parport_read_data(p)               (p)->ops->read_data(p)
#define parport_write_control(p,x)         (p)->ops->write_control(p,x)
#define parport_read_control(p)            (p)->ops->read_control(p)
#define parport_frob_control(p,m,v)        (p)->ops->frob_control(p,m,v)
#define parport_read_status(p)             (p)->ops->read_status(p)
#define parport_enable_irq(p)              (p)->ops->enable_irq(p)
#define parport_disable_irq(p)             (p)->ops->disable_irq(p)
#define parport_data_forward(p)            (p)->ops->data_forward(p)
#define parport_data_reverse(p)            (p)->ops->data_reverse(p)

#endif /*  !CONFIG_PARPORT_NOT_PC  */

extern unsigned long parport_default_timeslice;
extern int parport_default_spintime;

#endif /* _PARPORT_H_ */