/*
        pd.c    (c) 1997-8  Grant R. Guenther <grant@torque.net>
                Under the terms of the GNU General Public License.

        This is the high-level driver for parallel port IDE hard
        drives based on chips supported by the paride module.

        By default, the driver will autoprobe for a single parallel
        port IDE drive, but if their individual parameters are
        specified, the driver can handle up to 4 drives.

        The behaviour of the pd driver can be altered by setting
        some parameters from the insmod command line.  The following
        parameters are adjustable:

            drive0      These four arguments can be arrays of
            drive1      1-8 integers as follows:
            drive2
            drive3      <prt>,<pro>,<uni>,<mod>,<geo>,<sby>,<dly>,<slv>

                        Where,

                <prt>   is the base of the parallel port address for
                        the corresponding drive.  (required)

                <pro>   is the protocol number for the adapter that
                        supports this drive.  These numbers are
                        logged by 'paride' when the protocol modules
                        are initialised.  (0 if not given)

                <uni>   for those adapters that support chained
                        devices, this is the unit selector for the
                        chain of devices on the given port.  It should
                        be zero for devices that don't support chaining.
                        (0 if not given)

                <mod>   this can be -1 to choose the best mode, or one
                        of the mode numbers supported by the adapter.
                        (-1 if not given)

                <geo>   this defaults to 0 to indicate that the driver
                        should use the CHS geometry provided by the drive
                        itself.  If set to 1, the driver will provide
                        a logical geometry with 64 heads and 32 sectors
                        per track, to be consistent with most SCSI
                        drivers.  (0 if not given)

                <sby>   set this to zero to disable the power saving
                        standby mode, if needed.  (1 if not given)

                <dly>   some parallel ports require the driver to
                        go more slowly.  -1 sets a default value that
                        should work with the chosen protocol.  Otherwise,
                        set this to a small integer, the larger it is
                        the slower the port i/o.  In some cases, setting
                        this to zero will speed up the device.  (default -1)

                <slv>   IDE disks can be jumpered to master or slave.
                        Set this to 0 to choose the master drive, 1 to
                        choose the slave, -1 (the default) to choose the
                        first drive found.

            major       You may use this parameter to override the
                        default major number (45) that this driver
                        will use.  Be sure to change the device
                        name as well.

            name        This parameter is a character string that
                        contains the name the kernel will use for this
                        device (in /proc output, for instance).
                        (default "pd")

            cluster     The driver will attempt to aggregate requests
                        for adjacent blocks into larger multi-block
                        clusters.  The maximum cluster size (in 512
                        byte sectors) is set with this parameter.
                        (default 64)

            verbose     This parameter controls the amount of logging
                        that the driver will do.  Set it to 0 for
                        normal operation, 1 to see autoprobe progress
                        messages, or 2 to see additional debugging
                        output.  (default 0)

            nice        This parameter controls the driver's use of
                        idle CPU time, at the expense of some speed.

        If this driver is built into the kernel, you can use the
        following kernel command line parameters, with the same values
        as the corresponding module parameters listed above:

            pd.drive0
            pd.drive1
            pd.drive2
            pd.drive3
            pd.cluster
            pd.nice

        In addition, you can use the parameter pd.disable to disable
        the driver entirely.
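
        As an illustrative (not prescriptive) example, a single drive on
        a parallel port at 0x378 might be configured with something like

            modprobe pd drive0=0x378,0,0,-1,0,1,-1,-1 verbose=1

        or, when the driver is built in, with
        pd.drive0=0x378,0,0,-1,0,1,-1,-1 on the kernel command line.
        The port address and verbosity above are example values only;
        substitute the values appropriate to your own setup.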

*/

/* Changes:

	1.01	GRG 1997.01.24	Restored pd_reset()
				Added eject ioctl
	1.02	GRG 1998.05.06	SMP spinlock changes,
				Added slave support
	1.03	GRG 1998.06.16	Eliminate an Ugh.
	1.04	GRG 1998.08.15	Extra debugging, use HZ in loop timing
	1.05	GRG 1998.09.24	Added jumbo support

*/

#define PD_VERSION	"1.05"
#define PD_MAJOR	45
#define PD_NAME		"pd"
#define PD_UNITS	4

/* Here are things one can override from the insmod command.
   Most are autoprobed by paride unless set here.  Verbose is off
   by default.

*/
#include <linux/types.h>

static int verbose = 0;
static int major = PD_MAJOR;
static char *name = PD_NAME;
static int cluster = 64;
static int nice = 0;
static int disable = 0;

static int drive0[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive1[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive2[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive3[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };

static int (*drives[4])[8] = {&drive0, &drive1, &drive2, &drive3};

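/* indices into the driveN[] parameter arrays above, in the same order as
   the <prt>,<pro>,<uni>,<mod>,<geo>,<sby>,<dly>,<slv> fields described in
   the header comment */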
enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};

/* end of parameters */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>	/* for the eject ioctl */
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(pd_mutex);
static DEFINE_SPINLOCK(pd_lock);

module_param(verbose, int, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(cluster, int, 0);
module_param(nice, int, 0);
module_param_array(drive0, int, NULL, 0);
module_param_array(drive1, int, NULL, 0);
module_param_array(drive2, int, NULL, 0);
module_param_array(drive3, int, NULL, 0);

#include "paride.h"

#define PD_BITS		4

/* numbers for "SCSI" geometry */

#define PD_LOG_HEADS	64
#define PD_LOG_SECTS	32

#define PD_ID_OFF	54
#define PD_ID_LEN	14

#define PD_MAX_RETRIES	5
#define PD_TMO		800	/* interrupt timeout in jiffies */
#define PD_SPIN_DEL	50	/* spin delay in micro-seconds */

#define PD_SPIN		(1000000*PD_TMO)/(HZ*PD_SPIN_DEL)
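
/* PD_SPIN is the number of polling iterations used in pd_wait_for():
   the PD_TMO timeout (converted from jiffies to microseconds) divided
   by the PD_SPIN_DEL microsecond delay of each iteration */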

#define STAT_ERR	0x00001
#define STAT_INDEX	0x00002
#define STAT_ECC	0x00004
#define STAT_DRQ	0x00008
#define STAT_SEEK	0x00010
#define STAT_WRERR	0x00020
#define STAT_READY	0x00040
#define STAT_BUSY	0x00080

#define ERR_AMNF	0x00100
#define ERR_TK0NF	0x00200
#define ERR_ABRT	0x00400
#define ERR_MCR		0x00800
#define ERR_IDNF	0x01000
#define ERR_MC		0x02000
#define ERR_UNC		0x04000
#define ERR_TMO		0x10000

#define IDE_READ		0x20
#define IDE_WRITE		0x30
#define IDE_READ_VRFY		0x40
#define IDE_INIT_DEV_PARMS	0x91
#define IDE_STANDBY		0x96
#define IDE_ACKCHANGE		0xdb
#define IDE_DOORLOCK		0xde
#define IDE_DOORUNLOCK		0xdf
#define IDE_IDENTIFY		0xec
#define IDE_EJECT		0xed

#define PD_NAMELEN	8

struct pd_unit {
	struct pi_adapter pia;	/* interface to paride layer */
	struct pi_adapter *pi;
	int access;		/* count of active opens ... */
	int capacity;		/* Size of this volume in sectors */
	int heads;		/* physical geometry */
	int sectors;
	int cylinders;
	int can_lba;
	int drive;		/* master=0 slave=1 */
	int changed;		/* Have we seen a disk change ? */
	int removable;		/* removable media device ? */
	int standby;
	int alt_geom;
	char name[PD_NAMELEN];	/* pda, pdb, etc ... */
	struct gendisk *gd;
	struct blk_mq_tag_set tag_set;
	struct list_head rq_list;
};

static struct pd_unit pd[PD_UNITS];

struct pd_req {
	/* for REQ_OP_DRV_IN: */
	enum action (*func)(struct pd_unit *disk);
};

static char pd_scratch[512];	/* scratch block buffer */

static char *pd_errs[17] = { "ERR", "INDEX", "ECC", "DRQ", "SEEK", "WRERR",
			     "READY", "BUSY", "AMNF", "TK0NF", "ABRT", "MCR",
			     "IDNF", "MC", "UNC", "???", "TMO"
};

static void *par_drv;		/* reference of parport driver */

static inline int status_reg(struct pd_unit *disk)
{
	return pi_read_regr(disk->pi, 1, 6);
}

static inline int read_reg(struct pd_unit *disk, int reg)
{
	return pi_read_regr(disk->pi, 0, reg);
}

static inline void write_status(struct pd_unit *disk, int val)
{
	pi_write_regr(disk->pi, 1, 6, val);
}

static inline void write_reg(struct pd_unit *disk, int reg, int val)
{
	pi_write_regr(disk->pi, 0, reg, val);
}

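/* value for the ATA device/head register: 0xa0 is the fixed base, and
   bit 4 (0x10) selects the slave drive */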
static inline u8 DRIVE(struct pd_unit *disk)
{
	return 0xa0+0x10*disk->drive;
}

/* ide command interface */

static void pd_print_error(struct pd_unit *disk, char *msg, int status)
{
	int i;

	printk("%s: %s: status = 0x%x =", disk->name, msg, status);
	for (i = 0; i < ARRAY_SIZE(pd_errs); i++)
		if (status & (1 << i))
			printk(" %s", pd_errs[i]);
	printk("\n");
}

static void pd_reset(struct pd_unit *disk)
{				/* called only for MASTER drive */
	write_status(disk, 4);
	udelay(50);
	write_status(disk, 0);
	udelay(250);
}

#define DBMSG(msg)	((verbose>1)?(msg):NULL)

static int pd_wait_for(struct pd_unit *disk, int w, char *msg)
{				/* polled wait */
	int k, r, e;

	k = 0;
	while (k < PD_SPIN) {
		r = status_reg(disk);
		k++;
		if (((r & w) == w) && !(r & STAT_BUSY))
			break;
		udelay(PD_SPIN_DEL);
	}
	e = (read_reg(disk, 1) << 8) + read_reg(disk, 7);
	if (k >= PD_SPIN)
		e |= ERR_TMO;
	if ((e & (STAT_ERR | ERR_TMO)) && (msg != NULL))
		pd_print_error(disk, msg, e);
	return e;
}

static void pd_send_command(struct pd_unit *disk, int n, int s, int h, int c0, int c1, int func)
{
	write_reg(disk, 6, DRIVE(disk) + h);
	write_reg(disk, 1, 0);		/* the IDE task file */
	write_reg(disk, 2, n);
	write_reg(disk, 3, s);
	write_reg(disk, 4, c0);
	write_reg(disk, 5, c1);
	write_reg(disk, 7, func);

	udelay(1);
}

static void pd_ide_command(struct pd_unit *disk, int func, int block, int count)
{
	int c1, c0, h, s;

	if (disk->can_lba) {
		s = block & 255;
		c0 = (block >>= 8) & 255;
		c1 = (block >>= 8) & 255;
		h = ((block >>= 8) & 15) + 0x40;
	} else {
		s = (block % disk->sectors) + 1;
		h = (block /= disk->sectors) % disk->heads;
		c0 = (block /= disk->heads) % 256;
		c1 = (block >>= 8);
	}
	pd_send_command(disk, count, s, h, c0, c1, func);
}

/* The i/o request engine */

enum action {Fail = 0, Ok = 1, Hold, Wait};
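
/* how run_fsm() interprets the value returned by a phase function:
   Ok and Fail complete the current chunk of the request (successfully
   or with an I/O error), Hold reschedules the state machine so that
   the same phase is polled again later, and Wait drops the claim on
   the adapter so that the phase is retried from the start */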

static struct request *pd_req;	/* current request */
static enum action (*phase)(void);

static void run_fsm(void);

static void ps_tq_int(struct work_struct *work);

static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);

static void schedule_fsm(void)
{
	if (!nice)
		schedule_delayed_work(&fsm_tq, 0);
	else
		schedule_delayed_work(&fsm_tq, nice-1);
}

static void ps_tq_int(struct work_struct *work)
{
	run_fsm();
}

static enum action do_pd_io_start(void);
static enum action pd_special(void);
static enum action do_pd_read_start(void);
static enum action do_pd_write_start(void);
static enum action do_pd_read_drq(void);
static enum action do_pd_write_done(void);

static int pd_queue;
static int pd_claimed;

static struct pd_unit *pd_current; /* current request's drive */
static PIA *pi_current; /* current request's PIA */

static int set_next_request(void)
{
	struct gendisk *disk;
	struct request_queue *q;
	int old_pos = pd_queue;

	do {
		disk = pd[pd_queue].gd;
		q = disk ? disk->queue : NULL;
		if (++pd_queue == PD_UNITS)
			pd_queue = 0;
		if (q) {
			struct pd_unit *disk = q->queuedata;

			if (list_empty(&disk->rq_list))
				continue;

			pd_req = list_first_entry(&disk->rq_list,
							struct request,
							queuelist);
			list_del_init(&pd_req->queuelist);
			blk_mq_start_request(pd_req);
			break;
		}
	} while (pd_queue != old_pos);

	return pd_req != NULL;
}

static void run_fsm(void)
{
	while (1) {
		enum action res;
		int stop = 0;

		if (!phase) {
			pd_current = pd_req->rq_disk->private_data;
			pi_current = pd_current->pi;
			phase = do_pd_io_start;
		}

		switch (pd_claimed) {
		case 0:
			pd_claimed = 1;
			if (!pi_schedule_claimed(pi_current, run_fsm))
				return;
			fallthrough;
		case 1:
			pd_claimed = 2;
			pi_current->proto->connect(pi_current);
		}

		switch(res = phase()) {
		case Ok: case Fail: {
			blk_status_t err;

			err = res == Ok ? 0 : BLK_STS_IOERR;
			pi_disconnect(pi_current);
			pd_claimed = 0;
			phase = NULL;
			spin_lock_irq(&pd_lock);
			if (!blk_update_request(pd_req, err,
					blk_rq_cur_bytes(pd_req))) {
				__blk_mq_end_request(pd_req, err);
				pd_req = NULL;
				stop = !set_next_request();
			}
			spin_unlock_irq(&pd_lock);
			if (stop)
				return;
		}
			fallthrough;
		case Hold:
			schedule_fsm();
			return;
		case Wait:
			pi_disconnect(pi_current);
			pd_claimed = 0;
		}
	}
}

static int pd_retries = 0;	/* i/o error retry count */
static int pd_block;		/* address of next requested block */
static int pd_count;		/* number of blocks still to do */
static int pd_run;		/* sectors in current cluster */
static char *pd_buf;		/* buffer for request in progress */

static enum action do_pd_io_start(void)
{
	switch (req_op(pd_req)) {
	case REQ_OP_DRV_IN:
		phase = pd_special;
		return pd_special();
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		pd_block = blk_rq_pos(pd_req);
		pd_count = blk_rq_cur_sectors(pd_req);
		if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
			return Fail;
		pd_run = blk_rq_sectors(pd_req);
		pd_buf = bio_data(pd_req->bio);
		pd_retries = 0;
		if (req_op(pd_req) == REQ_OP_READ)
			return do_pd_read_start();
		else
			return do_pd_write_start();
	}
	return Fail;
}

static enum action pd_special(void)
{
	struct pd_req *req = blk_mq_rq_to_pdu(pd_req);

	return req->func(pd_current);
}

static int pd_next_buf(void)
{
	unsigned long saved_flags;

	pd_count--;
	pd_run--;
	pd_buf += 512;
	pd_block++;
	if (!pd_run)
		return 1;
	if (pd_count)
		return 0;
	spin_lock_irqsave(&pd_lock, saved_flags);
	if (!blk_update_request(pd_req, 0, blk_rq_cur_bytes(pd_req))) {
		__blk_mq_end_request(pd_req, 0);
		pd_req = NULL;
		pd_count = 0;
		pd_buf = NULL;
	} else {
		pd_count = blk_rq_cur_sectors(pd_req);
		pd_buf = bio_data(pd_req->bio);
	}
	spin_unlock_irqrestore(&pd_lock, saved_flags);
	return !pd_count;
}

static unsigned long pd_timeout;

static enum action do_pd_read_start(void)
{
	if (pd_wait_for(pd_current, STAT_READY, "do_pd_read") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			return Wait;
		}
		return Fail;
	}
	pd_ide_command(pd_current, IDE_READ, pd_block, pd_run);
	phase = do_pd_read_drq;
	pd_timeout = jiffies + PD_TMO;
	return Hold;
}

static enum action do_pd_write_start(void)
{
	if (pd_wait_for(pd_current, STAT_READY, "do_pd_write") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			return Wait;
		}
		return Fail;
	}
	pd_ide_command(pd_current, IDE_WRITE, pd_block, pd_run);
	while (1) {
		if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_write_drq") & STAT_ERR) {
			if (pd_retries < PD_MAX_RETRIES) {
				pd_retries++;
				return Wait;
			}
			return Fail;
		}
		pi_write_block(pd_current->pi, pd_buf, 512);
		if (pd_next_buf())
			break;
	}
	phase = do_pd_write_done;
	pd_timeout = jiffies + PD_TMO;
	return Hold;
}

static inline int pd_ready(void)
{
	return !(status_reg(pd_current) & STAT_BUSY);
}

static enum action do_pd_read_drq(void)
{
	if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
		return Hold;

	while (1) {
		if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_read_drq") & STAT_ERR) {
			if (pd_retries < PD_MAX_RETRIES) {
				pd_retries++;
				phase = do_pd_read_start;
				return Wait;
			}
			return Fail;
		}
		pi_read_block(pd_current->pi, pd_buf, 512);
		if (pd_next_buf())
			break;
	}
	return Ok;
}

static enum action do_pd_write_done(void)
{
	if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
		return Hold;

	if (pd_wait_for(pd_current, STAT_READY, "do_pd_write_done") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			phase = do_pd_write_start;
			return Wait;
		}
		return Fail;
	}
	return Ok;
}

/* special io requests */

/* According to the ATA standard, the default CHS geometry should be
   available following a reset.  Some Western Digital drives come up
   in a mode where only LBA addresses are accepted until the device
   parameters are initialised.
*/

static void pd_init_dev_parms(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before init_dev_parms"));
	pd_send_command(disk, disk->sectors, 0, disk->heads - 1, 0, 0,
			IDE_INIT_DEV_PARMS);
	udelay(300);
	pd_wait_for(disk, 0, "Initialise device parameters");
}

static enum action pd_door_lock(struct pd_unit *disk)
{
	if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORLOCK);
		pd_wait_for(disk, STAT_READY, "Lock done");
	}
	return Ok;
}

static enum action pd_door_unlock(struct pd_unit *disk)
{
	if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
		pd_wait_for(disk, STAT_READY, "Lock done");
	}
	return Ok;
}

static enum action pd_eject(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before unlock on eject"));
	pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
	pd_wait_for(disk, 0, DBMSG("after unlock on eject"));
	pd_wait_for(disk, 0, DBMSG("before eject"));
	pd_send_command(disk, 0, 0, 0, 0, 0, IDE_EJECT);
	pd_wait_for(disk, 0, DBMSG("after eject"));
	return Ok;
}

static enum action pd_media_check(struct pd_unit *disk)
{
	int r = pd_wait_for(disk, STAT_READY, DBMSG("before media_check"));
	if (!(r & STAT_ERR)) {
		pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
		r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after READ_VRFY"));
	} else
		disk->changed = 1;	/* say changed if other error */
	if (r & ERR_MC) {
		disk->changed = 1;
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_ACKCHANGE);
		pd_wait_for(disk, STAT_READY, DBMSG("RDY after ACKCHANGE"));
		pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
		r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after VRFY"));
	}
	return Ok;
}

static void pd_standby_off(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before STANDBY"));
	pd_send_command(disk, 0, 0, 0, 0, 0, IDE_STANDBY);
	pd_wait_for(disk, 0, DBMSG("after STANDBY"));
}

static enum action pd_identify(struct pd_unit *disk)
{
	int j;
	char id[PD_ID_LEN + 1];

/* WARNING: here there may be dragons.  reset() applies to both drives,
   but we call it only on probing the MASTER. This should allow most
   common configurations to work, but be warned that a reset can clear
   settings on the SLAVE drive.
*/

	if (disk->drive == 0)
		pd_reset(disk);

	write_reg(disk, 6, DRIVE(disk));
	pd_wait_for(disk, 0, DBMSG("before IDENT"));
	pd_send_command(disk, 1, 0, 0, 0, 0, IDE_IDENTIFY);

	if (pd_wait_for(disk, STAT_DRQ, DBMSG("IDENT DRQ")) & STAT_ERR)
		return Fail;
	pi_read_block(disk->pi, pd_scratch, 512);
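
	/*
	 * Pick the geometry out of the ATA IDENTIFY data; the byte
	 * offsets below index the little-endian 512-byte buffer:
	 * word 1 = cylinders, word 3 = heads, word 6 = sectors per
	 * track, word 49 bit 9 = LBA supported, words 60-61 = total
	 * addressable sectors in LBA mode.
	 */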
	disk->can_lba = pd_scratch[99] & 2;
	disk->sectors = le16_to_cpu(*(__le16 *) (pd_scratch + 12));
	disk->heads = le16_to_cpu(*(__le16 *) (pd_scratch + 6));
	disk->cylinders = le16_to_cpu(*(__le16 *) (pd_scratch + 2));
	if (disk->can_lba)
		disk->capacity = le32_to_cpu(*(__le32 *) (pd_scratch + 120));
	else
		disk->capacity = disk->sectors * disk->heads * disk->cylinders;

	for (j = 0; j < PD_ID_LEN; j++)
		id[j ^ 1] = pd_scratch[j + PD_ID_OFF];
	j = PD_ID_LEN - 1;
	while ((j >= 0) && (id[j] <= 0x20))
		j--;
	j++;
	id[j] = 0;

	disk->removable = pd_scratch[0] & 0x80;

	printk("%s: %s, %s, %d blocks [%dM], (%d/%d/%d), %s media\n",
	       disk->name, id,
	       disk->drive ? "slave" : "master",
	       disk->capacity, disk->capacity / 2048,
	       disk->cylinders, disk->heads, disk->sectors,
	       disk->removable ? "removable" : "fixed");

	if (disk->capacity)
		pd_init_dev_parms(disk);
	if (!disk->standby)
		pd_standby_off(disk);

	return Ok;
}

/* end of io request engine */

static blk_status_t pd_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct pd_unit *disk = hctx->queue->queuedata;

	spin_lock_irq(&pd_lock);
	if (!pd_req) {
		pd_req = bd->rq;
		blk_mq_start_request(pd_req);
	} else
		list_add_tail(&bd->rq->queuelist, &disk->rq_list);
	spin_unlock_irq(&pd_lock);

	run_fsm();
	return BLK_STS_OK;
}

static int pd_special_command(struct pd_unit *disk,
			      enum action (*func)(struct pd_unit *disk))
{
	struct request *rq;
	struct pd_req *req;

	rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	req = blk_mq_rq_to_pdu(rq);

	req->func = func;
	blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
	blk_put_request(rq);
	return 0;
}

/* kernel glue structures */

static int pd_open(struct block_device *bdev, fmode_t mode)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	mutex_lock(&pd_mutex);
	disk->access++;

	if (disk->removable) {
		pd_special_command(disk, pd_media_check);
		pd_special_command(disk, pd_door_lock);
	}
	mutex_unlock(&pd_mutex);
	return 0;
}

static int pd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	if (disk->alt_geom) {
		geo->heads = PD_LOG_HEADS;
		geo->sectors = PD_LOG_SECTS;
		geo->cylinders = disk->capacity / (geo->heads * geo->sectors);
	} else {
		geo->heads = disk->heads;
		geo->sectors = disk->sectors;
		geo->cylinders = disk->cylinders;
	}

	return 0;
}

static int pd_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	switch (cmd) {
	case CDROMEJECT:
		mutex_lock(&pd_mutex);
		if (disk->access == 1)
			pd_special_command(disk, pd_eject);
		mutex_unlock(&pd_mutex);
		return 0;
	default:
		return -EINVAL;
	}
}

static void pd_release(struct gendisk *p, fmode_t mode)
{
	struct pd_unit *disk = p->private_data;

	mutex_lock(&pd_mutex);
	if (!--disk->access && disk->removable)
		pd_special_command(disk, pd_door_unlock);
	mutex_unlock(&pd_mutex);
}

static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing)
{
	struct pd_unit *disk = p->private_data;
	int r;
	if (!disk->removable)
		return 0;
	pd_special_command(disk, pd_media_check);
	r = disk->changed;
	disk->changed = 0;
	return r ? DISK_EVENT_MEDIA_CHANGE : 0;
}

static int pd_revalidate(struct gendisk *p)
{
	struct pd_unit *disk = p->private_data;
	if (pd_special_command(disk, pd_identify) == 0)
		set_capacity(p, disk->capacity);
	else
		set_capacity(p, 0);
	return 0;
}

static const struct block_device_operations pd_fops = {
	.owner		= THIS_MODULE,
	.open		= pd_open,
	.release	= pd_release,
	.ioctl		= pd_ioctl,
	.compat_ioctl	= pd_ioctl,
	.getgeo		= pd_getgeo,
	.check_events	= pd_check_events,
	.revalidate_disk= pd_revalidate
};

/* probing */

static const struct blk_mq_ops pd_mq_ops = {
	.queue_rq	= pd_queue_rq,
};

static void pd_probe_drive(struct pd_unit *disk)
{
	struct gendisk *p;

	p = alloc_disk(1 << PD_BITS);
	if (!p)
		return;

	strcpy(p->disk_name, disk->name);
	p->fops = &pd_fops;
	p->major = major;
	p->first_minor = (disk - pd) << PD_BITS;
	p->events = DISK_EVENT_MEDIA_CHANGE;
	disk->gd = p;
	p->private_data = disk;

	memset(&disk->tag_set, 0, sizeof(disk->tag_set));
	disk->tag_set.ops = &pd_mq_ops;
	disk->tag_set.cmd_size = sizeof(struct pd_req);
	disk->tag_set.nr_hw_queues = 1;
	disk->tag_set.nr_maps = 1;
	disk->tag_set.queue_depth = 2;
	disk->tag_set.numa_node = NUMA_NO_NODE;
	disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;

	if (blk_mq_alloc_tag_set(&disk->tag_set))
		return;

	p->queue = blk_mq_init_queue(&disk->tag_set);
	if (IS_ERR(p->queue)) {
		blk_mq_free_tag_set(&disk->tag_set);
		p->queue = NULL;
		return;
	}

	p->queue->queuedata = disk;
	blk_queue_max_hw_sectors(p->queue, cluster);
	blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);

	if (disk->drive == -1) {
		for (disk->drive = 0; disk->drive <= 1; disk->drive++)
			if (pd_special_command(disk, pd_identify) == 0)
				return;
	} else if (pd_special_command(disk, pd_identify) == 0)
		return;
	disk->gd = NULL;
	put_disk(p);
}

static int pd_detect(void)
{
	int found = 0, unit, pd_drive_count = 0;
	struct pd_unit *disk;

	for (unit = 0; unit < PD_UNITS; unit++) {
		int *parm = *drives[unit];
		struct pd_unit *disk = pd + unit;
		disk->pi = &disk->pia;
		disk->access = 0;
		disk->changed = 1;
		disk->capacity = 0;
		disk->drive = parm[D_SLV];
		snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a'+unit);
		disk->alt_geom = parm[D_GEO];
		disk->standby = parm[D_SBY];
		if (parm[D_PRT])
			pd_drive_count++;
		INIT_LIST_HEAD(&disk->rq_list);
	}

	par_drv = pi_register_driver(name);
	if (!par_drv) {
		pr_err("failed to register %s driver\n", name);
		return -1;
	}

	if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
		disk = pd;
		if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch,
			    PI_PD, verbose, disk->name)) {
			pd_probe_drive(disk);
			if (!disk->gd)
				pi_release(disk->pi);
		}

	} else {
		for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
			int *parm = *drives[unit];
			if (!parm[D_PRT])
				continue;
			if (pi_init(disk->pi, 0, parm[D_PRT], parm[D_MOD],
				    parm[D_UNI], parm[D_PRO], parm[D_DLY],
				    pd_scratch, PI_PD, verbose, disk->name)) {
				pd_probe_drive(disk);
				if (!disk->gd)
					pi_release(disk->pi);
			}
		}
	}
	for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
		if (disk->gd) {
			set_capacity(disk->gd, disk->capacity);
			add_disk(disk->gd);
			found = 1;
		}
	}
	if (!found) {
		printk("%s: no valid drive found\n", name);
		pi_unregister_driver(par_drv);
	}
	return found;
}

static int __init pd_init(void)
{
	if (disable)
		goto out1;

	if (register_blkdev(major, name))
		goto out1;

	printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
	       name, name, PD_VERSION, major, cluster, nice);
	if (!pd_detect())
		goto out2;

	return 0;

out2:
	unregister_blkdev(major, name);
out1:
	return -ENODEV;
}

static void __exit pd_exit(void)
{
	struct pd_unit *disk;
	int unit;
	unregister_blkdev(major, name);
	for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
		struct gendisk *p = disk->gd;
		if (p) {
			disk->gd = NULL;
			del_gendisk(p);
			blk_cleanup_queue(p->queue);
			blk_mq_free_tag_set(&disk->tag_set);
			put_disk(p);
			pi_release(disk->pi);
		}
	}
}

MODULE_LICENSE("GPL");
module_init(pd_init)
module_exit(pd_exit)