/*
 * IDE I/O functions
 *
 * Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <asm/io.h>

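/**
 * ide_end_rq - complete part or all of a request
 * @drive: drive the request was issued to
 * @rq: request being completed
 * @error: block layer status for the completion
 * @nr_bytes: number of bytes to complete
 *
 * Updates @rq for @nr_bytes of progress and, if that finishes the
 * request, ends it (dropping the drive's pending sense request if
 * that is what just completed). Also re-enables DMA once a PIO
 * retry has run its course. Returns 0 if the request was fully
 * completed, 1 if it still has bytes outstanding.
 */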
int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
	       unsigned int nr_bytes)
{
	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	if (!blk_update_request(rq, error, nr_bytes)) {
		if (rq == drive->sense_rq) {
			drive->sense_rq = NULL;
			drive->sense_rq_active = false;
		}

		__blk_mq_end_request(rq, error);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ide_end_rq);

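/**
 * ide_complete_cmd - record the result of a taskfile command
 * @drive: drive the command was issued to
 * @cmd: command being completed
 * @stat: status register value
 * @err: error register value
 *
 * Saves the final status/error pair, reads back the taskfile (and
 * the data register for IDE_FTFLAG_IN_DATA commands), notes a
 * successful head unload for park commands, and copies the result
 * back into the originating taskfile request, if any.
 */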
void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
{
	const struct ide_tp_ops *tp_ops = drive->hwif->tp_ops;
	struct ide_taskfile *tf = &cmd->tf;
	struct request *rq = cmd->rq;
	u8 tf_cmd = tf->command;

	tf->error = err;
	tf->status = stat;

	if (cmd->ftf_flags & IDE_FTFLAG_IN_DATA) {
		u8 data[2];

		tp_ops->input_data(drive, cmd, data, 2);

		cmd->tf.data = data[0];
		cmd->hob.data = data[1];
	}

	ide_tf_readback(drive, cmd);

	if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
	    tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
		if (tf->lbal != 0xc4) {
			printk(KERN_ERR "%s: head unload failed!\n",
			       drive->name);
			ide_tf_dump(drive->name, cmd);
		} else
			drive->dev_flags |= IDE_DFLAG_PARKED;
	}

	if (rq && ata_taskfile_request(rq)) {
		struct ide_cmd *orig_cmd = ide_req(rq)->special;

		if (cmd->tf_flags & IDE_TFLAG_DYN)
			kfree(orig_cmd);
		else if (cmd != orig_cmd)
			memcpy(orig_cmd, cmd, sizeof(*cmd));
	}
}

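/**
 * ide_complete_rq - finish the request currently owned by a port
 * @drive: drive the request was issued to
 * @error: block layer completion status
 * @nr_bytes: number of bytes to complete
 *
 * Wrapper around ide_end_rq() for the port's current request.
 * Failfast requests are completed in full on error. Clears
 * hwif->rq once the request has been fully ended.
 */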
int ide_complete_rq(ide_drive_t *drive, blk_status_t error, unsigned int nr_bytes)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	int rc;

	/*
	 * if failfast is set on a request, override number of sectors
	 * and complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = blk_rq_sectors(rq) << 9;

	rc = ide_end_rq(drive, rq, error, nr_bytes);
	if (rc == 0)
		hwif->rq = NULL;

	return rc;
}
EXPORT_SYMBOL(ide_complete_rq);

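/**
 * ide_kill_rq - terminate a request with an error
 * @drive: drive the request was issued to
 * @rq: request to kill
 *
 * Fills in a media-appropriate result code and completes the whole
 * request as failed. Driver-private floppy/tape requests get result
 * 0 so those drivers can do their own error handling.
 */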
void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
	u8 media = drive->media;

	drive->failed_pc = NULL;

	if ((media == ide_floppy || media == ide_tape) && drv_req) {
		scsi_req(rq)->result = 0;
	} else {
		if (media == ide_tape)
			scsi_req(rq)->result = IDE_DRV_ERROR_GENERAL;
		else if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
			scsi_req(rq)->result = -EIO;
	}

	ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
}

static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->sect;
	tf->lbal = drive->sect;
	tf->lbam = drive->cyl;
	tf->lbah = drive->cyl >> 8;
	tf->device = (drive->head - 1) | drive->select;
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->sect;
	tf->command = ATA_CMD_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->mult_req;
	tf->command = ATA_CMD_SET_MULTI;
}

/**
 * do_special - issue some special commands
 * @drive: drive the command is for
 *
 * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 */

static ide_startstop_t do_special(ide_drive_t *drive)
{
	struct ide_cmd cmd;

#ifdef DEBUG
	printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__,
		drive->special_flags);
#endif
	if (drive->media != ide_disk) {
		drive->special_flags = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.protocol = ATA_PROT_NODATA;

	if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) {
		drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY;
		ide_tf_set_specify_cmd(drive, &cmd.tf);
	} else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) {
		drive->special_flags &= ~IDE_SFLAG_RECALIBRATE;
		ide_tf_set_restore_cmd(drive, &cmd.tf);
	} else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) {
		drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE;
		ide_tf_set_setmult_cmd(drive, &cmd.tf);
	} else
		BUG();

	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
	cmd.tf_flags = IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &cmd);

	return ide_started;
}

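/**
 * ide_map_sg - build the scatter/gather table for a command
 * @drive: drive the command is for
 * @cmd: command whose sg table is to be filled in
 *
 * Maps the request behind @cmd onto the port's scatterlist and, if
 * the queue requires DMA padding, extends the last entry to the
 * padded length.
 */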
void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table, *last_sg = NULL;
	struct request *rq = cmd->rq;

	cmd->sg_nents = __blk_rq_map_sg(drive->queue, rq, sg, &last_sg);
	if (blk_rq_bytes(rq) && (blk_rq_bytes(rq) & rq->q->dma_pad_mask))
		last_sg->length +=
			(rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
}
EXPORT_SYMBOL_GPL(ide_map_sg);

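/**
 * ide_init_sg_cmd - initialize scatter/gather bookkeeping
 * @cmd: command to initialize
 * @nr_bytes: total transfer length in bytes
 *
 * Resets the cursor state used by the PIO code to walk the
 * scatterlist as data is transferred.
 */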
void ide_init_sg_cmd(struct ide_cmd *cmd, unsigned int nr_bytes)
{
	cmd->nbytes = cmd->nleft = nr_bytes;
	cmd->cursg_ofs = 0;
	cmd->cursg = NULL;
}
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

/**
 * execute_drive_cmd - issue a special drive command
 * @drive: the drive to issue the command on
 * @rq: the request structure holding the command
 *
 * execute_drive_cmd() issues a special drive command, usually
 * initiated by ioctl() from the external hdparm program. The
 * command can be a drive command, drive task or taskfile
 * operation. Weirdly you can call it with NULL to wait for
 * all commands to finish. Don't do this, as that behaviour is
 * due to change.
 */

static ide_startstop_t execute_drive_cmd(ide_drive_t *drive,
					 struct request *rq)
{
	struct ide_cmd *cmd = ide_req(rq)->special;

	if (cmd) {
		if (cmd->protocol == ATA_PROT_PIO) {
			ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
			ide_map_sg(drive, cmd);
		}

		return do_rw_taskfile(drive, cmd);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	scsi_req(rq)->result = 0;
	ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));

	return ide_stopped;
}

static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = scsi_req(rq)->cmd[0];

	switch (cmd) {
	case REQ_PARK_HEADS:
	case REQ_UNPARK_HEADS:
		return ide_do_park_unpark(drive, rq);
	case REQ_DEVSET_EXEC:
		return ide_do_devset(drive, rq);
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		BUG();
	}
}

/**
 * start_request - start of I/O and command issuing for IDE
 *
 * start_request() initiates handling of a new I/O request. It
 * accepts commands and I/O (read/write) requests.
 *
 * FIXME: this function needs a rename
 */

static ide_startstop_t start_request(ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		drive->hwif->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->rq_flags |= RQF_FAILED;
		goto kill_rq;
	}

	if (drive->prep_rq && !drive->prep_rq(drive, rq))
		return ide_stopped;

	if (ata_pm_request(rq))
		ide_check_pm_state(drive, rq);

	drive->hwif->tp_ops->dev_select(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}

	if (drive->special_flags == 0) {
		struct ide_driver *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (ata_taskfile_request(rq))
			return execute_drive_cmd(drive, rq);
		else if (ata_pm_request(rq)) {
			struct ide_pm_state *pm = ide_req(rq)->special;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_rq(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && ata_misc_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		drv = *(struct ide_driver **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, blk_rq_pos(rq));
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 * ide_stall_queue - pause an IDE device
 * @drive: drive to stall
 * @timeout: time to stall for (jiffies)
 *
 * ide_stall_queue() can be used by a drive to give excess bandwidth back
 * to the port by sleeping for timeout jiffies.
 */

void ide_stall_queue(ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->dev_flags |= IDE_DFLAG_SLEEPING;
}
EXPORT_SYMBOL(ide_stall_queue);

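/*
 * Port and host busy flags.
 *
 * ide_{lock,unlock}_port() guard a single port and rely on the caller
 * holding hwif->lock. ide_{lock,unlock}_host() only do real work for
 * IDE_HFLAG_SERIALIZE hosts, where command issue must be serialized
 * across all ports; they also take/release any board-specific lock
 * via host->get_lock()/release_lock().
 */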
static inline int ide_lock_port(ide_hwif_t *hwif)
{
	if (hwif->busy)
		return 1;

	hwif->busy = 1;

	return 0;
}

static inline void ide_unlock_port(ide_hwif_t *hwif)
{
	hwif->busy = 0;
}

static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
{
	int rc = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
		if (rc == 0) {
			if (host->get_lock)
				host->get_lock(ide_intr, hwif);
		}
	}
	return rc;
}

static inline void ide_unlock_host(struct ide_host *host)
{
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (host->release_lock)
			host->release_lock();
		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
	}
}

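/**
 * ide_requeue_and_plug - put a request back and restart the queue
 * @drive: drive the request was issued to
 * @rq: request to requeue, or NULL to just re-run the queue
 *
 * Requeues @rq (if any) and kicks the hardware queue after a short
 * delay, mimicking the plug delay of the legacy request layer.
 */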
void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;

	/* Use 3ms as that was the old plug delay */
	if (rq) {
		blk_mq_requeue_request(rq, false);
		blk_mq_delay_kick_requeue_list(q, 3);
	} else
		blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
}

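/**
 * ide_issue_rq - issue a request to a drive
 * @drive: target drive
 * @rq: request to issue
 * @local_requeue: if the port is busy, put @rq on drive->rq_list
 *	instead of going through the block layer requeue path
 *
 * Takes the host and port locks, selects the drive and starts the
 * request, looping while handlers complete synchronously. If the
 * port cannot accept the request right now, it is requeued for a
 * later retry.
 */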
blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
			  bool local_requeue)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_host *host = hwif->host;
	ide_startstop_t startstop;

	if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
		rq->rq_flags |= RQF_DONTPREP;
		ide_req(rq)->special = NULL;
	}

	/* HLD do_request() callback might sleep, make sure it's okay */
	might_sleep();

	if (ide_lock_host(host, hwif))
		return BLK_STS_DEV_RESOURCE;

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
		ide_hwif_t *prev_port;

		WARN_ON_ONCE(hwif->rq);
repeat:
		prev_port = hwif->host->cur_port;
		if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
		    time_after(drive->sleep, jiffies)) {
			ide_unlock_port(hwif);
			goto plug_device;
		}

		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
		    hwif != prev_port) {
			ide_drive_t *cur_dev =
				prev_port ? prev_port->cur_dev : NULL;

			/*
			 * set nIEN for previous port, drives in the
			 * quirk list may not like intr setups/cleanups
			 */
			if (cur_dev &&
			    (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
				prev_port->tp_ops->write_devctl(prev_port,
								ATA_NIEN |
								ATA_DEVCTL_OBS);

			hwif->host->cur_port = hwif;
		}
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the blk_fetch_request()
		 * above to return us whatever is in the queue. Since we call
		 * ide_do_request() ourselves, we end up taking requests while
		 * the queue is blocked...
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    ata_pm_request(rq) == 0 &&
		    (rq->rq_flags & RQF_PM) == 0) {
			/* there should be no pending command at this point */
			ide_unlock_port(hwif);
			goto plug_device;
		}

		scsi_req(rq)->resid_len = blk_rq_bytes(rq);
		hwif->rq = rq;

		spin_unlock_irq(&hwif->lock);
		startstop = start_request(drive, rq);
		spin_lock_irq(&hwif->lock);

		if (startstop == ide_stopped) {
			rq = hwif->rq;
			hwif->rq = NULL;
			if (rq)
				goto repeat;
			ide_unlock_port(hwif);
			goto out;
		}
	} else {
plug_device:
		if (local_requeue)
			list_add(&rq->queuelist, &drive->rq_list);
		spin_unlock_irq(&hwif->lock);
		ide_unlock_host(host);
		if (!local_requeue)
			ide_requeue_and_plug(drive, rq);
		return BLK_STS_OK;
	}

out:
	spin_unlock_irq(&hwif->lock);
	if (rq == NULL)
		ide_unlock_host(host);
	return BLK_STS_OK;
}

/*
 * Issue a new request to a device.
 */
blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	ide_drive_t *drive = hctx->queue->queuedata;
	ide_hwif_t *hwif = drive->hwif;

	spin_lock_irq(&hwif->lock);
	if (drive->sense_rq_active) {
		spin_unlock_irq(&hwif->lock);
		return BLK_STS_DEV_RESOURCE;
	}
	spin_unlock_irq(&hwif->lock);

	blk_mq_start_request(bd->rq);
	return ide_issue_rq(drive, bd->rq, false);
}

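/**
 * drive_is_ready - check whether an interrupt may be from this drive
 * @drive: drive to check
 *
 * For DMA, defers to the controller's dma_test_irq(). Otherwise
 * peeks at (alt)status: a BUSY drive cannot be interrupting, while
 * a ready one merely *might* be.
 */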
static int drive_is_ready(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0;

	if (drive->waiting_for_dma)
		return hwif->dma_ops->dma_test_irq(drive);

	if (hwif->io_ports.ctl_addr &&
	    (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
		stat = hwif->tp_ops->read_altstatus(hwif);
	else
		/* Note: this may clear a pending IRQ!! */
		stat = hwif->tp_ops->read_status(hwif);

	if (stat & ATA_BUSY)
		/* drive busy: definitely not interrupting */
		return 0;

	/* drive ready: *might* be interrupting */
	return 1;
}

/**
 * ide_timer_expiry - handle lack of an IDE interrupt
 * @t: timer entry embedded in the hwif being processed
 *
 * An IDE command has timed out before the expected drive return
 * occurred. At this point we attempt to clean up the current
 * mess. If the current handler includes an expiry handler then
 * we invoke the expiry handler, and providing it is happy the
 * work is done. If that fails we apply generic recovery rules
 * invoking the handler and checking the drive DMA status. We
 * have an excessively incestuous relationship with the DMA
 * logic that wants cleaning up.
 */
void ide_timer_expiry(struct timer_list *t)
{
	ide_hwif_t *hwif = from_timer(hwif, t, timer);
	ide_drive_t *drive;
	ide_handler_t *handler;
	unsigned long flags;
	int wait = -1;
	int plug_device = 0;
	struct request *rq_in_flight;

	spin_lock_irqsave(&hwif->lock, flags);

	handler = hwif->handler;

	if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
	} else {
		ide_expiry_t *expiry = hwif->expiry;
		ide_startstop_t startstop = ide_stopped;

		drive = hwif->cur_dev;

		if (expiry) {
			wait = expiry(drive);
			if (wait > 0) {	/* continue */
				/* reset timer */
				hwif->timer.expires = jiffies + wait;
				hwif->req_gen_timer = hwif->req_gen;
				add_timer(&hwif->timer);
				spin_unlock_irqrestore(&hwif->lock, flags);
				return;
			}
		}
		hwif->handler = NULL;
		hwif->expiry = NULL;
		/*
		 * We need to simulate a real interrupt when invoking
		 * the handler() function, which means we need to
		 * globally mask the specific IRQ:
		 */
		spin_unlock(&hwif->lock);
		/* disable_irq_nosync ?? */
		disable_irq(hwif->irq);

		if (hwif->polling) {
			startstop = handler(drive);
		} else if (drive_is_ready(drive)) {
			if (drive->waiting_for_dma)
				hwif->dma_ops->dma_lost_irq(drive);
			if (hwif->port_ops && hwif->port_ops->clear_irq)
				hwif->port_ops->clear_irq(drive);

			printk(KERN_WARNING "%s: lost interrupt\n",
				drive->name);
			startstop = handler(drive);
		} else {
			if (drive->waiting_for_dma)
				startstop = ide_dma_timeout_retry(drive, wait);
			else
				startstop = ide_error(drive, "irq timeout",
					hwif->tp_ops->read_status(hwif));
		}
		/* Disable interrupts again, `handler' might have enabled it */
		spin_lock_irq(&hwif->lock);
		enable_irq(hwif->irq);
		if (startstop == ide_stopped && hwif->polling == 0) {
			rq_in_flight = hwif->rq;
			hwif->rq = NULL;
			ide_unlock_port(hwif);
			plug_device = 1;
		}
	}
	spin_unlock_irqrestore(&hwif->lock, flags);

	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_requeue_and_plug(drive, rq_in_flight);
	}
}

/**
 * unexpected_intr - handle an unexpected IDE interrupt
 * @irq: interrupt line
 * @hwif: port being processed
 *
 * There's nothing really useful we can do with an unexpected interrupt,
 * other than reading the status register (to clear it), and logging it.
 * There should be no way that an irq can happen before we're ready for it,
 * so we needn't worry much about losing an "important" interrupt here.
 *
 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 * the drive enters "idle", "standby", or "sleep" mode, so if the status
 * looks "good", we just ignore the interrupt completely.
 *
 * This routine assumes __cli() is in effect when called.
 *
 * If an unexpected interrupt happens on irq15 while we are handling irq14
 * and if the two interfaces are "serialized" (CMD640), then it looks like
 * we could screw up by interfering with a new request being set up for
 * irq15.
 *
 * In reality, this is a non-issue. The new command is not sent unless
 * the drive is ready to accept one, in which case we know the drive is
 * not trying to interrupt us. And ide_set_handler() is always invoked
 * before completing the issuance of any new drive command, so we will not
 * be accidentally invoked as a result of any valid command completion
 * interrupt.
 */
static void unexpected_intr(int irq, ide_hwif_t *hwif)
{
	u8 stat = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		/* Try to not flood the console with msgs */
		static unsigned long last_msgtime, count;
		++count;

		if (time_after(jiffies, last_msgtime + HZ)) {
			last_msgtime = jiffies;
			printk(KERN_ERR "%s: unexpected interrupt, "
				"status=0x%02x, count=%ld\n",
				hwif->name, stat, count);
		}
	}
}

/**
 * ide_intr - default IDE interrupt handler
 * @irq: interrupt number
 * @dev_id: hwif
 *
 * This is the default IRQ handler for the IDE layer. You should
 * not need to override it. If you do, be aware it is subtle in
 * places.
 *
 * hwif is the interface in the group currently performing
 * a command. hwif->cur_dev is the drive and hwif->handler is
 * the IRQ handler to call. As we issue a command the handlers
 * step through multiple states, reassigning the handler to the
 * next step in the process. Unlike a smart SCSI controller IDE
 * expects the main processor to sequence the various transfer
 * stages. We also manage a poll timer to catch up with most
 * timeout situations. There are still a few where the handlers
 * don't ever decide to give up.
 *
 * The handler eventually returns ide_stopped to indicate the
 * request completed. At this point we issue the next request
 * on the port and the process begins again.
 */
irqreturn_t ide_intr(int irq, void *dev_id)
{
	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
	struct ide_host *host = hwif->host;
	ide_drive_t *drive;
	ide_handler_t *handler;
	unsigned long flags;
	ide_startstop_t startstop;
	irqreturn_t irq_ret = IRQ_NONE;
	int plug_device = 0;
	struct request *rq_in_flight;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (hwif != host->cur_port)
			goto out_early;
	}

	spin_lock_irqsave(&hwif->lock, flags);

	if (hwif->port_ops && hwif->port_ops->test_irq &&
	    hwif->port_ops->test_irq(hwif) == 0)
		goto out;

	handler = hwif->handler;

	if (handler == NULL || hwif->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 */
		if ((host->irq_flags & IRQF_SHARED) == 0) {
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwif);
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
		}
		goto out;
	}

	drive = hwif->cur_dev;

	if (!drive_is_ready(drive))
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device. Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date. Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		goto out;

	hwif->handler = NULL;
	hwif->expiry = NULL;
	hwif->req_gen++;
	del_timer(&hwif->timer);
	spin_unlock(&hwif->lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);

	spin_lock_irq(&hwif->lock);
	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	if (startstop == ide_stopped && hwif->polling == 0) {
		BUG_ON(hwif->handler);
		rq_in_flight = hwif->rq;
		hwif->rq = NULL;
		ide_unlock_port(hwif);
		plug_device = 1;
	}
	irq_ret = IRQ_HANDLED;
out:
	spin_unlock_irqrestore(&hwif->lock, flags);
out_early:
	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_requeue_and_plug(drive, rq_in_flight);
	}

	return irq_ret;
}
EXPORT_SYMBOL_GPL(ide_intr);

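/**
 * ide_pad_transfer - pad a PIO transfer out to a multiple of 4 bytes
 * @drive: drive being transferred to/from
 * @write: non-zero to pad by writing zeroes, else by draining reads
 * @len: number of bytes left to pad
 *
 * Used when a command transfers less data than the drive expects,
 * to keep the drive's transfer state machine in sync.
 */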
void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 buf[4] = { 0 };

	while (len > 0) {
		if (write)
			hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
		else
			hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
		len -= 4;
	}
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);

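/**
 * ide_insert_request_head - queue a request for priority dispatch
 * @drive: drive the request is for
 * @rq: request to queue (used for sense requests)
 *
 * Marks a sense request as pending and schedules the drive's work
 * item, which dispatches requests from drive->rq_list ahead of the
 * normal queue.
 */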
void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
{
	drive->sense_rq_active = true;
	list_add_tail(&rq->queuelist, &drive->rq_list);
	kblockd_schedule_work(&drive->rq_work);
}
EXPORT_SYMBOL_GPL(ide_insert_request_head);