1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun
3*4882a593Smuzhiyun #include <linux/kernel.h>
4*4882a593Smuzhiyun #include <linux/export.h>
5*4882a593Smuzhiyun #include <linux/ide.h>
6*4882a593Smuzhiyun #include <linux/delay.h>
7*4882a593Smuzhiyun
/*
 * ide_ata_error - decode an ATA (disk) error and choose a recovery action
 * @drive: drive the error occurred on
 * @rq: request that was in flight when the error was raised
 * @stat: contents of the ATA status register
 * @err: contents of the ATA error register
 *
 * Accumulates retry/recovery state in scsi_req(rq)->result. Depending on
 * the status/error bits this either kills the request, schedules a host
 * reset, flags a recalibrate, or just bumps the retry counter so the
 * caller can re-issue the request.
 */
static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq,
				     u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		scsi_req(rq)->result |= ERROR_RESET;
	} else if (stat & ATA_ERR) {
		/* err has different meaning on cdrom and tape */
		if (err == ATA_ABORTED) {
			/*
			 * NOTE(review): this compares the *status* register
			 * value against the INIT_DEV_PARAMS command opcode
			 * (0x91) — a long-standing quirk in this code path;
			 * confirm intent before changing.
			 */
			if ((drive->dev_flags & IDE_DFLAG_LBA) &&
			    /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
			    hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (ATA_BBK | ATA_UNC)) {
			/* retries won't help these */
			scsi_req(rq)->result = ERROR_MAX;
		} else if (err & ATA_TRK0NF) {
			/* help it find track zero */
			scsi_req(rq)->result |= ERROR_RECAL;
		}
	}

	/*
	 * If the device still asserts DRQ on a failed read and the host does
	 * not stop its FIFO on error, drain one transfer unit so the
	 * interface does not stay wedged mid-transfer.
	 */
	if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
		int nsect = drive->mult_count ? drive->mult_count : 1;

		ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
	}

	/* give up once the retry budget is exhausted or retries are banned */
	if (scsi_req(rq)->result >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	/* a device still busy or holding data after the error needs a reset */
	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		scsi_req(rq)->result |= ERROR_RESET;

	if ((scsi_req(rq)->result & ERROR_RESET) == ERROR_RESET) {
		++scsi_req(rq)->result;
		return ide_do_reset(drive);
	}

	if ((scsi_req(rq)->result & ERROR_RECAL) == ERROR_RECAL)
		drive->special_flags |= IDE_SFLAG_RECALIBRATE;

	/* count this attempt and let the caller retry */
	++scsi_req(rq)->result;

	return ide_stopped;
}
63*4882a593Smuzhiyun
/*
 * ide_atapi_error - decode an ATAPI device error and choose a recovery action
 * @drive: drive the error occurred on
 * @rq: request that was in flight when the error was raised
 * @stat: contents of the ATA status register
 * @err: contents of the ATA error register (currently unused here)
 *
 * Like ide_ata_error() but for ATAPI (cdrom/tape) devices, where the error
 * register has packet-command semantics and is not decoded here.
 */
static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq,
				       u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		scsi_req(rq)->result |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		/* force an abort */
		hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);

	if (scsi_req(rq)->result >= ERROR_MAX) {
		/* retry budget exhausted — terminate the request */
		ide_kill_rq(drive, rq);
	} else {
		if ((scsi_req(rq)->result & ERROR_RESET) == ERROR_RESET) {
			++scsi_req(rq)->result;
			return ide_do_reset(drive);
		}
		/* count this attempt and let the caller retry */
		++scsi_req(rq)->result;
	}

	return ide_stopped;
}
93*4882a593Smuzhiyun
__ide_error(ide_drive_t * drive,struct request * rq,u8 stat,u8 err)94*4882a593Smuzhiyun static ide_startstop_t __ide_error(ide_drive_t *drive, struct request *rq,
95*4882a593Smuzhiyun u8 stat, u8 err)
96*4882a593Smuzhiyun {
97*4882a593Smuzhiyun if (drive->media == ide_disk)
98*4882a593Smuzhiyun return ide_ata_error(drive, rq, stat, err);
99*4882a593Smuzhiyun return ide_atapi_error(drive, rq, stat, err);
100*4882a593Smuzhiyun }
101*4882a593Smuzhiyun
/**
 *	ide_error	-	handle an error on the IDE
 *	@drive: drive the error occurred on
 *	@msg: message to report
 *	@stat: status bits
 *
 *	ide_error() takes action based on the error returned by the drive.
 *	For normal I/O that may well include retries. We deal with
 *	both new-style (taskfile) and old style command handling here.
 *	In the case of taskfile command handling there is work left to
 *	do
 */

ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	/* dump the registers to the log and extract the error register */
	err = ide_dump_status(drive, msg, stat);

	rq = drive->hwif->rq;
	if (rq == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (blk_rq_is_passthrough(rq)) {
		if (ata_taskfile_request(rq)) {
			struct ide_cmd *cmd = ide_req(rq)->special;

			if (cmd)
				ide_complete_cmd(drive, cmd, stat, err);
		} else if (ata_pm_request(rq)) {
			/* fail the power-management request outright */
			scsi_req(rq)->result = 1;
			ide_complete_pm_rq(drive, rq);
			return ide_stopped;
		}
		/* passthrough requests are completed immediately, no retry */
		scsi_req(rq)->result = err;
		ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
		return ide_stopped;
	}

	/* normal fs I/O: run the media-specific retry/reset logic */
	return __ide_error(drive, rq, stat, err);
}
EXPORT_SYMBOL_GPL(ide_error);
146*4882a593Smuzhiyun
/*
 * Complete a pending REQ_DRIVE_RESET misc request (if that is what the
 * port is currently processing) with the given block-layer status.
 */
static inline void ide_complete_drive_reset(ide_drive_t *drive, blk_status_t err)
{
	struct request *rq = drive->hwif->rq;

	if (rq && ata_misc_request(rq) &&
	    scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
		/*
		 * NOTE(review): blk_status_t is unsigned, so "err <= 0" is
		 * effectively "err == 0" (BLK_STS_OK) — confirm intent.
		 */
		if (err <= 0 && scsi_req(rq)->result == 0)
			scsi_req(rq)->result = -EIO;
		ide_complete_rq(drive, err, blk_rq_bytes(rq));
	}
}
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun /* needed below */
160*4882a593Smuzhiyun static ide_startstop_t do_reset1(ide_drive_t *, int);
161*4882a593Smuzhiyun
/*
 * atapi_reset_pollfunc() gets invoked to poll the interface for completion
 * every 50ms during an atapi drive reset operation. If the drive has not yet
 * responded, and we have not yet hit our maximum waiting time, then the timer
 * is restarted for another 50ms.
 */
static ide_startstop_t atapi_reset_pollfunc(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	u8 stat;

	/* reselect the device before sampling status */
	tp_ops->dev_select(drive);
	udelay(10);
	stat = tp_ops->read_status(hwif);

	if (OK_STAT(stat, 0, ATA_BUSY))
		/* BSY cleared: device SRST finished */
		printk(KERN_INFO "%s: ATAPI reset complete\n", drive->name);
	else {
		if (time_before(jiffies, hwif->poll_timeout)) {
			/* still busy and within budget: re-arm the 50ms timer */
			ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20);
			/* continue polling */
			return ide_started;
		}
		/* end of polling */
		hwif->polling = 0;
		printk(KERN_ERR "%s: ATAPI reset timed-out, status=0x%02x\n",
			drive->name, stat);
		/* do it the old fashioned way - with a full bus reset */
		return do_reset1(drive, 1);
	}
	/* done polling */
	hwif->polling = 0;
	ide_complete_drive_reset(drive, BLK_STS_OK);
	return ide_stopped;
}
198*4882a593Smuzhiyun
/*
 * Print the post-reset diagnostic code for the master (and, if it failed,
 * the slave) device on @hwif. @err is the value of the error register
 * after the reset; bit 7 reports the slave, the low bits the master.
 */
static void ide_reset_report_error(ide_hwif_t *hwif, u8 err)
{
	static const char *err_master_vals[] = {
		NULL,
		"passed",
		"formatter device error",
		"sector buffer error",
		"ECC circuitry error",
		"controlling MPU error",
	};
	u8 master_code = err & 0x7f;

	printk(KERN_ERR "%s: reset: master: ", hwif->name);
	if (master_code != 0 && master_code < ARRAY_SIZE(err_master_vals))
		printk(KERN_CONT "%s", err_master_vals[master_code]);
	else
		printk(KERN_CONT "error (0x%02x?)", err);

	if (err & 0x80)
		printk(KERN_CONT "; slave: failed");
	printk(KERN_CONT "\n");
}
217*4882a593Smuzhiyun
/*
 * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
 * during an ide reset operation. If the drives have not yet responded,
 * and we have not yet hit our maximum waiting time, then the timer is restarted
 * for another 50ms.
 */
static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_port_ops *port_ops = hwif->port_ops;
	u8 tmp;
	blk_status_t err = BLK_STS_OK;

	/* give the host driver first say; it may fail the reset outright */
	if (port_ops && port_ops->reset_poll) {
		err = port_ops->reset_poll(drive);
		if (err) {
			printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
				hwif->name, drive->name);
			goto out;
		}
	}

	tmp = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(tmp, 0, ATA_BUSY)) {
		/* still BSY: either keep polling or declare a timeout */
		if (time_before(jiffies, hwif->poll_timeout)) {
			ide_set_handler(drive, &reset_pollfunc, HZ/20);
			/* continue polling */
			return ide_started;
		}
		printk(KERN_ERR "%s: reset timed-out, status=0x%02x\n",
			hwif->name, tmp);
		drive->failures++;
		err = BLK_STS_IOERR;
	} else {
		/* BSY cleared: the error register holds the diagnostic code */
		tmp = ide_read_error(drive);

		if (tmp == 1) {
			/* diagnostic code 1 means all devices passed */
			printk(KERN_INFO "%s: reset: success\n", hwif->name);
			drive->failures = 0;
		} else {
			ide_reset_report_error(hwif, tmp);
			drive->failures++;
			err = BLK_STS_IOERR;
		}
	}
out:
	hwif->polling = 0;	/* done polling */
	ide_complete_drive_reset(drive, err);
	return ide_stopped;
}
269*4882a593Smuzhiyun
/*
 * Reset the per-drive soft state of a disk before an interface reset:
 * queue the commands (geometry / recalibrate / multmode) that must be
 * replayed once the drive comes back.
 */
static void ide_disk_pre_reset(ide_drive_t *drive)
{
	/* pre-ATA-4 drives need explicit geometry/recalibrate commands */
	int legacy = !(drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400);

	if (legacy)
		drive->special_flags =
			IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE;
	else
		drive->special_flags = 0;

	drive->mult_count = 0;
	drive->dev_flags &= ~IDE_DFLAG_PARKED;

	/* forget the requested multi-sector mode unless settings are kept */
	if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0 &&
	    (drive->dev_flags & IDE_DFLAG_USING_DMA) == 0)
		drive->mult_req = 0;

	/* re-negotiate multmode if request and current setting disagree */
	if (drive->mult_req != drive->mult_count)
		drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
}
287*4882a593Smuzhiyun
/*
 * Prepare one drive for an impending reset: fix up soft state, decide
 * whether DMA stays on, and (unless the user asked to keep settings)
 * drop tuning back to safe defaults so it is re-negotiated afterwards.
 */
static void pre_reset(ide_drive_t *drive)
{
	const struct ide_port_ops *port_ops = drive->hwif->port_ops;

	if (drive->media == ide_disk)
		ide_disk_pre_reset(drive);
	else
		/* non-disk devices get post-reset fixups instead */
		drive->dev_flags |= IDE_DFLAG_POST_RESET;

	if (drive->dev_flags & IDE_DFLAG_USING_DMA) {
		if (drive->crc_count)
			/* CRC errors seen: maybe step down the DMA mode */
			ide_check_dma_crc(drive);
		else
			ide_dma_off(drive);
	}

	if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0) {
		if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) {
			/* fall back to conservative PIO behaviour */
			drive->dev_flags &= ~IDE_DFLAG_UNMASK;
			drive->io_32bit = 0;
		}
		return;
	}

	if (port_ops && port_ops->pre_reset)
		port_ops->pre_reset(drive);

	/* remember the speed so it can be restored after the reset */
	if (drive->current_speed != 0xff)
		drive->desired_speed = drive->current_speed;
	drive->current_speed = 0xff;
}
319*4882a593Smuzhiyun
/*
 * do_reset1() attempts to recover a confused drive by resetting it.
 * Unfortunately, resetting a disk drive actually resets all devices on
 * the same interface, so it can really be thought of as resetting the
 * interface rather than resetting the drive.
 *
 * ATAPI devices have their own reset mechanism which allows them to be
 * individually reset without clobbering other devices on the same interface.
 *
 * Unfortunately, the IDE interface does not generate an interrupt to let
 * us know when the reset operation has finished, so we must poll for this.
 * Equally poor, though, is the fact that this may take a very long time to
 * complete, (up to 30 seconds worstcase). So, instead of busy-waiting here
 * for it, we set a timer to poll at 50ms intervals.
 */
static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_io_ports *io_ports = &hwif->io_ports;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	const struct ide_port_ops *port_ops;
	ide_drive_t *tdrive;
	unsigned long flags, timeout;
	int i;
	DEFINE_WAIT(wait);

	spin_lock_irqsave(&hwif->lock, flags);

	/* We must not reset with running handlers */
	BUG_ON(hwif->handler != NULL);

	/* For an ATAPI device, first try an ATAPI SRST. */
	if (drive->media != ide_disk && !do_not_try_atapi) {
		pre_reset(drive);
		tp_ops->dev_select(drive);
		udelay(20);
		tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
		ndelay(400);
		hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
		hwif->polling = 1;
		/* poll for completion; atapi_reset_pollfunc re-arms itself */
		__ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20);
		spin_unlock_irqrestore(&hwif->lock, flags);
		return ide_started;
	}

	/* We must not disturb devices in the IDE_DFLAG_PARKED state. */
	do {
		unsigned long now;

		prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE);
		/* find the latest park deadline among all present devices */
		timeout = jiffies;
		ide_port_for_each_present_dev(i, tdrive, hwif) {
			if ((tdrive->dev_flags & IDE_DFLAG_PARKED) &&
			    time_after(tdrive->sleep, timeout))
				timeout = tdrive->sleep;
		}

		now = jiffies;
		if (time_before_eq(timeout, now))
			break;

		/* drop the lock while sleeping until the park expires */
		spin_unlock_irqrestore(&hwif->lock, flags);
		timeout = schedule_timeout_uninterruptible(timeout - now);
		spin_lock_irqsave(&hwif->lock, flags);
	} while (timeout);
	finish_wait(&ide_park_wq, &wait);

	/*
	 * First, reset any device state data we were maintaining
	 * for any of the drives on this interface.
	 */
	ide_port_for_each_dev(i, tdrive, hwif)
		pre_reset(tdrive);

	/* without a control register there is no way to pulse SRST */
	if (io_ports->ctl_addr == 0) {
		spin_unlock_irqrestore(&hwif->lock, flags);
		ide_complete_drive_reset(drive, BLK_STS_IOERR);
		return ide_stopped;
	}

	/*
	 * Note that we also set nIEN while resetting the device,
	 * to mask unwanted interrupts from the interface during the reset.
	 * However, due to the design of PC hardware, this will cause an
	 * immediate interrupt due to the edge transition it produces.
	 * This single interrupt gives us a "fast poll" for drives that
	 * recover from reset very quickly, saving us the first 50ms wait time.
	 */
	/* set SRST and nIEN */
	tp_ops->write_devctl(hwif, ATA_SRST | ATA_NIEN | ATA_DEVCTL_OBS);
	/* more than enough time */
	udelay(10);
	/* clear SRST, leave nIEN (unless device is on the quirk list) */
	tp_ops->write_devctl(hwif,
		((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) ? 0 : ATA_NIEN) |
		ATA_DEVCTL_OBS);
	/* more than enough time */
	udelay(10);
	hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
	hwif->polling = 1;
	/* poll for completion; reset_pollfunc re-arms itself */
	__ide_set_handler(drive, &reset_pollfunc, HZ/20);

	/*
	 * Some weird controller like resetting themselves to a strange
	 * state when the disks are reset this way. At least, the Winbond
	 * 553 documentation says that
	 */
	port_ops = hwif->port_ops;
	if (port_ops && port_ops->resetproc)
		port_ops->resetproc(drive);

	spin_unlock_irqrestore(&hwif->lock, flags);
	return ide_started;
}
434*4882a593Smuzhiyun
435*4882a593Smuzhiyun /*
436*4882a593Smuzhiyun * ide_do_reset() is the entry point to the drive/interface reset code.
437*4882a593Smuzhiyun */
438*4882a593Smuzhiyun
ide_do_reset(ide_drive_t * drive)439*4882a593Smuzhiyun ide_startstop_t ide_do_reset(ide_drive_t *drive)
440*4882a593Smuzhiyun {
441*4882a593Smuzhiyun return do_reset1(drive, 0);
442*4882a593Smuzhiyun }
443*4882a593Smuzhiyun EXPORT_SYMBOL(ide_do_reset);
444