// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
 * Copyright (C) 2005, 2007 Bartlomiej Zolnierkiewicz
 */

/*
 * Mostly written by Mark Lord <mlord@pobox.com>
 *               and Gadi Oxman <gadio@netvision.net.il>
 *               and Andre Hedrick <andre@linux-ide.org>
 *
 * See linux/MAINTAINERS for address of current maintainer.
 *
 * This is the IDE probe module, as evolved from hd.c and ide.c.
 *
 * -- increase WAIT_PIDENTIFY to avoid CD-ROM locking at boot
 *    by Andrea Arcangeli
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <asm/io.h>

/**
 * generic_id - add a generic drive id
 * @drive: drive to make an ID block for
 *
 * Add a fake id field to the drive we are passed. This allows
 * us to skip a ton of NULL checks (which people always miss)
 * and makes drive properties unconditional outside of this file.
 */

static void generic_id(ide_drive_t *drive)
{
	u16 *id = drive->id;

	id[ATA_ID_CUR_CYLS] = id[ATA_ID_CYLS] = drive->cyl;
	id[ATA_ID_CUR_HEADS] = id[ATA_ID_HEADS] = drive->head;
	id[ATA_ID_CUR_SECTORS] = id[ATA_ID_SECTORS] = drive->sect;
}

static void ide_disk_init_chs(ide_drive_t *drive)
{
	u16 *id = drive->id;

	/* Extract geometry if we did not already have one for the drive */
	if (!drive->cyl || !drive->head || !drive->sect) {
		drive->cyl = drive->bios_cyl = id[ATA_ID_CYLS];
		drive->head = drive->bios_head = id[ATA_ID_HEADS];
		drive->sect = drive->bios_sect = id[ATA_ID_SECTORS];
	}

	/* Handle logical geometry translation by the drive */
	if (ata_id_current_chs_valid(id)) {
		drive->cyl = id[ATA_ID_CUR_CYLS];
		drive->head = id[ATA_ID_CUR_HEADS];
		drive->sect = id[ATA_ID_CUR_SECTORS];
	}

	/* Use physical geometry if what we have still makes no sense */
	if (drive->head > 16 && id[ATA_ID_HEADS] && id[ATA_ID_HEADS] <= 16) {
		drive->cyl = id[ATA_ID_CYLS];
		drive->head = id[ATA_ID_HEADS];
		drive->sect = id[ATA_ID_SECTORS];
	}
}

static void ide_disk_init_mult_count(ide_drive_t *drive)
{
	u16 *id = drive->id;
	u8 max_multsect = id[ATA_ID_MAX_MULTSECT] & 0xff;

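	/*
	 * The low byte of word 47 is the drive's maximum sectors per
	 * READ/WRITE MULTIPLE; bit 8 of word 59 flags the current
	 * multiple-sector setting as valid.  Multi-mode is only worth
	 * using when the drive handles at least 4 sectors per interrupt.
	 */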
	if (max_multsect) {
		if ((max_multsect / 2) > 1)
			id[ATA_ID_MULTSECT] = max_multsect | 0x100;
		else
			id[ATA_ID_MULTSECT] &= ~0x1ff;

		drive->mult_req = id[ATA_ID_MULTSECT] & 0xff;

		if (drive->mult_req)
			drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
	}
}

static void ide_classify_ata_dev(ide_drive_t *drive)
{
	u16 *id = drive->id;
	char *m = (char *)&id[ATA_ID_PROD];
	int is_cfa = ata_id_is_cfa(id);

	/* CF devices are *not* removable in Linux definition of the term */
	if (is_cfa == 0 && (id[ATA_ID_CONFIG] & (1 << 7)))
		drive->dev_flags |= IDE_DFLAG_REMOVABLE;

	drive->media = ide_disk;

	if (!ata_id_has_unload(drive->id))
		drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;

	printk(KERN_INFO "%s: %s, %s DISK drive\n", drive->name, m,
	       is_cfa ? "CFA" : "ATA");
}

static void ide_classify_atapi_dev(ide_drive_t *drive)
{
	u16 *id = drive->id;
	char *m = (char *)&id[ATA_ID_PROD];
	u8 type = (id[ATA_ID_CONFIG] >> 8) & 0x1f;

	printk(KERN_INFO "%s: %s, ATAPI ", drive->name, m);
	switch (type) {
	case ide_floppy:
		if (!strstr(m, "CD-ROM")) {
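			/*
			 * "poyp" catches model strings that were reported
			 * byte-swapped (i.e. a mangled "floppy").
			 */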
			if (!strstr(m, "oppy") &&
			    !strstr(m, "poyp") &&
			    !strstr(m, "ZIP"))
				printk(KERN_CONT "cdrom or floppy?, assuming ");
			if (drive->media != ide_cdrom) {
				printk(KERN_CONT "FLOPPY");
				drive->dev_flags |= IDE_DFLAG_REMOVABLE;
				break;
			}
		}
		/* Early cdrom models used zero */
		type = ide_cdrom;
		fallthrough;
	case ide_cdrom:
		drive->dev_flags |= IDE_DFLAG_REMOVABLE;
#ifdef CONFIG_PPC
		/* kludge for Apple PowerBook internal zip */
		if (!strstr(m, "CD-ROM") && strstr(m, "ZIP")) {
			printk(KERN_CONT "FLOPPY");
			type = ide_floppy;
			break;
		}
#endif
		printk(KERN_CONT "CD/DVD-ROM");
		break;
	case ide_tape:
		printk(KERN_CONT "TAPE");
		break;
	case ide_optical:
		printk(KERN_CONT "OPTICAL");
		drive->dev_flags |= IDE_DFLAG_REMOVABLE;
		break;
	default:
		printk(KERN_CONT "UNKNOWN (type %d)", type);
		break;
	}

	printk(KERN_CONT " drive\n");
	drive->media = type;
	/* an ATAPI device ignores DRDY */
	drive->ready_stat = 0;
	if (ata_id_cdb_intr(id))
		drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
	drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
	/* we don't do head unloading on ATAPI devices */
	drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
}

/**
 * do_identify - identify a drive
 * @drive: drive to identify
 * @cmd: command used
 * @id: buffer for IDENTIFY data
 *
 * Called when we have issued a drive identify command to
 * read and parse the results. This function is run with
 * interrupts disabled.
 */

static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id)
{
	ide_hwif_t *hwif = drive->hwif;
	char *m = (char *)&id[ATA_ID_PROD];
	unsigned long flags;
	int bswap = 1;

	/* local CPU only; some systems need this */
	local_irq_save(flags);
	/* read 512 bytes of id info */
	hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
	local_irq_restore(flags);

	drive->dev_flags |= IDE_DFLAG_ID_READ;
#ifdef DEBUG
	printk(KERN_INFO "%s: dumping identify data\n", drive->name);
	ide_dump_identify((u8 *)id);
#endif
	ide_fix_driveid(id);

	/*
	 * ATA_CMD_ID_ATA returns little-endian info,
	 * ATA_CMD_ID_ATAPI *usually* returns little-endian info.
	 */
	if (cmd == ATA_CMD_ID_ATAPI) {
		if ((m[0] == 'N' && m[1] == 'E') || /* NEC */
		    (m[0] == 'F' && m[1] == 'X') || /* Mitsumi */
		    (m[0] == 'P' && m[1] == 'i'))   /* Pioneer */
			/* Vertos drives may still be weird */
			bswap ^= 1;
	}

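	/*
	 * ide_fixstring() byte-swaps the ID string if requested, strips
	 * the blank padding and zero-fills the tail so the model,
	 * firmware and serial fields are usable as C strings.
	 */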
	ide_fixstring(m, ATA_ID_PROD_LEN, bswap);
	ide_fixstring((char *)&id[ATA_ID_FW_REV], ATA_ID_FW_REV_LEN, bswap);
	ide_fixstring((char *)&id[ATA_ID_SERNO], ATA_ID_SERNO_LEN, bswap);

	/* we depend on this a lot! */
	m[ATA_ID_PROD_LEN - 1] = '\0';

	if (strstr(m, "E X A B Y T E N E S T"))
		drive->dev_flags &= ~IDE_DFLAG_PRESENT;
	else
		drive->dev_flags |= IDE_DFLAG_PRESENT;
}

/**
 * ide_dev_read_id - send ATA/ATAPI IDENTIFY command
 * @drive: drive to identify
 * @cmd: command to use
 * @id: buffer for IDENTIFY data
 * @irq_ctx: flag set when called from the IRQ context
 *
 * Sends an ATA(PI) IDENTIFY request to a drive and waits for a response.
 *
 * Returns:	0  device was identified
 *		1  device timed-out (no response to identify request)
 *		2  device aborted the command (refused to identify itself)
 */

int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id, int irq_ctx)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_io_ports *io_ports = &hwif->io_ports;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	int use_altstatus = 0, rc;
	unsigned long timeout;
	u8 s = 0, a = 0;

	/*
	 * Disable device IRQ.  Otherwise we'll get spurious interrupts
	 * during the identify phase that the IRQ handler isn't expecting.
	 */
	if (io_ports->ctl_addr)
		tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS);

	/* take a deep breath */
	if (irq_ctx)
		mdelay(50);
	else
		msleep(50);

	if (io_ports->ctl_addr &&
	    (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0) {
		a = tp_ops->read_altstatus(hwif);
		s = tp_ops->read_status(hwif);
		if ((a ^ s) & ~ATA_SENSE)
			/* ancient Seagate drives, broken interfaces */
			printk(KERN_INFO "%s: probing with STATUS(0x%02x) "
					 "instead of ALTSTATUS(0x%02x)\n",
					 drive->name, s, a);
		else
			/* use non-intrusive polling */
			use_altstatus = 1;
	}

	/* set features register for atapi
	 * identify command to be sure of reply
	 */
	if (cmd == ATA_CMD_ID_ATAPI) {
		struct ide_taskfile tf;

		memset(&tf, 0, sizeof(tf));
		/* disable DMA & overlap */
		tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE);
	}

	/* ask drive for ID */
	tp_ops->exec_command(hwif, cmd);

	timeout = ((cmd == ATA_CMD_ID_ATA) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;

	/* wait for IRQ and ATA_DRQ */
	if (irq_ctx) {
		rc = __ide_wait_stat(drive, ATA_DRQ, BAD_R_STAT, timeout, &s);
		if (rc)
			return 1;
	} else {
		rc = ide_busy_sleep(drive, timeout, use_altstatus);
		if (rc)
			return 1;

		msleep(50);
		s = tp_ops->read_status(hwif);
	}

	if (OK_STAT(s, ATA_DRQ, BAD_R_STAT)) {
		/* drive returned ID */
		do_identify(drive, cmd, id);
		/* drive responded with ID */
		rc = 0;
		/* clear drive IRQ */
		(void)tp_ops->read_status(hwif);
	} else {
		/* drive refused ID */
		rc = 2;
	}
	return rc;
}

int ide_busy_sleep(ide_drive_t *drive, unsigned long timeout, int altstatus)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat;

	timeout += jiffies;

	do {
		msleep(50);	/* give drive a breather */
		stat = altstatus ? hwif->tp_ops->read_altstatus(hwif)
				 : hwif->tp_ops->read_status(hwif);
		if ((stat & ATA_BUSY) == 0)
			return 0;
	} while (time_before(jiffies, timeout));

	printk(KERN_ERR "%s: timeout in %s\n", drive->name, __func__);

	return 1;	/* drive timed-out */
}

static u8 ide_read_device(ide_drive_t *drive)
{
	struct ide_taskfile tf;

	drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_DEVICE);

	return tf.device;
}

/**
 * do_probe - probe an IDE device
 * @drive: drive to probe
 * @cmd: command to use
 *
 * do_probe() has the difficult job of finding a drive if it exists,
 * without getting hung up if it doesn't exist, without trampling on
 * ethernet cards, and without leaving any IRQs dangling to haunt us later.
 *
 * If a drive is "known" to exist (from CMOS or kernel parameters),
 * but does not respond right away, the probe will "hang in there"
 * for the maximum wait time (about 30 seconds), otherwise it will
 * exit much more quickly.
 *
 * Returns:	0  device was identified
 *		1  device timed-out (no response to identify request)
 *		2  device aborted the command (refused to identify itself)
 *		3  bad status from device (possible for ATAPI drives)
 *		4  probe was not attempted because failure was obvious
 */

static int do_probe(ide_drive_t *drive, u8 cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	u16 *id = drive->id;
	int rc;
	u8 present = !!(drive->dev_flags & IDE_DFLAG_PRESENT), stat;

	/* avoid waiting for inappropriate probes */
	if (present && drive->media != ide_disk && cmd == ATA_CMD_ID_ATA)
		return 4;

#ifdef DEBUG
	printk(KERN_INFO "probing for %s: present=%d, media=%d, probetype=%s\n",
		drive->name, present, drive->media,
		(cmd == ATA_CMD_ID_ATA) ? "ATA" : "ATAPI");
#endif

	/* needed for some systems
	 * (e.g. crw9624 as drive0 with disk as slave)
	 */
	msleep(50);
	tp_ops->dev_select(drive);
	msleep(50);

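	/*
	 * If the device register does not read back the value we just
	 * selected, there is most likely nothing on the bus answering
	 * for this unit.
	 */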
	if (ide_read_device(drive) != drive->select && present == 0) {
		if (drive->dn & 1) {
			/* exit with drive0 selected */
			tp_ops->dev_select(hwif->devices[0]);
			/* allow ATA_BUSY to assert & clear */
			msleep(50);
		}
		/* no i/f present: mmm.. this should be a 4 -ml */
		return 3;
	}

	stat = tp_ops->read_status(hwif);

	if (OK_STAT(stat, ATA_DRDY, ATA_BUSY) ||
	    present || cmd == ATA_CMD_ID_ATAPI) {
		rc = ide_dev_read_id(drive, cmd, id, 0);
		if (rc)
			/* failed: try again */
			rc = ide_dev_read_id(drive, cmd, id, 0);

		stat = tp_ops->read_status(hwif);

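		/*
		 * BSY and DRDY asserted together (and nothing else) is not
		 * something a real drive reports; most likely there is no
		 * device behind this select, so give up.
		 */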
		if (stat == (ATA_BUSY | ATA_DRDY))
			return 4;

		if (rc == 1 && cmd == ATA_CMD_ID_ATAPI) {
			printk(KERN_ERR "%s: no response (status = 0x%02x), "
					"resetting drive\n", drive->name, stat);
			msleep(50);
			tp_ops->dev_select(drive);
			msleep(50);
			tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
			(void)ide_busy_sleep(drive, WAIT_WORSTCASE, 0);
			rc = ide_dev_read_id(drive, cmd, id, 0);
		}

		/* ensure drive IRQ is clear */
		stat = tp_ops->read_status(hwif);

		if (rc == 1)
			printk(KERN_ERR "%s: no response (status = 0x%02x)\n",
				drive->name, stat);
	} else {
		/* not present or maybe ATAPI */
		rc = 3;
	}
	if (drive->dn & 1) {
		/* exit with drive0 selected */
		tp_ops->dev_select(hwif->devices[0]);
		msleep(50);
		/* ensure drive irq is clear */
		(void)tp_ops->read_status(hwif);
	}
	return rc;
}

/**
 * probe_for_drive - upper level drive probe
 * @drive: drive to probe for
 *
 * probe_for_drive() tests for existence of a given drive using do_probe()
 * and presents things to the user as needed.
 *
 * Returns:	0  no device was found
 *		1  device was found
 *		   (note: IDE_DFLAG_PRESENT might still be not set)
 */

static u8 probe_for_drive(ide_drive_t *drive)
{
	char *m;
	int rc;
	u8 cmd;

	drive->dev_flags &= ~IDE_DFLAG_ID_READ;

	m = (char *)&drive->id[ATA_ID_PROD];
	strcpy(m, "UNKNOWN");

	/* skip probing? */
	if ((drive->dev_flags & IDE_DFLAG_NOPROBE) == 0) {
		/* if !(success||timed-out) */
		cmd = ATA_CMD_ID_ATA;
		rc = do_probe(drive, cmd);
		if (rc >= 2) {
			/* look for ATAPI device */
			cmd = ATA_CMD_ID_ATAPI;
			rc = do_probe(drive, cmd);
		}

		if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
			return 0;

		/* identification failed? */
		if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
			if (drive->media == ide_disk) {
				printk(KERN_INFO "%s: non-IDE drive, CHS=%d/%d/%d\n",
					drive->name, drive->cyl,
					drive->head, drive->sect);
			} else if (drive->media == ide_cdrom) {
				printk(KERN_INFO "%s: ATAPI cdrom (?)\n", drive->name);
			} else {
				/* nuke it */
				printk(KERN_WARNING "%s: Unknown device on bus refused identification. Ignoring.\n", drive->name);
				drive->dev_flags &= ~IDE_DFLAG_PRESENT;
			}
		} else {
			if (cmd == ATA_CMD_ID_ATAPI)
				ide_classify_atapi_dev(drive);
			else
				ide_classify_ata_dev(drive);
		}
	}

	if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
		return 0;

	/* The drive wasn't being helpful.  Add generic info only */
	if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
		generic_id(drive);
		return 1;
	}

	if (drive->media == ide_disk) {
		ide_disk_init_chs(drive);
		ide_disk_init_mult_count(drive);
	}

	return 1;
}

static void hwif_release_dev(struct device *dev)
{
	ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev);

	complete(&hwif->gendev_rel_comp);
}

static int ide_register_port(ide_hwif_t *hwif)
{
	int ret;

	/* register with global device tree */
	dev_set_name(&hwif->gendev, "%s", hwif->name);
	dev_set_drvdata(&hwif->gendev, hwif);
	if (hwif->gendev.parent == NULL)
		hwif->gendev.parent = hwif->dev;
	hwif->gendev.release = hwif_release_dev;

	ret = device_register(&hwif->gendev);
	if (ret < 0) {
		printk(KERN_WARNING "IDE: %s: device_register error: %d\n",
			__func__, ret);
		goto out;
	}

	hwif->portdev = device_create(ide_port_class, &hwif->gendev,
				      MKDEV(0, 0), hwif, "%s", hwif->name);
	if (IS_ERR(hwif->portdev)) {
		ret = PTR_ERR(hwif->portdev);
		device_unregister(&hwif->gendev);
	}
out:
	return ret;
}

/**
 * ide_port_wait_ready - wait for port to become ready
 * @hwif: IDE port
 *
 * This is needed on some PPCs and a bunch of BIOS-less embedded
 * platforms.  Typical cases are:
 *
 * - The firmware hard reset the disk before booting the kernel,
 *   the drive is still doing its power-on reset sequence, which
 *   can take up to 30 seconds.
 *
 * - The firmware does nothing (or no firmware), the device is
 *   still in POST state (same as above actually).
 *
 * - Some CD/DVD/Writer combo drives tend to drive the bus during
 *   their reset sequence even when they are non-selected slave
 *   devices, thus preventing discovery of the main HD.
 *
 * Doing this wait-for-non-busy should not harm any existing
 * configuration and fix some issues like the above.
 *
 * BenH.
 *
 * Returns 0 on success, error code (< 0) otherwise.
 */

static int ide_port_wait_ready(ide_hwif_t *hwif)
{
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	ide_drive_t *drive;
	int i, rc;

	printk(KERN_DEBUG "Probing IDE interface %s...\n", hwif->name);

	/* Let HW settle down a bit from whatever init state we
	 * come from */
	mdelay(2);

	/* Wait for the BSY bit to go away.  The spec timeout is 30 seconds,
	 * but I know of at least one disk that takes 31 seconds, so use 35
	 * here to be safe.
	 */
	rc = ide_wait_not_busy(hwif, 35000);
	if (rc)
		return rc;

	/* Now make sure both master & slave are ready */
	ide_port_for_each_dev(i, drive, hwif) {
		/* Ignore disks that we will not probe for later. */
		if ((drive->dev_flags & IDE_DFLAG_NOPROBE) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_PRESENT)) {
			tp_ops->dev_select(drive);
			tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
			mdelay(2);
			rc = ide_wait_not_busy(hwif, 35000);
			if (rc)
				goto out;
		} else
			printk(KERN_DEBUG "%s: ide_wait_not_busy() skipped\n",
					  drive->name);
	}
out:
	/* Exit function with master reselected (let's be sane) */
	if (i)
		tp_ops->dev_select(hwif->devices[0]);

	return rc;
}

/**
 * ide_undecoded_slave - look for bad CF adapters
 * @dev1: slave device
 *
 * Analyse the drives on the interface and attempt to decide if we
 * have the same drive viewed twice.  This occurs with crap CF adapters
 * and PCMCIA sometimes.
 */

void ide_undecoded_slave(ide_drive_t *dev1)
{
	ide_drive_t *dev0 = dev1->hwif->devices[0];

	if ((dev1->dn & 1) == 0 || (dev0->dev_flags & IDE_DFLAG_PRESENT) == 0)
		return;

	/* If the models don't match they are not the same product */
	if (strcmp((char *)&dev0->id[ATA_ID_PROD],
		   (char *)&dev1->id[ATA_ID_PROD]))
		return;

	/* Serial numbers do not match */
	if (strncmp((char *)&dev0->id[ATA_ID_SERNO],
		    (char *)&dev1->id[ATA_ID_SERNO], ATA_ID_SERNO_LEN))
		return;

	/* No serial number, thankfully very rare for CF */
	if (*(char *)&dev0->id[ATA_ID_SERNO] == 0)
		return;

	/* Appears to be an IDE flash adapter with decode bugs */
	printk(KERN_WARNING "ide-probe: ignoring undecoded slave\n");

	dev1->dev_flags &= ~IDE_DFLAG_PRESENT;
}

EXPORT_SYMBOL_GPL(ide_undecoded_slave);

static int ide_probe_port(ide_hwif_t *hwif)
{
	ide_drive_t *drive;
	unsigned int irqd;
	int i, rc = -ENODEV;

	BUG_ON(hwif->present);

	if ((hwif->devices[0]->dev_flags & IDE_DFLAG_NOPROBE) &&
	    (hwif->devices[1]->dev_flags & IDE_DFLAG_NOPROBE))
		return -EACCES;

	/*
	 * We must always disable IRQ, as probe_for_drive will assert IRQ, but
	 * we'll install our IRQ driver much later...
	 */
	irqd = hwif->irq;
	if (irqd)
		disable_irq(hwif->irq);

	if (ide_port_wait_ready(hwif) == -EBUSY)
		printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name);

	/*
	 * Second drive should only exist if first drive was found,
	 * but a lot of cdrom drives are configured as single slaves.
	 */
	ide_port_for_each_dev(i, drive, hwif) {
		(void) probe_for_drive(drive);
		if (drive->dev_flags & IDE_DFLAG_PRESENT)
			rc = 0;
	}

	/*
	 * Use cached IRQ number.  It might be (and is...) changed by probe
	 * code above
	 */
	if (irqd)
		enable_irq(irqd);

	return rc;
}

static void ide_port_tune_devices(ide_hwif_t *hwif)
{
	const struct ide_port_ops *port_ops = hwif->port_ops;
	ide_drive_t *drive;
	int i;

	ide_port_for_each_present_dev(i, drive, hwif) {
		ide_check_nien_quirk_list(drive);

		if (port_ops && port_ops->quirkproc)
			port_ops->quirkproc(drive);
	}

	ide_port_for_each_present_dev(i, drive, hwif) {
		ide_set_max_pio(drive);

		drive->dev_flags |= IDE_DFLAG_NICE1;

		if (hwif->dma_ops)
			ide_set_dma(drive);
	}
}

static void ide_initialize_rq(struct request *rq)
{
	struct ide_request *req = blk_mq_rq_to_pdu(rq);

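	/*
	 * Each request carries an embedded scsi_request so that ATAPI
	 * packet commands have somewhere to return sense data.
	 */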
	req->special = NULL;
	scsi_req_init(&req->sreq);
	req->sreq.sense = req->sense;
}

static const struct blk_mq_ops ide_mq_ops = {
	.queue_rq		= ide_queue_rq,
	.initialize_rq_fn	= ide_initialize_rq,
};

/*
 * init request queue
 */
static int ide_init_queue(ide_drive_t *drive)
{
	struct request_queue *q;
	ide_hwif_t *hwif = drive->hwif;
	int max_sectors = 256;
	int max_sg_entries = PRD_ENTRIES;
	struct blk_mq_tag_set *set;

	/*
	 * Our default set up assumes the normal IDE case, that is
	 * 64K segmenting, standard PRD setup and LBA28.  Some drivers
	 * then impose their own limits; for LBA48 we could raise the
	 * limit, but as yet do not.
	 */

	set = &drive->tag_set;
	set->ops = &ide_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 32;
	set->reserved_tags = 1;
	set->cmd_size = sizeof(struct ide_request);
	set->numa_node = hwif_to_node(hwif);
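	/* ->queue_rq may sleep (PIO, polling), so the queue must be blocking */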
	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	if (blk_mq_alloc_tag_set(set))
		return 1;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(set);
		return 1;
	}

	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);

	q->queuedata = drive;
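	/* DMA PRD entries must not cross a 64 KiB boundary */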
	blk_queue_segment_boundary(q, 0xffff);

	if (hwif->rqsize < max_sectors)
		max_sectors = hwif->rqsize;
	blk_queue_max_hw_sectors(q, max_sectors);

#ifdef CONFIG_PCI
	/* When we have an IOMMU, we may have a problem where pci_map_sg()
	 * creates segments that don't completely match our boundary
	 * requirements and thus need to be broken up again.  Because it
	 * doesn't align properly either, we may actually have to break up
	 * into more segments than we got in the first place; the worst
	 * case is twice as many.
	 * This will be fixed once we teach pci_map_sg() about our boundary
	 * requirements, hopefully soon.  *FIXME*
	 */
	max_sg_entries >>= 1;
#endif /* CONFIG_PCI */

	blk_queue_max_segments(q, max_sg_entries);

	/* assign drive queue */
	drive->queue = q;

	return 0;
}

static DEFINE_MUTEX(ide_cfg_mtx);

/*
 * For any present drive:
 * - allocate the block device queue
 */
static int ide_port_setup_devices(ide_hwif_t *hwif)
{
	ide_drive_t *drive;
	int i, j = 0;

	mutex_lock(&ide_cfg_mtx);
	ide_port_for_each_present_dev(i, drive, hwif) {
		if (ide_init_queue(drive)) {
			printk(KERN_ERR "ide: failed to init %s\n",
					drive->name);
			drive->dev_flags &= ~IDE_DFLAG_PRESENT;
			continue;
		}

		j++;
	}
	mutex_unlock(&ide_cfg_mtx);

	return j;
}

static void ide_host_enable_irqs(struct ide_host *host)
{
	ide_hwif_t *hwif;
	int i;

	ide_host_for_each_port(i, hwif, host) {
		if (hwif == NULL)
			continue;

		/* clear any pending IRQs */
		hwif->tp_ops->read_status(hwif);

		/* unmask IRQs */
		if (hwif->io_ports.ctl_addr)
			hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
	}
}

/*
 * This routine sets up the IRQ for an IDE interface.
 */
static int init_irq(ide_hwif_t *hwif)
{
	struct ide_io_ports *io_ports = &hwif->io_ports;
	struct ide_host *host = hwif->host;
	irq_handler_t irq_handler = host->irq_handler;
	int sa = host->irq_flags;

	if (irq_handler == NULL)
		irq_handler = ide_intr;

	if (!host->get_lock)
		if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
			goto out_up;

#if !defined(__mc68000__)
	printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
		io_ports->data_addr, io_ports->status_addr,
		io_ports->ctl_addr, hwif->irq);
#else
	printk(KERN_INFO "%s at 0x%08lx on irq %d", hwif->name,
		io_ports->data_addr, hwif->irq);
#endif /* __mc68000__ */
	if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE)
		printk(KERN_CONT " (serialized)");
	printk(KERN_CONT "\n");

	return 0;
out_up:
	return 1;
}

static int ata_lock(dev_t dev, void *data)
{
	/* FIXME: we want to pin hwif down */
	return 0;
}

static struct kobject *ata_probe(dev_t dev, int *part, void *data)
{
	ide_hwif_t *hwif = data;
	int unit = *part >> PARTN_BITS;
	ide_drive_t *drive = hwif->devices[unit];

	if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
		return NULL;

	if (drive->media == ide_disk)
		request_module("ide-disk");
	if (drive->media == ide_cdrom || drive->media == ide_optical)
		request_module("ide-cd");
	if (drive->media == ide_tape)
		request_module("ide-tape");
	if (drive->media == ide_floppy)
		request_module("ide-floppy");

	return NULL;
}

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct gendisk *p = data;
	*part &= (1 << PARTN_BITS) - 1;
	return &disk_to_dev(p)->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
	struct gendisk *p = data;

	if (!get_disk_and_module(p))
		return -1;
	return 0;
}

void ide_register_region(struct gendisk *disk)
{
	blk_register_region(MKDEV(disk->major, disk->first_minor),
			    disk->minors, NULL, exact_match, exact_lock, disk);
}

EXPORT_SYMBOL_GPL(ide_register_region);

void ide_unregister_region(struct gendisk *disk)
{
	blk_unregister_region(MKDEV(disk->major, disk->first_minor),
			      disk->minors);
}

EXPORT_SYMBOL_GPL(ide_unregister_region);

void ide_init_disk(struct gendisk *disk, ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned int unit = drive->dn & 1;

	disk->major = hwif->major;
	disk->first_minor = unit << PARTN_BITS;
	sprintf(disk->disk_name, "hd%c", 'a' + hwif->index * MAX_DRIVES + unit);
	disk->queue = drive->queue;
}

EXPORT_SYMBOL_GPL(ide_init_disk);

static void drive_release_dev(struct device *dev)
{
	ide_drive_t *drive = container_of(dev, ide_drive_t, gendev);

	ide_proc_unregister_device(drive);

	if (drive->sense_rq)
		blk_mq_free_request(drive->sense_rq);

	blk_cleanup_queue(drive->queue);
	drive->queue = NULL;
	blk_mq_free_tag_set(&drive->tag_set);

	drive->dev_flags &= ~IDE_DFLAG_PRESENT;

	complete(&drive->gendev_rel_comp);
}

static int hwif_init(ide_hwif_t *hwif)
{
	if (!hwif->irq) {
		printk(KERN_ERR "%s: disabled, no IRQ\n", hwif->name);
		return 0;
	}

	if (register_blkdev(hwif->major, hwif->name))
		return 0;

	if (!hwif->sg_max_nents)
		hwif->sg_max_nents = PRD_ENTRIES;

	hwif->sg_table = kmalloc_array(hwif->sg_max_nents,
				       sizeof(struct scatterlist),
				       GFP_KERNEL);
	if (!hwif->sg_table) {
		printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
		goto out;
	}

	sg_init_table(hwif->sg_table, hwif->sg_max_nents);

	if (init_irq(hwif)) {
		printk(KERN_ERR "%s: disabled, unable to get IRQ %d\n",
			hwif->name, hwif->irq);
		goto out;
	}

	blk_register_region(MKDEV(hwif->major, 0), MAX_DRIVES << PARTN_BITS,
			    THIS_MODULE, ata_probe, ata_lock, hwif);
	return 1;

out:
	unregister_blkdev(hwif->major, hwif->name);
	return 0;
}

static void hwif_register_devices(ide_hwif_t *hwif)
{
	ide_drive_t *drive;
	unsigned int i;

	ide_port_for_each_present_dev(i, drive, hwif) {
		struct device *dev = &drive->gendev;
		int ret;

		dev_set_name(dev, "%u.%u", hwif->index, i);
		dev_set_drvdata(dev, drive);
		dev->parent = &hwif->gendev;
		dev->bus = &ide_bus_type;
		dev->release = drive_release_dev;

		ret = device_register(dev);
		if (ret < 0)
			printk(KERN_WARNING "IDE: %s: device_register error: "
					    "%d\n", __func__, ret);
	}
}

static void ide_port_init_devices(ide_hwif_t *hwif)
{
	const struct ide_port_ops *port_ops = hwif->port_ops;
	ide_drive_t *drive;
	int i;

	ide_port_for_each_dev(i, drive, hwif) {
		drive->dn = i + hwif->channel * 2;

		if (hwif->host_flags & IDE_HFLAG_IO_32BIT)
			drive->io_32bit = 1;
		if (hwif->host_flags & IDE_HFLAG_NO_IO_32BIT)
			drive->dev_flags |= IDE_DFLAG_NO_IO_32BIT;
		if (hwif->host_flags & IDE_HFLAG_UNMASK_IRQS)
			drive->dev_flags |= IDE_DFLAG_UNMASK;
		if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
			drive->dev_flags |= IDE_DFLAG_NO_UNMASK;

		drive->pio_mode = XFER_PIO_0;

		if (port_ops && port_ops->init_dev)
			port_ops->init_dev(drive);
	}
}

static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
			  const struct ide_port_info *d)
{
	hwif->channel = port;

	hwif->chipset = d->chipset ? d->chipset : ide_pci;

	if (d->init_iops)
		d->init_iops(hwif);

	/* ->host_flags may be set by ->init_iops (or even earlier...) */
	hwif->host_flags |= d->host_flags;
	hwif->pio_mask = d->pio_mask;

	if (d->tp_ops)
		hwif->tp_ops = d->tp_ops;

	/* ->set_pio_mode for DTC2278 is currently limited to port 0 */
	if ((hwif->host_flags & IDE_HFLAG_DTC2278) == 0 || hwif->channel == 0)
		hwif->port_ops = d->port_ops;

	hwif->swdma_mask = d->swdma_mask;
	hwif->mwdma_mask = d->mwdma_mask;
	hwif->ultra_mask = d->udma_mask;

	if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
		int rc;

		hwif->dma_ops = d->dma_ops;

		if (d->init_dma)
			rc = d->init_dma(hwif, d);
		else
			rc = ide_hwif_setup_dma(hwif, d);

		if (rc < 0) {
			printk(KERN_INFO "%s: DMA disabled\n", hwif->name);

			hwif->dma_ops = NULL;
			hwif->dma_base = 0;
			hwif->swdma_mask = 0;
			hwif->mwdma_mask = 0;
			hwif->ultra_mask = 0;
		}
	}

	if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
	    ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base))
		hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;

	if (d->max_sectors)
		hwif->rqsize = d->max_sectors;
	else {
		if ((hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
		    (hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA))
			hwif->rqsize = 256;
		else
			hwif->rqsize = 65536;
	}

	/* call chipset specific routine for each enabled port */
	if (d->init_hwif)
		d->init_hwif(hwif);
}

static void ide_port_cable_detect(ide_hwif_t *hwif)
{
	const struct ide_port_ops *port_ops = hwif->port_ops;

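	/*
	 * Only bother with cable detection when the port supports UDMA3
	 * or higher (mask 0x78), the modes that need an 80-wire cable.
	 */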
	if (port_ops && port_ops->cable_detect && (hwif->ultra_mask & 0x78)) {
		if (hwif->cbl != ATA_CBL_PATA40_SHORT)
			hwif->cbl = port_ops->cable_detect(hwif);
	}
}

/*
 * Deferred request list insertion handler
 */
static void drive_rq_insert_work(struct work_struct *work)
{
	ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	blk_status_t ret;
	LIST_HEAD(list);

	blk_mq_quiesce_queue(drive->queue);

	ret = BLK_STS_OK;
	spin_lock_irq(&hwif->lock);
	while (!list_empty(&drive->rq_list)) {
		rq = list_first_entry(&drive->rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);

		spin_unlock_irq(&hwif->lock);
		ret = ide_issue_rq(drive, rq, true);
		spin_lock_irq(&hwif->lock);
	}
	spin_unlock_irq(&hwif->lock);

	blk_mq_unquiesce_queue(drive->queue);

	if (ret != BLK_STS_OK)
		kblockd_schedule_work(&drive->rq_work);
}

static const u8 ide_hwif_to_major[] =
	{ IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR, IDE4_MAJOR,
	  IDE5_MAJOR, IDE6_MAJOR, IDE7_MAJOR, IDE8_MAJOR, IDE9_MAJOR };

static void ide_port_init_devices_data(ide_hwif_t *hwif)
{
	ide_drive_t *drive;
	int i;

	ide_port_for_each_dev(i, drive, hwif) {
		u8 j = (hwif->index * MAX_DRIVES) + i;
		u16 *saved_id = drive->id;

		memset(drive, 0, sizeof(*drive));
		memset(saved_id, 0, SECTOR_SIZE);
		drive->id = saved_id;

		drive->media = ide_disk;
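		/* bit 4 selects master/slave; ATA_DEVICE_OBS carries the
		 * obsolete bits some old drives still expect to be set */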
1204*4882a593Smuzhiyun drive->select = (i << 4) | ATA_DEVICE_OBS;
1205*4882a593Smuzhiyun drive->hwif = hwif;
1206*4882a593Smuzhiyun drive->ready_stat = ATA_DRDY;
1207*4882a593Smuzhiyun drive->bad_wstat = BAD_W_STAT;
1208*4882a593Smuzhiyun drive->special_flags = IDE_SFLAG_RECALIBRATE |
1209*4882a593Smuzhiyun IDE_SFLAG_SET_GEOMETRY;
1210*4882a593Smuzhiyun drive->name[0] = 'h';
1211*4882a593Smuzhiyun drive->name[1] = 'd';
1212*4882a593Smuzhiyun drive->name[2] = 'a' + j;
1213*4882a593Smuzhiyun drive->max_failures = IDE_DEFAULT_MAX_FAILURES;
1214*4882a593Smuzhiyun
1215*4882a593Smuzhiyun INIT_LIST_HEAD(&drive->list);
1216*4882a593Smuzhiyun init_completion(&drive->gendev_rel_comp);
1217*4882a593Smuzhiyun
1218*4882a593Smuzhiyun INIT_WORK(&drive->rq_work, drive_rq_insert_work);
1219*4882a593Smuzhiyun INIT_LIST_HEAD(&drive->rq_list);
1220*4882a593Smuzhiyun }
1221*4882a593Smuzhiyun }
1222*4882a593Smuzhiyun
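/*
 * One-time setup of a freshly allocated port: index, block major, name
 * ("ide0".."ide9"), locking and timer infrastructure, default transport
 * ops and per-drive defaults.
 */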
static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
{
	/* fill in any non-zero initial values */
	hwif->index = index;
	hwif->major = ide_hwif_to_major[index];

	hwif->name[0] = 'i';
	hwif->name[1] = 'd';
	hwif->name[2] = 'e';
	hwif->name[3] = '0' + index;

	spin_lock_init(&hwif->lock);

	timer_setup(&hwif->timer, ide_timer_expiry, 0);

	init_completion(&hwif->gendev_rel_comp);

	hwif->tp_ops = &default_tp_ops;

	ide_port_init_devices_data(hwif);
}

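/*
 * Copy the caller-provided hardware description (I/O ports, IRQ, device
 * and parent pointers, config word) into the port structure.
 */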
static void ide_init_port_hw(ide_hwif_t *hwif, struct ide_hw *hw)
{
	memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
	hwif->irq = hw->irq;
	hwif->dev = hw->dev;
	hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
	hwif->config_data = hw->config;
}

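/* bitmap of claimed port slots (ide0..ide9), protected by ide_cfg_mtx */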
static unsigned int ide_indexes;

/**
 * ide_find_port_slot - find free port slot
 * @d: IDE port info
 *
 * Return the new port slot index or -ENOENT if we are out of free slots.
 */

static int ide_find_port_slot(const struct ide_port_info *d)
{
	int idx = -ENOENT;
	u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
	u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;

	/*
	 * Claim an unassigned slot.
	 *
	 * Give preference to claiming other slots before claiming ide0/ide1,
	 * just in case there's another interface yet-to-be-scanned which uses
	 * ports 0x1f0/0x170 (the ide0/ide1 defaults), unless the card is
	 * flagged as bootable, in which case it may claim ide0/ide1 directly.
	 */
	mutex_lock(&ide_cfg_mtx);
	if (bootable) {
		if ((ide_indexes | i) != (1 << MAX_HWIFS) - 1)
			idx = ffz(ide_indexes | i);
	} else {
		if ((ide_indexes | 3) != (1 << MAX_HWIFS) - 1)
			idx = ffz(ide_indexes | 3);
		else if ((ide_indexes & 3) != 3)
			idx = ffz(ide_indexes);
	}
	if (idx >= 0)
		ide_indexes |= (1 << idx);
	mutex_unlock(&ide_cfg_mtx);

	return idx;
}

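/* Return a slot claimed by ide_find_port_slot() to the free pool. */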
static void ide_free_port_slot(int idx)
{
	mutex_lock(&ide_cfg_mtx);
	ide_indexes &= ~(1 << idx);
	mutex_unlock(&ide_cfg_mtx);
}

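/* Free the drive structures and identify buffers of a port. */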
static void ide_port_free_devices(ide_hwif_t *hwif)
{
	ide_drive_t *drive;
	int i;

	ide_port_for_each_dev(i, drive, hwif) {
		kfree(drive->id);
		kfree(drive);
	}
}

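/*
 * Allocate the MAX_DRIVES drive structures of a port together with one
 * SECTOR_SIZE identify buffer per drive, NUMA-local to @node when possible.
 */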
static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
{
	ide_drive_t *drive;
	int i;

	for (i = 0; i < MAX_DRIVES; i++) {
		drive = kzalloc_node(sizeof(*drive), GFP_KERNEL, node);
		if (drive == NULL)
			goto out_nomem;

		/*
		 * In order to keep things simple we have an id
		 * block for all drives at all times. If the device
		 * is pre-ATA or refuses ATA/ATAPI identify we
		 * will add faked data to this.
		 *
		 * Also note that 0 everywhere means "can't do X"
		 */
		drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node);
		if (drive->id == NULL)
			goto out_free_drive;

		hwif->devices[i] = drive;
	}
	return 0;

out_free_drive:
	kfree(drive);
out_nomem:
	ide_port_free_devices(hwif);
	return -ENOMEM;
}

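/**
 * ide_host_alloc - allocate an IDE host
 * @d: IDE port info (may be NULL)
 * @hws: array of hardware port descriptions
 * @n_ports: number of entries in @hws
 *
 * Allocate a struct ide_host plus one port per non-NULL entry in @hws,
 * claiming a free port slot for each.  Entries that cannot be set up are
 * skipped.  Returns the new host or NULL on failure.
 */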
struct ide_host *ide_host_alloc(const struct ide_port_info *d,
				struct ide_hw **hws, unsigned int n_ports)
{
	struct ide_host *host;
	struct device *dev = hws[0] ? hws[0]->dev : NULL;
	int node = dev ? dev_to_node(dev) : -1;
	int i;

	host = kzalloc_node(sizeof(*host), GFP_KERNEL, node);
	if (host == NULL)
		return NULL;

	for (i = 0; i < n_ports; i++) {
		ide_hwif_t *hwif;
		int idx;

		if (hws[i] == NULL)
			continue;

		hwif = kzalloc_node(sizeof(*hwif), GFP_KERNEL, node);
		if (hwif == NULL)
			continue;

		if (ide_port_alloc_devices(hwif, node) < 0) {
			kfree(hwif);
			continue;
		}

		idx = ide_find_port_slot(d);
		if (idx < 0) {
			printk(KERN_ERR "%s: no free slot for interface\n",
					d ? d->name : "ide");
			ide_port_free_devices(hwif);
			kfree(hwif);
			continue;
		}

		ide_init_port_data(hwif, idx);

		hwif->host = host;

		host->ports[i] = hwif;
		host->n_ports++;
	}

	if (host->n_ports == 0) {
		kfree(host);
		return NULL;
	}

	host->dev[0] = dev;

	if (d) {
		host->init_chipset = d->init_chipset;
		host->get_lock = d->get_lock;
		host->release_lock = d->release_lock;
		host->host_flags = d->host_flags;
		host->irq_flags = d->irq_flags;
	}

	return host;
}
EXPORT_SYMBOL_GPL(ide_host_alloc);

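/* Undo ide_port_alloc_devices() and ide_find_port_slot() for one port. */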
static void ide_port_free(ide_hwif_t *hwif)
{
	ide_port_free_devices(hwif);
	ide_free_port_slot(hwif->index);
	kfree(hwif);
}

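/*
 * Detach a port that failed to register or initialize from its host and
 * free it.
 */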
static void ide_disable_port(ide_hwif_t *hwif)
{
	struct ide_host *host = hwif->host;
	int i;

	printk(KERN_INFO "%s: disabling port\n", hwif->name);

	for (i = 0; i < MAX_HOST_PORTS; i++) {
		if (host->ports[i] == hwif) {
			host->ports[i] = NULL;
			host->n_ports--;
		}
	}

	ide_port_free(hwif);
}

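/**
 * ide_host_register - set up and register an IDE host
 * @host: host allocated by ide_host_alloc()
 * @d: IDE port info (may be NULL)
 * @hws: hardware descriptions for the ports, as passed to ide_host_alloc()
 *
 * Apply the hardware and chipset settings to every port of @host, probe
 * for devices, and register the ports and any devices found with the
 * driver core, sysfs and procfs.  Ports that fail to initialize are
 * disabled and freed.  Returns 0 if at least one port was brought up,
 * -1 otherwise.
 */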
int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
		      struct ide_hw **hws)
{
	ide_hwif_t *hwif, *mate = NULL;
	int i, j = 0;

	pr_warn("legacy IDE will be removed in 2021, please switch to libata\n"
		"Report any missing HW support to linux-ide@vger.kernel.org\n");

	ide_host_for_each_port(i, hwif, host) {
		if (hwif == NULL) {
			mate = NULL;
			continue;
		}

		ide_init_port_hw(hwif, hws[i]);
		ide_port_apply_params(hwif);

		if ((i & 1) && mate) {
			hwif->mate = mate;
			mate->mate = hwif;
		}

		mate = (i & 1) ? NULL : hwif;

		ide_init_port(hwif, i & 1, d);
		ide_port_cable_detect(hwif);

		hwif->port_flags |= IDE_PFLAG_PROBING;

		ide_port_init_devices(hwif);
	}

	ide_host_for_each_port(i, hwif, host) {
		if (hwif == NULL)
			continue;

		if (ide_probe_port(hwif) == 0)
			hwif->present = 1;

		hwif->port_flags &= ~IDE_PFLAG_PROBING;

		if ((hwif->host_flags & IDE_HFLAG_4DRIVES) == 0 ||
		    hwif->mate == NULL || hwif->mate->present == 0) {
			if (ide_register_port(hwif)) {
				ide_disable_port(hwif);
				continue;
			}
		}

		if (hwif->present)
			ide_port_tune_devices(hwif);
	}

	ide_host_enable_irqs(host);

	ide_host_for_each_port(i, hwif, host) {
		if (hwif == NULL)
			continue;

		if (hwif_init(hwif) == 0) {
			printk(KERN_INFO "%s: failed to initialize IDE "
					 "interface\n", hwif->name);
			device_unregister(hwif->portdev);
			device_unregister(&hwif->gendev);
			ide_disable_port(hwif);
			continue;
		}

		if (hwif->present)
			if (ide_port_setup_devices(hwif) == 0) {
				hwif->present = 0;
				continue;
			}

		j++;

		ide_acpi_init_port(hwif);

		if (hwif->present)
			ide_acpi_port_init_devices(hwif);
	}

	ide_host_for_each_port(i, hwif, host) {
		if (hwif == NULL)
			continue;

		ide_sysfs_register_port(hwif);
		ide_proc_register_port(hwif);

		if (hwif->present) {
			ide_proc_port_register_devices(hwif);
			hwif_register_devices(hwif);
		}
	}

	return j ? 0 : -1;
}
EXPORT_SYMBOL_GPL(ide_host_register);

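/**
 * ide_host_add - allocate and register an IDE host
 * @d: IDE port info (may be NULL)
 * @hws: array of hardware port descriptions
 * @n_ports: number of entries in @hws
 * @hostp: if non-NULL, where to store the new host on success
 *
 * Convenience wrapper around ide_host_alloc() and ide_host_register().
 * Returns 0 on success or a negative error code.
 *
 * A rough sketch of how a single-port host driver might use this; the
 * port_info contents, I/O base, IRQ and device pointer below are
 * placeholders rather than values from any particular driver:
 *
 *	struct ide_hw hw, *hws[] = { &hw };
 *	int rc;
 *
 *	memset(&hw, 0, sizeof(hw));
 *	ide_std_init_ports(&hw, base, base + 0x206);
 *	hw.irq = irq;
 *	hw.dev = &pdev->dev;
 *
 *	rc = ide_host_add(&my_port_info, hws, 1, NULL);
 */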
int ide_host_add(const struct ide_port_info *d, struct ide_hw **hws,
		 unsigned int n_ports, struct ide_host **hostp)
{
	struct ide_host *host;
	int rc;

	host = ide_host_alloc(d, hws, n_ports);
	if (host == NULL)
		return -ENOMEM;

	rc = ide_host_register(host, d, hws);
	if (rc) {
		ide_host_free(host);
		return rc;
	}

	if (hostp)
		*hostp = host;

	return 0;
}
EXPORT_SYMBOL_GPL(ide_host_add);

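/*
 * Unregister every probed device on a port.  The __ variant expects the
 * caller to hold ide_cfg_mtx; ide_port_unregister_devices() takes it and
 * also resets the per-drive data so the port can be re-probed later.
 */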
static void __ide_port_unregister_devices(ide_hwif_t *hwif)
{
	ide_drive_t *drive;
	int i;

	ide_port_for_each_present_dev(i, drive, hwif) {
		device_unregister(&drive->gendev);
		wait_for_completion(&drive->gendev_rel_comp);
	}
}

void ide_port_unregister_devices(ide_hwif_t *hwif)
{
	mutex_lock(&ide_cfg_mtx);
	__ide_port_unregister_devices(hwif);
	hwif->present = 0;
	ide_port_init_devices_data(hwif);
	mutex_unlock(&ide_cfg_mtx);
}
EXPORT_SYMBOL_GPL(ide_port_unregister_devices);

/**
 * ide_unregister - free an IDE interface
 * @hwif: IDE interface
 *
 * Perform the final unregister of an IDE interface.
 *
 * Locking:
 *	The caller must not hold the IDE locks.
 *
 * It is up to the caller to be sure there is no pending I/O here,
 * and that the interface will not be reopened (present/vanishing
 * locking is not implemented yet).
 */

static void ide_unregister(ide_hwif_t *hwif)
{
	BUG_ON(in_interrupt());
	BUG_ON(irqs_disabled());

	mutex_lock(&ide_cfg_mtx);

	if (hwif->present) {
		__ide_port_unregister_devices(hwif);
		hwif->present = 0;
	}

	ide_proc_unregister_port(hwif);

	if (!hwif->host->get_lock)
		free_irq(hwif->irq, hwif);

	device_unregister(hwif->portdev);
	device_unregister(&hwif->gendev);
	wait_for_completion(&hwif->gendev_rel_comp);

	/*
	 * Remove us from the kernel's knowledge
	 */
	blk_unregister_region(MKDEV(hwif->major, 0), MAX_DRIVES << PARTN_BITS);
	kfree(hwif->sg_table);
	unregister_blkdev(hwif->major, hwif->name);

	ide_release_dma_engine(hwif);

	mutex_unlock(&ide_cfg_mtx);
}

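/*
 * Release every remaining port of @host (drive data and port slot) and
 * then the host itself.  Ports must be unregistered or never have been
 * registered before this is called.
 */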
void ide_host_free(struct ide_host *host)
{
	ide_hwif_t *hwif;
	int i;

	ide_host_for_each_port(i, hwif, host) {
		if (hwif)
			ide_port_free(hwif);
	}

	kfree(host);
}
EXPORT_SYMBOL_GPL(ide_host_free);

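/* Unregister every port of @host and free the host. */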
void ide_host_remove(struct ide_host *host)
{
	ide_hwif_t *hwif;
	int i;

	ide_host_for_each_port(i, hwif, host) {
		if (hwif)
			ide_unregister(hwif);
	}

	ide_host_free(host);
}
EXPORT_SYMBOL_GPL(ide_host_remove);

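/*
 * Re-probe a single, already registered port: re-apply the port
 * parameters, redo cable detection, probe for devices and, if any are
 * found, tune, set up and register them.  Intended for hosts that rescan
 * a port after the initial registration.
 */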
void ide_port_scan(ide_hwif_t *hwif)
{
	int rc;

	ide_port_apply_params(hwif);
	ide_port_cable_detect(hwif);

	hwif->port_flags |= IDE_PFLAG_PROBING;

	ide_port_init_devices(hwif);

	rc = ide_probe_port(hwif);

	hwif->port_flags &= ~IDE_PFLAG_PROBING;

	if (rc < 0)
		return;

	hwif->present = 1;

	ide_port_tune_devices(hwif);
	ide_port_setup_devices(hwif);
	ide_acpi_port_init_devices(hwif);
	hwif_register_devices(hwif);
	ide_proc_port_register_devices(hwif);
}
EXPORT_SYMBOL_GPL(ide_port_scan);