1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (c) 1999-2001 Vojtech Pavlik
4*4882a593Smuzhiyun * Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Should you need to contact me, the author, you can do so either by
7*4882a593Smuzhiyun * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
8*4882a593Smuzhiyun * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
9*4882a593Smuzhiyun */
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <linux/kernel.h>
12*4882a593Smuzhiyun #include <linux/ide.h>
13*4882a593Smuzhiyun #include <linux/module.h>
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun /*
16*4882a593Smuzhiyun * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
17*4882a593Smuzhiyun * These were taken from ATA/ATAPI-6 standard, rev 0a, except
18*4882a593Smuzhiyun * for PIO 5, which is a nonstandard extension and UDMA6, which
19*4882a593Smuzhiyun * is currently supported only by Maxtor drives.
20*4882a593Smuzhiyun */
21*4882a593Smuzhiyun
/*
 * Per-mode timing table, terminated by the 0xff sentinel mode that
 * ide_timing_find_mode() checks for.
 *
 * Column order follows the initializers consumed by ide_timing_quantize()
 * below: mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma.
 * NOTE(review): field names inferred from the accessors in this file —
 * confirm against the struct ide_timing declaration in <linux/ide.h>.
 * All values are in nanoseconds; 0 means "not applicable to this mode"
 * (e.g. UDMA modes only specify the udma cycle, PIO/MWDMA never set udma).
 */
static struct ide_timing ide_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 },

	{ 0xff }	/* sentinel: end of table */
};
56*4882a593Smuzhiyun
/*
 * ide_timing_find_mode - look up the timing entry for a transfer mode
 * @speed: XFER_* mode value to search for
 *
 * Walks the static ide_timing[] table and returns a pointer to the
 * matching entry, or NULL if @speed is not listed (the table ends with
 * a 0xff sentinel mode).
 */
struct ide_timing *ide_timing_find_mode(u8 speed)
{
	struct ide_timing *t = ide_timing;

	while (t->mode != speed) {
		if (t->mode == 0xff)	/* hit the sentinel: no such mode */
			return NULL;
		t++;
	}

	return t;
}
EXPORT_SYMBOL_GPL(ide_timing_find_mode);
67*4882a593Smuzhiyun
ide_pio_cycle_time(ide_drive_t * drive,u8 pio)68*4882a593Smuzhiyun u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun u16 *id = drive->id;
71*4882a593Smuzhiyun struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
72*4882a593Smuzhiyun u16 cycle = 0;
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun if (id[ATA_ID_FIELD_VALID] & 2) {
75*4882a593Smuzhiyun if (ata_id_has_iordy(drive->id))
76*4882a593Smuzhiyun cycle = id[ATA_ID_EIDE_PIO_IORDY];
77*4882a593Smuzhiyun else
78*4882a593Smuzhiyun cycle = id[ATA_ID_EIDE_PIO];
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun /* conservative "downgrade" for all pre-ATA2 drives */
81*4882a593Smuzhiyun if (pio < 3 && cycle < t->cycle)
82*4882a593Smuzhiyun cycle = 0; /* use standard timing */
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun /* Use the standard timing for the CF specific modes too */
85*4882a593Smuzhiyun if (pio > 4 && ata_id_is_cfa(id))
86*4882a593Smuzhiyun cycle = 0;
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun return cycle ? cycle : t->cycle;
90*4882a593Smuzhiyun }
91*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(ide_pio_cycle_time);
92*4882a593Smuzhiyun
/* Divide v by unit, rounding up (ceiling division; valid for v >= 1). */
#define ENOUGH(v, unit)	(((v) - 1) / (unit) + 1)
/*
 * Convert a timing value v to clock counts of period 'unit', rounding up;
 * a zero ("not specified") input stays zero.  v is scaled by 1000 before
 * dividing — presumably v is in ns and 'unit' in ps; confirm against the
 * T/UT arguments passed by callers of ide_timing_quantize().
 */
#define EZ(v, unit)	((v) ? ENOUGH((v) * 1000, unit) : 0)
95*4882a593Smuzhiyun
/*
 * ide_timing_quantize - convert nanosecond timings to bus clock counts
 * @t:  input timing (nanoseconds)
 * @q:  output timing (clock counts, each rounded up via EZ())
 * @T:  clock period used for the PIO/MWDMA fields
 * @UT: clock period used for the udma field
 *
 * @t and @q may alias (ide_timing_compute() calls this with the same
 * pointer for both); that is safe because each field is read exactly
 * once, immediately before its own overwrite.
 */
static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q,
				int T, int UT)
{
	q->setup   = EZ(t->setup,   T);
	q->act8b   = EZ(t->act8b,   T);
	q->rec8b   = EZ(t->rec8b,   T);
	q->cyc8b   = EZ(t->cyc8b,   T);
	q->active  = EZ(t->active,  T);
	q->recover = EZ(t->recover, T);
	q->cycle   = EZ(t->cycle,   T);
	q->udma    = EZ(t->udma,    UT);	/* UDMA uses its own clock */
}
108*4882a593Smuzhiyun
ide_timing_merge(struct ide_timing * a,struct ide_timing * b,struct ide_timing * m,unsigned int what)109*4882a593Smuzhiyun void ide_timing_merge(struct ide_timing *a, struct ide_timing *b,
110*4882a593Smuzhiyun struct ide_timing *m, unsigned int what)
111*4882a593Smuzhiyun {
112*4882a593Smuzhiyun if (what & IDE_TIMING_SETUP)
113*4882a593Smuzhiyun m->setup = max(a->setup, b->setup);
114*4882a593Smuzhiyun if (what & IDE_TIMING_ACT8B)
115*4882a593Smuzhiyun m->act8b = max(a->act8b, b->act8b);
116*4882a593Smuzhiyun if (what & IDE_TIMING_REC8B)
117*4882a593Smuzhiyun m->rec8b = max(a->rec8b, b->rec8b);
118*4882a593Smuzhiyun if (what & IDE_TIMING_CYC8B)
119*4882a593Smuzhiyun m->cyc8b = max(a->cyc8b, b->cyc8b);
120*4882a593Smuzhiyun if (what & IDE_TIMING_ACTIVE)
121*4882a593Smuzhiyun m->active = max(a->active, b->active);
122*4882a593Smuzhiyun if (what & IDE_TIMING_RECOVER)
123*4882a593Smuzhiyun m->recover = max(a->recover, b->recover);
124*4882a593Smuzhiyun if (what & IDE_TIMING_CYCLE)
125*4882a593Smuzhiyun m->cycle = max(a->cycle, b->cycle);
126*4882a593Smuzhiyun if (what & IDE_TIMING_UDMA)
127*4882a593Smuzhiyun m->udma = max(a->udma, b->udma);
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(ide_timing_merge);
130*4882a593Smuzhiyun
/**
 * ide_timing_compute - compute quantized bus timings for a drive/mode pair
 * @drive:	target drive
 * @speed:	XFER_* transfer mode to compute timings for
 * @t:		output: final timing, in bus clock counts
 * @T:		clock period used to quantize the PIO/MWDMA fields
 * @UT:		clock period used to quantize the UDMA field
 *
 * Starts from the standard table entry for @speed, widens it with any
 * extended cycle timings an EIDE drive reports, quantizes to clock
 * counts, merges in the current PIO timing for DMA modes, and finally
 * stretches active/recovery so they fill the full cycle.
 *
 * Returns 0 on success or -EINVAL if @speed has no table entry.
 */
int ide_timing_compute(ide_drive_t *drive, u8 speed,
		       struct ide_timing *t, int T, int UT)
{
	u16 *id = drive->id;
	struct ide_timing *s, p;

	/*
	 * Find the mode.
	 */
	s = ide_timing_find_mode(speed);
	if (s == NULL)
		return -EINVAL;

	/*
	 * Copy the timing from the table.
	 */
	*t = *s;

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MWDMA cycle timing.
	 */
	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			/*
			 * PIO 0-2 use the no-IORDY word; faster PIO modes
			 * (except the CF-specific PIO5) use the IORDY word.
			 */
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		/* Merge only slows us down: keep the larger cycle values. */
		ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ide_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the current PIO timing.
	 *
	 * NOTE(review): this recurses once with drive->pio_mode; it only
	 * terminates if pio_mode is an actual PIO XFER_* value (below
	 * XFER_SW_DMA_0) — confirm that invariant holds for all callers.
	 */
	if (speed >= XFER_SW_DMA_0) {
		ide_timing_compute(drive, drive->pio_mode, &p, T, UT);
		ide_timing_merge(&p, t, t, IDE_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 * The slack is split between the two, with recovery absorbing any
	 * odd remainder.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ide_timing_compute);
198*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(ide_timing_compute);
199