// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Helper library for PATA timings
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libata.h>

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik.
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from the ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 and MWDMA 3/4 see the CFA specification 3.0.
 */
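/*
 * Each entry below maps onto struct ata_timing from <linux/libata.h>;
 * the columns are, in order: mode, setup, act8b, rec8b, cyc8b, active,
 * recover, dmack_hold, cycle and udma (the same order in which
 * ata_timing_quantize() below walks the fields).
 */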

static const struct ata_timing ata_timing[] = {
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50,  5, 150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25,  5, 120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25,  5, 100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  5,  80,   0 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,  0,   0, 150 }, */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,  0,   0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,  0,   0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,  0,   0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,  0,   0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,  0,   0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,  0,   0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,  0,   0,  15 },

	{ 0xFF }
};

#define ENOUGH(v, unit)	(((v)-1)/(unit)+1)
#define EZ(v, unit)	((v)?ENOUGH(((v) * 1000), unit):0)
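
/*
 * ENOUGH() is a ceiling division; EZ() converts a nanosecond value into a
 * whole number of clock periods, keeping zero ("no requirement") as zero.
 * A worked example, assuming the caller passes the clock period in
 * picoseconds (hence the * 1000 applied to the nanosecond inputs): on a
 * 33 MHz bus, T = 1000000000 / 33333 = ~30000 ps, so an active time of
 * 165 ns quantizes to EZ(165, 30000) = ENOUGH(165000, 30000) = 6 clocks.
 */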

static void ata_timing_quantize(const struct ata_timing *t,
				struct ata_timing *q, int T, int UT)
{
	q->setup	= EZ(t->setup,      T);
	q->act8b	= EZ(t->act8b,      T);
	q->rec8b	= EZ(t->rec8b,      T);
	q->cyc8b	= EZ(t->cyc8b,      T);
	q->active	= EZ(t->active,     T);
	q->recover	= EZ(t->recover,    T);
	q->dmack_hold	= EZ(t->dmack_hold, T);
	q->cycle	= EZ(t->cycle,      T);
	q->udma		= EZ(t->udma,       UT);
}

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP)
		m->setup = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B)
		m->act8b = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B)
		m->rec8b = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B)
		m->cyc8b = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE)
		m->active = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER)
		m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_DMACK_HOLD)
		m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
	if (what & ATA_TIMING_CYCLE)
		m->cycle = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA)
		m->udma = max(a->udma, b->udma);
}
EXPORT_SYMBOL_GPL(ata_timing_merge);
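
/*
 * An illustrative use of ata_timing_merge(): take the slower (worst case)
 * of a drive's and a controller's requirements, but only for the 8-bit
 * taskfile cycle fields. The variable names here are hypothetical:
 *
 *	ata_timing_merge(&drive_t, &ctrl_t, &res_t,
 *			 ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
 *			 ATA_TIMING_CYC8B);
 */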

const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
{
	const struct ata_timing *t = ata_timing;

	/*
	 * ata_timing[] is sorted by ascending mode number, and the 0xFF
	 * sentinel entry stops the scan for any mode not in the table.
	 */
	while (xfer_mode > t->mode)
		t++;

	if (xfer_mode == t->mode)
		return t;

	WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
		  __func__, xfer_mode);

	return NULL;
}
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
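
/*
 * Example lookup (illustrative; cycle_ns is a hypothetical local): fetch
 * the nanosecond timings for MWDMA2 and read its minimum cycle time,
 * which is 120 ns in the table above:
 *
 *	const struct ata_timing *t = ata_timing_find_mode(XFER_MW_DMA_2);
 *
 *	if (t)
 *		cycle_ns = t->cycle;
 */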

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const u16 *id = adev->id;
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	s = ata_timing_find_mode(speed);
	if (!s)
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}
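
	/*
	 * For example (illustrative): a drive running PIO2 (240 ns table
	 * cycle) whose id[ATA_ID_EIDE_PIO] word reports 383 ns ends up
	 * with t->cycle = t->cyc8b = 383 ns, since the merge above keeps
	 * the slower of the two requirements.
	 */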

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands. We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO
	 * timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/*
	 * In a few cases quantisation may produce enough errors to
	 * leave t->cycle too low for the sum of active and recovery;
	 * if so, we must correct this.
	 */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
EXPORT_SYMBOL_GPL(ata_timing_compute);
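
/*
 * A minimal usage sketch (hedged; loosely modelled on how PATA host
 * drivers call this, not copied from any one driver). A controller
 * running from a 33 MHz bus clock converts the device's programmed PIO
 * mode into register-ready clock counts, with T (and, here, UT) being
 * the clock period in picoseconds:
 *
 *	struct ata_timing t;
 *	int T = 1000000000 / 33333;	// ~30000 ps per clock
 *
 *	if (ata_timing_compute(adev, adev->pio_mode, &t, T, T) == 0) {
 *		// t.setup, t.active, t.recover etc. are now clock
 *		// counts, ready to be programmed into the controller.
 *	}
 */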