// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  sata_sx4.c - Promise SATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available under NDA.
 */

/*
   Theory of operation
   -------------------

   The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
   engine, DIMM memory, and four ATA engines (one per SATA port).
   Data is copied to/from DIMM memory by the HDMA engine, before
   handing off to one (or more) of the ATA engines.  The ATA
   engines operate solely on DIMM memory.

   The SX4 behaves like a PATA chip, with no SATA controls or
   knowledge whatsoever, leading to the presumption that
   PATA<->SATA bridges exist on SX4 boards, external to the
   PDC20621 chip itself.

   The chip is quite capable, supporting an XOR engine and linked
   hardware commands (permits a string of transactions to be
   submitted and waited-on as a single unit), and an optional
   microprocessor.

   The limiting factor is largely software.  This Linux driver was
   written to multiplex the single HDMA engine to copy disk
   transactions into a fixed DIMM memory space, from where an ATA
   engine takes over.  As a result, each WRITE looks like this:

	submit HDMA packet to hardware
	hardware copies data from system memory to DIMM
	hardware raises interrupt

	submit ATA packet to hardware
	hardware executes ATA WRITE command, w/ data in DIMM
	hardware raises interrupt

   and each READ looks like this:

	submit ATA packet to hardware
	hardware executes ATA READ command, w/ data in DIMM
	hardware raises interrupt

	submit HDMA packet to hardware
	hardware copies data from DIMM to system memory
	hardware raises interrupt

   This is a very slow, lock-step way of doing things that can
   certainly be improved by motivated kernel hackers.

 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "sata_promise.h"

#define DRV_NAME	"sata_sx4"
#define DRV_VERSION	"0.12"


enum {
	PDC_MMIO_BAR		= 3,
	PDC_DIMM_BAR		= 4,

	PDC_PRD_TBL		= 0x44,	/* Direct command DMA table addr */

	PDC_PKT_SUBMIT		= 0x40,  /* Command packet pointer addr */
	PDC_HDMA_PKT_SUBMIT	= 0x100, /* Host DMA packet pointer addr */
	PDC_INT_SEQMASK		= 0x40,	 /* Mask of asserted SEQ INTs */
	PDC_HDMA_CTLSTAT	= 0x12C, /* Host DMA control / status */

	PDC_CTLSTAT		= 0x60,	/* IDEn control / status */

	PDC_20621_SEQCTL	= 0x400,
	PDC_20621_SEQMASK	= 0x480,
	PDC_20621_GENERAL_CTL	= 0x484,
	PDC_20621_PAGE_SIZE	= (32 * 1024),

	/* chosen, not constant, values; we design our own DIMM mem map */
	PDC_20621_DIMM_WINDOW	= 0x0C,	/* page# for 32K DIMM window */
	PDC_20621_DIMM_BASE	= 0x00200000,
	PDC_20621_DIMM_DATA	= (64 * 1024),
	PDC_DIMM_DATA_STEP	= (256 * 1024),
	PDC_DIMM_WINDOW_STEP	= (8 * 1024),
	PDC_DIMM_HOST_PRD	= (6 * 1024),
	PDC_DIMM_HOST_PKT	= (128 * 0),
	PDC_DIMM_HPKT_PRD	= (128 * 1),
	PDC_DIMM_ATA_PKT	= (128 * 2),
	PDC_DIMM_APKT_PRD	= (128 * 3),
	PDC_DIMM_HEADER_SZ	= PDC_DIMM_APKT_PRD + 128,
	PDC_PAGE_WINDOW		= 0x40,
	PDC_PAGE_DATA		= PDC_PAGE_WINDOW +
				  (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
	PDC_PAGE_SET		= PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,

	PDC_CHIP0_OFS		= 0xC0000, /* offset of chip #0 */

	PDC_20621_ERR_MASK	= (1<<19) | (1<<20) | (1<<21) | (1<<22) |
				  (1<<23),

	board_20621		= 0,	/* FastTrak S150 SX4 */

	PDC_MASK_INT		= (1 << 10), /* HDMA/ATA mask int */
	PDC_RESET		= (1 << 11), /* HDMA/ATA reset */
	PDC_DMA_ENABLE		= (1 << 7),  /* DMA start/stop */

	PDC_MAX_HDMA		= 32,
	PDC_HDMA_Q_MASK		= (PDC_MAX_HDMA - 1),

	PDC_DIMM0_SPD_DEV_ADDRESS	= 0x50,
	PDC_DIMM1_SPD_DEV_ADDRESS	= 0x51,
	PDC_I2C_CONTROL			= 0x48,
	PDC_I2C_ADDR_DATA		= 0x4C,
	PDC_DIMM0_CONTROL		= 0x80,
	PDC_DIMM1_CONTROL		= 0x84,
	PDC_SDRAM_CONTROL		= 0x88,
	PDC_I2C_WRITE			= 0,		/* master -> slave */
	PDC_I2C_READ			= (1 << 6),	/* master <- slave */
	PDC_I2C_START			= (1 << 7),	/* start I2C proto */
	PDC_I2C_MASK_INT		= (1 << 5),	/* mask I2C interrupt */
	PDC_I2C_COMPLETE		= (1 << 16),	/* I2C normal compl. */
	PDC_I2C_NO_ACK			= (1 << 20),	/* slave no-ack addr */
	PDC_DIMM_SPD_SUBADDRESS_START	= 0x00,
	PDC_DIMM_SPD_SUBADDRESS_END	= 0x7F,
	PDC_DIMM_SPD_ROW_NUM		= 3,
	PDC_DIMM_SPD_COLUMN_NUM		= 4,
	PDC_DIMM_SPD_MODULE_ROW		= 5,
	PDC_DIMM_SPD_TYPE		= 11,
	PDC_DIMM_SPD_FRESH_RATE		= 12,
	PDC_DIMM_SPD_BANK_NUM		= 17,
	PDC_DIMM_SPD_CAS_LATENCY	= 18,
	PDC_DIMM_SPD_ATTRIBUTE		= 21,
	PDC_DIMM_SPD_ROW_PRE_CHARGE	= 27,
	PDC_DIMM_SPD_ROW_ACTIVE_DELAY	= 28,
	PDC_DIMM_SPD_RAS_CAS_DELAY	= 29,
	PDC_DIMM_SPD_ACTIVE_PRECHARGE	= 30,
	PDC_DIMM_SPD_SYSTEM_FREQ	= 126,
	PDC_CTL_STATUS			= 0x08,
	PDC_DIMM_WINDOW_CTLR		= 0x0C,
	PDC_TIME_CONTROL		= 0x3C,
	PDC_TIME_PERIOD			= 0x40,
	PDC_TIME_COUNTER		= 0x44,
	PDC_GENERAL_CTLR		= 0x484,
	PCI_PLL_INIT			= 0x8A531824,
	PCI_X_TCOUNT			= 0xEE1E5CFF,

	/* PDC_TIME_CONTROL bits */
	PDC_TIMER_BUZZER		= (1 << 10),
	PDC_TIMER_MODE_PERIODIC		= 0,		/* bits 9:8 == 00 */
	PDC_TIMER_MODE_ONCE		= (1 << 8),	/* bits 9:8 == 01 */
	PDC_TIMER_ENABLE		= (1 << 7),
	PDC_TIMER_MASK_INT		= (1 << 5),
	PDC_TIMER_SEQ_MASK		= 0x1f,		/* SEQ ID for timer */
	PDC_TIMER_DEFAULT		= PDC_TIMER_MODE_ONCE |
					  PDC_TIMER_ENABLE |
					  PDC_TIMER_MASK_INT,
};

#define ECC_ERASE_BUF_SZ (128 * 1024)

struct pdc_port_priv {
	u8			dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
	u8			*pkt;
	dma_addr_t		pkt_dma;
};

struct pdc_host_priv {
	unsigned int		doing_hdma;
	unsigned int		hdma_prod;
	unsigned int		hdma_cons;
	struct {
		struct ata_queued_cmd	*qc;
		unsigned int		seq;
		unsigned long		pkt_ofs;
	} hdma[32];
};


static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void pdc_error_handler(struct ata_port *ap);
static void pdc_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static int pdc_port_start(struct ata_port *ap);
static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static unsigned int pdc20621_dimm_init(struct ata_host *host);
static int pdc20621_detect_dimm(struct ata_host *host);
static unsigned int pdc20621_i2c_read(struct ata_host *host,
				      u32 device, u32 subaddr, u32 *pdata);
static int pdc20621_prog_dimm0(struct ata_host *host);
static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host,
				   void *psource, u32 offset, u32 size);
#endif
static void pdc20621_put_to_dimm(struct ata_host *host,
				 void *psource, u32 offset, u32 size);
static void pdc20621_irq_clear(struct ata_port *ap);
static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
static int pdc_softreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline);
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);


static struct scsi_host_template pdc_sata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
};

/* TODO: inherit from base port_ops after converting to new EH */
static struct ata_port_operations pdc_20621_ops = {
	.inherits		= &ata_sff_port_ops,

	.check_atapi_dma	= pdc_check_atapi_dma,
	.qc_prep		= pdc20621_qc_prep,
	.qc_issue		= pdc20621_qc_issue,

	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.softreset		= pdc_softreset,
	.error_handler		= pdc_error_handler,
	.lost_interrupt		= ATA_OP_NULL,
	.post_internal_cmd	= pdc_post_internal_cmd,

	.port_start		= pdc_port_start,

	.sff_tf_load		= pdc_tf_load_mmio,
	.sff_exec_command	= pdc_exec_command_mmio,
	.sff_irq_clear		= pdc20621_irq_clear,
};

static const struct ata_port_info pdc_port_info[] = {
	/* board_20621 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_20621_ops,
	},

};

static const struct pci_device_id pdc_sata_pci_tbl[] = {
	{ PCI_VDEVICE(PROMISE, 0x6622), board_20621 },

	{ }	/* terminate list */
};

static struct pci_driver pdc_sata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= pdc_sata_pci_tbl,
	.probe			= pdc_sata_init_one,
	.remove			= ata_pci_remove_one,
};

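/*
 * Per-port setup: allocate the private data, including a small 128-byte
 * coherent DMA buffer for command packets.  Both allocations are
 * device-managed, so there is no matching teardown path here.
 */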
static int pdc_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct pdc_port_priv *pp;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;

	ap->private_data = pp;

	return 0;
}

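/*
 * pdc20621_ata_sg() and pdc20621_host_sg() each write a single-entry
 * PRD (S/G) table into the per-port header area of the staging buffer.
 * Both entries point at the same per-port data region in DIMM memory
 * (PDC_20621_DIMM_DATA + portno * PDC_DIMM_DATA_STEP); the ATA engine
 * and the HDMA engine simply address it from opposite ends of the copy.
 */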
static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
				   unsigned int total_len)
{
	u32 addr;
	unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
	__le32 *buf32 = (__le32 *) buf;

	/* output ATA packet S/G table */
	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
	       (PDC_DIMM_DATA_STEP * portno);
	VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
	buf32[dw] = cpu_to_le32(addr);
	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);

	VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
		PDC_20621_DIMM_BASE +
		(PDC_DIMM_WINDOW_STEP * portno) +
		PDC_DIMM_APKT_PRD,
		buf32[dw], buf32[dw + 1]);
}

static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
				    unsigned int total_len)
{
	u32 addr;
	unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
	__le32 *buf32 = (__le32 *) buf;

	/* output Host DMA packet S/G table */
	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
	       (PDC_DIMM_DATA_STEP * portno);

	buf32[dw] = cpu_to_le32(addr);
	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);

	VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
		PDC_20621_DIMM_BASE +
		(PDC_DIMM_WINDOW_STEP * portno) +
		PDC_DIMM_HPKT_PRD,
		buf32[dw], buf32[dw + 1]);
}

static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
					    unsigned int devno, u8 *buf,
					    unsigned int portno)
{
	unsigned int i, dw;
	__le32 *buf32 = (__le32 *) buf;
	u8 dev_reg;

	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_APKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);

	i = PDC_DIMM_ATA_PKT;

	/*
	 * Set up ATA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		buf[i++] = PDC_PKT_READ;
	else if (tf->protocol == ATA_PROT_NODATA)
		buf[i++] = PDC_PKT_NODATA;
	else
		buf[i++] = 0;
	buf[i++] = 0;			/* reserved */
	buf[i++] = portno + 1;		/* seq. id */
	buf[i++] = 0xff;		/* delay seq. id */

	/* dimm dma S/G, and next-pkt */
	dw = i >> 2;
	if (tf->protocol == ATA_PROT_NODATA)
		buf32[dw] = 0;
	else
		buf32[dw] = cpu_to_le32(dimm_sg);
	buf32[dw + 1] = 0;
	i += 8;

	if (devno == 0)
		dev_reg = ATA_DEVICE_OBS;
	else
		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;

	/* select device */
	buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
	buf[i++] = dev_reg;

	/* device control register */
	buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
	buf[i++] = tf->ctl;

	return i;
}

static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
				     unsigned int portno)
{
	unsigned int dw;
	u32 tmp;
	__le32 *buf32 = (__le32 *) buf;

	unsigned int host_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HOST_PRD;
	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HPKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
	VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);

	dw = PDC_DIMM_HOST_PKT >> 2;

	/*
	 * Set up Host DMA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		tmp = PDC_PKT_READ;
	else
		tmp = 0;
	tmp |= ((portno + 1 + 4) << 16);	/* seq. id */
	tmp |= (0xff << 24);			/* delay seq. id */
	buf32[dw + 0] = cpu_to_le32(tmp);
	buf32[dw + 1] = cpu_to_le32(host_sg);
	buf32[dw + 2] = cpu_to_le32(dimm_sg);
	buf32[dw + 3] = 0;

	VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
		PDC_DIMM_HOST_PKT,
		buf32[dw + 0],
		buf32[dw + 1],
		buf32[dw + 2],
		buf32[dw + 3]);
}

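/*
 * Build everything a DMA command needs in the per-port staging buffer:
 * the host-side PRD table describing the caller's scatterlist, the ATA
 * and Host DMA S/G tables, and the ATA and Host DMA packets themselves.
 * The header and the host PRD table are then copied through the DIMM
 * MMIO window so the engines can fetch them from DIMM memory.
 */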
static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i, si, idx, total_len = 0, sgt_len;
	__le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Build S/G table
	 */
	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
		total_len += sg_dma_len(sg);
	}
	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
	sgt_len = idx * 4;

	/*
	 * Build ATA, host DMA packets
	 */
	pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
	pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);

	pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
		    PDC_DIMM_HOST_PRD,
		    &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
}

static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
}

static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pdc20621_dma_prep(qc);
		break;
	case ATA_PROT_NODATA:
		pdc20621_nodata_prep(qc);
		break;
	default:
		break;
	}

	return AC_ERR_OK;
}

static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
				 unsigned int seq,
				 u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
	readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

	writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
	readl(mmio + PDC_HDMA_PKT_SUBMIT);	/* flush */
}

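/*
 * Only one Host DMA transaction is in flight at a time.
 * pdc20621_push_hdma() submits immediately if the engine is idle,
 * otherwise it parks the request in a small producer/consumer ring in
 * the host private data.  pdc20621_pop_hdma() is called from the
 * interrupt path once the current HDMA step is done and either starts
 * the next queued request or marks the engine idle again.
 */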
static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
			       unsigned int seq,
			       u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;

	if (!pp->doing_hdma) {
		__pdc20621_push_hdma(qc, seq, pkt_ofs);
		pp->doing_hdma = 1;
		return;
	}

	pp->hdma[idx].qc = qc;
	pp->hdma[idx].seq = seq;
	pp->hdma[idx].pkt_ofs = pkt_ofs;
	pp->hdma_prod++;
}

static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;

	/* if nothing on queue, we're done */
	if (pp->hdma_prod == pp->hdma_cons) {
		pp->doing_hdma = 0;
		return;
	}

	__pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
			     pp->hdma[idx].pkt_ofs);
	pp->hdma_cons++;
}

#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int port_no = ap->port_no;
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];

	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
	dimm_mmio += PDC_DIMM_HOST_PKT;

	printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
	printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
	printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
	printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
}
#else
static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
#endif /* ATA_VERBOSE_DEBUG */

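/*
 * Kick off a prepared command.  A DMA WRITE starts with the Host DMA
 * copy into DIMM memory (sequence IDs 5-8); everything else goes
 * straight to the ATA engine (sequence IDs 1-4).  The matching second
 * step is issued from the interrupt handler below.
 */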
static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	unsigned int port_no = ap->port_no;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 seq = (u8) (port_no + 1);
	unsigned int port_ofs;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	wmb();			/* flush PRD, pkt writes */

	port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);

	/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
		seq += 4;

		pdc20621_dump_hdma(qc);
		pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
		VPRINTK("queued ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_HOST_PKT,
			port_ofs + PDC_DIMM_HOST_PKT,
			seq);
	} else {
		writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
		readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

		writel(port_ofs + PDC_DIMM_ATA_PKT,
		       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_ATA_PKT,
			port_ofs + PDC_DIMM_ATA_PKT,
			seq);
	}
}

static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			break;
		fallthrough;
	case ATA_PROT_DMA:
		pdc20621_packet_start(qc);
		return 0;

	case ATAPI_PROT_DMA:
		BUG();
		break;

	default:
		break;
	}

	return ata_sff_qc_issue(qc);
}

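/*
 * Advance a command through the two-step READ/WRITE sequence described
 * in the theory of operation above.  For a READ the ATA completion
 * triggers the HDMA copy to host memory and the HDMA completion ends
 * the command; for a WRITE the HDMA completion triggers the ATA packet
 * and the ATA completion ends the command.  NODATA commands complete
 * in a single step.
 */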
static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
					      struct ata_queued_cmd *qc,
					      unsigned int doing_hdma,
					      void __iomem *mmio)
{
	unsigned int port_no = ap->port_no;
	unsigned int port_ofs =
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
	u8 status;
	unsigned int handled = 0;

	VPRINTK("ENTER\n");

	if ((qc->tf.protocol == ATA_PROT_DMA) &&	/* read */
	    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {

		/* step two - DMA from DIMM to host */
		if (doing_hdma) {
			VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}

		/* step one - exec ATA command */
		else {
			u8 seq = (u8) (port_no + 1 + 4);
			VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit hdma pkt */
			pdc20621_dump_hdma(qc);
			pdc20621_push_hdma(qc, seq,
					   port_ofs + PDC_DIMM_HOST_PKT);
		}
		handled = 1;

	} else if (qc->tf.protocol == ATA_PROT_DMA) {	/* write */

		/* step one - DMA from host to DIMM */
		if (doing_hdma) {
			u8 seq = (u8) (port_no + 1);
			VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit ata pkt */
			writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
			readl(mmio + PDC_20621_SEQCTL + (seq * 4));
			writel(port_ofs + PDC_DIMM_ATA_PKT,
			       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
			readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		}

		/* step two - execute ATA command */
		else {
			VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}
		handled = 1;

	/* command completion, but no data xfer */
	} else if (qc->tf.protocol == ATA_PROT_NODATA) {

		status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
		DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
		handled = 1;

	} else {
		ap->stats.idle_irq++;
	}

	return handled;
}

static void pdc20621_irq_clear(struct ata_port *ap)
{
	ioread8(ap->ioaddr.status_addr);
}

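/*
 * Top-level interrupt handler.  Reading PDC_20621_SEQMASK returns (and,
 * per the in-code comment, should also clear) the pending sequence-ID
 * bits; IDs 1-4 map to the four ATA engines and 5-8 to the Host DMA
 * completions for the same ports, which is why the loop passes (i > 4)
 * as the doing_hdma flag.
 */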
static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ata_port *ap;
	u32 mask = 0;
	unsigned int i, tmp, port_no;
	unsigned int handled = 0;
	void __iomem *mmio_base;

	VPRINTK("ENTER\n");

	if (!host || !host->iomap[PDC_MMIO_BAR]) {
		VPRINTK("QUICK EXIT\n");
		return IRQ_NONE;
	}

	mmio_base = host->iomap[PDC_MMIO_BAR];

	/* reading should also clear interrupts */
	mmio_base += PDC_CHIP0_OFS;
	mask = readl(mmio_base + PDC_20621_SEQMASK);
	VPRINTK("mask == 0x%x\n", mask);

	if (mask == 0xffffffff) {
		VPRINTK("QUICK EXIT 2\n");
		return IRQ_NONE;
	}
	mask &= 0xffff;		/* only 16 tags possible */
	if (!mask) {
		VPRINTK("QUICK EXIT 3\n");
		return IRQ_NONE;
	}

	spin_lock(&host->lock);

	for (i = 1; i < 9; i++) {
		port_no = i - 1;
		if (port_no > 3)
			port_no -= 4;
		if (port_no >= host->n_ports)
			ap = NULL;
		else
			ap = host->ports[port_no];
		tmp = mask & (1 << i);
		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
		if (tmp && ap) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += pdc20621_host_intr(ap, qc, (i > 4),
							      mmio_base);
		}
	}

	spin_unlock(&host->lock);

	VPRINTK("mask == 0x%x\n", mask);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

static void pdc_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */

	tmp = readl(mmio + PDC_CTLSTAT);
	tmp |= PDC_MASK_INT;
	tmp &= ~PDC_DMA_ENABLE;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT);	/* flush */
}

static void pdc_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: start HDMA engine, if zero ATA engines running */

	/* clear IRQ */
	ioread8(ap->ioaddr.status_addr);

	/* turn IRQ back on */
	tmp = readl(mmio + PDC_CTLSTAT);
	tmp &= ~PDC_MASK_INT;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT);	/* flush */
}

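/*
 * Assert and then release the per-port reset bit in PDC_CTLSTAT,
 * polling (up to roughly a millisecond) for the assertion to be
 * visible before clearing it again.
 */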
static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	/* FIXME: handle HDMA copy engine */

	for (i = 11; i > 0; i--) {
		tmp = readl(mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);

		tmp |= PDC_RESET;
		writel(tmp, mmio);
	}

	tmp &= ~PDC_RESET;
	writel(tmp, mmio);
	readl(mmio);	/* flush */
}

static int pdc_softreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline)
{
	pdc_reset_port(link->ap);
	return ata_sff_softreset(link, class, deadline);
}

static void pdc_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN))
		pdc_reset_port(ap);

	ata_sff_error_handler(ap);
}

static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		pdc_reset_port(ap);
}

static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
{
	u8 *scsicmd = qc->scsicmd->cmnd;
	int pio = 1; /* atapi dma off by default */

	/* Whitelist commands that may use DMA. */
	switch (scsicmd[0]) {
	case WRITE_12:
	case WRITE_10:
	case WRITE_6:
	case READ_12:
	case READ_10:
	case READ_6:
	case 0xad: /* READ_DVD_STRUCTURE */
	case 0xbe: /* READ_CD */
		pio = 0;
	}
	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
	if (scsicmd[0] == WRITE_10) {
		unsigned int lba =
			(scsicmd[2] << 24) |
			(scsicmd[3] << 16) |
			(scsicmd[4] << 8) |
			scsicmd[5];
		if (lba >= 0xFFFF4FA2)
			pio = 1;
	}
	return pio;
}

static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA ||
		tf->protocol == ATAPI_PROT_DMA);
	ata_sff_tf_load(ap, tf);
}


static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA ||
		tf->protocol == ATAPI_PROT_DMA);
	ata_sff_exec_command(ap, tf);
}


static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		= base;
	port->data_addr		= base;
	port->feature_addr	=
	port->error_addr	= base + 0x4;
	port->nsect_addr	= base + 0x8;
	port->lbal_addr		= base + 0xc;
	port->lbam_addr		= base + 0x10;
	port->lbah_addr		= base + 0x14;
	port->device_addr	= base + 0x18;
	port->command_addr	=
	port->status_addr	= base + 0x1c;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x38;
}


#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
				   u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4; /* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);
	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);

	offset -= (idx * window_size);
	idx++;
	dist = ((long) (window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_fromio(psource, dimm_mmio + offset / 4, dist);

	psource += dist;
	size -= dist;
	for (; (long) size >= (long) window_size ;) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio(psource, dimm_mmio, window_size / 4);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	if (size) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio(psource, dimm_mmio, size / 4);
	}
}
#endif


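/*
 * Copy a buffer into DIMM memory through the 32K MMIO window: the
 * window control register selects which 32K page of DIMM space the
 * PDC_DIMM_BAR aperture exposes, and the copy walks the buffer one
 * window at a time.  pdc20621_get_from_dimm() above is the debug-only
 * mirror image of this routine.
 */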
static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
				 u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4; /* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);
	offset -= (idx * window_size);
	idx++;
	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);

	psource += dist;
	size -= dist;
	for (; (long) size >= (long) window_size ;) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, window_size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	if (size) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
	}
}


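/*
 * Read one byte from a DIMM's SPD EEPROM over the chip's I2C master.
 * The transfer is started with the I2C interrupt masked and then polled
 * for completion; returns 1 on success with the byte in *pdata, 0 on
 * timeout.
 */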
static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
				      u32 subaddr, u32 *pdata)
{
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	u32 i2creg = 0;
	u32 status;
	u32 count = 0;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i2creg |= device << 24;
	i2creg |= subaddr << 16;

	/* Set the device and subaddress */
	writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
	readl(mmio + PDC_I2C_ADDR_DATA);

	/* Write Control to perform read operation, mask int */
	writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
	       mmio + PDC_I2C_CONTROL);

	for (count = 0; count <= 1000; count ++) {
		status = readl(mmio + PDC_I2C_CONTROL);
		if (status & PDC_I2C_COMPLETE) {
			status = readl(mmio + PDC_I2C_ADDR_DATA);
			break;
		} else if (count == 1000)
			return 0;
	}

	*pdata = (status >> 8) & 0x000000ff;
	return 1;
}


static int pdc20621_detect_dimm(struct ata_host *host)
{
	u32 data = 0;
	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			      PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
		if (data == 100)
			return 100;
	} else
		return 0;

	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
		if (data <= 0x75)
			return 133;
	} else
		return 0;

	return 0;
}


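/*
 * Read the module's SPD bytes and pack the geometry and timing fields
 * into the DIMM 0 control register.  Returns the computed module size
 * in megabytes for use by the rest of the DIMM bring-up code.
 */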
pdc20621_prog_dimm0(struct ata_host * host)1141*4882a593Smuzhiyun static int pdc20621_prog_dimm0(struct ata_host *host)
1142*4882a593Smuzhiyun {
1143*4882a593Smuzhiyun u32 spd0[50];
1144*4882a593Smuzhiyun u32 data = 0;
1145*4882a593Smuzhiyun int size, i;
1146*4882a593Smuzhiyun u8 bdimmsize;
1147*4882a593Smuzhiyun void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1148*4882a593Smuzhiyun static const struct {
1149*4882a593Smuzhiyun unsigned int reg;
1150*4882a593Smuzhiyun unsigned int ofs;
1151*4882a593Smuzhiyun } pdc_i2c_read_data [] = {
1152*4882a593Smuzhiyun { PDC_DIMM_SPD_TYPE, 11 },
1153*4882a593Smuzhiyun { PDC_DIMM_SPD_FRESH_RATE, 12 },
1154*4882a593Smuzhiyun { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1155*4882a593Smuzhiyun { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1156*4882a593Smuzhiyun { PDC_DIMM_SPD_ROW_NUM, 3 },
1157*4882a593Smuzhiyun { PDC_DIMM_SPD_BANK_NUM, 17 },
1158*4882a593Smuzhiyun { PDC_DIMM_SPD_MODULE_ROW, 5 },
1159*4882a593Smuzhiyun { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1160*4882a593Smuzhiyun { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1161*4882a593Smuzhiyun { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1162*4882a593Smuzhiyun { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1163*4882a593Smuzhiyun { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1164*4882a593Smuzhiyun };
1165*4882a593Smuzhiyun
1166*4882a593Smuzhiyun /* hard-code chip #0 */
1167*4882a593Smuzhiyun mmio += PDC_CHIP0_OFS;
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1170*4882a593Smuzhiyun pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1171*4882a593Smuzhiyun pdc_i2c_read_data[i].reg,
1172*4882a593Smuzhiyun &spd0[pdc_i2c_read_data[i].ofs]);
1173*4882a593Smuzhiyun
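	/*
	 * Pack the SPD geometry and timing bytes into the DIMM0 module
	 * control word.  The bit-field layout follows Promise's hardware
	 * documentation, which is available only under NDA.
	 */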
1174*4882a593Smuzhiyun data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1175*4882a593Smuzhiyun data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1176*4882a593Smuzhiyun ((((spd0[27] + 9) / 10) - 1) << 8) ;
1177*4882a593Smuzhiyun data |= (((((spd0[29] > spd0[28])
1178*4882a593Smuzhiyun ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1179*4882a593Smuzhiyun data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1180*4882a593Smuzhiyun
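	/* Pick the highest CAS latency advertised in SPD byte 18. */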
1181*4882a593Smuzhiyun if (spd0[18] & 0x08)
1182*4882a593Smuzhiyun data |= ((0x03) << 14);
1183*4882a593Smuzhiyun else if (spd0[18] & 0x04)
1184*4882a593Smuzhiyun data |= ((0x02) << 14);
1185*4882a593Smuzhiyun else if (spd0[18] & 0x01)
1186*4882a593Smuzhiyun data |= ((0x01) << 14);
1187*4882a593Smuzhiyun else
1188*4882a593Smuzhiyun data |= (0 << 14);
1189*4882a593Smuzhiyun
1190*4882a593Smuzhiyun 	/*
1191*4882a593Smuzhiyun 	   Calculate bDIMMSize, the module size expressed as a power-of-two
1192*4882a593Smuzhiyun 	   exponent, and fold the resulting size (in MB) into the control word.
1193*4882a593Smuzhiyun 	*/
1194*4882a593Smuzhiyun
1195*4882a593Smuzhiyun bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1196*4882a593Smuzhiyun 	size = (1 << bdimmsize) >> 20;	/* size in MB */
1197*4882a593Smuzhiyun data |= (((size / 16) - 1) << 16);
1198*4882a593Smuzhiyun data |= (0 << 23);
1199*4882a593Smuzhiyun data |= 8;
1200*4882a593Smuzhiyun writel(data, mmio + PDC_DIMM0_CONTROL);
1201*4882a593Smuzhiyun readl(mmio + PDC_DIMM0_CONTROL);
1202*4882a593Smuzhiyun return size;
1203*4882a593Smuzhiyun }
1204*4882a593Smuzhiyun
1205*4882a593Smuzhiyun
1206*4882a593Smuzhiyun static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1207*4882a593Smuzhiyun {
1208*4882a593Smuzhiyun u32 data, spd0;
1209*4882a593Smuzhiyun int error, i;
1210*4882a593Smuzhiyun void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1211*4882a593Smuzhiyun
1212*4882a593Smuzhiyun /* hard-code chip #0 */
1213*4882a593Smuzhiyun mmio += PDC_CHIP0_OFS;
1214*4882a593Smuzhiyun
1215*4882a593Smuzhiyun 	/*
1216*4882a593Smuzhiyun 	   Set the DIMM Module Global Control Register to its default (0x022259F1):
1217*4882a593Smuzhiyun 	     DIMM Arbitration Disable (bit 20)
1218*4882a593Smuzhiyun 	     DIMM Data/Control Output Driving Selection (bits 12-15)
1219*4882a593Smuzhiyun 	     Refresh Enable (bit 17)
1220*4882a593Smuzhiyun 	*/
1221*4882a593Smuzhiyun
1222*4882a593Smuzhiyun data = 0x022259F1;
1223*4882a593Smuzhiyun writel(data, mmio + PDC_SDRAM_CONTROL);
1224*4882a593Smuzhiyun readl(mmio + PDC_SDRAM_CONTROL);
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun /* Turn on for ECC */
1227*4882a593Smuzhiyun if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1228*4882a593Smuzhiyun PDC_DIMM_SPD_TYPE, &spd0)) {
1229*4882a593Smuzhiyun pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1230*4882a593Smuzhiyun PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1231*4882a593Smuzhiyun return 1;
1232*4882a593Smuzhiyun }
1233*4882a593Smuzhiyun if (spd0 == 0x02) {
1234*4882a593Smuzhiyun data |= (0x01 << 16);
1235*4882a593Smuzhiyun writel(data, mmio + PDC_SDRAM_CONTROL);
1236*4882a593Smuzhiyun readl(mmio + PDC_SDRAM_CONTROL);
1237*4882a593Smuzhiyun printk(KERN_ERR "Local DIMM ECC Enabled\n");
1238*4882a593Smuzhiyun }
1239*4882a593Smuzhiyun
1240*4882a593Smuzhiyun /* DIMM Initialization Select/Enable (bit 18/19) */
1241*4882a593Smuzhiyun data &= (~(1<<18));
1242*4882a593Smuzhiyun data |= (1<<19);
1243*4882a593Smuzhiyun writel(data, mmio + PDC_SDRAM_CONTROL);
1244*4882a593Smuzhiyun
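	/* The controller clears bit 19 once DIMM initialization completes. */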
1245*4882a593Smuzhiyun error = 1;
1246*4882a593Smuzhiyun for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1247*4882a593Smuzhiyun data = readl(mmio + PDC_SDRAM_CONTROL);
1248*4882a593Smuzhiyun if (!(data & (1<<19))) {
1249*4882a593Smuzhiyun error = 0;
1250*4882a593Smuzhiyun break;
1251*4882a593Smuzhiyun }
1252*4882a593Smuzhiyun msleep(i*100);
1253*4882a593Smuzhiyun }
1254*4882a593Smuzhiyun return error;
1255*4882a593Smuzhiyun }
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun
1258*4882a593Smuzhiyun static unsigned int pdc20621_dimm_init(struct ata_host *host)
1259*4882a593Smuzhiyun {
1260*4882a593Smuzhiyun int speed, size, length;
1261*4882a593Smuzhiyun u32 addr, spd0, pci_status;
1262*4882a593Smuzhiyun u32 time_period = 0;
1263*4882a593Smuzhiyun u32 tcount = 0;
1264*4882a593Smuzhiyun u32 ticks = 0;
1265*4882a593Smuzhiyun u32 clock = 0;
1266*4882a593Smuzhiyun u32 fparam = 0;
1267*4882a593Smuzhiyun void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1268*4882a593Smuzhiyun
1269*4882a593Smuzhiyun /* hard-code chip #0 */
1270*4882a593Smuzhiyun mmio += PDC_CHIP0_OFS;
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun /* Initialize PLL based upon PCI Bus Frequency */
1273*4882a593Smuzhiyun
1274*4882a593Smuzhiyun /* Initialize Time Period Register */
1275*4882a593Smuzhiyun writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1276*4882a593Smuzhiyun time_period = readl(mmio + PDC_TIME_PERIOD);
1277*4882a593Smuzhiyun VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1278*4882a593Smuzhiyun
1279*4882a593Smuzhiyun /* Enable timer */
1280*4882a593Smuzhiyun writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1281*4882a593Smuzhiyun readl(mmio + PDC_TIME_CONTROL);
1282*4882a593Smuzhiyun
1283*4882a593Smuzhiyun /* Wait 3 seconds */
1284*4882a593Smuzhiyun msleep(3000);
1285*4882a593Smuzhiyun
1286*4882a593Smuzhiyun /*
1287*4882a593Smuzhiyun When timer is enabled, counter is decreased every internal
1288*4882a593Smuzhiyun clock cycle.
1289*4882a593Smuzhiyun */
1290*4882a593Smuzhiyun
1291*4882a593Smuzhiyun tcount = readl(mmio + PDC_TIME_COUNTER);
1292*4882a593Smuzhiyun VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1293*4882a593Smuzhiyun
1294*4882a593Smuzhiyun /*
1295*4882a593Smuzhiyun If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1296*4882a593Smuzhiyun register should be >= (0xffffffff - 3x10^8).
1297*4882a593Smuzhiyun */
1298*4882a593Smuzhiyun if (tcount >= PCI_X_TCOUNT) {
1299*4882a593Smuzhiyun ticks = (time_period - tcount);
1300*4882a593Smuzhiyun VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1301*4882a593Smuzhiyun
1302*4882a593Smuzhiyun clock = (ticks / 300000);
1303*4882a593Smuzhiyun VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1304*4882a593Smuzhiyun
1305*4882a593Smuzhiyun clock = (clock * 33);
1306*4882a593Smuzhiyun VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1307*4882a593Smuzhiyun
1308*4882a593Smuzhiyun /* PLL F Param (bit 22:16) */
1309*4882a593Smuzhiyun fparam = (1400000 / clock) - 2;
1310*4882a593Smuzhiyun VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1311*4882a593Smuzhiyun
1312*4882a593Smuzhiyun /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1313*4882a593Smuzhiyun pci_status = (0x8a001824 | (fparam << 16));
1314*4882a593Smuzhiyun } else
1315*4882a593Smuzhiyun pci_status = PCI_PLL_INIT;
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun /* Initialize PLL. */
1318*4882a593Smuzhiyun VPRINTK("pci_status: 0x%x\n", pci_status);
1319*4882a593Smuzhiyun writel(pci_status, mmio + PDC_CTL_STATUS);
1320*4882a593Smuzhiyun readl(mmio + PDC_CTL_STATUS);
1321*4882a593Smuzhiyun
1322*4882a593Smuzhiyun /*
1323*4882a593Smuzhiyun Read SPD of DIMM by I2C interface,
1324*4882a593Smuzhiyun and program the DIMM Module Controller.
1325*4882a593Smuzhiyun */
1326*4882a593Smuzhiyun if (!(speed = pdc20621_detect_dimm(host))) {
1327*4882a593Smuzhiyun 		printk(KERN_ERR "Failed to detect local DIMM\n");
1328*4882a593Smuzhiyun return 1; /* DIMM error */
1329*4882a593Smuzhiyun }
1330*4882a593Smuzhiyun VPRINTK("Local DIMM Speed = %d\n", speed);
1331*4882a593Smuzhiyun
1332*4882a593Smuzhiyun /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1333*4882a593Smuzhiyun size = pdc20621_prog_dimm0(host);
1334*4882a593Smuzhiyun VPRINTK("Local DIMM Size = %dMB\n", size);
1335*4882a593Smuzhiyun
1336*4882a593Smuzhiyun /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1337*4882a593Smuzhiyun if (pdc20621_prog_dimm_global(host)) {
1338*4882a593Smuzhiyun 		printk(KERN_ERR "Failed to program DIMM Module Global Control Register\n");
1339*4882a593Smuzhiyun return 1;
1340*4882a593Smuzhiyun }
1341*4882a593Smuzhiyun
1342*4882a593Smuzhiyun #ifdef ATA_VERBOSE_DEBUG
1343*4882a593Smuzhiyun {
1344*4882a593Smuzhiyun 		u8 test_pattern1[40] =
1345*4882a593Smuzhiyun 			{0x55,0xAA,'P','r','o','m','i','s','e',' ',
1346*4882a593Smuzhiyun 			'N','o','t',' ','Y','e','t',' ',
1347*4882a593Smuzhiyun 			'D','e','f','i','n','e','d',' ',
1348*4882a593Smuzhiyun 			'1','.','1','0',
1349*4882a593Smuzhiyun 			'9','8','0','3','1','6','1','2',0,0};
1350*4882a593Smuzhiyun 		u8 test_pattern2[40] = {0};
1351*4882a593Smuzhiyun
1352*4882a593Smuzhiyun 		pdc20621_put_to_dimm(host, test_pattern2, 0x10040, 40);
1353*4882a593Smuzhiyun 		pdc20621_put_to_dimm(host, test_pattern2, 0x40, 40);
1354*4882a593Smuzhiyun
1355*4882a593Smuzhiyun 		pdc20621_put_to_dimm(host, test_pattern1, 0x10040, 40);
1356*4882a593Smuzhiyun 		pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
1357*4882a593Smuzhiyun 		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1358*4882a593Smuzhiyun 		       test_pattern2[1], &(test_pattern2[2]));
1359*4882a593Smuzhiyun 		pdc20621_get_from_dimm(host, test_pattern2, 0x10040,
1360*4882a593Smuzhiyun 				       40);
1361*4882a593Smuzhiyun 		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1362*4882a593Smuzhiyun 		       test_pattern2[1], &(test_pattern2[2]));
1363*4882a593Smuzhiyun
1364*4882a593Smuzhiyun 		pdc20621_put_to_dimm(host, test_pattern1, 0x40, 40);
1365*4882a593Smuzhiyun 		pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
1366*4882a593Smuzhiyun 		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1367*4882a593Smuzhiyun 		       test_pattern2[1], &(test_pattern2[2]));
1368*4882a593Smuzhiyun }
1369*4882a593Smuzhiyun #endif
1370*4882a593Smuzhiyun
1371*4882a593Smuzhiyun 	/* ECC initialization. */
1372*4882a593Smuzhiyun
1373*4882a593Smuzhiyun if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1374*4882a593Smuzhiyun PDC_DIMM_SPD_TYPE, &spd0)) {
1375*4882a593Smuzhiyun pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1376*4882a593Smuzhiyun PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1377*4882a593Smuzhiyun return 1;
1378*4882a593Smuzhiyun }
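	/*
	 * As above, an SPD "type" value of 0x02 is taken to mean an ECC
	 * module.  Zero-fill the whole DIMM so that every ECC code word
	 * starts out in a known-good state.
	 */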
1379*4882a593Smuzhiyun if (spd0 == 0x02) {
1380*4882a593Smuzhiyun void *buf;
1381*4882a593Smuzhiyun VPRINTK("Start ECC initialization\n");
1382*4882a593Smuzhiyun addr = 0;
1383*4882a593Smuzhiyun length = size * 1024 * 1024;
1384*4882a593Smuzhiyun buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
1385*4882a593Smuzhiyun if (!buf)
1386*4882a593Smuzhiyun return 1;
1387*4882a593Smuzhiyun while (addr < length) {
1388*4882a593Smuzhiyun pdc20621_put_to_dimm(host, buf, addr,
1389*4882a593Smuzhiyun ECC_ERASE_BUF_SZ);
1390*4882a593Smuzhiyun addr += ECC_ERASE_BUF_SZ;
1391*4882a593Smuzhiyun }
1392*4882a593Smuzhiyun kfree(buf);
1393*4882a593Smuzhiyun VPRINTK("Finish ECC initialization\n");
1394*4882a593Smuzhiyun }
1395*4882a593Smuzhiyun return 0;
1396*4882a593Smuzhiyun }
1397*4882a593Smuzhiyun
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun static void pdc_20621_init(struct ata_host *host)
1400*4882a593Smuzhiyun {
1401*4882a593Smuzhiyun u32 tmp;
1402*4882a593Smuzhiyun void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1403*4882a593Smuzhiyun
1404*4882a593Smuzhiyun /* hard-code chip #0 */
1405*4882a593Smuzhiyun mmio += PDC_CHIP0_OFS;
1406*4882a593Smuzhiyun
1407*4882a593Smuzhiyun /*
1408*4882a593Smuzhiyun * Select page 0x40 for our 32k DIMM window
1409*4882a593Smuzhiyun */
1410*4882a593Smuzhiyun tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1411*4882a593Smuzhiyun tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1412*4882a593Smuzhiyun writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun /*
1415*4882a593Smuzhiyun * Reset Host DMA
1416*4882a593Smuzhiyun */
1417*4882a593Smuzhiyun tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1418*4882a593Smuzhiyun tmp |= PDC_RESET;
1419*4882a593Smuzhiyun writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1420*4882a593Smuzhiyun readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1421*4882a593Smuzhiyun
1422*4882a593Smuzhiyun udelay(10);
1423*4882a593Smuzhiyun
1424*4882a593Smuzhiyun tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1425*4882a593Smuzhiyun tmp &= ~PDC_RESET;
1426*4882a593Smuzhiyun writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1427*4882a593Smuzhiyun readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1428*4882a593Smuzhiyun }
1429*4882a593Smuzhiyun
1430*4882a593Smuzhiyun static int pdc_sata_init_one(struct pci_dev *pdev,
1431*4882a593Smuzhiyun const struct pci_device_id *ent)
1432*4882a593Smuzhiyun {
1433*4882a593Smuzhiyun const struct ata_port_info *ppi[] =
1434*4882a593Smuzhiyun { &pdc_port_info[ent->driver_data], NULL };
1435*4882a593Smuzhiyun struct ata_host *host;
1436*4882a593Smuzhiyun struct pdc_host_priv *hpriv;
1437*4882a593Smuzhiyun int i, rc;
1438*4882a593Smuzhiyun
1439*4882a593Smuzhiyun ata_print_version_once(&pdev->dev, DRV_VERSION);
1440*4882a593Smuzhiyun
1441*4882a593Smuzhiyun /* allocate host */
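	/* one libata port per ATA engine; the PDC20621 has four of them */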
1442*4882a593Smuzhiyun host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1443*4882a593Smuzhiyun hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1444*4882a593Smuzhiyun if (!host || !hpriv)
1445*4882a593Smuzhiyun return -ENOMEM;
1446*4882a593Smuzhiyun
1447*4882a593Smuzhiyun host->private_data = hpriv;
1448*4882a593Smuzhiyun
1449*4882a593Smuzhiyun /* acquire resources and fill host */
1450*4882a593Smuzhiyun rc = pcim_enable_device(pdev);
1451*4882a593Smuzhiyun if (rc)
1452*4882a593Smuzhiyun return rc;
1453*4882a593Smuzhiyun
1454*4882a593Smuzhiyun rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1455*4882a593Smuzhiyun DRV_NAME);
1456*4882a593Smuzhiyun if (rc == -EBUSY)
1457*4882a593Smuzhiyun pcim_pin_device(pdev);
1458*4882a593Smuzhiyun if (rc)
1459*4882a593Smuzhiyun return rc;
1460*4882a593Smuzhiyun host->iomap = pcim_iomap_table(pdev);
1461*4882a593Smuzhiyun
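	/* Each port's ATA registers live at chip offset 0x200, 0x80 bytes apart. */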
1462*4882a593Smuzhiyun for (i = 0; i < 4; i++) {
1463*4882a593Smuzhiyun struct ata_port *ap = host->ports[i];
1464*4882a593Smuzhiyun void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1465*4882a593Smuzhiyun unsigned int offset = 0x200 + i * 0x80;
1466*4882a593Smuzhiyun
1467*4882a593Smuzhiyun pdc_sata_setup_port(&ap->ioaddr, base + offset);
1468*4882a593Smuzhiyun
1469*4882a593Smuzhiyun ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1470*4882a593Smuzhiyun ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1471*4882a593Smuzhiyun ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1472*4882a593Smuzhiyun }
1473*4882a593Smuzhiyun
1474*4882a593Smuzhiyun /* configure and activate */
1475*4882a593Smuzhiyun rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
1476*4882a593Smuzhiyun if (rc)
1477*4882a593Smuzhiyun return rc;
1478*4882a593Smuzhiyun
1479*4882a593Smuzhiyun if (pdc20621_dimm_init(host))
1480*4882a593Smuzhiyun return -ENOMEM;
1481*4882a593Smuzhiyun pdc_20621_init(host);
1482*4882a593Smuzhiyun
1483*4882a593Smuzhiyun pci_set_master(pdev);
1484*4882a593Smuzhiyun return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1485*4882a593Smuzhiyun IRQF_SHARED, &pdc_sata_sht);
1486*4882a593Smuzhiyun }
1487*4882a593Smuzhiyun
1488*4882a593Smuzhiyun module_pci_driver(pdc_sata_pci_driver);
1489*4882a593Smuzhiyun
1490*4882a593Smuzhiyun MODULE_AUTHOR("Jeff Garzik");
1491*4882a593Smuzhiyun MODULE_DESCRIPTION("Promise SATA low-level driver");
1492*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1493*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1494*4882a593Smuzhiyun MODULE_VERSION(DRV_VERSION);
1495