1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun
3*4882a593Smuzhiyun /*
4*4882a593Smuzhiyun * acard-ahci.c - ACard AHCI SATA support
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Maintained by: Tejun Heo <tj@kernel.org>
7*4882a593Smuzhiyun * Please ALWAYS copy linux-ide@vger.kernel.org
8*4882a593Smuzhiyun * on emails.
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun * Copyright 2010 Red Hat, Inc.
11*4882a593Smuzhiyun *
12*4882a593Smuzhiyun * libata documentation is available via 'make {ps|pdf}docs',
13*4882a593Smuzhiyun * as Documentation/driver-api/libata.rst
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun * AHCI hardware documentation:
16*4882a593Smuzhiyun * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
17*4882a593Smuzhiyun * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
18*4882a593Smuzhiyun */
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun #include <linux/kernel.h>
21*4882a593Smuzhiyun #include <linux/module.h>
22*4882a593Smuzhiyun #include <linux/pci.h>
23*4882a593Smuzhiyun #include <linux/blkdev.h>
24*4882a593Smuzhiyun #include <linux/delay.h>
25*4882a593Smuzhiyun #include <linux/interrupt.h>
26*4882a593Smuzhiyun #include <linux/dma-mapping.h>
27*4882a593Smuzhiyun #include <linux/device.h>
28*4882a593Smuzhiyun #include <linux/dmi.h>
29*4882a593Smuzhiyun #include <linux/gfp.h>
30*4882a593Smuzhiyun #include <scsi/scsi_host.h>
31*4882a593Smuzhiyun #include <scsi/scsi_cmnd.h>
32*4882a593Smuzhiyun #include <linux/libata.h>
33*4882a593Smuzhiyun #include "ahci.h"
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #define DRV_NAME "acard-ahci"
36*4882a593Smuzhiyun #define DRV_VERSION "1.0"
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun /*
39*4882a593Smuzhiyun Received FIS structure limited to 80h.
40*4882a593Smuzhiyun */
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun #define ACARD_AHCI_RX_FIS_SZ 128
43*4882a593Smuzhiyun
enum {
	AHCI_PCI_BAR		= 5,	/* BAR 5 (ABAR) holds the AHCI MMIO registers */
};
47*4882a593Smuzhiyun
/* Indices into acard_ahci_port_info[] for each supported board. */
enum board_ids {
	board_acard_ahci,
};
51*4882a593Smuzhiyun
/*
 * ACard's private scatter/gather descriptor layout (one table entry).
 * It differs from the standard AHCI PRD: bit 31 of @size marks the
 * end of the table instead of interrupt-on-completion.
 */
struct acard_sg {
	__le32			addr;		/* low 32 bits of segment DMA address */
	__le32			addr_hi;	/* high 32 bits of segment DMA address */
	__le32			reserved;
	__le32			size;		/* bit 31 (EOT) max==0x10000 (64k) */
};
58*4882a593Smuzhiyun
/* Forward declarations for the ops table and PCI driver structures below. */
static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int acard_ahci_port_start(struct ata_port *ap);
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

#ifdef CONFIG_PM_SLEEP
static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int acard_ahci_pci_device_resume(struct pci_dev *pdev);
#endif
68*4882a593Smuzhiyun
/* SCSI host template: stock AHCI settings under the "acard-ahci" name. */
static struct scsi_host_template acard_ahci_sht = {
	AHCI_SHT("acard-ahci"),
};
72*4882a593Smuzhiyun
/*
 * Inherit the generic AHCI port operations, overriding only the hooks
 * that must use ACard's private command-table / S/G descriptor format.
 */
static struct ata_port_operations acard_ops = {
	.inherits		= &ahci_ops,
	.qc_prep		= acard_ahci_qc_prep,
	.qc_fill_rtf		= acard_ahci_qc_fill_rtf,
	.port_start		= acard_ahci_port_start,
};
79*4882a593Smuzhiyun
/* Stash host flags in ->private_data until the host_priv is allocated. */
#define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)

/* Per-board capabilities; NCQ is disabled on this hardware. */
static const struct ata_port_info acard_ahci_port_info[] = {
	[board_acard_ahci] =
	{
		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ),
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &acard_ops,
	},
};
92*4882a593Smuzhiyun
/* PCI IDs this driver binds to. */
static const struct pci_device_id acard_ahci_pci_tbl[] = {
	/* ACard */
	{ PCI_VDEVICE(ARTOP, 0x000d), board_acard_ahci }, /* ATP8620 */

	{ }	/* terminate list */
};
99*4882a593Smuzhiyun
/* PCI driver glue; suspend/resume hooks exist only with CONFIG_PM_SLEEP. */
static struct pci_driver acard_ahci_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= acard_ahci_pci_tbl,
	.probe			= acard_ahci_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= acard_ahci_pci_device_suspend,
	.resume			= acard_ahci_pci_device_resume,
#endif
};
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
/*
 * acard_ahci_pci_device_suspend - PCI ->suspend hook
 * @pdev: PCI device being suspended
 * @mesg: PM event being handled
 *
 * Refuses to suspend when the BIOS is known broken (AHCI_HFLAG_NO_SUSPEND),
 * masks HBA interrupts before any sleep transition as required by the AHCI
 * spec, then hands off to the generic libata PCI suspend path.
 */
static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;

	if ((hpriv->flags & AHCI_HFLAG_NO_SUSPEND) &&
	    (mesg.event & PM_EVENT_SUSPEND)) {
		dev_err(&pdev->dev,
			"BIOS update required for suspend/resume\n");
		return -EIO;
	}

	if (mesg.event & PM_EVENT_SLEEP) {
		u32 ctl;

		/* AHCI spec rev1.1 section 8.3.3:
		 * Software must disable interrupts prior to requesting a
		 * transition of the HBA to D3 state.
		 */
		ctl = readl(mmio + HOST_CTL) & ~HOST_IRQ_EN;
		writel(ctl, mmio + HOST_CTL);
		readl(mmio + HOST_CTL); /* flush */
	}

	return ata_pci_device_suspend(pdev, mesg);
}
139*4882a593Smuzhiyun
acard_ahci_pci_device_resume(struct pci_dev * pdev)140*4882a593Smuzhiyun static int acard_ahci_pci_device_resume(struct pci_dev *pdev)
141*4882a593Smuzhiyun {
142*4882a593Smuzhiyun struct ata_host *host = pci_get_drvdata(pdev);
143*4882a593Smuzhiyun int rc;
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun rc = ata_pci_device_do_resume(pdev);
146*4882a593Smuzhiyun if (rc)
147*4882a593Smuzhiyun return rc;
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
150*4882a593Smuzhiyun rc = ahci_reset_controller(host);
151*4882a593Smuzhiyun if (rc)
152*4882a593Smuzhiyun return rc;
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun ahci_init_controller(host);
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun ata_host_resume(host);
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun return 0;
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun #endif
162*4882a593Smuzhiyun
acard_ahci_pci_print_info(struct ata_host * host)163*4882a593Smuzhiyun static void acard_ahci_pci_print_info(struct ata_host *host)
164*4882a593Smuzhiyun {
165*4882a593Smuzhiyun struct pci_dev *pdev = to_pci_dev(host->dev);
166*4882a593Smuzhiyun u16 cc;
167*4882a593Smuzhiyun const char *scc_s;
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun pci_read_config_word(pdev, 0x0a, &cc);
170*4882a593Smuzhiyun if (cc == PCI_CLASS_STORAGE_IDE)
171*4882a593Smuzhiyun scc_s = "IDE";
172*4882a593Smuzhiyun else if (cc == PCI_CLASS_STORAGE_SATA)
173*4882a593Smuzhiyun scc_s = "SATA";
174*4882a593Smuzhiyun else if (cc == PCI_CLASS_STORAGE_RAID)
175*4882a593Smuzhiyun scc_s = "RAID";
176*4882a593Smuzhiyun else
177*4882a593Smuzhiyun scc_s = "unknown";
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun ahci_print_info(host, scc_s);
180*4882a593Smuzhiyun }
181*4882a593Smuzhiyun
acard_ahci_fill_sg(struct ata_queued_cmd * qc,void * cmd_tbl)182*4882a593Smuzhiyun static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
183*4882a593Smuzhiyun {
184*4882a593Smuzhiyun struct scatterlist *sg;
185*4882a593Smuzhiyun struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
186*4882a593Smuzhiyun unsigned int si, last_si = 0;
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun VPRINTK("ENTER\n");
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun /*
191*4882a593Smuzhiyun * Next, the S/G list.
192*4882a593Smuzhiyun */
193*4882a593Smuzhiyun for_each_sg(qc->sg, sg, qc->n_elem, si) {
194*4882a593Smuzhiyun dma_addr_t addr = sg_dma_address(sg);
195*4882a593Smuzhiyun u32 sg_len = sg_dma_len(sg);
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun /*
198*4882a593Smuzhiyun * ACard note:
199*4882a593Smuzhiyun * We must set an end-of-table (EOT) bit,
200*4882a593Smuzhiyun * and the segment cannot exceed 64k (0x10000)
201*4882a593Smuzhiyun */
202*4882a593Smuzhiyun acard_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
203*4882a593Smuzhiyun acard_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
204*4882a593Smuzhiyun acard_sg[si].size = cpu_to_le32(sg_len);
205*4882a593Smuzhiyun last_si = si;
206*4882a593Smuzhiyun }
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun acard_sg[last_si].size |= cpu_to_le32(1 << 31); /* set EOT */
209*4882a593Smuzhiyun
210*4882a593Smuzhiyun return si;
211*4882a593Smuzhiyun }
212*4882a593Smuzhiyun
/*
 * acard_ahci_qc_prep - libata ->qc_prep: build command table and slot
 * @qc: command to prepare
 *
 * Writes the H2D Register FIS, the ATAPI CDB (if any), and the private
 * S/G table into the per-tag command table, then fills the command slot
 * header.  Unlike standard AHCI, the PRD-table-length field of the slot
 * is deliberately left at zero.  Always returns AC_ERR_OK.
 */
static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		/* Clear the full 32-byte CDB area; the CDB may be shorter. */
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	if (qc->flags & ATA_QCFLAG_DMAMAP)
		acard_ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 *
	 * ACard note: prd table length not filled in
	 */
	opts = cmd_fis_len | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);

	return AC_ERR_OK;
}
252*4882a593Smuzhiyun
acard_ahci_qc_fill_rtf(struct ata_queued_cmd * qc)253*4882a593Smuzhiyun static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
254*4882a593Smuzhiyun {
255*4882a593Smuzhiyun struct ahci_port_priv *pp = qc->ap->private_data;
256*4882a593Smuzhiyun u8 *rx_fis = pp->rx_fis;
257*4882a593Smuzhiyun
258*4882a593Smuzhiyun if (pp->fbs_enabled)
259*4882a593Smuzhiyun rx_fis += qc->dev->link->pmp * ACARD_AHCI_RX_FIS_SZ;
260*4882a593Smuzhiyun
261*4882a593Smuzhiyun /*
262*4882a593Smuzhiyun * After a successful execution of an ATA PIO data-in command,
263*4882a593Smuzhiyun * the device doesn't send D2H Reg FIS to update the TF and
264*4882a593Smuzhiyun * the host should take TF and E_Status from the preceding PIO
265*4882a593Smuzhiyun * Setup FIS.
266*4882a593Smuzhiyun */
267*4882a593Smuzhiyun if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
268*4882a593Smuzhiyun !(qc->flags & ATA_QCFLAG_FAILED)) {
269*4882a593Smuzhiyun ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
270*4882a593Smuzhiyun qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
271*4882a593Smuzhiyun } else
272*4882a593Smuzhiyun ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun return true;
275*4882a593Smuzhiyun }
276*4882a593Smuzhiyun
/*
 * acard_ahci_port_start - libata ->port_start: allocate per-port resources
 * @ap: port to initialize
 *
 * Detects FBS capability, carves one coherent DMA chunk into the command
 * slot list, the received-FIS area, and a single command-table region,
 * then resumes the port.  All allocations are devm-managed, so there is
 * no matching teardown here.  Returns 0 or -ENOMEM.
 */
static int acard_ahci_port_start(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;
	size_t dma_sz, rx_fis_sz;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* check FBS capability */
	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 cmd = readl(port_mmio + PORT_CMD);
		if (cmd & PORT_CMD_FBSCP)
			pp->fbs_supported = true;
		else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
			dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
				 ap->port_no);
			pp->fbs_supported = true;
		} else
			dev_warn(dev, "port %d is not capable of FBS\n",
				 ap->port_no);
	}

	/* FBS needs a 128-byte RX FIS slot for each of the 16 PMP ports. */
	if (pp->fbs_supported) {
		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
		rx_fis_sz = ACARD_AHCI_RX_FIS_SZ * 16;
	} else {
		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
		rx_fis_sz = ACARD_AHCI_RX_FIS_SZ;
	}

	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += rx_fis_sz;
	mem_dma += rx_fis_sz;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}
354*4882a593Smuzhiyun
/*
 * acard_ahci_init_one - PCI ->probe: bring up one ACard AHCI controller
 * @pdev: PCI device to probe
 * @ent: matching entry from acard_ahci_pci_tbl
 *
 * Maps BAR 5, reads the controller's capabilities, allocates and
 * configures the ATA host, resets and initializes the HBA, and finally
 * activates the host (registering ports with SCSI).  All resources are
 * pcim/devm-managed.  Returns 0 or a negative errno.
 */
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int board_id = ent->driver_data;
	struct ata_port_info pi = acard_ahci_port_info[board_id];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	int n_ports, i, rc;

	VPRINTK("ENTER\n");

	WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* AHCI controllers often implement SFF compatible interface.
	 * Grab all PCI BARs just in case.
	 */
	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	hpriv->irq = pdev->irq;
	/* merge in board flags stashed via AHCI_HFLAGS() */
	hpriv->flags |= (unsigned long)pi.private_data;

	if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
		pci_enable_msi(pdev);

	hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];

	/* save initial config */
	ahci_save_initial_config(&pdev->dev, hpriv);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	ahci_set_em_messages(hpriv, &pi);

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;
	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;
	else
		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
				   0x100 + ap->port_no * 0x80, "port");

		/* set initial link pm policy */
		/*
		ap->pm_policy = NOT_AVAILABLE;
		*/
		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	/* initialize adapter */
	rc = dma_set_mask_and_coherent(&pdev->dev,
			DMA_BIT_MASK((hpriv->cap & HOST_CAP_64) ? 64 : 32));
	if (rc) {
		dev_err(&pdev->dev, "DMA enable failed\n");
		return rc;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		return rc;

	ahci_init_controller(host);
	acard_ahci_pci_print_info(host);

	pci_set_master(pdev);
	return ahci_host_activate(host, &acard_ahci_sht);
}
460*4882a593Smuzhiyun
/* Standard PCI-driver module registration and metadata. */
module_pci_driver(acard_ahci_pci_driver);

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("ACard AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, acard_ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);
468