1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * AppliedMicro X-Gene SoC SATA Host Controller Driver
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (c) 2014, Applied Micro Circuits Corporation
6*4882a593Smuzhiyun * Author: Loc Ho <lho@apm.com>
7*4882a593Smuzhiyun * Tuan Phan <tphan@apm.com>
8*4882a593Smuzhiyun * Suman Tripathi <stripathi@apm.com>
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun * NOTE: PM support is not currently available.
11*4882a593Smuzhiyun */
12*4882a593Smuzhiyun #include <linux/acpi.h>
13*4882a593Smuzhiyun #include <linux/module.h>
14*4882a593Smuzhiyun #include <linux/platform_device.h>
15*4882a593Smuzhiyun #include <linux/ahci_platform.h>
16*4882a593Smuzhiyun #include <linux/of_address.h>
17*4882a593Smuzhiyun #include <linux/of_device.h>
18*4882a593Smuzhiyun #include <linux/of_irq.h>
19*4882a593Smuzhiyun #include <linux/phy/phy.h>
20*4882a593Smuzhiyun #include "ahci.h"
21*4882a593Smuzhiyun
#define DRV_NAME "xgene-ahci"

/* Max # of disk per a controller */
#define MAX_AHCI_CHN_PERCTR		2

/* MUX CSR */
#define SATA_ENET_CONFIG_REG		0x00000000
#define  CFG_SATA_ENET_SELECT_MASK	0x00000001

/* SATA core host controller CSR */
#define SLVRDERRATTRIBUTES		0x00000000
#define SLVWRERRATTRIBUTES		0x00000004
#define MSTRDERRATTRIBUTES		0x00000008
#define MSTWRERRATTRIBUTES		0x0000000c
#define BUSCTLREG			0x00000014
#define IOFMSTRWAUX			0x00000018
#define INTSTATUSMASK			0x0000002c
#define ERRINTSTATUS			0x00000030
#define ERRINTSTATUSMASK		0x00000034

/* SATA host AHCI CSR */
#define PORTCFG				0x000000a4
#define  PORTADDR_SET(dst, src) \
		(((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f))
#define PORTPHY1CFG		0x000000a8
#define PORTPHY1CFG_FRCPHYRDY_SET(dst, src) \
		(((dst) & ~0x00100000) | (((u32)(src) << 0x14) & 0x00100000))
#define PORTPHY2CFG		0x000000ac
#define PORTPHY3CFG		0x000000b0
#define PORTPHY4CFG		0x000000b4
#define PORTPHY5CFG		0x000000b8
#define SCTL0			0x0000012C
#define PORTPHY5CFG_RTCHG_SET(dst, src) \
		(((dst) & ~0xfff00000) | (((u32)(src) << 0x14) & 0xfff00000))
#define PORTAXICFG_EN_CONTEXT_SET(dst, src) \
		(((dst) & ~0x01000000) | (((u32)(src) << 0x18) & 0x01000000))
#define PORTAXICFG		0x000000bc
#define PORTAXICFG_OUTTRANS_SET(dst, src) \
		(((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
#define PORTRANSCFG		0x000000c8
#define PORTRANSCFG_RXWM_SET(dst, src)		\
		(((dst) & ~0x0000007f) | (((u32)(src)) & 0x0000007f))

/* SATA host controller AXI CSR */
#define INT_SLV_TMOMASK		0x00000010

/* SATA diagnostic CSR */
#define CFG_MEM_RAM_SHUTDOWN		0x00000070
#define BLOCK_MEM_RDY			0x00000074

/* Max retry for link down */
#define MAX_LINK_DOWN_RETRY 3

/* Silicon revisions of the X-Gene SATA host controller IP */
enum xgene_ahci_version {
	XGENE_AHCI_V1 = 1,
	XGENE_AHCI_V2,
};

/*
 * Driver-private state, stored in ahci_host_priv->plat_data.
 * Per-port arrays are indexed by ap->port_no (< MAX_AHCI_CHN_PERCTR).
 */
struct xgene_ahci_context {
	struct ahci_host_priv *hpriv;
	struct device *dev;
	u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued*/
	u32 class[MAX_AHCI_CHN_PERCTR]; /* tracking the class of device */
	void __iomem *csr_core;		/* Core CSR address of IP */
	void __iomem *csr_diag;		/* Diag CSR address of IP */
	void __iomem *csr_axi;		/* AXI CSR address of IP */
	void __iomem *csr_mux;		/* MUX CSR address of IP */
};
90*4882a593Smuzhiyun
/**
 * xgene_ahci_init_memram - Release the controller memory from shutdown.
 * @ctx: X-Gene AHCI driver context
 *
 * Clears CFG_MEM_RAM_SHUTDOWN in the diagnostic CSR region and then polls
 * BLOCK_MEM_RDY once (after a 1 ms settle) for the all-ready pattern
 * 0xFFFFFFFF.
 *
 * Return: 0 on success, -ENODEV if the memory blocks did not come ready.
 */
static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
{
	dev_dbg(ctx->dev, "Release memory from shutdown\n");
	writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN);
	readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */
	msleep(1);	/* reset may take up to 1ms */
	if (readl(ctx->csr_diag + BLOCK_MEM_RDY) != 0xFFFFFFFF) {
		dev_err(ctx->dev, "failed to release memory from shutdown\n");
		return -ENODEV;
	}
	return 0;
}
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun /**
105*4882a593Smuzhiyun * xgene_ahci_poll_reg_val- Poll a register on a specific value.
106*4882a593Smuzhiyun * @ap : ATA port of interest.
107*4882a593Smuzhiyun * @reg : Register of interest.
108*4882a593Smuzhiyun * @val : Value to be attained.
109*4882a593Smuzhiyun * @interval : waiting interval for polling.
110*4882a593Smuzhiyun * @timeout : timeout for achieving the value.
111*4882a593Smuzhiyun */
/**
 * xgene_ahci_poll_reg_val - Poll a register until it reads a wanted value.
 * @ap : ATA port of interest.
 * @reg : Register of interest.
 * @val : Value to be attained.
 * @interval : waiting interval for polling, in milliseconds.
 * @timeout : timeout for achieving the value, in milliseconds.
 *
 * Return: the last value read from @reg (equals @val on success; on
 * timeout it is whatever the register held at the deadline).
 */
static int xgene_ahci_poll_reg_val(struct ata_port *ap,
				   void __iomem *reg, unsigned
				   int val, unsigned long interval,
				   unsigned long timeout)
{
	unsigned long deadline;
	unsigned int cur;

	cur = ioread32(reg);
	deadline = ata_deadline(jiffies, timeout);

	for (;;) {
		if (cur == val || !time_before(jiffies, deadline))
			break;
		ata_msleep(ap, interval);
		cur = ioread32(reg);
	}

	return cur;
}
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun /**
132*4882a593Smuzhiyun * xgene_ahci_restart_engine - Restart the dma engine.
133*4882a593Smuzhiyun * @ap : ATA port of interest
134*4882a593Smuzhiyun *
135*4882a593Smuzhiyun * Waits for completion of multiple commands and restarts
136*4882a593Smuzhiyun * the DMA engine inside the controller.
137*4882a593Smuzhiyun */
/**
 * xgene_ahci_restart_engine - Restart the dma engine.
 * @ap : ATA port of interest
 *
 * Waits for completion of multiple commands and restarts
 * the DMA engine inside the controller.
 *
 * Return: 0 on success, -EBUSY if outstanding commands did not complete
 * within the 100 ms poll window.
 */
static int xgene_ahci_restart_engine(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;

	/*
	 * In case of PMP multiple IDENTIFY DEVICE commands can be
	 * issued inside PxCI. So need to poll PxCI for the
	 * completion of outstanding IDENTIFY DEVICE commands before
	 * we restart the DMA engine.
	 */
	if (xgene_ahci_poll_reg_val(ap, port_mmio +
				    PORT_CMD_ISSUE, 0x0, 1, 100))
		return -EBUSY;

	/* Stop the engine, re-arm FIS reception, then restart below. */
	hpriv->stop_engine(ap);
	ahci_start_fis_rx(ap);

	/*
	 * Enable the PxFBS.FBS_EN bit as it
	 * gets cleared due to stopping the engine.
	 */
	if (pp->fbs_supported) {
		fbs = readl(port_mmio + PORT_FBS);
		writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
		/* Read back to post the write before restarting the engine */
		fbs = readl(port_mmio + PORT_FBS);
	}

	hpriv->start_engine(ap);

	return 0;
}
172*4882a593Smuzhiyun
173*4882a593Smuzhiyun /**
174*4882a593Smuzhiyun * xgene_ahci_qc_issue - Issue commands to the device
175*4882a593Smuzhiyun * @qc: Command to issue
176*4882a593Smuzhiyun *
177*4882a593Smuzhiyun * Due to Hardware errata for IDENTIFY DEVICE command, the controller cannot
178*4882a593Smuzhiyun * clear the BSY bit after receiving the PIO setup FIS. This results in the dma
179*4882a593Smuzhiyun * state machine goes into the CMFatalErrorUpdate state and locks up. By
180*4882a593Smuzhiyun * restarting the dma engine, it removes the controller out of lock up state.
181*4882a593Smuzhiyun *
182*4882a593Smuzhiyun * Due to H/W errata, the controller is unable to save the PMP
183*4882a593Smuzhiyun * field fetched from command header before sending the H2D FIS.
184*4882a593Smuzhiyun * When the device returns the PMP port field in the D2H FIS, there is
185*4882a593Smuzhiyun * a mismatch and results in command completion failure. The
186*4882a593Smuzhiyun * workaround is to write the pmp value to PxFBS.DEV field before issuing
187*4882a593Smuzhiyun * any command to PMP.
188*4882a593Smuzhiyun */
xgene_ahci_qc_issue(struct ata_queued_cmd * qc)189*4882a593Smuzhiyun static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
190*4882a593Smuzhiyun {
191*4882a593Smuzhiyun struct ata_port *ap = qc->ap;
192*4882a593Smuzhiyun struct ahci_host_priv *hpriv = ap->host->private_data;
193*4882a593Smuzhiyun struct xgene_ahci_context *ctx = hpriv->plat_data;
194*4882a593Smuzhiyun int rc = 0;
195*4882a593Smuzhiyun u32 port_fbs;
196*4882a593Smuzhiyun void *port_mmio = ahci_port_base(ap);
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun /*
199*4882a593Smuzhiyun * Write the pmp value to PxFBS.DEV
200*4882a593Smuzhiyun * for case of Port Mulitplier.
201*4882a593Smuzhiyun */
202*4882a593Smuzhiyun if (ctx->class[ap->port_no] == ATA_DEV_PMP) {
203*4882a593Smuzhiyun port_fbs = readl(port_mmio + PORT_FBS);
204*4882a593Smuzhiyun port_fbs &= ~PORT_FBS_DEV_MASK;
205*4882a593Smuzhiyun port_fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
206*4882a593Smuzhiyun writel(port_fbs, port_mmio + PORT_FBS);
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun
209*4882a593Smuzhiyun if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
210*4882a593Smuzhiyun (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET) ||
211*4882a593Smuzhiyun (ctx->last_cmd[ap->port_no] == ATA_CMD_SMART)))
212*4882a593Smuzhiyun xgene_ahci_restart_engine(ap);
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun rc = ahci_qc_issue(qc);
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun /* Save the last command issued */
217*4882a593Smuzhiyun ctx->last_cmd[ap->port_no] = qc->tf.command;
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun return rc;
220*4882a593Smuzhiyun }
221*4882a593Smuzhiyun
xgene_ahci_is_memram_inited(struct xgene_ahci_context * ctx)222*4882a593Smuzhiyun static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
223*4882a593Smuzhiyun {
224*4882a593Smuzhiyun void __iomem *diagcsr = ctx->csr_diag;
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
227*4882a593Smuzhiyun readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
228*4882a593Smuzhiyun }
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun /**
231*4882a593Smuzhiyun * xgene_ahci_read_id - Read ID data from the specified device
232*4882a593Smuzhiyun * @dev: device
233*4882a593Smuzhiyun * @tf: proposed taskfile
234*4882a593Smuzhiyun * @id: data buffer
235*4882a593Smuzhiyun *
236*4882a593Smuzhiyun * This custom read ID function is required due to the fact that the HW
237*4882a593Smuzhiyun * does not support DEVSLP.
238*4882a593Smuzhiyun */
/**
 * xgene_ahci_read_id - Read ID data from the specified device
 * @dev: device
 * @tf: proposed taskfile
 * @id: data buffer
 *
 * This custom read ID function is required due to the fact that the HW
 * does not support DEVSLP.
 *
 * Return: 0 on success, otherwise the AC_ERR_* mask from
 * ata_do_dev_read_id().
 */
static unsigned int xgene_ahci_read_id(struct ata_device *dev,
				       struct ata_taskfile *tf, u16 *id)
{
	u32 err_mask;

	err_mask = ata_do_dev_read_id(dev, tf, id);
	if (err_mask)
		return err_mask;

	/*
	 * Mask reserved area. Word78 spec of Link Power Management
	 * bit15-8: reserved
	 * bit7: NCQ autosence
	 * bit6: Software settings preservation supported
	 * bit5: reserved
	 * bit4: In-order sata delivery supported
	 * bit3: DIPM requests supported
	 * bit2: DMA Setup FIS Auto-Activate optimization supported
	 * bit1: DMA Setup FIX non-Zero buffer offsets supported
	 * bit0: Reserved
	 *
	 * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
	 *
	 * NOTE(review): id[] is in CPU byte order at this point, so the
	 * cpu_to_le16() on the mask looks wrong for big-endian builds
	 * (it would clear bit 0 instead of bit 8). Harmless on the
	 * little-endian X-Gene SoC this driver targets — TODO confirm
	 * against libata's IDENTIFY byte-swapping before changing.
	 */
	id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));

	return 0;
}
266*4882a593Smuzhiyun
/**
 * xgene_ahci_set_phy_cfg - Program the per-channel PHY/port configuration.
 * @ctx: X-Gene AHCI driver context
 * @channel: SATA channel (0 or 1) to configure
 *
 * Selects the channel's register window via PORTCFG, then programs the
 * PORTPHY*, PORTAXICFG and PORTRANSCFG registers. Each write is followed
 * by a read-back of the same register to post the write before the next
 * one is issued; the write order must be preserved.
 */
static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
{
	void __iomem *mmio = ctx->hpriv->mmio;
	u32 val;

	dev_dbg(ctx->dev, "port configure mmio 0x%p channel %d\n",
		mmio, channel);
	/* Channel 0 maps to port address 2, channel 1 to 3 */
	val = readl(mmio + PORTCFG);
	val = PORTADDR_SET(val, channel == 0 ? 2 : 3);
	writel(val, mmio + PORTCFG);
	readl(mmio + PORTCFG);  /* Force a barrier */
	/* Disable fix rate */
	writel(0x0001fffe, mmio + PORTPHY1CFG);
	readl(mmio + PORTPHY1CFG); /* Force a barrier */
	writel(0x28183219, mmio + PORTPHY2CFG);
	readl(mmio + PORTPHY2CFG); /* Force a barrier */
	writel(0x13081008, mmio + PORTPHY3CFG);
	readl(mmio + PORTPHY3CFG); /* Force a barrier */
	writel(0x00480815, mmio + PORTPHY4CFG);
	readl(mmio + PORTPHY4CFG); /* Force a barrier */
	/* Set window negotiation */
	val = readl(mmio + PORTPHY5CFG);
	val = PORTPHY5CFG_RTCHG_SET(val, 0x300);
	writel(val, mmio + PORTPHY5CFG);
	readl(mmio + PORTPHY5CFG); /* Force a barrier */
	val = readl(mmio + PORTAXICFG);
	val = PORTAXICFG_EN_CONTEXT_SET(val, 0x1); /* Enable context mgmt */
	val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
	writel(val, mmio + PORTAXICFG);
	readl(mmio + PORTAXICFG); /* Force a barrier */
	/* Set the watermark threshold of the receive FIFO */
	val = readl(mmio + PORTRANSCFG);
	val = PORTRANSCFG_RXWM_SET(val, 0x30);
	writel(val, mmio + PORTRANSCFG);
}
302*4882a593Smuzhiyun
303*4882a593Smuzhiyun /**
304*4882a593Smuzhiyun * xgene_ahci_do_hardreset - Issue the actual COMRESET
305*4882a593Smuzhiyun * @link: link to reset
306*4882a593Smuzhiyun * @deadline: deadline jiffies for the operation
307*4882a593Smuzhiyun * @online: Return value to indicate if device online
308*4882a593Smuzhiyun *
309*4882a593Smuzhiyun * Due to the limitation of the hardware PHY, a difference set of setting is
310*4882a593Smuzhiyun * required for each supported disk speed - Gen3 (6.0Gbps), Gen2 (3.0Gbps),
311*4882a593Smuzhiyun * and Gen1 (1.5Gbps). Otherwise during long IO stress test, the PHY will
312*4882a593Smuzhiyun * report disparity error and etc. In addition, during COMRESET, there can
313*4882a593Smuzhiyun * be error reported in the register PORT_SCR_ERR. For SERR_DISPARITY and
314*4882a593Smuzhiyun * SERR_10B_8B_ERR, the PHY receiver line must be reseted. Also during long
315*4882a593Smuzhiyun * reboot cycle regression, sometimes the PHY reports link down even if the
316*4882a593Smuzhiyun * device is present because of speed negotiation failure. so need to retry
317*4882a593Smuzhiyun * the COMRESET to get the link up. The following algorithm is followed to
318*4882a593Smuzhiyun * proper configure the hardware PHY during COMRESET:
319*4882a593Smuzhiyun *
320*4882a593Smuzhiyun * Alg Part 1:
321*4882a593Smuzhiyun * 1. Start the PHY at Gen3 speed (default setting)
322*4882a593Smuzhiyun * 2. Issue the COMRESET
323*4882a593Smuzhiyun * 3. If no link, go to Alg Part 3
324*4882a593Smuzhiyun * 4. If link up, determine if the negotiated speed matches the PHY
325*4882a593Smuzhiyun * configured speed
326*4882a593Smuzhiyun * 5. If they matched, go to Alg Part 2
327*4882a593Smuzhiyun * 6. If they do not matched and first time, configure the PHY for the linked
328*4882a593Smuzhiyun * up disk speed and repeat step 2
329*4882a593Smuzhiyun * 7. Go to Alg Part 2
330*4882a593Smuzhiyun *
331*4882a593Smuzhiyun * Alg Part 2:
332*4882a593Smuzhiyun * 1. On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR error
333*4882a593Smuzhiyun * reported in the register PORT_SCR_ERR, then reset the PHY receiver line
334*4882a593Smuzhiyun * 2. Go to Alg Part 4
335*4882a593Smuzhiyun *
336*4882a593Smuzhiyun * Alg Part 3:
337*4882a593Smuzhiyun * 1. Check the PORT_SCR_STAT to see whether device presence detected but PHY
338*4882a593Smuzhiyun * communication establishment failed and maximum link down attempts are
339*4882a593Smuzhiyun * less than Max attempts 3 then goto Alg Part 1.
340*4882a593Smuzhiyun * 2. Go to Alg Part 4.
341*4882a593Smuzhiyun *
342*4882a593Smuzhiyun * Alg Part 4:
343*4882a593Smuzhiyun * 1. Clear any pending from register PORT_SCR_ERR.
344*4882a593Smuzhiyun *
345*4882a593Smuzhiyun * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition
346*4882a593Smuzhiyun * and until the underlying PHY supports an method to reset the receiver
347*4882a593Smuzhiyun * line, on detection of SERR_DISPARITY or SERR_10B_8B_ERR errors,
348*4882a593Smuzhiyun * an warning message will be printed.
349*4882a593Smuzhiyun */
static int xgene_ahci_do_hardreset(struct ata_link *link,
				   unsigned long deadline, bool *online)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct xgene_ahci_context *ctx = hpriv->plat_data;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_taskfile tf;
	int link_down_retry = 0;
	int rc;
	u32 val, sstatus;

	/*
	 * Retry the COMRESET while PORT_SCR_STAT reports "device present
	 * but no PHY communication" (DET == 0x1), up to
	 * MAX_LINK_DOWN_RETRY times (Alg Part 1/3 in the comment above).
	 */
	do {
		/* clear D2H reception area to properly wait for D2H FIS */
		ata_tf_init(link->device, &tf);
		tf.command = ATA_BUSY;
		ata_tf_to_fis(&tf, 0, 0, d2h_fis);
		rc = sata_link_hardreset(link, timing, deadline, online,
				 ahci_check_ready);
		if (*online) {
			/*
			 * Alg Part 2: the PHY receiver line cannot be
			 * reset here, so only warn on disparity/8b10b
			 * errors (sstatus is never read on this path).
			 */
			val = readl(port_mmio + PORT_SCR_ERR);
			if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
				dev_warn(ctx->dev, "link has error\n");
			break;
		}

		sata_scr_read(link, SCR_STATUS, &sstatus);
	} while (link_down_retry++ < MAX_LINK_DOWN_RETRY &&
		 (sstatus & 0xff) == 0x1);

	/* clear all errors if any pending (Alg Part 4) */
	val = readl(port_mmio + PORT_SCR_ERR);
	writel(val, port_mmio + PORT_SCR_ERR);

	return rc;
}
389*4882a593Smuzhiyun
/**
 * xgene_ahci_hardreset - Perform a COMRESET, preserving port CSRs.
 * @link: link to reset
 * @class: Return value to indicate class of device
 * @deadline: deadline jiffies for the operation
 *
 * On this controller a hardreset clears the port command, command-list
 * and received-FIS address registers, so they are saved before and
 * restored after xgene_ahci_do_hardreset().
 *
 * Return: the result of xgene_ahci_do_hardreset().
 */
static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	bool online;
	int rc;
	u32 portcmd_saved;
	u32 portclb_saved;
	u32 portclbhi_saved;
	u32 portrxfis_saved;
	u32 portrxfishi_saved;

	/* As hardreset resets these CSR, save it to restore later */
	portcmd_saved = readl(port_mmio + PORT_CMD);
	portclb_saved = readl(port_mmio + PORT_LST_ADDR);
	portclbhi_saved = readl(port_mmio + PORT_LST_ADDR_HI);
	portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
	portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);

	hpriv->stop_engine(ap);

	rc = xgene_ahci_do_hardreset(link, deadline, &online);

	/* As controller hardreset clears them, restore them */
	writel(portcmd_saved, port_mmio + PORT_CMD);
	writel(portclb_saved, port_mmio + PORT_LST_ADDR);
	writel(portclbhi_saved, port_mmio + PORT_LST_ADDR_HI);
	writel(portrxfis_saved, port_mmio + PORT_FIS_ADDR);
	writel(portrxfishi_saved, port_mmio + PORT_FIS_ADDR_HI);

	hpriv->start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	return rc;
}
429*4882a593Smuzhiyun
xgene_ahci_host_stop(struct ata_host * host)430*4882a593Smuzhiyun static void xgene_ahci_host_stop(struct ata_host *host)
431*4882a593Smuzhiyun {
432*4882a593Smuzhiyun struct ahci_host_priv *hpriv = host->private_data;
433*4882a593Smuzhiyun
434*4882a593Smuzhiyun ahci_platform_disable_resources(hpriv);
435*4882a593Smuzhiyun }
436*4882a593Smuzhiyun
437*4882a593Smuzhiyun /**
438*4882a593Smuzhiyun * xgene_ahci_pmp_softreset - Issue the softreset to the drives connected
439*4882a593Smuzhiyun * to Port Multiplier.
440*4882a593Smuzhiyun * @link: link to reset
441*4882a593Smuzhiyun * @class: Return value to indicate class of device
442*4882a593Smuzhiyun * @deadline: deadline jiffies for the operation
443*4882a593Smuzhiyun *
444*4882a593Smuzhiyun * Due to H/W errata, the controller is unable to save the PMP
445*4882a593Smuzhiyun * field fetched from command header before sending the H2D FIS.
446*4882a593Smuzhiyun * When the device returns the PMP port field in the D2H FIS, there is
447*4882a593Smuzhiyun * a mismatch and results in command completion failure. The workaround
448*4882a593Smuzhiyun * is to write the pmp value to PxFBS.DEV field before issuing any command
449*4882a593Smuzhiyun * to PMP.
450*4882a593Smuzhiyun */
xgene_ahci_pmp_softreset(struct ata_link * link,unsigned int * class,unsigned long deadline)451*4882a593Smuzhiyun static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
452*4882a593Smuzhiyun unsigned long deadline)
453*4882a593Smuzhiyun {
454*4882a593Smuzhiyun int pmp = sata_srst_pmp(link);
455*4882a593Smuzhiyun struct ata_port *ap = link->ap;
456*4882a593Smuzhiyun u32 rc;
457*4882a593Smuzhiyun void *port_mmio = ahci_port_base(ap);
458*4882a593Smuzhiyun u32 port_fbs;
459*4882a593Smuzhiyun
460*4882a593Smuzhiyun /*
461*4882a593Smuzhiyun * Set PxFBS.DEV field with pmp
462*4882a593Smuzhiyun * value.
463*4882a593Smuzhiyun */
464*4882a593Smuzhiyun port_fbs = readl(port_mmio + PORT_FBS);
465*4882a593Smuzhiyun port_fbs &= ~PORT_FBS_DEV_MASK;
466*4882a593Smuzhiyun port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
467*4882a593Smuzhiyun writel(port_fbs, port_mmio + PORT_FBS);
468*4882a593Smuzhiyun
469*4882a593Smuzhiyun rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
470*4882a593Smuzhiyun
471*4882a593Smuzhiyun return rc;
472*4882a593Smuzhiyun }
473*4882a593Smuzhiyun
474*4882a593Smuzhiyun /**
475*4882a593Smuzhiyun * xgene_ahci_softreset - Issue the softreset to the drive.
476*4882a593Smuzhiyun * @link: link to reset
477*4882a593Smuzhiyun * @class: Return value to indicate class of device
478*4882a593Smuzhiyun * @deadline: deadline jiffies for the operation
479*4882a593Smuzhiyun *
480*4882a593Smuzhiyun * Due to H/W errata, the controller is unable to save the PMP
481*4882a593Smuzhiyun * field fetched from command header before sending the H2D FIS.
482*4882a593Smuzhiyun * When the device returns the PMP port field in the D2H FIS, there is
483*4882a593Smuzhiyun * a mismatch and results in command completion failure. The workaround
484*4882a593Smuzhiyun * is to write the pmp value to PxFBS.DEV field before issuing any command
485*4882a593Smuzhiyun * to PMP. Here is the algorithm to detect PMP :
486*4882a593Smuzhiyun *
487*4882a593Smuzhiyun * 1. Save the PxFBS value
488*4882a593Smuzhiyun * 2. Program PxFBS.DEV with pmp value send by framework. Framework sends
489*4882a593Smuzhiyun * 0xF for both PMP/NON-PMP initially
490*4882a593Smuzhiyun * 3. Issue softreset
491*4882a593Smuzhiyun * 4. If signature class is PMP goto 6
492*4882a593Smuzhiyun * 5. restore the original PxFBS and goto 3
493*4882a593Smuzhiyun * 6. return
494*4882a593Smuzhiyun */
xgene_ahci_softreset(struct ata_link * link,unsigned int * class,unsigned long deadline)495*4882a593Smuzhiyun static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
496*4882a593Smuzhiyun unsigned long deadline)
497*4882a593Smuzhiyun {
498*4882a593Smuzhiyun int pmp = sata_srst_pmp(link);
499*4882a593Smuzhiyun struct ata_port *ap = link->ap;
500*4882a593Smuzhiyun struct ahci_host_priv *hpriv = ap->host->private_data;
501*4882a593Smuzhiyun struct xgene_ahci_context *ctx = hpriv->plat_data;
502*4882a593Smuzhiyun void *port_mmio = ahci_port_base(ap);
503*4882a593Smuzhiyun u32 port_fbs;
504*4882a593Smuzhiyun u32 port_fbs_save;
505*4882a593Smuzhiyun u32 retry = 1;
506*4882a593Smuzhiyun u32 rc;
507*4882a593Smuzhiyun
508*4882a593Smuzhiyun port_fbs_save = readl(port_mmio + PORT_FBS);
509*4882a593Smuzhiyun
510*4882a593Smuzhiyun /*
511*4882a593Smuzhiyun * Set PxFBS.DEV field with pmp
512*4882a593Smuzhiyun * value.
513*4882a593Smuzhiyun */
514*4882a593Smuzhiyun port_fbs = readl(port_mmio + PORT_FBS);
515*4882a593Smuzhiyun port_fbs &= ~PORT_FBS_DEV_MASK;
516*4882a593Smuzhiyun port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
517*4882a593Smuzhiyun writel(port_fbs, port_mmio + PORT_FBS);
518*4882a593Smuzhiyun
519*4882a593Smuzhiyun softreset_retry:
520*4882a593Smuzhiyun rc = ahci_do_softreset(link, class, pmp,
521*4882a593Smuzhiyun deadline, ahci_check_ready);
522*4882a593Smuzhiyun
523*4882a593Smuzhiyun ctx->class[ap->port_no] = *class;
524*4882a593Smuzhiyun if (*class != ATA_DEV_PMP) {
525*4882a593Smuzhiyun /*
526*4882a593Smuzhiyun * Retry for normal drives without
527*4882a593Smuzhiyun * setting PxFBS.DEV field with pmp value.
528*4882a593Smuzhiyun */
529*4882a593Smuzhiyun if (retry--) {
530*4882a593Smuzhiyun writel(port_fbs_save, port_mmio + PORT_FBS);
531*4882a593Smuzhiyun goto softreset_retry;
532*4882a593Smuzhiyun }
533*4882a593Smuzhiyun }
534*4882a593Smuzhiyun
535*4882a593Smuzhiyun return rc;
536*4882a593Smuzhiyun }
537*4882a593Smuzhiyun
538*4882a593Smuzhiyun /**
539*4882a593Smuzhiyun * xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
540*4882a593Smuzhiyun * @ata_host: Host that recieved the irq
541*4882a593Smuzhiyun * @irq_masked: HOST_IRQ_STAT value
542*4882a593Smuzhiyun *
543*4882a593Smuzhiyun * For hardware with broken edge trigger latch
544*4882a593Smuzhiyun * the HOST_IRQ_STAT register misses the edge interrupt
545*4882a593Smuzhiyun * when clearing of HOST_IRQ_STAT register and hardware
546*4882a593Smuzhiyun * reporting the PORT_IRQ_STAT register at the
547*4882a593Smuzhiyun * same clock cycle.
548*4882a593Smuzhiyun * As such, the algorithm below outlines the workaround.
549*4882a593Smuzhiyun *
550*4882a593Smuzhiyun * 1. Read HOST_IRQ_STAT register and save the state.
551*4882a593Smuzhiyun * 2. Clear the HOST_IRQ_STAT register.
552*4882a593Smuzhiyun * 3. Read back the HOST_IRQ_STAT register.
553*4882a593Smuzhiyun * 4. If HOST_IRQ_STAT register equals to zero, then
554*4882a593Smuzhiyun * traverse the rest of port's PORT_IRQ_STAT register
555*4882a593Smuzhiyun * to check if an interrupt is triggered at that point else
556*4882a593Smuzhiyun * go to step 6.
557*4882a593Smuzhiyun * 5. If PORT_IRQ_STAT register of rest ports is not equal to zero
558*4882a593Smuzhiyun * then update the state of HOST_IRQ_STAT saved in step 1.
559*4882a593Smuzhiyun * 6. Handle port interrupts.
560*4882a593Smuzhiyun * 7. Exit
561*4882a593Smuzhiyun */
/**
 * xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
 * @host: Host that received the irq
 * @irq_masked: HOST_IRQ_STAT value masked by the port map
 *
 * The edge-triggered HOST_IRQ_STAT latch can miss a port event that is
 * raised in the same clock cycle the latch is cleared. If HOST_IRQ_STAT
 * now reads zero, sweep the PORT_IRQ_STAT of every port not already in
 * @irq_masked and fold any pending ones in before dispatching.
 *
 * Return: number of port interrupts handled by ahci_handle_port_intr().
 */
static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
					     u32 irq_masked)
{
	struct ahci_host_priv *hpriv = host->private_data;
	int port;

	if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
		for (port = 0; port < host->n_ports; port++) {
			void __iomem *pmmio;

			if (irq_masked & (1 << port))
				continue;

			pmmio = ahci_port_base(host->ports[port]);
			if (readl(pmmio + PORT_IRQ_STAT))
				irq_masked |= (1 << port);
		}
	}

	return ahci_handle_port_intr(host, irq_masked);
}
582*4882a593Smuzhiyun
/*
 * Top-level interrupt handler. Clears HOST_IRQ_STAT *before* the per-port
 * events are handled (edge-triggered latch), then dispatches through the
 * broken-edge workaround above.
 */
static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int rc = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_masked;

	VPRINTK("ENTER\n");

	hpriv = host->private_data;
	mmio = hpriv->mmio;

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	if (!irq_stat)
		return IRQ_NONE;

	/* Only consider ports the controller actually implements */
	irq_masked = irq_stat & hpriv->port_map;

	spin_lock(&host->lock);

	/*
	 * HOST_IRQ_STAT behaves as edge triggered latch meaning that
	 * it should be cleared before all the port events are cleared.
	 */
	writel(irq_stat, mmio + HOST_IRQ_STAT);

	rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);

	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(rc);
}
619*4882a593Smuzhiyun
620*4882a593Smuzhiyun static struct ata_port_operations xgene_ahci_v1_ops = {
621*4882a593Smuzhiyun .inherits = &ahci_ops,
622*4882a593Smuzhiyun .host_stop = xgene_ahci_host_stop,
623*4882a593Smuzhiyun .hardreset = xgene_ahci_hardreset,
624*4882a593Smuzhiyun .read_id = xgene_ahci_read_id,
625*4882a593Smuzhiyun .qc_issue = xgene_ahci_qc_issue,
626*4882a593Smuzhiyun .softreset = xgene_ahci_softreset,
627*4882a593Smuzhiyun .pmp_softreset = xgene_ahci_pmp_softreset
628*4882a593Smuzhiyun };
629*4882a593Smuzhiyun
630*4882a593Smuzhiyun static const struct ata_port_info xgene_ahci_v1_port_info = {
631*4882a593Smuzhiyun .flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
632*4882a593Smuzhiyun .pio_mask = ATA_PIO4,
633*4882a593Smuzhiyun .udma_mask = ATA_UDMA6,
634*4882a593Smuzhiyun .port_ops = &xgene_ahci_v1_ops,
635*4882a593Smuzhiyun };
636*4882a593Smuzhiyun
637*4882a593Smuzhiyun static struct ata_port_operations xgene_ahci_v2_ops = {
638*4882a593Smuzhiyun .inherits = &ahci_ops,
639*4882a593Smuzhiyun .host_stop = xgene_ahci_host_stop,
640*4882a593Smuzhiyun .hardreset = xgene_ahci_hardreset,
641*4882a593Smuzhiyun .read_id = xgene_ahci_read_id,
642*4882a593Smuzhiyun };
643*4882a593Smuzhiyun
644*4882a593Smuzhiyun static const struct ata_port_info xgene_ahci_v2_port_info = {
645*4882a593Smuzhiyun .flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
646*4882a593Smuzhiyun .pio_mask = ATA_PIO4,
647*4882a593Smuzhiyun .udma_mask = ATA_UDMA6,
648*4882a593Smuzhiyun .port_ops = &xgene_ahci_v2_ops,
649*4882a593Smuzhiyun };
650*4882a593Smuzhiyun
/**
 * xgene_ahci_hw_init - one-time controller bring-up.
 * @hpriv: host private data holding the MMIO base and X-Gene context.
 *
 * Takes the IP RAM out of shutdown, configures the PHY for each channel,
 * unmasks the core/AXI interrupt sources and enables bus coherency.
 * The register write/read-back pairs are ordered deliberately; each
 * readl() forces the preceding posted write to complete.
 *
 * Return: 0 on success, or the error from xgene_ahci_init_memram().
 */
static int xgene_ahci_hw_init(struct ahci_host_priv *hpriv)
{
	struct xgene_ahci_context *ctx = hpriv->plat_data;
	int i;
	int rc;
	u32 val;

	/* Remove IP RAM out of shutdown */
	rc = xgene_ahci_init_memram(ctx);
	if (rc)
		return rc;

	/* Program the PHY configuration for every channel on this controller. */
	for (i = 0; i < MAX_AHCI_CHN_PERCTR; i++)
		xgene_ahci_set_phy_cfg(ctx, i);

	/* AXI disable Mask */
	writel(0xffffffff, hpriv->mmio + HOST_IRQ_STAT);
	readl(hpriv->mmio + HOST_IRQ_STAT); /* Force a barrier */
	writel(0, ctx->csr_core + INTSTATUSMASK);
	val = readl(ctx->csr_core + INTSTATUSMASK); /* Force a barrier */
	dev_dbg(ctx->dev, "top level interrupt mask 0x%X value 0x%08X\n",
		INTSTATUSMASK, val);

	/* Unmask error interrupts and the AXI slave timeout. */
	writel(0x0, ctx->csr_core + ERRINTSTATUSMASK);
	readl(ctx->csr_core + ERRINTSTATUSMASK); /* Force a barrier */
	writel(0x0, ctx->csr_axi + INT_SLV_TMOMASK);
	readl(ctx->csr_axi + INT_SLV_TMOMASK);

	/* Enable AXI Interrupt */
	writel(0xffffffff, ctx->csr_core + SLVRDERRATTRIBUTES);
	writel(0xffffffff, ctx->csr_core + SLVWRERRATTRIBUTES);
	writel(0xffffffff, ctx->csr_core + MSTRDERRATTRIBUTES);
	writel(0xffffffff, ctx->csr_core + MSTWRERRATTRIBUTES);

	/* Enable coherency (the coherency bits are active-low: clear to enable). */
	val = readl(ctx->csr_core + BUSCTLREG);
	val &= ~0x00000002; /* Enable write coherency */
	val &= ~0x00000001; /* Enable read coherency */
	writel(val, ctx->csr_core + BUSCTLREG);

	val = readl(ctx->csr_core + IOFMSTRWAUX);
	val |= (1 << 3); /* Enable read coherency */
	val |= (1 << 9); /* Enable write coherency */
	writel(val, ctx->csr_core + IOFMSTRWAUX);
	val = readl(ctx->csr_core + IOFMSTRWAUX);
	dev_dbg(ctx->dev, "coherency 0x%X value 0x%08X\n",
		IOFMSTRWAUX, val);

	/* rc is necessarily 0 here (checked after xgene_ahci_init_memram). */
	return rc;
}
701*4882a593Smuzhiyun
xgene_ahci_mux_select(struct xgene_ahci_context * ctx)702*4882a593Smuzhiyun static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
703*4882a593Smuzhiyun {
704*4882a593Smuzhiyun u32 val;
705*4882a593Smuzhiyun
706*4882a593Smuzhiyun /* Check for optional MUX resource */
707*4882a593Smuzhiyun if (!ctx->csr_mux)
708*4882a593Smuzhiyun return 0;
709*4882a593Smuzhiyun
710*4882a593Smuzhiyun val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
711*4882a593Smuzhiyun val &= ~CFG_SATA_ENET_SELECT_MASK;
712*4882a593Smuzhiyun writel(val, ctx->csr_mux + SATA_ENET_CONFIG_REG);
713*4882a593Smuzhiyun val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
714*4882a593Smuzhiyun return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
715*4882a593Smuzhiyun }
716*4882a593Smuzhiyun
717*4882a593Smuzhiyun static struct scsi_host_template ahci_platform_sht = {
718*4882a593Smuzhiyun AHCI_SHT(DRV_NAME),
719*4882a593Smuzhiyun };
720*4882a593Smuzhiyun
721*4882a593Smuzhiyun #ifdef CONFIG_ACPI
722*4882a593Smuzhiyun static const struct acpi_device_id xgene_ahci_acpi_match[] = {
723*4882a593Smuzhiyun { "APMC0D0D", XGENE_AHCI_V1},
724*4882a593Smuzhiyun { "APMC0D32", XGENE_AHCI_V2},
725*4882a593Smuzhiyun {},
726*4882a593Smuzhiyun };
727*4882a593Smuzhiyun MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match);
728*4882a593Smuzhiyun #endif
729*4882a593Smuzhiyun
730*4882a593Smuzhiyun static const struct of_device_id xgene_ahci_of_match[] = {
731*4882a593Smuzhiyun {.compatible = "apm,xgene-ahci", .data = (void *) XGENE_AHCI_V1},
732*4882a593Smuzhiyun {.compatible = "apm,xgene-ahci-v2", .data = (void *) XGENE_AHCI_V2},
733*4882a593Smuzhiyun {},
734*4882a593Smuzhiyun };
735*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, xgene_ahci_of_match);
736*4882a593Smuzhiyun
xgene_ahci_probe(struct platform_device * pdev)737*4882a593Smuzhiyun static int xgene_ahci_probe(struct platform_device *pdev)
738*4882a593Smuzhiyun {
739*4882a593Smuzhiyun struct device *dev = &pdev->dev;
740*4882a593Smuzhiyun struct ahci_host_priv *hpriv;
741*4882a593Smuzhiyun struct xgene_ahci_context *ctx;
742*4882a593Smuzhiyun struct resource *res;
743*4882a593Smuzhiyun const struct of_device_id *of_devid;
744*4882a593Smuzhiyun enum xgene_ahci_version version = XGENE_AHCI_V1;
745*4882a593Smuzhiyun const struct ata_port_info *ppi[] = { &xgene_ahci_v1_port_info,
746*4882a593Smuzhiyun &xgene_ahci_v2_port_info };
747*4882a593Smuzhiyun int rc;
748*4882a593Smuzhiyun
749*4882a593Smuzhiyun hpriv = ahci_platform_get_resources(pdev, 0);
750*4882a593Smuzhiyun if (IS_ERR(hpriv))
751*4882a593Smuzhiyun return PTR_ERR(hpriv);
752*4882a593Smuzhiyun
753*4882a593Smuzhiyun ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
754*4882a593Smuzhiyun if (!ctx)
755*4882a593Smuzhiyun return -ENOMEM;
756*4882a593Smuzhiyun
757*4882a593Smuzhiyun hpriv->plat_data = ctx;
758*4882a593Smuzhiyun ctx->hpriv = hpriv;
759*4882a593Smuzhiyun ctx->dev = dev;
760*4882a593Smuzhiyun
761*4882a593Smuzhiyun /* Retrieve the IP core resource */
762*4882a593Smuzhiyun res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
763*4882a593Smuzhiyun ctx->csr_core = devm_ioremap_resource(dev, res);
764*4882a593Smuzhiyun if (IS_ERR(ctx->csr_core))
765*4882a593Smuzhiyun return PTR_ERR(ctx->csr_core);
766*4882a593Smuzhiyun
767*4882a593Smuzhiyun /* Retrieve the IP diagnostic resource */
768*4882a593Smuzhiyun res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
769*4882a593Smuzhiyun ctx->csr_diag = devm_ioremap_resource(dev, res);
770*4882a593Smuzhiyun if (IS_ERR(ctx->csr_diag))
771*4882a593Smuzhiyun return PTR_ERR(ctx->csr_diag);
772*4882a593Smuzhiyun
773*4882a593Smuzhiyun /* Retrieve the IP AXI resource */
774*4882a593Smuzhiyun res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
775*4882a593Smuzhiyun ctx->csr_axi = devm_ioremap_resource(dev, res);
776*4882a593Smuzhiyun if (IS_ERR(ctx->csr_axi))
777*4882a593Smuzhiyun return PTR_ERR(ctx->csr_axi);
778*4882a593Smuzhiyun
779*4882a593Smuzhiyun /* Retrieve the optional IP mux resource */
780*4882a593Smuzhiyun res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
781*4882a593Smuzhiyun if (res) {
782*4882a593Smuzhiyun void __iomem *csr = devm_ioremap_resource(dev, res);
783*4882a593Smuzhiyun if (IS_ERR(csr))
784*4882a593Smuzhiyun return PTR_ERR(csr);
785*4882a593Smuzhiyun
786*4882a593Smuzhiyun ctx->csr_mux = csr;
787*4882a593Smuzhiyun }
788*4882a593Smuzhiyun
789*4882a593Smuzhiyun of_devid = of_match_device(xgene_ahci_of_match, dev);
790*4882a593Smuzhiyun if (of_devid) {
791*4882a593Smuzhiyun if (of_devid->data)
792*4882a593Smuzhiyun version = (enum xgene_ahci_version) of_devid->data;
793*4882a593Smuzhiyun }
794*4882a593Smuzhiyun #ifdef CONFIG_ACPI
795*4882a593Smuzhiyun else {
796*4882a593Smuzhiyun const struct acpi_device_id *acpi_id;
797*4882a593Smuzhiyun struct acpi_device_info *info;
798*4882a593Smuzhiyun acpi_status status;
799*4882a593Smuzhiyun
800*4882a593Smuzhiyun acpi_id = acpi_match_device(xgene_ahci_acpi_match, &pdev->dev);
801*4882a593Smuzhiyun if (!acpi_id) {
802*4882a593Smuzhiyun dev_warn(&pdev->dev, "No node entry in ACPI table. Assume version1\n");
803*4882a593Smuzhiyun version = XGENE_AHCI_V1;
804*4882a593Smuzhiyun } else if (acpi_id->driver_data) {
805*4882a593Smuzhiyun version = (enum xgene_ahci_version) acpi_id->driver_data;
806*4882a593Smuzhiyun status = acpi_get_object_info(ACPI_HANDLE(&pdev->dev), &info);
807*4882a593Smuzhiyun if (ACPI_FAILURE(status)) {
808*4882a593Smuzhiyun dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
809*4882a593Smuzhiyun __func__);
810*4882a593Smuzhiyun version = XGENE_AHCI_V1;
811*4882a593Smuzhiyun } else {
812*4882a593Smuzhiyun if (info->valid & ACPI_VALID_CID)
813*4882a593Smuzhiyun version = XGENE_AHCI_V2;
814*4882a593Smuzhiyun kfree(info);
815*4882a593Smuzhiyun }
816*4882a593Smuzhiyun }
817*4882a593Smuzhiyun }
818*4882a593Smuzhiyun #endif
819*4882a593Smuzhiyun
820*4882a593Smuzhiyun dev_dbg(dev, "VAddr 0x%p Mmio VAddr 0x%p\n", ctx->csr_core,
821*4882a593Smuzhiyun hpriv->mmio);
822*4882a593Smuzhiyun
823*4882a593Smuzhiyun /* Select ATA */
824*4882a593Smuzhiyun if ((rc = xgene_ahci_mux_select(ctx))) {
825*4882a593Smuzhiyun dev_err(dev, "SATA mux selection failed error %d\n", rc);
826*4882a593Smuzhiyun return -ENODEV;
827*4882a593Smuzhiyun }
828*4882a593Smuzhiyun
829*4882a593Smuzhiyun if (xgene_ahci_is_memram_inited(ctx)) {
830*4882a593Smuzhiyun dev_info(dev, "skip clock and PHY initialization\n");
831*4882a593Smuzhiyun goto skip_clk_phy;
832*4882a593Smuzhiyun }
833*4882a593Smuzhiyun
834*4882a593Smuzhiyun /* Due to errata, HW requires full toggle transition */
835*4882a593Smuzhiyun rc = ahci_platform_enable_clks(hpriv);
836*4882a593Smuzhiyun if (rc)
837*4882a593Smuzhiyun goto disable_resources;
838*4882a593Smuzhiyun ahci_platform_disable_clks(hpriv);
839*4882a593Smuzhiyun
840*4882a593Smuzhiyun rc = ahci_platform_enable_resources(hpriv);
841*4882a593Smuzhiyun if (rc)
842*4882a593Smuzhiyun goto disable_resources;
843*4882a593Smuzhiyun
844*4882a593Smuzhiyun /* Configure the host controller */
845*4882a593Smuzhiyun xgene_ahci_hw_init(hpriv);
846*4882a593Smuzhiyun skip_clk_phy:
847*4882a593Smuzhiyun
848*4882a593Smuzhiyun switch (version) {
849*4882a593Smuzhiyun case XGENE_AHCI_V1:
850*4882a593Smuzhiyun hpriv->flags = AHCI_HFLAG_NO_NCQ;
851*4882a593Smuzhiyun break;
852*4882a593Smuzhiyun case XGENE_AHCI_V2:
853*4882a593Smuzhiyun hpriv->flags |= AHCI_HFLAG_YES_FBS;
854*4882a593Smuzhiyun hpriv->irq_handler = xgene_ahci_irq_intr;
855*4882a593Smuzhiyun break;
856*4882a593Smuzhiyun default:
857*4882a593Smuzhiyun break;
858*4882a593Smuzhiyun }
859*4882a593Smuzhiyun
860*4882a593Smuzhiyun rc = ahci_platform_init_host(pdev, hpriv, ppi[version - 1],
861*4882a593Smuzhiyun &ahci_platform_sht);
862*4882a593Smuzhiyun if (rc)
863*4882a593Smuzhiyun goto disable_resources;
864*4882a593Smuzhiyun
865*4882a593Smuzhiyun dev_dbg(dev, "X-Gene SATA host controller initialized\n");
866*4882a593Smuzhiyun return 0;
867*4882a593Smuzhiyun
868*4882a593Smuzhiyun disable_resources:
869*4882a593Smuzhiyun ahci_platform_disable_resources(hpriv);
870*4882a593Smuzhiyun return rc;
871*4882a593Smuzhiyun }
872*4882a593Smuzhiyun
873*4882a593Smuzhiyun static struct platform_driver xgene_ahci_driver = {
874*4882a593Smuzhiyun .probe = xgene_ahci_probe,
875*4882a593Smuzhiyun .remove = ata_platform_remove_one,
876*4882a593Smuzhiyun .driver = {
877*4882a593Smuzhiyun .name = DRV_NAME,
878*4882a593Smuzhiyun .of_match_table = xgene_ahci_of_match,
879*4882a593Smuzhiyun .acpi_match_table = ACPI_PTR(xgene_ahci_acpi_match),
880*4882a593Smuzhiyun },
881*4882a593Smuzhiyun };
882*4882a593Smuzhiyun
883*4882a593Smuzhiyun module_platform_driver(xgene_ahci_driver);
884*4882a593Smuzhiyun
885*4882a593Smuzhiyun MODULE_DESCRIPTION("APM X-Gene AHCI SATA driver");
886*4882a593Smuzhiyun MODULE_AUTHOR("Loc Ho <lho@apm.com>");
887*4882a593Smuzhiyun MODULE_LICENSE("GPL");
888*4882a593Smuzhiyun MODULE_VERSION("0.4");
889