xref: /OK3568_Linux_fs/kernel/drivers/ata/sata_highbank.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Calxeda Highbank AHCI SATA platform driver
4*4882a593Smuzhiyun  * Copyright 2012 Calxeda, Inc.
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun #include <linux/kernel.h>
9*4882a593Smuzhiyun #include <linux/gfp.h>
10*4882a593Smuzhiyun #include <linux/module.h>
11*4882a593Smuzhiyun #include <linux/types.h>
12*4882a593Smuzhiyun #include <linux/err.h>
13*4882a593Smuzhiyun #include <linux/io.h>
14*4882a593Smuzhiyun #include <linux/spinlock.h>
15*4882a593Smuzhiyun #include <linux/device.h>
16*4882a593Smuzhiyun #include <linux/of_device.h>
17*4882a593Smuzhiyun #include <linux/of_address.h>
18*4882a593Smuzhiyun #include <linux/platform_device.h>
19*4882a593Smuzhiyun #include <linux/libata.h>
20*4882a593Smuzhiyun #include <linux/interrupt.h>
21*4882a593Smuzhiyun #include <linux/delay.h>
22*4882a593Smuzhiyun #include <linux/export.h>
23*4882a593Smuzhiyun #include <linux/gpio/consumer.h>
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #include "ahci.h"
26*4882a593Smuzhiyun 
/*
 * Combo PHY indirect register access: CPHY_MAP builds the value written to
 * the mapping register at phy_base + 0x800 (device select plus upper address
 * bits), CPHY_ADDR gives the offset of the window through which the selected
 * register is then read or written.
 */
#define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
#define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2)
/* SERDES control-register interface, reached through the combo PHY map */
#define SERDES_CR_CTL			0x80a0
#define SERDES_CR_ADDR			0x80a1
#define SERDES_CR_DATA			0x80a2
#define CR_BUSY				0x0001
#define CR_START			0x0001
#define CR_WR_RDN			0x0002
/* per-lane PHY registers; add lane * SPHY_LANE to address a given lane */
#define CPHY_TX_INPUT_STS		0x2001
#define CPHY_RX_INPUT_STS		0x2002
#define CPHY_SATA_TX_OVERRIDE		0x8000
#define CPHY_SATA_RX_OVERRIDE		0x4000
#define CPHY_TX_OVERRIDE		0x2004
#define CPHY_RX_OVERRIDE		0x2005
#define SPHY_LANE			0x100
#define SPHY_HALF_RATE			0x0001
#define CPHY_SATA_DPLL_MODE		0x0700
#define CPHY_SATA_DPLL_SHIFT		8
#define CPHY_SATA_DPLL_RESET		(1 << 11)
#define CPHY_SATA_TX_ATTEN		0x1c00
#define CPHY_SATA_TX_ATTEN_SHIFT	10
#define CPHY_PHY_COUNT			6
#define CPHY_LANE_COUNT			4
#define CPHY_PORT_COUNT			(CPHY_PHY_COUNT * CPHY_LANE_COUNT)

/* serializes the two-step (map register, then data window) PHY accesses */
static DEFINE_SPINLOCK(cphy_lock);
/* Each of the 6 phys can have up to 4 sata ports attached to it. Map 0-based
 * sata ports to their phys and then to their lanes within the phys
 */
struct phy_lane_info {
	void __iomem *phy_base;	/* mapped combo PHY registers; NULL = no PHY */
	u8 lane_mapping;	/* lane within the PHY serving this port */
	u8 phy_devs;		/* device select from the "phydev" DT property */
	u8 tx_atten;		/* TX attenuation from "calxeda,tx-atten" */
};
static struct phy_lane_info port_data[CPHY_PORT_COUNT];

/* protects sgpio_pattern while it is updated and bit-banged out */
static DEFINE_SPINLOCK(sgpio_lock);
/* indices into ecx_plat_data.sgpio_gpiod[] */
#define SCLOCK				0
#define SLOAD				1
#define SDATA				2
#define SGPIO_PINS			3
#define SGPIO_PORTS			8

struct ecx_plat_data {
	u32		n_ports;
	/* number of extra clocks that the SGPIO PIC controller expects */
	u32		pre_clocks;
	u32		post_clocks;
	struct gpio_desc *sgpio_gpiod[SGPIO_PINS];
	u32		sgpio_pattern;	/* cached LED state, 3 bits per port */
	u32		port_to_sgpio[SGPIO_PORTS];
};

/* LED signal bits within an EM LED message and their per-port SGPIO slots */
#define SGPIO_SIGNALS			3
#define ECX_ACTIVITY_BITS		0x300000
#define ECX_ACTIVITY_SHIFT		0
#define ECX_LOCATE_BITS			0x80000
#define ECX_LOCATE_SHIFT		1
#define ECX_FAULT_BITS			0x400000
#define ECX_FAULT_SHIFT			2
/*
 * Return the SGPIO pattern bit for @port's signal @shift (activity, locate
 * or fault), using the port's slot from the "calxeda,led-order" mapping.
 */
static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
				u32 shift)
{
	u32 slot = pdata->port_to_sgpio[port];

	return 1 << (slot * 3 + shift);
}
93*4882a593Smuzhiyun 
/*
 * Fold the LED state bits of an EM message for @port into the cached SGPIO
 * pattern: each of the three signals (activity, locate, fault) is set or
 * cleared in its slot depending on the corresponding bits of @state.
 */
static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
{
	static const struct {
		u32 mask;	/* bits of @state carrying this signal */
		u32 shift;	/* signal index within the port's slot */
	} signals[] = {
		{ ECX_ACTIVITY_BITS, ECX_ACTIVITY_SHIFT },
		{ ECX_LOCATE_BITS,   ECX_LOCATE_SHIFT },
		{ ECX_FAULT_BITS,    ECX_FAULT_SHIFT },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(signals); i++) {
		int bit = sgpio_bit_shift(pdata, port, signals[i].shift);

		if (state & signals[i].mask)
			pdata->sgpio_pattern |= bit;
		else
			pdata->sgpio_pattern &= ~bit;
	}
}
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun /*
117*4882a593Smuzhiyun  * Tell the LED controller that the signal has changed by raising the clock
118*4882a593Smuzhiyun  * line for 50 uS and then lowering it for 50 uS.
119*4882a593Smuzhiyun  */
ecx_led_cycle_clock(struct ecx_plat_data * pdata)120*4882a593Smuzhiyun static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
121*4882a593Smuzhiyun {
122*4882a593Smuzhiyun 	gpiod_set_value(pdata->sgpio_gpiod[SCLOCK], 1);
123*4882a593Smuzhiyun 	udelay(50);
124*4882a593Smuzhiyun 	gpiod_set_value(pdata->sgpio_gpiod[SCLOCK], 0);
125*4882a593Smuzhiyun 	udelay(50);
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun 
/*
 * Enclosure-management callback: merge the LED @state for @ap into the
 * cached SGPIO pattern and bit-bang the complete pattern out to the LED
 * controller.  Returns @size on success (also when LED messaging is not
 * enabled) or -EINVAL for an out-of-range PMP slot.
 */
static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size)
{
	struct ahci_host_priv *hpriv =  ap->host->private_data;
	struct ecx_plat_data *pdata = hpriv->plat_data;
	struct ahci_port_priv *pp = ap->private_data;
	unsigned long flags;
	int pmp, i;
	struct ahci_em_priv *emp;
	u32 sgpio_out;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
		return size;

	spin_lock_irqsave(&sgpio_lock, flags);
	/* update the cached pattern and snapshot it for shifting out */
	ecx_parse_sgpio(pdata, ap->port_no, state);
	sgpio_out = pdata->sgpio_pattern;
	/* extra leading clocks the SGPIO PIC expects (DT "calxeda,pre-clocks") */
	for (i = 0; i < pdata->pre_clocks; i++)
		ecx_led_cycle_clock(pdata);

	/* pulse SLOAD around one clock cycle to start a new pattern transfer */
	gpiod_set_value(pdata->sgpio_gpiod[SLOAD], 1);
	ecx_led_cycle_clock(pdata);
	gpiod_set_value(pdata->sgpio_gpiod[SLOAD], 0);
	/*
	 * bit-bang out the SGPIO pattern, by consuming a bit and then
	 * clocking it out.
	 */
	for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
		gpiod_set_value(pdata->sgpio_gpiod[SDATA], sgpio_out & 1);
		sgpio_out >>= 1;
		ecx_led_cycle_clock(pdata);
	}
	/* extra trailing clocks (DT "calxeda,post-clocks") */
	for (i = 0; i < pdata->post_clocks; i++)
		ecx_led_cycle_clock(pdata);

	/* save off new led state for port/slot */
	emp->led_state = state;

	spin_unlock_irqrestore(&sgpio_lock, flags);
	return size;
}
176*4882a593Smuzhiyun 
/*
 * Acquire the three SGPIO lines (clock, load, data) and the LED wiring
 * description from the device tree, then enable enclosure-management LED
 * messaging on @pi.  A missing GPIO is logged and skipped rather than
 * treated as fatal.
 */
static void highbank_set_em_messages(struct device *dev,
					struct ahci_host_priv *hpriv,
					struct ata_port_info *pi)
{
	struct device_node *np = dev->of_node;
	struct ecx_plat_data *pdata = hpriv->plat_data;
	int i;

	for (i = 0; i < SGPIO_PINS; i++) {
		struct gpio_desc *gpiod;

		gpiod = devm_gpiod_get_index(dev, "calxeda,sgpio", i,
					     GPIOD_OUT_HIGH);
		if (IS_ERR(gpiod)) {
			dev_err(dev, "failed to get GPIO %d\n", i);
			/* sgpio_gpiod[i] stays NULL (pdata is kzalloc'ed) */
			continue;
		}
		gpiod_set_consumer_name(gpiod, "CX SGPIO");

		pdata->sgpio_gpiod[i] = gpiod;
	}
	/* all three properties are optional; zeroed pdata is the default */
	of_property_read_u32_array(np, "calxeda,led-order",
						pdata->port_to_sgpio,
						pdata->n_ports);
	if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
		pdata->pre_clocks = 0;
	if (of_property_read_u32(np, "calxeda,post-clocks",
				&pdata->post_clocks))
		pdata->post_clocks = 0;

	/* store em_loc */
	hpriv->em_loc = 0;
	hpriv->em_buf_sz = 4;
	hpriv->em_msg_type = EM_MSG_TYPE_LED;
	pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
}
213*4882a593Smuzhiyun 
/*
 * Raw combo PHY read: select the target register through the map window at
 * phy_base + 0x800, then read it back, all under cphy_lock so the two MMIO
 * accesses stay paired.
 */
static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
{
	struct phy_lane_info *info = &port_data[sata_port];
	u32 val;

	spin_lock(&cphy_lock);
	writel(CPHY_MAP(info->phy_devs, addr), info->phy_base + 0x800);
	val = readl(info->phy_base + CPHY_ADDR(addr));
	spin_unlock(&cphy_lock);
	return val;
}
224*4882a593Smuzhiyun 
/*
 * Raw combo PHY write: select the target register through the map window at
 * phy_base + 0x800, then write @data, all under cphy_lock so the two MMIO
 * accesses stay paired.
 */
static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
{
	struct phy_lane_info *info = &port_data[sata_port];

	spin_lock(&cphy_lock);
	writel(CPHY_MAP(info->phy_devs, addr), info->phy_base + 0x800);
	writel(data, info->phy_base + CPHY_ADDR(addr));
	spin_unlock(&cphy_lock);
}
233*4882a593Smuzhiyun 
/* Busy-wait (5 us steps) until the SERDES control interface clears CR_BUSY. */
static void combo_phy_wait_for_ready(u8 sata_port)
{
	for (;;) {
		if (!(__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY))
			break;
		udelay(5);
	}
}
239*4882a593Smuzhiyun 
/*
 * Indirect SERDES register read: latch the address, strobe CR_START to
 * launch the read, wait for the interface to go idle, then fetch the data.
 */
static u32 combo_phy_read(u8 sata_port, u32 addr)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
	combo_phy_wait_for_ready(sata_port);
	return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
}
248*4882a593Smuzhiyun 
/*
 * Indirect SERDES register write: latch address and data, then strobe
 * CR_WR_RDN | CR_START.  Completion is not waited for here; the next
 * combo_phy_read()/combo_phy_write() waits for CR_BUSY first.
 */
static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
}
256*4882a593Smuzhiyun 
/*
 * Drop the RX override for @sata_port's lane so the PHY follows its input
 * signals again.  No-op for ports without a mapped PHY.
 *
 * NOTE(review): the value is read from CPHY_RX_INPUT_STS but written to
 * CPHY_RX_OVERRIDE, matching the first step of cphy_override_rx_mode() --
 * presumably the status register mirrors the override layout; confirm
 * against the PHY documentation.
 */
static void highbank_cphy_disable_overrides(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
}
267*4882a593Smuzhiyun 
/*
 * Force the SATA TX attenuation for @sata_port's lane to @val: clear the
 * TX override bit, re-assert it, then program the attenuation field while
 * the override is active.  A @val with bit 3 set is treated as "leave the
 * hardware default" -- assumption from the early return; confirm against
 * the PHY documentation.
 */
static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;

	if (val & 0x8)
		return;

	tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
}
286*4882a593Smuzhiyun 
/*
 * Override the RX DPLL mode for @sata_port's lane: toggle the RX override
 * bit off and back on, program the DPLL mode field to @val, pulse the DPLL
 * reset bit, and give the DPLL 15 ms to settle.  The exact write sequence
 * is preserved as-is; the hardware appears to require each step to be
 * committed separately.
 */
static void cphy_override_rx_mode(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	/* program the requested DPLL mode while the override is active */
	tmp &= ~CPHY_SATA_DPLL_MODE;
	tmp |= val << CPHY_SATA_DPLL_SHIFT;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	/* pulse DPLL reset to latch the new mode */
	tmp |= CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp &= ~CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	msleep(15);
}
310*4882a593Smuzhiyun 
/*
 * Re-apply the lane overrides after a link reset: poll (bounded to ~1000
 * iterations) for the lane to leave half-rate, then force DPLL mode 3 and
 * the board-configured TX attenuation.  No-op for ports without a mapped
 * PHY.
 */
static void highbank_cphy_override_lane(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp, k = 0;

	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	do {
		tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
						lane * SPHY_LANE);
	} while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
	cphy_override_rx_mode(sata_port, 3);
	cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
}
325*4882a593Smuzhiyun 
/*
 * Walk the "calxeda,port-phys" phandle list and record, for each SATA port,
 * the combo PHY mapping, lane number, PHY device select ("phydev") and TX
 * attenuation ("calxeda,tx-atten").  Each PHY register block is iomapped
 * once and shared between ports on the same PHY.  Always returns 0; boards
 * without the properties simply leave port_data[] zeroed, which the cphy
 * helpers treat as "no PHY".
 *
 * Fixes vs. previous revision:
 *  - of_node_put() was missing on the of_iomap() failure path, leaking the
 *    OF node reference taken by of_parse_phandle_with_args();
 *  - tmp was read uninitialized when the optional "phydev" property is
 *    absent (of_property_read_u32() leaves its output untouched on error),
 *    putting garbage into port_data[port].phy_devs; default it to 0.
 */
static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
{
	struct device_node *sata_node = dev->of_node;
	int phy_count = 0, phy, port = 0, i;
	void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
	struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
	u32 tx_atten[CPHY_PORT_COUNT] = {};

	memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);

	do {
		u32 tmp = 0;	/* default phydev when the property is absent */
		struct of_phandle_args phy_data;
		if (of_parse_phandle_with_args(sata_node,
				"calxeda,port-phys", "#phy-cells",
				port, &phy_data))
			break;
		/* reuse the mapping of an already-seen PHY node if possible */
		for (phy = 0; phy < phy_count; phy++) {
			if (phy_nodes[phy] == phy_data.np)
				break;
		}
		if (phy_nodes[phy] == NULL) {
			phy_nodes[phy] = phy_data.np;
			cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
			if (cphy_base[phy] == NULL) {
				/* drop the reference taken by the parse above */
				of_node_put(phy_data.np);
				return 0;
			}
			phy_count += 1;
		}
		port_data[port].lane_mapping = phy_data.args[0];
		of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
		port_data[port].phy_devs = tmp;
		port_data[port].phy_base = cphy_base[phy];
		of_node_put(phy_data.np);
		port += 1;
	} while (port < CPHY_PORT_COUNT);
	of_property_read_u32_array(sata_node, "calxeda,tx-atten",
				tx_atten, port);
	for (i = 0; i < port; i++)
		port_data[i].tx_atten = (u8) tx_atten[i];
	return 0;
}
368*4882a593Smuzhiyun 
369*4882a593Smuzhiyun /*
370*4882a593Smuzhiyun  * The Calxeda SATA phy intermittently fails to bring up a link with Gen3
371*4882a593Smuzhiyun  * Retrying the phy hard reset can work around the issue, but the drive
372*4882a593Smuzhiyun  * may fail again. In less than 150 out of 15000 test runs, it took more
373*4882a593Smuzhiyun  * than 10 tries for the link to be established (but never more than 35).
374*4882a593Smuzhiyun  * Triple the maximum observed retry count to provide plenty of margin for
375*4882a593Smuzhiyun  * rare events and to guarantee that the link is established.
376*4882a593Smuzhiyun  *
377*4882a593Smuzhiyun  * Also, the default 2 second time-out on a failed drive is too long in
378*4882a593Smuzhiyun  * this situation. The uboot implementation of the same driver function
379*4882a593Smuzhiyun  * uses a much shorter time-out period and never experiences a time out
380*4882a593Smuzhiyun  * issue. Reducing the time-out to 500ms improves the responsiveness.
381*4882a593Smuzhiyun  * The other timing constants were kept the same as the stock AHCI driver.
382*4882a593Smuzhiyun  * This change was also tested 15000 times on 24 drives and none of them
383*4882a593Smuzhiyun  * experienced a time out.
384*4882a593Smuzhiyun  */
/*
 * Hard reset with PHY-override handling and bounded retries; see the block
 * comment above for the rationale behind the retry count and time-out.
 */
static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	/* same as stock AHCI except the final dead-line: 500 ms, not 2 s */
	static const unsigned long timing[] = { 5, 100, 500};
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	u32 sstatus;
	int rc;
	int retry = 100;	/* ~3x the worst retry count ever observed */

	hpriv->stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = ATA_BUSY;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	do {
		/* release the PHY overrides around the reset, re-apply after */
		highbank_cphy_disable_overrides(link->ap->port_no);
		rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
		highbank_cphy_override_lane(link->ap->port_no);

		/* If the status is 1, we are connected, but the link did not
		 * come up. So retry resetting the link again.
		 */
		if (sata_scr_read(link, SCR_STATUS, &sstatus))
			break;
		/* SStatus.DET == 0: nothing attached, retrying won't help */
		if (!(sstatus & 0x3))
			break;
	} while (!online && retry--);

	hpriv->start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	return rc;
}
427*4882a593Smuzhiyun 
/* stock AHCI ops plus the Highbank hard reset and SGPIO LED transmit hook */
static struct ata_port_operations ahci_highbank_ops = {
	.inherits		= &ahci_ops,
	.hardreset		= ahci_highbank_hardreset,
	.transmit_led_message   = ecx_transmit_led_message,
};

static const struct ata_port_info ahci_highbank_port_info = {
	.flags          = AHCI_FLAG_COMMON,
	.pio_mask       = ATA_PIO4,
	.udma_mask      = ATA_UDMA6,
	.port_ops       = &ahci_highbank_ops,
};

static struct scsi_host_template ahci_highbank_platform_sht = {
	AHCI_SHT("sata_highbank"),
};

static const struct of_device_id ahci_of_match[] = {
	{ .compatible = "calxeda,hb-ahci" },
	{},	/* sentinel */
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
450*4882a593Smuzhiyun 
/*
 * Probe: map the AHCI MMIO region, set up the combo PHYs from the device
 * tree, size the host from CAP/port_map, wire up enclosure management, and
 * activate the libata host.  All resources are devm-managed, so error
 * paths simply return.
 */
static int ahci_highbank_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ecx_plat_data *pdata;
	struct ata_host *host;
	struct resource *mem;
	int irq;
	int i;
	int rc;
	u32 n_ports;
	struct ata_port_info pi = ahci_highbank_port_info;
	const struct ata_port_info *ppi[] = { &pi, NULL };

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no irq\n");
		return irq;	/* propagate the error (e.g. probe deferral) */
	}
	if (!irq)
		return -EINVAL;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		dev_err(dev, "can't alloc ahci_host_priv\n");
		return -ENOMEM;
	}
	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "can't alloc ecx_plat_data\n");
		return -ENOMEM;
	}

	hpriv->irq = irq;
	hpriv->flags |= (unsigned long)pi.private_data;

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		return -ENOMEM;
	}

	rc = highbank_initialize_phys(dev, hpriv->mmio);
	if (rc)
		return rc;


	ahci_save_initial_config(dev, hpriv);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	if (hpriv->cap & HOST_CAP_64)
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	pdata->n_ports = n_ports;
	hpriv->plat_data = pdata;
	highbank_set_em_messages(dev, hpriv, &pi);

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		rc = -ENOMEM;
		goto err0;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		goto err0;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	rc = ahci_host_activate(host, &ahci_highbank_platform_sht);
	if (rc)
		goto err0;

	return 0;
err0:
	/* everything above is devm-managed, so nothing to unwind here */
	return rc;
}
568*4882a593Smuzhiyun 
569*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
/*
 * PM suspend: refuse on firmware revisions that cannot resume
 * (AHCI_HFLAG_NO_SUSPEND), mask HBA interrupts as the AHCI spec requires,
 * then suspend the libata host.
 */
static int ahci_highbank_suspend(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 ctl;

	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
		dev_err(dev, "firmware update required for suspend/resume\n");
		return -EIO;
	}

	/*
	 * AHCI spec rev1.1 section 8.3.3:
	 * Software must disable interrupts prior to requesting a
	 * transition of the HBA to D3 state.
	 */
	ctl = readl(mmio + HOST_CTL);
	ctl &= ~HOST_IRQ_EN;
	writel(ctl, mmio + HOST_CTL);
	readl(mmio + HOST_CTL); /* flush */

	return ata_host_suspend(host, PMSG_SUSPEND);
}
594*4882a593Smuzhiyun 
ahci_highbank_resume(struct device * dev)595*4882a593Smuzhiyun static int ahci_highbank_resume(struct device *dev)
596*4882a593Smuzhiyun {
597*4882a593Smuzhiyun 	struct ata_host *host = dev_get_drvdata(dev);
598*4882a593Smuzhiyun 	int rc;
599*4882a593Smuzhiyun 
600*4882a593Smuzhiyun 	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
601*4882a593Smuzhiyun 		rc = ahci_reset_controller(host);
602*4882a593Smuzhiyun 		if (rc)
603*4882a593Smuzhiyun 			return rc;
604*4882a593Smuzhiyun 
605*4882a593Smuzhiyun 		ahci_init_controller(host);
606*4882a593Smuzhiyun 	}
607*4882a593Smuzhiyun 
608*4882a593Smuzhiyun 	ata_host_resume(host);
609*4882a593Smuzhiyun 
610*4882a593Smuzhiyun 	return 0;
611*4882a593Smuzhiyun }
612*4882a593Smuzhiyun #endif
613*4882a593Smuzhiyun 
/* suspend/resume only compiled in under CONFIG_PM_SLEEP (see #ifdef above) */
static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
		  ahci_highbank_suspend, ahci_highbank_resume);

static struct platform_driver ahci_highbank_driver = {
	.remove = ata_platform_remove_one,
        .driver = {
                .name = "highbank-ahci",
                .of_match_table = ahci_of_match,
                .pm = &ahci_highbank_pm_ops,
        },
	.probe = ahci_highbank_probe,
};

module_platform_driver(ahci_highbank_driver);

MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("sata:highbank");
633