/*
 * Broadcom specific AMBA
 * PCI Core
 *
 * Copyright 2005, 2011, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>

/**************************************************
 * R/W ops.
 **************************************************/
19*4882a593Smuzhiyun
/*
 * Read a PCIe core register via the indirect address/data register pair.
 * The read-back of the address register flushes the address write before
 * the data register is sampled.
 */
u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);	/* flush address write */
	return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
}
26*4882a593Smuzhiyun
/*
 * Write a PCIe core register via the indirect address/data register pair.
 * The intermediate read flushes the address write before the data write.
 */
static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);	/* flush address write */
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}
33*4882a593Smuzhiyun
/*
 * Select the SERDES PHY (block address) for subsequent MDIO accesses,
 * then poll (up to ~200 ms) for the controller to report completion.
 * Completion is best-effort: a timeout is silently ignored.
 */
static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u16 phy)
{
	u32 ctl;
	int retry;

	ctl = BCMA_CORE_PCI_MDIODATA_START |
	      BCMA_CORE_PCI_MDIODATA_WRITE |
	      (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
	       BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF) |
	      (BCMA_CORE_PCI_MDIODATA_BLK_ADDR <<
	       BCMA_CORE_PCI_MDIODATA_REGADDR_SHF) |
	      BCMA_CORE_PCI_MDIODATA_TA |
	      (phy << 4);
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, ctl);

	udelay(10);
	for (retry = 0; retry < 200; retry++) {
		if (pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL) &
		    BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
			break;
		usleep_range(1000, 2000);
	}
}
57*4882a593Smuzhiyun
/*
 * Read one SERDES register over MDIO.
 *
 * Core revs >= 10 address the PHY via bcma_pcie_mdio_set_phy() and use
 * the new dev/reg shift layout; older cores encode device and register
 * with the *_OLD shifts. Returns the register value, or 0 if the
 * transaction never reported completion within the retry budget.
 */
static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address)
{
	int retries = 10;
	u16 result = 0;
	u32 ctl;
	int i;

	/* Enable MDIO access to the SERDES. */
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL,
			BCMA_CORE_PCI_MDIOCTL_PREAM_EN |
			BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL);

	if (pc->core->id.rev >= 10) {
		retries = 200;
		bcma_pcie_mdio_set_phy(pc, device);
		ctl = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
		       BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF) |
		      (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	} else {
		ctl = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD) |
		      (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
	}

	ctl |= BCMA_CORE_PCI_MDIODATA_START |
	       BCMA_CORE_PCI_MDIODATA_READ |
	       BCMA_CORE_PCI_MDIODATA_TA;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, ctl);

	/* Wait for the device to complete the transaction. */
	udelay(10);
	for (i = 0; i < retries; i++) {
		if (pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL) &
		    BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) {
			udelay(10);
			result = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
			break;
		}
		usleep_range(1000, 2000);
	}

	/* Disable MDIO access again. */
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
	return result;
}
100*4882a593Smuzhiyun
/*
 * Write one SERDES register over MDIO.
 *
 * Addressing mirrors bcma_pcie_mdio_read(): core revs >= 10 use
 * bcma_pcie_mdio_set_phy() plus the new shift layout, older cores the
 * *_OLD shifts. Completion is polled best-effort; a timeout is ignored.
 */
static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device,
				 u8 address, u16 data)
{
	int retries = 10;
	u32 ctl;
	int i;

	/* Enable MDIO access to the SERDES. */
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL,
			BCMA_CORE_PCI_MDIOCTL_PREAM_EN |
			BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL);

	if (pc->core->id.rev >= 10) {
		retries = 200;
		bcma_pcie_mdio_set_phy(pc, device);
		ctl = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
		       BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF) |
		      (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	} else {
		ctl = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD) |
		      (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
	}

	ctl |= BCMA_CORE_PCI_MDIODATA_START |
	       BCMA_CORE_PCI_MDIODATA_WRITE |
	       BCMA_CORE_PCI_MDIODATA_TA |
	       data;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, ctl);

	/* Wait for the device to complete the transaction. */
	udelay(10);
	for (i = 0; i < retries; i++) {
		if (pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL) &
		    BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
			break;
		usleep_range(1000, 2000);
	}

	/* Disable MDIO access again. */
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
}
139*4882a593Smuzhiyun
/* Write a SERDES register over MDIO, then read it back. */
static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
				    u8 address, u16 data)
{
	bcma_pcie_mdio_write(pc, device, address, data);
	return bcma_pcie_mdio_read(pc, device, address);
}
146*4882a593Smuzhiyun
/**************************************************
 * Early init.
 **************************************************/
150*4882a593Smuzhiyun
bcma_core_pci_fixcfg(struct bcma_drv_pci * pc)151*4882a593Smuzhiyun static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
152*4882a593Smuzhiyun {
153*4882a593Smuzhiyun struct bcma_device *core = pc->core;
154*4882a593Smuzhiyun u16 val16, core_index;
155*4882a593Smuzhiyun uint regoff;
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
158*4882a593Smuzhiyun core_index = (u16)core->core_index;
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun val16 = pcicore_read16(pc, regoff);
161*4882a593Smuzhiyun if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
162*4882a593Smuzhiyun != core_index) {
163*4882a593Smuzhiyun val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
164*4882a593Smuzhiyun (val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
165*4882a593Smuzhiyun pcicore_write16(pc, regoff, val16);
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun }
168*4882a593Smuzhiyun
/*
 * Apply some early fixes required before accessing SPROM.
 * See also si_pci_fixcfg.
 */
bcma_core_pci_early_init(struct bcma_drv_pci * pc)173*4882a593Smuzhiyun void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
174*4882a593Smuzhiyun {
175*4882a593Smuzhiyun if (pc->early_setup_done)
176*4882a593Smuzhiyun return;
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
179*4882a593Smuzhiyun if (pc->hostmode)
180*4882a593Smuzhiyun goto out;
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun bcma_core_pci_fixcfg(pc);
183*4882a593Smuzhiyun
184*4882a593Smuzhiyun out:
185*4882a593Smuzhiyun pc->early_setup_done = true;
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun
/**************************************************
 * Workarounds.
 **************************************************/
191*4882a593Smuzhiyun
bcma_pcicore_polarity_workaround(struct bcma_drv_pci * pc)192*4882a593Smuzhiyun static u8 bcma_pcicore_polarity_workaround(struct bcma_drv_pci *pc)
193*4882a593Smuzhiyun {
194*4882a593Smuzhiyun u32 tmp;
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun tmp = bcma_pcie_read(pc, BCMA_CORE_PCI_PLP_STATUSREG);
197*4882a593Smuzhiyun if (tmp & BCMA_CORE_PCI_PLP_POLARITYINV_STAT)
198*4882a593Smuzhiyun return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE |
199*4882a593Smuzhiyun BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY;
200*4882a593Smuzhiyun else
201*4882a593Smuzhiyun return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE;
202*4882a593Smuzhiyun }
203*4882a593Smuzhiyun
bcma_pcicore_serdes_workaround(struct bcma_drv_pci * pc)204*4882a593Smuzhiyun static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
205*4882a593Smuzhiyun {
206*4882a593Smuzhiyun u16 tmp;
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_RX,
209*4882a593Smuzhiyun BCMA_CORE_PCI_SERDES_RX_CTRL,
210*4882a593Smuzhiyun bcma_pcicore_polarity_workaround(pc));
211*4882a593Smuzhiyun tmp = bcma_pcie_mdio_read(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
212*4882a593Smuzhiyun BCMA_CORE_PCI_SERDES_PLL_CTRL);
213*4882a593Smuzhiyun if (tmp & BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN)
214*4882a593Smuzhiyun bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
215*4882a593Smuzhiyun BCMA_CORE_PCI_SERDES_PLL_CTRL,
216*4882a593Smuzhiyun tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun
/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
/* Needs to happen when coming out of 'standby'/'hibernate' */
bcma_core_pci_config_fixup(struct bcma_drv_pci * pc)221*4882a593Smuzhiyun static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
222*4882a593Smuzhiyun {
223*4882a593Smuzhiyun u16 val16;
224*4882a593Smuzhiyun uint regoff;
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_MISC_CONFIG);
227*4882a593Smuzhiyun
228*4882a593Smuzhiyun val16 = pcicore_read16(pc, regoff);
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun if (!(val16 & BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST)) {
231*4882a593Smuzhiyun val16 |= BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST;
232*4882a593Smuzhiyun pcicore_write16(pc, regoff, val16);
233*4882a593Smuzhiyun }
234*4882a593Smuzhiyun }
235*4882a593Smuzhiyun
/**************************************************
 * Init.
 **************************************************/
239*4882a593Smuzhiyun
/* Client-mode setup: SERDES workaround, then the MISC config fixup. */
static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
	bcma_pcicore_serdes_workaround(pc);
	bcma_core_pci_config_fixup(pc);
}
245*4882a593Smuzhiyun
/*
 * Full PCIe core init: run the early init (idempotent), then dispatch to
 * the host-mode or client-mode setup path.
 *
 * NOTE(review): setup_done is checked here but never set in this
 * function — presumably the hostmode/clientmode init paths (not visible
 * in this file) set it; verify, otherwise this re-runs on every call.
 */
void bcma_core_pci_init(struct bcma_drv_pci *pc)
{
	if (pc->setup_done)
		return;

	bcma_core_pci_early_init(pc);

	if (pc->hostmode)
		bcma_core_pci_hostmode_init(pc);
	else
		bcma_core_pci_clientmode_init(pc);
}
258*4882a593Smuzhiyun
/*
 * Toggle SERDES power-save via the MDIO BLK1 management registers.
 * Only applies to PCI-hosted buses with core revs 15-22; the register
 * values differ between the 15-20 and 21-22 revision ranges.
 */
void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
{
	struct bcma_drv_pci *pc;
	u16 mgmt1, mgmt3;

	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
		return;

	pc = &bus->drv_pci[0];

	if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
		mgmt1 = 0x7F64;
		mgmt3 = up ? 0x74 : 0x7C;
	} else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
		mgmt1 = 0x7E65;
		mgmt3 = up ? 0x75 : 0x7D;
	} else {
		return;
	}

	bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
				 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, mgmt1);
	bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
				 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, mgmt3);
}
EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
284*4882a593Smuzhiyun
/*
 * Set or clear the ASPM-timer extend bit in the DLLP PM threshold
 * register; the trailing read flushes the write.
 */
static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
{
	u32 val = bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);

	val = extend ? (val | BCMA_CORE_PCI_ASPMTIMER_EXTEND)
		     : (val & ~BCMA_CORE_PCI_ASPMTIMER_EXTEND);

	bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, val);
	bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);	/* flush */
}
297*4882a593Smuzhiyun
/* Bring the PCIe link up for use: extend the ASPM L1 timer. */
void bcma_core_pci_up(struct bcma_drv_pci *pc)
{
	bcma_core_pci_extend_L1timer(pc, true);
}
302*4882a593Smuzhiyun
/* Release the PCIe link: restore the normal (short) ASPM L1 timer. */
void bcma_core_pci_down(struct bcma_drv_pci *pc)
{
	bcma_core_pci_extend_L1timer(pc, false);
}
307