/*
 * Broadcom specific AMBA
 * PCI Host
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/slab.h>
#include <linux/bcma/bcma.h>
#include <linux/pci.h>
#include <linux/module.h>

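/*
 * BAR 0 of the PCI(e) bridge is divided into 4 KiB (BCMA_CORE_SIZE)
 * windows. Window 0 is a sliding window that can be pointed at any
 * core's register space via the BCMA_PCI_BAR0_WIN config register; the
 * window at 1 * BCMA_CORE_SIZE tracks the selected core's agent/wrapper
 * registers (BCMA_PCI_BAR0_WIN2, or BCMA_PCIE2_BAR0_WIN2 on PCIe Gen 2
 * hosts).
 */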
static void bcma_host_pci_switch_core(struct bcma_device *core)
{
	int win2 = core->bus->host_is_pcie2 ?
		BCMA_PCIE2_BAR0_WIN2 : BCMA_PCI_BAR0_WIN2;

	pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
			       core->addr);
	pci_write_config_dword(core->bus->host_pci, win2, core->wrap);
	core->bus->mapped_core = core;
	bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
}

/* Provide access to the requested core and return the base offset that
 * has to be used. Fixed windows are used when possible. */
static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
{
	switch (core->id.id) {
	case BCMA_CORE_CHIPCOMMON:
		return 3 * BCMA_CORE_SIZE;
	case BCMA_CORE_PCIE:
		return 2 * BCMA_CORE_SIZE;
	}

	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return 0;
}

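/*
 * Single-register accessors: map the target core first (through a fixed
 * window where one exists, otherwise by re-pointing sliding window 0),
 * then issue the MMIO access at the returned base offset.
 */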
static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	return ioread8(core->bus->mmio + offset);
}

static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	return ioread16(core->bus->mmio + offset);
}

static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	return ioread32(core->bus->mmio + offset);
}

static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
				 u8 value)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	iowrite8(value, core->bus->mmio + offset);
}

static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
				  u16 value)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	iowrite16(value, core->bus->mmio + offset);
}

static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
				  u32 value)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	iowrite32(value, core->bus->mmio + offset);
}

#ifdef CONFIG_BCMA_BLOCKIO
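/*
 * Block transfers always go through sliding window 0 (no fixed windows),
 * so only the core mapping is checked here; the WARN_ON()s catch
 * transfer lengths that are not a multiple of the register width.
 */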
static void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
				     size_t count, u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;

	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	switch (reg_width) {
	case sizeof(u8):
		ioread8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);
		ioread16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);
		ioread32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);
	}
}

static void bcma_host_pci_block_write(struct bcma_device *core,
				      const void *buffer, size_t count,
				      u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;

	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	switch (reg_width) {
	case sizeof(u8):
		iowrite8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);
		iowrite16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);
		iowrite32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);
	}
}
#endif /* CONFIG_BCMA_BLOCKIO */

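/*
 * Agent/wrapper register access: the window at 1 * BCMA_CORE_SIZE
 * follows the sliding window and always shows the wrapper registers of
 * the currently mapped core.
 */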
static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return ioread32(core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}

static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
				   u32 value)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}

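/*
 * Callback table handed to the generic bcma bus code; every register
 * access performed by core drivers funnels through these hooks.
 */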
static const struct bcma_host_ops bcma_host_pci_ops = {
	.read8		= bcma_host_pci_read8,
	.read16		= bcma_host_pci_read16,
	.read32		= bcma_host_pci_read32,
	.write8		= bcma_host_pci_write8,
	.write16	= bcma_host_pci_write16,
	.write32	= bcma_host_pci_write32,
#ifdef CONFIG_BCMA_BLOCKIO
	.block_read	= bcma_host_pci_block_read,
	.block_write	= bcma_host_pci_block_write,
#endif
	.aread32	= bcma_host_pci_aread32,
	.awrite32	= bcma_host_pci_awrite32,
};

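/*
 * Probe sequence: enable the PCI device, claim its regions, map BAR 0
 * (which exposes the windows used above), then let the generic bcma
 * code detect the chip, scan the cores and register them. pci_iomap()
 * is called with no practical length limit (~0UL), so the entire BAR 0
 * is mapped.
 */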
static int bcma_host_pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *id)
{
	struct bcma_bus *bus;
	int err = -ENOMEM;
	const char *name;
	u32 val;

	/* Alloc */
	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		goto out;

	/* Basic PCI configuration */
	err = pci_enable_device(dev);
	if (err)
		goto err_kfree_bus;

	name = dev_name(&dev->dev);
	if (dev->driver && dev->driver->name)
		name = dev->driver->name;
	err = pci_request_regions(dev, name);
	if (err)
		goto err_pci_disable;
	pci_set_master(dev);

	/* Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(dev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(dev, 0x40, val & 0xffff00ff);

	/* SSB devices needed additional powering up on plain PCI; do any
	 * plain-PCI AMBA cards exist at all? */
	if (!pci_is_pcie(dev)) {
		bcma_err(bus, "PCI card detected, these are not supported.\n");
		err = -ENXIO;
		goto err_pci_release_regions;
	}

	bus->dev = &dev->dev;

	/* Map MMIO */
	err = -ENOMEM;
	bus->mmio = pci_iomap(dev, 0, ~0UL);
	if (!bus->mmio)
		goto err_pci_release_regions;

	/* Host specific */
	bus->host_pci = dev;
	bus->hosttype = BCMA_HOSTTYPE_PCI;
	bus->ops = &bcma_host_pci_ops;

	bus->boardinfo.vendor = bus->host_pci->subsystem_vendor;
	bus->boardinfo.type = bus->host_pci->subsystem_device;

	/* Initialize struct, detect chip */
	bcma_init_bus(bus);

	/* Scan bus to find out generation of PCIe core */
	err = bcma_bus_scan(bus);
	if (err)
		goto err_pci_unmap_mmio;

	if (bcma_find_core(bus, BCMA_CORE_PCIE2))
		bus->host_is_pcie2 = true;

	/* Register */
	err = bcma_bus_register(bus);
	if (err)
		goto err_unregister_cores;

	pci_set_drvdata(dev, bus);

out:
	return err;

err_unregister_cores:
	bcma_unregister_cores(bus);
err_pci_unmap_mmio:
	pci_iounmap(dev, bus->mmio);
err_pci_release_regions:
	pci_release_regions(dev);
err_pci_disable:
	pci_disable_device(dev);
err_kfree_bus:
	kfree(bus);
	return err;
}

static void bcma_host_pci_remove(struct pci_dev *dev)
{
	struct bcma_bus *bus = pci_get_drvdata(dev);

	bcma_bus_unregister(bus);
	pci_iounmap(dev, bus->mmio);
	pci_release_regions(dev);
	pci_disable_device(dev);
	kfree(bus);
}

#ifdef CONFIG_PM_SLEEP
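/*
 * The BAR0 window configuration may be lost across the power
 * transition, so forget the cached mapping on suspend; the first access
 * after resume then re-programs the window via
 * bcma_host_pci_switch_core().
 */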
static int bcma_host_pci_suspend(struct device *dev)
{
	struct bcma_bus *bus = dev_get_drvdata(dev);

	bus->mapped_core = NULL;

	return bcma_bus_suspend(bus);
}

static int bcma_host_pci_resume(struct device *dev)
{
	struct bcma_bus *bus = dev_get_drvdata(dev);

	return bcma_bus_resume(bus);
}

static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
			 bcma_host_pci_resume);
#define BCMA_PM_OPS	(&bcma_pm_ops)

#else /* CONFIG_PM_SLEEP */

#define BCMA_PM_OPS	NULL

#endif /* CONFIG_PM_SLEEP */

static const struct pci_device_id bcma_pci_bridge_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },	/* 0xa8d8 */
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0018) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_FOXCONN, 0xe092) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_HP, 0x804a) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43b1) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },	/* 0xa8db, BCM43217 (sic!) */
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) },	/* 0xa8dc */
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);

static struct pci_driver bcma_pci_bridge_driver = {
	.name = "bcma-pci-bridge",
	.id_table = bcma_pci_bridge_tbl,
	.probe = bcma_host_pci_probe,
	.remove = bcma_host_pci_remove,
	.driver.pm = BCMA_PM_OPS,
};

int __init bcma_host_pci_init(void)
{
	return pci_register_driver(&bcma_pci_bridge_driver);
}

void __exit bcma_host_pci_exit(void)
{
	pci_unregister_driver(&bcma_pci_bridge_driver);
}

/**************************************************
 * Runtime ops for drivers.
 **************************************************/

/* See also pcicore_up */
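/*
 * Exported helpers intended for wireless drivers: (re)configure the
 * host PCI(e) core around device up/down transitions, dispatching to
 * the PCIe Gen 1 or Gen 2 core driver as detected during bus scan.
 */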
void bcma_host_pci_up(struct bcma_bus *bus)
{
	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
		return;

	if (bus->host_is_pcie2)
		bcma_core_pcie2_up(&bus->drv_pcie2);
	else
		bcma_core_pci_up(&bus->drv_pci[0]);
}
EXPORT_SYMBOL_GPL(bcma_host_pci_up);

/* See also pcicore_down */
void bcma_host_pci_down(struct bcma_bus *bus)
{
	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
		return;

	if (!bus->host_is_pcie2)
		bcma_core_pci_down(&bus->drv_pci[0]);
}
EXPORT_SYMBOL_GPL(bcma_host_pci_down);

/* See also si_pci_setup */
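/*
 * The per-core interrupt routing enables live in the upper bits of the
 * BCMA_PCI_IRQMASK config register, one bit per core starting at bit 8.
 */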
int bcma_host_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
			  bool enable)
{
	struct pci_dev *pdev;
	u32 coremask, tmp;
	int err = 0;

	if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
		/* This bcma device is not on a PCI host bus, so its IRQs
		 * are not routed through the PCI core and routing must not
		 * be enabled here. */
		goto out;
	}

	pdev = bus->host_pci;

	err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
	if (err)
		goto out;

	coremask = BIT(core->core_index) << 8;
	if (enable)
		tmp |= coremask;
	else
		tmp &= ~coremask;

	err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);

out:
	return err;
}
EXPORT_SYMBOL_GPL(bcma_host_pci_irq_ctl);