// SPDX-License-Identifier: GPL-2.0
/*
 * Alchemy PCI host mode support.
 *
 * Copyright 2001-2003, 2007-2008 MontaVista Software Inc.
 * Author: MontaVista Software, Inc. <source@mvista.com>
 *
 * Support for all devices (greater than 16) added by David Gathright.
 */
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <linux/clk.h>
12*4882a593Smuzhiyun #include <linux/export.h>
13*4882a593Smuzhiyun #include <linux/types.h>
14*4882a593Smuzhiyun #include <linux/pci.h>
15*4882a593Smuzhiyun #include <linux/platform_device.h>
16*4882a593Smuzhiyun #include <linux/kernel.h>
17*4882a593Smuzhiyun #include <linux/init.h>
18*4882a593Smuzhiyun #include <linux/syscore_ops.h>
19*4882a593Smuzhiyun #include <linux/vmalloc.h>
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun #include <asm/dma-coherence.h>
22*4882a593Smuzhiyun #include <asm/mach-au1x00/au1000.h>
23*4882a593Smuzhiyun #include <asm/tlbmisc.h>
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun #ifdef CONFIG_PCI_DEBUG
26*4882a593Smuzhiyun #define DBG(x...) printk(KERN_DEBUG x)
27*4882a593Smuzhiyun #else
28*4882a593Smuzhiyun #define DBG(x...) do {} while (0)
29*4882a593Smuzhiyun #endif
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun #define PCI_ACCESS_READ 0
32*4882a593Smuzhiyun #define PCI_ACCESS_WRITE 1
33*4882a593Smuzhiyun
/* Per-controller state. There is exactly one instance (see __alchemy_pci_ctx). */
struct alchemy_pci_context {
	struct pci_controller alchemy_pci_ctrl; /* leave as first member! */
	void __iomem *regs;			/* ctrl base */
	/* tools for wired entry for config space access */
	unsigned long last_elo0;	/* EntryLo0 currently in the wired TLB entry */
	unsigned long last_elo1;	/* EntryLo1 currently in the wired TLB entry */
	int wired_entry;		/* TLB index of the wired config-space entry */
	struct vm_struct *pci_cfg_vm;	/* virtual window the wired entry maps */

	/* controller register contents saved by alchemy_pci_suspend() and
	 * written back in alchemy_pci_resume(); indices must stay in sync
	 * between the two functions.
	 */
	unsigned long pm[12];

	/* board-supplied hooks: IRQ routing and optional off-chip IDSEL */
	int (*board_map_irq)(const struct pci_dev *d, u8 slot, u8 pin);
	int (*board_pci_idsel)(unsigned int devsel, int assert);
};
48*4882a593Smuzhiyun
/* for syscore_ops. There's only one PCI controller on Alchemy chips, so this
 * should suffice for now.
 */
static struct alchemy_pci_context *__alchemy_pci_ctx;


/* IO/MEM resources for PCI. Keep the memres in sync with fixup_bigphys_addr
 * in arch/mips/alchemy/common/setup.c
 */
static struct resource alchemy_pci_def_memres = {
	.start	= ALCHEMY_PCI_MEMWIN_START,
	.end	= ALCHEMY_PCI_MEMWIN_END,
	.name	= "PCI memory space",
	.flags	= IORESOURCE_MEM
};

static struct resource alchemy_pci_def_iores = {
	.start	= ALCHEMY_PCI_IOWIN_START,
	.end	= ALCHEMY_PCI_IOWIN_END,
	.name	= "PCI IO space",
	.flags	= IORESOURCE_IO
};
71*4882a593Smuzhiyun
/* Rewrite the wired TLB entry at index @entry with new EntryLo0/EntryLo1/
 * EntryHi/PageMask values via an indexed TLB write.  EntryHi (ASID) and
 * PageMask are saved and restored so the running context is undisturbed.
 * Called from config_access() with interrupts already disabled.
 */
static void mod_wired_entry(int entry, unsigned long entrylo0,
		unsigned long entrylo1, unsigned long entryhi,
		unsigned long pagemask)
{
	unsigned long old_pagemask;
	unsigned long old_ctx;

	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & MIPS_ENTRYHI_ASID;
	old_pagemask = read_c0_pagemask();
	write_c0_index(entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	tlb_write_indexed();
	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
}
91*4882a593Smuzhiyun
/* Reserve one wired TLB entry mapping the config-space window at
 * ctx->pci_cfg_vm->addr.  Invalidating last_elo0/1 (~0 can never match a
 * real EntryLo value) forces config_access() to program the entry on its
 * next use.
 */
static void alchemy_pci_wired_entry(struct alchemy_pci_context *ctx)
{
	ctx->wired_entry = read_c0_wired();
	add_wired_entry(0, 0, (unsigned long)ctx->pci_cfg_vm->addr, PM_4K);
	ctx->last_elo0 = ctx->last_elo1 = ~0;
}
98*4882a593Smuzhiyun
/* Perform one 32-bit PCI config space read or write.
 *
 * The config space region is too large to ioremap(); instead a single
 * wired TLB entry (installed by alchemy_pci_wired_entry()) is re-pointed
 * at the page holding the target device's config header.  Runs with IRQs
 * off because it mutates the shared wired entry and the controller's
 * status register.
 *
 * Returns PCIBIOS_SUCCESSFUL, or -1 on IDSEL failure, master abort or a
 * reported PCI error; *data is set to 0xffffffff in all failure cases.
 */
static int config_access(unsigned char access_type, struct pci_bus *bus,
			 unsigned int dev_fn, unsigned char where, u32 *data)
{
	struct alchemy_pci_context *ctx = bus->sysdata;
	unsigned int device = PCI_SLOT(dev_fn);
	unsigned int function = PCI_FUNC(dev_fn);
	unsigned long offset, status, cfg_base, flags, entryLo0, entryLo1, r;
	int error = PCIBIOS_SUCCESSFUL;

	/* only device numbers up to 19 can be addressed on this controller */
	if (device > 19) {
		*data = 0xffffffff;
		return -1;
	}

	local_irq_save(flags);
	/* clear stale status bits before the access so errors detected
	 * below belong to this transaction
	 */
	r = __raw_readl(ctx->regs + PCI_REG_STATCMD) & 0x0000ffff;
	r |= PCI_STATCMD_STATUS(0x2000);
	__raw_writel(r, ctx->regs + PCI_REG_STATCMD);
	wmb();

	/* Allow board vendors to implement their own off-chip IDSEL.
	 * If it doesn't succeed, may as well bail out at this point.
	 */
	if (ctx->board_pci_idsel(device, 1) == 0) {
		*data = 0xffffffff;
		local_irq_restore(flags);
		return -1;
	}

	/* Setup the config window: one address bit per device on bus 0,
	 * encoded bus/device numbers for everything behind a bridge.
	 */
	if (bus->number == 0)
		cfg_base = (1 << device) << 11;
	else
		cfg_base = 0x80000000 | (bus->number << 16) | (device << 11);

	/* Setup the lower bits of the 36-bit address */
	offset = (function << 8) | (where & ~0x3);
	/* Pick up any address that falls below the page mask */
	offset |= cfg_base & ~PAGE_MASK;

	/* Page boundary */
	cfg_base = cfg_base & PAGE_MASK;

	/* To improve performance, if the current device is the same as
	 * the last device accessed, we don't touch the TLB.
	 */
	entryLo0 = (6 << 26) | (cfg_base >> 6) | (2 << 3) | 7;
	entryLo1 = (6 << 26) | (cfg_base >> 6) | (0x1000 >> 6) | (2 << 3) | 7;
	if ((entryLo0 != ctx->last_elo0) || (entryLo1 != ctx->last_elo1)) {
		mod_wired_entry(ctx->wired_entry, entryLo0, entryLo1,
				(unsigned long)ctx->pci_cfg_vm->addr, PM_4K);
		ctx->last_elo0 = entryLo0;
		ctx->last_elo1 = entryLo1;
	}

	if (access_type == PCI_ACCESS_WRITE)
		__raw_writel(*data, ctx->pci_cfg_vm->addr + offset);
	else
		*data = __raw_readl(ctx->pci_cfg_vm->addr + offset);
	wmb();

	DBG("alchemy-pci: cfg access %d bus %u dev %u at %x dat %x conf %lx\n",
	    access_type, bus->number, device, where, *data, offset);

	/* check for errors, master abort */
	status = __raw_readl(ctx->regs + PCI_REG_STATCMD);
	if (status & (1 << 29)) {
		/* master abort: no device responded at this address */
		*data = 0xffffffff;
		error = -1;
		DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n",
		    access_type, bus->number, device);
	} else if ((status >> 28) & 0xf) {
		DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
		    device, (status >> 28) & 0xf);

		/* clear errors */
		__raw_writel(status & 0xf000ffff, ctx->regs + PCI_REG_STATCMD);

		*data = 0xffffffff;
		error = -1;
	}

	/* Take away the IDSEL. */
	(void)ctx->board_pci_idsel(device, 0);

	local_irq_restore(flags);
	return error;
}
187*4882a593Smuzhiyun
/* Read one byte of config space by fetching the containing dword and
 * extracting the addressed byte lane.
 */
static int read_config_byte(struct pci_bus *bus, unsigned int devfn,
			    int where, u8 *val)
{
	u32 data;
	int rc = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);

	/* byte N of the dword sits at bit offset N * 8 */
	*val = (data >> (8 * (where & 3))) & 0xff;
	return rc;
}
201*4882a593Smuzhiyun
/* Read one 16-bit word of config space by fetching the containing dword
 * and selecting the upper or lower half.
 */
static int read_config_word(struct pci_bus *bus, unsigned int devfn,
			    int where, u16 *val)
{
	u32 data;
	int rc = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);

	/* (where & 2) is 0 or 2, giving a shift of 0 or 16 bits */
	*val = (data >> (8 * (where & 2))) & 0xffff;
	return rc;
}
213*4882a593Smuzhiyun
/* Read one 32-bit dword of config space; thin wrapper around config_access(). */
static int read_config_dword(struct pci_bus *bus, unsigned int devfn,
			     int where, u32 *val)
{
	return config_access(PCI_ACCESS_READ, bus, devfn, where, val);
}
219*4882a593Smuzhiyun
/* Write one byte of config space via read-modify-write of the containing
 * dword (the controller only does 32-bit config accesses).
 * Returns -1 if either the read or the write fails.
 */
static int write_config_byte(struct pci_bus *bus, unsigned int devfn,
			     int where, u8 val)
{
	u32 data = 0;
	unsigned int shift = (where & 3) << 3;

	if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
		return -1;

	/* splice the new byte into its lane */
	data &= ~(0xff << shift);
	data |= val << shift;

	if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}
236*4882a593Smuzhiyun
/* Write one 16-bit word of config space via read-modify-write of the
 * containing dword.  Returns -1 if either config access fails.
 */
static int write_config_word(struct pci_bus *bus, unsigned int devfn,
			     int where, u16 val)
{
	u32 data = 0;
	unsigned int shift = (where & 3) << 3;

	if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
		return -1;

	/* splice the new halfword into place */
	data &= ~(0xffff << shift);
	data |= val << shift;

	if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}
253*4882a593Smuzhiyun
/* Write one 32-bit dword of config space; thin wrapper around config_access(). */
static int write_config_dword(struct pci_bus *bus, unsigned int devfn,
			      int where, u32 val)
{
	return config_access(PCI_ACCESS_WRITE, bus, devfn, where, &val);
}
259*4882a593Smuzhiyun
/* pci_ops .read entry point: dispatch to the byte/word/dword helper by
 * access size and widen the result into *val.
 */
static int alchemy_pci_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	int rc;

	if (size == 1) {
		u8 b;

		rc = read_config_byte(bus, devfn, where, &b);
		*val = b;
	} else if (size == 2) {
		u16 w;

		rc = read_config_word(bus, devfn, where, &w);
		*val = w;
	} else {
		/* anything else is treated as a full dword access */
		rc = read_config_dword(bus, devfn, where, val);
	}

	return rc;
}
282*4882a593Smuzhiyun
/* pci_ops .write entry point: dispatch to the byte/word/dword helper by
 * access size, truncating val as needed.
 */
static int alchemy_pci_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	if (size == 1)
		return write_config_byte(bus, devfn, where, (u8) val);
	if (size == 2)
		return write_config_word(bus, devfn, where, (u16) val);

	/* anything else is treated as a full dword access */
	return write_config_dword(bus, devfn, where, val);
}
295*4882a593Smuzhiyun
/* config-space accessors registered with the PCI core via pci_controller */
static struct pci_ops alchemy_pci_ops = {
	.read	= alchemy_pci_read,
	.write	= alchemy_pci_write,
};
300*4882a593Smuzhiyun
/* Default IDSEL hook used when the board provides none: on-chip IDSEL
 * needs no extra work, so always report success.
 */
static int alchemy_pci_def_idsel(unsigned int devsel, int assert)
{
	return 1;	/* success */
}
305*4882a593Smuzhiyun
306*4882a593Smuzhiyun /* save PCI controller register contents. */
alchemy_pci_suspend(void)307*4882a593Smuzhiyun static int alchemy_pci_suspend(void)
308*4882a593Smuzhiyun {
309*4882a593Smuzhiyun struct alchemy_pci_context *ctx = __alchemy_pci_ctx;
310*4882a593Smuzhiyun if (!ctx)
311*4882a593Smuzhiyun return 0;
312*4882a593Smuzhiyun
313*4882a593Smuzhiyun ctx->pm[0] = __raw_readl(ctx->regs + PCI_REG_CMEM);
314*4882a593Smuzhiyun ctx->pm[1] = __raw_readl(ctx->regs + PCI_REG_CONFIG) & 0x0009ffff;
315*4882a593Smuzhiyun ctx->pm[2] = __raw_readl(ctx->regs + PCI_REG_B2BMASK_CCH);
316*4882a593Smuzhiyun ctx->pm[3] = __raw_readl(ctx->regs + PCI_REG_B2BBASE0_VID);
317*4882a593Smuzhiyun ctx->pm[4] = __raw_readl(ctx->regs + PCI_REG_B2BBASE1_SID);
318*4882a593Smuzhiyun ctx->pm[5] = __raw_readl(ctx->regs + PCI_REG_MWMASK_DEV);
319*4882a593Smuzhiyun ctx->pm[6] = __raw_readl(ctx->regs + PCI_REG_MWBASE_REV_CCL);
320*4882a593Smuzhiyun ctx->pm[7] = __raw_readl(ctx->regs + PCI_REG_ID);
321*4882a593Smuzhiyun ctx->pm[8] = __raw_readl(ctx->regs + PCI_REG_CLASSREV);
322*4882a593Smuzhiyun ctx->pm[9] = __raw_readl(ctx->regs + PCI_REG_PARAM);
323*4882a593Smuzhiyun ctx->pm[10] = __raw_readl(ctx->regs + PCI_REG_MBAR);
324*4882a593Smuzhiyun ctx->pm[11] = __raw_readl(ctx->regs + PCI_REG_TIMEOUT);
325*4882a593Smuzhiyun
326*4882a593Smuzhiyun return 0;
327*4882a593Smuzhiyun }
328*4882a593Smuzhiyun
/* Restore the register contents saved by alchemy_pci_suspend().  The
 * CONFIG register (pm[1]) is deliberately written last, after a barrier,
 * so the controller is re-enabled only once all other registers hold
 * valid values again.
 */
static void alchemy_pci_resume(void)
{
	struct alchemy_pci_context *ctx = __alchemy_pci_ctx;
	if (!ctx)
		return;

	__raw_writel(ctx->pm[0], ctx->regs + PCI_REG_CMEM);
	__raw_writel(ctx->pm[2], ctx->regs + PCI_REG_B2BMASK_CCH);
	__raw_writel(ctx->pm[3], ctx->regs + PCI_REG_B2BBASE0_VID);
	__raw_writel(ctx->pm[4], ctx->regs + PCI_REG_B2BBASE1_SID);
	__raw_writel(ctx->pm[5], ctx->regs + PCI_REG_MWMASK_DEV);
	__raw_writel(ctx->pm[6], ctx->regs + PCI_REG_MWBASE_REV_CCL);
	__raw_writel(ctx->pm[7], ctx->regs + PCI_REG_ID);
	__raw_writel(ctx->pm[8], ctx->regs + PCI_REG_CLASSREV);
	__raw_writel(ctx->pm[9], ctx->regs + PCI_REG_PARAM);
	__raw_writel(ctx->pm[10], ctx->regs + PCI_REG_MBAR);
	__raw_writel(ctx->pm[11], ctx->regs + PCI_REG_TIMEOUT);
	wmb();
	__raw_writel(ctx->pm[1], ctx->regs + PCI_REG_CONFIG);
	wmb();

	/* YAMON on all db1xxx boards wipes the TLB and writes zero to C0_wired
	 * on resume, making it necessary to recreate it as soon as possible.
	 */
	ctx->wired_entry = 8191;	/* impossibly high value */
	alchemy_pci_wired_entry(ctx);	/* install it */
}
356*4882a593Smuzhiyun
/* late suspend / early resume hooks (run with IRQs off, after/before
 * regular device PM callbacks)
 */
static struct syscore_ops alchemy_pci_pmops = {
	.suspend	= alchemy_pci_suspend,
	.resume		= alchemy_pci_resume,
};
361*4882a593Smuzhiyun
/* Probe the Alchemy PCI controller: claim and map the control registers,
 * enable the PCI clock, map the IO window, set up the wired TLB entry for
 * config space, apply board-specific config bits and register the
 * controller with the PCI core.
 *
 * NOTE(review): the error labels are not in numeric order; they do unwind
 * the acquisitions in correct reverse order (vm_area -> virt_io -> regs ->
 * clk enable -> clk get -> mem region -> ctx).
 */
static int alchemy_pci_probe(struct platform_device *pdev)
{
	struct alchemy_pci_platdata *pd = pdev->dev.platform_data;
	struct alchemy_pci_context *ctx;
	void __iomem *virt_io;
	unsigned long val;
	struct resource *r;
	struct clk *c;
	int ret;

	/* need at least PCI IRQ mapping table */
	if (!pd) {
		dev_err(&pdev->dev, "need platform data for PCI setup\n");
		ret = -ENODEV;
		goto out;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		dev_err(&pdev->dev, "no memory for pcictl context\n");
		ret = -ENOMEM;
		goto out;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no pcictl ctrl regs resource\n");
		ret = -ENODEV;
		goto out1;
	}

	if (!request_mem_region(r->start, resource_size(r), pdev->name)) {
		dev_err(&pdev->dev, "cannot claim pci regs\n");
		ret = -ENODEV;
		goto out1;
	}

	c = clk_get(&pdev->dev, "pci_clko");
	if (IS_ERR(c)) {
		dev_err(&pdev->dev, "unable to find PCI clock\n");
		ret = PTR_ERR(c);
		goto out2;
	}

	ret = clk_prepare_enable(c);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable PCI clock\n");
		goto out6;
	}

	ctx->regs = ioremap(r->start, resource_size(r));
	if (!ctx->regs) {
		dev_err(&pdev->dev, "cannot map pci regs\n");
		ret = -ENODEV;
		goto out5;
	}

	/* map parts of the PCI IO area */
	/* REVISIT: if this changes with a newer variant (doubt it) make this
	 * a platform resource.
	 */
	virt_io = ioremap(AU1500_PCI_IO_PHYS_ADDR, 0x00100000);
	if (!virt_io) {
		dev_err(&pdev->dev, "cannot remap pci io space\n");
		ret = -ENODEV;
		goto out3;
	}
	ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io;

	/* Au1500 revisions older than AD have borked coherent PCI */
	if ((alchemy_get_cputype() == ALCHEMY_CPU_AU1500) &&
	    (read_c0_prid() < 0x01030202) &&
	    (coherentio == IO_COHERENCE_DISABLED)) {
		val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
		val |= PCI_CONFIG_NC;
		__raw_writel(val, ctx->regs + PCI_REG_CONFIG);
		wmb();
		dev_info(&pdev->dev, "non-coherent PCI on Au1500 AA/AB/AC\n");
	}

	if (pd->board_map_irq)
		ctx->board_map_irq = pd->board_map_irq;

	/* fall back to the no-op IDSEL hook when the board supplies none */
	if (pd->board_pci_idsel)
		ctx->board_pci_idsel = pd->board_pci_idsel;
	else
		ctx->board_pci_idsel = alchemy_pci_def_idsel;

	/* fill in relevant pci_controller members */
	ctx->alchemy_pci_ctrl.pci_ops = &alchemy_pci_ops;
	ctx->alchemy_pci_ctrl.mem_resource = &alchemy_pci_def_memres;
	ctx->alchemy_pci_ctrl.io_resource = &alchemy_pci_def_iores;

	/* we can't ioremap the entire pci config space because it's too large,
	 * nor can we dynamically ioremap it because some drivers use the
	 * PCI config routines from within atomic contex and that becomes a
	 * problem in get_vm_area(). Instead we use one wired TLB entry to
	 * handle all config accesses for all busses.
	 */
	ctx->pci_cfg_vm = get_vm_area(0x2000, VM_IOREMAP);
	if (!ctx->pci_cfg_vm) {
		dev_err(&pdev->dev, "unable to get vm area\n");
		ret = -ENOMEM;
		goto out4;
	}
	ctx->wired_entry = 8191;	/* impossibly high value */
	alchemy_pci_wired_entry(ctx);	/* install it */

	set_io_port_base((unsigned long)ctx->alchemy_pci_ctrl.io_map_base);

	/* board may want to modify bits in the config register, do it now */
	val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
	val &= ~pd->pci_cfg_clr;
	val |= pd->pci_cfg_set;
	val &= ~PCI_CONFIG_PD;		/* clear disable bit */
	__raw_writel(val, ctx->regs + PCI_REG_CONFIG);
	wmb();

	__alchemy_pci_ctx = ctx;
	platform_set_drvdata(pdev, ctx);
	register_syscore_ops(&alchemy_pci_pmops);
	register_pci_controller(&ctx->alchemy_pci_ctrl);

	dev_info(&pdev->dev, "PCI controller at %ld MHz\n",
		 clk_get_rate(c) / 1000000);

	return 0;

out4:
	iounmap(virt_io);
out3:
	iounmap(ctx->regs);
out5:
	clk_disable_unprepare(c);
out6:
	clk_put(c);
out2:
	release_mem_region(r->start, resource_size(r));
out1:
	kfree(ctx);
out:
	return ret;
}
505*4882a593Smuzhiyun
/* matches the "alchemy-pci" platform device registered by board code */
static struct platform_driver alchemy_pcictl_driver = {
	.probe		= alchemy_pci_probe,
	.driver = {
		.name	= "alchemy-pci",
	},
};
512*4882a593Smuzhiyun
alchemy_pci_init(void)513*4882a593Smuzhiyun static int __init alchemy_pci_init(void)
514*4882a593Smuzhiyun {
515*4882a593Smuzhiyun /* Au1500/Au1550 have PCI */
516*4882a593Smuzhiyun switch (alchemy_get_cputype()) {
517*4882a593Smuzhiyun case ALCHEMY_CPU_AU1500:
518*4882a593Smuzhiyun case ALCHEMY_CPU_AU1550:
519*4882a593Smuzhiyun return platform_driver_register(&alchemy_pcictl_driver);
520*4882a593Smuzhiyun }
521*4882a593Smuzhiyun return 0;
522*4882a593Smuzhiyun }
523*4882a593Smuzhiyun arch_initcall(alchemy_pci_init);
524*4882a593Smuzhiyun
525*4882a593Smuzhiyun
/* Arch hook: route a device/slot/pin to an IRQ number via the board's
 * mapping table; -1 when no controller context or hook is present.
 */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct alchemy_pci_context *ctx = dev->sysdata;

	if (!ctx || !ctx->board_map_irq)
		return -1;

	return ctx->board_map_irq(dev, slot, pin);
}
533*4882a593Smuzhiyun
/* Arch hook: no per-device platform fixups are needed on Alchemy. */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}
538