1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * IO workarounds for PCI on Celleb/Cell platform
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * (C) Copyright 2006-2007 TOSHIBA CORPORATION
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #undef DEBUG
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include <linux/kernel.h>
11*4882a593Smuzhiyun #include <linux/of_platform.h>
12*4882a593Smuzhiyun #include <linux/slab.h>
13*4882a593Smuzhiyun #include <linux/io.h>
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #include <asm/ppc-pci.h>
16*4882a593Smuzhiyun #include <asm/pci-bridge.h>
17*4882a593Smuzhiyun #include <asm/io-workarounds.h>
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #define SPIDER_PCI_DISABLE_PREFETCH
20*4882a593Smuzhiyun
/* Per-bus private state for the spider-pci I/O workaround. */
struct spiderpci_iowa_private {
	void __iomem *regs;	/* ioremapped spider-pci register window */
};
24*4882a593Smuzhiyun
spiderpci_io_flush(struct iowa_bus * bus)25*4882a593Smuzhiyun static void spiderpci_io_flush(struct iowa_bus *bus)
26*4882a593Smuzhiyun {
27*4882a593Smuzhiyun struct spiderpci_iowa_private *priv;
28*4882a593Smuzhiyun u32 val;
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun priv = bus->private;
31*4882a593Smuzhiyun val = in_be32(priv->regs + SPIDER_PCI_DUMMY_READ);
32*4882a593Smuzhiyun iosync();
33*4882a593Smuzhiyun }
34*4882a593Smuzhiyun
/*
 * Generate a spider-flavoured MMIO read accessor: perform the normal
 * __do_##name() access, then flush the bus via a dummy read before
 * returning the value read.
 */
#define SPIDER_PCI_MMIO_READ(name, ret)					\
static ret spiderpci_##name(const PCI_IO_ADDR addr)			\
{									\
	ret val = __do_##name(addr);					\
	spiderpci_io_flush(iowa_mem_find_bus(addr));			\
	return val;							\
}
42*4882a593Smuzhiyun
/*
 * Same as SPIDER_PCI_MMIO_READ but for the string (repeated) read
 * accessors, which copy 'count' units into 'buf' and return nothing.
 */
#define SPIDER_PCI_MMIO_READ_STR(name)					\
static void spiderpci_##name(const PCI_IO_ADDR addr, void *buf,		\
			     unsigned long count)			\
{									\
	__do_##name(addr, buf, count);					\
	spiderpci_io_flush(iowa_mem_find_bus(addr));			\
}
50*4882a593Smuzhiyun
/* Instantiate flushing wrappers for every MMIO read accessor width. */
SPIDER_PCI_MMIO_READ(readb, u8)
SPIDER_PCI_MMIO_READ(readw, u16)
SPIDER_PCI_MMIO_READ(readl, u32)
SPIDER_PCI_MMIO_READ(readq, u64)
SPIDER_PCI_MMIO_READ(readw_be, u16)
SPIDER_PCI_MMIO_READ(readl_be, u32)
SPIDER_PCI_MMIO_READ(readq_be, u64)
SPIDER_PCI_MMIO_READ_STR(readsb)
SPIDER_PCI_MMIO_READ_STR(readsw)
SPIDER_PCI_MMIO_READ_STR(readsl)
61*4882a593Smuzhiyun
/* memcpy_fromio variant: do the bulk copy, then flush the source bus. */
static void spiderpci_memcpy_fromio(void *dest, const PCI_IO_ADDR src,
				    unsigned long n)
{
	__do_memcpy_fromio(dest, src, n);
	spiderpci_io_flush(iowa_mem_find_bus(src));
}
68*4882a593Smuzhiyun
spiderpci_pci_setup_chip(struct pci_controller * phb,void __iomem * regs)69*4882a593Smuzhiyun static int __init spiderpci_pci_setup_chip(struct pci_controller *phb,
70*4882a593Smuzhiyun void __iomem *regs)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun void *dummy_page_va;
73*4882a593Smuzhiyun dma_addr_t dummy_page_da;
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun #ifdef SPIDER_PCI_DISABLE_PREFETCH
76*4882a593Smuzhiyun u32 val = in_be32(regs + SPIDER_PCI_VCI_CNTL_STAT);
77*4882a593Smuzhiyun pr_debug("SPIDER_IOWA:PVCI_Control_Status was 0x%08x\n", val);
78*4882a593Smuzhiyun out_be32(regs + SPIDER_PCI_VCI_CNTL_STAT, val | 0x8);
79*4882a593Smuzhiyun #endif /* SPIDER_PCI_DISABLE_PREFETCH */
80*4882a593Smuzhiyun
81*4882a593Smuzhiyun /* setup dummy read */
82*4882a593Smuzhiyun /*
83*4882a593Smuzhiyun * On CellBlade, we can't know that which XDR memory is used by
84*4882a593Smuzhiyun * kmalloc() to allocate dummy_page_va.
85*4882a593Smuzhiyun * In order to imporve the performance, the XDR which is used to
86*4882a593Smuzhiyun * allocate dummy_page_va is the nearest the spider-pci.
87*4882a593Smuzhiyun * We have to select the CBE which is the nearest the spider-pci
88*4882a593Smuzhiyun * to allocate memory from the best XDR, but I don't know that
89*4882a593Smuzhiyun * how to do.
90*4882a593Smuzhiyun *
91*4882a593Smuzhiyun * Celleb does not have this problem, because it has only one XDR.
92*4882a593Smuzhiyun */
93*4882a593Smuzhiyun dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL);
94*4882a593Smuzhiyun if (!dummy_page_va) {
95*4882a593Smuzhiyun pr_err("SPIDERPCI-IOWA:Alloc dummy_page_va failed.\n");
96*4882a593Smuzhiyun return -1;
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun
99*4882a593Smuzhiyun dummy_page_da = dma_map_single(phb->parent, dummy_page_va,
100*4882a593Smuzhiyun PAGE_SIZE, DMA_FROM_DEVICE);
101*4882a593Smuzhiyun if (dma_mapping_error(phb->parent, dummy_page_da)) {
102*4882a593Smuzhiyun pr_err("SPIDER-IOWA:Map dummy page filed.\n");
103*4882a593Smuzhiyun kfree(dummy_page_va);
104*4882a593Smuzhiyun return -1;
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun out_be32(regs + SPIDER_PCI_DUMMY_READ_BASE, dummy_page_da);
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun return 0;
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun
spiderpci_iowa_init(struct iowa_bus * bus,void * data)112*4882a593Smuzhiyun int __init spiderpci_iowa_init(struct iowa_bus *bus, void *data)
113*4882a593Smuzhiyun {
114*4882a593Smuzhiyun void __iomem *regs = NULL;
115*4882a593Smuzhiyun struct spiderpci_iowa_private *priv;
116*4882a593Smuzhiyun struct device_node *np = bus->phb->dn;
117*4882a593Smuzhiyun struct resource r;
118*4882a593Smuzhiyun unsigned long offset = (unsigned long)data;
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun pr_debug("SPIDERPCI-IOWA:Bus initialize for spider(%pOF)\n",
121*4882a593Smuzhiyun np);
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun priv = kzalloc(sizeof(*priv), GFP_KERNEL);
124*4882a593Smuzhiyun if (!priv) {
125*4882a593Smuzhiyun pr_err("SPIDERPCI-IOWA:"
126*4882a593Smuzhiyun "Can't allocate struct spiderpci_iowa_private");
127*4882a593Smuzhiyun return -1;
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun if (of_address_to_resource(np, 0, &r)) {
131*4882a593Smuzhiyun pr_err("SPIDERPCI-IOWA:Can't get resource.\n");
132*4882a593Smuzhiyun goto error;
133*4882a593Smuzhiyun }
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun regs = ioremap(r.start + offset, SPIDER_PCI_REG_SIZE);
136*4882a593Smuzhiyun if (!regs) {
137*4882a593Smuzhiyun pr_err("SPIDERPCI-IOWA:ioremap failed.\n");
138*4882a593Smuzhiyun goto error;
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun priv->regs = regs;
141*4882a593Smuzhiyun bus->private = priv;
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun if (spiderpci_pci_setup_chip(bus->phb, regs))
144*4882a593Smuzhiyun goto error;
145*4882a593Smuzhiyun
146*4882a593Smuzhiyun return 0;
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun error:
149*4882a593Smuzhiyun kfree(priv);
150*4882a593Smuzhiyun bus->private = NULL;
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun if (regs)
153*4882a593Smuzhiyun iounmap(regs);
154*4882a593Smuzhiyun
155*4882a593Smuzhiyun return -1;
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun
/*
 * I/O-workaround accessor table: only the read-side hooks are
 * overridden, since the workaround is about flushing reads; writes use
 * the default accessors.
 */
struct ppc_pci_io spiderpci_ops = {
	.readb = spiderpci_readb,
	.readw = spiderpci_readw,
	.readl = spiderpci_readl,
	.readq = spiderpci_readq,
	.readw_be = spiderpci_readw_be,
	.readl_be = spiderpci_readl_be,
	.readq_be = spiderpci_readq_be,
	.readsb = spiderpci_readsb,
	.readsw = spiderpci_readsw,
	.readsl = spiderpci_readsl,
	.memcpy_fromio = spiderpci_memcpy_fromio,
};
171*4882a593Smuzhiyun
172