/*
 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
 * reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the NetLogic
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/console.h>
#include <linux/pci_regs.h>

#include <asm/io.h>

#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/common.h>

#include <asm/netlogic/xlr/msidef.h>
#include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h>
#include <asm/netlogic/xlr/xlr.h>

static void *pci_config_base;

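/*
 * Offset into the memory-mapped config space window: bus number in
 * bits 23:16, device/function in bits 15:8, register offset in bits 7:0.
 */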
#define pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off))

/* PCI ops */
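/*
 * Config space is accessed through the window as aligned 32-bit words;
 * PCI config data is little-endian, so the accessors below byte-swap it
 * to/from CPU byte order.
 */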
static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
        int where)
{
        u32 data;
        u32 *cfgaddr;

        cfgaddr = (u32 *)(pci_config_base +
                        pci_cfg_addr(bus->number, devfn, where & ~3));
        data = *cfgaddr;
        return cpu_to_le32(data);
}

static inline void pci_cfg_write_32bit(struct pci_bus *bus, unsigned int devfn,
        int where, u32 data)
{
        u32 *cfgaddr;

        cfgaddr = (u32 *)(pci_config_base +
                        pci_cfg_addr(bus->number, devfn, where & ~3));
        *cfgaddr = cpu_to_le32(data);
}

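/*
 * pci_ops read: reject misaligned accesses, read the containing aligned
 * 32-bit word and extract the requested byte or halfword from it.
 */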
static int nlm_pcibios_read(struct pci_bus *bus, unsigned int devfn,
        int where, int size, u32 *val)
{
        u32 data;

        if ((size == 2) && (where & 1))
                return PCIBIOS_BAD_REGISTER_NUMBER;
        else if ((size == 4) && (where & 3))
                return PCIBIOS_BAD_REGISTER_NUMBER;

        data = pci_cfg_read_32bit(bus, devfn, where);

        if (size == 1)
                *val = (data >> ((where & 3) << 3)) & 0xff;
        else if (size == 2)
                *val = (data >> ((where & 3) << 3)) & 0xffff;
        else
                *val = data;

        return PCIBIOS_SUCCESSFUL;
}

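/*
 * pci_ops write: sub-word writes are done as a read-modify-write of the
 * containing 32-bit word, since the accessors above only do word accesses.
 */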
static int nlm_pcibios_write(struct pci_bus *bus, unsigned int devfn,
        int where, int size, u32 val)
{
        u32 data;

        if ((size == 2) && (where & 1))
                return PCIBIOS_BAD_REGISTER_NUMBER;
        else if ((size == 4) && (where & 3))
                return PCIBIOS_BAD_REGISTER_NUMBER;

        data = pci_cfg_read_32bit(bus, devfn, where);

        if (size == 1)
                data = (data & ~(0xff << ((where & 3) << 3))) |
                        (val << ((where & 3) << 3));
        else if (size == 2)
                data = (data & ~(0xffff << ((where & 3) << 3))) |
                        (val << ((where & 3) << 3));
        else
                data = val;

        pci_cfg_write_32bit(bus, devfn, where, data);

        return PCIBIOS_SUCCESSFUL;
}

struct pci_ops nlm_pci_ops = {
        .read  = nlm_pcibios_read,
        .write = nlm_pcibios_write
};

static struct resource nlm_pci_mem_resource = {
        .name           = "XLR PCI MEM",
        .start          = 0xd0000000UL,         /* 256MB PCI mem @ 0xd000_0000 */
        .end            = 0xdfffffffUL,
        .flags          = IORESOURCE_MEM,
};

static struct resource nlm_pci_io_resource = {
        .name           = "XLR IO MEM",
        .start          = 0x10000000UL,         /* 16MB PCI IO @ 0x1000_0000 */
        .end            = 0x100fffffUL,
        .flags          = IORESOURCE_IO,
};

struct pci_controller nlm_pci_controller = {
        .index          = 0,
        .pci_ops        = &nlm_pci_ops,
        .mem_resource   = &nlm_pci_mem_resource,
        .mem_offset     = 0x00000000UL,
        .io_resource    = &nlm_pci_io_resource,
        .io_offset      = 0x00000000UL,
};

/*
 * The top level PCIe links on the XLS PCIe controller appear as
 * bridges. Given a device, this function finds which link it is
 * on.
 */
static struct pci_dev *xls_get_pcie_link(const struct pci_dev *dev)
{
        struct pci_bus *bus, *p;

        /* Find the bridge on bus 0 */
        bus = dev->bus;
        for (p = bus->parent; p && p->number != 0; p = p->parent)
                bus = p;

        return p ? bus->self : NULL;
}

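/*
 * Map a PCIe link number to its PIC interrupt; links 2 and 3 use
 * different PIC lines on XLS-B silicon.
 */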
static int nlm_pci_link_to_irq(int link)
{
        switch (link) {
        case 0:
                return PIC_PCIE_LINK0_IRQ;
        case 1:
                return PIC_PCIE_LINK1_IRQ;
        case 2:
                if (nlm_chip_is_xls_b())
                        return PIC_PCIE_XLSB0_LINK2_IRQ;
                else
                        return PIC_PCIE_LINK2_IRQ;
        case 3:
                if (nlm_chip_is_xls_b())
                        return PIC_PCIE_XLSB0_LINK3_IRQ;
                else
                        return PIC_PCIE_LINK3_IRQ;
        }
        WARN(1, "Unexpected link %d\n", link);
        return 0;
}

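/*
 * XLR has a single PCI-X interrupt; on XLS the IRQ is derived from the
 * slot number of the top-level link bridge the device sits behind.
 */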
static int get_irq_vector(const struct pci_dev *dev)
{
        struct pci_dev *lnk;
        int link;

        if (!nlm_chip_is_xls())
                return PIC_PCIX_IRQ;    /* for XLR just one IRQ */

        lnk = xls_get_pcie_link(dev);
        if (lnk == NULL)
                return 0;

        link = PCI_SLOT(lnk->devfn);
        return nlm_pci_link_to_irq(link);
}

#ifdef CONFIG_PCI_MSI
void arch_teardown_msi_irq(unsigned int irq)
{
}

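/*
 * MSI from a device is delivered on the same PIC interrupt as the INTx
 * of its top-level link, so the MSI descriptor is attached to that IRQ.
 */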
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
        struct msi_msg msg;
        struct pci_dev *lnk;
        int irq, ret;
        u16 val;

        /* MSI not supported on XLR */
        if (!nlm_chip_is_xls())
                return 1;

        /*
         * Enable MSI on the XLS PCIe controller bridge which was disabled
         * at enumeration, the bridge MSI capability is at 0x50
         */
        lnk = xls_get_pcie_link(dev);
        if (lnk == NULL)
                return 1;

        pci_read_config_word(lnk, 0x50 + PCI_MSI_FLAGS, &val);
        if ((val & PCI_MSI_FLAGS_ENABLE) == 0) {
                val |= PCI_MSI_FLAGS_ENABLE;
                pci_write_config_word(lnk, 0x50 + PCI_MSI_FLAGS, val);
        }

        irq = get_irq_vector(dev);
        if (irq <= 0)
                return 1;

        msg.address_hi = MSI_ADDR_BASE_HI;
        msg.address_lo = MSI_ADDR_BASE_LO |
                MSI_ADDR_DEST_MODE_PHYSICAL |
                MSI_ADDR_REDIRECTION_CPU;

        msg.data = MSI_DATA_TRIGGER_EDGE |
                MSI_DATA_LEVEL_ASSERT |
                MSI_DATA_DELIVERY_FIXED;

        ret = irq_set_msi_desc(irq, desc);
        if (ret < 0)
                return ret;

        pci_write_msi_msg(irq, &msg);
        return 0;
}
#endif

/* Extra ACK needed for XLR on chip PCI controller */
static void xlr_pci_ack(struct irq_data *d)
{
        uint64_t pcibase = nlm_mmio_base(NETLOGIC_IO_PCIX_OFFSET);

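        /* The read itself acks the interrupt; the value read is discarded */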
        nlm_read_reg(pcibase, (0x140 >> 2));
}

/* Extra ACK needed for XLS on chip PCIe controller */
static void xls_pcie_ack(struct irq_data *d)
{
        uint64_t pciebase_le = nlm_mmio_base(NETLOGIC_IO_PCIE_1_OFFSET);

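        /* Ack by writing all ones to the link's interrupt register (assumed write-1-to-clear) */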
        switch (d->irq) {
        case PIC_PCIE_LINK0_IRQ:
                nlm_write_reg(pciebase_le, (0x90 >> 2), 0xffffffff);
                break;
        case PIC_PCIE_LINK1_IRQ:
                nlm_write_reg(pciebase_le, (0x94 >> 2), 0xffffffff);
                break;
        case PIC_PCIE_LINK2_IRQ:
                nlm_write_reg(pciebase_le, (0x190 >> 2), 0xffffffff);
                break;
        case PIC_PCIE_LINK3_IRQ:
                nlm_write_reg(pciebase_le, (0x194 >> 2), 0xffffffff);
                break;
        }
}

/* On XLS B silicon, PCIe links 2 and 3 use different PIC interrupts */
static void xls_pcie_ack_b(struct irq_data *d)
{
        uint64_t pciebase_le = nlm_mmio_base(NETLOGIC_IO_PCIE_1_OFFSET);

        switch (d->irq) {
        case PIC_PCIE_LINK0_IRQ:
                nlm_write_reg(pciebase_le, (0x90 >> 2), 0xffffffff);
                break;
        case PIC_PCIE_LINK1_IRQ:
                nlm_write_reg(pciebase_le, (0x94 >> 2), 0xffffffff);
                break;
        case PIC_PCIE_XLSB0_LINK2_IRQ:
                nlm_write_reg(pciebase_le, (0x190 >> 2), 0xffffffff);
                break;
        case PIC_PCIE_XLSB0_LINK3_IRQ:
                nlm_write_reg(pciebase_le, (0x194 >> 2), 0xffffffff);
                break;
        }
}

int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
        return get_irq_vector(dev);
}

/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
        return 0;
}

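/*
 * Map the PCI config space window, register the controller, and hook
 * per-IRQ "extra ack" handlers so the on-chip controller is acked along
 * with the PIC.
 */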
static int __init pcibios_init(void)
{
        void (*extra_ack)(struct irq_data *);
        int link, irq;

        /* PSB assigns PCI resources */
        pci_set_flags(PCI_PROBE_ONLY);
        pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20);

        /* Extend IO port for memory mapped io */
        ioport_resource.start = 0;
        ioport_resource.end   = ~0;

        set_io_port_base(CKSEG1);
        nlm_pci_controller.io_map_base = CKSEG1;

        pr_info("Registering XLR/XLS PCIX/PCIE Controller.\n");
        register_pci_controller(&nlm_pci_controller);

        /*
         * For PCI interrupts, we need to ack the PCI controller too, overload
         * irq handler data to do this
         */
        if (!nlm_chip_is_xls()) {
                /* XLR PCI controller ACK */
                nlm_set_pic_extra_ack(0, PIC_PCIX_IRQ, xlr_pci_ack);
        } else {
                if (nlm_chip_is_xls_b())
                        extra_ack = xls_pcie_ack_b;
                else
                        extra_ack = xls_pcie_ack;
                for (link = 0; link < 4; link++) {
                        irq = nlm_pci_link_to_irq(link);
                        nlm_set_pic_extra_ack(0, irq, extra_ack);
                }
        }
        return 0;
}

arch_initcall(pcibios_init);