1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Copyright (c) 2003-2012 Broadcom Corporation
3*4882a593Smuzhiyun * All Rights Reserved
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * This software is available to you under a choice of one of two
6*4882a593Smuzhiyun * licenses. You may choose to be licensed under the terms of the GNU
7*4882a593Smuzhiyun * General Public License (GPL) Version 2, available from the file
8*4882a593Smuzhiyun * COPYING in the main directory of this source tree, or the Broadcom
9*4882a593Smuzhiyun * license below:
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * Redistribution and use in source and binary forms, with or without
12*4882a593Smuzhiyun * modification, are permitted provided that the following conditions
13*4882a593Smuzhiyun * are met:
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun * 1. Redistributions of source code must retain the above copyright
16*4882a593Smuzhiyun * notice, this list of conditions and the following disclaimer.
17*4882a593Smuzhiyun * 2. Redistributions in binary form must reproduce the above copyright
18*4882a593Smuzhiyun * notice, this list of conditions and the following disclaimer in
19*4882a593Smuzhiyun * the documentation and/or other materials provided with the
20*4882a593Smuzhiyun * distribution.
21*4882a593Smuzhiyun *
22*4882a593Smuzhiyun * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
23*4882a593Smuzhiyun * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24*4882a593Smuzhiyun * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25*4882a593Smuzhiyun * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
26*4882a593Smuzhiyun * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27*4882a593Smuzhiyun * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28*4882a593Smuzhiyun * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29*4882a593Smuzhiyun * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30*4882a593Smuzhiyun * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31*4882a593Smuzhiyun * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32*4882a593Smuzhiyun * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33*4882a593Smuzhiyun */
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #include <linux/types.h>
36*4882a593Smuzhiyun #include <linux/pci.h>
37*4882a593Smuzhiyun #include <linux/kernel.h>
38*4882a593Smuzhiyun #include <linux/init.h>
39*4882a593Smuzhiyun #include <linux/msi.h>
40*4882a593Smuzhiyun #include <linux/mm.h>
41*4882a593Smuzhiyun #include <linux/irq.h>
42*4882a593Smuzhiyun #include <linux/irqdesc.h>
43*4882a593Smuzhiyun #include <linux/console.h>
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun #include <asm/io.h>
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun #include <asm/netlogic/interrupt.h>
48*4882a593Smuzhiyun #include <asm/netlogic/haldefs.h>
49*4882a593Smuzhiyun #include <asm/netlogic/common.h>
50*4882a593Smuzhiyun #include <asm/netlogic/mips-extns.h>
51*4882a593Smuzhiyun
52*4882a593Smuzhiyun #include <asm/netlogic/xlp-hal/iomap.h>
53*4882a593Smuzhiyun #include <asm/netlogic/xlp-hal/xlp.h>
54*4882a593Smuzhiyun #include <asm/netlogic/xlp-hal/pic.h>
55*4882a593Smuzhiyun #include <asm/netlogic/xlp-hal/pcibus.h>
56*4882a593Smuzhiyun #include <asm/netlogic/xlp-hal/bridge.h>
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun #define XLP_MSIVEC_PER_LINK 32
59*4882a593Smuzhiyun #define XLP_MSIXVEC_TOTAL (cpu_is_xlp9xx() ? 128 : 32)
60*4882a593Smuzhiyun #define XLP_MSIXVEC_PER_LINK (cpu_is_xlp9xx() ? 32 : 8)
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun /* 128 MSI irqs per node, mapped starting at NLM_MSI_VEC_BASE */
nlm_link_msiirq(int link,int msivec)63*4882a593Smuzhiyun static inline int nlm_link_msiirq(int link, int msivec)
64*4882a593Smuzhiyun {
65*4882a593Smuzhiyun return NLM_MSI_VEC_BASE + link * XLP_MSIVEC_PER_LINK + msivec;
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun /* get the link MSI vector from irq number */
nlm_irq_msivec(int irq)69*4882a593Smuzhiyun static inline int nlm_irq_msivec(int irq)
70*4882a593Smuzhiyun {
71*4882a593Smuzhiyun return (irq - NLM_MSI_VEC_BASE) % XLP_MSIVEC_PER_LINK;
72*4882a593Smuzhiyun }
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun /* get the link from the irq number */
nlm_irq_msilink(int irq)75*4882a593Smuzhiyun static inline int nlm_irq_msilink(int irq)
76*4882a593Smuzhiyun {
77*4882a593Smuzhiyun int total_msivec = XLP_MSIVEC_PER_LINK * PCIE_NLINKS;
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun return ((irq - NLM_MSI_VEC_BASE) % total_msivec) /
80*4882a593Smuzhiyun XLP_MSIVEC_PER_LINK;
81*4882a593Smuzhiyun }
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun /*
84*4882a593Smuzhiyun * For XLP 8xx/4xx/3xx/2xx, only 32 MSI-X vectors are possible because
85*4882a593Smuzhiyun * there are only 32 PIC interrupts for MSI. We split them statically
86*4882a593Smuzhiyun * and use 8 MSI-X vectors per link - this keeps the allocation and
87*4882a593Smuzhiyun * lookup simple.
88*4882a593Smuzhiyun * On XLP 9xx, there are 32 vectors per link, and the interrupts are
89*4882a593Smuzhiyun * not routed thru PIC, so we can use all 128 MSI-X vectors.
90*4882a593Smuzhiyun */
nlm_link_msixirq(int link,int bit)91*4882a593Smuzhiyun static inline int nlm_link_msixirq(int link, int bit)
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun return NLM_MSIX_VEC_BASE + link * XLP_MSIXVEC_PER_LINK + bit;
94*4882a593Smuzhiyun }
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun /* get the link MSI vector from irq number */
nlm_irq_msixvec(int irq)97*4882a593Smuzhiyun static inline int nlm_irq_msixvec(int irq)
98*4882a593Smuzhiyun {
99*4882a593Smuzhiyun return (irq - NLM_MSIX_VEC_BASE) % XLP_MSIXVEC_TOTAL;
100*4882a593Smuzhiyun }
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun /* get the link from MSIX vec */
nlm_irq_msixlink(int msixvec)103*4882a593Smuzhiyun static inline int nlm_irq_msixlink(int msixvec)
104*4882a593Smuzhiyun {
105*4882a593Smuzhiyun return msixvec / XLP_MSIXVEC_PER_LINK;
106*4882a593Smuzhiyun }
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun /*
109*4882a593Smuzhiyun * Per link MSI and MSI-X information, set as IRQ handler data for
110*4882a593Smuzhiyun * MSI and MSI-X interrupts.
111*4882a593Smuzhiyun */
struct xlp_msi_data {
	struct nlm_soc_info *node;	/* owning node; picbase used for PIC acks */
	uint64_t lnkbase;		/* PCIe link register base for nlm_*_reg() */
	uint32_t msi_enabled_mask;	/* bit per MSI vector currently enabled */
	uint32_t msi_alloc_mask;	/* bit per MSI vector handed out (never freed) */
	uint32_t msix_alloc_mask;	/* bit per MSI-X vector handed out (never freed) */
	spinlock_t msi_lock;		/* protects the three masks above */
};
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun /*
122*4882a593Smuzhiyun * MSI Chip definitions
123*4882a593Smuzhiyun *
124*4882a593Smuzhiyun * On XLP, there is a PIC interrupt associated with each PCIe link on the
 * chip (which appears as a PCI bridge to us). This gives us 32 MSI irqs
126*4882a593Smuzhiyun * per link and 128 overall.
127*4882a593Smuzhiyun *
128*4882a593Smuzhiyun * When a device connected to the link raises a MSI interrupt, we get a
129*4882a593Smuzhiyun * link interrupt and we then have to look at PCIE_MSI_STATUS register at
130*4882a593Smuzhiyun * the bridge to map it to the IRQ
131*4882a593Smuzhiyun */
xlp_msi_enable(struct irq_data * d)132*4882a593Smuzhiyun static void xlp_msi_enable(struct irq_data *d)
133*4882a593Smuzhiyun {
134*4882a593Smuzhiyun struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
135*4882a593Smuzhiyun unsigned long flags;
136*4882a593Smuzhiyun int vec;
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun vec = nlm_irq_msivec(d->irq);
139*4882a593Smuzhiyun spin_lock_irqsave(&md->msi_lock, flags);
140*4882a593Smuzhiyun md->msi_enabled_mask |= 1u << vec;
141*4882a593Smuzhiyun if (cpu_is_xlp9xx())
142*4882a593Smuzhiyun nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
143*4882a593Smuzhiyun md->msi_enabled_mask);
144*4882a593Smuzhiyun else
145*4882a593Smuzhiyun nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
146*4882a593Smuzhiyun spin_unlock_irqrestore(&md->msi_lock, flags);
147*4882a593Smuzhiyun }
148*4882a593Smuzhiyun
xlp_msi_disable(struct irq_data * d)149*4882a593Smuzhiyun static void xlp_msi_disable(struct irq_data *d)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
152*4882a593Smuzhiyun unsigned long flags;
153*4882a593Smuzhiyun int vec;
154*4882a593Smuzhiyun
155*4882a593Smuzhiyun vec = nlm_irq_msivec(d->irq);
156*4882a593Smuzhiyun spin_lock_irqsave(&md->msi_lock, flags);
157*4882a593Smuzhiyun md->msi_enabled_mask &= ~(1u << vec);
158*4882a593Smuzhiyun if (cpu_is_xlp9xx())
159*4882a593Smuzhiyun nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
160*4882a593Smuzhiyun md->msi_enabled_mask);
161*4882a593Smuzhiyun else
162*4882a593Smuzhiyun nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
163*4882a593Smuzhiyun spin_unlock_irqrestore(&md->msi_lock, flags);
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun
xlp_msi_mask_ack(struct irq_data * d)166*4882a593Smuzhiyun static void xlp_msi_mask_ack(struct irq_data *d)
167*4882a593Smuzhiyun {
168*4882a593Smuzhiyun struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
169*4882a593Smuzhiyun int link, vec;
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun link = nlm_irq_msilink(d->irq);
172*4882a593Smuzhiyun vec = nlm_irq_msivec(d->irq);
173*4882a593Smuzhiyun xlp_msi_disable(d);
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun /* Ack MSI on bridge */
176*4882a593Smuzhiyun if (cpu_is_xlp9xx())
177*4882a593Smuzhiyun nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_STATUS, 1u << vec);
178*4882a593Smuzhiyun else
179*4882a593Smuzhiyun nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec);
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun }
182*4882a593Smuzhiyun
/*
 * irq_chip for the per-link MSI irq range. Unmask reuses the enable
 * callback since both just set the vector's bit in the bridge's MSI
 * enable register; mask_ack disables the vector and clears its
 * pending status bit.
 */
static struct irq_chip xlp_msi_chip = {
	.name = "XLP-MSI",
	.irq_enable = xlp_msi_enable,
	.irq_disable = xlp_msi_disable,
	.irq_mask_ack = xlp_msi_mask_ack,
	.irq_unmask = xlp_msi_enable,
};
190*4882a593Smuzhiyun
191*4882a593Smuzhiyun /*
192*4882a593Smuzhiyun * XLP8XX/4XX/3XX/2XX:
193*4882a593Smuzhiyun * The MSI-X interrupt handling is different from MSI, there are 32 MSI-X
194*4882a593Smuzhiyun * interrupts generated by the PIC and each of these correspond to a MSI-X
195*4882a593Smuzhiyun * vector (0-31) that can be assigned.
196*4882a593Smuzhiyun *
197*4882a593Smuzhiyun * We divide the MSI-X vectors to 8 per link and do a per-link allocation
198*4882a593Smuzhiyun *
199*4882a593Smuzhiyun * XLP9XX:
200*4882a593Smuzhiyun * 32 MSI-X vectors are available per link, and the interrupts are not routed
201*4882a593Smuzhiyun * thru the PIC. PIC ack not needed.
202*4882a593Smuzhiyun *
203*4882a593Smuzhiyun * Enable and disable done using standard MSI functions.
204*4882a593Smuzhiyun */
/*
 * Mask-and-ack for an MSI-X vector: mask it via the standard PCI
 * MSI-X mask bit, then clear its pending bit at the bridge, and on
 * pre-9XX chips also ack the corresponding PIC entry (on those chips
 * MSI-X is routed thru the PIC; on 9XX it is not).
 */
static void xlp_msix_mask_ack(struct irq_data *d)
{
	struct xlp_msi_data *md;
	int link, msixvec;
	uint32_t status_reg, bit;

	msixvec = nlm_irq_msixvec(d->irq);
	link = nlm_irq_msixlink(msixvec);
	pci_msi_mask_irq(d);
	md = irq_data_get_irq_chip_data(d);

	/* Ack MSI on bridge */
	if (cpu_is_xlp9xx()) {
		/* 9XX: per-link status register, bit index is link-relative */
		status_reg = PCIE_9XX_MSIX_STATUSX(link);
		bit = msixvec % XLP_MSIXVEC_PER_LINK;
	} else {
		/* older chips: one shared status register, global bit index */
		status_reg = PCIE_MSIX_STATUS;
		bit = msixvec;
	}
	nlm_write_reg(md->lnkbase, status_reg, 1u << bit);

	/* PIC ack needed only where MSI-X goes thru the PIC (pre-9XX) */
	if (!cpu_is_xlp9xx())
		nlm_pic_ack(md->node->picbase,
				PIC_IRT_PCIE_MSIX_INDEX(msixvec));
}
230*4882a593Smuzhiyun
/*
 * irq_chip for the per-link MSI-X irq range. Enable/disable/unmask
 * use the generic PCI MSI mask-bit helpers; only mask_ack needs
 * chip-specific handling (bridge status + optional PIC ack).
 */
static struct irq_chip xlp_msix_chip = {
	.name = "XLP-MSIX",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask_ack = xlp_msix_mask_ack,
	.irq_unmask = pci_msi_unmask_irq,
};
238*4882a593Smuzhiyun
/*
 * Teardown is a no-op: this implementation never returns vectors to
 * the per-link allocation masks (allocation in xlp_setup_msi/msix is
 * one-way), so there is no per-irq state to release here.
 */
void arch_teardown_msi_irq(unsigned int irq)
{
}
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun /*
244*4882a593Smuzhiyun * Setup a PCIe link for MSI. By default, the links are in
245*4882a593Smuzhiyun * legacy interrupt mode. We will switch them to MSI mode
246*4882a593Smuzhiyun * at the first MSI request.
247*4882a593Smuzhiyun */
static void xlp_config_link_msi(uint64_t lnkbase, int lirq, uint64_t msiaddr)
{
	u32 val;

	/* Turn on MSI interrupt generation (bit 9) in the link's INT_EN0 */
	if (cpu_is_xlp9xx()) {
		val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200; /* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
		}
	} else {
		val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200; /* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
		}
	}

	/* reg 1 is the PCI command register; 0x0400 is presumably the
	 * INTx Disable bit (bit 10) to turn off legacy interrupts when
	 * MSI is in use -- TODO confirm against the XLP datasheet */
	val = nlm_read_reg(lnkbase, 0x1); /* CMD */
	if ((val & 0x0400) == 0) {
		val |= 0x0400;
		nlm_write_reg(lnkbase, 0x1, val);
	}

	/* Update IRQ in the PCI irq reg (config dword 0xf, presumably the
	 * interrupt line/pin word: low 5 bits = line, bit 8 set) */
	val = nlm_read_pci_reg(lnkbase, 0xf);
	val &= ~0x1fu;
	val |= (1 << 8) | lirq;
	nlm_write_pci_reg(lnkbase, 0xf, val);

	/* MSI addr: the target address devices write their MSI data to */
	nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRH, msiaddr >> 32);
	nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRL, msiaddr & 0xffffffff);

	/* MSI cap for bridge: enable MSI if not already on */
	val = nlm_read_reg(lnkbase, PCIE_BRIDGE_MSI_CAP);
	if ((val & (1 << 16)) == 0) {
		val |= 0xb << 16; /* mmc32, msi enable */
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_CAP, val);
	}
}
289*4882a593Smuzhiyun
290*4882a593Smuzhiyun /*
291*4882a593Smuzhiyun * Allocate a MSI vector on a link
292*4882a593Smuzhiyun */
/*
 * Allocate a MSI vector on a link.  The first allocation switches the
 * link from legacy interrupt mode to MSI mode and routes the link's
 * PIC interrupt.  Returns 0 on success, -ENOMEM when the link's 32
 * vectors are exhausted, or the error from irq_set_msi_desc().
 *
 * Fix: on irq_set_msi_desc() failure the allocated vector bit was
 * leaked in msi_alloc_mask; it is now released under the lock.
 */
static int xlp_setup_msi(uint64_t lnkbase, int node, int link,
	struct msi_desc *desc)
{
	struct xlp_msi_data *md;
	struct msi_msg msg;
	unsigned long flags;
	int msivec, irt, lirq, xirq, ret;
	uint64_t msiaddr;

	/* Get MSI data for the link */
	lirq = PIC_PCIE_LINK_MSI_IRQ(link);
	xirq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
	md = irq_get_chip_data(xirq);
	msiaddr = MSI_LINK_ADDR(node, link);

	spin_lock_irqsave(&md->msi_lock, flags);
	if (md->msi_alloc_mask == 0) {
		/* first MSI on this link: switch it out of legacy mode */
		xlp_config_link_msi(lnkbase, lirq, msiaddr);
		/* switch the link IRQ to MSI range */
		if (cpu_is_xlp9xx())
			irt = PIC_9XX_IRT_PCIE_LINK_INDEX(link);
		else
			irt = PIC_IRT_PCIE_LINK_INDEX(link);
		nlm_setup_pic_irq(node, lirq, lirq, irt);
		nlm_pic_init_irt(nlm_get_node(node)->picbase, irt, lirq,
				 node * nlm_threads_per_node(), 1 /*en */);
	}

	/* allocate a MSI vec (fls of the mask gives the next free bit
	 * while allocation stays sequential), and tell the bridge */
	msivec = fls(md->msi_alloc_mask);
	if (msivec == XLP_MSIVEC_PER_LINK) {
		spin_unlock_irqrestore(&md->msi_lock, flags);
		return -ENOMEM;
	}
	md->msi_alloc_mask |= (1u << msivec);
	spin_unlock_irqrestore(&md->msi_lock, flags);

	msg.address_hi = msiaddr >> 32;
	msg.address_lo = msiaddr & 0xffffffff;
	msg.data = 0xc00 | msivec;

	xirq = xirq + msivec;		/* msi mapped to global irq space */
	ret = irq_set_msi_desc(xirq, desc);
	if (ret < 0) {
		/* release the vector so it can be reused */
		spin_lock_irqsave(&md->msi_lock, flags);
		md->msi_alloc_mask &= ~(1u << msivec);
		spin_unlock_irqrestore(&md->msi_lock, flags);
		return ret;
	}

	pci_write_msi_msg(xirq, &msg);
	return 0;
}
342*4882a593Smuzhiyun
343*4882a593Smuzhiyun /*
344*4882a593Smuzhiyun * Switch a link to MSI-X mode
345*4882a593Smuzhiyun */
static void xlp_config_link_msix(uint64_t lnkbase, int lirq, uint64_t msixaddr)
{
	u32 val;

	/* reg 0x2C, bit 31: presumably the MSI-X enable bit in the
	 * bridge's MSI-X capability -- TODO confirm against datasheet */
	val = nlm_read_reg(lnkbase, 0x2C);
	if ((val & 0x80000000U) == 0) {
		val |= 0x80000000U;
		nlm_write_reg(lnkbase, 0x2C, val);
	}

	/* Turn on MSI interrupt generation (bit 9) in the link's INT_EN0 */
	if (cpu_is_xlp9xx()) {
		val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200; /* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
		}
	} else {
		val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200; /* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
		}
	}

	/* reg 1 is the PCI command register; 0x0400 is presumably the
	 * INTx Disable bit (bit 10) -- same as the MSI setup path */
	val = nlm_read_reg(lnkbase, 0x1); /* CMD */
	if ((val & 0x0400) == 0) {
		val |= 0x0400;
		nlm_write_reg(lnkbase, 0x1, val);
	}

	/* Update IRQ in the PCI irq reg (config dword 0xf) */
	val = nlm_read_pci_reg(lnkbase, 0xf);
	val &= ~0x1fu;
	val |= (1 << 8) | lirq;
	nlm_write_pci_reg(lnkbase, 0xf, val);

	/* Program the address window [base, base + MSI_ADDR_SZ) that the
	 * bridge recognizes as MSI-X writes; registers take addr >> 8 */
	if (cpu_is_xlp9xx()) {
		/* MSI-X addresses */
		nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_BASE,
				msixaddr >> 8);
		nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT,
				(msixaddr + MSI_ADDR_SZ) >> 8);
	} else {
		/* MSI-X addresses */
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_BASE,
				msixaddr >> 8);
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_LIMIT,
				(msixaddr + MSI_ADDR_SZ) >> 8);
	}
}
396*4882a593Smuzhiyun
397*4882a593Smuzhiyun /*
398*4882a593Smuzhiyun * Allocate a MSI-X vector
399*4882a593Smuzhiyun */
/*
 * Allocate a MSI-X vector on a link.  The first allocation switches
 * the link to MSI-X mode.  Returns 0 on success, -ENOMEM when the
 * link's vectors are exhausted, or the error from irq_set_msi_desc().
 *
 * Fix: on irq_set_msi_desc() failure the allocated vector bit was
 * leaked in msix_alloc_mask; it is now released under the lock.
 */
static int xlp_setup_msix(uint64_t lnkbase, int node, int link,
	struct msi_desc *desc)
{
	struct xlp_msi_data *md;
	struct msi_msg msg;
	unsigned long flags;
	int t, msixvec, lirq, xirq, ret;
	uint64_t msixaddr;

	/* Get MSI data for the link */
	lirq = PIC_PCIE_MSIX_IRQ(link);
	xirq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
	md = irq_get_chip_data(xirq);
	msixaddr = MSIX_LINK_ADDR(node, link);

	spin_lock_irqsave(&md->msi_lock, flags);
	/* switch the PCIe link to MSI-X mode at the first alloc */
	if (md->msix_alloc_mask == 0)
		xlp_config_link_msix(lnkbase, lirq, msixaddr);

	/* allocate a MSI-X vec, and tell the bridge about it */
	t = fls(md->msix_alloc_mask);
	if (t == XLP_MSIXVEC_PER_LINK) {
		spin_unlock_irqrestore(&md->msi_lock, flags);
		return -ENOMEM;
	}
	md->msix_alloc_mask |= (1u << t);
	spin_unlock_irqrestore(&md->msi_lock, flags);

	xirq += t;
	msixvec = nlm_irq_msixvec(xirq);

	msg.address_hi = msixaddr >> 32;
	msg.address_lo = msixaddr & 0xffffffff;
	msg.data = 0xc00 | msixvec;	/* same 0xc00 data prefix as MSI path */

	ret = irq_set_msi_desc(xirq, desc);
	if (ret < 0) {
		/* release the vector so it can be reused */
		spin_lock_irqsave(&md->msi_lock, flags);
		md->msix_alloc_mask &= ~(1u << t);
		spin_unlock_irqrestore(&md->msi_lock, flags);
		return ret;
	}

	pci_write_msi_msg(xirq, &msg);
	return 0;
}
443*4882a593Smuzhiyun
arch_setup_msi_irq(struct pci_dev * dev,struct msi_desc * desc)444*4882a593Smuzhiyun int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
445*4882a593Smuzhiyun {
446*4882a593Smuzhiyun struct pci_dev *lnkdev;
447*4882a593Smuzhiyun uint64_t lnkbase;
448*4882a593Smuzhiyun int node, link, slot;
449*4882a593Smuzhiyun
450*4882a593Smuzhiyun lnkdev = xlp_get_pcie_link(dev);
451*4882a593Smuzhiyun if (lnkdev == NULL) {
452*4882a593Smuzhiyun dev_err(&dev->dev, "Could not find bridge\n");
453*4882a593Smuzhiyun return 1;
454*4882a593Smuzhiyun }
455*4882a593Smuzhiyun slot = PCI_SLOT(lnkdev->devfn);
456*4882a593Smuzhiyun link = PCI_FUNC(lnkdev->devfn);
457*4882a593Smuzhiyun node = slot / 8;
458*4882a593Smuzhiyun lnkbase = nlm_get_pcie_base(node, link);
459*4882a593Smuzhiyun
460*4882a593Smuzhiyun if (desc->msi_attrib.is_msix)
461*4882a593Smuzhiyun return xlp_setup_msix(lnkbase, node, link, desc);
462*4882a593Smuzhiyun else
463*4882a593Smuzhiyun return xlp_setup_msi(lnkbase, node, link, desc);
464*4882a593Smuzhiyun }
465*4882a593Smuzhiyun
xlp_init_node_msi_irqs(int node,int link)466*4882a593Smuzhiyun void __init xlp_init_node_msi_irqs(int node, int link)
467*4882a593Smuzhiyun {
468*4882a593Smuzhiyun struct nlm_soc_info *nodep;
469*4882a593Smuzhiyun struct xlp_msi_data *md;
470*4882a593Smuzhiyun int irq, i, irt, msixvec, val;
471*4882a593Smuzhiyun
472*4882a593Smuzhiyun pr_info("[%d %d] Init node PCI IRT\n", node, link);
473*4882a593Smuzhiyun nodep = nlm_get_node(node);
474*4882a593Smuzhiyun
475*4882a593Smuzhiyun /* Alloc an MSI block for the link */
476*4882a593Smuzhiyun md = kzalloc(sizeof(*md), GFP_KERNEL);
477*4882a593Smuzhiyun spin_lock_init(&md->msi_lock);
478*4882a593Smuzhiyun md->msi_enabled_mask = 0;
479*4882a593Smuzhiyun md->msi_alloc_mask = 0;
480*4882a593Smuzhiyun md->msix_alloc_mask = 0;
481*4882a593Smuzhiyun md->node = nodep;
482*4882a593Smuzhiyun md->lnkbase = nlm_get_pcie_base(node, link);
483*4882a593Smuzhiyun
484*4882a593Smuzhiyun /* extended space for MSI interrupts */
485*4882a593Smuzhiyun irq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
486*4882a593Smuzhiyun for (i = irq; i < irq + XLP_MSIVEC_PER_LINK; i++) {
487*4882a593Smuzhiyun irq_set_chip_and_handler(i, &xlp_msi_chip, handle_level_irq);
488*4882a593Smuzhiyun irq_set_chip_data(i, md);
489*4882a593Smuzhiyun }
490*4882a593Smuzhiyun
491*4882a593Smuzhiyun for (i = 0; i < XLP_MSIXVEC_PER_LINK ; i++) {
492*4882a593Smuzhiyun if (cpu_is_xlp9xx()) {
493*4882a593Smuzhiyun val = ((node * nlm_threads_per_node()) << 7 |
494*4882a593Smuzhiyun PIC_PCIE_MSIX_IRQ(link) << 1 | 0 << 0);
495*4882a593Smuzhiyun nlm_write_pcie_reg(md->lnkbase, PCIE_9XX_MSIX_VECX(i +
496*4882a593Smuzhiyun (link * XLP_MSIXVEC_PER_LINK)), val);
497*4882a593Smuzhiyun } else {
498*4882a593Smuzhiyun /* Initialize MSI-X irts to generate one interrupt
499*4882a593Smuzhiyun * per link
500*4882a593Smuzhiyun */
501*4882a593Smuzhiyun msixvec = link * XLP_MSIXVEC_PER_LINK + i;
502*4882a593Smuzhiyun irt = PIC_IRT_PCIE_MSIX_INDEX(msixvec);
503*4882a593Smuzhiyun nlm_pic_init_irt(nodep->picbase, irt,
504*4882a593Smuzhiyun PIC_PCIE_MSIX_IRQ(link),
505*4882a593Smuzhiyun node * nlm_threads_per_node(), 1);
506*4882a593Smuzhiyun }
507*4882a593Smuzhiyun
508*4882a593Smuzhiyun /* Initialize MSI-X extended irq space for the link */
509*4882a593Smuzhiyun irq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, i));
510*4882a593Smuzhiyun irq_set_chip_and_handler(irq, &xlp_msix_chip, handle_level_irq);
511*4882a593Smuzhiyun irq_set_chip_data(irq, md);
512*4882a593Smuzhiyun }
513*4882a593Smuzhiyun }
514*4882a593Smuzhiyun
/*
 * Dispatch handler for a link's MSI PIC interrupt: read the link's
 * MSI status, restrict it to currently-enabled vectors, run do_IRQ()
 * for each pending vector, then ack the link interrupt at the CPU
 * (EIRR) and at the PIC.
 */
void nlm_dispatch_msi(int node, int lirq)
{
	struct xlp_msi_data *md;
	int link, i, irqbase;
	u32 status;

	/* recover the link from the PIC irq number */
	link = lirq - PIC_PCIE_LINK_MSI_IRQ_BASE;
	irqbase = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
	md = irq_get_chip_data(irqbase);
	if (cpu_is_xlp9xx())
		status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSI_STATUS) &
				md->msi_enabled_mask;
	else
		status = nlm_read_reg(md->lnkbase, PCIE_MSI_STATUS) &
				md->msi_enabled_mask;
	while (status) {
		i = __ffs(status);
		do_IRQ(irqbase + i);
		status &= status - 1;	/* clear the lowest set bit */
	}

	/* Ack at eirr and PIC */
	ack_c0_eirr(PIC_PCIE_LINK_MSI_IRQ(link));
	if (cpu_is_xlp9xx())
		nlm_pic_ack(md->node->picbase,
				PIC_9XX_IRT_PCIE_LINK_INDEX(link));
	else
		nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link));
}
544*4882a593Smuzhiyun
/*
 * Dispatch handler for a link's MSI-X interrupt: read the MSI-X
 * status (per-link register on 9XX, shared register narrowed to the
 * link's bits otherwise), run do_IRQ() for each pending vector, then
 * ack at the CPU (EIRR).  No PIC ack here: the per-vector mask_ack
 * callback handles PIC acks on pre-9XX chips.
 */
void nlm_dispatch_msix(int node, int lirq)
{
	struct xlp_msi_data *md;
	int link, i, irqbase;
	u32 status;

	/* recover the link from the PIC irq number */
	link = lirq - PIC_PCIE_MSIX_IRQ_BASE;
	irqbase = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
	md = irq_get_chip_data(irqbase);
	if (cpu_is_xlp9xx())
		status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSIX_STATUSX(link));
	else
		status = nlm_read_reg(md->lnkbase, PCIE_MSIX_STATUS);

	/* narrow it down to the MSI-x vectors for our link */
	if (!cpu_is_xlp9xx())
		status = (status >> (link * XLP_MSIXVEC_PER_LINK)) &
				((1 << XLP_MSIXVEC_PER_LINK) - 1);

	while (status) {
		i = __ffs(status);
		do_IRQ(irqbase + i);
		status &= status - 1;	/* clear the lowest set bit */
	}
	/* Ack at eirr and PIC */
	ack_c0_eirr(PIC_PCIE_MSIX_IRQ(link));
}
572