// SPDX-License-Identifier: GPL-2.0
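/*
 * Interrupt Controller Source (ICS) backend for the XICS interrupt
 * controller, driven through RTAS firmware calls (ibm,get-xive,
 * ibm,set-xive, ibm,int-on and ibm,int-off).  Used on pSeries where
 * the source controller is only reachable through firmware.
 */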
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/msi.h>

#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/rtas.h>

/* RTAS service tokens */
static int ibm_get_xive;
static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;

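/*
 * Forward declarations for the struct ics callbacks; they are invoked by
 * the core XICS layer through the single ics_rtas instance below.
 */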
static int ics_rtas_map(struct ics *ics, unsigned int virq);
static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec);
static long ics_rtas_get_server(struct ics *ics, unsigned long vec);
static int ics_rtas_host_match(struct ics *ics, struct device_node *node);

/* There is only one struct ics instance, holding global state */
static struct ics ics_rtas = {
	.map		= ics_rtas_map,
	.mask_unknown	= ics_rtas_mask_unknown,
	.get_server	= ics_rtas_get_server,
	.host_match	= ics_rtas_host_match,
};

static void ics_rtas_unmask_irq(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int call_status;
	int server;

	pr_devel("xics: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq);

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return;

	server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);

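	/*
	 * Route the source to the chosen server at DEFAULT_PRIORITY;
	 * any priority other than 0xff allows the interrupt to be presented.
	 */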
	call_status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, hw_irq,
					  server, DEFAULT_PRIORITY);
	if (call_status != 0) {
		printk(KERN_ERR
			"%s: ibm_set_xive irq %u server %x returned %d\n",
			__func__, hw_irq, server, call_status);
		return;
	}

	/* Now unmask the interrupt (often a no-op) */
	call_status = rtas_call_reentrant(ibm_int_on, 1, 1, NULL, hw_irq);
	if (call_status != 0) {
		printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
			__func__, hw_irq, call_status);
		return;
	}
}

static unsigned int ics_rtas_startup(struct irq_data *d)
{
#ifdef CONFIG_PCI_MSI
	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	if (irq_data_get_msi_desc(d))
		pci_msi_unmask_irq(d);
#endif
	/* unmask it */
	ics_rtas_unmask_irq(d);
	return 0;
}

static void ics_rtas_mask_real_irq(unsigned int hw_irq)
{
	int call_status;

	if (hw_irq == XICS_IPI)
		return;

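	/* ibm,int-off disables delivery of this interrupt at the source */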
	call_status = rtas_call_reentrant(ibm_int_off, 1, 1, NULL, hw_irq);
	if (call_status != 0) {
		printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
			__func__, hw_irq, call_status);
		return;
	}

	/* The XIVE must be set to priority 0xff (masked) to allow slot removal */
	call_status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, hw_irq,
					  xics_default_server, 0xff);
	if (call_status != 0) {
		printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
			__func__, hw_irq, call_status);
		return;
	}
}

static void ics_rtas_mask_irq(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	pr_devel("xics: mask virq %d [hw 0x%x]\n", d->irq, hw_irq);

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return;
	ics_rtas_mask_real_irq(hw_irq);
}

static int ics_rtas_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,
				 bool force)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int status;
	int xics_status[2];
	int irq_server;

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return -1;

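	/*
	 * Read back the current (server, priority) pair so that the existing
	 * priority can be preserved when re-targeting the source below.
	 */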
	status = rtas_call_reentrant(ibm_get_xive, 1, 3, xics_status, hw_irq);

	if (status) {
		printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
			__func__, hw_irq, status);
		return -1;
	}

	irq_server = xics_get_irq_server(d->irq, cpumask, 1);
	if (irq_server == -1) {
		pr_warn("%s: No online cpus in the mask %*pb for irq %d\n",
			__func__, cpumask_pr_args(cpumask), d->irq);
		return -1;
	}

	status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL,
				     hw_irq, irq_server, xics_status[1]);

	if (status) {
		printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
			__func__, hw_irq, status);
		return -1;
	}

	return IRQ_SET_MASK_OK;
}

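/*
 * irq_chip wired to the RTAS helpers above; irq_eoi is filled in at init
 * time with the EOI routine of whichever ICP backend is in use.
 */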
static struct irq_chip ics_rtas_irq_chip = {
	.name = "XICS",
	.irq_startup = ics_rtas_startup,
	.irq_mask = ics_rtas_mask_irq,
	.irq_unmask = ics_rtas_unmask_irq,
	.irq_eoi = NULL, /* Patched at init time */
	.irq_set_affinity = ics_rtas_set_affinity,
	.irq_set_type = xics_set_irq_type,
	.irq_retrigger = xics_retrigger,
};

static int ics_rtas_map(struct ics *ics, unsigned int virq)
{
	unsigned int hw_irq = (unsigned int)virq_to_hw(virq);
	int status[2];
	int rc;

	if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS))
		return -EINVAL;

	/* Check if RTAS knows about this interrupt */
	rc = rtas_call_reentrant(ibm_get_xive, 1, 3, status, hw_irq);
	if (rc)
		return -ENXIO;

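	/* Use the fasteoi flow; the EOI goes to the ICP (set up in ics_rtas_init()) */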
	irq_set_chip_and_handler(virq, &ics_rtas_irq_chip, handle_fasteoi_irq);
	irq_set_chip_data(virq, &ics_rtas);

	return 0;
}

static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec)
{
	ics_rtas_mask_real_irq(vec);
}

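/*
 * ibm,get-xive returns a (server, priority) pair; status[0] is the server
 * the source is currently targeted at.
 */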
static long ics_rtas_get_server(struct ics *ics, unsigned long vec)
{
	int rc, status[2];

	rc = rtas_call_reentrant(ibm_get_xive, 1, 3, status, vec);
	if (rc)
		return -1;
	return status[0];
}

static int ics_rtas_host_match(struct ics *ics, struct device_node *node)
{
	/* IBM machines have interrupt parents of various funky types for things
	 * like vdevices, events, etc.  The trick we use here is to match
	 * everything except the legacy 8259, which is compatible with "chrp,iic".
	 */
	return !of_device_is_compatible(node, "chrp,iic");
}

__init int ics_rtas_init(void)
{
	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_on  = rtas_token("ibm,int-on");
	ibm_int_off = rtas_token("ibm,int-off");

	/* We enable the RTAS "ICS" if RTAS is present with the
	 * appropriate tokens
	 */
	if (ibm_get_xive == RTAS_UNKNOWN_SERVICE ||
	    ibm_set_xive == RTAS_UNKNOWN_SERVICE)
		return -ENODEV;

	/* We need to patch our irq chip's EOI to point to the
	 * right ICP
	 */
	ics_rtas_irq_chip.irq_eoi = icp_ops->eoi;

	/* Register ourselves */
	xics_register_ics(&ics_rtas);

	return 0;
}