// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>

#include "xive-internal.h"

static u32 xive_queue_shift;

struct xive_irq_bitmap {
	unsigned long		*bitmap;
	unsigned int		base;
	unsigned int		count;
	spinlock_t		lock;
	struct list_head	list;
};

static LIST_HEAD(xive_irq_bitmaps);

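/*
 * Register a bitmap tracking the allocation state of "count" hardware
 * interrupt numbers starting at "base". The ranges come from the
 * "ibm,xive-lisn-ranges" device tree property.
 */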
static int xive_irq_bitmap_add(int base, int count)
{
	struct xive_irq_bitmap *xibm;

	xibm = kzalloc(sizeof(*xibm), GFP_KERNEL);
	if (!xibm)
		return -ENOMEM;

	spin_lock_init(&xibm->lock);
	xibm->base = base;
	xibm->count = count;
	xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
	if (!xibm->bitmap) {
		kfree(xibm);
		return -ENOMEM;
	}
	list_add(&xibm->list, &xive_irq_bitmaps);

	pr_info("Using IRQ range [%x-%x]\n", xibm->base,
		xibm->base + xibm->count - 1);
	return 0;
}

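/* Find and reserve a free interrupt in a bitmap; called with xibm->lock held */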
static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
	int irq;

	irq = find_first_zero_bit(xibm->bitmap, xibm->count);
	if (irq != xibm->count) {
		set_bit(irq, xibm->bitmap);
		irq += xibm->base;
	} else {
		irq = -ENOMEM;
	}

	return irq;
}

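/* Allocate a hardware interrupt number from the first bitmap with a free slot */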
static int xive_irq_bitmap_alloc(void)
{
	struct xive_irq_bitmap *xibm;
	unsigned long flags;
	int irq = -ENOENT;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		spin_lock_irqsave(&xibm->lock, flags);
		irq = __xive_irq_bitmap_alloc(xibm);
		spin_unlock_irqrestore(&xibm->lock, flags);
		if (irq >= 0)
			break;
	}
	return irq;
}

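/* Return a hardware interrupt number to the bitmap it was allocated from */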
static void xive_irq_bitmap_free(int irq)
{
	unsigned long flags;
	struct xive_irq_bitmap *xibm;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
			spin_lock_irqsave(&xibm->lock, flags);
			clear_bit(irq - xibm->base, xibm->bitmap);
			spin_unlock_irqrestore(&xibm->lock, flags);
			break;
		}
	}
}


/* Based on the similar routines in RTAS */
static unsigned int plpar_busy_delay_time(long rc)
{
	unsigned int ms = 0;

	if (H_IS_LONG_BUSY(rc)) {
		ms = get_longbusy_msecs(rc);
	} else if (rc == H_BUSY) {
		ms = 10; /* seems appropriate for XIVE hcalls */
	}

	return ms;
}

static unsigned int plpar_busy_delay(int rc)
{
	unsigned int ms;

	ms = plpar_busy_delay_time(rc);
	if (ms)
		mdelay(ms);

	return ms;
}

/*
 * Note: this call has a partition wide scope and can take a while to
 * complete. If it returns H_LONG_BUSY_* it should be retried
 * periodically.
 */
static long plpar_int_reset(unsigned long flags)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_RESET, flags);
	} while (plpar_busy_delay(rc));

	if (rc)
		pr_err("H_INT_RESET failed %ld\n", rc);

	return rc;
}

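/*
 * H_INT_GET_SOURCE_INFO returns the ESB characteristics of an interrupt
 * source: the source flags, EOI page, trigger page and ESB page shift.
 */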
static long plpar_int_get_source_info(unsigned long flags,
				      unsigned long lisn,
				      unsigned long *src_flags,
				      unsigned long *eoi_page,
				      unsigned long *trig_page,
				      unsigned long *esb_shift)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
		return rc;
	}

	*src_flags = retbuf[0];
	*eoi_page = retbuf[1];
	*trig_page = retbuf[2];
	*esb_shift = retbuf[3];

	pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
		 retbuf[0], retbuf[1], retbuf[2], retbuf[3]);

	return 0;
}

#define XIVE_SRC_SET_EISN	(1ull << (63 - 62))
#define XIVE_SRC_MASK		(1ull << (63 - 63)) /* unused */

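/*
 * H_INT_SET_SOURCE_CONFIG targets an interrupt source at a CPU/priority
 * pair and, when XIVE_SRC_SET_EISN is set, records the Linux IRQ number
 * to be presented in the event queue.
 */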
static long plpar_int_set_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long target,
					unsigned long prio,
					unsigned long sw_irq)
{
	long rc;

	pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
		 flags, lisn, target, prio, sw_irq);

	do {
		rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
					target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
		       lisn, target, prio, rc);
		return rc;
	}

	return 0;
}

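/* H_INT_GET_SOURCE_CONFIG reads back the current targeting of an interrupt source */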
static long plpar_int_get_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long *target,
					unsigned long *prio,
					unsigned long *sw_irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_devel("H_INT_GET_SOURCE_CONFIG flags=%lx lisn=%lx\n", flags, lisn);

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
				 target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_CONFIG lisn=%ld failed %ld\n",
		       lisn, rc);
		return rc;
	}

	*target = retbuf[0];
	*prio = retbuf[1];
	*sw_irq = retbuf[2];

	pr_devel("H_INT_GET_SOURCE_CONFIG target=%lx prio=%lx sw_irq=%lx\n",
		 retbuf[0], retbuf[1], retbuf[2]);

	return 0;
}

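/*
 * H_INT_GET_QUEUE_INFO returns the Event State Notification page
 * characteristics of the event queue of a CPU/priority pair.
 */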
static long plpar_int_get_queue_info(unsigned long flags,
				     unsigned long target,
				     unsigned long priority,
				     unsigned long *esn_page,
				     unsigned long *esn_size)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target,
				 priority);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
		       target, priority, rc);
		return rc;
	}

	*esn_page = retbuf[0];
	*esn_size = retbuf[1];

	pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
		 retbuf[0], retbuf[1]);

	return 0;
}

#define XIVE_EQ_ALWAYS_NOTIFY	(1ull << (63 - 63))

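/*
 * H_INT_SET_QUEUE_CONFIG sets (or resets, when qpage is 0) the event
 * queue page of a CPU/priority pair.
 */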
static long plpar_int_set_queue_config(unsigned long flags,
				       unsigned long target,
				       unsigned long priority,
				       unsigned long qpage,
				       unsigned long qsize)
{
	long rc;

	pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
		 flags, target, priority, qpage, qsize);

	do {
		rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
					priority, qpage, qsize);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
		       target, priority, qpage, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_sync(unsigned long flags, unsigned long lisn)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
		return rc;
	}

	return 0;
}

#define XIVE_ESB_FLAG_STORE	(1ull << (63 - 63))

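/*
 * H_INT_ESB performs an ESB load or store on behalf of the guest, for
 * sources whose ESB pages must be accessed through the hypervisor
 * (XIVE_SRC_H_INT_ESB).
 */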
static long plpar_int_esb(unsigned long flags,
			  unsigned long lisn,
			  unsigned long offset,
			  unsigned long in_data,
			  unsigned long *out_data)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
		 flags, lisn, offset, in_data);

	do {
		rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
				 in_data);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
		       lisn, offset, rc);
		return rc;
	}

	*out_data = retbuf[0];

	return 0;
}

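/*
 * ESB accessor backed by the H_INT_ESB hcall, used by the XIVE core for
 * interrupts flagged with XIVE_IRQ_FLAG_H_INT_ESB.
 */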
static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
	unsigned long read_data;
	long rc;

	rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
			   lisn, offset, data, &read_data);
	if (rc)
		return -1;

	return write ? 0 : read_data;
}

#define XIVE_SRC_H_INT_ESB	(1ull << (63 - 60))
#define XIVE_SRC_LSI		(1ull << (63 - 61))
#define XIVE_SRC_TRIGGER	(1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI	(1ull << (63 - 63))

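/* Query the hypervisor for the ESB setup of an interrupt and map its pages */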
static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	long rc;
	unsigned long flags;
	unsigned long eoi_page;
	unsigned long trig_page;
	unsigned long esb_shift;

	memset(data, 0, sizeof(*data));

	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
				       &esb_shift);
	if (rc)
		return -EINVAL;

	if (flags & XIVE_SRC_H_INT_ESB)
		data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
	if (flags & XIVE_SRC_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (flags & XIVE_SRC_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = eoi_page;
	data->esb_shift = esb_shift;
	data->trig_page = trig_page;

	data->hw_irq = hw_irq;

	/*
	 * No chip-id for the sPAPR backend. This has an impact on how we
	 * pick a target. See xive_pick_irq_target().
	 */
	data->src_chip = XIVE_INVALID_CHIP_ID;

	/*
	 * When the H_INT_ESB flag is set, the H_INT_ESB hcall should
	 * be used for interrupt management. Skip the remapping of the
	 * ESB pages which are not available.
	 */
	if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
		return 0;

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	/* Full function page supports trigger */
	if (flags & XIVE_SRC_TRIGGER) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}

static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	long rc;

	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
					 prio, sw_irq);

	return rc == 0 ? 0 : -ENXIO;
}

static int xive_spapr_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				     u32 *sw_irq)
{
	long rc;
	unsigned long h_target;
	unsigned long h_prio;
	unsigned long h_sw_irq;

	rc = plpar_int_get_source_config(0, hw_irq, &h_target, &h_prio,
					 &h_sw_irq);

	*target = h_target;
	*prio = h_prio;
	*sw_irq = h_sw_irq;

	return rc == 0 ? 0 : -ENXIO;
}

/* This can be called multiple times to change a queue configuration */
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
				      __be32 *qpage, u32 order)
{
	s64 rc = 0;
	unsigned long esn_page;
	unsigned long esn_size;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else {
		qpage_phys = 0;
	}

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
	if (rc) {
		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
		goto fail;
	}

	/* TODO: add support for the notification page */
	q->eoi_phys = esn_page;

	/* Default is to always notify */
	flags = XIVE_EQ_ALWAYS_NOTIFY;

	/* Configure and enable the queue in HW */
	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
	if (rc) {
		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
	} else {
		q->qpage = qpage;
		if (is_secure_guest())
			uv_share_page(PHYS_PFN(qpage_phys),
				      1 << xive_alloc_order(order));
	}
fail:
	return rc;
}

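/* Allocate an event queue page for a CPU/priority and configure it in HW */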
static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
				  u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
					  q, prio, qpage, xive_queue_shift);
}

static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
				     u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;
	long rc;
	int hw_cpu = get_hard_smp_processor_id(cpu);

	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
	if (rc)
		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
		       hw_cpu, prio);

	alloc_order = xive_alloc_order(xive_queue_shift);
	if (is_secure_guest())
		uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_spapr_match(struct device_node *node)
{
	/* Ignore cascaded controllers for the moment */
	return true;
}

#ifdef CONFIG_SMP
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	int irq = xive_irq_bitmap_alloc();

	if (irq < 0) {
		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
		return -ENXIO;
	}

	xc->hw_ipi = irq;
	return 0;
}

static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;

	xive_irq_bitmap_free(xc->hw_ipi);
	xc->hw_ipi = XIVE_BAD_IRQ;
}
#endif /* CONFIG_SMP */

static void xive_spapr_shutdown(void)
{
	plpar_int_reset(0);
}

/*
 * Perform an "ack" cycle on the current thread. Grab the pending
 * active priorities and update the CPPR to the most favored one.
 */
static void xive_spapr_update_pending(struct xive_cpu *xc)
{
	u8 nsr, cppr;
	u16 ack;

	/*
	 * Perform the "Acknowledge O/S to Register" cycle.
	 *
	 * Let's speedup the access to the TIMA using the raw I/O
	 * accessor as we don't need the synchronisation routine of
	 * the higher level ones
	 */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "NSR" field which indicates the source
	 * of the interrupt (if any)
	 */
	cppr = ack & 0xff;
	nsr = ack >> 8;

	if (nsr & TM_QW1_NSR_EO) {
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
	}
}

static void xive_spapr_eoi(u32 hw_irq)
{
	/* Not used */;
}

static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Only some debug on the TIMA settings */
	pr_debug("(HW value: %08x %08x %08x)\n",
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}

static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing to do */;
}

static void xive_spapr_sync_source(u32 hw_irq)
{
	/* Specs are unclear on what this is doing */
	plpar_int_sync(0, hw_irq);
}

static int xive_spapr_debug_show(struct seq_file *m, void *private)
{
	struct xive_irq_bitmap *xibm;
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		memset(buf, 0, PAGE_SIZE);
		bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
		seq_printf(m, "bitmap #%d: %s", xibm->count, buf);
	}
	kfree(buf);

	return 0;
}

static const struct xive_ops xive_spapr_ops = {
	.populate_irq_data	= xive_spapr_populate_irq_data,
	.configure_irq		= xive_spapr_configure_irq,
	.get_irq_config		= xive_spapr_get_irq_config,
	.setup_queue		= xive_spapr_setup_queue,
	.cleanup_queue		= xive_spapr_cleanup_queue,
	.match			= xive_spapr_match,
	.shutdown		= xive_spapr_shutdown,
	.update_pending		= xive_spapr_update_pending,
	.eoi			= xive_spapr_eoi,
	.setup_cpu		= xive_spapr_setup_cpu,
	.teardown_cpu		= xive_spapr_teardown_cpu,
	.sync_source		= xive_spapr_sync_source,
	.esb_rw			= xive_spapr_esb_rw,
#ifdef CONFIG_SMP
	.get_ipi		= xive_spapr_get_ipi,
	.put_ipi		= xive_spapr_put_ipi,
	.debug_show		= xive_spapr_debug_show,
#endif /* CONFIG_SMP */
	.name			= "spapr",
};

/*
 * get max priority from "/ibm,plat-res-int-priorities"
 */
static bool xive_get_max_prio(u8 *max_prio)
{
	struct device_node *rootdn;
	const __be32 *reg;
	u32 len;
	int prio, found;

	rootdn = of_find_node_by_path("/");
	if (!rootdn) {
		pr_err("root node not found!\n");
		return false;
	}

	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
	of_node_put(rootdn);
	if (!reg) {
		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	/* HW supports priorities in the range [0-7] and 0xFF is a
	 * wildcard priority used to mask. We scan the ranges reserved
	 * by the hypervisor to find the lowest priority we can use.
	 */
	found = 0xFF;
	for (prio = 0; prio < 8; prio++) {
		int reserved = 0;
		int i;

		for (i = 0; i < len / (2 * sizeof(u32)); i++) {
			int base = be32_to_cpu(reg[2 * i]);
			int range = be32_to_cpu(reg[2 * i + 1]);

			if (prio >= base && prio < base + range)
				reserved++;
		}

		if (!reserved)
			found = prio;
	}

	if (found == 0xFF) {
		pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
		return false;
	}

	*max_prio = found;
	return true;
}

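/* Look up one byte of the "ibm,architecture-vec-5" property in the flat device tree */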
static const u8 *get_vec5_feature(unsigned int index)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND)
		return NULL;

	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5)
		return NULL;

	if (size <= index)
		return NULL;

	return vec5 + index;
}

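/*
 * Check whether XIVE should be disabled. The "xive=off" command line
 * option is ignored when the hypervisor only supports XIVE exploitation
 * mode.
 */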
static bool __init xive_spapr_disabled(void)
{
	const u8 *vec5_xive;

	vec5_xive = get_vec5_feature(OV5_INDX(OV5_XIVE_SUPPORT));
	if (vec5_xive) {
		u8 val;

		val = *vec5_xive & OV5_FEAT(OV5_XIVE_SUPPORT);
		switch (val) {
		case OV5_FEAT(OV5_XIVE_EITHER):
		case OV5_FEAT(OV5_XIVE_LEGACY):
			break;
		case OV5_FEAT(OV5_XIVE_EXPLOIT):
			/* Hypervisor only supports XIVE */
			if (xive_cmdline_disabled)
				pr_warn("WARNING: Ignoring cmdline option xive=off\n");
			return false;
		default:
			pr_warn("%s: Unknown xive support option: 0x%x\n",
				__func__, val);
			break;
		}
	}

	return xive_cmdline_disabled;
}

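/* Probe the "ibm,power-ivpe" node and initialize XIVE in sPAPR guest mode */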
bool __init xive_spapr_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio;
	u32 val;
	u32 len;
	const __be32 *reg;
	int i;

	if (xive_spapr_disabled())
		return false;

	pr_devel("%s()\n", __func__);
	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %s\n", np->full_name);

	/* Resource 1 is the OS ring TIMA */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	if (!xive_get_max_prio(&max_prio))
		return false;

	/* Feed the IRQ number allocator with the ranges given in the DT */
	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2)
		xive_irq_bitmap_add(be32_to_cpu(reg[0]),
				    be32_to_cpu(reg[1]));

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_spapr_ops, tima, TM_QW1_OS, max_prio))
		return false;

	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}

machine_arch_initcall(pseries, xive_core_debug_init);