xref: /OK3568_Linux_fs/kernel/arch/powerpc/kvm/mpic.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * OpenPIC emulation
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright (c) 2004 Jocelyn Mayer
5*4882a593Smuzhiyun  *               2011 Alexander Graf
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Permission is hereby granted, free of charge, to any person obtaining a copy
8*4882a593Smuzhiyun  * of this software and associated documentation files (the "Software"), to deal
9*4882a593Smuzhiyun  * in the Software without restriction, including without limitation the rights
10*4882a593Smuzhiyun  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11*4882a593Smuzhiyun  * copies of the Software, and to permit persons to whom the Software is
12*4882a593Smuzhiyun  * furnished to do so, subject to the following conditions:
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  * The above copyright notice and this permission notice shall be included in
15*4882a593Smuzhiyun  * all copies or substantial portions of the Software.
16*4882a593Smuzhiyun  *
17*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18*4882a593Smuzhiyun  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20*4882a593Smuzhiyun  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21*4882a593Smuzhiyun  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22*4882a593Smuzhiyun  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23*4882a593Smuzhiyun  * THE SOFTWARE.
24*4882a593Smuzhiyun  */
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun #include <linux/slab.h>
27*4882a593Smuzhiyun #include <linux/mutex.h>
28*4882a593Smuzhiyun #include <linux/kvm_host.h>
29*4882a593Smuzhiyun #include <linux/errno.h>
30*4882a593Smuzhiyun #include <linux/fs.h>
31*4882a593Smuzhiyun #include <linux/anon_inodes.h>
32*4882a593Smuzhiyun #include <linux/uaccess.h>
33*4882a593Smuzhiyun #include <asm/mpic.h>
34*4882a593Smuzhiyun #include <asm/kvm_para.h>
35*4882a593Smuzhiyun #include <asm/kvm_ppc.h>
36*4882a593Smuzhiyun #include <kvm/iodev.h>
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun #define MAX_CPU     32
39*4882a593Smuzhiyun #define MAX_SRC     256
40*4882a593Smuzhiyun #define MAX_TMR     4
41*4882a593Smuzhiyun #define MAX_IPI     4
42*4882a593Smuzhiyun #define MAX_MSI     8
43*4882a593Smuzhiyun #define MAX_IRQ     (MAX_SRC + MAX_IPI + MAX_TMR)
44*4882a593Smuzhiyun #define VID         0x03	/* MPIC version ID */
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun /* OpenPIC capability flags */
47*4882a593Smuzhiyun #define OPENPIC_FLAG_IDR_CRIT     (1 << 0)
48*4882a593Smuzhiyun #define OPENPIC_FLAG_ILR          (2 << 0)
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun /* OpenPIC address map */
51*4882a593Smuzhiyun #define OPENPIC_REG_SIZE             0x40000
52*4882a593Smuzhiyun #define OPENPIC_GLB_REG_START        0x0
53*4882a593Smuzhiyun #define OPENPIC_GLB_REG_SIZE         0x10F0
54*4882a593Smuzhiyun #define OPENPIC_TMR_REG_START        0x10F0
55*4882a593Smuzhiyun #define OPENPIC_TMR_REG_SIZE         0x220
56*4882a593Smuzhiyun #define OPENPIC_MSI_REG_START        0x1600
57*4882a593Smuzhiyun #define OPENPIC_MSI_REG_SIZE         0x200
58*4882a593Smuzhiyun #define OPENPIC_SUMMARY_REG_START    0x3800
59*4882a593Smuzhiyun #define OPENPIC_SUMMARY_REG_SIZE     0x800
60*4882a593Smuzhiyun #define OPENPIC_SRC_REG_START        0x10000
61*4882a593Smuzhiyun #define OPENPIC_SRC_REG_SIZE         (MAX_SRC * 0x20)
62*4882a593Smuzhiyun #define OPENPIC_CPU_REG_START        0x20000
63*4882a593Smuzhiyun #define OPENPIC_CPU_REG_SIZE         (0x100 + ((MAX_CPU - 1) * 0x1000))
64*4882a593Smuzhiyun 
/* Per-model Freescale MPIC parameters.  Currently the only knob is the
 * number of external interrupt sources the model exposes.
 */
struct fsl_mpic_info {
	int max_ext;	/* number of external (off-chip) IRQ inputs */
};

/* FSL MPIC v2.0 (e.g. e500v2-era SoCs) */
static struct fsl_mpic_info fsl_mpic_20 = {
	.max_ext = 12,
};

/* FSL MPIC v4.2 (e.g. e500mc/e5500-era SoCs) */
static struct fsl_mpic_info fsl_mpic_42 = {
	.max_ext = 12,
};
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun #define FRR_NIRQ_SHIFT    16
78*4882a593Smuzhiyun #define FRR_NCPU_SHIFT     8
79*4882a593Smuzhiyun #define FRR_VID_SHIFT      0
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun #define VID_REVISION_1_2   2
82*4882a593Smuzhiyun #define VID_REVISION_1_3   3
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun #define VIR_GENERIC      0x00000000	/* Generic Vendor ID */
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun #define GCR_RESET        0x80000000
87*4882a593Smuzhiyun #define GCR_MODE_PASS    0x00000000
88*4882a593Smuzhiyun #define GCR_MODE_MIXED   0x20000000
89*4882a593Smuzhiyun #define GCR_MODE_PROXY   0x60000000
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun #define TBCR_CI           0x80000000	/* count inhibit */
92*4882a593Smuzhiyun #define TCCR_TOG          0x80000000	/* toggles when decrement to zero */
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun #define IDR_EP_SHIFT      31
95*4882a593Smuzhiyun #define IDR_EP_MASK       (1 << IDR_EP_SHIFT)
96*4882a593Smuzhiyun #define IDR_CI0_SHIFT     30
97*4882a593Smuzhiyun #define IDR_CI1_SHIFT     29
98*4882a593Smuzhiyun #define IDR_P1_SHIFT      1
99*4882a593Smuzhiyun #define IDR_P0_SHIFT      0
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun #define ILR_INTTGT_MASK   0x000000ff
102*4882a593Smuzhiyun #define ILR_INTTGT_INT    0x00
103*4882a593Smuzhiyun #define ILR_INTTGT_CINT   0x01	/* critical */
104*4882a593Smuzhiyun #define ILR_INTTGT_MCP    0x02	/* machine check */
105*4882a593Smuzhiyun #define NUM_OUTPUTS       3
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun #define MSIIR_OFFSET       0x140
108*4882a593Smuzhiyun #define MSIIR_SRS_SHIFT    29
109*4882a593Smuzhiyun #define MSIIR_SRS_MASK     (0x7 << MSIIR_SRS_SHIFT)
110*4882a593Smuzhiyun #define MSIIR_IBS_SHIFT    24
111*4882a593Smuzhiyun #define MSIIR_IBS_MASK     (0x1f << MSIIR_IBS_SHIFT)
112*4882a593Smuzhiyun 
/* Return the MPIC cpu id (irq_cpu_id) of the vcpu associated with the
 * current host thread, or -1 when there is none -- e.g. an access coming
 * from a plain userspace thread, or a build where thread.kvm_vcpu does
 * not exist (only CONFIG_KVM + CONFIG_BOOKE provides it).
 */
static int get_current_cpu(void)
{
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	struct kvm_vcpu *vcpu = current->thread.kvm_vcpu;
	return vcpu ? vcpu->arch.irq_cpu_id : -1;
#else
	/* XXX */
	return -1;
#endif
}
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
125*4882a593Smuzhiyun 				      u32 val, int idx);
126*4882a593Smuzhiyun static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
127*4882a593Smuzhiyun 				     u32 *ptr, int idx);
128*4882a593Smuzhiyun static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
129*4882a593Smuzhiyun 				    uint32_t val);
130*4882a593Smuzhiyun 
/* Classification of an IRQ source; selects which IVPR bits are honored
 * (see write_IRQreg_ivpr() and openpic_reset()).
 */
enum irq_type {
	IRQ_TYPE_NORMAL = 0,	/* generic source, sense/polarity writable */
	IRQ_TYPE_FSLINT,	/* FSL internal interrupt -- level only */
	IRQ_TYPE_FSLSPECIAL,	/* FSL timer/IPI interrupt, edge, no polarity */
};
136*4882a593Smuzhiyun 
/* Per-destination pending/servicing bitmap plus a cache of the highest
 * priority entry (filled in by IRQ_check()).
 */
struct irq_queue {
	/* Round up to the nearest 64 IRQs so that the queue length
	 * won't change when moving between 32 and 64 bit hosts.
	 */
	unsigned long queue[BITS_TO_LONGS((MAX_IRQ + 63) & ~63)];
	int next;	/* IRQ number of the best candidate, or -1 */
	int priority;	/* IVPR priority of 'next', or -1 when empty */
};
145*4882a593Smuzhiyun 
/* State of one interrupt source (external, internal, timer, IPI or MSI). */
struct irq_source {
	uint32_t ivpr;		/* IRQ vector/priority register */
	uint32_t idr;		/* IRQ destination register */
	uint32_t destmask;	/* bitmap of CPU destinations */
	int last_cpu;		/* last CPU used in distributed delivery */
	int output;		/* IRQ level, e.g. ILR_INTTGT_INT */
	int pending;		/* TRUE if IRQ is pending */
	enum irq_type type;
	bool level:1;		/* level-triggered */
	bool nomask:1;	/* critical interrupts ignore mask on some FSL MPICs */
};
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun #define IVPR_MASK_SHIFT       31
159*4882a593Smuzhiyun #define IVPR_MASK_MASK        (1 << IVPR_MASK_SHIFT)
160*4882a593Smuzhiyun #define IVPR_ACTIVITY_SHIFT   30
161*4882a593Smuzhiyun #define IVPR_ACTIVITY_MASK    (1 << IVPR_ACTIVITY_SHIFT)
162*4882a593Smuzhiyun #define IVPR_MODE_SHIFT       29
163*4882a593Smuzhiyun #define IVPR_MODE_MASK        (1 << IVPR_MODE_SHIFT)
164*4882a593Smuzhiyun #define IVPR_POLARITY_SHIFT   23
165*4882a593Smuzhiyun #define IVPR_POLARITY_MASK    (1 << IVPR_POLARITY_SHIFT)
166*4882a593Smuzhiyun #define IVPR_SENSE_SHIFT      22
167*4882a593Smuzhiyun #define IVPR_SENSE_MASK       (1 << IVPR_SENSE_SHIFT)
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun #define IVPR_PRIORITY_MASK     (0xF << 16)
170*4882a593Smuzhiyun #define IVPR_PRIORITY(_ivprr_) ((int)(((_ivprr_) & IVPR_PRIORITY_MASK) >> 16))
171*4882a593Smuzhiyun #define IVPR_VECTOR(opp, _ivprr_) ((_ivprr_) & (opp)->vector_mask)
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun /* IDR[EP/CI] are only for FSL MPIC prior to v4.0 */
174*4882a593Smuzhiyun #define IDR_EP      0x80000000	/* external pin */
175*4882a593Smuzhiyun #define IDR_CI      0x40000000	/* critical interrupt */
176*4882a593Smuzhiyun 
/* Per-CPU (per output pin set) delivery state. */
struct irq_dest {
	struct kvm_vcpu *vcpu;	/* NULL until the vcpu is connected */

	int32_t ctpr;		/* CPU current task priority */
	struct irq_queue raised;	/* raised but not yet acked */
	struct irq_queue servicing;	/* acked, awaiting EOI */

	/* Count of IRQ sources asserting on non-INT outputs */
	uint32_t outputs_active[NUM_OUTPUTS];
};
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun #define MAX_MMIO_REGIONS 10
189*4882a593Smuzhiyun 
/* Top-level emulated OpenPIC instance; one per KVM MPIC device.
 * All register state below is protected by 'lock'.
 */
struct openpic {
	struct kvm *kvm;
	struct kvm_device *dev;
	struct kvm_io_device mmio;
	const struct mem_reg *mmio_regions[MAX_MMIO_REGIONS];
	int num_mmio_regions;

	gpa_t reg_base;		/* guest-physical base of the register block */
	spinlock_t lock;

	/* Behavior control */
	struct fsl_mpic_info *fsl;
	uint32_t model;		/* KVM_DEV_TYPE_FSL_MPIC_* */
	uint32_t flags;		/* OPENPIC_FLAG_* */
	uint32_t nb_irqs;
	uint32_t vid;
	uint32_t vir;		/* Vendor identification register */
	uint32_t vector_mask;
	uint32_t tfrr_reset;
	uint32_t ivpr_reset;
	uint32_t idr_reset;
	uint32_t brr1;
	uint32_t mpic_mode_mask;

	/* Global registers */
	uint32_t frr;		/* Feature reporting register */
	uint32_t gcr;		/* Global configuration register  */
	uint32_t pir;		/* Processor initialization register */
	uint32_t spve;		/* Spurious vector register */
	uint32_t tfrr;		/* Timer frequency reporting register */
	/* Source registers */
	struct irq_source src[MAX_IRQ];
	/* Local registers per output pin */
	struct irq_dest dst[MAX_CPU];
	uint32_t nb_cpus;
	/* Timer registers */
	struct {
		uint32_t tccr;	/* Global timer current count register */
		uint32_t tbcr;	/* Global timer base count register */
	} timers[MAX_TMR];
	/* Shared MSI registers */
	struct {
		uint32_t msir;	/* Shared Message Signaled Interrupt Register */
	} msi[MAX_MSI];
	uint32_t max_irq;	/* number of implemented sources */
	uint32_t irq_ipi0;	/* first IPI source number */
	uint32_t irq_tim0;	/* first timer source number */
	uint32_t irq_msi;	/* first MSI source number */
};
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun 
/* Assert one of a destination CPU's output pins.  Only the normal INT
 * output is actually delivered (as a level external interrupt via
 * kvm_vcpu_ioctl_interrupt()); critical/machine-check outputs are TODO.
 */
static void mpic_irq_raise(struct openpic *opp, struct irq_dest *dst,
			   int output)
{
	struct kvm_interrupt irq = {
		.irq = KVM_INTERRUPT_SET_LEVEL,
	};

	/* dst->vcpu is NULL until a vcpu connects to this MPIC cpu slot */
	if (!dst->vcpu) {
		pr_debug("%s: destination cpu %d does not exist\n",
			 __func__, (int)(dst - &opp->dst[0]));
		return;
	}

	pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
		output);

	if (output != ILR_INTTGT_INT)	/* TODO */
		return;

	kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq);
}
262*4882a593Smuzhiyun 
/* De-assert one of a destination CPU's output pins; mirror of
 * mpic_irq_raise().  Only the INT output is handled (dequeues the
 * pending external interrupt from the vcpu).
 */
static void mpic_irq_lower(struct openpic *opp, struct irq_dest *dst,
			   int output)
{
	if (!dst->vcpu) {
		pr_debug("%s: destination cpu %d does not exist\n",
			 __func__, (int)(dst - &opp->dst[0]));
		return;
	}

	pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
		output);

	if (output != ILR_INTTGT_INT)	/* TODO */
		return;

	kvmppc_core_dequeue_external(dst->vcpu);
}
280*4882a593Smuzhiyun 
/* Mark IRQ n_IRQ as present in the queue's bitmap. */
static inline void IRQ_setbit(struct irq_queue *q, int n_IRQ)
{
	set_bit(n_IRQ, q->queue);
}
285*4882a593Smuzhiyun 
/* Remove IRQ n_IRQ from the queue's bitmap. */
static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
{
	clear_bit(n_IRQ, q->queue);
}
290*4882a593Smuzhiyun 
IRQ_check(struct openpic * opp,struct irq_queue * q)291*4882a593Smuzhiyun static void IRQ_check(struct openpic *opp, struct irq_queue *q)
292*4882a593Smuzhiyun {
293*4882a593Smuzhiyun 	int irq = -1;
294*4882a593Smuzhiyun 	int next = -1;
295*4882a593Smuzhiyun 	int priority = -1;
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 	for (;;) {
298*4882a593Smuzhiyun 		irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
299*4882a593Smuzhiyun 		if (irq == opp->max_irq)
300*4882a593Smuzhiyun 			break;
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun 		pr_debug("IRQ_check: irq %d set ivpr_pr=%d pr=%d\n",
303*4882a593Smuzhiyun 			irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 		if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
306*4882a593Smuzhiyun 			next = irq;
307*4882a593Smuzhiyun 			priority = IVPR_PRIORITY(opp->src[irq].ivpr);
308*4882a593Smuzhiyun 		}
309*4882a593Smuzhiyun 	}
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun 	q->next = next;
312*4882a593Smuzhiyun 	q->priority = priority;
313*4882a593Smuzhiyun }
314*4882a593Smuzhiyun 
/* Refresh the queue's cached state and return the number of the
 * highest-priority pending IRQ, or -1 if the queue is empty.
 */
static int IRQ_get_next(struct openpic *opp, struct irq_queue *q)
{
	/* XXX: optimize */
	IRQ_check(opp, q);

	return q->next;
}
322*4882a593Smuzhiyun 
/* Propagate a change in IRQ n_IRQ's active state to destination CPU
 * n_CPU: update the raised queue, compare against ctpr and the
 * in-service priority, and raise/lower the appropriate output pin.
 * 'was_active' is the previous activity state, used to keep the
 * non-INT output reference counts balanced.
 */
static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ,
			   bool active, bool was_active)
{
	struct irq_dest *dst;
	struct irq_source *src;
	int priority;

	dst = &opp->dst[n_CPU];
	src = &opp->src[n_IRQ];

	pr_debug("%s: IRQ %d active %d was %d\n",
		__func__, n_IRQ, active, was_active);

	if (src->output != ILR_INTTGT_INT) {
		pr_debug("%s: output %d irq %d active %d was %d count %d\n",
			__func__, src->output, n_IRQ, active, was_active,
			dst->outputs_active[src->output]);

		/* On Freescale MPIC, critical interrupts ignore priority,
		 * IACK, EOI, etc.  Before MPIC v4.1 they also ignore
		 * masking.
		 */
		if (active) {
			/* Raise the pin only on a 0 -> 1 transition of the
			 * per-output source count.
			 */
			if (!was_active &&
			    dst->outputs_active[src->output]++ == 0) {
				pr_debug("%s: Raise OpenPIC output %d cpu %d irq %d\n",
					__func__, src->output, n_CPU, n_IRQ);
				mpic_irq_raise(opp, dst, src->output);
			}
		} else {
			/* Lower the pin only when the last asserting source
			 * goes away.
			 */
			if (was_active &&
			    --dst->outputs_active[src->output] == 0) {
				pr_debug("%s: Lower OpenPIC output %d cpu %d irq %d\n",
					__func__, src->output, n_CPU, n_IRQ);
				mpic_irq_lower(opp, dst, src->output);
			}
		}

		return;
	}

	priority = IVPR_PRIORITY(src->ivpr);

	/* Even if the interrupt doesn't have enough priority,
	 * it is still raised, in case ctpr is lowered later.
	 */
	if (active)
		IRQ_setbit(&dst->raised, n_IRQ);
	else
		IRQ_resetbit(&dst->raised, n_IRQ);

	IRQ_check(opp, &dst->raised);

	if (active && priority <= dst->ctpr) {
		/* Below the task-priority threshold: stays queued but is not
		 * delivered now.
		 */
		pr_debug("%s: IRQ %d priority %d too low for ctpr %d on CPU %d\n",
			__func__, n_IRQ, priority, dst->ctpr, n_CPU);
		active = 0;
	}

	if (active) {
		if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
		    priority <= dst->servicing.priority) {
			pr_debug("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n",
				__func__, n_IRQ, dst->servicing.next, n_CPU);
		} else {
			pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n",
				__func__, n_CPU, n_IRQ, dst->raised.next);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}
	} else {
		/* The source went inactive: decide whether another raised IRQ
		 * keeps the INT line asserted, or the line drops.
		 */
		IRQ_get_next(opp, &dst->servicing);
		if (dst->raised.priority > dst->ctpr &&
		    dst->raised.priority > dst->servicing.priority) {
			pr_debug("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d\n",
				__func__, n_IRQ, dst->raised.next,
				dst->raised.priority, dst->ctpr,
				dst->servicing.priority, n_CPU);
			/* IRQ line stays asserted */
		} else {
			pr_debug("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n",
				__func__, n_IRQ, dst->ctpr,
				dst->servicing.priority, n_CPU);
			mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
		}
	}
}
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun /* update pic state because registers for n_IRQ have changed value */
/* update pic state because registers for n_IRQ have changed value */
static void openpic_update_irq(struct openpic *opp, int n_IRQ)
{
	struct irq_source *src;
	bool active, was_active;
	int i;

	src = &opp->src[n_IRQ];
	active = src->pending;

	if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
		/* Interrupt source is disabled */
		pr_debug("%s: IRQ %d is disabled\n", __func__, n_IRQ);
		active = false;
	}

	was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);

	/*
	 * We don't have a similar check for already-active because
	 * ctpr may have changed and we need to withdraw the interrupt.
	 */
	if (!active && !was_active) {
		pr_debug("%s: IRQ %d is already inactive\n", __func__, n_IRQ);
		return;
	}

	/* Mirror the new activity state into the read-only IVPR bit */
	if (active)
		src->ivpr |= IVPR_ACTIVITY_MASK;
	else
		src->ivpr &= ~IVPR_ACTIVITY_MASK;

	if (src->destmask == 0) {
		/* No target */
		pr_debug("%s: IRQ %d has no target\n", __func__, n_IRQ);
		return;
	}

	if (src->destmask == (1 << src->last_cpu)) {
		/* Only one CPU is allowed to receive this IRQ */
		IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
	} else if (!(src->ivpr & IVPR_MODE_MASK)) {
		/* Directed delivery mode: fan out to every CPU in destmask */
		for (i = 0; i < opp->nb_cpus; i++) {
			if (src->destmask & (1 << i)) {
				IRQ_local_pipe(opp, i, n_IRQ, active,
					       was_active);
			}
		}
	} else {
		/* Distributed delivery mode: round-robin starting after
		 * last_cpu.  NOTE(review): this loop assumes destmask has at
		 * least one bit within [0, nb_cpus); otherwise it would not
		 * terminate -- presumably guaranteed by write_IRQreg_idr()
		 * masking, but worth confirming.
		 */
		for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
			if (i == opp->nb_cpus)
				i = 0;

			if (src->destmask & (1 << i)) {
				IRQ_local_pipe(opp, i, n_IRQ, active,
					       was_active);
				src->last_cpu = i;
				break;
			}
		}
	}
}
474*4882a593Smuzhiyun 
openpic_set_irq(void * opaque,int n_IRQ,int level)475*4882a593Smuzhiyun static void openpic_set_irq(void *opaque, int n_IRQ, int level)
476*4882a593Smuzhiyun {
477*4882a593Smuzhiyun 	struct openpic *opp = opaque;
478*4882a593Smuzhiyun 	struct irq_source *src;
479*4882a593Smuzhiyun 
480*4882a593Smuzhiyun 	if (n_IRQ >= MAX_IRQ) {
481*4882a593Smuzhiyun 		WARN_ONCE(1, "%s: IRQ %d out of range\n", __func__, n_IRQ);
482*4882a593Smuzhiyun 		return;
483*4882a593Smuzhiyun 	}
484*4882a593Smuzhiyun 
485*4882a593Smuzhiyun 	src = &opp->src[n_IRQ];
486*4882a593Smuzhiyun 	pr_debug("openpic: set irq %d = %d ivpr=0x%08x\n",
487*4882a593Smuzhiyun 		n_IRQ, level, src->ivpr);
488*4882a593Smuzhiyun 	if (src->level) {
489*4882a593Smuzhiyun 		/* level-sensitive irq */
490*4882a593Smuzhiyun 		src->pending = level;
491*4882a593Smuzhiyun 		openpic_update_irq(opp, n_IRQ);
492*4882a593Smuzhiyun 	} else {
493*4882a593Smuzhiyun 		/* edge-sensitive irq */
494*4882a593Smuzhiyun 		if (level) {
495*4882a593Smuzhiyun 			src->pending = 1;
496*4882a593Smuzhiyun 			openpic_update_irq(opp, n_IRQ);
497*4882a593Smuzhiyun 		}
498*4882a593Smuzhiyun 
499*4882a593Smuzhiyun 		if (src->output != ILR_INTTGT_INT) {
500*4882a593Smuzhiyun 			/* Edge-triggered interrupts shouldn't be used
501*4882a593Smuzhiyun 			 * with non-INT delivery, but just in case,
502*4882a593Smuzhiyun 			 * try to make it do something sane rather than
503*4882a593Smuzhiyun 			 * cause an interrupt storm.  This is close to
504*4882a593Smuzhiyun 			 * what you'd probably see happen in real hardware.
505*4882a593Smuzhiyun 			 */
506*4882a593Smuzhiyun 			src->pending = 0;
507*4882a593Smuzhiyun 			openpic_update_irq(opp, n_IRQ);
508*4882a593Smuzhiyun 		}
509*4882a593Smuzhiyun 	}
510*4882a593Smuzhiyun }
511*4882a593Smuzhiyun 
/* Put the whole controller into its power-on state: global registers,
 * every source's IVPR/IDR, every destination's queues and ctpr, and
 * the timers.  Caller is expected to hold opp->lock.
 */
static void openpic_reset(struct openpic *opp)
{
	int i;

	opp->gcr = GCR_RESET;
	/* Initialise controller registers */
	opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
	    (opp->vid << FRR_VID_SHIFT);

	opp->pir = 0;
	opp->spve = -1 & opp->vector_mask;	/* spurious vector = all ones */
	opp->tfrr = opp->tfrr_reset;
	/* Initialise IRQ sources */
	for (i = 0; i < opp->max_irq; i++) {
		opp->src[i].ivpr = opp->ivpr_reset;

		/* Per-type fixups; see enum irq_type */
		switch (opp->src[i].type) {
		case IRQ_TYPE_NORMAL:
			opp->src[i].level =
			    !!(opp->ivpr_reset & IVPR_SENSE_MASK);
			break;

		case IRQ_TYPE_FSLINT:
			opp->src[i].ivpr |= IVPR_POLARITY_MASK;
			break;

		case IRQ_TYPE_FSLSPECIAL:
			break;
		}

		/* Also recomputes output/destmask/nomask from idr_reset */
		write_IRQreg_idr(opp, i, opp->idr_reset);
	}
	/* Initialise IRQ destinations */
	for (i = 0; i < MAX_CPU; i++) {
		opp->dst[i].ctpr = 15;	/* max task priority: mask everything */
		memset(&opp->dst[i].raised, 0, sizeof(struct irq_queue));
		opp->dst[i].raised.next = -1;
		memset(&opp->dst[i].servicing, 0, sizeof(struct irq_queue));
		opp->dst[i].servicing.next = -1;
	}
	/* Initialise timers */
	for (i = 0; i < MAX_TMR; i++) {
		opp->timers[i].tccr = 0;
		opp->timers[i].tbcr = TBCR_CI;	/* counting inhibited */
	}
	/* Go out of RESET state */
	opp->gcr = 0;
}
560*4882a593Smuzhiyun 
/* Read back the IRQ destination register for source n_IRQ. */
static inline uint32_t read_IRQreg_idr(struct openpic *opp, int n_IRQ)
{
	return opp->src[n_IRQ].idr;
}
565*4882a593Smuzhiyun 
/* Read the interrupt level register (output selector) for n_IRQ.
 * Models without ILR support return all-ones.
 */
static inline uint32_t read_IRQreg_ilr(struct openpic *opp, int n_IRQ)
{
	if (opp->flags & OPENPIC_FLAG_ILR)
		return opp->src[n_IRQ].output;

	return 0xffffffff;
}
573*4882a593Smuzhiyun 
/* Read back the IRQ vector/priority register for source n_IRQ. */
static inline uint32_t read_IRQreg_ivpr(struct openpic *opp, int n_IRQ)
{
	return opp->src[n_IRQ].ivpr;
}
578*4882a593Smuzhiyun 
/* Write the IRQ destination register for source n_IRQ and derive the
 * delivery state from it: destmask (which CPUs may receive the IRQ),
 * output (INT vs. critical) and nomask.  On FSL MPICs with
 * OPENPIC_FLAG_IDR_CRIT, the IDR also carries per-CPU critical-interrupt
 * bits (CI0 downward) and the external-pin bit.
 */
static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
				    uint32_t val)
{
	struct irq_source *src = &opp->src[n_IRQ];
	/* low nb_cpus bits select normal INT destinations */
	uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
	uint32_t crit_mask = 0;
	uint32_t mask = normal_mask;
	/* CI bits grow downward from bit 30; this positions CI(nb_cpus-1) */
	int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
	int i;

	if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
		crit_mask = mask << crit_shift;
		mask |= crit_mask | IDR_EP;
	}

	/* Reserved bits read as zero */
	src->idr = val & mask;
	pr_debug("Set IDR %d to 0x%08x\n", n_IRQ, src->idr);

	if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
		if (src->idr & crit_mask) {
			if (src->idr & normal_mask) {
				pr_debug("%s: IRQ configured for multiple output types, using critical\n",
					__func__);
			}

			src->output = ILR_INTTGT_CINT;
			/* pre-v4.1 FSL critical interrupts ignore masking */
			src->nomask = true;
			src->destmask = 0;

			/* Translate CI bits (CI0 = bit 30, CI1 = bit 29, ...)
			 * into a plain per-CPU destination bitmap.
			 */
			for (i = 0; i < opp->nb_cpus; i++) {
				int n_ci = IDR_CI0_SHIFT - i;

				if (src->idr & (1UL << n_ci))
					src->destmask |= 1UL << i;
			}
		} else {
			src->output = ILR_INTTGT_INT;
			src->nomask = false;
			src->destmask = src->idr & normal_mask;
		}
	} else {
		src->destmask = src->idr;
	}
}
623*4882a593Smuzhiyun 
/* Write the interrupt level register for source n_IRQ, selecting which
 * output pin (INT/CINT/MCP) the source drives.  A no-op on models
 * without OPENPIC_FLAG_ILR.
 *
 * Fix: the debug message claimed to report the ILR value being written
 * but printed src->idr (the destination register) instead of val.
 */
static inline void write_IRQreg_ilr(struct openpic *opp, int n_IRQ,
				    uint32_t val)
{
	if (opp->flags & OPENPIC_FLAG_ILR) {
		struct irq_source *src = &opp->src[n_IRQ];

		src->output = val & ILR_INTTGT_MASK;
		pr_debug("Set ILR %d to 0x%08x, output %d\n", n_IRQ, val,
			src->output);

		/* TODO: on MPIC v4.0 only, set nomask for non-INT */
	}
}
637*4882a593Smuzhiyun 
/* Write the IRQ vector/priority register for source n_IRQ, preserving
 * the read-only ACTIVITY bit, applying per-type restrictions on the
 * sense/polarity bits, and re-evaluating delivery.
 */
static inline void write_IRQreg_ivpr(struct openpic *opp, int n_IRQ,
				     uint32_t val)
{
	uint32_t mask;

	/* NOTE when implementing newer FSL MPIC models: starting with v4.0,
	 * the polarity bit is read-only on internal interrupts.
	 */
	mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
	    IVPR_POLARITY_MASK | opp->vector_mask;

	/* ACTIVITY bit is read-only */
	opp->src[n_IRQ].ivpr =
	    (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);

	/* For FSL internal interrupts, The sense bit is reserved and zero,
	 * and the interrupt is always level-triggered.  Timers and IPIs
	 * have no sense or polarity bits, and are edge-triggered.
	 */
	switch (opp->src[n_IRQ].type) {
	case IRQ_TYPE_NORMAL:
		opp->src[n_IRQ].level =
		    !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
		break;

	case IRQ_TYPE_FSLINT:
		opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
		break;

	case IRQ_TYPE_FSLSPECIAL:
		opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
		break;
	}

	/* Masking/priority may have changed; re-run delivery */
	openpic_update_irq(opp, n_IRQ);
	pr_debug("Set IVPR %d to 0x%08x -> 0x%08x\n", n_IRQ, val,
		opp->src[n_IRQ].ivpr);
}
676*4882a593Smuzhiyun 
/* Write the Global Configuration Register: a RESET request resets the
 * whole controller, otherwise only the MPIC mode bits are replaced.
 */
static void openpic_gcr_write(struct openpic *opp, uint64_t val)
{
	if (val & GCR_RESET) {
		openpic_reset(opp);
	} else {
		uint64_t mode = val & opp->mpic_mode_mask;

		opp->gcr = (opp->gcr & ~opp->mpic_mode_mask) | mode;
	}
}
687*4882a593Smuzhiyun 
/* MMIO write handler for the global register block.
 *
 * Unaligned writes are silently dropped.  Returns 0 on success or a
 * negative errno when the access must be handled by userspace.
 */
static int openpic_gbl_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int ret = 0;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	switch (addr) {
	case 0x40 ... 0xB0:	/* per-CPU registers, aliased here */
		ret = openpic_cpu_write_internal(opp, addr, val,
						 get_current_cpu());
		break;
	case 0x1020:		/* GCR */
		openpic_gcr_write(opp, val);
		break;
	case 0x1090:		/* PIR */
		/*
		 * This register is used to reset a CPU core --
		 * let userspace handle it.
		 */
		ret = -ENXIO;
		break;
	case 0x10A0:		/* IPI_IVPR */
	case 0x10B0:
	case 0x10C0:
	case 0x10D0:
		write_IRQreg_ivpr(opp, opp->irq_ipi0 + ((addr - 0x10A0) >> 4),
				  val);
		break;
	case 0x10E0:		/* SPVE */
		opp->spve = val & opp->vector_mask;
		break;
	case 0x00:		/* Block Revision Register1 (BRR1) is Readonly */
	case 0x1000:		/* FRR */
	case 0x1080:		/* VIR */
	default:
		/* read-only or unimplemented: ignore */
		break;
	}

	return ret;
}
743*4882a593Smuzhiyun 
/* MMIO read handler for the global register block.  Unaligned reads
 * return all-ones.
 */
static int openpic_gbl_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	u32 r = 0xFFFFFFFF;
	int ret = 0;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	if (addr & 0xF)
		goto out;

	switch (addr) {
	case 0x00:		/* Block Revision Register1 (BRR1) */
		r = opp->brr1;
		break;
	case 0x40 ... 0xB0:	/* per-CPU registers, aliased here */
		ret = openpic_cpu_read_internal(opp, addr, &r,
						get_current_cpu());
		break;
	case 0x1000:		/* FRR */
		r = opp->frr | ((opp->nb_cpus - 1) << FRR_NCPU_SHIFT);
		break;
	case 0x1020:		/* GCR */
		r = opp->gcr;
		break;
	case 0x1080:		/* VIR */
		r = opp->vir;
		break;
	case 0x1090:		/* PIR */
		r = 0x00000000;
		break;
	case 0x10A0:		/* IPI_IVPR */
	case 0x10B0:
	case 0x10C0:
	case 0x10D0:
		r = read_IRQreg_ivpr(opp, opp->irq_ipi0 + ((addr - 0x10A0) >> 4));
		break;
	case 0x10E0:		/* SPVE */
		r = opp->spve;
		break;
	default:
		break;
	}

out:
	pr_debug("%s: => 0x%08x\n", __func__, r);
	*ptr = r;
	return ret;
}
805*4882a593Smuzhiyun 
/* MMIO write handler for the timer register block.
 *
 * The incoming offset is relative to the timer region, which begins at
 * 0x10f0 in global register space, so it is rebased first.  Unaligned
 * writes are silently dropped.
 */
static int openpic_tmr_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int idx;

	addr += 0x10f0;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	if (addr == 0x10f0) {
		/* TFRR */
		opp->tfrr = val;
		return 0;
	}

	idx = (addr >> 6) & 0x3;	/* which of the four timers */

	/* Decode the register within the timer; the old code also stored
	 * "addr & 0x30" back into addr and then masked it a second time
	 * in the switch expression -- the redundant assignment is gone. */
	switch (addr & 0x30) {
	case 0x00:		/* TCCR is read-only */
		break;
	case 0x10:		/* TBCR */
		/* If TCCR's toggle bit is set and this write clears a
		 * previously-set count-inhibit (CI) bit, clear the
		 * toggle bit as well. */
		if ((opp->timers[idx].tccr & TCCR_TOG) != 0 &&
		    (val & TBCR_CI) == 0 &&
		    (opp->timers[idx].tbcr & TBCR_CI) != 0)
			opp->timers[idx].tccr &= ~TCCR_TOG;

		opp->timers[idx].tbcr = val;
		break;
	case 0x20:		/* TVPR */
		write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
		break;
	case 0x30:		/* TDR */
		write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
		break;
	}

	return 0;
}
847*4882a593Smuzhiyun 
/* MMIO read handler for the timer register block.  Unaligned reads
 * return all-ones.
 */
static int openpic_tmr_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t r = 0xFFFFFFFF;
	int idx;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	if (addr & 0xF)
		goto done;

	if (addr == 0x0) {
		/* TFRR */
		r = opp->tfrr;
		goto done;
	}

	idx = (addr >> 6) & 0x3;	/* which of the four timers */
	switch (addr & 0x30) {
	case 0x00:		/* TCCR */
		r = opp->timers[idx].tccr;
		break;
	case 0x10:		/* TBCR */
		r = opp->timers[idx].tbcr;
		break;
	case 0x20:		/* TIPV */
		r = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
		break;
	case 0x30:		/* TIDE (TIDR) */
		r = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
		break;
	}

done:
	pr_debug("%s: => 0x%08x\n", __func__, r);
	*ptr = r;
	return 0;
}
885*4882a593Smuzhiyun 
/* MMIO write handler for the per-source register block: each source
 * occupies a 0x20-byte window holding IVPR (+0x00), IDR (+0x10) and
 * ILR (+0x18).
 */
static int openpic_src_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int n_IRQ;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);

	addr &= 0xffff;
	n_IRQ = addr >> 5;

	switch (addr & 0x1f) {
	case 0x00:
		write_IRQreg_ivpr(opp, n_IRQ, val);
		break;
	case 0x10:
		write_IRQreg_idr(opp, n_IRQ, val);
		break;
	case 0x18:
		write_IRQreg_ilr(opp, n_IRQ, val);
		break;
	}

	return 0;
}
910*4882a593Smuzhiyun 
/* MMIO read handler for the per-source register block (see
 * openpic_src_write for the per-source layout).  Unhandled offsets
 * read as all-ones.
 */
static int openpic_src_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t r = 0xFFFFFFFF;
	int n_IRQ;

	pr_debug("%s: addr %#llx\n", __func__, addr);

	addr &= 0xffff;
	n_IRQ = addr >> 5;

	switch (addr & 0x1f) {
	case 0x00:
		r = read_IRQreg_ivpr(opp, n_IRQ);
		break;
	case 0x10:
		r = read_IRQreg_idr(opp, n_IRQ);
		break;
	case 0x18:
		r = read_IRQreg_ilr(opp, n_IRQ);
		break;
	}

	pr_debug("%s: => 0x%08x\n", __func__, r);
	*ptr = r;
	return 0;
}
939*4882a593Smuzhiyun 
/* MMIO write handler for the MSI register block.  A write to MSIIR
 * latches the message bit in the selected MSIR and pulses the
 * corresponding shared MSI interrupt.
 */
static int openpic_msi_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int srs, ibs;

	pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	if (addr == MSIIR_OFFSET) {
		srs = val >> MSIIR_SRS_SHIFT;
		ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
		opp->msi[srs].msir |= 1 << ibs;
		openpic_set_irq(opp, opp->irq_msi + srs, 1);
	}
	/* most registers are read-only, thus ignored */

	return 0;
}
965*4882a593Smuzhiyun 
/* MMIO read handler for the MSI register block.  MSIR reads are
 * destructive (clear-on-read); MSISR summarises which MSIRs still have
 * bits pending.
 */
static int openpic_msi_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t r = 0;
	int i;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	if (addr & 0xF)
		return -ENXIO;

	if (addr <= 0x70) {
		/* MSIRs */
		int srs = addr >> 4;

		r = opp->msi[srs].msir;
		/* Clear on read */
		opp->msi[srs].msir = 0;
		openpic_set_irq(opp, opp->irq_msi + srs, 0);
	} else if (addr == 0x120) {
		/* MSISR */
		for (i = 0; i < MAX_MSI; i++)
			r |= (opp->msi[i].msir ? 1 : 0) << i;
	}

	pr_debug("%s: => 0x%08x\n", __func__, r);
	*ptr = r;
	return 0;
}
1002*4882a593Smuzhiyun 
/* Error interrupt summary registers are not modelled yet; every read
 * returns 0.
 */
static int openpic_summary_read(void *opaque, gpa_t addr, u32 *ptr)
{
	pr_debug("%s: addr %#llx\n", __func__, addr);

	/* TODO: EISR/EIMR */
	*ptr = 0;
	return 0;
}
1014*4882a593Smuzhiyun 
/* Error interrupt summary registers are not modelled yet; writes are
 * logged and otherwise ignored.
 */
static int openpic_summary_write(void *opaque, gpa_t addr, u32 val)
{
	pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);

	/* TODO: EISR/EIMR */
	return 0;
}
1022*4882a593Smuzhiyun 
/* MMIO write handler for a per-CPU register bank.
 *
 * @idx selects the destination CPU; writes with idx < 0 (no current
 * vcpu) or with an unaligned address are silently ignored.  Callers
 * hold opp->lock; it is dropped briefly around kvm_notify_acked_irq()
 * in the EOI path.
 */
static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
				      u32 val, int idx)
{
	struct openpic *opp = opaque;
	struct irq_source *src;
	struct irq_dest *dst;
	int s_IRQ, n_IRQ;

	pr_debug("%s: cpu %d addr %#llx <= 0x%08x\n", __func__, idx,
		addr, val);

	if (idx < 0)
		return 0;

	if (addr & 0xF)
		return 0;

	dst = &opp->dst[idx];
	addr &= 0xFF0;
	switch (addr) {
	case 0x40:		/* IPIDR */
	case 0x50:
	case 0x60:
	case 0x70:
		idx = (addr - 0x40) >> 4;
		/* we use IDE as mask which CPUs to deliver the IPI to still. */
		opp->src[opp->irq_ipi0 + idx].destmask |= val;
		openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
		openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
		break;
	case 0x80:		/* CTPR */
		dst->ctpr = val & 0x0000000F;

		pr_debug("%s: set CPU %d ctpr to %d, raised %d servicing %d\n",
			__func__, idx, dst->ctpr, dst->raised.priority,
			dst->servicing.priority);

		if (dst->raised.priority <= dst->ctpr) {
			/* Raised IRQ no longer beats the new task
			 * priority: drop the INT line. */
			pr_debug("%s: Lower OpenPIC INT output cpu %d due to ctpr\n",
				__func__, idx);
			mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
		} else if (dst->raised.priority > dst->servicing.priority) {
			pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d\n",
				__func__, idx, dst->raised.next);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}

		break;
	case 0x90:		/* WHOAMI */
		/* Read-only register */
		break;
	case 0xA0:		/* IACK */
		/* Read-only register */
		break;
	case 0xB0: {		/* EOI */
		int notify_eoi;

		pr_debug("EOI\n");
		s_IRQ = IRQ_get_next(opp, &dst->servicing);

		if (s_IRQ < 0) {
			pr_debug("%s: EOI with no interrupt in service\n",
				__func__);
			break;
		}

		IRQ_resetbit(&dst->servicing, s_IRQ);
		/* Notify listeners that the IRQ is over */
		notify_eoi = s_IRQ;
		/* Set up next servicing IRQ */
		s_IRQ = IRQ_get_next(opp, &dst->servicing);
		/* Check queued interrupts. */
		n_IRQ = IRQ_get_next(opp, &dst->raised);
		if (n_IRQ != -1) {
			/* Only form the source pointer for a valid IRQ.
			 * The old code computed &opp->src[-1] when the
			 * raised queue was empty; although never
			 * dereferenced, forming that pointer is
			 * undefined behavior. */
			src = &opp->src[n_IRQ];
			if (s_IRQ == -1 ||
			    IVPR_PRIORITY(src->ivpr) > dst->servicing.priority) {
				pr_debug("Raise OpenPIC INT output cpu %d irq %d\n",
					idx, n_IRQ);
				mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
			}
		}

		/* Drop the device lock while notifying ack listeners. */
		spin_unlock(&opp->lock);
		kvm_notify_acked_irq(opp->kvm, 0, notify_eoi);
		spin_lock(&opp->lock);

		break;
	}
	default:
		break;
	}

	return 0;
}
1117*4882a593Smuzhiyun 
/* MMIO write entry point for the per-CPU register space: the CPU index
 * is decoded from bits 16:12 of the offset.
 */
static int openpic_cpu_write(void *opaque, gpa_t addr, u32 val)
{
	int cpu = (addr & 0x1f000) >> 12;

	return openpic_cpu_write_internal(opaque, addr, val, cpu);
}
1125*4882a593Smuzhiyun 
/* Interrupt acknowledge for CPU @cpu: return the vector of the
 * highest-priority raised interrupt and move it into the servicing
 * queue, or the spurious vector (SPVE) when nothing is deliverable.
 * Callers hold opp->lock (see kvmppc_mpic_set_epr).
 */
static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst,
			     int cpu)
{
	struct irq_source *src;
	int retval, irq;

	/* Acknowledging de-asserts the INT output line. */
	pr_debug("Lower OpenPIC INT output\n");
	mpic_irq_lower(opp, dst, ILR_INTTGT_INT);

	irq = IRQ_get_next(opp, &dst->raised);
	pr_debug("IACK: irq=%d\n", irq);

	if (irq == -1)
		/* No more interrupt pending */
		return opp->spve;

	src = &opp->src[irq];
	if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
	    !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
		/* Stale raised bit, or priority no longer beats the
		 * current task priority: recompute state and report
		 * the spurious vector. */
		pr_err("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x\n",
			__func__, irq, dst->ctpr, src->ivpr);
		openpic_update_irq(opp, irq);
		retval = opp->spve;
	} else {
		/* IRQ enter servicing state */
		IRQ_setbit(&dst->servicing, irq);
		retval = IVPR_VECTOR(opp, src->ivpr);
	}

	if (!src->level) {
		/* edge-sensitive IRQ: consumed by the ack itself */
		src->ivpr &= ~IVPR_ACTIVITY_MASK;
		src->pending = 0;
		IRQ_resetbit(&dst->raised, irq);
	}

	if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + MAX_IPI))) {
		/* IPIs are delivered per-CPU: remove this CPU from the
		 * destination mask once it has taken the interrupt. */
		src->destmask &= ~(1 << cpu);
		if (src->destmask && !src->level) {
			/* trigger on CPUs that didn't know about it yet */
			openpic_set_irq(opp, irq, 1);
			openpic_set_irq(opp, irq, 0);
			/* if all CPUs knew about it, set active bit again */
			src->ivpr |= IVPR_ACTIVITY_MASK;
		}
	}

	return retval;
}
1175*4882a593Smuzhiyun 
kvmppc_mpic_set_epr(struct kvm_vcpu * vcpu)1176*4882a593Smuzhiyun void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
1177*4882a593Smuzhiyun {
1178*4882a593Smuzhiyun 	struct openpic *opp = vcpu->arch.mpic;
1179*4882a593Smuzhiyun 	int cpu = vcpu->arch.irq_cpu_id;
1180*4882a593Smuzhiyun 	unsigned long flags;
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 	spin_lock_irqsave(&opp->lock, flags);
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 	if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY)
1185*4882a593Smuzhiyun 		kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	spin_unlock_irqrestore(&opp->lock, flags);
1188*4882a593Smuzhiyun }
1189*4882a593Smuzhiyun 
/* MMIO read handler for a per-CPU register bank.
 *
 * @idx selects the CPU; reads with idx < 0 or an unaligned address
 * return all-ones (and skip the result trace, as before).
 */
static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
				     u32 *ptr, int idx)
{
	struct openpic *opp = opaque;
	struct irq_dest *dst;
	uint32_t r = 0xFFFFFFFF;

	pr_debug("%s: cpu %d addr %#llx\n", __func__, idx, addr);

	if (idx < 0 || (addr & 0xF))
		goto done;

	dst = &opp->dst[idx];
	switch (addr & 0xFF0) {
	case 0x80:		/* CTPR */
		r = dst->ctpr;
		break;
	case 0x90:		/* WHOAMI */
		r = idx;
		break;
	case 0xA0:		/* IACK */
		r = openpic_iack(opp, dst, idx);
		break;
	case 0xB0:		/* EOI */
		r = 0;
		break;
	default:
		break;
	}
	pr_debug("%s: => 0x%08x\n", __func__, r);

done:
	*ptr = r;
	return 0;
}
1230*4882a593Smuzhiyun 
/* MMIO read entry point for the per-CPU register space: the CPU index
 * is decoded from bits 16:12 of the offset.
 */
static int openpic_cpu_read(void *opaque, gpa_t addr, u32 *ptr)
{
	int cpu = (addr & 0x1f000) >> 12;

	return openpic_cpu_read_internal(opaque, addr, ptr, cpu);
}
1238*4882a593Smuzhiyun 
/* One MMIO register region of the emulated MPIC: read/write callbacks
 * plus the region's start offset and byte length within the device's
 * register space.
 */
struct mem_reg {
	int (*read)(void *opaque, gpa_t addr, u32 *ptr);
	int (*write)(void *opaque, gpa_t addr, u32 val);
	gpa_t start_addr;
	int size;
};
1245*4882a593Smuzhiyun 
/* Static descriptors for each MMIO register region; registered with
 * add_mmio_region() and dispatched by kvm_mpic_read/write_internal().
 * Handlers receive addresses relative to their region's start_addr.
 */
static const struct mem_reg openpic_gbl_mmio = {
	.write = openpic_gbl_write,
	.read = openpic_gbl_read,
	.start_addr = OPENPIC_GLB_REG_START,
	.size = OPENPIC_GLB_REG_SIZE,
};

static const struct mem_reg openpic_tmr_mmio = {
	.write = openpic_tmr_write,
	.read = openpic_tmr_read,
	.start_addr = OPENPIC_TMR_REG_START,
	.size = OPENPIC_TMR_REG_SIZE,
};

static const struct mem_reg openpic_cpu_mmio = {
	.write = openpic_cpu_write,
	.read = openpic_cpu_read,
	.start_addr = OPENPIC_CPU_REG_START,
	.size = OPENPIC_CPU_REG_SIZE,
};

static const struct mem_reg openpic_src_mmio = {
	.write = openpic_src_write,
	.read = openpic_src_read,
	.start_addr = OPENPIC_SRC_REG_START,
	.size = OPENPIC_SRC_REG_SIZE,
};

static const struct mem_reg openpic_msi_mmio = {
	.read = openpic_msi_read,
	.write = openpic_msi_write,
	.start_addr = OPENPIC_MSI_REG_START,
	.size = OPENPIC_MSI_REG_SIZE,
};

static const struct mem_reg openpic_summary_mmio = {
	.read = openpic_summary_read,
	.write = openpic_summary_write,
	.start_addr = OPENPIC_SUMMARY_REG_START,
	.size = OPENPIC_SUMMARY_REG_SIZE,
};
1287*4882a593Smuzhiyun 
add_mmio_region(struct openpic * opp,const struct mem_reg * mr)1288*4882a593Smuzhiyun static void add_mmio_region(struct openpic *opp, const struct mem_reg *mr)
1289*4882a593Smuzhiyun {
1290*4882a593Smuzhiyun 	if (opp->num_mmio_regions >= MAX_MMIO_REGIONS) {
1291*4882a593Smuzhiyun 		WARN(1, "kvm mpic: too many mmio regions\n");
1292*4882a593Smuzhiyun 		return;
1293*4882a593Smuzhiyun 	}
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	opp->mmio_regions[opp->num_mmio_regions++] = mr;
1296*4882a593Smuzhiyun }
1297*4882a593Smuzhiyun 
/* Apply Freescale-specific MPIC defaults: register the MSI and summary
 * MMIO regions, set reset values, and lay out the virtual IRQ space
 * (real sources first, then IPIs, then timers).
 */
static void fsl_common_init(struct openpic *opp)
{
	int i;
	int virq = MAX_SRC;	/* first virtual IRQ beyond the real sources */

	add_mmio_region(opp, &openpic_msi_mmio);
	add_mmio_region(opp, &openpic_summary_mmio);

	opp->vid = VID_REVISION_1_2;
	opp->vir = VIR_GENERIC;
	opp->vector_mask = 0xFFFF;
	opp->tfrr_reset = 0;
	opp->ivpr_reset = IVPR_MASK_MASK;	/* sources start masked */
	opp->idr_reset = 1 << 0;	/* default destination: CPU 0 */
	opp->max_irq = MAX_IRQ;

	/* Carve the IPI range, then the timer range, out of virq space. */
	opp->irq_ipi0 = virq;
	virq += MAX_IPI;
	opp->irq_tim0 = virq;
	virq += MAX_TMR;

	BUG_ON(virq > MAX_IRQ);

	/* First MSI source number -- presumably FSL-specific; TODO
	 * confirm against the chip reference manual. */
	opp->irq_msi = 224;

	/* External sources default to edge-triggered (level = false). */
	for (i = 0; i < opp->fsl->max_ext; i++)
		opp->src[i].level = false;

	/* Internal interrupts, including message and MSI */
	for (i = 16; i < MAX_SRC; i++) {
		opp->src[i].type = IRQ_TYPE_FSLINT;
		opp->src[i].level = true;
	}

	/* timers and IPIs */
	for (i = MAX_SRC; i < virq; i++) {
		opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
		opp->src[i].level = false;
	}
}
1338*4882a593Smuzhiyun 
/* Dispatch a read to whichever registered MMIO region contains @addr;
 * the handler receives the offset relative to the region start.
 * Returns -ENXIO when no region claims the address.
 */
static int kvm_mpic_read_internal(struct openpic *opp, gpa_t addr, u32 *ptr)
{
	int i;

	for (i = 0; i < opp->num_mmio_regions; i++) {
		const struct mem_reg *mr = opp->mmio_regions[i];

		if (addr >= mr->start_addr &&
		    addr < mr->start_addr + mr->size)
			return mr->read(opp, addr - mr->start_addr, ptr);
	}

	return -ENXIO;
}
1354*4882a593Smuzhiyun 
/*
 * Dispatch a 32-bit register write to the MMIO sub-region that covers
 * @addr (offset relative to the start of the register window).
 * Returns the region handler's result, or -ENXIO if no region matches.
 * Caller must hold opp->lock.
 */
static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
{
	int i;

	for (i = 0; i < opp->num_mmio_regions; i++) {
		const struct mem_reg *reg = opp->mmio_regions[i];

		if (addr >= reg->start_addr &&
		    addr < reg->start_addr + reg->size)
			return reg->write(opp, addr - reg->start_addr, val);
	}

	return -ENXIO;
}
1370*4882a593Smuzhiyun 
/*
 * KVM MMIO-bus read callback for the MPIC register window.
 *
 * Performs the underlying access as a full 32-bit register read under
 * opp->lock, then returns either the whole word (len == 4) or a single
 * byte lane of it (len == 1) to the guest.  Any other length, or an
 * address not naturally aligned to @len, is rejected with -EINVAL.
 *
 * NOTE(review): the byte-lane index (addr & 3) selects into the union's
 * byte view of the u32, so which register byte a len==1 read returns
 * depends on host byte order -- presumably matching what the emulated
 * hardware does on this platform; confirm before restructuring.
 */
static int kvm_mpic_read(struct kvm_vcpu *vcpu,
			 struct kvm_io_device *this,
			 gpa_t addr, int len, void *ptr)
{
	struct openpic *opp = container_of(this, struct openpic, mmio);
	int ret;
	/* One register's worth of data, viewable as a word or as bytes. */
	union {
		u32 val;
		u8 bytes[4];
	} u;

	/* Access must be naturally aligned for its size. */
	if (addr & (len - 1)) {
		pr_debug("%s: bad alignment %llx/%d\n",
			 __func__, addr, len);
		return -EINVAL;
	}

	spin_lock_irq(&opp->lock);
	ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val);
	spin_unlock_irq(&opp->lock);

	/*
	 * Technically only 32-bit accesses are allowed, but be nice to
	 * people dumping registers a byte at a time -- it works in real
	 * hardware (reads only, not writes).
	 */
	if (len == 4) {
		*(u32 *)ptr = u.val;
		pr_debug("%s: addr %llx ret %d len 4 val %x\n",
			 __func__, addr, ret, u.val);
	} else if (len == 1) {
		/* Hand back just the requested byte lane of the word. */
		*(u8 *)ptr = u.bytes[addr & 3];
		pr_debug("%s: addr %llx ret %d len 1 val %x\n",
			 __func__, addr, ret, u.bytes[addr & 3]);
	} else {
		pr_debug("%s: bad length %d\n", __func__, len);
		return -EINVAL;
	}

	return ret;
}
1412*4882a593Smuzhiyun 
/*
 * KVM MMIO-bus write callback for the MPIC register window.
 *
 * Only full, aligned 32-bit stores are accepted (unlike reads, where
 * single bytes are tolerated); anything else returns -EOPNOTSUPP.
 */
static int kvm_mpic_write(struct kvm_vcpu *vcpu,
			  struct kvm_io_device *this,
			  gpa_t addr, int len, const void *ptr)
{
	struct openpic *opp = container_of(this, struct openpic, mmio);
	u32 val;
	int ret;

	if (len != 4) {
		pr_debug("%s: bad length %d\n", __func__, len);
		return -EOPNOTSUPP;
	}
	if (addr & 3) {
		pr_debug("%s: bad alignment %llx/%d\n", __func__, addr, len);
		return -EOPNOTSUPP;
	}

	val = *(const u32 *)ptr;

	spin_lock_irq(&opp->lock);
	ret = kvm_mpic_write_internal(opp, addr - opp->reg_base, val);
	spin_unlock_irq(&opp->lock);

	pr_debug("%s: addr %llx ret %d val %x\n", __func__, addr, ret, val);

	return ret;
}
1439*4882a593Smuzhiyun 
/* MMIO-bus callbacks hooked up for the MPIC register window. */
static const struct kvm_io_device_ops mpic_mmio_ops = {
	.read = kvm_mpic_read,
	.write = kvm_mpic_write,
};
1444*4882a593Smuzhiyun 
/*
 * Register the MPIC's register window on the VM's MMIO bus at the
 * current opp->reg_base.  Called with kvm->slots_lock held (see
 * set_base_addr()).
 *
 * NOTE(review): the return value of kvm_io_bus_register_dev() is
 * discarded, so a failed registration would go unnoticed -- confirm
 * this is intentional.
 */
static void map_mmio(struct openpic *opp)
{
	kvm_iodevice_init(&opp->mmio, &mpic_mmio_ops);

	kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS,
				opp->reg_base, OPENPIC_REG_SIZE,
				&opp->mmio);
}
1453*4882a593Smuzhiyun 
/*
 * Remove the MPIC's register window from the VM's MMIO bus.  Called
 * with kvm->slots_lock held (see set_base_addr()).
 */
static void unmap_mmio(struct openpic *opp)
{
	kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio);
}
1458*4882a593Smuzhiyun 
set_base_addr(struct openpic * opp,struct kvm_device_attr * attr)1459*4882a593Smuzhiyun static int set_base_addr(struct openpic *opp, struct kvm_device_attr *attr)
1460*4882a593Smuzhiyun {
1461*4882a593Smuzhiyun 	u64 base;
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 	if (copy_from_user(&base, (u64 __user *)(long)attr->addr, sizeof(u64)))
1464*4882a593Smuzhiyun 		return -EFAULT;
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	if (base & 0x3ffff) {
1467*4882a593Smuzhiyun 		pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx not aligned\n",
1468*4882a593Smuzhiyun 			 __func__, base);
1469*4882a593Smuzhiyun 		return -EINVAL;
1470*4882a593Smuzhiyun 	}
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	if (base == opp->reg_base)
1473*4882a593Smuzhiyun 		return 0;
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun 	mutex_lock(&opp->kvm->slots_lock);
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun 	unmap_mmio(opp);
1478*4882a593Smuzhiyun 	opp->reg_base = base;
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx\n",
1481*4882a593Smuzhiyun 		 __func__, base);
1482*4882a593Smuzhiyun 
1483*4882a593Smuzhiyun 	if (base == 0)
1484*4882a593Smuzhiyun 		goto out;
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	map_mmio(opp);
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun out:
1489*4882a593Smuzhiyun 	mutex_unlock(&opp->kvm->slots_lock);
1490*4882a593Smuzhiyun 	return 0;
1491*4882a593Smuzhiyun }
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun #define ATTR_SET		0
1494*4882a593Smuzhiyun #define ATTR_GET		1
1495*4882a593Smuzhiyun 
/*
 * Read (ATTR_GET) or write (ATTR_SET) one 32-bit MPIC register at
 * offset @addr on behalf of the device-attribute ioctls, taking
 * opp->lock around the access.  Unaligned offsets yield -ENXIO.
 */
static int access_reg(struct openpic *opp, gpa_t addr, u32 *val, int type)
{
	int err;

	/* Registers are only word-addressable. */
	if (addr & 3)
		return -ENXIO;

	spin_lock_irq(&opp->lock);
	err = (type == ATTR_SET) ?
		kvm_mpic_write_internal(opp, addr, *val) :
		kvm_mpic_read_internal(opp, addr, val);
	spin_unlock_irq(&opp->lock);

	pr_debug("%s: type %d addr %llx val %x\n", __func__, type, addr, *val);

	return err;
}
1516*4882a593Smuzhiyun 
mpic_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)1517*4882a593Smuzhiyun static int mpic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1518*4882a593Smuzhiyun {
1519*4882a593Smuzhiyun 	struct openpic *opp = dev->private;
1520*4882a593Smuzhiyun 	u32 attr32;
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 	switch (attr->group) {
1523*4882a593Smuzhiyun 	case KVM_DEV_MPIC_GRP_MISC:
1524*4882a593Smuzhiyun 		switch (attr->attr) {
1525*4882a593Smuzhiyun 		case KVM_DEV_MPIC_BASE_ADDR:
1526*4882a593Smuzhiyun 			return set_base_addr(opp, attr);
1527*4882a593Smuzhiyun 		}
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 		break;
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun 	case KVM_DEV_MPIC_GRP_REGISTER:
1532*4882a593Smuzhiyun 		if (get_user(attr32, (u32 __user *)(long)attr->addr))
1533*4882a593Smuzhiyun 			return -EFAULT;
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 		return access_reg(opp, attr->attr, &attr32, ATTR_SET);
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
1538*4882a593Smuzhiyun 		if (attr->attr > MAX_SRC)
1539*4882a593Smuzhiyun 			return -EINVAL;
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 		if (get_user(attr32, (u32 __user *)(long)attr->addr))
1542*4882a593Smuzhiyun 			return -EFAULT;
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 		if (attr32 != 0 && attr32 != 1)
1545*4882a593Smuzhiyun 			return -EINVAL;
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 		spin_lock_irq(&opp->lock);
1548*4882a593Smuzhiyun 		openpic_set_irq(opp, attr->attr, attr32);
1549*4882a593Smuzhiyun 		spin_unlock_irq(&opp->lock);
1550*4882a593Smuzhiyun 		return 0;
1551*4882a593Smuzhiyun 	}
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	return -ENXIO;
1554*4882a593Smuzhiyun }
1555*4882a593Smuzhiyun 
mpic_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)1556*4882a593Smuzhiyun static int mpic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1557*4882a593Smuzhiyun {
1558*4882a593Smuzhiyun 	struct openpic *opp = dev->private;
1559*4882a593Smuzhiyun 	u64 attr64;
1560*4882a593Smuzhiyun 	u32 attr32;
1561*4882a593Smuzhiyun 	int ret;
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun 	switch (attr->group) {
1564*4882a593Smuzhiyun 	case KVM_DEV_MPIC_GRP_MISC:
1565*4882a593Smuzhiyun 		switch (attr->attr) {
1566*4882a593Smuzhiyun 		case KVM_DEV_MPIC_BASE_ADDR:
1567*4882a593Smuzhiyun 			mutex_lock(&opp->kvm->slots_lock);
1568*4882a593Smuzhiyun 			attr64 = opp->reg_base;
1569*4882a593Smuzhiyun 			mutex_unlock(&opp->kvm->slots_lock);
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 			if (copy_to_user((u64 __user *)(long)attr->addr,
1572*4882a593Smuzhiyun 					 &attr64, sizeof(u64)))
1573*4882a593Smuzhiyun 				return -EFAULT;
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun 			return 0;
1576*4882a593Smuzhiyun 		}
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun 		break;
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 	case KVM_DEV_MPIC_GRP_REGISTER:
1581*4882a593Smuzhiyun 		ret = access_reg(opp, attr->attr, &attr32, ATTR_GET);
1582*4882a593Smuzhiyun 		if (ret)
1583*4882a593Smuzhiyun 			return ret;
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun 		if (put_user(attr32, (u32 __user *)(long)attr->addr))
1586*4882a593Smuzhiyun 			return -EFAULT;
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 		return 0;
1589*4882a593Smuzhiyun 
1590*4882a593Smuzhiyun 	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
1591*4882a593Smuzhiyun 		if (attr->attr > MAX_SRC)
1592*4882a593Smuzhiyun 			return -EINVAL;
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun 		spin_lock_irq(&opp->lock);
1595*4882a593Smuzhiyun 		attr32 = opp->src[attr->attr].pending;
1596*4882a593Smuzhiyun 		spin_unlock_irq(&opp->lock);
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun 		if (put_user(attr32, (u32 __user *)(long)attr->addr))
1599*4882a593Smuzhiyun 			return -EFAULT;
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 		return 0;
1602*4882a593Smuzhiyun 	}
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	return -ENXIO;
1605*4882a593Smuzhiyun }
1606*4882a593Smuzhiyun 
mpic_has_attr(struct kvm_device * dev,struct kvm_device_attr * attr)1607*4882a593Smuzhiyun static int mpic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1608*4882a593Smuzhiyun {
1609*4882a593Smuzhiyun 	switch (attr->group) {
1610*4882a593Smuzhiyun 	case KVM_DEV_MPIC_GRP_MISC:
1611*4882a593Smuzhiyun 		switch (attr->attr) {
1612*4882a593Smuzhiyun 		case KVM_DEV_MPIC_BASE_ADDR:
1613*4882a593Smuzhiyun 			return 0;
1614*4882a593Smuzhiyun 		}
1615*4882a593Smuzhiyun 
1616*4882a593Smuzhiyun 		break;
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 	case KVM_DEV_MPIC_GRP_REGISTER:
1619*4882a593Smuzhiyun 		return 0;
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
1622*4882a593Smuzhiyun 		if (attr->attr > MAX_SRC)
1623*4882a593Smuzhiyun 			break;
1624*4882a593Smuzhiyun 
1625*4882a593Smuzhiyun 		return 0;
1626*4882a593Smuzhiyun 	}
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 	return -ENXIO;
1629*4882a593Smuzhiyun }
1630*4882a593Smuzhiyun 
/*
 * Device destroy callback: detach the MPIC from the VM and free both
 * the openpic state and the generic kvm_device wrapper.
 */
static void mpic_destroy(struct kvm_device *dev)
{
	struct openpic *opp = dev->private;

	/* Clear the VM's back-pointer before freeing the state it names. */
	dev->kvm->arch.mpic = NULL;
	kfree(opp);
	kfree(dev);
}
1639*4882a593Smuzhiyun 
mpic_set_default_irq_routing(struct openpic * opp)1640*4882a593Smuzhiyun static int mpic_set_default_irq_routing(struct openpic *opp)
1641*4882a593Smuzhiyun {
1642*4882a593Smuzhiyun 	struct kvm_irq_routing_entry *routing;
1643*4882a593Smuzhiyun 
1644*4882a593Smuzhiyun 	/* Create a nop default map, so that dereferencing it still works */
1645*4882a593Smuzhiyun 	routing = kzalloc((sizeof(*routing)), GFP_KERNEL);
1646*4882a593Smuzhiyun 	if (!routing)
1647*4882a593Smuzhiyun 		return -ENOMEM;
1648*4882a593Smuzhiyun 
1649*4882a593Smuzhiyun 	kvm_set_irq_routing(opp->kvm, routing, 0, 0);
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	kfree(routing);
1652*4882a593Smuzhiyun 	return 0;
1653*4882a593Smuzhiyun }
1654*4882a593Smuzhiyun 
/*
 * Device create callback: allocate and initialize the emulated MPIC
 * for this VM.  Only one MPIC per VM is supported, and only the
 * Freescale MPIC 2.0 and 4.2 models are recognized.
 *
 * Returns 0 on success, -EINVAL if an MPIC already exists, -ENOMEM on
 * allocation failure, -ENODEV for an unknown model, or the error from
 * default routing setup.
 */
static int mpic_create(struct kvm_device *dev, u32 type)
{
	struct openpic *opp;
	int ret;

	/* We only support one MPIC at a time for now */
	if (dev->kvm->arch.mpic)
		return -EINVAL;

	opp = kzalloc(sizeof(struct openpic), GFP_KERNEL);
	if (!opp)
		return -ENOMEM;

	dev->private = opp;
	opp->kvm = dev->kvm;
	opp->dev = dev;
	opp->model = type;
	spin_lock_init(&opp->lock);

	/* Register sub-windows common to all supported models. */
	add_mmio_region(opp, &openpic_gbl_mmio);
	add_mmio_region(opp, &openpic_tmr_mmio);
	add_mmio_region(opp, &openpic_src_mmio);
	add_mmio_region(opp, &openpic_cpu_mmio);

	switch (opp->model) {
	case KVM_DEV_TYPE_FSL_MPIC_20:
		opp->fsl = &fsl_mpic_20;
		opp->brr1 = 0x00400200;
		opp->flags |= OPENPIC_FLAG_IDR_CRIT;
		opp->nb_irqs = 80;
		opp->mpic_mode_mask = GCR_MODE_MIXED;

		fsl_common_init(opp);

		break;

	case KVM_DEV_TYPE_FSL_MPIC_42:
		opp->fsl = &fsl_mpic_42;
		opp->brr1 = 0x00400402;
		opp->flags |= OPENPIC_FLAG_ILR;
		opp->nb_irqs = 196;
		opp->mpic_mode_mask = GCR_MODE_PROXY;

		fsl_common_init(opp);

		break;

	default:
		ret = -ENODEV;
		goto err;
	}

	ret = mpic_set_default_irq_routing(opp);
	if (ret)
		goto err;

	openpic_reset(opp);

	/*
	 * Publish the fully initialized state before the VM-wide pointer
	 * that other CPUs may use to find it.
	 */
	smp_wmb();
	dev->kvm->arch.mpic = opp;

	return 0;

err:
	kfree(opp);
	return ret;
}
1722*4882a593Smuzhiyun 
/* Device-ops table registered with KVM's generic device framework. */
struct kvm_device_ops kvm_mpic_ops = {
	.name = "kvm-mpic",
	.create = mpic_create,
	.destroy = mpic_destroy,
	.set_attr = mpic_set_attr,
	.get_attr = mpic_get_attr,
	.has_attr = mpic_has_attr,
};
1731*4882a593Smuzhiyun 
/*
 * Attach @vcpu to the MPIC as destination CPU @cpu.
 *
 * Returns 0 on success; -EPERM if the device is not an MPIC, belongs
 * to a different VM, or @cpu is out of range; -EEXIST if the slot is
 * taken; -EBUSY if the vcpu already has an irqchip.
 */
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu)
{
	struct openpic *opp = dev->private;
	int ret = 0;

	if (dev->ops != &kvm_mpic_ops)
		return -EPERM;
	if (opp->kvm != vcpu->kvm)
		return -EPERM;
	/* cpu is unsigned, so only the upper bound needs checking. */
	if (cpu >= MAX_CPU)
		return -EPERM;

	spin_lock_irq(&opp->lock);

	if (opp->dst[cpu].vcpu) {
		ret = -EEXIST;
		goto out;
	}
	if (vcpu->arch.irq_type) {
		ret = -EBUSY;
		goto out;
	}

	opp->dst[cpu].vcpu = vcpu;
	opp->nb_cpus = max(opp->nb_cpus, cpu + 1);

	vcpu->arch.mpic = opp;
	vcpu->arch.irq_cpu_id = cpu;
	vcpu->arch.irq_type = KVMPPC_IRQ_MPIC;

	/* This might need to be changed if GCR gets extended */
	if (opp->mpic_mode_mask == GCR_MODE_PROXY)
		vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;

out:
	spin_unlock_irq(&opp->lock);
	return ret;
}
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun /*
1773*4882a593Smuzhiyun  * This should only happen immediately before the mpic is destroyed,
1774*4882a593Smuzhiyun  * so we shouldn't need to worry about anything still trying to
1775*4882a593Smuzhiyun  * access the vcpu pointer.
1776*4882a593Smuzhiyun  */
/*
 * Detach @vcpu from the MPIC destination slot it was connected to.
 * The vcpu must actually be connected (BUG otherwise).
 */
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu)
{
	BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu);

	opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL;
}
1783*4882a593Smuzhiyun 
1784*4882a593Smuzhiyun /*
1785*4882a593Smuzhiyun  * Return value:
1786*4882a593Smuzhiyun  *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
1787*4882a593Smuzhiyun  *  = 0   Interrupt was coalesced (previous irq is still pending)
1788*4882a593Smuzhiyun  *  > 0   Number of CPUs interrupt was delivered to
1789*4882a593Smuzhiyun  */
/*
 * IRQCHIP routing callback: drive the routed pin to @level under the
 * MPIC lock.  irq_source_id and line_status are unused here.
 */
static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e,
			struct kvm *kvm, int irq_source_id, int level,
			bool line_status)
{
	struct openpic *opp = kvm->arch.mpic;
	unsigned long flags;
	u32 pin = e->irqchip.pin;

	spin_lock_irqsave(&opp->lock, flags);
	openpic_set_irq(opp, pin, level);
	spin_unlock_irqrestore(&opp->lock, flags);

	/* All code paths we care about don't check for the return value */
	return 0;
}
1805*4882a593Smuzhiyun 
/*
 * MSI routing callback: forward the MSI payload to the MPIC's MSIIR
 * register under the MPIC lock.
 */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
	struct openpic *opp = kvm->arch.mpic;
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);

	/*
	 * XXX We ignore the target address for now, as we only support
	 *     a single MSI bank.
	 */
	/* Use the local opp consistently -- it's the object we locked. */
	openpic_msi_write(opp, MSIIR_OFFSET, e->msi.data);
	spin_unlock_irqrestore(&opp->lock, flags);

	/* All code paths we care about don't check for the return value */
	return 0;
}
1824*4882a593Smuzhiyun 
kvm_set_routing_entry(struct kvm * kvm,struct kvm_kernel_irq_routing_entry * e,const struct kvm_irq_routing_entry * ue)1825*4882a593Smuzhiyun int kvm_set_routing_entry(struct kvm *kvm,
1826*4882a593Smuzhiyun 			  struct kvm_kernel_irq_routing_entry *e,
1827*4882a593Smuzhiyun 			  const struct kvm_irq_routing_entry *ue)
1828*4882a593Smuzhiyun {
1829*4882a593Smuzhiyun 	int r = -EINVAL;
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun 	switch (ue->type) {
1832*4882a593Smuzhiyun 	case KVM_IRQ_ROUTING_IRQCHIP:
1833*4882a593Smuzhiyun 		e->set = mpic_set_irq;
1834*4882a593Smuzhiyun 		e->irqchip.irqchip = ue->u.irqchip.irqchip;
1835*4882a593Smuzhiyun 		e->irqchip.pin = ue->u.irqchip.pin;
1836*4882a593Smuzhiyun 		if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
1837*4882a593Smuzhiyun 			goto out;
1838*4882a593Smuzhiyun 		break;
1839*4882a593Smuzhiyun 	case KVM_IRQ_ROUTING_MSI:
1840*4882a593Smuzhiyun 		e->set = kvm_set_msi;
1841*4882a593Smuzhiyun 		e->msi.address_lo = ue->u.msi.address_lo;
1842*4882a593Smuzhiyun 		e->msi.address_hi = ue->u.msi.address_hi;
1843*4882a593Smuzhiyun 		e->msi.data = ue->u.msi.data;
1844*4882a593Smuzhiyun 		break;
1845*4882a593Smuzhiyun 	default:
1846*4882a593Smuzhiyun 		goto out;
1847*4882a593Smuzhiyun 	}
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun 	r = 0;
1850*4882a593Smuzhiyun out:
1851*4882a593Smuzhiyun 	return r;
1852*4882a593Smuzhiyun }
1853