/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 */

#ifndef _KVM_PPC_BOOK3S_XIVE_H
#define _KVM_PPC_BOOK3S_XIVE_H

#ifdef CONFIG_KVM_XICS
#include "book3s_xics.h"

/*
 * The XIVE interrupt source numbers are within the range
 * KVMPPC_XIVE_FIRST_IRQ (0) to KVMPPC_XIVE_NR_IRQS - 1; the count
 * is shared with the XICS layer.
 */
#define KVMPPC_XIVE_FIRST_IRQ	0
#define KVMPPC_XIVE_NR_IRQS	KVMPPC_XICS_NR_IRQS

/*
 * State for one guest irq source.
 *
 * For each guest source we allocate a HW interrupt in the XIVE
 * which we use for all SW triggers. It is unused for
 * pass-through, but it's easier to keep around as the same
 * guest interrupt can alternate between being emulated and
 * passed through if a physical device is hot unplugged and
 * replaced with an emulated one.
 *
 * This state structure is very similar to the XICS one, with
 * additional XIVE-specific tracking.
 */
struct kvmppc_xive_irq_state {
	bool valid;			/* Interrupt entry is valid */

	u32 number;			/* Guest IRQ number */
	u32 ipi_number;			/* XIVE IPI HW number */
	struct xive_irq_data ipi_data;	/* XIVE IPI associated data */
	u32 pt_number;			/* XIVE pass-through number if any */
	struct xive_irq_data *pt_data;	/* XIVE pass-through associated data */

	/* Targeting as set by the guest */
	u8 guest_priority;		/* Guest-set priority */
	u8 saved_priority;		/* Saved priority when masking */

	/* Actual targeting */
	u32 act_server;			/* Actual server */
	u8 act_priority;		/* Actual priority */

	/* Various state bits */
	bool in_eoi;			/* Synchronize with H_EOI */
	bool old_p;			/* P bit state when masking */
	bool old_q;			/* Q bit state when masking */
	bool lsi;			/* level-sensitive interrupt */
	bool asserted;			/* Only for emulated LSI: current state */

	/* Saved for migration state */
	bool in_queue;
	bool saved_p;
	bool saved_q;
	u8 saved_scan_prio;

	/* Xive native */
	u32 eisn;			/* Guest Effective IRQ number */
};

/* Select the "right" interrupt (IPI vs. passthrough) */
static inline void kvmppc_xive_select_irq(struct kvmppc_xive_irq_state *state,
					  u32 *out_hw_irq,
					  struct xive_irq_data **out_xd)
{
	if (state->pt_number) {
		if (out_hw_irq)
			*out_hw_irq = state->pt_number;
		if (out_xd)
			*out_xd = state->pt_data;
	} else {
		if (out_hw_irq)
			*out_hw_irq = state->ipi_number;
		if (out_xd)
			*out_xd = &state->ipi_data;
	}
}
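
/*
 * Typical use (illustrative sketch only):
 *
 *	u32 hw_num;
 *	struct xive_irq_data *xd;
 *
 *	kvmppc_xive_select_irq(state, &hw_num, &xd);
 *
 * hw_num/xd then refer to the pass-through HW interrupt when one is
 * configured, and to the emulation IPI otherwise.
 */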

/*
 * This corresponds to an "ICS" in XICS terminology; we use it
 * as a means to break up source information into multiple structures.
 */
struct kvmppc_xive_src_block {
	arch_spinlock_t lock;
	u16 id;
	struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
};

struct kvmppc_xive;

struct kvmppc_xive_ops {
	int (*reset_mapped)(struct kvm *kvm, unsigned long guest_irq);
};

struct kvmppc_xive {
	struct kvm *kvm;
	struct kvm_device *dev;
	struct dentry *dentry;

	/* VP block associated with the VM */
	u32	vp_base;

	/* Blocks of sources */
	struct kvmppc_xive_src_block *src_blocks[KVMPPC_XICS_MAX_ICS_ID + 1];
	u32	max_sbid;

	/*
	 * For state save, we lazily scan the queues on the first
	 * interrupt being migrated. We don't have a clean way to reset
	 * that flag, so we keep track of the number of valid sources
	 * and how many of them were migrated, and reset once all of
	 * them have been processed.
	 */
	u32	src_count;
	u32	saved_src_count;

	/*
	 * Some irqs are delayed on restore until the source is created;
	 * keep track here of how many of them there are.
	 */
	u32	delayed_irqs;

	/* Which queues (priorities) are in use by the guest */
	u8	qmap;

	/* Queue orders */
	u32	q_order;
	u32	q_page_order;

	/* Flags */
	u8	single_escalation;

	/* Number of entries in the VP block */
	u32	nr_servers;

	struct kvmppc_xive_ops *ops;
	struct address_space   *mapping;
	struct mutex mapping_lock;
	struct mutex lock;
};

#define KVMPPC_XIVE_Q_COUNT	8

struct kvmppc_xive_vcpu {
	struct kvmppc_xive	*xive;
	struct kvm_vcpu		*vcpu;
	bool			valid;

	/* Server number. This is the HW CPU ID from a guest perspective */
	u32			server_num;

	/*
	 * HW VP corresponding to this VCPU. This is the base of the VP
	 * block plus the server number.
	 */
	u32			vp_id;
	u32			vp_chip_id;
	u32			vp_cam;

	/* IPI used for sending ... IPIs */
	u32			vp_ipi;
	struct xive_irq_data	vp_ipi_data;

	/* Local emulation state */
	uint8_t			cppr;	/* guest CPPR */
	uint8_t			hw_cppr;	/* Hardware CPPR */
	uint8_t			mfrr;
	uint8_t			pending;

	/* Each VP has 8 queues though we only provision some */
	struct xive_q		queues[KVMPPC_XIVE_Q_COUNT];
	u32			esc_virq[KVMPPC_XIVE_Q_COUNT];
	char			*esc_virq_names[KVMPPC_XIVE_Q_COUNT];

	/* Stash a delayed irq on restore from migration (see set_icp) */
	u32			delayed_irq;

	/* Stats */
	u64			stat_rm_h_xirr;
	u64			stat_rm_h_ipoll;
	u64			stat_rm_h_cppr;
	u64			stat_rm_h_eoi;
	u64			stat_rm_h_ipi;
	u64			stat_vm_h_xirr;
	u64			stat_vm_h_ipoll;
	u64			stat_vm_h_cppr;
	u64			stat_vm_h_eoi;
	u64			stat_vm_h_ipi;
};

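/*
 * Find the vcpu advertising the given server number @nr; returns NULL
 * if no vcpu matches.
 */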
static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num)
			return vcpu;
	}
	return NULL;
}

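/*
 * Decompose a guest IRQ number into its source block id (the XICS
 * "ICS" id, taken from the upper bits) and the source index within
 * that block; the index is optionally returned via @source.
 */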
static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmppc_xive *xive,
		u32 irq, u16 *source)
{
	u32 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
	u16 src = irq & KVMPPC_XICS_SRC_MASK;

	if (source)
		*source = src;
	if (bid > KVMPPC_XICS_MAX_ICS_ID)
		return NULL;
	return xive->src_blocks[bid];
}

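/*
 * Translate a guest server number into a HW VP id: the base of the
 * VM's VP block plus the packed vcpu id.
 */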
static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
{
	return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
}

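/*
 * Check whether any vcpu of the VM already owns the given HW VP id.
 */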
static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
			return true;
	}
	return false;
}

/*
 * The mapping between guest priorities and host priorities is as
 * follows.
 *
 * Guest requests for priorities 0...6 are honored. A guest request
 * for anything higher results in a priority of 6 being applied,
 * except for 0xff ("masked"), which is passed through unchanged.
 *
 * A similar mapping is done for CPPR values.
 */
static inline u8 xive_prio_from_guest(u8 prio)
{
	if (prio == 0xff || prio < 6)
		return prio;
	return 6;
}

static inline u8 xive_prio_to_guest(u8 prio)
{
	return prio;
}

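/*
 * Read one event from a XIVE event queue page. Entries are big-endian
 * 32-bit words whose top bit is a generation (toggle) bit: an entry is
 * valid only while its top bit differs from the current *toggle value.
 * Consuming an entry advances *idx (masked by @msk, the queue size in
 * words minus one), and *toggle flips on every wrap so entries left
 * over from the previous pass read as invalid.
 */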
static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
{
	u32 cur;

	if (!qpage)
		return 0;
	cur = be32_to_cpup(qpage + *idx);
	if ((cur >> 31) == *toggle)
		return 0;
	*idx = (*idx + 1) & msk;
	if (*idx == 0)
		(*toggle) ^= 1;
	return cur & 0x7fffffff;
}

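/* Real-mode XICS hcall handlers backed by XIVE */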
extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu);
extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
			 unsigned long mfrr);
extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);

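/*
 * Virtual-mode counterparts of the real-mode handlers above. These
 * are function pointers so the XIVE code can install them at run
 * time, letting real-mode paths fall back to the virtual-mode
 * implementation when an hcall cannot complete in real mode.
 */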
extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			      unsigned long mfrr);
extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);

/*
 * Common Xive routines for XICS-over-XIVE and XIVE native
 */
void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu);
int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu);
struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
	struct kvmppc_xive *xive, int irq);
void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb);
int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio);
int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
				  bool single_escalation);
struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type);
void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
				    struct kvmppc_xive_vcpu *xc, int irq);
int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp);
int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr);

#endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XIVE_H */