// SPDX-License-Identifier: GPL-2.0
/*
 * Xen event channels (2-level ABI)
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

#include "events_internal.h"

/*
 * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
 * careful to only use bitops which allow for this (e.g.
 * test_bit/find_first_bit and friends but not __ffs) and to pass
 * BITS_PER_EVTCHN_WORD as the bitmask length.
 */
#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
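/*
 * e.g. on 32-bit Arm guests xen_ulong_t is 64 bits wide, so
 * BITS_PER_EVTCHN_WORD is 64 while BITS_PER_LONG is only 32.
 */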
/*
 * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
 * array. Primarily to avoid long lines (hence the terse name).
 */
#define BM(x) (unsigned long *)(x)
/* Find the first set bit in an evtchn mask */
#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)

#define EVTCHN_MASK_SIZE (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)

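/*
 * Per-CPU bitmap of the event channels currently bound to each CPU.
 * It is maintained by evtchn_2l_bind_to_cpu()/evtchn_2l_remove() and
 * consulted by active_evtchns() when scanning for work.
 */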
static DEFINE_PER_CPU(xen_ulong_t [EVTCHN_MASK_SIZE], cpu_evtchn_mask);

static unsigned evtchn_2l_max_channels(void)
{
	return EVTCHN_2L_NR_CHANNELS;
}

static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
{
	clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}

static void evtchn_2l_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
				  unsigned int old_cpu)
{
	clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, old_cpu)));
	set_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}

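/*
 * The pending and mask bitmaps live in the shared info page and are
 * updated concurrently by the hypervisor, hence the sync_ (atomic)
 * bitops in the accessors below.
 */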
static void evtchn_2l_clear_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, BM(&s->evtchn_pending[0]));
}

static void evtchn_2l_set_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_pending[0]));
}

static bool evtchn_2l_is_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}

static void evtchn_2l_mask(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_mask[0]));
}

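/*
 * Unmask an event channel.  On the fast path (port bound to this CPU)
 * the mask bit is cleared directly and, if the event was already
 * pending, the upcall is re-raised locally; otherwise the work is
 * deferred to the EVTCHNOP_unmask hypercall.
 */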
static void evtchn_2l_unmask(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();
	int do_hypercall = 0, evtchn_pending = 0;

	BUG_ON(!irqs_disabled());

	smp_wmb();	/* All writes before unmask must be visible. */

	if (unlikely((cpu != cpu_from_evtchn(port))))
		do_hypercall = 1;
	else {
		/*
		 * Need to clear the mask before checking pending to
		 * avoid a race with an event becoming pending.
		 *
		 * EVTCHNOP_unmask will only trigger an upcall if the
		 * mask bit was set, so if a hypercall is needed
		 * remask the event.
		 */
		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
		evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));

		if (unlikely(evtchn_pending && xen_hvm_domain())) {
			sync_set_bit(port, BM(&s->evtchn_mask[0]));
			do_hypercall = 1;
		}
	}

	/*
	 * Slow path (hypercall) if this is a non-local port or if this is
	 * an HVM domain and an event is pending (HVM domains don't have
	 * their own implementation of irq_enable).
	 */
	if (do_hypercall) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (evtchn_pending &&
		    !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
					   BM(&vcpu_info->evtchn_pending_sel)))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

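/*
 * Per-CPU scan cursor: each pass over the pending bitmap resumes one
 * past the last port handled, so a busy low-numbered port cannot
 * starve higher-numbered ones.
 */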
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
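/* e.g. MASK_LSBS(0x0b, 2) == 0x08: bits 0 and 1 are cleared. */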

static inline xen_ulong_t active_evtchns(unsigned int cpu,
					 struct shared_info *sh,
					 unsigned int idx)
{
	return sh->evtchn_pending[idx] &
		per_cpu(cpu_evtchn_mask, cpu)[idx] &
		~sh->evtchn_mask[idx];
}

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
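/*
 * Illustration, assuming 64-bit words: port 70 is word 1, bit 6, so a
 * pending event 70 sets bit 1 of vcpu_info->evtchn_pending_sel (level
 * 1) and bit 6 of shared_info->evtchn_pending[1] (level 2).
 */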
static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
{
	int irq;
	xen_ulong_t pending_words;
	xen_ulong_t pending_bits;
	int start_word_idx, start_bit_idx;
	int word_idx, bit_idx;
	int i;
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

	/* Timer interrupt has highest priority. */
	irq = irq_from_virq(cpu, VIRQ_TIMER);
	if (irq != -1) {
		evtchn_port_t evtchn = evtchn_from_irq(irq);
		/*
		 * Index with BITS_PER_EVTCHN_WORD rather than
		 * BITS_PER_LONG: the two can differ (see the note at
		 * the top of this file).
		 */
		word_idx = evtchn / BITS_PER_EVTCHN_WORD;
		bit_idx = evtchn % BITS_PER_EVTCHN_WORD;
		if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
			generic_handle_irq(irq);
	}

	/*
	 * Master flag must be cleared /before/ clearing
	 * selector flag. xchg_xen_ulong must contain an
	 * appropriate barrier.
	 */
	pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);

	start_word_idx = __this_cpu_read(current_word_idx);
	start_bit_idx = __this_cpu_read(current_bit_idx);

	word_idx = start_word_idx;

	for (i = 0; pending_words != 0; i++) {
		xen_ulong_t words;

		words = MASK_LSBS(pending_words, word_idx);

		/*
		 * If we masked out all events, wrap to beginning.
		 */
		if (words == 0) {
			word_idx = 0;
			bit_idx = 0;
			continue;
		}
		word_idx = EVTCHN_FIRST_BIT(words);

		pending_bits = active_evtchns(cpu, s, word_idx);
		bit_idx = 0; /* usually scan entire word from start */
		/*
		 * We scan the starting word in two parts.
		 *
		 * 1st time: start in the middle, scanning the
		 * upper bits.
		 *
		 * 2nd time: scan the whole word (not just the
		 * parts skipped in the first pass) -- if an
		 * event in the previously scanned bits is
		 * pending again it would just be scanned on
		 * the next loop anyway.
		 */
		if (word_idx == start_word_idx) {
			if (i == 0)
				bit_idx = start_bit_idx;
		}

		do {
			xen_ulong_t bits;
			evtchn_port_t port;

			bits = MASK_LSBS(pending_bits, bit_idx);

			/* If we masked out all events, move on. */
			if (bits == 0)
				break;

			bit_idx = EVTCHN_FIRST_BIT(bits);

			/* Process port. */
			port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
			handle_irq_for_port(port, ctrl);

			bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;

			/* Next caller starts at last processed + 1 */
			__this_cpu_write(current_word_idx,
					 bit_idx ? word_idx :
					 (word_idx + 1) % BITS_PER_EVTCHN_WORD);
			__this_cpu_write(current_bit_idx, bit_idx);
		} while (bit_idx != 0);

		/* Scan start_word_idx twice; all others once. */
		if ((word_idx != start_word_idx) || (i != 0))
			pending_words &= ~((xen_ulong_t)1 << word_idx);

		word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
	}
}

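/*
 * Dump the state of all event channels to the console.  This handler
 * is registered elsewhere (by the core Xen setup code) for VIRQ_DEBUG,
 * so it can be triggered from the hypervisor.
 */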
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n  ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nglobal mask:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nglobally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocal cpu%d mask:\n   ", cpu);
	for (i = EVTCHN_MASK_SIZE-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		xen_ulong_t pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n   " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) {
		if (sync_test_bit(i, BM(sh->evtchn_pending))) {
			int word_idx = i / BITS_PER_EVTCHN_WORD;
			printk("  %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       get_evtchn_to_irq(i),
			       sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
			       ? "" : " l2-clear",
			       !sync_test_bit(i, BM(sh->evtchn_mask))
			       ? "" : " globally-masked",
			       sync_test_bit(i, BM(cpu_evtchn))
			       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

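/*
 * On resume (and on CPU teardown below) the per-CPU binding masks are
 * simply cleared; bindings are re-established by the events core as
 * channels are restored.
 */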
static void evtchn_2l_resume(void)
{
	int i;

	for_each_online_cpu(i)
		memset(per_cpu(cpu_evtchn_mask, i), 0,
		       sizeof(xen_ulong_t) * EVTCHN_MASK_SIZE);
}

static int evtchn_2l_percpu_deinit(unsigned int cpu)
{
	memset(per_cpu(cpu_evtchn_mask, cpu), 0,
	       sizeof(xen_ulong_t) * EVTCHN_MASK_SIZE);

	return 0;
}

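/*
 * Ops table consumed by the events core via the evtchn_ops pointer
 * declared in events_internal.h; installed by xen_evtchn_2l_init().
 */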
static const struct evtchn_ops evtchn_ops_2l = {
	.max_channels      = evtchn_2l_max_channels,
	.nr_channels       = evtchn_2l_max_channels,
	.remove            = evtchn_2l_remove,
	.bind_to_cpu       = evtchn_2l_bind_to_cpu,
	.clear_pending     = evtchn_2l_clear_pending,
	.set_pending       = evtchn_2l_set_pending,
	.is_pending        = evtchn_2l_is_pending,
	.mask              = evtchn_2l_mask,
	.unmask            = evtchn_2l_unmask,
	.handle_events     = evtchn_2l_handle_events,
	.resume            = evtchn_2l_resume,
	.percpu_deinit     = evtchn_2l_percpu_deinit,
};

void __init xen_evtchn_2l_init(void)
{
	pr_info("Using 2-level ABI\n");
	evtchn_ops = &evtchn_ops_2l;
}