/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Port from Qemu.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include "irq.h"

#include <linux/kvm_host.h>
#include "trace.h"

#define pr_pic_unimpl(fmt, ...)	\
	pr_err_ratelimited("kvm: pic: " fmt, ## __VA_ARGS__)

static void pic_irq_request(struct kvm *kvm, int level);

static void pic_lock(struct kvm_pic *s)
	__acquires(&s->lock)
{
	spin_lock(&s->lock);
}

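/*
 * Dropping the lock may require a vcpu to be notified: if wakeup_needed was
 * set while the lock was held, kick the first vcpu that can accept PIC
 * interrupts so that it re-evaluates pending events.
 */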
static void pic_unlock(struct kvm_pic *s)
	__releases(&s->lock)
{
	bool wakeup = s->wakeup_needed;
	struct kvm_vcpu *vcpu;
	int i;

	s->wakeup_needed = false;

	spin_unlock(&s->lock);

	if (wakeup) {
		kvm_for_each_vcpu(i, vcpu, s->kvm) {
			if (kvm_apic_accept_pic_intr(vcpu)) {
				kvm_make_request(KVM_REQ_EVENT, vcpu);
				kvm_vcpu_kick(vcpu);
				return;
			}
		}
	}
}

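/*
 * Clear the in-service bit for 'irq' and notify any registered irq ack
 * notifiers; for the slave PIC the pin is first converted to its global
 * 0-15 interrupt number.
 */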
static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
	s->isr &= ~(1 << irq);
	if (s != &s->pics_state->pics[0])
		irq += 8;
	/*
	 * We drop the lock while calling the ack notifiers, since the ack
	 * notifier callbacks for assigned devices call back into the PIC
	 * recursively. Other interrupts may be delivered to the PIC while the
	 * lock is dropped, but that is safe because the PIC state has already
	 * been updated at this stage.
	 */
	pic_unlock(s->pics_state);
	kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
	pic_lock(s->pics_state);
}

/*
 * set irq level. If an edge is detected, then the IRR is set to 1
 */
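/*
 * Returns -1 if the line is masked by the IMR, 0 if an asserted interrupt
 * was already pending in the IRR (i.e. it was coalesced), and 1 otherwise.
 */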
static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
	int mask, ret = 1;
	mask = 1 << irq;
	if (s->elcr & mask)	/* level triggered */
		if (level) {
			ret = !(s->irr & mask);
			s->irr |= mask;
			s->last_irr |= mask;
		} else {
			s->irr &= ~mask;
			s->last_irr &= ~mask;
		}
	else	/* edge triggered */
		if (level) {
			if ((s->last_irr & mask) == 0) {
				ret = !(s->irr & mask);
				s->irr |= mask;
			}
			s->last_irr |= mask;
		} else
			s->last_irr &= ~mask;

	return (s->imr & mask) ? -1 : ret;
}

/*
 * return the highest priority found in mask (highest = smallest
 * number). Return 8 if no irq
 */
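/*
 * Priorities rotate with priority_add: priority 0 corresponds to pin
 * (priority_add & 7), so e.g. with priority_add == 3, IRQ 3 has the
 * highest priority and IRQ 2 the lowest.
 */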
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
	int priority;
	if (mask == 0)
		return 8;
	priority = 0;
	while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
		priority++;
	return priority;
}

/*
 * return the interrupt the PIC wants to deliver. Return -1 if none.
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
	int mask, cur_priority, priority;

	mask = s->irr & ~s->imr;
	priority = get_priority(s, mask);
	if (priority == 8)
		return -1;
	/*
	 * compute current priority. If special fully nested mode on the
	 * master, the IRQ coming from the slave is not taken into account
	 * for the priority computation.
	 */
	mask = s->isr;
	if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
		mask &= ~(1 << 2);
	cur_priority = get_priority(s, mask);
	if (priority < cur_priority)
		/*
		 * higher priority found: an irq should be generated
		 */
		return (priority + s->priority_add) & 7;
	else
		return -1;
}

/*
 * raise irq to CPU if necessary. must be called every time the active
 * irq may change
 */
static void pic_update_irq(struct kvm_pic *s)
{
	int irq2, irq;

	irq2 = pic_get_irq(&s->pics[1]);
	if (irq2 >= 0) {
		/*
		 * if an irq is requested by the slave PIC, signal the master PIC
		 */
		pic_set_irq1(&s->pics[0], 2, 1);
		pic_set_irq1(&s->pics[0], 2, 0);
	}
	irq = pic_get_irq(&s->pics[0]);
	pic_irq_request(s->kvm, irq >= 0);
}

void kvm_pic_update_irq(struct kvm_pic *s)
{
	pic_lock(s);
	pic_update_irq(s);
	pic_unlock(s);
}

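/*
 * Set the level of a global PIC input line (0-15): irq >> 3 selects the
 * master (0) or slave (1) PIC and irq & 7 the pin on that chip.  The return
 * value follows pic_set_irq1(): -1 if masked, 0 if coalesced, 1 if the
 * interrupt was newly latched.
 */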
int kvm_pic_set_irq(struct kvm_pic *s, int irq, int irq_source_id, int level)
{
	int ret, irq_level;

	BUG_ON(irq < 0 || irq >= PIC_NUM_PINS);

	pic_lock(s);
	irq_level = __kvm_irq_line_state(&s->irq_states[irq],
					 irq_source_id, level);
	ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, irq_level);
	pic_update_irq(s);
	trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
			      s->pics[irq >> 3].imr, ret == 0);
	pic_unlock(s);

	return ret;
}

void kvm_pic_clear_all(struct kvm_pic *s, int irq_source_id)
{
	int i;

	pic_lock(s);
	for (i = 0; i < PIC_NUM_PINS; i++)
		__clear_bit(irq_source_id, &s->irq_states[i]);
	pic_unlock(s);
}

/*
 * acknowledge interrupt 'irq'
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
	s->isr |= 1 << irq;
	/*
	 * We don't clear a level sensitive interrupt here
	 */
	if (!(s->elcr & (1 << irq)))
		s->irr &= ~(1 << irq);

	if (s->auto_eoi) {
		if (s->rotate_on_auto_eoi)
			s->priority_add = (irq + 1) & 7;
		pic_clear_isr(s, irq);
	}
}

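/*
 * Acknowledge the highest-priority pending interrupt (the INTA cycle) and
 * return the vector to inject: irq_base + pin.  IRQ2 on the master cascades
 * into the slave PIC; if nothing is pending a spurious IRQ7 vector is
 * returned.
 */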
int kvm_pic_read_irq(struct kvm *kvm)
{
	int irq, irq2, intno;
	struct kvm_pic *s = kvm->arch.vpic;

	s->output = 0;

	pic_lock(s);
	irq = pic_get_irq(&s->pics[0]);
	if (irq >= 0) {
		pic_intack(&s->pics[0], irq);
		if (irq == 2) {
			irq2 = pic_get_irq(&s->pics[1]);
			if (irq2 >= 0)
				pic_intack(&s->pics[1], irq2);
			else
				/*
				 * spurious IRQ on slave controller
				 */
				irq2 = 7;
			intno = s->pics[1].irq_base + irq2;
			irq = irq2 + 8;
		} else
			intno = s->pics[0].irq_base + irq;
	} else {
		/*
		 * spurious IRQ on master controller
		 */
		irq = 7;
		intno = s->pics[0].irq_base + irq;
	}
	pic_update_irq(s);
	pic_unlock(s);

	return intno;
}

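/*
 * Reset one PIC core to its post-ICW1 state.  Ack notifiers are fired for
 * edge-triggered interrupts that were pending in the IRR and are dropped by
 * the reset, but only if at least one vcpu actually accepts PIC interrupts.
 */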
static void kvm_pic_reset(struct kvm_kpic_state *s)
{
	int irq, i;
	struct kvm_vcpu *vcpu;
	u8 edge_irr = s->irr & ~s->elcr;
	bool found = false;

	s->last_irr = 0;
	s->irr &= s->elcr;
	s->imr = 0;
	s->priority_add = 0;
	s->special_mask = 0;
	s->read_reg_select = 0;
	if (!s->init4) {
		s->special_fully_nested_mode = 0;
		s->auto_eoi = 0;
	}
	s->init_state = 1;

	kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
		if (kvm_apic_accept_pic_intr(vcpu)) {
			found = true;
			break;
		}

	if (!found)
		return;

	for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
		if (edge_irr & (1 << irq))
			pic_clear_isr(s, irq);
}

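/*
 * Write to one of the PIC's two I/O ports.  Address bit 0 selects the
 * register: the command port (bit 0 == 0) takes ICW1 (bit 4 set), OCW3
 * (bit 3 set) or OCW2 (EOI / priority rotation) writes; the data port
 * (bit 0 == 1) takes the IMR during normal operation, or ICW2..ICW4 while
 * an initialization sequence is in progress.
 */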
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	int priority, cmd, irq;

	addr &= 1;
	if (addr == 0) {
		if (val & 0x10) {
			s->init4 = val & 1;
			if (val & 0x02)
				pr_pic_unimpl("single mode not supported");
			if (val & 0x08)
				pr_pic_unimpl(
						"level sensitive irq not supported");
			kvm_pic_reset(s);
		} else if (val & 0x08) {
			if (val & 0x04)
				s->poll = 1;
			if (val & 0x02)
				s->read_reg_select = val & 1;
			if (val & 0x40)
				s->special_mask = (val >> 5) & 1;
		} else {
			cmd = val >> 5;
			switch (cmd) {
			case 0:
			case 4:
				s->rotate_on_auto_eoi = cmd >> 2;
				break;
			case 1:	/* end of interrupt */
			case 5:
				priority = get_priority(s, s->isr);
				if (priority != 8) {
					irq = (priority + s->priority_add) & 7;
					if (cmd == 5)
						s->priority_add = (irq + 1) & 7;
					pic_clear_isr(s, irq);
					pic_update_irq(s->pics_state);
				}
				break;
			case 3:
				irq = val & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			case 6:
				s->priority_add = (val + 1) & 7;
				pic_update_irq(s->pics_state);
				break;
			case 7:
				irq = val & 7;
				s->priority_add = (irq + 1) & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			default:
				break;	/* no operation */
			}
		}
	} else
		switch (s->init_state) {
		case 0: { /* normal mode */
			u8 imr_diff = s->imr ^ val,
				off = (s == &s->pics_state->pics[0]) ? 0 : 8;
			s->imr = val;
			for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
				if (imr_diff & (1 << irq))
					kvm_fire_mask_notifiers(
						s->pics_state->kvm,
						SELECT_PIC(irq + off),
						irq + off,
						!!(s->imr & (1 << irq)));
			pic_update_irq(s->pics_state);
			break;
		}
		case 1:
			s->irq_base = val & 0xf8;
			s->init_state = 2;
			break;
		case 2:
			if (s->init4)
				s->init_state = 3;
			else
				s->init_state = 0;
			break;
		case 3:
			s->special_fully_nested_mode = (val >> 4) & 1;
			s->auto_eoi = (val >> 1) & 1;
			s->init_state = 0;
			break;
		}
}

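/*
 * Poll-mode read (OCW3 with the poll bit set): acknowledge and return the
 * highest-priority pending IRQ, or 0x07 if none is pending.  Bit 7 of the
 * port address distinguishes the slave PIC (0xa0/0xa1) from the master; when
 * reading from the slave, the cascade IRQ2 is also cleared on the master.
 */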
static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
	int ret;

	ret = pic_get_irq(s);
	if (ret >= 0) {
		if (addr1 >> 7) {
			s->pics_state->pics[0].isr &= ~(1 << 2);
			s->pics_state->pics[0].irr &= ~(1 << 2);
		}
		s->irr &= ~(1 << ret);
		pic_clear_isr(s, ret);
		if (addr1 >> 7 || ret != 2)
			pic_update_irq(s->pics_state);
	} else {
		ret = 0x07;
		pic_update_irq(s->pics_state);
	}

	return ret;
}

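/*
 * Read one of the PIC's I/O ports.  In poll mode this returns the poll
 * result; otherwise the command port returns the IRR or ISR (depending on
 * the last OCW3 read-register select) and the data port returns the IMR.
 */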
static u32 pic_ioport_read(void *opaque, u32 addr)
{
	struct kvm_kpic_state *s = opaque;
	int ret;

	if (s->poll) {
		ret = pic_poll_read(s, addr);
		s->poll = 0;
	} else
		if ((addr & 1) == 0)
			if (s->read_reg_select)
				ret = s->isr;
			else
				ret = s->irr;
		else
			ret = s->imr;
	return ret;
}

static void elcr_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	s->elcr = val & s->elcr_mask;
}

static u32 elcr_ioport_read(void *opaque, u32 addr1)
{
	struct kvm_kpic_state *s = opaque;
	return s->elcr;
}

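/*
 * Byte-wide PIO dispatch for the emulated ports: 0x20/0x21 address the
 * master PIC, 0xa0/0xa1 the slave, and 0x4d0/0x4d1 the edge/level control
 * registers (ELCR).
 */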
static int picdev_write(struct kvm_pic *s,
			 gpa_t addr, int len, const void *val)
{
	unsigned char data = *(unsigned char *)val;

	if (len != 1) {
		pr_pic_unimpl("non byte write\n");
		return 0;
	}
	switch (addr) {
	case 0x20:
	case 0x21:
		pic_lock(s);
		pic_ioport_write(&s->pics[0], addr, data);
		pic_unlock(s);
		break;
	case 0xa0:
	case 0xa1:
		pic_lock(s);
		pic_ioport_write(&s->pics[1], addr, data);
		pic_unlock(s);
		break;
	case 0x4d0:
	case 0x4d1:
		pic_lock(s);
		elcr_ioport_write(&s->pics[addr & 1], addr, data);
		pic_unlock(s);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int picdev_read(struct kvm_pic *s,
		       gpa_t addr, int len, void *val)
{
	unsigned char *data = (unsigned char *)val;

	if (len != 1) {
		memset(val, 0, len);
		pr_pic_unimpl("non byte read\n");
		return 0;
	}
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
		pic_lock(s);
		*data = pic_ioport_read(&s->pics[addr >> 7], addr);
		pic_unlock(s);
		break;
	case 0x4d0:
	case 0x4d1:
		pic_lock(s);
		*data = elcr_ioport_read(&s->pics[addr & 1], addr);
		pic_unlock(s);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_master),
			    addr, len, val);
}

static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_master),
			    addr, len, val);
}

static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_slave),
			    addr, len, val);
}

static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_slave),
			    addr, len, val);
}

static int picdev_eclr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_eclr),
			    addr, len, val);
}

static int picdev_eclr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			    gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_eclr),
			    addr, len, val);
}

/*
 * callback invoked when the master PIC's (PIC0) INT output changes
 */
static void pic_irq_request(struct kvm *kvm, int level)
{
	struct kvm_pic *s = kvm->arch.vpic;

	if (!s->output)
		s->wakeup_needed = true;
	s->output = level;
}

static const struct kvm_io_device_ops picdev_master_ops = {
	.read     = picdev_master_read,
	.write    = picdev_master_write,
};

static const struct kvm_io_device_ops picdev_slave_ops = {
	.read     = picdev_slave_read,
	.write    = picdev_slave_write,
};

static const struct kvm_io_device_ops picdev_eclr_ops = {
	.read     = picdev_eclr_read,
	.write    = picdev_eclr_write,
};

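/*
 * Create the virtual PIC and register its PIO regions on the KVM I/O bus:
 * 0x20-0x21 (master), 0xa0-0xa1 (slave) and 0x4d0-0x4d1 (ELCR).
 */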
int kvm_pic_init(struct kvm *kvm)
{
	struct kvm_pic *s;
	int ret;

	s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL_ACCOUNT);
	if (!s)
		return -ENOMEM;
	spin_lock_init(&s->lock);
	s->kvm = kvm;
	s->pics[0].elcr_mask = 0xf8;
	s->pics[1].elcr_mask = 0xde;
	s->pics[0].pics_state = s;
	s->pics[1].pics_state = s;

	/*
	 * Initialize PIO devices
	 */
	kvm_iodevice_init(&s->dev_master, &picdev_master_ops);
	kvm_iodevice_init(&s->dev_slave, &picdev_slave_ops);
	kvm_iodevice_init(&s->dev_eclr, &picdev_eclr_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x20, 2,
				      &s->dev_master);
	if (ret < 0)
		goto fail_unlock;

	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0xa0, 2, &s->dev_slave);
	if (ret < 0)
		goto fail_unreg_2;

	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x4d0, 2, &s->dev_eclr);
	if (ret < 0)
		goto fail_unreg_1;

	mutex_unlock(&kvm->slots_lock);

	kvm->arch.vpic = s;

	return 0;

fail_unreg_1:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_slave);

fail_unreg_2:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_master);

fail_unlock:
	mutex_unlock(&kvm->slots_lock);

	kfree(s);

	return ret;
}

void kvm_pic_destroy(struct kvm *kvm)
{
	struct kvm_pic *vpic = kvm->arch.vpic;

	if (!vpic)
		return;

	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.vpic = NULL;
	kfree(vpic);
}