Lines matching full:irq
9 #include <linux/irq.h>
53 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_read_group() local
55 if (irq->group) in vgic_mmio_read_group()
58 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_read_group()
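The read accessors in this file all share one shape: walk the covered INTIDs, take a reference with vgic_get_irq(), snapshot one field without locking, and drop the reference. A minimal sketch of that pattern, mirroring vgic_mmio_read_group() above (the helper name is made up):

static unsigned long example_read_bits(struct kvm_vcpu *vcpu, u32 intid,
				       unsigned int len)
{
	u32 value = 0;
	int i;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* Lockless snapshot: a racing update is acceptable here. */
		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);	/* drop vgic_get_irq() ref */
	}

	return value;
}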
64 static void vgic_update_vsgi(struct vgic_irq *irq) in vgic_update_vsgi() argument
66 WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group)); in vgic_update_vsgi()
77 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_write_group() local
79 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_group()
80 irq->group = !!(val & BIT(i)); in vgic_mmio_write_group()
81 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in vgic_mmio_write_group()
82 vgic_update_vsgi(irq); in vgic_mmio_write_group()
83 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_group()
85 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_mmio_write_group()
88 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_group()
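The write side adds the irq_lock and the queue-or-unlock step. A condensed sketch of vgic_mmio_write_group() with the direct-injection SGI special case left out (helper name hypothetical):

static void example_write_bits(struct kvm_vcpu *vcpu, u32 intid,
			       unsigned int len, unsigned long val)
{
	unsigned long flags;
	int i;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));	/* update under irq_lock */

		/* vgic_queue_irq_unlock() drops irq_lock itself. */
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}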
105 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_read_enable() local
107 if (irq->enabled) in vgic_mmio_read_enable()
110 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_read_enable()
125 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_write_senable() local
127 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_senable()
128 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in vgic_mmio_write_senable()
129 if (!irq->enabled) { in vgic_mmio_write_senable()
132 irq->enabled = true; in vgic_mmio_write_senable()
133 data = &irq_to_desc(irq->host_irq)->irq_data; in vgic_mmio_write_senable()
135 enable_irq(irq->host_irq); in vgic_mmio_write_senable()
138 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_senable()
139 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_senable()
142 } else if (vgic_irq_is_mapped_level(irq)) { in vgic_mmio_write_senable()
143 bool was_high = irq->line_level; in vgic_mmio_write_senable()
150 irq->line_level = vgic_get_phys_line_level(irq); in vgic_mmio_write_senable()
155 if (!irq->active && was_high && !irq->line_level) in vgic_mmio_write_senable()
156 vgic_irq_set_phys_active(irq, false); in vgic_mmio_write_senable()
158 irq->enabled = true; in vgic_mmio_write_senable()
159 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_mmio_write_senable()
161 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_senable()
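The mapped-level branch of vgic_mmio_write_senable() re-samples the physical line when an interrupt is re-enabled. A condensed sketch of that branch, with the elided control flow filled in from the surviving lines (helper name hypothetical):

static void example_resample_on_enable(struct vgic_irq *irq)
{
	bool was_high = irq->line_level;

	/* Re-read the physical input line, which may have changed
	 * while the IRQ was disabled. */
	irq->line_level = vgic_get_phys_line_level(irq);

	/*
	 * The line dropped while disabled and the IRQ is not active:
	 * deactivate the physical interrupt so it can fire again.
	 */
	if (!irq->active && was_high && !irq->line_level)
		vgic_irq_set_phys_active(irq, false);
}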
174 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_write_cenable() local
176 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_cenable()
177 if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled) in vgic_mmio_write_cenable()
178 disable_irq_nosync(irq->host_irq); in vgic_mmio_write_cenable()
180 irq->enabled = false; in vgic_mmio_write_cenable()
182 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_cenable()
183 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_cenable()
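Note the asymmetry with the enable path: clearing uses disable_irq_nosync(), because irq_lock is a raw spinlock held with interrupts off and plain disable_irq() would wait for in-flight handlers. A minimal sketch of the forwarding decision for directly-injected SGIs (helper name hypothetical; the real enable path additionally retries until irqd_irq_disabled() clears, as the irq_to_desc() line above hints):

static void example_forward_enable(struct vgic_irq *irq, bool enable)
{
	/* Only hardware-backed SGIs have their host IRQ toggled here. */
	if (!(irq->hw && vgic_irq_is_sgi(irq->intid)))
		return;

	if (enable && !irq->enabled)
		enable_irq(irq->host_irq);
	else if (!enable && irq->enabled)
		disable_irq_nosync(irq->host_irq); /* must not sleep here */
}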
196 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_uaccess_write_senable() local
198 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_uaccess_write_senable()
199 irq->enabled = true; in vgic_uaccess_write_senable()
200 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_uaccess_write_senable()
202 vgic_put_irq(vcpu->kvm, irq); in vgic_uaccess_write_senable()
217 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_uaccess_write_cenable() local
219 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_uaccess_write_cenable()
220 irq->enabled = false; in vgic_uaccess_write_cenable()
221 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_uaccess_write_cenable()
223 vgic_put_irq(vcpu->kvm, irq); in vgic_uaccess_write_cenable()
239 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in __read_pending() local
243 raw_spin_lock_irqsave(&irq->irq_lock, flags); in __read_pending()
244 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in __read_pending()
248 err = irq_get_irqchip_state(irq->host_irq, in __read_pending()
251 WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); in __read_pending()
252 } else if (!is_user && vgic_irq_is_mapped_level(irq)) { in __read_pending()
253 val = vgic_get_phys_line_level(irq); in __read_pending()
255 val = irq_is_pending(irq); in __read_pending()
259 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in __read_pending()
261 vgic_put_irq(vcpu->kvm, irq); in __read_pending()
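For directly-injected vSGIs the pending bit lives in the host irqchip rather than in the emulation, so __read_pending() queries it via irq_get_irqchip_state(). A self-contained sketch of that query (wrapper name hypothetical):

#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/ratelimit.h>

static bool example_host_sgi_pending(unsigned int host_irq)
{
	bool pending = false;
	int err;

	/* Ask the host irqchip whether the physical IRQ is pending. */
	err = irq_get_irqchip_state(host_irq, IRQCHIP_STATE_PENDING,
				    &pending);
	WARN_RATELIMIT(err, "IRQ %d", host_irq);

	return pending;
}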
279 static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq) in is_vgic_v2_sgi() argument
281 return (vgic_irq_is_sgi(irq->intid) && in is_vgic_v2_sgi()
294 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_write_spending() local
297 if (is_vgic_v2_sgi(vcpu, irq)) { in vgic_mmio_write_spending()
298 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_spending()
302 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_spending()
304 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in vgic_mmio_write_spending()
307 err = irq_set_irqchip_state(irq->host_irq, in vgic_mmio_write_spending()
310 WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); in vgic_mmio_write_spending()
312 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_spending()
313 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_spending()
318 irq->pending_latch = true; in vgic_mmio_write_spending()
319 if (irq->hw) in vgic_mmio_write_spending()
320 vgic_irq_set_phys_active(irq, true); in vgic_mmio_write_spending()
322 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_mmio_write_spending()
323 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_spending()
336 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_uaccess_write_spending() local
338 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_uaccess_write_spending()
339 irq->pending_latch = true; in vgic_uaccess_write_spending()
346 if (is_vgic_v2_sgi(vcpu, irq)) in vgic_uaccess_write_spending()
347 irq->source |= BIT(vcpu->vcpu_id); in vgic_uaccess_write_spending()
349 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_uaccess_write_spending()
351 vgic_put_irq(vcpu->kvm, irq); in vgic_uaccess_write_spending()
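A GICv2 SGI is pending per source CPU, and userspace restore cannot recover the original sender, so the write above records the restoring VCPU itself in the per-IRQ source bitmap. A sketch of how that bitmap reads back (helper name hypothetical; irq->source is a u8, one bit per possible source VCPU):

static bool example_sgi_pending_from(struct vgic_irq *irq, u8 source_vcpu)
{
	return !!(irq->source & BIT(source_vcpu));
}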
357 /* Must be called with irq->irq_lock held */
358 static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq) in vgic_hw_irq_cpending() argument
360 irq->pending_latch = false; in vgic_hw_irq_cpending()
373 vgic_irq_set_phys_pending(irq, false); in vgic_hw_irq_cpending()
374 if (!irq->active) in vgic_hw_irq_cpending()
375 vgic_irq_set_phys_active(irq, false); in vgic_hw_irq_cpending()
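The physical-state mirrors used by vgic_hw_irq_cpending() are thin wrappers around irq_set_irqchip_state(); sketches of the two helpers under that assumption (the example_ names stand in for vgic_irq_set_phys_pending() and vgic_irq_set_phys_active()):

static void example_set_phys_pending(struct vgic_irq *irq, bool pending)
{
	WARN_ON(irq_set_irqchip_state(irq->host_irq, IRQCHIP_STATE_PENDING,
				      pending));
}

static void example_set_phys_active(struct vgic_irq *irq, bool active)
{
	WARN_ON(irq_set_irqchip_state(irq->host_irq, IRQCHIP_STATE_ACTIVE,
				      active));
}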
387 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_write_cpending() local
390 if (is_vgic_v2_sgi(vcpu, irq)) { in vgic_mmio_write_cpending()
391 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_cpending()
395 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_cpending()
397 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in vgic_mmio_write_cpending()
400 err = irq_set_irqchip_state(irq->host_irq, in vgic_mmio_write_cpending()
403 WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); in vgic_mmio_write_cpending()
405 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_cpending()
406 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_cpending()
411 if (irq->hw) in vgic_mmio_write_cpending()
412 vgic_hw_irq_cpending(vcpu, irq); in vgic_mmio_write_cpending()
414 irq->pending_latch = false; in vgic_mmio_write_cpending()
416 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_cpending()
417 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_cpending()
430 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_uaccess_write_cpending() local
432 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_uaccess_write_cpending()
438 if (is_vgic_v2_sgi(vcpu, irq)) in vgic_uaccess_write_cpending()
439 irq->source = 0; in vgic_uaccess_write_cpending()
441 irq->pending_latch = false; in vgic_uaccess_write_cpending()
443 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_uaccess_write_cpending()
445 vgic_put_irq(vcpu->kvm, irq); in vgic_uaccess_write_cpending()
452 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
459 * the IRQ locks and we don't want to be chasing moving targets.
490 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in __vgic_mmio_read_active() local
496 if (irq->active) in __vgic_mmio_read_active()
499 vgic_put_irq(vcpu->kvm, irq); in __vgic_mmio_read_active()
528 /* Must be called with irq->irq_lock held */
529 static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, in vgic_hw_irq_change_active() argument
535 irq->active = active; in vgic_hw_irq_change_active()
536 vgic_irq_set_phys_active(irq, active); in vgic_hw_irq_change_active()
539 static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, in vgic_mmio_change_active() argument
545 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_change_active()
547 if (irq->hw && !vgic_irq_is_sgi(irq->intid)) { in vgic_mmio_change_active()
548 vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); in vgic_mmio_change_active()
549 } else if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in vgic_mmio_change_active()
555 irq->active = false; in vgic_mmio_change_active()
560 irq->active = active; in vgic_mmio_change_active()
576 active && vgic_irq_is_sgi(irq->intid)) in vgic_mmio_change_active()
577 irq->active_source = active_source; in vgic_mmio_change_active()
580 if (irq->active) in vgic_mmio_change_active()
581 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_mmio_change_active()
583 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_change_active()
594 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in __vgic_mmio_write_cactive() local
595 vgic_mmio_change_active(vcpu, irq, false); in __vgic_mmio_write_cactive()
596 vgic_put_irq(vcpu->kvm, irq); in __vgic_mmio_write_cactive()
631 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in __vgic_mmio_write_sactive() local
632 vgic_mmio_change_active(vcpu, irq, true); in __vgic_mmio_write_sactive()
633 vgic_put_irq(vcpu->kvm, irq); in __vgic_mmio_write_sactive()
668 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_read_priority() local
670 val |= (u64)irq->priority << (i * 8); in vgic_mmio_read_priority()
672 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_read_priority()
694 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_write_priority() local
696 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_priority()
698 irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); in vgic_mmio_write_priority()
699 if (irq->hw && vgic_irq_is_sgi(irq->intid)) in vgic_mmio_write_priority()
700 vgic_update_vsgi(irq); in vgic_mmio_write_priority()
701 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_priority()
703 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_priority()
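The GENMASK(7, 8 - VGIC_PRI_BITS) mask keeps only the implemented high-order priority bits; the unimplemented low bits are RAZ/WI, as the GIC architecture permits. A worked instance, assuming VGIC_PRI_BITS == 5 as in mainline:

static u8 example_clamp_priority(u8 written)
{
	/*
	 * GENMASK(7, 3) == 0xf8, so a guest write of 0xab is stored
	 * (and reads back) as 0xa8: the low three bits read as zero.
	 */
	return written & GENMASK(7, 8 - VGIC_PRI_BITS);
}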
715 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_read_config() local
717 if (irq->config == VGIC_CONFIG_EDGE) in vgic_mmio_read_config()
720 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_read_config()
735 struct vgic_irq *irq; in vgic_mmio_write_config() local
746 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_write_config()
747 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_config()
750 irq->config = VGIC_CONFIG_EDGE; in vgic_mmio_write_config()
752 irq->config = VGIC_CONFIG_LEVEL; in vgic_mmio_write_config()
754 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_config()
755 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_config()
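GICD_ICFGR packs two configuration bits per interrupt, of which only the upper bit is meaningful. The test elided between the lock and the two assignments above is, reconstructed from context, something like this sketch:

static void example_decode_icfgr(struct vgic_irq *irq, unsigned long val,
				 unsigned int i)
{
	/* Bit (2i + 1) set selects edge-triggered, clear selects level. */
	if (test_bit(i * 2 + 1, &val))
		irq->config = VGIC_CONFIG_EDGE;
	else
		irq->config = VGIC_CONFIG_LEVEL;
}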
766 struct vgic_irq *irq; in vgic_read_irq_line_level_info() local
771 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_read_irq_line_level_info()
772 if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level) in vgic_read_irq_line_level_info()
775 vgic_put_irq(vcpu->kvm, irq); in vgic_read_irq_line_level_info()
789 struct vgic_irq *irq; in vgic_write_irq_line_level_info() local
795 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_write_irq_line_level_info()
798 * Line level is set irrespective of irq type in vgic_write_irq_line_level_info()
800 * restore irq config before line level. in vgic_write_irq_line_level_info()
803 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_write_irq_line_level_info()
804 irq->line_level = new_level; in vgic_write_irq_line_level_info()
806 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_write_irq_line_level_info()
808 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_write_irq_line_level_info()
810 vgic_put_irq(vcpu->kvm, irq); in vgic_write_irq_line_level_info()
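The comment above encodes a restore-ordering rule: line_level only means anything for level-triggered interrupts, so userspace must restore the config (ICFGR) before the level information. A sketch of the per-IRQ restore step, with the new_level handling filled in from context (helper name hypothetical):

static void example_restore_line_level(struct kvm_vcpu *vcpu,
				       struct vgic_irq *irq, bool new_level)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->line_level = new_level;
	if (new_level)
		/* A newly-high line may be deliverable: queue it.
		 * vgic_queue_irq_unlock() drops irq_lock itself. */
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}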
935 /* Do we access a non-allocated IRQ? */ in check_region()