Lines Matching full:vpes
173 struct its_vpe **vpes; member
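The hit at 173 is the per-VM vPE table in the GICv4 ITS driver: each guest (struct its_vm) carries an array of pointers to its vPEs, and every other match below walks or indexes that array. A minimal sketch of the relevant members, assuming the nr_vpes counter the driver keeps alongside it and omitting the unrelated fields:

	/* Sketch: the two its_vm members the matches below rely on. */
	struct its_vm {
		struct its_vpe	**vpes;		/* one entry per vPE of the guest */
		int		nr_vpes;	/* number of entries in vpes[] */
		/* doorbell LPI bookkeeping, property table, etc. omitted */
	};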
1731 * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1735 * and we're better off mapping all VPEs always
1737 * If neither (a) nor (b) is true, then we map vPEs on demand.
1758 * If the VM wasn't mapped yet, iterate over the vpes and get in its_map_vm()
1767 struct its_vpe *vpe = vm->vpes[i]; in its_map_vm()
1795 its_send_vmapp(its, vm->vpes[i], false); in its_unmap_vm()
1836 /* Ensure all the VPEs are mapped on this ITS */ in its_vlpi_map()
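The 1731-1836 hits spell out the mapping policy: with GICv4.1 (case a), or when it is cheaper to target every ITS anyway (case b), vPEs are mapped eagerly; otherwise its_map_vm() maps them on demand the first time a vLPI is routed through an ITS, and its_unmap_vm() issues the inverse VMAPP when the VM stops using that ITS. A condensed sketch of those two loops, assuming the reference counting and locking that surround them in the driver:

	/* Sketch: install every vPE of the VM on this ITS (VMAPP, valid). */
	static void map_all_vpes_on_its(struct its_node *its, struct its_vm *vm)
	{
		int i;

		for (i = 0; i < vm->nr_vpes; i++) {
			struct its_vpe *vpe = vm->vpes[i];

			its_send_vmapp(its, vpe, true);
		}
	}

	/* Sketch: tear the vPEs down again (VMAPP, not valid). */
	static void unmap_all_vpes_on_its(struct its_node *its, struct its_vm *vm)
	{
		int i;

		for (i = 0; i < vm->nr_vpes; i++)
			its_send_vmapp(its, vm->vpes[i], false);
	}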
3738 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; in its_vpe_db_proxy_unmap_locked()
3745 * effect... Let's just hope VPEs don't migrate too often. in its_vpe_db_proxy_unmap_locked()
3747 if (vpe_proxy.vpes[vpe_proxy.next_victim]) in its_vpe_db_proxy_unmap_locked()
3779 if (vpe_proxy.vpes[vpe_proxy.next_victim]) in its_vpe_db_proxy_map_locked()
3780 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); in its_vpe_db_proxy_map_locked()
3783 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; in its_vpe_db_proxy_map_locked()
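The 3738-3783 hits are the vPE doorbell proxy: a small table of slots (vpe_proxy.vpes[]) stands in for vPE doorbells, and next_victim picks which occupant to evict when a new vPE needs a slot, rotating round-robin in the hope, per the comment at 3745, that vPEs do not migrate too often. A reduced sketch of that evict-and-replace step, assuming the proxy lock is already held and a slot count supplied by the caller:

	/* Minimal stand-in for the driver's proxy bookkeeping (names from the hits). */
	static struct {
		struct its_vpe	**vpes;		/* one slot per proxy event */
		int		next_victim;	/* round-robin eviction cursor */
	} vpe_proxy;

	/* Sketch: claim a proxy slot for @vpe, evicting the current occupant. */
	static void proxy_map_vpe_locked(struct its_vpe *vpe, int nr_slots)
	{
		/* The chosen slot may already be in use: kick the other vPE out. */
		if (vpe_proxy.vpes[vpe_proxy.next_victim])
			its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);

		/* Install the new vPE and remember which proxy event it owns. */
		vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
		vpe->vpe_proxy_event = vpe_proxy.next_victim;

		/* Advance the victim cursor round-robin. */
		vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % nr_slots;
	}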
4547 vm->vpes[i]->vpe_db_lpi = base + i; in its_vpe_irq_domain_alloc()
4548 err = its_vpe_init(vm->vpes[i]); in its_vpe_irq_domain_alloc()
4552 vm->vpes[i]->vpe_db_lpi); in its_vpe_irq_domain_alloc()
4556 irqchip, vm->vpes[i]); in its_vpe_irq_domain_alloc()
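In its_vpe_irq_domain_alloc() (hits 4547-4556) each vPE receives its own doorbell LPI, base + i from a contiguous range, before the vPE is initialized and its Linux interrupt is wired up. A trimmed sketch of that per-vPE loop; the surrounding variable names and the parent-domain allocation helper are assumptions based on the usual driver pattern, and the error unwinding is left out:

	/* Sketch: one doorbell LPI and one virq per vPE. */
	for (i = 0; i < nr_irqs; i++) {
		vm->vpes[i]->vpe_db_lpi = base + i;	/* contiguous doorbell range */

		err = its_vpe_init(vm->vpes[i]);	/* set up the vPE itself */
		if (err)
			break;

		/* Allocate the doorbell LPI in the parent GIC domain (assumed helper). */
		err = its_irq_gic_domain_alloc(domain, virq + i,
					       vm->vpes[i]->vpe_db_lpi);
		if (err)
			break;

		/* Attach the vPE irqchip, using the index as the hwirq. */
		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
					      irqchip, vm->vpes[i]);
	}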
4945 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), in its_init_vpe_domain()
4947 if (!vpe_proxy.vpes) { in its_init_vpe_domain()
4956 kfree(vpe_proxy.vpes); in its_init_vpe_domain()
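Finally, its_init_vpe_domain() (hits 4945-4956) allocates the proxy slot table itself, one pointer per entry, and frees it again if a later setup step fails. A short sketch of that allocate-then-unwind pattern; the entries count comes from the hit at 4945, while the follow-up setup step named here is purely hypothetical:

	/* Sketch: allocate the proxy slot table and unwind on failure. */
	static int init_vpe_proxy_table(int entries)
	{
		vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), GFP_KERNEL);
		if (!vpe_proxy.vpes)
			return -ENOMEM;

		/* Hypothetical stand-in for the proxy device setup that follows. */
		if (setup_proxy_device(entries) < 0) {
			kfree(vpe_proxy.vpes);	/* don't leak the table */
			return -ENOMEM;
		}

		return 0;
	}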