Lines Matching full:vpes
172 struct its_vpe **vpes; member
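The member hit at line 172 is the pointer array the rest of the hits revolve around: one struct its_vpe pointer per slot. Both the per-VM state and the GICv4.0 doorbell proxy below keep such an array. A minimal sketch of the per-VM side, with the field set trimmed to what the hits actually use (an illustration, not the exact kernel layout):

    /*
     * Sketch of the bookkeeping behind the vm->vpes hits: an array of
     * vPE pointers, one entry per vPE of the VM, plus its size.
     */
    struct its_vm_sketch {
            struct its_vpe  **vpes;         /* one entry per vPE of the VM */
            int             nr_vpes;        /* number of entries in vpes[] */
    };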
1755 * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1759 * and we're better off mapping all VPEs always
1761 * If neither (a) nor (b) is true, then we map vPEs on demand.
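The policy at lines 1755-1761 is a two-way decision; the elided case (b) in the source comment covers ITSs that do not use a list map, where a VMOVP is cheap enough that keeping every vPE mapped is the simpler choice. A minimal sketch of the resulting predicate, with the helper and parameter names assumed rather than taken from the driver:

    /*
     * Sketch only: map vPEs eagerly when either (a) this is a GICv4.1,
     * where every vPE must stay mapped to take doorbells, or (b) no ITS
     * list map is in use, so VMOVP is cheap and mapping everything is
     * simpler. Otherwise fall back to mapping vPEs on demand.
     */
    static bool need_eager_vpe_mapping(bool is_gicv41, bool uses_its_list_map)
    {
            return is_gicv41 || !uses_its_list_map;
    }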
1782 * If the VM wasn't mapped yet, iterate over the vpes and get them mapped now in its_map_vm()
1791 struct its_vpe *vpe = vm->vpes[i]; in its_map_vm()
1819 its_send_vmapp(its, vm->vpes[i], false); in its_unmap_vm()
1860 /* Ensure all the VPEs are mapped on this ITS */ in its_vlpi_map()
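Lines 1782-1860 are the on-demand path: the first ITS that needs a VM walks vm->vpes and maps each vPE with VMAPP, the unmap path issues VMAPP with valid == false, and its_vlpi_map() checks this has happened before a VLPI is mapped. A condensed sketch of that walk, with locking, reference counting and error handling dropped (an assumption-heavy simplification, not the driver's actual body):

    /*
     * Sketch of the vm->vpes walk behind its_map_vm()/its_unmap_vm():
     * issue VMAPP for every vPE of the VM on the given ITS, with
     * 'valid' selecting map (true) or unmap (false). The real code
     * also tracks per-ITS use counts and takes the relevant locks.
     */
    static void vm_set_mapped_on_its(struct its_node *its, struct its_vm *vm,
                                     bool valid)
    {
            int i;

            for (i = 0; i < vm->nr_vpes; i++)
                    its_send_vmapp(its, vm->vpes[i], valid);
    }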
3697 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; in its_vpe_db_proxy_unmap_locked()
3704 * effect... Let's just hope VPEs don't migrate too often. in its_vpe_db_proxy_unmap_locked()
3706 if (vpe_proxy.vpes[vpe_proxy.next_victim]) in its_vpe_db_proxy_unmap_locked()
3738 if (vpe_proxy.vpes[vpe_proxy.next_victim]) in its_vpe_db_proxy_map_locked()
3739 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); in its_vpe_db_proxy_map_locked()
3742 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; in its_vpe_db_proxy_map_locked()
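Lines 3697-3742 belong to the GICv4.0 doorbell proxy: a single ITS device whose events stand in for per-vPE doorbells. vpe_proxy.vpes[] records which vPE currently owns each event, and when no free slot is available the entry at next_victim is unmapped and recycled. A rough sketch of that eviction scheme (structure layout and helper name are assumptions for illustration):

    /*
     * Sketch of the proxy slot recycling: if the candidate slot is
     * occupied, its current tenant is unmapped first (the hits above
     * call its_vpe_db_proxy_unmap_locked() for that), then the new vPE
     * takes the slot and next_victim moves on.
     */
    struct db_proxy_sketch {
            struct its_vpe  **vpes;         /* proxy event -> proxied vPE */
            int             next_victim;    /* slot to recycle when full */
            int             nr_events;
    };

    static int db_proxy_claim_slot(struct db_proxy_sketch *p, struct its_vpe *vpe)
    {
            int slot = p->next_victim;

            /* Occupied? The driver unmaps the old tenant before reuse. */
            if (p->vpes[slot])
                    p->vpes[slot] = NULL;

            p->vpes[slot] = vpe;
            p->next_victim = (slot + 1) % p->nr_events;
            return slot;
    }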
4494 vm->vpes[i]->vpe_db_lpi = base + i; in its_vpe_irq_domain_alloc()
4495 err = its_vpe_init(vm->vpes[i]); in its_vpe_irq_domain_alloc()
4499 vm->vpes[i]->vpe_db_lpi); in its_vpe_irq_domain_alloc()
4503 irqchip, vm->vpes[i]); in its_vpe_irq_domain_alloc()
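Lines 4494-4503 show the allocation side: the vPE irq domain hands out a contiguous block of doorbell LPIs, gives vPE i the LPI base + i, initialises it with its_vpe_init(), and hooks it up to the irqchip. A trimmed sketch of that loop, keeping only the names quoted in the hits and dropping the irq_domain plumbing and the unwind path:

    /*
     * Sketch of the doorbell assignment loop in its_vpe_irq_domain_alloc():
     * carve [db_base, db_base + nr_vpes) into per-vPE doorbell LPIs and
     * initialise each vPE. The real function also installs the irq_domain
     * mappings and unwinds everything on failure.
     */
    static int assign_vpe_doorbells(struct its_vm *vm, u32 db_base)
    {
            int i, err;

            for (i = 0; i < vm->nr_vpes; i++) {
                    vm->vpes[i]->vpe_db_lpi = db_base + i;
                    err = its_vpe_init(vm->vpes[i]);
                    if (err)
                            return err;
            }

            return 0;
    }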
4887 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), in its_init_vpe_domain()
4889 if (!vpe_proxy.vpes) { in its_init_vpe_domain()
4898 kfree(vpe_proxy.vpes); in its_init_vpe_domain()
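Finally, lines 4887-4898 are the proxy setup in its_init_vpe_domain(): the slot array is sized to the number of proxy events with kcalloc() and freed again if a later setup step fails. The shape of that pattern, reduced to a sketch (the 'entries' computation and the surrounding setup are placeholders):

    /*
     * Sketch of the allocation pattern in its_init_vpe_domain(): size the
     * proxy slot array to the number of proxy events, bail out if the
     * allocation fails, and let the caller kfree() it if a later setup
     * step fails.
     */
    static struct its_vpe **alloc_db_proxy_slots(int entries)
    {
            struct its_vpe **vpes;

            vpes = kcalloc(entries, sizeof(*vpes), GFP_KERNEL);
            if (!vpes)
                    return NULL;

            return vpes;
    }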