Lines Matching full:vpe
65 * with the actual vPE affinity, and not the braindead concept of
75 * The choice made here is that each vcpu (VPE in old northern GICv4
78 * interrupt becomes a handle for the VPE, and that the hypervisor
82 * contain an irq domain where each interrupt maps to a VPE. In
88 * - irq_set_affinity is used to move a VPE from one redistributor to
92 * creating a new sub-API, namely scheduling/descheduling a VPE
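The comment lines matched above lay out the design: each VPE gets a doorbell interrupt that acts as its handle, a per-VM irq domain maps one interrupt to each VPE, irq_set_affinity() moves a VPE between redistributors, and irq_set_vcpu_affinity() carries the sub-API that schedules/deschedules a VPE. The caller-side sketch below shows how a hypervisor might exercise those entry points; only its_alloc_vcpu_irqs(), its_schedule_vpe() and irq_set_affinity() come from the matches, while the example_* function names, the placement policy and the error handling are illustrative assumptions.

/*
 * Caller-side sketch (illustrative, not verbatim kernel code): a
 * hypervisor driving the per-VPE doorbell interrupts described above.
 */
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/irqchip/arm-gic-v4.h>

static int example_vm_setup(struct its_vm *vm)
{
	int ret, i;

	/* One doorbell interrupt per VPE, allocated in the VM's irq domain. */
	ret = its_alloc_vcpu_irqs(vm);
	if (ret)
		return ret;

	/*
	 * The doorbell irq is the handle for the VPE: placing the VPE on a
	 * redistributor is expressed as an affinity change on that irq.
	 */
	for (i = 0; i < vm->nr_vpes; i++) {
		ret = irq_set_affinity(vm->vpes[i]->irq,
				       cpumask_of(cpumask_first(cpu_online_mask)));
		if (ret)
			return ret;
	}

	return 0;
}

/* Assumed to run with preemption disabled, when the vcpu is loaded. */
static int example_vcpu_load(struct its_vpe *vpe)
{
	/* Make the VPE resident so its VLPIs are delivered directly. */
	return its_schedule_vpe(vpe, true);
}

/* Assumed to run when the vcpu is taken off the CPU. */
static int example_vcpu_put(struct its_vpe *vpe)
{
	/* Deschedule the VPE; a later VLPI rings the doorbell instead. */
	return its_schedule_vpe(vpe, false);
}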
104 vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe", in its_alloc_vcpu_irqs()
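The match above sits at the start of its_alloc_vcpu_irqs(), where a per-VM fwnode named "GICv4-vpe" is allocated. The sketch below shows how the rest of the allocation plausibly proceeds; the file-level gic_domain/vpe_domain_ops pointers are an assumption (registered by its_init_v4(), sketched after the last match), and the exact error handling is illustrative.

#include <linux/irqdomain.h>
#include <linux/irqchip/arm-gic-v4.h>
#include <linux/numa.h>
#include <linux/sched.h>

/* Assumed file-level state, registered by its_init_v4() (see below). */
static struct irq_domain *gic_domain;
static const struct irq_domain_ops *vpe_domain_ops;

int its_alloc_vcpu_irqs(struct its_vm *vm)
{
	int vpe_base_irq, i;

	/* Per-VM fwnode, named after the creating task (the matched line). */
	vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe",
						       task_pid_nr(current));
	if (!vm->fwnode)
		return -ENOMEM;

	/* A per-VM irq domain in which each interrupt maps to one VPE. */
	vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes,
						 vm->fwnode, vpe_domain_ops,
						 vm);
	if (!vm->domain)
		goto err_free_fwnode;

	/* One doorbell interrupt per VPE; the irq becomes the VPE handle. */
	vpe_base_irq = irq_domain_alloc_irqs(vm->domain, vm->nr_vpes,
					     NUMA_NO_NODE, vm);
	if (vpe_base_irq <= 0)
		goto err_free_domain;

	for (i = 0; i < vm->nr_vpes; i++)
		vm->vpes[i]->irq = vpe_base_irq + i;

	return 0;

err_free_domain:
	irq_domain_remove(vm->domain);
err_free_fwnode:
	irq_domain_free_fwnode(vm->fwnode);
	return -ENOMEM;
}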
147 static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info) in its_send_vpe_cmd() argument
149 return irq_set_vcpu_affinity(vpe->irq, info); in its_send_vpe_cmd()
152 int its_schedule_vpe(struct its_vpe *vpe, bool on) in its_schedule_vpe() argument
160 return its_send_vpe_cmd(vpe, &info); in its_schedule_vpe()
163 int its_invall_vpe(struct its_vpe *vpe) in its_invall_vpe() argument
169 return its_send_vpe_cmd(vpe, &info); in its_invall_vpe()
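The matches above show its_send_vpe_cmd() forwarding a command descriptor through irq_set_vcpu_affinity() on the VPE's doorbell irq, with its_schedule_vpe() and its_invall_vpe() returning through it. Below is a sketch of how these helpers plausibly fit together, assuming the its_cmd_info descriptor and the SCHEDULE_VPE/DESCHEDULE_VPE/INVALL_VPE command types from include/linux/irqchip/arm-gic-v4.h; the bodies are a reconstruction, not verbatim source.

#include <linux/interrupt.h>
#include <linux/irqchip/arm-gic-v4.h>

static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
{
	/* The VPE's doorbell irq is the handle the command travels through. */
	return irq_set_vcpu_affinity(vpe->irq, info);
}

int its_schedule_vpe(struct its_vpe *vpe, bool on)
{
	struct its_cmd_info info;

	/* Residency changes are assumed to run with preemption disabled. */
	WARN_ON(preemptible());

	info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;

	return its_send_vpe_cmd(vpe, &info);
}

int its_invall_vpe(struct its_vpe *vpe)
{
	struct its_cmd_info info = {
		.cmd_type	= INVALL_VPE,
	};

	return its_send_vpe_cmd(vpe, &info);
}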
234 pr_err("ITS: No GICv4 VPE domain allocated\n"); in its_init_v4()