Lines Matching full:vpe — GICv4 vPE support in the GICv3 ITS driver (drivers/irqchip/irq-gic-v3-its.c)
217 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) in valid_vpe() argument
219 if (valid_col(its->collections + vpe->col_idx)) in valid_vpe()
220 return vpe; in valid_vpe()
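Reconstructed from the two fragments at 217-220, the complete helper reads as below; the NULL fall-through is not in the listing and is inferred from callers that treat the result as optional:

        static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
        {
                /* Only report the vPE if its target collection is reachable */
                if (valid_col(its->collections + vpe->col_idx))
                        return vpe;

                return NULL;
        }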
278 struct its_vpe *vpe; member
282 struct its_vpe *vpe; member
288 struct its_vpe *vpe; member
296 struct its_vpe *vpe; member
303 struct its_vpe *vpe; member
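The five member hits at 278-303 are the per-command vPE pointers inside the ITS command descriptor. A sketch of the surrounding layout, with sibling fields recalled from the driver and therefore approximate:

        struct its_cmd_desc {
                union {
                        /* ... physical (non-vPE) command blocks elided ... */
                        struct {
                                struct its_vpe *vpe;
                        } its_vinvall_cmd;

                        struct {
                                struct its_vpe *vpe;
                                struct its_collection *col;
                                bool valid;
                        } its_vmapp_cmd;

                        /*
                         * its_vmapti_cmd, its_vmovi_cmd and its_vmovp_cmd
                         * carry the same vpe pointer plus command-specific
                         * fields (device, event ID, sequence/ITS list).
                         */
                };
        };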
593 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); in its_build_vinvall_cmd()
597 return valid_vpe(its, desc->its_vinvall_cmd.vpe); in its_build_vinvall_cmd()
607 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); in its_build_vmapp_cmd()
611 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); in its_build_vmapp_cmd()
619 return valid_vpe(its, desc->its_vmapp_cmd.vpe); in its_build_vmapp_cmd()
629 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; in its_build_vmapti_cmd()
635 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); in its_build_vmapti_cmd()
642 return valid_vpe(its, desc->its_vmapti_cmd.vpe); in its_build_vmapti_cmd()
652 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; in its_build_vmovi_cmd()
658 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); in its_build_vmovi_cmd()
665 return valid_vpe(its, desc->its_vmovi_cmd.vpe); in its_build_vmovi_cmd()
678 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); in its_build_vmovp_cmd()
683 return valid_vpe(its, desc->its_vmovp_cmd.vpe); in its_build_vmovp_cmd()
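All five builders above share one shape: encode the opcode and the vPE fields into the command block, fix up endianness, and return the vPE via valid_vpe() so the completion logic can poll the right collection. The VINVALL builder, reconstructed around lines 593-597 (the opcode encode and fixup lines are not in the listing and are filled in from that pattern):

        static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
                                                     struct its_cmd_block *cmd,
                                                     struct its_cmd_desc *desc)
        {
                its_encode_cmd(cmd, GITS_CMD_VINVALL);
                its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

                its_fixup_cmd(cmd);

                return valid_vpe(its, desc->its_vinvall_cmd.vpe);
        }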
966 desc.its_vmapti_cmd.vpe = map->vpe; in its_send_vmapti()
980 desc.its_vmovi_cmd.vpe = map->vpe; in its_send_vmovi()
989 struct its_vpe *vpe, bool valid) in its_send_vmapp() argument
993 desc.its_vmapp_cmd.vpe = vpe; in its_send_vmapp()
995 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; in its_send_vmapp()
1000 static void its_send_vmovp(struct its_vpe *vpe) in its_send_vmovp() argument
1005 int col_id = vpe->col_idx; in its_send_vmovp()
1007 desc.its_vmovp_cmd.vpe = vpe; in its_send_vmovp()
1027 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); in its_send_vmovp()
1034 if (!vpe->its_vm->vlpi_count[its->list_nr]) in its_send_vmovp()
1044 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) in its_send_vinvall() argument
1048 desc.its_vinvall_cmd.vpe = vpe; in its_send_vinvall()
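The its_send_* helpers wrap a descriptor and hand it to the command queue. Reconstructed around line 1048; the its_send_single_vcommand() dispatch is the driver's usual path for vPE-returning builders and is assumed here:

        static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
        {
                struct its_cmd_desc desc;

                desc.its_vinvall_cmd.vpe = vpe;

                its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
        }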
1125 * to the /same/ vPE, using this opportunity to adjust the in its_vlpi_set_doorbell()
1246 struct its_vpe *vpe = vm->vpes[i]; in its_map_vm() local
1247 struct irq_data *d = irq_get_irq_data(vpe->irq); in its_map_vm()
1249 /* Map the VPE to the first possible CPU */ in its_map_vm()
1250 vpe->col_idx = cpumask_first(cpu_online_mask); in its_map_vm()
1251 its_send_vmapp(its, vpe, true); in its_map_vm()
1252 its_send_vinvall(its, vpe); in its_map_vm()
1253 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); in its_map_vm()
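Lines 1246-1253 form the body of the per-vPE loop in its_map_vm(): each vPE of the VM gets a column on the first online CPU, a VMAPP, a VINVALL, and a matching effective affinity. Consolidated, with the loop bound assumed to be vm->nr_vpes:

        for (i = 0; i < vm->nr_vpes; i++) {
                struct its_vpe *vpe = vm->vpes[i];
                struct irq_data *d = irq_get_irq_data(vpe->irq);

                /* Map the VPE to the first possible CPU */
                vpe->col_idx = cpumask_first(cpu_online_mask);
                its_send_vmapp(its, vpe, true);
                its_send_vinvall(its, vpe);
                irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
        }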
2088 * scheduled as a vPE, especially for the first CPU, and the in its_cpu_init_lpis()
2254 * that have interrupts targeted at this VPE, but the in its_alloc_vpe_table()
2397 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", in its_msi_prepare()
2589 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) in its_vpe_db_proxy_unmap_locked() argument
2592 if (vpe->vpe_proxy_event == -1) in its_vpe_db_proxy_unmap_locked()
2595 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); in its_vpe_db_proxy_unmap_locked()
2596 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; in its_vpe_db_proxy_unmap_locked()
2606 vpe_proxy.next_victim = vpe->vpe_proxy_event; in its_vpe_db_proxy_unmap_locked()
2608 vpe->vpe_proxy_event = -1; in its_vpe_db_proxy_unmap_locked()
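From the fragments at 2589-2608, the proxy unmap path discards the proxy event, clears the slot, opportunistically recycles it as the next victim, and marks the vPE unmapped. Sketch; the condition guarding the next_victim reuse at 2606 is not captured by the listing and is elided here:

        static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
        {
                /* Already unmapped? */
                if (vpe->vpe_proxy_event == -1)
                        return;

                its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
                vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;

                /* Recycle the freed slot (reuse condition elided) */
                vpe_proxy.next_victim = vpe->vpe_proxy_event;

                vpe->vpe_proxy_event = -1;
        }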
2611 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) in its_vpe_db_proxy_unmap() argument
2617 its_vpe_db_proxy_unmap_locked(vpe); in its_vpe_db_proxy_unmap()
2622 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) in its_vpe_db_proxy_map_locked() argument
2625 if (vpe->vpe_proxy_event != -1) in its_vpe_db_proxy_map_locked()
2628 /* This slot was already allocated. Kick the other VPE out. */ in its_vpe_db_proxy_map_locked()
2632 /* Map the new VPE instead */ in its_vpe_db_proxy_map_locked()
2633 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; in its_vpe_db_proxy_map_locked()
2634 vpe->vpe_proxy_event = vpe_proxy.next_victim; in its_vpe_db_proxy_map_locked()
2637 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; in its_vpe_db_proxy_map_locked()
2638 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); in its_vpe_db_proxy_map_locked()
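The map path is the mirror image (fragments at 2622-2638): evict whoever holds the victim slot, install the new vPE, and MAPTI its doorbell LPI into the proxy device. The round-robin advance of next_victim is recalled from the driver and should be treated as approximate:

        static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
        {
                /* Already mapped? */
                if (vpe->vpe_proxy_event != -1)
                        return;

                /* This slot was already allocated. Kick the other VPE out. */
                if (vpe_proxy.vpes[vpe_proxy.next_victim])
                        its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);

                /* Map the new VPE instead */
                vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
                vpe->vpe_proxy_event = vpe_proxy.next_victim;
                vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) %
                                        vpe_proxy.dev->event_map.nr_lpis;

                vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
                its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
        }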
2641 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) in its_vpe_db_proxy_move() argument
2650 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); in its_vpe_db_proxy_move()
2659 its_vpe_db_proxy_map_locked(vpe); in its_vpe_db_proxy_move()
2662 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); in its_vpe_db_proxy_move()
2663 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; in its_vpe_db_proxy_move()
2672 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_set_affinity() local
2681 if (vpe->col_idx != cpu) { in its_vpe_set_affinity()
2682 int from = vpe->col_idx; in its_vpe_set_affinity()
2684 vpe->col_idx = cpu; in its_vpe_set_affinity()
2685 its_send_vmovp(vpe); in its_vpe_set_affinity()
2686 its_vpe_db_proxy_move(vpe, from, cpu); in its_vpe_set_affinity()
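Fragments at 2672-2686 give the core of the affinity callback: a vPE move is expensive (VMOVP plus a doorbell migration), so it only happens when the target column actually changes. Reconstructed; the cpumask_first() pick and the IRQ_SET_MASK_OK_DONE return are inferred from the surrounding irqchip conventions:

        static int its_vpe_set_affinity(struct irq_data *d,
                                        const struct cpumask *mask_val,
                                        bool force)
        {
                struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
                int cpu = cpumask_first(mask_val);

                if (vpe->col_idx != cpu) {
                        int from = vpe->col_idx;

                        vpe->col_idx = cpu;
                        its_send_vmovp(vpe);
                        its_vpe_db_proxy_move(vpe, from, cpu);
                }

                irq_data_update_effective_affinity(d, cpumask_of(cpu));

                return IRQ_SET_MASK_OK_DONE;
        }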
2694 static void its_vpe_schedule(struct its_vpe *vpe) in its_vpe_schedule() argument
2699 /* Schedule the VPE */ in its_vpe_schedule()
2700 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & in its_vpe_schedule()
2707 val = virt_to_phys(page_address(vpe->vpt_page)) & in its_vpe_schedule()
2714 * easily. So in the end, vpe->pending_last is only an in its_vpe_schedule()
2721 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; in its_vpe_schedule()
2726 static void its_vpe_deschedule(struct its_vpe *vpe) in its_vpe_deschedule() argument
2735 vpe->idai = false; in its_vpe_deschedule()
2736 vpe->pending_last = true; in its_vpe_deschedule()
2738 vpe->idai = !!(val & GICR_VPENDBASER_IDAI); in its_vpe_deschedule()
2739 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); in its_vpe_deschedule()
2743 static void its_vpe_invall(struct its_vpe *vpe) in its_vpe_invall() argument
2751 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) in its_vpe_invall()
2758 its_send_vinvall(its, vpe); in its_vpe_invall()
2765 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_set_vcpu_affinity() local
2770 its_vpe_schedule(vpe); in its_vpe_set_vcpu_affinity()
2774 its_vpe_deschedule(vpe); in its_vpe_set_vcpu_affinity()
2778 its_vpe_invall(vpe); in its_vpe_set_vcpu_affinity()
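Lines 2765-2778 are the three arms of the vcpu-affinity dispatch. Reconstructed switch, with the cmd_type constants taken from the GICv4 layer (arm-gic-v4.h):

        static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
        {
                struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
                struct its_cmd_info *info = vcpu_info;

                switch (info->cmd_type) {
                case SCHEDULE_VPE:
                        its_vpe_schedule(vpe);
                        return 0;

                case DESCHEDULE_VPE:
                        its_vpe_deschedule(vpe);
                        return 0;

                case INVALL_VPE:
                        its_vpe_invall(vpe);
                        return 0;

                default:
                        return -EINVAL;
                }
        }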
2786 static void its_vpe_send_cmd(struct its_vpe *vpe, in its_vpe_send_cmd() argument
2793 its_vpe_db_proxy_map_locked(vpe); in its_vpe_send_cmd()
2794 cmd(vpe_proxy.dev, vpe->vpe_proxy_event); in its_vpe_send_cmd()
2801 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_send_inv() local
2806 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; in its_vpe_send_inv()
2807 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR); in its_vpe_send_inv()
2811 its_vpe_send_cmd(vpe, its_send_inv); in its_vpe_send_inv()
2838 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_set_irqchip_state() local
2846 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; in its_vpe_set_irqchip_state()
2848 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); in its_vpe_set_irqchip_state()
2850 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); in its_vpe_set_irqchip_state()
2856 its_vpe_send_cmd(vpe, its_send_int); in its_vpe_set_irqchip_state()
2858 its_vpe_send_cmd(vpe, its_send_clear); in its_vpe_set_irqchip_state()
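Lines 2838-2858 show both halves of the pending-state setter: a direct redistributor write when DirectLPI is available (SETLPIR/CLRLPIR at 2846-2850), otherwise INT/CLEAR commands through the proxy device. Reconstructed; the IRQCHIP_STATE_PENDING guard is inferred:

        static int its_vpe_set_irqchip_state(struct irq_data *d,
                                             enum irqchip_irq_state which,
                                             bool state)
        {
                struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

                /* Only the pending state of the doorbell can be forced */
                if (which != IRQCHIP_STATE_PENDING)
                        return -EINVAL;

                if (gic_rdists->has_direct_lpi) {
                        void __iomem *rdbase;

                        rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
                        if (state)
                                gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
                        else
                                gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
                } else {
                        if (state)
                                its_vpe_send_cmd(vpe, its_send_int);
                        else
                                its_vpe_send_cmd(vpe, its_send_clear);
                }

                return 0;
        }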
2870 .name = "GICv4-vpe",
2890 static int its_vpe_init(struct its_vpe *vpe) in its_vpe_init() argument
2913 vpe->vpe_id = vpe_id; in its_vpe_init()
2914 vpe->vpt_page = vpt_page; in its_vpe_init()
2915 vpe->vpe_proxy_event = -1; in its_vpe_init()
2920 static void its_vpe_teardown(struct its_vpe *vpe) in its_vpe_teardown() argument
2922 its_vpe_db_proxy_unmap(vpe); in its_vpe_teardown()
2923 its_vpe_id_free(vpe->vpe_id); in its_vpe_teardown()
2924 its_free_pending_table(vpe->vpt_page); in its_vpe_teardown()
2939 struct its_vpe *vpe = irq_data_get_irq_chip_data(data); in its_vpe_irq_domain_free() local
2941 BUG_ON(vm != vpe->its_vm); in its_vpe_irq_domain_free()
2944 its_vpe_teardown(vpe); in its_vpe_irq_domain_free()
3012 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_irq_domain_activate() local
3019 /* Map the VPE to the first possible CPU */ in its_vpe_irq_domain_activate()
3020 vpe->col_idx = cpumask_first(cpu_online_mask); in its_vpe_irq_domain_activate()
3026 its_send_vmapp(its, vpe, true); in its_vpe_irq_domain_activate()
3027 its_send_vinvall(its, vpe); in its_vpe_irq_domain_activate()
3030 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); in its_vpe_irq_domain_activate()
3038 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_irq_domain_deactivate() local
3042 * If we use the list map, we unmap the VPE once no VLPIs are in its_vpe_irq_domain_deactivate()
3052 its_send_vmapp(its, vpe, false); in its_vpe_irq_domain_deactivate()
3373 pr_info("ITS: Using DirectLPI for VPE invalidation\n"); in its_init_vpe_domain()