/virt/kvm/arm/vgic/
vgic-mmio.h
    111: unsigned long extract_bytes(u64 data, unsigned int offset,
    114: u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
    196: u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid);
    199: const u64 val);
    205: u64 vgic_sanitise_outer_cacheability(u64 reg);
    206: u64 vgic_sanitise_inner_cacheability(u64 reg);
    207: u64 vgic_sanitise_shareability(u64 reg);
    208: u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
    209: u64 (*sanitise_fn)(u64));
vgic-mmio-v3.c
    20: unsigned long extract_bytes(u64 data, unsigned int offset,  in extract_bytes()
    27: u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,  in update_64bit_reg()
    36: return reg | ((u64)val << lower);  in update_64bit_reg()
    213: u64 value;  in vgic_mmio_read_v3r_typer()
    215: value = (u64)(mpidr & GENMASK(23, 0)) << 32;  in vgic_mmio_read_v3r_typer()
    231: u64 value;  in vgic_uaccess_read_v3r_typer()
    233: value = (u64)(mpidr & GENMASK(23, 0)) << 32;  in vgic_uaccess_read_v3r_typer()
    319: u64 vgic_sanitise_shareability(u64 field)  in vgic_sanitise_shareability()
    330: u64 vgic_sanitise_inner_cacheability(u64 field)  in vgic_sanitise_inner_cacheability()
    342: u64 vgic_sanitise_outer_cacheability(u64 field)  in vgic_sanitise_outer_cacheability()
    [all …]
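The first two hits are the byte-window helpers that let the v3 MMIO code expose 64-bit registers to narrower accesses, and the hit at line 36 shows the shift-and-OR that writes the window back. Below is a standalone userspace sketch of that pattern; the function names mirror the helpers above, but the bodies (and the low_bits() mask helper) are written for the demo, not copied from the tree.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Mask of the low n bits, valid for n = 0..64 (illustrative helper). */
static u64 low_bits(unsigned int n)
{
    return n >= 64 ? ~0ULL : (1ULL << n) - 1;
}

/* Return 'num' bytes of 'data', starting 'offset' bytes into it. */
static unsigned long extract_bytes(u64 data, unsigned int offset,
                                   unsigned int num)
{
    return (data >> (8 * offset)) & low_bits(8 * num);
}

/* Replace the 'len'-byte window at byte 'offset' of 'reg' with 'val'. */
static u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
                            unsigned long val)
{
    unsigned int lower = 8 * offset;

    reg &= ~(low_bits(8 * len) << lower);
    val &= low_bits(8 * len);

    return reg | ((u64)val << lower);   /* same shape as the hit at line 36 */
}

int main(void)
{
    u64 reg = 0x1122334455667788ULL;

    /* Read the upper word, then overwrite it as a 4-byte write would. */
    printf("upper = %llx\n",
           (unsigned long long)extract_bytes(reg, 4, 4));          /* 11223344 */
    printf("after = %llx\n",
           (unsigned long long)update_64bit_reg(reg, 4, 4, 0xcafef00d));
    return 0;
}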
vgic-kvm-device.c
    56: int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)  in kvm_vgic_addr()
    61: u64 undef_value = VGIC_ADDR_UNDEF;  in kvm_vgic_addr()
    132: *addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;  in kvm_vgic_addr()
    162: u64 __user *uaddr = (u64 __user *)(long)attr->addr;  in vgic_set_common_attr()
    163: u64 addr;  in vgic_set_common_attr()
    225: u64 __user *uaddr = (u64 __user *)(long)attr->addr;  in vgic_get_common_attr()
    226: u64 addr;  in vgic_get_common_attr()
    510: u64 *reg, bool is_write)  in vgic_v3_attr_regs_access()
    555: u64 regid;  in vgic_v3_attr_regs_access()
    602: u64 reg;  in vgic_v3_set_attr()
    [all …]
vgic-its.c
    283: u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);  in update_lpi_config()
    413: static u32 max_lpis_propbaser(u64 propbaser)  in max_lpis_propbaser()
    481: u64 reg = GITS_TYPER_PLPIS;  in vgic_mmio_read_its_typer()
    701: u64 address;  in vgic_msi_to_its()
    711: address = (u64)msi->address_hi << 32 | msi->address_lo;  in vgic_msi_to_its()
    762: db = (u64)msi->address_hi << 32 | msi->address_lo;  in vgic_its_inject_cached_translation()
    828: static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)  in its_cmd_mask_field()
    848: u64 *its_cmd)  in vgic_its_cmd_handle_discard()
    876: u64 *its_cmd)  in vgic_its_cmd_handle_movi()
    911: static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,  in vgic_its_check_id()
    [all …]
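The hits at 711 and 762 rebuild the 64-bit doorbell address from the split address_hi/address_lo MSI fields, and its_cmd_mask_field() at 828 pulls bitfields out of the 64-bit words of a 32-byte ITS command. A minimal sketch of the field-extraction half follows; its_cmd_field() and the field positions in main() are illustrative choices for the demo, not the actual GITS command layout as defined in the headers.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* Extract 'size' bits starting at bit 'shift' of 64-bit command word 'word'. */
static u64 its_cmd_field(const u64 *its_cmd, int word, int shift, int size)
{
    u64 mask = size >= 64 ? ~0ULL : (1ULL << size) - 1;

    return (its_cmd[word] >> shift) & mask;
}

int main(void)
{
    /* A fake 4 x u64 command buffer; the layout below is made up. */
    u64 cmd[4] = { 0x000000420000000bULL, 0x0000000000002000ULL, 0, 0 };

    printf("cmd   = %llu\n", (unsigned long long)its_cmd_field(cmd, 0, 0, 8));
    printf("devid = %llu\n", (unsigned long long)its_cmd_field(cmd, 0, 32, 32));
    printf("event = %llu\n", (unsigned long long)its_cmd_field(cmd, 1, 0, 32));
    return 0;
}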
vgic.h
    222: int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
    241: u64 id, u64 *val);
    242: int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
    243: u64 *reg);
    245: u32 intid, u64 *val);
vgic-v2.c
    430: u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;  in save_lrs()
    431: u64 elrsr;  in save_lrs()
    436: elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;  in save_lrs()
    451: u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;  in vgic_v2_save_state()
    466: u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;  in vgic_v2_restore_state()
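save_lrs() assembles the 64-bit empty-LR status out of the two 32-bit GICH_ELRSR registers; the same low/high recombination appears for the MSI doorbell address in vgic-its.c above and for CNTVOFF in timer-sr.c below. A trivial sketch of that shape, with the register reads replaced by plain parameters (combine_hi_lo() is a name picked for the demo):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* Combine two 32-bit halves into one 64-bit value, low word in bits [31:0]. */
static u64 combine_hi_lo(u32 hi, u32 lo)
{
    u64 val = lo;

    val |= (u64)hi << 32;   /* same shape as the ELRSR1 hit at line 436 */
    return val;
}

int main(void)
{
    printf("%llx\n", (unsigned long long)combine_hi_lo(0xdeadbeef, 0x00c0ffee));
    return 0;
}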
vgic-v3.c
    25: static bool lr_signals_eoi_mi(u64 lr_val)  in lr_signals_eoi_mi()
    43: u64 val = cpuif->vgic_lr[lr];  in vgic_v3_fold_lr_state()
    121: u64 val = irq->intid;  in vgic_v3_populate_lr()
    139: val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;  in vgic_v3_populate_lr()
    195: val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;  in vgic_v3_populate_lr()
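vgic_v3_populate_lr() builds a 64-bit list-register value by OR-ing shifted fields (virtual intid, physical intid, priority, state) into one u64, as the hits at 121, 139 and 195 show. The sketch below illustrates that compose step only; struct fake_irq and the LR_* shift constants are stand-ins chosen for the demo, not the ICH_LR_* definitions from the architecture headers.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;
typedef uint8_t  u8;

/* Stand-in field positions for the demo. */
#define LR_PRIORITY_SHIFT   48
#define LR_PHYS_ID_SHIFT    32
#define LR_STATE_PENDING    (1ULL << 62)

struct fake_irq {
    u32 intid;
    u32 hwintid;
    u8  priority;
    int pending;
};

/* Compose a list-register-style value from the per-interrupt fields. */
static u64 populate_lr(const struct fake_irq *irq)
{
    u64 val = irq->intid;

    val |= (u64)irq->hwintid << LR_PHYS_ID_SHIFT;
    val |= (u64)irq->priority << LR_PRIORITY_SHIFT;
    if (irq->pending)
        val |= LR_STATE_PENDING;

    return val;
}

int main(void)
{
    struct fake_irq irq = { .intid = 27, .hwintid = 27, .priority = 0xa0, .pending = 1 };

    printf("lr = %llx\n", (unsigned long long)populate_lr(&irq));
    return 0;
}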
vgic-mmio-v2.c
    156: u64 val = 0;  in vgic_mmio_read_target()
    161: val |= (u64)irq->targets << (i * 8);  in vgic_mmio_read_target()
    202: u64 val = 0;  in vgic_mmio_read_sgipend()
    207: val |= (u64)irq->source << (i * 8);  in vgic_mmio_read_sgipend()
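vgic_mmio_read_target() and vgic_mmio_read_sgipend() pack one byte per interrupt into the returned register value, four interrupts to a 32-bit word; vgic_mmio_read_priority() in vgic-mmio.c below does the same with the priority byte. A sketch of that byte-lane packing over a flat array standing in for the per-IRQ state (pack_bytes() is a name invented for the demo):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint8_t  u8;

/* Pack 'count' per-IRQ bytes, starting at 'first', one byte lane per IRQ. */
static u64 pack_bytes(const u8 *per_irq, unsigned int first, unsigned int count)
{
    u64 val = 0;
    unsigned int i;

    for (i = 0; i < count; i++)
        val |= (u64)per_irq[first + i] << (i * 8);

    return val;
}

int main(void)
{
    /* e.g. target-CPU bytes for 8 interrupts */
    u8 targets[8] = { 0x01, 0x01, 0x02, 0x04, 0x01, 0x03, 0x01, 0x01 };

    printf("%08llx\n", (unsigned long long)pack_bytes(targets, 0, 4)); /* 04020101 */
    printf("%08llx\n", (unsigned long long)pack_bytes(targets, 4, 4)); /* 01010301 */
    return 0;
}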
vgic-mmio.c
    511: u64 val = 0;  in vgic_mmio_read_priority()
    516: val |= (u64)irq->priority << (i * 8);  in vgic_mmio_read_priority()
    603: u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)  in vgic_read_irq_line_level_info()
    606: u64 val = 0;  in vgic_read_irq_line_level_info()
    626: const u64 val)  in vgic_write_irq_line_level_info()
    768: case sizeof(u64):  in check_region()
/virt/kvm/arm/
pmu.c
    17: static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
    26: static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)  in kvm_pmu_idx_is_64bit()
    58: static bool kvm_pmu_idx_is_high_counter(u64 select_idx)  in kvm_pmu_idx_is_high_counter()
    84: static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)  in kvm_pmu_idx_has_chain_evtype()
    86: u64 eventsel, reg;  in kvm_pmu_idx_has_chain_evtype()
    104: static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,  in kvm_pmu_get_pair_counter_value()
    107: u64 counter, counter_high, reg, enabled, running;  in kvm_pmu_get_pair_counter_value()
    139: u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)  in kvm_pmu_get_counter_value()
    141: u64 counter;  in kvm_pmu_get_counter_value()
    162: void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)  in kvm_pmu_set_counter_value()
    [all …]
arch_timer.c
    49: u64 val);
    50: static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
    54: u64 kvm_phys_timer_read(void)  in kvm_phys_timer_read()
    80: static void soft_timer_start(struct hrtimer *hrt, u64 ns)  in soft_timer_start()
    123: static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)  in kvm_timer_compute_delta()
    125: u64 cval, now;  in kvm_timer_compute_delta()
    131: u64 ns;  in kvm_timer_compute_delta()
    155: static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)  in kvm_timer_earliest_exp()
    157: u64 min_delta = ULLONG_MAX;  in kvm_timer_earliest_exp()
    179: u64 ns;  in kvm_bg_timer_expire()
    [all …]
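kvm_timer_compute_delta() turns the distance between the timer's compare value and the current counter into a nanosecond delay for the soft timer. A simplified sketch of that calculation, assuming a fixed counter frequency and a plain division in place of the kernel's cyclecounter mult/shift conversion (timer_compute_delta_ns() is a name chosen for the demo):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/*
 * Nanoseconds until the timer fires, or 0 if the compare value has already
 * passed. 'freq_hz' stands in for the counter frequency. Note that
 * cycles * 1e9 can overflow for very long delays; the sketch ignores that.
 */
static u64 timer_compute_delta_ns(u64 cval, u64 now, u64 freq_hz)
{
    if (now < cval)
        return (cval - now) * 1000000000ULL / freq_hz;

    return 0;
}

int main(void)
{
    /* 50 MHz counter, timer programmed 5000 cycles in the future. */
    printf("%llu ns\n",
           (unsigned long long)timer_compute_delta_ns(105000, 100000, 50000000));
    return 0;
}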
psci.c
    456: int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)  in kvm_arm_copy_fw_reg_indices()
    480: static int get_kernel_wa_level(u64 regid)  in get_kernel_wa_level()
    525: u64 val;  in kvm_arm_get_fw_reg()
    555: u64 val;  in kvm_arm_set_fw_reg()
mmio.c
    21: u64 dword;  in kvm_mmio_write_buf()
    52: u64 dword;  in kvm_mmio_read_buf()
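kvm_mmio_write_buf() and kvm_mmio_read_buf() move a value between a u64 and the 1/2/4/8-byte data buffer of an MMIO exit, switching on the access length. A host-endian userspace sketch of that shape; the real code additionally byte-swaps on big-endian hosts, which is omitted here, and the function names are local stand-ins.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t  u8;

/* Store the low 'len' bytes of 'data' into 'buf' (len is 1, 2, 4 or 8). */
static void mmio_write_buf(void *buf, unsigned int len, u64 data)
{
    union {
        u8  byte;
        u16 hword;
        u32 word;
        u64 dword;
    } tmp;

    switch (len) {
    case 1: tmp.byte  = data; break;
    case 2: tmp.hword = data; break;
    case 4: tmp.word  = data; break;
    case 8: tmp.dword = data; break;
    }
    memcpy(buf, &tmp, len);
}

/* Load 'len' bytes from 'buf' into a zero-extended u64 (little-endian host). */
static u64 mmio_read_buf(const void *buf, unsigned int len)
{
    u64 data = 0;

    memcpy(&data, buf, len);
    return data;
}

int main(void)
{
    u8 buf[8] = { 0 };

    mmio_write_buf(buf, 4, 0xaabbccddeeff0011ULL);
    printf("%llx\n", (unsigned long long)mmio_read_buf(buf, 4)); /* eeff0011 */
    return 0;
}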
mmu.c
    335: static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size,  in __unmap_stage2_range()
    366: static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)  in unmap_stage2_range()
    541: phys_addr_t start, u64 size)  in __unmap_hyp_range()
    559: static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)  in unmap_hyp_range()
    564: static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)  in unmap_hyp_idmap_range()
    2030: gpa_t gpa, u64 size,  in handle_hva_to_gpa()
    2052: ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);  in handle_hva_to_gpa()
    2058: static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)  in kvm_unmap_hva_handler()
    2078: static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)  in kvm_set_spte_handler()
    2117: static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)  in kvm_age_hva_handler()
    [all …]
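handle_hva_to_gpa() walks the memslots, clips the incoming host-VA range against each slot's userspace mapping, and hands the overlapping part to the per-slot handler as a GPA range, which is where the `(u64)(hva_end - hva_start)` size at line 2052 comes from. A sketch of that clipping step over a hypothetical flat slot array; struct slot, for_each_overlap() and the sample addresses are all invented for the demo.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef u64 gpa_t;
typedef u64 hva_t;

#define PAGE_SHIFT 12

/* Hypothetical memslot: guest frame number, size in pages, host VA base. */
struct slot {
    u64   base_gfn;
    u64   npages;
    hva_t userspace_addr;
};

typedef void (*range_handler)(gpa_t gpa, u64 size);

static void for_each_overlap(const struct slot *slots, int nslots,
                             hva_t start, hva_t end, range_handler handler)
{
    int i;

    for (i = 0; i < nslots; i++) {
        const struct slot *s = &slots[i];
        hva_t s_start = s->userspace_addr;
        hva_t s_end = s_start + (s->npages << PAGE_SHIFT);
        /* Clip [start, end) against this slot's host range. */
        hva_t lo = start > s_start ? start : s_start;
        hva_t hi = end < s_end ? end : s_end;

        if (lo < hi) {
            gpa_t gpa = (s->base_gfn << PAGE_SHIFT) + (lo - s_start);

            handler(gpa, (u64)(hi - lo));
        }
    }
}

static void print_range(gpa_t gpa, u64 size)
{
    printf("gpa=%llx size=%llx\n",
           (unsigned long long)gpa, (unsigned long long)size);
}

int main(void)
{
    struct slot slots[] = {
        { .base_gfn = 0x100, .npages = 16, .userspace_addr = 0x7f0000000000ULL },
    };

    /* Operate on two pages in the middle of the slot. */
    for_each_overlap(slots, 1, 0x7f0000002000ULL, 0x7f0000004000ULL, print_range);
    return 0;
}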
arm.c
    513: u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);  in need_new_vmid_gen()
/virt/kvm/arm/hyp/
vgic-v3-sr.c
    19: static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)  in __gic_v3_get_lr()
    59: static void __hyp_text __gic_v3_set_lr(u64 val, int lr)  in __gic_v3_set_lr()
    200: u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;  in __vgic_v3_save_state()
    236: u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;  in __vgic_v3_restore_state()
    312: u64 val;  in __vgic_v3_deactivate_traps()
    339: u64 val;  in __vgic_v3_save_aprs()
    376: u64 val;  in __vgic_v3_restore_aprs()
    419: u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)  in __vgic_v3_get_ich_vtr_el2()
    424: u64 __hyp_text __vgic_v3_read_vmcr(void)  in __vgic_v3_read_vmcr()
    454: u64 *lr_val)  in __vgic_v3_highest_priority_lr()
    [all …]
timer-sr.c
    15: u64 cntvoff = (u64)cntvoff_high << 32 | cntvoff_low;  in __kvm_timer_set_cntvoff()
    25: u64 val;  in __timer_disable_traps()
    39: u64 val;  in __timer_enable_traps()
/virt/kvm/
eventfd.c
    117: u64 cnt;  in irqfd_shutdown()
    659: u64 addr;
    662: u64 datamatch;
    685: u64 _val;  in ioeventfd_in_range()
    718: _val = *(u64 *)val;  in ioeventfd_in_range()
kvm_main.c
    979: u64 gen = old_memslots->generation;  in install_new_memslots()
    1886: struct gfn_to_pfn_cache *cache, u64 gen)  in kvm_cache_gfn_to_pfn()
    1905: u64 gen = slots->generation;  in __kvm_map_gfn()
    2531: u64 block_ns;  in kvm_vcpu_block()
    4165: int (*get)(void *, u64 *), int (*set)(void *, u64),  in kvm_debugfs_open() argument
    4200: static int vm_stat_get_per_vm(void *data, u64 *val)  in vm_stat_get_per_vm()
    4209: static int vm_stat_clear_per_vm(void *data, u64 val)  in vm_stat_clear_per_vm()
    4237: static int vcpu_stat_get_per_vm(void *data, u64 *val)  in vcpu_stat_get_per_vm()
    4246: *val += *(u64 *)((void *)vcpu + stat_data->offset);  in vcpu_stat_get_per_vm()
    4251: static int vcpu_stat_clear_per_vm(void *data, u64 val)  in vcpu_stat_clear_per_vm()
    [all …]
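vcpu_stat_get_per_vm() sums a u64 that lives at a fixed byte offset inside each vcpu structure, which is what the `*(u64 *)((void *)vcpu + stat_data->offset)` hit at line 4246 does. A sketch of that offset-based accumulation using offsetof() over a made-up vcpu struct; struct fake_vcpu and stat_sum() exist only for the demo.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef uint64_t u64;

/* Made-up per-vcpu structure with a couple of counters. */
struct fake_vcpu {
    int id;
    struct {
        u64 exits;
        u64 halt_wakeup;
    } stat;
};

/* Sum the u64 found 'offset' bytes into each vcpu. */
static u64 stat_sum(struct fake_vcpu *vcpus, int nr, size_t offset)
{
    u64 val = 0;
    int i;

    for (i = 0; i < nr; i++)
        val += *(u64 *)((char *)&vcpus[i] + offset);

    return val;
}

int main(void)
{
    struct fake_vcpu vcpus[2] = {
        { .id = 0, .stat = { .exits = 100, .halt_wakeup = 3 } },
        { .id = 1, .stat = { .exits = 250, .halt_wakeup = 7 } },
    };

    printf("exits = %llu\n", (unsigned long long)
           stat_sum(vcpus, 2, offsetof(struct fake_vcpu, stat.exits)));
    printf("halt_wakeup = %llu\n", (unsigned long long)
           stat_sum(vcpus, 2, offsetof(struct fake_vcpu, stat.halt_wakeup)));
    return 0;
}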
vfio.c
    184: static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)  in kvm_vfio_set_group()