/virt/kvm/arm/ |
D | mmio.c |
    14  void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)  in kvm_mmio_write_buf() argument
    26  tmp.byte = data;  in kvm_mmio_write_buf()
    30  tmp.hword = data;  in kvm_mmio_write_buf()
    34  tmp.word = data;  in kvm_mmio_write_buf()
    38  tmp.dword = data;  in kvm_mmio_write_buf()
    48  unsigned long data = 0;  in kvm_mmio_read_buf() local
    57  data = *(u8 *)buf;  in kvm_mmio_read_buf()
    61  data = tmp.hword;  in kvm_mmio_read_buf()
    65  data = tmp.word;  in kvm_mmio_read_buf()
    69  data = tmp.dword;  in kvm_mmio_read_buf()
    [all …]
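
The matches above come from KVM's MMIO marshalling helpers: a write narrows an unsigned long to the access width through a sized union before copying it into the buffer, and a read widens the buffer back the same way. A minimal user-space sketch of that pattern, with stdint types standing in for the kernel's u8/u16/u32/u64 (any detail not visible in the snippets above is an assumption):

#include <stdint.h>
#include <string.h>

/* Narrow 'data' to the access width and copy it into 'buf' (sketch). */
static void mmio_write_buf(void *buf, unsigned int len, unsigned long data)
{
	union { uint8_t byte; uint16_t hword; uint32_t word; uint64_t dword; } tmp;
	void *datap = NULL;

	switch (len) {
	case 1: tmp.byte  = data; datap = &tmp.byte;  break;
	case 2: tmp.hword = data; datap = &tmp.hword; break;
	case 4: tmp.word  = data; datap = &tmp.word;  break;
	case 8: tmp.dword = data; datap = &tmp.dword; break;
	}
	if (datap)
		memcpy(buf, datap, len);
}

/* Widen a 'len'-byte buffer back into an unsigned long (sketch). */
static unsigned long mmio_read_buf(const void *buf, unsigned int len)
{
	unsigned long data = 0;
	union { uint16_t hword; uint32_t word; uint64_t dword; } tmp;

	switch (len) {
	case 1: data = *(const uint8_t *)buf;                 break;
	case 2: memcpy(&tmp.hword, buf, 2); data = tmp.hword; break;
	case 4: memcpy(&tmp.word,  buf, 4); data = tmp.word;  break;
	case 8: memcpy(&tmp.dword, buf, 8); data = tmp.dword; break;
	}
	return data;
}
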
|
D | pmu.c |
    442  struct perf_sample_data *data,  in kvm_pmu_perf_overflow() argument
    550  u64 eventsel, counter, reg, data;  in kvm_pmu_create_perf_event() local
    561  data = __vcpu_sys_reg(vcpu, reg);  in kvm_pmu_create_perf_event()
    564  eventsel = data & ARMV8_PMU_EVTYPE_EVENT;  in kvm_pmu_create_perf_event()
    576  attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;  in kvm_pmu_create_perf_event()
    577  attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;  in kvm_pmu_create_perf_event()
    655  void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,  in kvm_pmu_set_counter_event_type() argument
    658  u64 reg, event_type = data & ARMV8_PMU_EVTYPE_MASK;  in kvm_pmu_set_counter_event_type()
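
kvm_pmu_create_perf_event() reads the guest's PMEVTYPER value and splits it into an event number plus EL0/EL1 exclude bits for the host perf_event_attr. A hedged sketch of that decode; the mask values below are illustrative stand-ins for the kernel's ARMV8_PMU_* definitions, not copied from them:

#include <stdint.h>
#include <stdbool.h>

/* Illustrative values only: the real masks come from the kernel's
 * ARMV8_PMU_* macros (exclude bits at the top of PMEVTYPER, event
 * number in the low bits). */
#define PMU_EVTYPE_EVENT   0xffffUL
#define PMU_EXCLUDE_EL0    (1UL << 30)
#define PMU_EXCLUDE_EL1    (1UL << 31)

struct pmu_event_cfg {
	uint64_t eventsel;      /* hardware event number */
	bool exclude_user;      /* don't count at EL0 */
	bool exclude_kernel;    /* don't count at EL1 */
};

/* Decode a guest PMEVTYPER value the way the matches above suggest. */
static struct pmu_event_cfg decode_evtyper(uint64_t data)
{
	struct pmu_event_cfg cfg = {
		.eventsel       = data & PMU_EVTYPE_EVENT,
		.exclude_user   = !!(data & PMU_EXCLUDE_EL0),
		.exclude_kernel = !!(data & PMU_EXCLUDE_EL1),
	};
	return cfg;
}
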
|
D | mmu.c |
    2018  void *data),  in handle_hva_to_gpa() argument
    2019  void *data)  in handle_hva_to_gpa()
    2039  ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);  in handle_hva_to_gpa()
    2045  static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)  in kvm_unmap_hva_handler() argument
    2062  static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)  in kvm_set_spte_handler() argument
    2064  pte_t *pte = (pte_t *)data;  in kvm_set_spte_handler()
    2101  static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)  in kvm_age_hva_handler() argument
    2119  static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)  in kvm_test_age_hva_handler() argument
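
handle_hva_to_gpa() is a generic walker: it finds the guest-physical ranges backing a host-virtual range and hands each one, together with an opaque void *data cookie, to one of the handlers listed above. A simplified sketch of that callback shape, built on hypothetical stand-in structures (memslot/vm) rather than the real kernel types:

#include <stdint.h>

typedef uint64_t gpa_t;

/* Hypothetical, simplified stand-ins for the kernel structures. */
struct memslot { unsigned long hva_start, hva_end; gpa_t gpa_base; };
struct vm { struct memslot *slots; int nr_slots; };

/* Walk every memslot that overlaps [hva_start, hva_end) and hand the
 * matching guest-physical range to 'handler', passing 'data' through
 * untouched -- the same shape as the handle_hva_to_gpa() matches above. */
static int for_each_hva_range(struct vm *vm,
			      unsigned long hva_start, unsigned long hva_end,
			      int (*handler)(struct vm *vm, gpa_t gpa,
					     uint64_t size, void *data),
			      void *data)
{
	int i, ret = 0;

	for (i = 0; i < vm->nr_slots; i++) {
		struct memslot *m = &vm->slots[i];
		unsigned long start = hva_start > m->hva_start ? hva_start : m->hva_start;
		unsigned long end   = hva_end   < m->hva_end   ? hva_end   : m->hva_end;

		if (start >= end)
			continue;	/* no overlap with this slot */

		ret |= handler(vm, m->gpa_base + (start - m->hva_start),
			       (uint64_t)(end - start), data);
	}
	return ret;
}
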
|
/virt/kvm/arm/vgic/ |
D | vgic-mmio.c |
    679  unsigned long data = kvm_mmio_read_buf(val, len);  in vgic_data_mmio_bus_to_host() local
    683  return data;  in vgic_data_mmio_bus_to_host()
    685  return le16_to_cpu(data);  in vgic_data_mmio_bus_to_host()
    687  return le32_to_cpu(data);  in vgic_data_mmio_bus_to_host()
    689  return le64_to_cpu(data);  in vgic_data_mmio_bus_to_host()
    703  unsigned long data)  in vgic_data_host_to_mmio_bus() argument
    709  data = cpu_to_le16(data);  in vgic_data_host_to_mmio_bus()
    712  data = cpu_to_le32(data);  in vgic_data_host_to_mmio_bus()
    715  data = cpu_to_le64(data);  in vgic_data_host_to_mmio_bus()
    718  kvm_mmio_write_buf(buf, len, data);  in vgic_data_host_to_mmio_bus()
    [all …]
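
vgic_data_mmio_bus_to_host() and vgic_data_host_to_mmio_bus() pick an endianness conversion by access width, since the emulated GIC's MMIO bus is little-endian. A user-space sketch of the same idea, with endian.h's le16toh/htole16 family standing in for the kernel's le16_to_cpu/cpu_to_le16 helpers:

#include <endian.h>

/* Convert little-endian MMIO data to host order, by access width. */
static unsigned long mmio_bus_to_host(unsigned long data, unsigned int len)
{
	switch (len) {
	case 1:  return data;
	case 2:  return le16toh(data);
	case 4:  return le32toh(data);
	default: return le64toh(data);
	}
}

/* Convert a host-order value to the little-endian MMIO bus format. */
static unsigned long host_to_mmio_bus(unsigned long data, unsigned int len)
{
	switch (len) {
	case 1:  return data;
	case 2:  return htole16(data);
	case 4:  return htole32(data);
	default: return htole64(data);
	}
}
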
|
D | vgic-irqfd.c |
    57  e->msi.data = ue->u.msi.data;  in kvm_set_routing_entry()
    74  msi->data = e->msi.data;  in kvm_populate_msi()
|
D | vgic-v4.c |
    240  .data = irq_entry->msi.data,  in vgic_get_its()
    271  irq_entry->msi.data, &irq);  in kvm_vgic_v4_set_forwarding()
    324  irq_entry->msi.data, &irq);  in kvm_vgic_v4_unset_forwarding()
|
D | vgic.c |
    485  struct irq_data *data;  in kvm_vgic_map_irq() local
    495  data = irq_desc_get_irq_data(desc);  in kvm_vgic_map_irq()
    496  while (data->parent_data)  in kvm_vgic_map_irq()
    497  data = data->parent_data;  in kvm_vgic_map_irq()
    501  irq->hwintid = data->hwirq;  in kvm_vgic_map_irq()
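
kvm_vgic_map_irq() walks irq_data->parent_data up to the root of the IRQ domain hierarchy so the recorded hwintid is the number the hardware actually sees, not a translated one from an intermediate domain. A compact sketch of that walk over a hypothetical, simplified irq_data:

/* Hypothetical, simplified irq_data: the kernel's struct has more fields. */
struct irq_data_sketch {
	unsigned long hwirq;
	struct irq_data_sketch *parent_data;
};

/* Follow the hierarchy to its root, as the kvm_vgic_map_irq() matches
 * above do, and return the root's hardware interrupt number. */
static unsigned long root_hwirq(struct irq_data_sketch *data)
{
	while (data->parent_data)
		data = data->parent_data;
	return data->hwirq;
}
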
|
D | vgic-mmio.h |
    109  unsigned long data);
    111  unsigned long extract_bytes(u64 data, unsigned int offset,
|
D | vgic-init.c | 459 static irqreturn_t vgic_maintenance_handler(int irq, void *data) in vgic_maintenance_handler() argument
|
D | vgic-mmio-v3.c |
    20  unsigned long extract_bytes(u64 data, unsigned int offset,  in extract_bytes() argument
    23  return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);  in extract_bytes()
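
extract_bytes() pulls num bytes starting at byte offset out of a 64-bit register value, which is how wide vGICv3 registers are served to narrower MMIO accesses. A self-contained sketch of the same computation, with GENMASK_ULL open-coded and the num == 8 case guarded so the shift never overflows:

#include <stdint.h>

/* Shift the wanted bytes down and mask off everything above them. */
static uint64_t extract_bytes(uint64_t data, unsigned int offset, unsigned int num)
{
	uint64_t mask = (num >= 8) ? ~0ULL : ((1ULL << (num * 8)) - 1);

	return (data >> (offset * 8)) & mask;
}

/* e.g. extract_bytes(0x1122334455667788ULL, 2, 4) == 0x33445566 */
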
|
D | vgic-its.c |
    749  irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);  in vgic_its_inject_cached_translation()
    780  ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);  in vgic_its_inject_msi()
|
/virt/kvm/ |
D | kvm_main.c |
    1953  void *data, int offset, int len)  in __kvm_read_guest_page() argument
    1961  r = __copy_from_user(data, (void __user *)addr + offset, len);  in __kvm_read_guest_page()
    1967  int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,  in kvm_read_guest_page() argument
    1972  return __kvm_read_guest_page(slot, gfn, data, offset, len);  in kvm_read_guest_page()
    1976  int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,  in kvm_vcpu_read_guest_page() argument
    1981  return __kvm_read_guest_page(slot, gfn, data, offset, len);  in kvm_vcpu_read_guest_page()
    1985  int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)  in kvm_read_guest() argument
    1993  ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);  in kvm_read_guest()
    1998  data += seg;  in kvm_read_guest()
    2005  int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)  in kvm_vcpu_read_guest() argument
    [all …]
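
kvm_read_guest() splits a guest-physical read at page boundaries and issues one kvm_read_guest_page() per chunk, advancing data and gfn as it goes. A sketch of that segmentation loop against a hypothetical per-page read callback (the 4 KiB PAGE_SIZE/PAGE_SHIFT values are an assumption):

#include <stdint.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

/* Hypothetical per-page primitive standing in for kvm_read_guest_page(). */
typedef int (*read_page_fn)(uint64_t gfn, void *data, int offset, int len);

/* Split a guest-physical read at page boundaries and issue one per-page
 * read for each chunk, advancing 'data' as the matches above do. */
static int read_guest(uint64_t gpa, void *data, unsigned long len,
		      read_page_fn read_page)
{
	uint64_t gfn = gpa >> PAGE_SHIFT;
	int offset = gpa & (PAGE_SIZE - 1);

	while (len) {
		int seg = PAGE_SIZE - offset;
		int ret;

		if ((unsigned long)seg > len)
			seg = len;
		ret = read_page(gfn, data, offset, seg);
		if (ret < 0)
			return ret;	/* propagate the per-page failure */
		offset = 0;
		len  -= seg;
		data  = (char *)data + seg;
		++gfn;
	}
	return 0;
}
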
|
D | irqchip.c | 58 route.msi.data = msi->data; in kvm_send_userspace_msi()
|
D | coalesced_mmio.c | 88 memcpy(ring->coalesced_mmio[insert].data, val, len); in coalesced_mmio_write()
|