Lines matching references to size
335 static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size, in __unmap_stage2_range() argument
339 phys_addr_t addr = start, end = start + size; in __unmap_stage2_range()
343 WARN_ON(size & ~PAGE_MASK); in __unmap_stage2_range()
366 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) in unmap_stage2_range() argument
368 __unmap_stage2_range(kvm, start, size, true); in unmap_stage2_range()
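
The WARN_ON(size & ~PAGE_MASK) at line 343 above only fires when the unmap size is not a whole number of pages. A minimal userspace sketch of that bitmask test, assuming 4 KiB pages (PAGE_SIZE and PAGE_MASK are redefined locally rather than taken from kernel headers):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel macros; 4 KiB pages assumed. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t sizes[] = { 0x1000, 0x3000, 0x3200 };

	for (int i = 0; i < 3; i++) {
		/* Non-zero low bits mean the size is not page aligned,
		 * which is exactly what WARN_ON(size & ~PAGE_MASK) catches. */
		printf("size=0x%llx misaligned=%d\n",
		       (unsigned long long)sizes[i],
		       (sizes[i] & ~PAGE_MASK) != 0);
	}
	return 0;
}
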
541 phys_addr_t start, u64 size) in __unmap_hyp_range() argument
544 phys_addr_t addr = start, end = start + size; in __unmap_hyp_range()
559 static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size) in unmap_hyp_range() argument
561 __unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size); in unmap_hyp_range()
564 static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size) in unmap_hyp_idmap_range() argument
566 __unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size); in unmap_hyp_idmap_range()
784 static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size, in __create_hyp_private_mapping() argument
801 size = PAGE_ALIGN(size + offset_in_page(phys_addr)); in __create_hyp_private_mapping()
802 base = io_map_base - size; in __create_hyp_private_mapping()
823 base, base + size, in __create_hyp_private_mapping()
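
Lines 801-802 above grow the requested size so it covers the sub-page offset of phys_addr, then carve the hyp VA window downwards from io_map_base. A small userspace sketch of that arithmetic with made-up numbers (the io_map_base value and the 4 KiB page size are assumptions for illustration, not values from the kernel):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)		(((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define offset_in_page(p)	((uint64_t)(p) & (PAGE_SIZE - 1))

int main(void)
{
	uint64_t io_map_base = 0x40000000;	/* hypothetical allocator top */
	uint64_t phys_addr   = 0x0a001040;	/* device registers, not page aligned */
	uint64_t size        = 0x20;		/* 32 bytes requested */
	uint64_t base;

	/* Same two steps as lines 801-802: pad the size so the whole page(s)
	 * containing [phys_addr, phys_addr + size) are covered, then take
	 * that many bytes downwards from io_map_base. */
	size = PAGE_ALIGN(size + offset_in_page(phys_addr));
	base = io_map_base - size;

	printf("size=0x%llx base=0x%llx\n",
	       (unsigned long long)size, (unsigned long long)base);
	/* Prints size=0x1000 base=0x3ffff000 with the values above. */
	return 0;
}
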
841 int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, in create_hyp_io_mappings() argument
848 *kaddr = ioremap(phys_addr, size); in create_hyp_io_mappings()
857 ret = __create_hyp_private_mapping(phys_addr, size, in create_hyp_io_mappings()
876 int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, in create_hyp_exec_mappings() argument
884 ret = __create_hyp_private_mapping(phys_addr, size, in create_hyp_exec_mappings()
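
A hedged usage sketch for create_hyp_io_mappings(): the listing shows it takes the device's physical address and size, ioremaps the region for the kernel (line 848) and adds a hyp private mapping (line 857). The two out-parameters below, and the header that declares the function, are assumptions inferred from *kaddr = ioremap(...) rather than copied from this file:

#include <linux/types.h>
#include <asm/kvm_mmu.h>	/* assumed location of the declaration */

/* Hypothetical caller mapping a device region into both the host kernel
 * and the EL2/hyp address space; error handling trimmed. */
static void __iomem *dev_kaddr;	/* kernel VA, filled via ioremap() */
static void __iomem *dev_haddr;	/* hyp VA from the private mapping */

static int map_device_for_hyp(phys_addr_t dev_base, size_t dev_size)
{
	int ret;

	ret = create_hyp_io_mappings(dev_base, dev_size,
				     &dev_kaddr, &dev_haddr);
	if (ret)
		return ret;

	/* dev_kaddr is usable from the host kernel, dev_haddr from hyp. */
	return 0;
}
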
934 phys_addr_t size = PAGE_SIZE * memslot->npages; in stage2_unmap_memslot() local
935 hva_t reg_end = hva + size; in stage2_unmap_memslot()
1346 phys_addr_t pa, unsigned long size, bool writable) in kvm_phys_addr_ioremap() argument
1353 end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; in kvm_phys_addr_ioremap()
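
Line 1353 rounds the end of the guest-IPA window up to the next page boundary so the mapping loop still covers a partially used last page. A worked example of that expression, with hypothetical addresses and 4 KiB pages assumed:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical MMIO window that starts and ends mid-page. */
	uint64_t guest_ipa = 0x8001200;
	uint64_t size      = 0x300;
	uint64_t end;

	/* Same expression as line 1353: round the end of the region up so
	 * the final, partially covered page is still mapped. */
	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;

	printf("end=0x%llx (last byte 0x%llx)\n",
	       (unsigned long long)end,
	       (unsigned long long)(guest_ipa + size - 1));
	/* Prints end=0x8002000: the page containing 0x80014ff is included. */
	return 0;
}
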
1595 static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size) in clean_dcache_guest_page() argument
1597 __clean_dcache_guest_page(pfn, size); in clean_dcache_guest_page()
1600 static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size) in invalidate_icache_guest_page() argument
1602 __invalidate_icache_guest_page(pfn, size); in invalidate_icache_guest_page()
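
The two wrappers above forward a pfn and a byte count to the arch helpers. A hedged sketch of the typical call pattern once a new guest page has been populated; the surrounding function, its name and the exec_fault/vma_pagesize variables are illustrative assumptions, not lines from this file:

/* Hypothetical excerpt from a stage-2 fault path: after filling the page
 * that will back the guest mapping, push it out of the D-cache, and drop
 * stale I-cache lines if the guest may execute from it. */
static void finish_guest_page(kvm_pfn_t pfn, unsigned long vma_pagesize,
			      bool exec_fault)
{
	clean_dcache_guest_page(pfn, vma_pagesize);

	if (exec_fault)
		invalidate_icache_guest_page(pfn, vma_pagesize);
}
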
1624 size_t size; in fault_supports_stage2_huge_mapping() local
1626 size = memslot->npages * PAGE_SIZE; in fault_supports_stage2_huge_mapping()
1631 uaddr_end = uaddr_start + size; in fault_supports_stage2_huge_mapping()
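
fault_supports_stage2_huge_mapping() derives the memslot's userspace range from its page count (lines 1626-1631). The sketch below is a guess at how such a check can be expressed, based only on the function name and the bounds computed above: a PMD- or PUD-sized block around the faulting hva must lie entirely inside [uaddr_start, uaddr_end), and the guest-physical and userspace bases must share the same offset at that granularity. It illustrates the idea, not the kernel's exact predicate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative check: can a block of map_size bytes (e.g. 2 MiB for a PMD)
 * be used for the fault at hva, given the memslot's userspace range and its
 * guest-physical base? Hypothetical, simplified logic. */
static bool huge_mapping_fits(uint64_t gpa_start, uint64_t uaddr_start,
			      uint64_t uaddr_end, uint64_t hva,
			      uint64_t map_size)
{
	uint64_t block_start = hva & ~(map_size - 1);

	/* Guest IPA and userspace VA must share the same offset within a
	 * block, otherwise one block entry would map the wrong pages. */
	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
		return false;

	/* The whole block must be backed by this memslot. */
	return block_start >= uaddr_start &&
	       block_start + map_size <= uaddr_end;
}

int main(void)
{
	uint64_t pmd_size = 2UL << 20;	/* 2 MiB block, 4 KiB pages assumed */

	printf("%d\n", huge_mapping_fits(0x40000000, 0x7f0000000000,
					 0x7f0000000000 + (64UL << 20),
					 0x7f0000100000, pmd_size));
	return 0;
}
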
2030 gpa_t gpa, u64 size, in handle_hva_to_gpa() argument
2058 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) in kvm_unmap_hva_handler() argument
2063 __unmap_stage2_range(kvm, gpa, size, may_block); in kvm_unmap_hva_handler()
2078 static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) in kvm_set_spte_handler() argument
2082 WARN_ON(size != PAGE_SIZE); in kvm_set_spte_handler()
2117 static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) in kvm_age_hva_handler() argument
2123 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); in kvm_age_hva_handler()
2135 static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) in kvm_test_age_hva_handler() argument
2141 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); in kvm_test_age_hva_handler()
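
The four handlers above share one shape, int handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data), which handle_hva_to_gpa() invokes for each guest-physical range overlapping the hva range being operated on; the WARN_ONs show the granularity can be a page, a PMD or a PUD. A hedged sketch of a handler following that signature (the accounting it does, and the function name, are made up for illustration):

#include <linux/kvm_host.h>

/* Hypothetical handler in the handle_hva_to_gpa() callback shape: it just
 * tallies how many bytes of guest-physical space were visited. The data
 * cookie points at the caller's accumulator. */
static int count_bytes_handler(struct kvm *kvm, gpa_t gpa, u64 size,
			       void *data)
{
	u64 *total = data;

	*total += size;
	return 0;
}

/* Presumably passed to handle_hva_to_gpa() together with the hva range and
 * a pointer to the u64 accumulator as the data cookie. */
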
2409 phys_addr_t size = slot->npages << PAGE_SHIFT; in kvm_arch_flush_shadow_memslot() local
2412 unmap_stage2_range(kvm, gpa, size); in kvm_arch_flush_shadow_memslot()
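
Line 2409 computes the memslot size as npages << PAGE_SHIFT, the same quantity line 934 obtained as PAGE_SIZE * memslot->npages; the shift and the multiply are equivalent because PAGE_SIZE is 1 << PAGE_SHIFT. A tiny demonstration with a hypothetical memslot, 4 KiB pages assumed:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)

int main(void)
{
	uint64_t npages = 262144;	/* hypothetical 1 GiB memslot */

	/* Line 2409 style vs line 934 style: identical result. */
	assert((npages << PAGE_SHIFT) == PAGE_SIZE * npages);
	printf("memslot size: 0x%llx bytes\n",
	       (unsigned long long)(npages << PAGE_SHIFT));
	return 0;
}
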