/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM64_KVM_NVHE_IOMMU_H__
#define __ARM64_KVM_NVHE_IOMMU_H__

#include <asm/kvm_host.h>
#include <asm/kvm_pgtable.h>

#include <kvm/iommu.h>

#include <nvhe/alloc_mgt.h>
#include <nvhe/spinlock.h>

/* alloc/free from atomic pool. */
void *kvm_iommu_donate_pages_atomic(u8 order);
void kvm_iommu_reclaim_pages_atomic(void *p, u8 order);

/* Hypercall handlers */
int kvm_iommu_alloc_domain(pkvm_handle_t domain_id, int type);
int kvm_iommu_free_domain(pkvm_handle_t domain_id);
int kvm_iommu_attach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
			 u32 endpoint_id, u32 pasid, u32 pasid_bits,
			 unsigned long flags);
int kvm_iommu_detach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
			 u32 endpoint_id, u32 pasid);
size_t kvm_iommu_map_pages(pkvm_handle_t domain_id,
			   unsigned long iova, phys_addr_t paddr, size_t pgsize,
			   size_t pgcount, int prot, unsigned long *mapped);
size_t kvm_iommu_unmap_pages(pkvm_handle_t domain_id, unsigned long iova,
			     size_t pgsize, size_t pgcount);
phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t domain_id, unsigned long iova);
bool kvm_iommu_host_dabt_handler(struct kvm_cpu_context *host_ctxt, u64 esr, u64 addr);
size_t kvm_iommu_map_sg(pkvm_handle_t domain, unsigned long iova, struct kvm_iommu_sg *sg,
			unsigned int nent, unsigned int prot);
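
/*
 * Illustrative sketch (not part of this header): a typical host-driven
 * sequence through the handlers above. All handles, sizes and flag values
 * are placeholders.
 *
 *	kvm_iommu_alloc_domain(dom, type);
 *	kvm_iommu_attach_dev(iommu, dom, endpoint, pasid, pasid_bits, flags);
 *	kvm_iommu_map_pages(dom, iova, paddr, PAGE_SIZE, pgcount, prot, &mapped);
 *	...
 *	kvm_iommu_unmap_pages(dom, iova, PAGE_SIZE, pgcount);
 *	kvm_iommu_detach_dev(iommu, dom, endpoint, pasid);
 *	kvm_iommu_free_domain(dom);
 */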

/* Flags for memory allocation for IOMMU drivers */
#define IOMMU_PAGE_NOCACHE	BIT(0)
void *kvm_iommu_donate_pages(u8 order, int flags);
void kvm_iommu_reclaim_pages(void *p, u8 order);

#define kvm_iommu_donate_page()		kvm_iommu_donate_pages(0, 0)
#define kvm_iommu_donate_page_nc()	kvm_iommu_donate_pages(0, IOMMU_PAGE_NOCACHE)
#define kvm_iommu_reclaim_page(p)	kvm_iommu_reclaim_pages(p, 0)
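
/*
 * Example (sketch only): a driver allocating a page for device-visible
 * tables, falling back to a non-cacheable mapping when the IOMMU is not
 * I/O-coherent. "coherent" is a hypothetical driver-side flag.
 *
 *	void *p = coherent ? kvm_iommu_donate_page()
 *			   : kvm_iommu_donate_page_nc();
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	kvm_iommu_reclaim_page(p);
 */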

void kvm_iommu_host_stage2_idmap(phys_addr_t start, phys_addr_t end,
				 enum kvm_pgtable_prot prot);
void kvm_iommu_host_stage2_idmap_complete(bool map);
int kvm_iommu_snapshot_host_stage2(struct kvm_hyp_iommu_domain *domain);
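
/*
 * Note (summary inferred from the prototypes above):
 * kvm_iommu_host_stage2_idmap() is called when permissions in the host
 * stage-2 identity map change, so the same range can be mirrored into the
 * IOMMU identity-mapped (host DMA) domain;
 * kvm_iommu_host_stage2_idmap_complete() signals the end of such an update,
 * and kvm_iommu_snapshot_host_stage2() populates a domain from the current
 * host stage-2 state.
 */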

int kvm_iommu_dev_block_dma(pkvm_handle_t iommu_id, u32 endpoint_id, bool host_to_guest);

struct pkvm_hyp_vm;
int kvm_iommu_force_free_domain(pkvm_handle_t domain_id, struct pkvm_hyp_vm *vm);
int kvm_iommu_id_to_token(pkvm_handle_t smmu_id, u64 *out_token);

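/*
 * Callbacks implemented by the hypervisor-side IOMMU driver (e.g. a pKVM
 * SMMUv3 driver) and invoked by the common code above. The summary below is
 * inferred from the prototypes; see the implementations for exact contracts:
 *
 *  - init() runs once from kvm_iommu_init().
 *  - alloc_domain()/free_domain() manage driver state for a domain.
 *  - attach_dev()/detach_dev() connect or disconnect an endpoint (and
 *    optional PASID) on a given IOMMU instance.
 *  - map_pages()/unmap_pages()/iova_to_phys() operate on the domain's page
 *    tables; unmap_pages() may defer invalidation via the gather argument
 *    until iotlb_sync().
 *  - host_stage2_idmap()/host_stage2_idmap_complete() mirror host stage-2
 *    permission changes into the identity-mapped domain.
 *  - dev_block_dma() blocks DMA from an endpoint during a host<->guest
 *    transition.
 */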
struct kvm_iommu_ops {
	int (*init)(void);
	int (*alloc_domain)(struct kvm_hyp_iommu_domain *domain, int type);
	void (*free_domain)(struct kvm_hyp_iommu_domain *domain);
	struct kvm_hyp_iommu *(*get_iommu_by_id)(pkvm_handle_t iommu_id);
	int (*attach_dev)(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_domain *domain,
			  u32 endpoint_id, u32 pasid, u32 pasid_bits, unsigned long flags);
	int (*detach_dev)(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_domain *domain,
			  u32 endpoint_id, u32 pasid);
	int (*map_pages)(struct kvm_hyp_iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize,
			 size_t pgcount, int prot, size_t *total_mapped);
	size_t (*unmap_pages)(struct kvm_hyp_iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *gather);
	phys_addr_t (*iova_to_phys)(struct kvm_hyp_iommu_domain *domain, unsigned long iova);
	void (*iotlb_sync)(struct kvm_hyp_iommu_domain *domain,
			   struct iommu_iotlb_gather *gather);
	bool (*dabt_handler)(struct user_pt_regs *regs, u64 esr, u64 addr);
	void (*host_stage2_idmap)(struct kvm_hyp_iommu_domain *domain,
				  phys_addr_t start, phys_addr_t end, int prot);
	void (*host_stage2_idmap_complete)(bool map);
	int (*suspend)(struct kvm_hyp_iommu *iommu);
	int (*resume)(struct kvm_hyp_iommu *iommu);
	int (*dev_block_dma)(struct kvm_hyp_iommu *iommu, u32 endpoint_id,
			     bool is_host_to_guest);
	int (*get_iommu_token_by_id)(pkvm_handle_t smmu_id, u64 *out_token);
	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
	ANDROID_KABI_RESERVE(5);
	ANDROID_KABI_RESERVE(6);
	ANDROID_KABI_RESERVE(7);
	ANDROID_KABI_RESERVE(8);
};

int kvm_iommu_init(struct kvm_iommu_ops *ops,
		   struct kvm_hyp_memcache *atomic_mc);
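
/*
 * Registration sketch (illustrative; "my_iommu_ops", "my_init" and "mc" are
 * hypothetical): the driver supplies its ops together with an initial
 * memcache (atomic_mc) backing the atomic pool.
 *
 *	static struct kvm_iommu_ops my_iommu_ops = {
 *		.init		= my_init,
 *		.attach_dev	= my_attach_dev,
 *		...
 *	};
 *
 *	ret = kvm_iommu_init(&my_iommu_ops, mc);
 */
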
int kvm_iommu_init_device(struct kvm_hyp_iommu *iommu);

int iommu_pkvm_unuse_dma(u64 phys_addr, size_t size);

void kvm_iommu_iotlb_gather_add_page(struct kvm_hyp_iommu_domain *domain,
				     struct iommu_iotlb_gather *gather,
				     unsigned long iova,
				     size_t size);
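
/*
 * Sketch of a driver unmap path using the gather helper (illustrative;
 * "my_unmap_leaf" is a hypothetical driver function): unmapped ranges are
 * queued with kvm_iommu_iotlb_gather_add_page() and invalidated later from
 * the driver's iotlb_sync() callback.
 *
 *	size_t unmapped = my_unmap_leaf(domain, iova, pgsize);
 *	if (unmapped)
 *		kvm_iommu_iotlb_gather_add_page(domain, gather, iova, unmapped);
 */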

static inline hyp_spinlock_t *kvm_iommu_get_lock(struct kvm_hyp_iommu *iommu)
{
	/* See struct kvm_hyp_iommu */
	BUILD_BUG_ON(sizeof(iommu->lock) != sizeof(hyp_spinlock_t));
	return (hyp_spinlock_t *)(&iommu->lock);
}

static inline void kvm_iommu_lock_init(struct kvm_hyp_iommu *iommu)
{
	hyp_spin_lock_init(kvm_iommu_get_lock(iommu));
}

static inline void kvm_iommu_lock(struct kvm_hyp_iommu *iommu)
{
	hyp_spin_lock(kvm_iommu_get_lock(iommu));
}

static inline void kvm_iommu_unlock(struct kvm_hyp_iommu *iommu)
{
	hyp_spin_unlock(kvm_iommu_get_lock(iommu));
}
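
/*
 * Usage sketch for the helpers above (illustrative; the state update is a
 * placeholder): per-IOMMU state is modified under the IOMMU lock.
 *
 *	kvm_iommu_lock(iommu);
 *	... update per-IOMMU state, e.g. an endpoint configuration ...
 *	kvm_iommu_unlock(iommu);
 */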

extern struct hyp_mgt_allocator_ops kvm_iommu_allocator_ops;

#endif /* __ARM64_KVM_NVHE_IOMMU_H__ */