// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Google LLC
 * Author: Mostafa Saleh <smostafa@google.com>
 */

#include <asm/kvm_mmu.h>
#include <asm/kvm_pkvm.h>

#include <linux/cma.h>
#include <linux/of_reserved_mem.h>
#include <kvm/iommu.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>

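/*
 * Issue a hypercall and retry it whenever the hypervisor reports a failure
 * that can be resolved by topping up its IOMMU memcache. Evaluates to the
 * final a1 value of the SMCCC result.
 */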
#define kvm_call_hyp_nvhe_mc(...)					\
({									\
	struct arm_smccc_res __res;					\
	do {								\
		__res = kvm_call_hyp_nvhe_smccc(__VA_ARGS__);		\
	} while (__res.a1 && !kvm_iommu_topup_memcache(&__res, GFP_KERNEL));\
	__res.a1;							\
})

static phys_addr_t __topup_virt_to_phys(void *virt)
{
	return __pa(virt);
}

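/*
 * Allocate a PMD-sized block from the IOMMU CMA pool. Atomic callers are
 * served from a single spare block, kept topped up by non-atomic callers,
 * because cma_alloc() cannot be used from atomic context.
 */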
static struct page *__kvm_iommu_alloc_from_cma(gfp_t gfp)
{
	bool from_spare = (gfp & GFP_ATOMIC) == GFP_ATOMIC;
	static atomic64_t spare_p;
	struct page *p = NULL;

again:
	if (from_spare)
		return (struct page *)atomic64_cmpxchg(&spare_p, atomic64_read(&spare_p), 0);

	p = kvm_iommu_cma_alloc();
	if (!p) {
		from_spare = true;
		goto again;
	}

	/*
	 * Top-up the spare block if necessary. If we failed to update spare_p
	 * then someone did it already and we can proceed with that page.
	 */
	if (!atomic64_read(&spare_p)) {
		if (!atomic64_cmpxchg(&spare_p, 0, (u64)p))
			goto again;
	}

	return p;
}

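/*
 * Donate PMD-sized CMA blocks to the hypervisor IOMMU allocator until at
 * least @size bytes have been provided. Returns 0 on success; on failure,
 * *@allocated holds the number of bytes that were actually donated.
 */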
static int __kvm_iommu_topup_memcache_from_cma(size_t size, gfp_t gfp, size_t *allocated)
{
	*allocated = 0;

	while (*allocated < size) {
		struct page *p = __kvm_iommu_alloc_from_cma(gfp);
		struct kvm_hyp_memcache mc;

		if (!p)
			return -ENOMEM;

		init_hyp_memcache(&mc);
		push_hyp_memcache(&mc, page_to_virt(p), __topup_virt_to_phys,
				  PMD_SHIFT - PAGE_SHIFT);

		if (__pkvm_topup_hyp_alloc_mgt_mc(HYP_ALLOC_MGT_IOMMU_ID, &mc)) {
			kvm_iommu_cma_release(p);
			return -EINVAL;
		}

		*allocated += PMD_SIZE;
	}

	return 0;
}

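/*
 * Decode the request the hypervisor encoded in @res and try to satisfy it:
 * either refill the hyp allocator or donate memory to the hyp IOMMU
 * memcache, using PMD-sized CMA blocks where they can serve the request.
 */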
static int kvm_iommu_topup_memcache(struct arm_smccc_res *res, gfp_t gfp)
{
	struct kvm_hyp_req req;

	hyp_reqs_smccc_decode(res, &req);

	if ((res->a1 == -ENOMEM) && (req.type != KVM_HYP_REQ_TYPE_MEM)) {
		/*
		 * There is no way for drivers to populate hyp_alloc requests,
		 * so -ENOMEM + no request indicates that.
		 */
		return __pkvm_topup_hyp_alloc(1);
	} else if (req.type != KVM_HYP_REQ_TYPE_MEM) {
		return -EBADE;
	}

	if (req.mem.dest == REQ_MEM_DEST_HYP_IOMMU) {
		size_t nr_pages, from_cma = 0;
		int ret;

		nr_pages = req.mem.nr_pages;

		if (req.mem.sz_alloc < PMD_SIZE) {
			size_t size = req.mem.sz_alloc * nr_pages;

			ret = __kvm_iommu_topup_memcache_from_cma(size, gfp, &from_cma);
			if (!ret)
				return 0;

			nr_pages -= from_cma / req.mem.sz_alloc;
		}

		return __pkvm_topup_hyp_alloc_mgt_gfp(HYP_ALLOC_MGT_IOMMU_ID,
						      nr_pages,
						      req.mem.sz_alloc,
						      gfp);
	} else if (req.mem.dest == REQ_MEM_DEST_HYP_ALLOC) {
		/* Fill the hyp allocator */
		return __pkvm_topup_hyp_alloc(req.mem.nr_pages);
	}

	pr_err("Bogus mem request\n");
	return -EBADE;
}

struct kvm_iommu_driver *iommu_driver;
extern struct kvm_iommu_ops *kvm_nvhe_sym(kvm_iommu_ops);

static struct cma *kvm_iommu_cma;
extern phys_addr_t kvm_nvhe_sym(cma_base);
extern size_t kvm_nvhe_sym(cma_size);

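/*
 * Register the kernel-side pKVM IOMMU driver. Only one driver can be
 * registered; later calls fail with -EBUSY.
 */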
int kvm_iommu_register_driver(struct kvm_iommu_driver *kern_ops)
{
	if (WARN_ON(!kern_ops))
		return -EINVAL;

	/*
	 * Paired with smp_load_acquire(&iommu_driver)
	 * Ensure memory stores happening during a driver
	 * init are observed before executing kvm iommu callbacks.
	 */
	return cmpxchg_release(&iommu_driver, NULL, kern_ops) ? -EBUSY : 0;
}
EXPORT_SYMBOL(kvm_iommu_register_driver);

int kvm_iommu_init_hyp(struct kvm_iommu_ops *hyp_ops,
		       struct kvm_hyp_memcache *atomic_mc)
{
	if (!hyp_ops)
		return -EINVAL;

	return kvm_call_hyp_nvhe(__pkvm_iommu_init, hyp_ops,
				 atomic_mc->head, atomic_mc->nr_pages);
}
EXPORT_SYMBOL(kvm_iommu_init_hyp);

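/*
 * Carve out the "pkvm,cma" reserved-memory region as a CMA pool for the
 * hypervisor IOMMU page tables. The region is expected to be described
 * under /reserved-memory, roughly as follows (illustrative only; node
 * name, base and size are platform choices):
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		pkvm_iommu_cma: pkvm-cma@c0000000 {
 *			compatible = "pkvm,cma";
 *			reg = <0x0 0xc0000000 0x0 0x4000000>;
 *		};
 *	};
 *
 * Both base and size should be PMD-aligned (2MiB with 4K pages).
 */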
static int __init pkvm_iommu_cma_setup(struct reserved_mem *rmem)
{
	int err;

	if (!IS_ALIGNED(rmem->base | rmem->size, PMD_SIZE))
		kvm_info("pKVM IOMMU reserved memory not PMD-aligned\n");

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name,
				    &kvm_iommu_cma, false);
	if (err) {
		kvm_err("Failed to init pKVM IOMMU reserved memory\n");
		kvm_iommu_cma = NULL;
		return err;
	}

	kvm_nvhe_sym(cma_base) = cma_get_base(kvm_iommu_cma);
	kvm_nvhe_sym(cma_size) = cma_get_size(kvm_iommu_cma);

	return 0;
}
RESERVEDMEM_OF_DECLARE(pkvm_cma, "pkvm,cma", pkvm_iommu_cma_setup);

static const u8 pmd_order = PMD_SHIFT - PAGE_SHIFT;

struct page *kvm_iommu_cma_alloc(void)
{
	if (!kvm_iommu_cma)
		return NULL;

	return cma_alloc(kvm_iommu_cma, (1 << pmd_order), pmd_order, true);
}
EXPORT_SYMBOL(kvm_iommu_cma_alloc);

bool kvm_iommu_cma_release(struct page *p)
{
	if (!kvm_iommu_cma || !p)
		return false;

	return cma_release(kvm_iommu_cma, p, 1 << pmd_order);
}
EXPORT_SYMBOL(kvm_iommu_cma_release);

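/*
 * Initialise the registered kernel IOMMU driver: allocate the root array
 * of hypervisor IOMMU domains and call into the driver-specific init.
 * Fails if no driver was registered, in which case DMA is not protected.
 */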
int kvm_iommu_init_driver(void)
{
	if (!smp_load_acquire(&iommu_driver) || !iommu_driver->get_iommu_id_by_of) {
		kvm_err("pKVM enabled without an IOMMU driver, do not run confidential workloads in virtual machines\n");
		return -ENODEV;
	}

	kvm_hyp_iommu_domains = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
							 get_order(KVM_IOMMU_DOMAINS_ROOT_SIZE));
	if (!kvm_hyp_iommu_domains)
		return -ENOMEM;

	kvm_hyp_iommu_domains = kern_hyp_va(kvm_hyp_iommu_domains);

	return iommu_driver->init_driver();
}
EXPORT_SYMBOL(kvm_iommu_init_driver);

void kvm_iommu_remove_driver(void)
{
	if (smp_load_acquire(&iommu_driver))
		iommu_driver->remove_driver();
}

pkvm_handle_t kvm_get_iommu_id_by_of(struct device_node *np)
{
	if (!iommu_driver)
		return 0;

	return iommu_driver->get_iommu_id_by_of(np);
}

static pkvm_handle_t kvm_get_iommu_id(struct device *dev)
{
	return kvm_get_iommu_id_by_of(dev_of_node(dev));
}

int pkvm_iommu_suspend(struct device *dev)
{
	int device_id = kvm_get_iommu_id(dev);

	return kvm_call_hyp_nvhe(__pkvm_host_hvc_pd, device_id, 0);
}
EXPORT_SYMBOL(pkvm_iommu_suspend);

int pkvm_iommu_resume(struct device *dev)
{
	int device_id = kvm_get_iommu_id(dev);

	return kvm_call_hyp_nvhe(__pkvm_host_hvc_pd, device_id, 1);
}
EXPORT_SYMBOL(pkvm_iommu_resume);

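/*
 * Share (and later unshare) the pages backing a kvm_iommu_sg array with
 * the hypervisor so that it can read the list passed to map_sg. The
 * arithmetic below assumes the array is physically contiguous and starts
 * at a page boundary.
 */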
int kvm_iommu_share_hyp_sg(struct kvm_iommu_sg *sg, unsigned int nents)
{
	size_t nr_pages = PAGE_ALIGN(sizeof(*sg) * nents) >> PAGE_SHIFT;
	phys_addr_t sg_pfn = virt_to_phys(sg) >> PAGE_SHIFT;
	int i;
	int ret;

	for (i = 0; i < nr_pages; ++i) {
		ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, sg_pfn + i);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(kvm_iommu_share_hyp_sg);

int kvm_iommu_unshare_hyp_sg(struct kvm_iommu_sg *sg, unsigned int nents)
{
	size_t nr_pages = PAGE_ALIGN(sizeof(*sg) * nents) >> PAGE_SHIFT;
	phys_addr_t sg_pfn = virt_to_phys(sg) >> PAGE_SHIFT;
	int i;
	int ret;

	for (i = 0; i < nr_pages; ++i) {
		ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, sg_pfn + i);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(kvm_iommu_unshare_hyp_sg);

int kvm_iommu_device_num_ids(struct device *dev)
{
	if (iommu_driver->get_device_iommu_num_ids)
		return iommu_driver->get_device_iommu_num_ids(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_iommu_device_num_ids);

int kvm_iommu_device_id(struct device *dev, u32 idx,
			pkvm_handle_t *out_iommu, u32 *out_sid)
{
	if (iommu_driver->get_device_iommu_id)
		return iommu_driver->get_device_iommu_id(dev, idx, out_iommu, out_sid);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(kvm_iommu_device_id);

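/*
 * Allocate (and free) page-table memory used on behalf of guests. If the
 * driver supplies its own allocator, both hooks must be present so that
 * pages are always returned to the pool they came from; otherwise the
 * generic hyp memcache helpers are used.
 */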
int kvm_iommu_guest_alloc_mc(struct kvm_hyp_memcache *mc, u32 pgsize, u32 nr_pages)
{
	u8 order = get_order(pgsize);

	/* Driver might have dedicated allocator especially if it needs large pages. */
	if (iommu_driver && iommu_driver->guest_alloc && iommu_driver->guest_free)
		return __topup_hyp_memcache(mc, nr_pages, iommu_driver->guest_alloc,
					    kvm_host_pa, 0, order);

	return topup_hyp_memcache(mc, nr_pages, order);
}

void kvm_iommu_guest_free_mc(struct kvm_hyp_memcache *mc)
{
	if (iommu_driver && iommu_driver->guest_alloc && iommu_driver->guest_free)
		__free_hyp_memcache(mc, iommu_driver->guest_free,
				    kvm_host_va, 0);
	else
		free_hyp_memcache(mc);
}

/* Hypercall abstractions exposed to kernel IOMMU drivers */
int kvm_iommu_attach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
			 unsigned int endpoint, unsigned int pasid,
			 unsigned int ssid_bits, unsigned long flags)
{
	return kvm_call_hyp_nvhe_mc(__pkvm_host_iommu_attach_dev, iommu_id, domain_id,
				    endpoint, pasid, ssid_bits, flags);
}
EXPORT_SYMBOL(kvm_iommu_attach_dev);

int kvm_iommu_detach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
			 unsigned int endpoint, unsigned int pasid)
{
	return kvm_call_hyp_nvhe(__pkvm_host_iommu_detach_dev, iommu_id, domain_id,
				 endpoint, pasid);
}
EXPORT_SYMBOL(kvm_iommu_detach_dev);

int kvm_iommu_alloc_domain(pkvm_handle_t domain_id, int type)
{
	return kvm_call_hyp_nvhe_mc(__pkvm_host_iommu_alloc_domain,
				    domain_id, type);
}
EXPORT_SYMBOL(kvm_iommu_alloc_domain);

int kvm_iommu_free_domain(pkvm_handle_t domain_id)
{
	return kvm_call_hyp_nvhe(__pkvm_host_iommu_free_domain, domain_id);
}
EXPORT_SYMBOL(kvm_iommu_free_domain);

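/*
 * Map @pgcount pages of size @pgsize at @iova. The hypervisor may map less
 * than requested when it runs out of page-table memory, so retry after
 * topping up its memcache until everything is mapped or the top-up fails.
 * *@total_mapped is updated even on failure.
 */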
int kvm_iommu_map_pages(pkvm_handle_t domain_id, unsigned long iova,
			phys_addr_t paddr, size_t pgsize, size_t pgcount,
			int prot, gfp_t gfp, size_t *total_mapped)
{
	size_t mapped;
	size_t size = pgsize * pgcount;
	struct arm_smccc_res res;

	do {
		res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_map_pages, domain_id,
					      iova, paddr, pgsize, pgcount, prot);
		mapped = res.a1;
		iova += mapped;
		paddr += mapped;
		WARN_ON(mapped % pgsize);
		WARN_ON(mapped > pgcount * pgsize);
		pgcount -= mapped / pgsize;
		*total_mapped += mapped;
	} while (*total_mapped < size && !kvm_iommu_topup_memcache(&res, gfp));
	if (*total_mapped < size)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(kvm_iommu_map_pages);

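/*
 * Unmap up to @pgcount pages of size @pgsize at @iova and return the
 * number of bytes actually unmapped, which may be less than requested.
 */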
size_t kvm_iommu_unmap_pages(pkvm_handle_t domain_id, unsigned long iova,
			     size_t pgsize, size_t pgcount)
{
	size_t unmapped;
	size_t total_unmapped = 0;
	size_t size = pgsize * pgcount;
	struct arm_smccc_res res;

	do {
		res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_unmap_pages,
					      domain_id, iova, pgsize, pgcount);
		unmapped = res.a1;
		total_unmapped += unmapped;
		iova += unmapped;
		WARN_ON(unmapped % pgsize);
		pgcount -= unmapped / pgsize;

		/*
		 * The page table driver can unmap less than we asked for. If it
		 * didn't unmap anything at all, then it either reached the end
		 * of the range, or it needs a page in the memcache to break a
		 * block mapping.
		 */
	} while (total_unmapped < size &&
		 (unmapped || !kvm_iommu_topup_memcache(&res, GFP_ATOMIC)));

	return total_unmapped;
}
EXPORT_SYMBOL(kvm_iommu_unmap_pages);

phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t domain_id, unsigned long iova)
{
	return kvm_call_hyp_nvhe(__pkvm_host_iommu_iova_to_phys, domain_id, iova);
}
EXPORT_SYMBOL(kvm_iommu_iova_to_phys);

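/*
 * Map a previously shared scatter-gather list of @nent entries at @iova.
 * The list is advanced past whatever the hypervisor managed to map and the
 * hypercall is retried after topping up the memcache. Returns the total
 * number of bytes mapped.
 */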
size_t kvm_iommu_map_sg(pkvm_handle_t domain_id, struct kvm_iommu_sg *sg,
			unsigned long iova, unsigned int nent,
			unsigned int prot, gfp_t gfp)
{
	size_t mapped, total_mapped = 0;
	struct arm_smccc_res res;

	do {
		res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_map_sg,
					      domain_id, iova, sg, nent, prot);
		mapped = res.a1;
		iova += mapped;
		total_mapped += mapped;
		/* Skip mapped */
		while (mapped) {
			if (mapped < (sg->pgsize * sg->pgcount)) {
				sg->phys += mapped;
				sg->pgcount -= mapped / sg->pgsize;
				mapped = 0;
			} else {
				mapped -= sg->pgsize * sg->pgcount;
				sg++;
				nent--;
			}
		}

		/*
		 * Stop retrying if the hypervisor's memcache cannot be topped
		 * up, otherwise a persistent allocation failure would make
		 * this loop spin forever.
		 */
	} while (nent && !kvm_iommu_topup_memcache(&res, gfp));

	return total_mapped;
}
EXPORT_SYMBOL(kvm_iommu_map_sg);