// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/modules.h>
#include <nvhe/spinlock.h>

struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;

struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

static u64 __io_map_base;

struct hyp_fixmap_slot {
	u64 addr;
	kvm_pte_t *ptep;
};
static DEFINE_PER_CPU(struct hyp_fixmap_slot, fixmap_slots);

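/*
 * Map @size bytes of physical memory starting at @phys at the hyp VA @start,
 * taking the hyp page-table lock around the page-table update.
 */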
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
				  unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return err;
}

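/*
 * Reserve @size bytes of private VA space starting at @start, which must not
 * be below the current __io_map_base. The caller must hold pkvm_pgd_lock;
 * -ENOMEM is returned if the range would run into the hyp vmemmap.
 */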
static int __pkvm_alloc_private_va_range(unsigned long start, size_t size)
{
	unsigned long cur;

	hyp_assert_lock_held(&pkvm_pgd_lock);

	if (!start || start < __io_map_base)
		return -EINVAL;

	/* The allocated size is always a multiple of PAGE_SIZE */
	cur = start + PAGE_ALIGN(size);

	/* Are we overflowing on the vmemmap ? */
	if (cur > __hyp_vmemmap)
		return -ENOMEM;

	__io_map_base = cur;

	return 0;
}

/**
 * pkvm_alloc_private_va_range - Allocates a private VA range.
 * @size:	The size of the VA range to reserve.
 * @haddr:	The hypervisor virtual start address of the allocation.
 *
 * The private virtual address (VA) range is allocated above __io_map_base
 * and aligned based on the order of @size.
 *
 * Return: 0 on success or negative error code on failure.
 */
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
	unsigned long addr;
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);
	addr = __io_map_base;
	ret = __pkvm_alloc_private_va_range(addr, size);
	hyp_spin_unlock(&pkvm_pgd_lock);

	*haddr = addr;

	return ret;
}

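/*
 * Allocate a page-aligned private VA range large enough to cover @size bytes
 * and map @phys there with @prot. On success, @haddr points at the mapping
 * of @phys itself, i.e. the start of the range plus the in-page offset of
 * @phys.
 */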
int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
				  enum kvm_pgtable_prot prot,
				  unsigned long *haddr)
{
	unsigned long addr;
	int err;

	size = PAGE_ALIGN(size + offset_in_page(phys));
	err = pkvm_alloc_private_va_range(size, &addr);
	if (err)
		return err;

	err = __pkvm_create_mappings(addr, size, phys, prot);
	if (err)
		return err;

	*haddr = addr + offset_in_page(phys);
	return err;
}

#ifdef CONFIG_PKVM_STRICT_CHECKS
static unsigned long mod_range_start = ULONG_MAX;
static unsigned long mod_range_end;
static DEFINE_HYP_SPINLOCK(mod_range_lock);

static void update_mod_range(unsigned long addr, size_t size)
{
	hyp_spin_lock(&mod_range_lock);
	mod_range_start = min(mod_range_start, addr);
	mod_range_end = max(mod_range_end, addr + size);
	hyp_spin_unlock(&mod_range_lock);
}

void assert_in_mod_range(unsigned long addr)
{
	/*
	 * This is not entirely watertight if there are private range
	 * allocations between modules being loaded, but in practice those are
	 * likely to be allocations initiated by the modules themselves.
	 */
	hyp_spin_lock(&mod_range_lock);
	WARN_ON(addr < mod_range_start || mod_range_end <= addr);
	hyp_spin_unlock(&mod_range_lock);
}
#else
static inline void update_mod_range(unsigned long addr, size_t size) { }
#endif

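/*
 * Allocate a private VA range of @nr_pages pages for a module and, when
 * strict checks are enabled, record it as part of the module range. Returns
 * NULL (address zero) if the allocation fails.
 */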
void *__pkvm_alloc_module_va(u64 nr_pages)
{
	size_t size = nr_pages << PAGE_SHIFT;
	unsigned long addr = 0;

	if (!pkvm_alloc_private_va_range(size, &addr))
		update_mod_range(addr, size);

	return (void *)addr;
}

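/*
 * Map a single module page at @va. A page backing a non-protected module is
 * first donated from the host to the hypervisor, and the donation is undone
 * if the mapping itself fails.
 */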
int __pkvm_map_module_page(u64 pfn, void *va, enum kvm_pgtable_prot prot, bool is_protected)
{
	unsigned long addr = (unsigned long)va;
	int ret;

	assert_in_mod_range(addr);

	if (!is_protected) {
		ret = __pkvm_host_donate_hyp(pfn, 1);
		if (ret)
			return ret;
	}

	ret = __pkvm_create_mappings(addr, PAGE_SIZE, hyp_pfn_to_phys(pfn), prot);
	if (ret && !is_protected)
		WARN_ON(__pkvm_hyp_donate_host(pfn, 1));

	return ret;
}

void __pkvm_unmap_module_page(u64 pfn, void *va)
{
	WARN_ON(__pkvm_hyp_donate_host(pfn, 1));
	pkvm_remove_mappings(va, va + PAGE_SIZE);
}

int __hyp_allocator_map(unsigned long va, phys_addr_t phys)
{
	int ret = __pkvm_create_mappings(va, PAGE_SIZE, phys, PAGE_HYP);

	/* Let's not confuse the hyp_alloc callers who will try to top-up pointlessly on -ENOMEM */
	if (ret == -ENOMEM)
		ret = -EBUSY;

	return ret;
}

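/*
 * Map the [from, to) hyp VA range, page by page, to the physical pages that
 * back it (as given by hyp_virt_to_phys()). The caller must hold
 * pkvm_pgd_lock.
 */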
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	unsigned long virt_addr;
	phys_addr_t phys;

	hyp_assert_lock_held(&pkvm_pgd_lock);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys = hyp_virt_to_phys((void *)virt_addr);
		err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
					  phys, prot);
		if (err)
			return err;
	}

	return 0;
}

int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);
	ret = pkvm_create_mappings_locked(from, to, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

unsigned long pkvm_remove_mappings_locked(void *from, void *to)
{
	unsigned long size = (unsigned long)to - (unsigned long)from;

	return kvm_pgtable_hyp_unmap(&pkvm_pgtable, (u64)from, size);
}

void pkvm_remove_mappings(void *from, void *to)
{
	unsigned long size = (unsigned long)to - (unsigned long)from;

	hyp_spin_lock(&pkvm_pgd_lock);
	WARN_ON(pkvm_remove_mappings_locked(from, to) != size);
	hyp_spin_unlock(&pkvm_pgd_lock);
}

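/*
 * Back the hyp vmemmap with pages starting at physical address @back: for
 * each hyp memblock, map and zero the portion of the vmemmap covering its
 * struct hyp_page entries, skipping anything already mapped for the
 * previous region.
 */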
int hyp_back_vmemmap(phys_addr_t back)
{
	unsigned long i, start, size, end = 0;
	int ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		start = hyp_memory[i].base;
		start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
		/*
		 * The beginning of the hyp_vmemmap region for the current
		 * memblock may already be backed by the page backing the end
		 * of the previous region, so avoid mapping it twice.
		 */
		start = max(start, end);

		end = hyp_memory[i].base + hyp_memory[i].size;
		end = PAGE_ALIGN((u64)hyp_phys_to_page(end));
		if (start >= end)
			continue;

		size = end - start;
		ret = __pkvm_create_mappings(start, size, back, PAGE_HYP);
		if (ret)
			return ret;

		memset(hyp_phys_to_virt(back), 0, size);
		back += size;
	}

	return 0;
}

static void *__hyp_bp_vect_base;
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
	void *vector;

	switch (slot) {
	case HYP_VECTOR_DIRECT: {
		vector = __kvm_hyp_vector;
		break;
	}
	case HYP_VECTOR_SPECTRE_DIRECT: {
		vector = __bp_harden_hyp_vecs;
		break;
	}
	case HYP_VECTOR_INDIRECT:
	case HYP_VECTOR_SPECTRE_INDIRECT: {
		vector = (void *)__hyp_bp_vect_base;
		break;
	}
	default:
		return -EINVAL;
	}

	vector = __kvm_vector_slot2addr(vector, slot);
	*this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

	return 0;
}

int hyp_map_vectors(void)
{
	phys_addr_t phys;
	unsigned long bp_base;
	int ret;

	if (!kvm_system_needs_idmapped_vectors()) {
		__hyp_bp_vect_base = __bp_harden_hyp_vecs;
		return 0;
	}

	phys = __hyp_pa(__bp_harden_hyp_vecs);
	ret = __pkvm_create_private_mapping(phys, __BP_HARDEN_HYP_VECS_SZ,
					    PAGE_HYP_EXEC, &bp_base);
	if (ret)
		return ret;

	__hyp_bp_vect_base = (void *)bp_base;

	return 0;
}

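/*
 * Point the pre-allocated PTE of @slot at @phys and return the hyp VA at
 * which the page (or block) can now be accessed. No memory is allocated:
 * the slot's page-table entry was set up when the fixmap was created.
 */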
static void *fixmap_map_slot(struct hyp_fixmap_slot *slot, phys_addr_t phys)
{
	kvm_pte_t pte, *ptep = slot->ptep;

	pte = *ptep;
	pte &= ~kvm_phys_to_pte(KVM_PHYS_INVALID);
	pte |= kvm_phys_to_pte(phys) | KVM_PTE_VALID;
	WRITE_ONCE(*ptep, pte);
	dsb(ishst);

	return (void *)slot->addr + offset_in_page(phys);
}

void *hyp_fixmap_map(phys_addr_t phys)
{
	return fixmap_map_slot(this_cpu_ptr(&fixmap_slots), phys);
}

static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
{
	kvm_pte_t *ptep = slot->ptep;
	u64 addr = slot->addr;
	u32 level;

	if (FIELD_GET(KVM_PTE_TYPE, *ptep) == KVM_PTE_TYPE_PAGE)
		level = KVM_PGTABLE_LAST_LEVEL;
	else
		level = KVM_PGTABLE_LAST_LEVEL - 1; /* create_fixblock() guarantees PMD level */

	WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID);

	/*
	 * Irritatingly, the architecture requires that we use inner-shareable
	 * broadcast TLB invalidation here in case another CPU speculates
	 * through our fixmap and decides to create an "amalgamation of the
	 * values held in the TLB" due to the apparent lack of a
	 * break-before-make sequence.
	 *
	 * https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
	 */
	dsb(ishst);
	__tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
	dsb(ish);
	isb();
}

void hyp_fixmap_unmap(void)
{
	fixmap_clear_slot(this_cpu_ptr(&fixmap_slots));
}

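/*
 * Page-table walker callback used to turn an existing leaf mapping into a
 * fixmap slot: record the address and PTE location in the slot, then clear
 * the PTE so that the slot starts out unmapped.
 */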
static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx,
				   enum kvm_pgtable_walk_flags visit)
{
	struct hyp_fixmap_slot *slot = (struct hyp_fixmap_slot *)ctx->arg;

	if (!kvm_pte_valid(ctx->old) || (ctx->end - ctx->start) != kvm_granule_size(ctx->level))
		return -EINVAL;

	slot->addr = ctx->addr;
	slot->ptep = ctx->ptep;

	/*
	 * Clear the PTE, but keep the page-table page refcount elevated to
	 * prevent it from ever being freed. This lets us manipulate the PTEs
	 * by hand safely without ever needing to allocate memory.
	 */
	fixmap_clear_slot(slot);

	return 0;
}

static int create_fixmap_slot(u64 addr, u64 cpu)
{
	struct kvm_pgtable_walker walker = {
		.cb = __create_fixmap_slot_cb,
		.flags = KVM_PGTABLE_WALK_LEAF,
		.arg = (void *)per_cpu_ptr(&fixmap_slots, cpu),
	};

	return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
}

#ifndef CONFIG_ARM64_64K_PAGES
static struct hyp_fixmap_slot hyp_fixblock_slot;
static DEFINE_HYP_SPINLOCK(hyp_fixblock_lock);

void *hyp_fixblock_map(phys_addr_t phys)
{
	WARN_ON(!IS_ALIGNED(phys, PMD_SIZE));

	hyp_spin_lock(&hyp_fixblock_lock);
	return fixmap_map_slot(&hyp_fixblock_slot, phys);
}

void hyp_fixblock_unmap(void)
{
	fixmap_clear_slot(&hyp_fixblock_slot);
	hyp_spin_unlock(&hyp_fixblock_lock);
}

static int create_fixblock(void)
{
	struct kvm_pgtable_walker walker = {
		.cb = __create_fixmap_slot_cb,
		.flags = KVM_PGTABLE_WALK_LEAF,
		.arg = (void *)&hyp_fixblock_slot,
	};
	unsigned long addr;
	phys_addr_t phys;
	int ret, i;

	/* Find a RAM phys address, PMD aligned */
	for (i = 0; i < hyp_memblock_nr; i++) {
		phys = ALIGN(hyp_memory[i].base, PMD_SIZE);
		if (phys + PMD_SIZE < (hyp_memory[i].base + hyp_memory[i].size))
			break;
	}

	/* Really? Your RAM isn't larger than a couple of times PMD_SIZE? */
	if (i >= hyp_memblock_nr)
		return -EINVAL;

	hyp_spin_lock(&pkvm_pgd_lock);
	addr = ALIGN(__io_map_base, PMD_SIZE);
	ret = __pkvm_alloc_private_va_range(addr, PMD_SIZE);
	if (ret)
		goto unlock;

	ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PMD_SIZE, phys, PAGE_HYP);
	if (ret)
		goto unlock;

	ret = kvm_pgtable_walk(&pkvm_pgtable, addr, PMD_SIZE, &walker);
unlock:
	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}
#else
void hyp_fixblock_unmap(void) { WARN_ON(1); }
void *hyp_fixblock_map(phys_addr_t phys) { return NULL; }
static int create_fixblock(void) { return 0; }
#endif

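/*
 * Create one private fixmap slot per CPU: allocate a private VA page, map
 * the first hyp BSS page there purely so that a leaf PTE gets created, then
 * hand that PTE over to the per-CPU fixmap slot. Finally, set up the shared
 * block-sized fixmap slot where the page size allows it.
 */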
int hyp_create_fixmap(void)
{
	unsigned long addr, i;
	int ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		ret = pkvm_alloc_private_va_range(PAGE_SIZE, &addr);
		if (ret)
			return ret;

		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
					  __hyp_pa(__hyp_bss_start), PAGE_HYP);
		if (ret)
			return ret;

		ret = create_fixmap_slot(addr, i);
		if (ret)
			return ret;
	}

	return create_fixblock();
}

int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;

	start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
	start = ALIGN_DOWN(start, PAGE_SIZE);

	end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * One half of the VA space is reserved to linearly map portions of
	 * memory -- see va_layout.c for more details. The other half of the VA
	 * space contains the trampoline page, and needs some care. Split that
	 * second half in two and find the quarter of VA space not conflicting
	 * with the idmap to place the IOs and the vmemmap. IOs use the lower
	 * half of the quarter and the vmemmap the upper half.
	 */
	__io_map_base = start & BIT(hyp_va_bits - 2);
	__io_map_base ^= BIT(hyp_va_bits - 2);
	__hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}

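/*
 * Allocate the private VA range for a hyp stack: the range spans twice
 * NVHE_STACK_SIZE and is aligned on its size, with only the upper half
 * backed by @phys so that the lower half acts as a guard page. @haddr
 * receives the (exclusive) top of the stack.
 */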
int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
{
	unsigned long addr, prev_base;
	size_t size;
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);

	prev_base = __io_map_base;
	/*
	 * Efficient stack verification using the NVHE_STACK_SHIFT bit implies
	 * an alignment of our allocation on the order of the size.
	 */
	size = NVHE_STACK_SIZE * 2;
	addr = ALIGN(__io_map_base, size);

	ret = __pkvm_alloc_private_va_range(addr, size);
	if (!ret) {
		/*
		 * Since the stack grows downwards, map the stack to the page
		 * at the higher address and leave the lower guard page
		 * unbacked.
		 *
		 * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
		 * and addresses corresponding to the guard page have the
		 * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
		 */
		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + NVHE_STACK_SIZE,
					  NVHE_STACK_SIZE, phys, PAGE_HYP);
		if (ret)
			__io_map_base = prev_base;
	}
	hyp_spin_unlock(&pkvm_pgd_lock);

	*haddr = addr + size;

	return ret;
}

/* Note: The caller has to use a local copy of the arg */
void *admit_host_page(void *arg, unsigned long order)
{
	phys_addr_t p;
	struct kvm_hyp_memcache *host_mc = arg;
	unsigned long mc_order;

	if (!host_mc->nr_pages)
		return NULL;

	mc_order = FIELD_GET(~PAGE_MASK, host_mc->head);
	BUG_ON(order != mc_order);

	p = host_mc->head & PAGE_MASK;
	/*
	 * The host still owns the pages in its memcache, so we need to go
	 * through a full host-to-hyp donation cycle to change it. Fortunately,
	 * __pkvm_host_donate_hyp() takes care of races for us, so if it
	 * succeeds we're good to go.
	 */
	if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(p), 1 << order))
		return NULL;

	return pop_hyp_memcache(host_mc, hyp_phys_to_virt, &order);
}

/* Refill our local memcache by popping pages from the one provided by the host. */
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
		    struct kvm_hyp_memcache *host_mc)
{
	struct kvm_hyp_memcache tmp = *host_mc;
	int ret;

	ret = __topup_hyp_memcache(mc, min_pages, admit_host_page,
				   hyp_virt_to_phys, &tmp, 0);
	*host_mc = tmp;

	return ret;
}

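/*
 * Translate a hyp private-range VA into a physical address by looking up
 * the leaf entry in the hyp page-table. The address must be mapped.
 */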
phys_addr_t __pkvm_private_range_pa(void *va)
{
	kvm_pte_t pte;
	s8 level;

	hyp_spin_lock(&pkvm_pgd_lock);
	WARN_ON(kvm_pgtable_get_leaf(&pkvm_pgtable, (u64)va, &pte, &level));
	hyp_spin_unlock(&pkvm_pgd_lock);

	BUG_ON(!kvm_pte_valid(pte));

	return kvm_pte_to_phys(pte) + offset_in_page(va);
}

/* The host passed a memcache; fill a pool with the pages it contains. */
int refill_hyp_pool(struct hyp_pool *pool, struct kvm_hyp_memcache *host_mc)
{
	unsigned long order;
	u64 nr_pages;
	void *p;
	struct kvm_hyp_memcache tmp = *host_mc;

	while (tmp.nr_pages) {
		order = FIELD_GET(~PAGE_MASK, tmp.head);
		if (check_shl_overflow(1UL, order, &nr_pages))
			return -EINVAL;

		p = admit_host_page(&tmp, order);
		if (!p)
			return -EINVAL;
		*host_mc = tmp;

		hyp_virt_to_page(p)->order = order;
		hyp_set_page_refcounted(hyp_virt_to_page(p));
		hyp_put_page(pool, p);
	}

	return 0;
}

/*
 * Remove target pages from the pool and put them in a memcache,
 * so the host can reclaim them.
 */
int reclaim_hyp_pool(struct hyp_pool *pool, struct kvm_hyp_memcache *host_mc,
		     int nr_pages)
{
	struct hyp_page *page;
	u8 order;
	void *p;

	while (nr_pages > 0) {
		p = hyp_alloc_pages(pool, 0);
		if (!p)
			return -ENOMEM;
		page = hyp_virt_to_page(p);
		order = page->order;
		nr_pages -= (1 << order);

		/*
		 * For a compound page all the tail pages should normally
		 * have page->order == HYP_NO_ORDER which would need to be
		 * cleared one by one. But in this instance, the order 0
		 * allocation above can only return an _external_ compound
		 * page which is in fact ignored by the buddy logic, and the
		 * tail pages are never touched.
		 */
		page->order = 0;
		hyp_page_ref_dec(page);

		push_hyp_memcache(host_mc, p, hyp_virt_to_phys, order);
		WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(p), 1 << order));
	}

	return 0;
}

/* Remap hyp memory with different cacheability */
int pkvm_remap_range(void *va, int nr_pages, bool nc)
{
	size_t size = nr_pages << PAGE_SHIFT;
	phys_addr_t phys = hyp_virt_to_phys(va);
	enum kvm_pgtable_prot prot = PAGE_HYP;
	int ret;

	if (nc)
		prot |= KVM_PGTABLE_PROT_NORMAL_NC;
	hyp_spin_lock(&pkvm_pgd_lock);
	WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, (u64)va, size) != size);
	ret = kvm_pgtable_hyp_map(&pkvm_pgtable, (u64)va, size, phys, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);
	return ret;
}