// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Google LLC
 * Author: Vincent Donnefort <vdonnefort@google.com>
 */

#include <nvhe/alloc.h>
#include <nvhe/alloc_mgt.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

#include <linux/build_bug.h>
#include <linux/hash.h>
#include <linux/kvm_host.h>
#include <linux/list.h>

#define MIN_ALLOC 8UL

static DEFINE_PER_CPU(int, hyp_allocator_errno);
static DEFINE_PER_CPU(struct kvm_hyp_memcache, hyp_allocator_mc);
static DEFINE_PER_CPU(u8, hyp_allocator_missing_donations);

static struct hyp_allocator {
	struct list_head	chunks;
	unsigned long		start;
	u32			size;
	hyp_spinlock_t		lock;
} hyp_allocator;

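/*
 * Each allocation is preceded by a struct chunk_hdr, linked into
 * hyp_allocator.chunks in address order. alloc_size is the size handed out to
 * the caller (0 for a free chunk), mapped_size is the size of the VA region,
 * starting at the header, that is currently backed by pages, and hash allows
 * detecting corruption of the header fields. With 64-bit pointers this works
 * out to a 32-byte header, i.e. a minimum chunk of chunk_size(0) = 40 bytes.
 */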
struct chunk_hdr {
	u32			alloc_size;
	u32			mapped_size;
	struct list_head	node;
	u32			hash;
	char			data __aligned(8);
};

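/*
 * Integrity hash over the header fields preceding ->hash: hash_64() is
 * XOR-folded over each 64-bit word, with a hash_32() tail for any remaining
 * 32-bit word. chunk_hash_update() must be called after every header
 * modification; chunk_hash_validate() WARNs on a mismatch.
 */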
static u32 chunk_hash_compute(struct chunk_hdr *chunk)
{
	size_t len = offsetof(struct chunk_hdr, hash);
	u64 *data = (u64 *)chunk;
	u32 hash = 0;

	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct chunk_hdr, hash), sizeof(u32)));

	while (len >= sizeof(u64)) {
		hash ^= hash_64(*data, 32);
		len -= sizeof(u64);
		data++;
	}

	if (len)
		hash ^= hash_32(*(u32 *)data, 32);

	return hash;
}

static inline void chunk_hash_update(struct chunk_hdr *chunk)
{
	if (chunk)
		chunk->hash = chunk_hash_compute(chunk);
}

static inline void chunk_hash_validate(struct chunk_hdr *chunk)
{
	if (chunk)
		WARN_ON(chunk->hash != chunk_hash_compute(chunk));
}

#define chunk_is_used(chunk) \
	(!!(chunk)->alloc_size)

#define chunk_hdr_size() \
	offsetof(struct chunk_hdr, data)

#define chunk_size(size) \
	(chunk_hdr_size() + max((size_t)(size), MIN_ALLOC))

#define chunk_data(chunk) \
	((void *)(&(chunk)->data))

#define __chunk_next(chunk, allocator)				\
({								\
	list_is_last(&(chunk)->node, &(allocator)->chunks) ?	\
		NULL : list_next_entry(chunk, node);		\
})

#define __chunk_prev(chunk, allocator)				\
({								\
	list_is_first(&(chunk)->node, &(allocator)->chunks) ?	\
		NULL : list_prev_entry(chunk, node);		\
})

#define chunk_get_next(chunk, allocator)			\
({								\
	struct chunk_hdr *next = __chunk_next(chunk, allocator);\
	chunk_hash_validate(next);				\
	next;							\
})

#define chunk_get_prev(chunk, allocator)			\
({								\
	struct chunk_hdr *prev = __chunk_prev(chunk, allocator);\
	chunk_hash_validate(prev);				\
	prev;							\
})

#define chunk_get(addr)						\
({								\
	struct chunk_hdr *chunk = (struct chunk_hdr *)addr;	\
	chunk_hash_validate(chunk);				\
	chunk;							\
})

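/*
 * The unmapped region of a chunk is the VA range between the end of its mapped
 * pages and the next chunk header (or the end of the allocator's VA window for
 * the last chunk). It can be mapped later to grow the chunk in place.
 */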
#define chunk_unmapped_region(chunk) \
	((unsigned long)(chunk) + chunk->mapped_size)

#define chunk_unmapped_size(chunk, allocator)				\
({									\
	struct chunk_hdr *next = chunk_get_next(chunk, allocator);	\
	unsigned long allocator_end = (allocator)->start +		\
				      (allocator)->size;		\
	next ? (unsigned long)next - chunk_unmapped_region(chunk) :	\
		allocator_end - chunk_unmapped_region(chunk);		\
})

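/*
 * List helpers: the list_head pointers are covered by the header hash, so the
 * hashes of the neighbouring chunks must be refreshed whenever a chunk is
 * inserted or removed.
 */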
static inline void chunk_list_insert(struct chunk_hdr *chunk,
				     struct chunk_hdr *prev,
				     struct hyp_allocator *allocator)
{
	list_add(&chunk->node, &prev->node);
	chunk_hash_update(prev);
	chunk_hash_update(__chunk_next(chunk, allocator));
	chunk_hash_update(chunk);
}

static inline void chunk_list_del(struct chunk_hdr *chunk,
				  struct hyp_allocator *allocator)
{
	struct chunk_hdr *prev = __chunk_prev(chunk, allocator);
	struct chunk_hdr *next = __chunk_next(chunk, allocator);

	list_del(&chunk->node);
	chunk_hash_update(prev);
	chunk_hash_update(next);
}

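/*
 * Tear down the hyp mapping for [va, va + size) and stash the backing pages in
 * the per-CPU memcache so they can be reused or donated back to the host.
 */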
static void hyp_allocator_unmap(struct hyp_allocator *allocator,
				unsigned long va, size_t size)
{
	struct kvm_hyp_memcache *mc = this_cpu_ptr(&hyp_allocator_mc);
	int nr_pages = size >> PAGE_SHIFT;
	unsigned long __va = va;

	WARN_ON(!PAGE_ALIGNED(va));
	WARN_ON(!PAGE_ALIGNED(size));

	while (nr_pages--) {
		phys_addr_t pa = __pkvm_private_range_pa((void *)__va);
		void *page = hyp_phys_to_virt(pa);

		push_hyp_memcache(mc, page, hyp_virt_to_phys, 0);
		__va += PAGE_SIZE;
	}

	pkvm_remove_mappings((void *)va, (void *)(va + size));
}

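/*
 * Map size bytes at va with pages taken from the per-CPU memcache. If the
 * memcache is too small, record the shortfall in
 * hyp_allocator_missing_donations (read back via
 * hyp_alloc_missing_donations()) and fail with -ENOMEM.
 */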
static int hyp_allocator_map(struct hyp_allocator *allocator,
			     unsigned long va, size_t size)
{
	struct kvm_hyp_memcache *mc = this_cpu_ptr(&hyp_allocator_mc);
	unsigned long va_end = va + size;
	int ret = 0, nr_pages = 0;

	if (!PAGE_ALIGNED(va) || !PAGE_ALIGNED(size))
		return -EINVAL;

	if (va_end < va || va_end > (allocator->start + allocator->size))
		return -E2BIG;

	if (mc->nr_pages < (size >> PAGE_SHIFT)) {
		u8 *missing_donations = this_cpu_ptr(&hyp_allocator_missing_donations);
		u32 delta = (size >> PAGE_SHIFT) - mc->nr_pages;

		*missing_donations = min(delta, U8_MAX);

		return -ENOMEM;
	}

	while (nr_pages < (size >> PAGE_SHIFT)) {
		void *page;
		unsigned long order;

		page = pop_hyp_memcache(mc, hyp_phys_to_virt, &order);
		/* We only expect 1 page at a time for now. */
		WARN_ON(!page || order);

		ret = __hyp_allocator_map(va, hyp_virt_to_phys(page));
		if (ret) {
			push_hyp_memcache(mc, page, hyp_virt_to_phys, 0);
			break;
		}
		va += PAGE_SIZE;
		nr_pages++;
	}

	if (ret && nr_pages) {
		va -= PAGE_SIZE * nr_pages;
		hyp_allocator_unmap(allocator, va, nr_pages << PAGE_SHIFT);
	}

	return ret;
}

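/*
 * Write a new chunk header at @chunk and link it after @prev, carving the tail
 * of prev's mapped region off as the new chunk's mapped_size. A NULL @prev
 * installs the very first chunk.
 */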
static int chunk_install(struct chunk_hdr *chunk, size_t size,
			 struct chunk_hdr *prev,
			 struct hyp_allocator *allocator)
{
	size_t prev_mapped_size;

	/* First chunk, first allocation */
	if (!prev) {
		INIT_LIST_HEAD(&chunk->node);
		list_add(&chunk->node, &allocator->chunks);
		chunk->mapped_size = PAGE_ALIGN(chunk_size(size));
		chunk->alloc_size = size;

		chunk_hash_update(chunk);

		return 0;
	}

	if (chunk_unmapped_region(prev) < (unsigned long)chunk)
		return -EINVAL;
	if ((unsigned long)chunk_data(prev) + prev->alloc_size > (unsigned long)chunk)
		return -EINVAL;

	prev_mapped_size = prev->mapped_size;
	prev->mapped_size = (unsigned long)chunk - (unsigned long)prev;

	chunk->mapped_size = prev_mapped_size - prev->mapped_size;
	chunk->alloc_size = size;

	chunk_list_insert(chunk, prev, allocator);

	return 0;
}

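/*
 * Fold a free chunk into its (also free) predecessor when their mapped regions
 * are contiguous: the predecessor inherits the mapped size and the chunk is
 * unlinked.
 */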
static int chunk_merge(struct chunk_hdr *chunk, struct hyp_allocator *allocator)
{
	/* The caller already validates prev */
	struct chunk_hdr *prev = __chunk_prev(chunk, allocator);

	if (WARN_ON(!prev))
		return -EINVAL;

	/* Can only merge free chunks */
	if (chunk_is_used(chunk) || chunk_is_used(prev))
		return -EBUSY;

	/* Can't merge non-contiguous mapped regions */
	if (chunk_unmapped_region(prev) != (unsigned long)chunk)
		return 0;

	/* mapped region inheritance */
	prev->mapped_size += chunk->mapped_size;

	chunk_list_del(chunk, allocator);

	return 0;
}

static size_t chunk_needs_mapping(struct chunk_hdr *chunk, size_t size)
{
	size_t mapping_missing, mapping_needs = chunk_size(size);

	if (mapping_needs <= chunk->mapped_size)
		return 0;

	mapping_missing = PAGE_ALIGN(mapping_needs - chunk->mapped_size);

	return mapping_missing;
}

/*
 * When a chunk spans several pages, split it at the start of the last page it
 * covers. This makes it possible to punch holes in the mapping and reclaim
 * pages.
 *
 *  +--------------+
 *  |______________|
 *  |______________|<- Next chunk
 *  |_ _ _ __ _ _ _|
 *  |              |<- New chunk installed, page aligned
 *  +--------------+
 *  +--------------+
 *  |              |
 *  |              |<- This page can be reclaimed
 *  |              |
 *  |              |
 *  +--------------+
 *  +--------------+
 *  |              |
 *  |______________|
 *  |______________|<- Chunk to split at page alignment
 *  |              |
 *  +--------------+
 */
static int chunk_split_aligned(struct chunk_hdr *chunk,
			       struct hyp_allocator *allocator)
{
	struct chunk_hdr *next_chunk = chunk_get_next(chunk, allocator);
	unsigned long delta, mapped_end = chunk_unmapped_region(chunk);
	struct chunk_hdr *new_chunk;

	if (PAGE_ALIGNED(mapped_end))
		return 0;

	new_chunk = (struct chunk_hdr *)PAGE_ALIGN_DOWN(mapped_end);
	if ((unsigned long)new_chunk <= (unsigned long)chunk)
		return -EINVAL;

	delta = ((unsigned long)next_chunk - (unsigned long)new_chunk);

	/*
	 * This shouldn't happen: chunks are installed at a minimum distance
	 * from the page start.
	 */
	WARN_ON(delta < chunk_size(0UL));

	WARN_ON(chunk_install(new_chunk, 0, chunk, allocator));

	return 0;
}

static int chunk_inc_map(struct chunk_hdr *chunk, size_t map_size,
			 struct hyp_allocator *allocator)
{
	int ret;

	if (chunk_unmapped_size(chunk, allocator) < map_size)
		return -EINVAL;

	ret = hyp_allocator_map(allocator, chunk_unmapped_region(chunk),
				map_size);
	if (ret)
		return ret;

	chunk->mapped_size += map_size;
	chunk_hash_update(chunk);

	return 0;
}

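/*
 * Unmap whole pages from the tail of @chunk's mapped region, up to
 * @reclaim_target bytes, after splitting the chunk at a page boundary (see the
 * comment above chunk_split_aligned()). Returns the number of bytes unmapped.
 */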
static size_t chunk_dec_map(struct chunk_hdr *chunk,
			    struct hyp_allocator *allocator,
			    size_t reclaim_target)
{
	unsigned long start, end;
	size_t reclaimable;

	start = PAGE_ALIGN((unsigned long)chunk +
			   chunk_size(chunk->alloc_size));
	end = chunk_unmapped_region(chunk);

	if (start >= end)
		return 0;

	reclaimable = end - start;
	if (reclaimable < PAGE_SIZE)
		return 0;

	if (chunk_split_aligned(chunk, allocator))
		return 0;

	end = chunk_unmapped_region(chunk);
	reclaimable = min(end - start, reclaim_target);
	start = end - reclaimable;

	hyp_allocator_unmap(allocator, start, reclaimable);

	chunk->mapped_size -= reclaimable;
	chunk_hash_update(chunk);

	return reclaimable;
}

static unsigned long chunk_addr_fixup(unsigned long addr)
{
	unsigned long min_chunk_size = chunk_size(0UL);
	unsigned long page = PAGE_ALIGN_DOWN(addr);
	unsigned long delta = addr - page;

	if (!delta)
		return addr;

	/*
	 * To maximize reclaim, a chunk must fit between the page start and this
	 * addr.
	 */
	if (delta < min_chunk_size)
		return page + min_chunk_size;

	return addr;
}

static bool chunk_can_split(struct chunk_hdr *chunk, unsigned long addr,
			    struct hyp_allocator *allocator)
{
	unsigned long chunk_end;

	/*
	 * There is no point in splitting the last chunk; subsequent allocations
	 * would be able to use this space anyway.
	 */
	if (list_is_last(&chunk->node, &allocator->chunks))
		return false;

	chunk_end = (unsigned long)chunk + chunk->mapped_size +
		    chunk_unmapped_size(chunk, allocator);

	return addr + chunk_size(0UL) < chunk_end;
}

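/*
 * Reuse a free chunk for a new allocation of @size bytes: extend its mapping
 * if needed and, when enough room is left past the allocation, split the
 * remainder off as a new free chunk.
 */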
static int chunk_recycle(struct chunk_hdr *chunk, size_t size,
			 struct hyp_allocator *allocator)
{
	unsigned long new_chunk_addr = (unsigned long)chunk + chunk_size(size);
	size_t missing_map, expected_mapping = size;
	struct chunk_hdr *new_chunk = NULL;
	int ret;

	new_chunk_addr = chunk_addr_fixup(new_chunk_addr);
	if (chunk_can_split(chunk, new_chunk_addr, allocator)) {
		new_chunk = (struct chunk_hdr *)new_chunk_addr;
		expected_mapping = new_chunk_addr + chunk_hdr_size() -
					(unsigned long)chunk_data(chunk);
	}

	missing_map = chunk_needs_mapping(chunk, expected_mapping);
	if (missing_map) {
		ret = chunk_inc_map(chunk, missing_map, allocator);
		if (ret)
			return ret;
	}

	chunk->alloc_size = size;
	chunk_hash_update(chunk);

	if (new_chunk)
		WARN_ON(chunk_install(new_chunk, 0, chunk, allocator));

	return 0;
}

static size_t chunk_try_destroy(struct chunk_hdr *chunk,
				struct hyp_allocator *allocator,
				size_t reclaim_target)
{
	size_t unmapped;

	if (chunk_is_used(chunk))
		return 0;

	/* Don't kill the entire chunk if this is not necessary */
	if (chunk->mapped_size > reclaim_target)
		return 0;

	if (list_is_first(&chunk->node, &allocator->chunks)) {
		/* last standing chunk? */
		if (!list_is_last(&chunk->node, &allocator->chunks))
			return 0;

		list_del(&chunk->node);
		goto unmap;
	}

	/*
	 * Resolve discontiguous unmapped zones that are the result
	 * of a previous chunk_dec_map().
	 *
	 * To make sure we still keep track of that unmapped zone in our free
	 * list, we need either to be the last chunk or to have prev unused. Two
	 * contiguous chunks can both be free if they are separated by an
	 * unmapped zone (see chunk_recycle()).
	 */

	if (!PAGE_ALIGNED((unsigned long)chunk))
		return 0;

	if (list_is_last(&chunk->node, &allocator->chunks))
		goto destroy;

	if (chunk_is_used(chunk_get_prev(chunk, allocator)))
		return 0;

	if (chunk_split_aligned(chunk, allocator))
		return 0;
destroy:
	chunk_list_del(chunk, allocator);
unmap:
	unmapped = chunk->mapped_size;
	hyp_allocator_unmap(allocator, (unsigned long)chunk,
			    chunk->mapped_size);

	return unmapped;
}

static int setup_first_chunk(struct hyp_allocator *allocator, size_t size)
{
	int ret;

	ret = hyp_allocator_map(allocator, allocator->start,
				PAGE_ALIGN(chunk_size(size)));
	if (ret)
		return ret;

	return chunk_install((struct chunk_hdr *)allocator->start, size, NULL, allocator);
}

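/*
 * Best-fit search: return the free chunk with the smallest usable size
 * (mapped + unmapped) that can still hold chunk_size(@size), or NULL.
 */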
static struct chunk_hdr *
get_free_chunk(struct hyp_allocator *allocator, size_t size)
{
	struct chunk_hdr *chunk, *best_chunk = NULL;
	size_t best_available_size = SIZE_MAX;

	list_for_each_entry(chunk, &allocator->chunks, node) {
		size_t available_size = chunk->mapped_size +
					chunk_unmapped_size(chunk, allocator);
		if (chunk_is_used(chunk))
			continue;

		if (chunk_size(size) > available_size)
			continue;

		if (best_available_size <= available_size)
			continue;

		best_chunk = chunk;
		best_available_size = available_size;
	}

	return chunk_get(best_chunk);
}

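/*
 * Allocate @size bytes of zeroed memory, or return NULL on failure. The error
 * code is saved per CPU and can be read back with hyp_alloc_errno(); on
 * -ENOMEM, hyp_alloc_missing_donations() reports how many pages the memcache
 * was short of (see hyp_alloc_refill()).
 */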
void *hyp_alloc(size_t size)
{
	struct hyp_allocator *allocator = &hyp_allocator;
	struct chunk_hdr *chunk, *last_chunk;
	unsigned long chunk_addr;
	size_t missing_map;
	int ret = 0;

	/* constrained by chunk_hdr *_size types */
	if (size > U32_MAX) {
		ret = -E2BIG;
		goto end_unlocked;
	}

	size = ALIGN(size ?: MIN_ALLOC, MIN_ALLOC);

	hyp_spin_lock(&allocator->lock);

	if (list_empty(&hyp_allocator.chunks)) {
		ret = setup_first_chunk(allocator, size);
		if (ret)
			goto end;

		chunk = (struct chunk_hdr *)allocator->start;
		goto end;
	}

	chunk = get_free_chunk(allocator, size);
	if (chunk) {
		ret = chunk_recycle(chunk, size, allocator);
		goto end;
	}

	last_chunk = chunk_get(list_last_entry(&allocator->chunks, struct chunk_hdr, node));

	chunk_addr = (unsigned long)last_chunk + chunk_size(last_chunk->alloc_size);
	chunk_addr = chunk_addr_fixup(chunk_addr);
	chunk = (struct chunk_hdr *)chunk_addr;

	missing_map = chunk_needs_mapping(last_chunk,
					  chunk_addr + chunk_size(size) -
						(unsigned long)chunk_data(last_chunk));
	if (missing_map) {
		ret = chunk_inc_map(last_chunk, missing_map, allocator);
		if (ret)
			goto end;
	}

	WARN_ON(chunk_install(chunk, size, last_chunk, allocator));

end:
	hyp_spin_unlock(&allocator->lock);

end_unlocked:
	*(this_cpu_ptr(&hyp_allocator_errno)) = ret;

	/* Enforce zeroing allocated memory */
	if (!ret)
		memset(chunk_data(chunk), 0, size);

	return ret ? NULL : chunk_data(chunk);
}

static size_t hyp_alloc_size(void *addr)
{
	struct hyp_allocator *allocator = &hyp_allocator;
	char *chunk_data = (char *)addr;
	struct chunk_hdr *chunk;
	size_t size;

	hyp_spin_lock(&allocator->lock);
	chunk = chunk_get(container_of(chunk_data, struct chunk_hdr, data));
	size = chunk->alloc_size;
	hyp_spin_unlock(&allocator->lock);

	return size;
}

void *hyp_alloc_account(size_t size, struct kvm *host_kvm)
{
	void *addr = hyp_alloc(size);

	if (addr)
		atomic64_add(hyp_alloc_size(addr),
			     &host_kvm->stat.protected_hyp_mem);
	return addr;
}

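/*
 * Return a chunk to the allocator: mark it free, then merge it with its
 * neighbours when they are free too, so contiguous mapped space is coalesced.
 */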
void hyp_free(void *addr)
{
	struct chunk_hdr *chunk, *prev_chunk, *next_chunk;
	struct hyp_allocator *allocator = &hyp_allocator;
	char *chunk_data = (char *)addr;

	hyp_spin_lock(&allocator->lock);

	chunk = chunk_get(container_of(chunk_data, struct chunk_hdr, data));
	prev_chunk = chunk_get_prev(chunk, allocator);
	next_chunk = chunk_get_next(chunk, allocator);

	chunk->alloc_size = 0;
	chunk_hash_update(chunk);

	if (next_chunk && !chunk_is_used(next_chunk))
		WARN_ON(chunk_merge(next_chunk, allocator));

	if (prev_chunk && !chunk_is_used(prev_chunk))
		WARN_ON(chunk_merge(chunk, allocator));

	hyp_spin_unlock(&allocator->lock);
}

void hyp_free_account(void *addr, struct kvm *host_kvm)
{
	size_t size = hyp_alloc_size(addr);

	hyp_free(addr);

	atomic64_sub(size, &host_kvm->stat.protected_hyp_mem);
}

/*
 * While chunk_try_destroy() actually destroys what it can, this function only
 * helps with estimating how many pages could be reclaimed. However, the same
 * comments apply here.
 */
static bool chunk_destroyable(struct chunk_hdr *chunk,
			      struct hyp_allocator *allocator)
{
	if (chunk_is_used(chunk))
		return false;

	if (!PAGE_ALIGNED(chunk))
		return false;

	if (list_is_first(&chunk->node, &allocator->chunks)) {
		if (list_is_last(&chunk->node, &allocator->chunks))
			return true;

		return false;
	}

	return !chunk_is_used(chunk_get_prev(chunk, allocator));
}

static size_t chunk_reclaimable(struct chunk_hdr *chunk,
				struct hyp_allocator *allocator)
{
	unsigned long start, end = chunk_unmapped_region(chunk);

	/*
	 * This should not happen: chunks are installed at a minimum distance
	 * from the page start.
	 */
	WARN_ON(!PAGE_ALIGNED(end) &&
		(end - PAGE_ALIGN_DOWN(end) < chunk_size(0UL)));

	if (chunk_destroyable(chunk, allocator))
		start = (unsigned long)chunk;
	else
		start = PAGE_ALIGN((unsigned long)chunk + chunk_size(chunk->alloc_size));

	end = PAGE_ALIGN_DOWN(end);
	if (start > end)
		return 0;

	return end - start;
}

int hyp_alloc_reclaimable(void)
{
	struct hyp_allocator *allocator = &hyp_allocator;
	struct chunk_hdr *chunk;
	int reclaimable = 0;
	int cpu;

	hyp_spin_lock(&allocator->lock);

	/*
	 * This is slightly pessimistic: a real reclaim might be able to "fix"
	 * discontiguous unmapped regions by deleting chunks from the top to the
	 * bottom.
	 */
	list_for_each_entry(chunk, &allocator->chunks, node)
		reclaimable += chunk_reclaimable(chunk, allocator) >> PAGE_SHIFT;

	for (cpu = 0; cpu < hyp_nr_cpus; cpu++) {
		struct kvm_hyp_memcache *mc = per_cpu_ptr(&hyp_allocator_mc, cpu);

		reclaimable += mc->nr_pages;
	}

	hyp_spin_unlock(&allocator->lock);

	return reclaimable;
}

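/*
 * Hand up to @target pages back to the host memcache @mc: first drain the
 * per-CPU donation memcaches, then walk the chunk list backwards, destroying
 * or shrinking chunks. Pages that were previously mapped for the allocator are
 * zeroed before being donated back.
 */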
void hyp_alloc_reclaim(struct kvm_hyp_memcache *mc, int target)
{
	struct hyp_allocator *allocator = &hyp_allocator;
	struct kvm_hyp_memcache *alloc_mc;
	struct chunk_hdr *chunk, *tmp;
	int cpu;

	if (target <= 0)
		return;

	hyp_spin_lock(&allocator->lock);

	/* Start by emptying the potentially unused memcaches */
	for (cpu = 0; cpu < hyp_nr_cpus; cpu++) {
		alloc_mc = per_cpu_ptr(&hyp_allocator_mc, cpu);

		while (alloc_mc->nr_pages) {
			unsigned long order;
			void *page = pop_hyp_memcache(alloc_mc, hyp_phys_to_virt, &order);

			WARN_ON(order);
			push_hyp_memcache(mc, page, hyp_virt_to_phys, 0);
			WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(page), 1));

			target--;
			if (target <= 0)
				goto done;
		}
	}

	list_for_each_entry_safe_reverse(chunk, tmp, &allocator->chunks, node) {
		size_t r;

		chunk_hash_validate(chunk);
		r = chunk_try_destroy(chunk, allocator, target << PAGE_SHIFT);
		if (!r)
			r = chunk_dec_map(chunk, allocator, target << PAGE_SHIFT);

		target -= r >> PAGE_SHIFT;
		if (target <= 0)
			break;
	}

	alloc_mc = this_cpu_ptr(&hyp_allocator_mc);
	while (alloc_mc->nr_pages) {
		unsigned long order;
		void *page = pop_hyp_memcache(alloc_mc, hyp_phys_to_virt, &order);

		WARN_ON(order);
		memset(page, 0, PAGE_SIZE);
		kvm_flush_dcache_to_poc(page, PAGE_SIZE);
		push_hyp_memcache(mc, page, hyp_virt_to_phys, 0);
		WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(page), 1));
	}
done:
	hyp_spin_unlock(&allocator->lock);
}

int hyp_alloc_refill(struct kvm_hyp_memcache *host_mc)
{
	struct kvm_hyp_memcache *alloc_mc = this_cpu_ptr(&hyp_allocator_mc);

	return refill_memcache(alloc_mc, host_mc->nr_pages + alloc_mc->nr_pages,
			       host_mc);
}

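/*
 * Reserve a private hyp VA range of @size bytes for the allocator. No page is
 * mapped here; backing pages are installed on demand from the per-CPU
 * memcaches as allocations grow.
 */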
int hyp_alloc_init(size_t size)
{
	struct hyp_allocator *allocator = &hyp_allocator;
	int ret;

	size = PAGE_ALIGN(size);

	/* constrained by chunk_hdr *_size types */
	if (size > U32_MAX)
		return -EINVAL;

	ret = pkvm_alloc_private_va_range(size, &allocator->start);
	if (ret)
		return ret;

	allocator->size = size;
	INIT_LIST_HEAD(&allocator->chunks);
	hyp_spin_lock_init(&allocator->lock);

	return 0;
}

int hyp_alloc_errno(void)
{
	int *errno = this_cpu_ptr(&hyp_allocator_errno);

	return *errno;
}

u8 hyp_alloc_missing_donations(void)
{
	u8 *missing = (this_cpu_ptr(&hyp_allocator_missing_donations));
	u8 __missing = *missing;

	*missing = 0;

	return __missing;
}

struct hyp_mgt_allocator_ops hyp_alloc_ops = {
	.refill = hyp_alloc_refill,
	.reclaim = hyp_alloc_reclaim,
	.reclaimable = hyp_alloc_reclaimable,
};
854