// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <asm/kvm_hyp.h>
#include <nvhe/gfp.h>

u64 __hyp_vmemmap;

/*
 * Index the hyp_vmemmap to find a potential buddy page, but make no assumption
 * about its current state.
 *
 * Example buddy-tree for a 4-page physically contiguous pool:
 *
 *                 o : Page 3
 *                /
 *               o-o : Page 2
 *              /
 *             /   o : Page 1
 *            /   /
 *           o---o-o : Page 0
 *    Order  2   1 0
 *
 * Example of requests on this pool:
 *   __find_buddy_nocheck(pool, page 0, order 0) => page 1
 *   __find_buddy_nocheck(pool, page 0, order 1) => page 2
 *   __find_buddy_nocheck(pool, page 1, order 0) => page 0
 *   __find_buddy_nocheck(pool, page 2, order 0) => page 3
 */
static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
					     struct hyp_page *p,
					     u8 order)
{
	phys_addr_t addr = hyp_page_to_phys(p);

	addr ^= (PAGE_SIZE << order);

	/*
	 * Don't return a page outside the pool range -- it belongs to
	 * something else and may not be mapped in hyp_vmemmap.
	 */
	if (addr < pool->range_start || addr >= pool->range_end)
		return NULL;

	return hyp_phys_to_page(addr);
}
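
/*
 * Worked example of the XOR trick above (a sketch assuming 4KiB pages and a
 * pool whose first page sits at physical address 0x0; neither is guaranteed):
 *
 *   page 0 @ 0x0000, order 0:  0x0000 ^ (0x1000 << 0) = 0x1000 -> page 1
 *   page 0 @ 0x0000, order 1:  0x0000 ^ (0x1000 << 1) = 0x2000 -> page 2
 *   page 2 @ 0x2000, order 0:  0x2000 ^ (0x1000 << 0) = 0x3000 -> page 3
 *
 * Flipping bit (PAGE_SHIFT + order) of the physical address always yields the
 * sibling block of the same order, so no lookup structure is needed.
 */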

/* Find a buddy page currently available for allocation */
static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
					   struct hyp_page *p,
					   u8 order)
{
	struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);

	if (!buddy || buddy->order != order || buddy->refcount)
		return NULL;

	return buddy;
}

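/*
 * All three checks matter: a NULL buddy lies outside the pool, a buddy whose
 * order differs is either allocated at another order or a non-head page
 * (HYP_NO_ORDER), and a non-zero refcount marks a page that is currently
 * allocated even when its order happens to match.
 */
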
/*
 * Pages that are available for allocation are tracked in free-lists, so we use
 * the pages themselves to store the list nodes to avoid wasting space. As the
 * allocator always returns zeroed pages (which are zeroed on the hyp_put_page()
 * path to optimize allocation speed), we also need to clean up the list node in
 * each page when we take it out of the list.
 */
static inline void page_remove_from_list(struct hyp_page *p)
{
	struct list_head *node = hyp_page_to_virt(p);

	__list_del_entry(node);
	memset(node, 0, sizeof(*node));
}

static inline void page_add_to_list(struct hyp_page *p, struct list_head *head)
{
	struct list_head *node = hyp_page_to_virt(p);

	INIT_LIST_HEAD(node);
	list_add_tail(node, head);
}

static inline struct hyp_page *node_to_page(struct list_head *node)
{
	return hyp_virt_to_page(node);
}

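/*
 * Resulting layout of a free page (a sketch assuming 64-bit pointers, i.e.
 * sizeof(struct list_head) == 16):
 *
 *   hyp_page_to_virt(p): [ node.next (8B) | node.prev (8B) | zeroes ... ]
 *
 * Since the rest of the page was zeroed in __hyp_attach_page(), clearing the
 * 16-byte node in page_remove_from_list() is enough to hand out a fully
 * zeroed page.
 */
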
static void __hyp_attach_page(struct hyp_pool *pool,
			      struct hyp_page *p)
{
	phys_addr_t phys = hyp_page_to_phys(p);
	struct hyp_page *buddy;
	u8 order = p->order;

	memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);

	if (phys < pool->range_start || phys >= pool->range_end)
		goto insert;

	/*
	 * Only the first struct hyp_page of a high-order page (otherwise known
	 * as the 'head') should have p->order set. The non-head pages should
	 * have p->order = HYP_NO_ORDER. Here @p may no longer be the head
	 * after coalescing, so make sure to mark it HYP_NO_ORDER proactively.
	 */
	p->order = HYP_NO_ORDER;
	for (; (order + 1) < pool->max_order; order++) {
		buddy = __find_buddy_avail(pool, p, order);
		if (!buddy)
			break;

		/* Take the buddy out of its list, and coalesce with @p */
		page_remove_from_list(buddy);
		buddy->order = HYP_NO_ORDER;
		p = min(p, buddy);
	}

insert:
	/* Mark the new head, and insert it */
	p->order = order;
	page_add_to_list(p, &pool->free_area[order]);
}

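/*
 * Coalescing walkthrough on the 4-page pool from the header comment (a
 * sketch; assume page 1 is free at order 0 and pages 2+3 already form a free
 * order-1 block when page 0 is freed):
 *
 *   __hyp_attach_page(pool, page 0)
 *     order 0: buddy is page 1, free at order 0 -> merge, head stays page 0
 *              (min(page 0, page 1))
 *     order 1: buddy is page 2, free at order 1 -> merge, head stays page 0
 *     order 2: top order for this pool, so page 0 is inserted in
 *              free_area[2], covering the whole pool again
 */
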
static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
					   struct hyp_page *p,
					   u8 order)
{
	struct hyp_page *buddy;

	page_remove_from_list(p);
	while (p->order > order) {
		/*
		 * The buddy of order n - 1 currently has HYP_NO_ORDER as it
		 * is covered by a higher-level page (whose head is @p). Use
		 * __find_buddy_nocheck() to find it and inject it in
		 * free_area[n - 1], effectively splitting @p in half.
		 */
		p->order--;
		buddy = __find_buddy_nocheck(pool, p, p->order);
		buddy->order = p->order;
		page_add_to_list(buddy, &pool->free_area[buddy->order]);
	}

	return p;
}

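/*
 * Splitting walkthrough (a sketch: extracting an order-0 page from the
 * order-2 block rebuilt above):
 *
 *   __hyp_extract_page(pool, page 0, order 0)
 *     p->order == 2: p->order = 1, buddy = page 2 -> free_area[1]
 *     p->order == 1: p->order = 0, buddy = page 1 -> free_area[0]
 *     p->order == 0: loop exits, page 0 is returned at the requested order
 */
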
static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
{
	if (hyp_page_ref_dec_and_test(p))
		__hyp_attach_page(pool, p);
}

/*
 * Changes to the buddy tree and page refcounts must be done with the hyp_pool
 * lock held. If a refcount change requires an update to the buddy tree (e.g.
 * hyp_put_page()), both operations must be done within the same critical
 * section to guarantee transient states (e.g. a page with null refcount but
 * not yet attached to a free list) can't be observed by well-behaved readers.
 */
void hyp_put_page(struct hyp_pool *pool, void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_spin_lock(&pool->lock);
	__hyp_put_page(pool, p);
	hyp_spin_unlock(&pool->lock);
}

void hyp_get_page(struct hyp_pool *pool, void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_spin_lock(&pool->lock);
	hyp_page_ref_inc(p);
	hyp_spin_unlock(&pool->lock);
}

void hyp_split_page(struct hyp_page *p)
{
	u8 order = p->order;
	unsigned int i;

	p->order = 0;
	for (i = 1; i < (1 << order); i++) {
		struct hyp_page *tail = p + i;

		tail->order = 0;
		hyp_set_page_refcounted(tail);
	}
}

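/*
 * For example (a sketch): splitting an order-2 page yields four order-0
 * pages. The head keeps the reference the caller already holds; each tail
 * gains a reference of its own, so every one of the four pages must
 * eventually be released with its own hyp_put_page() call.
 */
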
void *hyp_alloc_pages(struct hyp_pool *pool, u8 order)
{
	struct hyp_page *p;
	u8 i = order;

	hyp_spin_lock(&pool->lock);

	/* Look for a high-enough-order page */
	while (i < pool->max_order && list_empty(&pool->free_area[i]))
		i++;
	if (i >= pool->max_order) {
		hyp_spin_unlock(&pool->lock);
		return NULL;
	}

	/* Extract it from the tree at the right order */
	p = node_to_page(pool->free_area[i].next);
	p = __hyp_extract_page(pool, p, order);

	hyp_set_page_refcounted(p);
	hyp_spin_unlock(&pool->lock);

	return hyp_page_to_virt(p);
}

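/*
 * Typical caller pattern (a minimal sketch, not taken from this file; 'pool'
 * is assumed to have been set up with hyp_pool_init()):
 *
 *   void *va = hyp_alloc_pages(&pool, 0);  // one zeroed page, ref == 1
 *
 *   if (!va)
 *           return -ENOMEM;
 *   hyp_get_page(&pool, va);               // share it:   ref 1 -> 2
 *   hyp_put_page(&pool, va);               // one owner:  ref 2 -> 1
 *   hyp_put_page(&pool, va);               // last owner: ref 1 -> 0,
 *                                          // page rejoins the buddy tree
 */
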
int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
		  unsigned int reserved_pages)
{
	phys_addr_t phys = hyp_pfn_to_phys(pfn);
	struct hyp_page *p;
	int i;

	hyp_spin_lock_init(&pool->lock);
	pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
	for (i = 0; i < pool->max_order; i++)
		INIT_LIST_HEAD(&pool->free_area[i]);
	pool->range_start = phys;
	pool->range_end = phys + (nr_pages << PAGE_SHIFT);

	/* Init the vmemmap portion */
	p = hyp_phys_to_page(phys);
	for (i = 0; i < nr_pages; i++)
		hyp_set_page_refcounted(&p[i]);

	/* Attach the unused pages to the buddy tree */
	for (i = reserved_pages; i < nr_pages; i++)
		__hyp_put_page(pool, &p[i]);

	return 0;
}
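
/*
 * Sizing example (a sketch assuming PAGE_SHIFT == 12 and MAX_ORDER >= 10):
 * with nr_pages == 512, get_order((512 + 1) << 12) == 10, so orders 0..9 are
 * usable and order 9 covers 2^9 == 512 pages -- the whole pool can coalesce
 * back into one free block. The "+ 1" keeps the top order large enough for
 * that even when nr_pages is an exact power of two.
 */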