// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug helper used to dump the stage-2 pagetables of the system and their
 * associated permissions.
 *
 * Copyright (C) Google, 2024
 * Author: Sebastian Ene <sebastianene@google.com>
 */
#include <linux/debugfs.h>
#include <linux/kvm_host.h>
#include <linux/seq_file.h>

#include <asm/kvm_pkvm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/ptdump.h>

#define MARKERS_LEN		2
#define KVM_PGTABLE_MAX_LEVELS	(KVM_PGTABLE_LAST_LEVEL + 1)
#define MAX_LOG_PAGES	10

struct kvm_ptdump_guest_state {
	struct kvm		*kvm;
	struct ptdump_pg_state	parser_state;
	struct addr_marker	ipa_marker[MARKERS_LEN];
	struct ptdump_pg_level	level[KVM_PGTABLE_MAX_LEVELS];
	struct ptdump_range	range[MARKERS_LEN];
	struct pkvm_ptdump_log_hdr	*log_pages;
};

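/*
 * Decoding table for the stage-2 descriptor attributes: each entry pairs a
 * mask/value test with the symbol ptdump prints for it. Invalid entries are
 * flagged with "F" (fault), block mappings with "BLK", and the SW0/SW1
 * software bits carry pKVM's page-state annotations.
 */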
static const struct ptdump_prot_bits stage2_pte_bits[] = {
	{
		.mask	= PTE_VALID,
		.val	= PTE_VALID,
		.set	= " ",
		.clear	= "F",
	}, {
		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | PTE_VALID,
		.val	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | PTE_VALID,
		.set	= "R",
		.clear	= " ",
	}, {
		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | PTE_VALID,
		.val	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | PTE_VALID,
		.set	= "W",
		.clear	= " ",
	}, {
		.mask	= KVM_PTE_LEAF_ATTR_HI_S2_XN | PTE_VALID,
		.val	= PTE_VALID,
		.set	= "X",
		.clear	= " ",
	}, {
		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_AF | PTE_VALID,
		.val	= KVM_PTE_LEAF_ATTR_LO_S2_AF | PTE_VALID,
		.set	= "AF",
		.clear	= "  ",
	}, {
		.mask	= PTE_TABLE_BIT | PTE_VALID,
		.val	= PTE_VALID,
		.set	= "BLK",
		.clear	= "   ",
	}, {
		.mask	= KVM_PGTABLE_PROT_SW0 | PTE_VALID,
		.val	= KVM_PGTABLE_PROT_SW0 | PTE_VALID,
		.set	= "SW0",
		.clear	= "   ",
	}, {
		.mask	= KVM_PGTABLE_PROT_SW1 | PTE_VALID,
		.val	= KVM_PGTABLE_PROT_SW1 | PTE_VALID,
		.set	= "SW1",
		.clear	= "   ",
	},
};

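/*
 * Page-table walker callback: hand every leaf entry to the generic ptdump
 * code, which coalesces adjacent ranges with identical attributes.
 */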
static int kvm_ptdump_visitor(const struct kvm_pgtable_visit_ctx *ctx,
			      enum kvm_pgtable_walk_flags visit)
{
	struct ptdump_pg_state *st = ctx->arg;
	struct ptdump_state *pt_st = &st->ptdump;

	note_page(pt_st, ctx->addr, ctx->level, ctx->old);

	return 0;
}

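/*
 * Describe the attribute bits and the combined mask for every level from
 * @start_lvl down to the last translation level.
 */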
static int kvm_ptdump_build_levels(struct ptdump_pg_level *level, u32 start_lvl)
{
	u32 i;
	u64 mask;

	if (WARN_ON_ONCE(start_lvl >= KVM_PGTABLE_LAST_LEVEL))
		return -EINVAL;

	mask = 0;
	for (i = 0; i < ARRAY_SIZE(stage2_pte_bits); i++)
		mask |= stage2_pte_bits[i].mask;

	for (i = start_lvl; i < KVM_PGTABLE_MAX_LEVELS; i++) {
		snprintf(level[i].name, sizeof(level[i].name), "%u", i);

		level[i].num	= ARRAY_SIZE(stage2_pte_bits);
		level[i].bits	= stage2_pte_bits;
		level[i].mask	= mask;
	}

	return 0;
}

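/* A NULL @kvm denotes the host stage-2, which pKVM identifies by handle 0. */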
#define PKVM_HANDLE(kvm) ((kvm) != NULL ? (kvm)->arch.pkvm.handle : 0)

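/*
 * With protected KVM the stage-2 tables are owned by EL2, so the IPA range
 * and the starting level must be queried via hypercall; otherwise they can
 * be read directly from the host-side pgtable structure.
 */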
static u32 ptdump_get_ranges(struct kvm *kvm)
{
	if (!is_protected_kvm_enabled())
		return kvm->arch.mmu.pgt->ia_bits;
	return kvm_call_hyp_nvhe(__pkvm_ptdump, PKVM_HANDLE(kvm), PKVM_PTDUMP_GET_RANGE);
}

static s8 ptdump_get_level(struct kvm *kvm)
{
	if (!is_protected_kvm_enabled())
		return kvm->arch.mmu.pgt->start_level;
	return kvm_call_hyp_nvhe(__pkvm_ptdump, PKVM_HANDLE(kvm), PKVM_PTDUMP_GET_LEVEL);
}

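/*
 * Allocate and initialise the parser state shared by the guest, protected
 * guest and host dumpers.
 */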
static struct kvm_ptdump_guest_state *kvm_ptdump_parser_create(struct kvm *kvm)
{
	struct kvm_ptdump_guest_state *st;
	int ret;
	u32 ia_bits = ptdump_get_ranges(kvm);
	s8 start_level = ptdump_get_level(kvm);

	st = kzalloc(sizeof(struct kvm_ptdump_guest_state), GFP_KERNEL_ACCOUNT);
	if (!st)
		return ERR_PTR(-ENOMEM);

	ret = kvm_ptdump_build_levels(&st->level[0], start_level);
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	st->ipa_marker[0].name		= kvm == NULL ? "Host IPA" : "Guest IPA";
	st->ipa_marker[1].start_address = BIT(ia_bits);
	st->range[0].end		= BIT(ia_bits);

	st->kvm				= kvm;
	st->parser_state = (struct ptdump_pg_state) {
		.marker		= &st->ipa_marker[0],
		.level		= -1,
		.pg_level	= &st->level[0],
		.ptdump.range	= &st->range[0],
		.start_address	= 0,
	};

	return st;
}

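/*
 * seq_file show callback for non-protected guests: walk the whole IPA space
 * under the write side of the mmu_lock so the tables cannot change while
 * they are being dumped.
 */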
static int kvm_ptdump_guest_show(struct seq_file *m, void *unused)
{
	int ret;
	struct kvm_ptdump_guest_state *st = m->private;
	struct kvm *kvm = st->kvm;
	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
	struct ptdump_pg_state *parser_state = &st->parser_state;
	struct kvm_pgtable_walker walker = (struct kvm_pgtable_walker) {
		.cb	= kvm_ptdump_visitor,
		.arg	= parser_state,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	parser_state->seq = m;

	write_lock(&kvm->mmu_lock);
	ret = kvm_pgtable_walk(mmu->pgt, 0, BIT(mmu->pgt->ia_bits), &walker);
	write_unlock(&kvm->mmu_lock);

	return ret;
}

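/*
 * Hold a reference on the VM for as long as the file is open; it is dropped
 * again on ->release().
 */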
static int kvm_ptdump_guest_open(struct inode *m, struct file *file)
{
	struct kvm *kvm = m->i_private;
	struct kvm_ptdump_guest_state *st;
	int ret;

	if (!kvm_get_kvm_safe(kvm))
		return -ENOENT;

	st = kvm_ptdump_parser_create(kvm);
	if (IS_ERR(st)) {
		ret = PTR_ERR(st);
		goto err_with_kvm_ref;
	}

	ret = single_open(file, kvm_ptdump_guest_show, st);
	if (!ret)
		return 0;

	kfree(st);
err_with_kvm_ref:
	kvm_put_kvm(kvm);
	return ret;
}

static int kvm_ptdump_guest_close(struct inode *m, struct file *file)
{
	struct kvm *kvm = m->i_private;
	void *st = ((struct seq_file *)file->private_data)->private;

	kfree(st);
	kvm_put_kvm(kvm);

	return single_release(m, file);
}

static const struct file_operations kvm_ptdump_guest_fops = {
	.open		= kvm_ptdump_guest_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= kvm_ptdump_guest_close,
};

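/*
 * "ipa_range" and "stage2_levels" expose the geometry of the stage-2 tables
 * so that userspace can size and interpret the dump correctly.
 */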
static int kvm_pgtable_range_show(struct seq_file *m, void *unused)
{
	struct kvm *kvm = m->private;
	u32 ia_bits = ptdump_get_ranges(kvm);

	seq_printf(m, "%2u\n", ia_bits);
	return 0;
}

static int kvm_pgtable_levels_show(struct seq_file *m, void *unused)
{
	struct kvm *kvm = m->private;
	s8 start_level = ptdump_get_level(kvm);

	seq_printf(m, "%1d\n", KVM_PGTABLE_MAX_LEVELS - start_level);
	return 0;
}

static int kvm_pgtable_debugfs_open(struct inode *m, struct file *file,
				    int (*show)(struct seq_file *, void *))
{
	struct kvm *kvm = m->i_private;
	int ret;

	if (!kvm_get_kvm_safe(kvm))
		return -ENOENT;

	ret = single_open(file, show, kvm);
	if (ret < 0)
		kvm_put_kvm(kvm);
	return ret;
}

static int kvm_pgtable_range_open(struct inode *m, struct file *file)
{
	return kvm_pgtable_debugfs_open(m, file, kvm_pgtable_range_show);
}

static int kvm_pgtable_levels_open(struct inode *m, struct file *file)
{
	return kvm_pgtable_debugfs_open(m, file, kvm_pgtable_levels_show);
}

static int kvm_pgtable_debugfs_close(struct inode *m, struct file *file)
{
	struct kvm *kvm = m->i_private;

	kvm_put_kvm(kvm);
	return single_release(m, file);
}

static const struct file_operations kvm_pgtable_range_fops = {
	.open		= kvm_pgtable_range_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= kvm_pgtable_debugfs_close,
};

static const struct file_operations kvm_pgtable_levels_fops = {
	.open		= kvm_pgtable_levels_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= kvm_pgtable_debugfs_close,
};

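/*
 * Chain one more zeroed log page onto the list shared with the hypervisor;
 * the pages are linked by PFN through their headers. If @num_pages is
 * non-NULL it reports the length of the existing chain (at least 1), which
 * lets the dump path roughly double the pool on each retry.
 */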
static int pkvm_ptdump_alloc_page(struct pkvm_ptdump_log_hdr **log_pages, size_t *num_pages)
{
	struct pkvm_ptdump_log_hdr *p, *it;
	size_t len = 1;

	p = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!p)
		return -ENOMEM;

	p->pfn_next = INVALID_PTDUMP_PFN;

	if (*log_pages == NULL) {
		*log_pages = p;
	} else {
		it = *log_pages;
		while (it->pfn_next != INVALID_PTDUMP_PFN) {
			it = pfn_to_kaddr(it->pfn_next);
			len++;
		}
		it->pfn_next = virt_to_pfn(p);
	}

	if (num_pages)
		*num_pages = len;

	return 0;
}

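/* Free the whole chain of log pages, following the PFN links. */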
static void pkvm_ptdump_free_pages(struct pkvm_ptdump_log_hdr *log_pages)
{
	struct pkvm_ptdump_log_hdr *tmp;
	u64 log_pfn;

	if (log_pages == NULL)
		return;

	do {
		log_pfn = log_pages->pfn_next;
		tmp = pfn_to_kaddr(log_pfn);
		free_page((unsigned long)log_pages);
		log_pages = tmp;
	} while (log_pfn != INVALID_PTDUMP_PFN);
}

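/*
 * Rebuild a skeletal stage-2 PTE from the compact log record emitted by the
 * hypervisor, so that it can be fed to the common note_page() printer.
 */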
static u64 pkvm_ptdump_unpack_pte(struct pkvm_ptdump_log *log)
{
	return FIELD_PREP(KVM_PTE_VALID, log->valid) |
		FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R, log->r) |
		FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W, log->w) |
		FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, log->xn) |
		FIELD_PREP(KVM_PTE_TYPE, log->table) |
		FIELD_PREP(KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1, log->page_state);
}

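/*
 * Ask the hypervisor to walk the stage-2 tables into the shared log pages,
 * growing the pool and retrying on -ENOMEM, then replay the completed log
 * through note_page() to render the dump.
 */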
static int pkvm_ptdump_show(struct seq_file *m, void *unused)
{
	struct kvm_ptdump_guest_state *st = m->private;
	struct kvm *kvm = st->kvm;
	struct ptdump_pg_state *parser_state = &st->parser_state;
	struct ptdump_state *pt_st = &parser_state->ptdump;
	int ret, i;
	struct pkvm_ptdump_log *log = NULL;
	struct pkvm_ptdump_log_hdr *it;
	size_t num_pages;

	parser_state->seq = m;
	parser_state->level = -1;
	parser_state->start_address = 0;

retry_dump:
	ret = kvm_call_hyp_nvhe(__pkvm_ptdump, PKVM_HANDLE(kvm),
				PKVM_PTDUMP_WALK_RANGE, st->log_pages);
	if (ret == -ENOMEM) {
		ret = pkvm_ptdump_alloc_page(&st->log_pages, &num_pages);
		if (ret)
			return ret;

		for (i = 0; i < num_pages; i++) {
			ret = pkvm_ptdump_alloc_page(&st->log_pages, NULL);
			if (ret)
				return ret;
		}
		goto retry_dump;
	} else if (ret != 0) {
		return ret;
	}

	it = st->log_pages;
	for (;;) {
		for (i = 0; i < it->w_index; i += sizeof(struct pkvm_ptdump_log)) {
			log = (void *)it + sizeof(struct pkvm_ptdump_log_hdr) + i;
			note_page(pt_st, ((unsigned long)log->pfn) << PAGE_SHIFT, log->level,
				  pkvm_ptdump_unpack_pte(log));
		}

		if (it->pfn_next == INVALID_PTDUMP_PFN)
			break;

		it = pfn_to_kaddr(it->pfn_next);
	}

	return 0;
}

static int pkvm_ptdump_guest_open(struct inode *m, struct file *file)
{
	struct kvm *kvm = m->i_private;
	struct kvm_ptdump_guest_state *st;
	int ret;

	if (!kvm_get_kvm_safe(kvm))
		return -ENOENT;

	st = kvm_ptdump_parser_create(kvm);
	if (IS_ERR(st)) {
		ret = PTR_ERR(st);
		goto err_with_kvm_ref;
	}

	pkvm_ptdump_alloc_page(&st->log_pages, NULL);

	ret = single_open(file, pkvm_ptdump_show, st);
	if (!ret)
		return 0;

	pkvm_ptdump_free_pages(st->log_pages);
	kfree(st);
err_with_kvm_ref:
	kvm_put_kvm(kvm);
	return ret;
}

static int pkvm_ptdump_guest_close(struct inode *m, struct file *file)
{
	struct kvm *kvm = m->i_private;
	struct kvm_ptdump_guest_state *st = ((struct seq_file *)file->private_data)->private;

	pkvm_ptdump_free_pages(st->log_pages);
	kfree(st);
	kvm_put_kvm(kvm);

	return single_release(m, file);
}

static const struct file_operations pkvm_ptdump_guest_fops = {
	.open		= pkvm_ptdump_guest_open,
	.read		= seq_read,
	.release	= pkvm_ptdump_guest_close,
};

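/*
 * Create the per-VM debugfs entries; protected VMs use the log-based dumper
 * since their page tables are only reachable from EL2.
 */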
void kvm_s2_ptdump_create_debugfs(struct kvm *kvm)
{
	debugfs_create_file("stage2_page_tables", 0400, kvm->debugfs_dentry,
			    kvm, is_protected_kvm_enabled() ?
			    &pkvm_ptdump_guest_fops : &kvm_ptdump_guest_fops);
	debugfs_create_file("ipa_range", 0400, kvm->debugfs_dentry, kvm,
			    &kvm_pgtable_range_fops);
	debugfs_create_file("stage2_levels", 0400, kvm->debugfs_dentry,
			    kvm, &kvm_pgtable_levels_fops);
}

static int kvm_host_pgtable_range_open(struct inode *m, struct file *file)
{
	return single_open(file, kvm_pgtable_range_show, NULL);
}

static int kvm_host_pgtable_levels_open(struct inode *m, struct file *file)
{
	return single_open(file, kvm_pgtable_levels_show, NULL);
}

static const struct file_operations kvm_host_pgtable_range_fops = {
	.open		= kvm_host_pgtable_range_open,
	.read		= seq_read,
	.release	= single_release,
};

static const struct file_operations kvm_host_pgtable_levels_fops = {
	.open		= kvm_host_pgtable_levels_open,
	.read		= seq_read,
	.release	= single_release,
};

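/*
 * The host stage-2 dump reuses the pKVM log machinery with a NULL kvm; a
 * fixed pool of log pages is preallocated up front.
 */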
static int kvm_ptdump_host_open(struct inode *m, struct file *file)
{
	struct kvm_ptdump_guest_state *st;
	int ret;

	st = kvm_ptdump_parser_create(NULL);
	if (IS_ERR(st))
		return PTR_ERR(st);

	for (int i = 0; i < MAX_LOG_PAGES; i++)
		pkvm_ptdump_alloc_page(&st->log_pages, NULL);

	ret = single_open(file, pkvm_ptdump_show, st);
	if (!ret)
		return 0;

	pkvm_ptdump_free_pages(st->log_pages);
	kfree(st);
	return ret;
}

static int kvm_ptdump_host_close(struct inode *m, struct file *file)
{
	struct kvm_ptdump_guest_state *st = ((struct seq_file *)file->private_data)->private;

	pkvm_ptdump_free_pages(st->log_pages);
	kfree(st);

	return single_release(m, file);
}

static const struct file_operations kvm_ptdump_host_fops = {
	.open		= kvm_ptdump_host_open,
	.read		= seq_read,
	.release	= kvm_ptdump_host_close,
};

void kvm_s2_ptdump_host_create_debugfs(void)
{
	struct dentry *kvm_debugfs_dir = debugfs_lookup("kvm", NULL);

	debugfs_create_file("host_stage2_page_tables", 0400, kvm_debugfs_dir,
			    NULL, &kvm_ptdump_host_fops);
	debugfs_create_file("ipa_range", 0400, kvm_debugfs_dir, NULL,
			    &kvm_host_pgtable_range_fops);
	debugfs_create_file("stage2_levels", 0400, kvm_debugfs_dir,
			    NULL, &kvm_host_pgtable_levels_fops);
}