/*
 * Copyright 2016, Rashmica Gupta, IBM Corp.
 *
 * This traverses the kernel pagetables and dumps the
 * information about the used sections of memory to
 * /sys/kernel/debug/kernel_page_tables.
 *
 * Derived from the arm64 implementation:
 * Copyright (c) 2014, The Linux Foundation, Laura Abbott.
 * (C) Copyright 2008 Intel Corporation, Arjan van de Ven.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <linux/const.h>
#include <asm/page.h>
#include <asm/pgalloc.h>

#ifdef CONFIG_PPC32
#define KERN_VIRT_START	0
#endif

/*
 * To visualise what is happening,
 *
 *  - PTRS_PER_P** = how many entries there are in the corresponding P**
 *  - P**_SHIFT = how many bits of the address we use to index into the
 * corresponding P**
 *  - P**_SIZE is how much memory we can access through the table - not the
 * size of the table itself.
 * P**={PGD, PUD, PMD, PTE}
 *
 *
 * Each entry of the PGD points to a PUD. Each entry of a PUD points to a
 * PMD. Each entry of a PMD points to a PTE. And every PTE entry points to
 * a page.
 *
 * In the case where there are only 3 levels, the PUD is folded into the
 * PGD: every PUD has only one entry which points to the PMD.
 *
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the PTE entries. When the continuity is broken it then
 * dumps out a description of the range - i.e. PTEs that are virtually
 * contiguous with the same PTE flags are chunked together. This is to make
 * it clear how different areas of the kernel virtual memory are used.
 *
 */
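/*
 * A worked example, illustrative only: the real shifts and sizes depend
 * on the configured page size and MMU, so these numbers describe a
 * hypothetical 4-level layout with 4K pages and 9 index bits per level,
 * not any particular PPC configuration:
 *
 *	PAGE_SHIFT  = 12 -> each PTE maps  1UL << 12 =   4K
 *	PMD_SHIFT   = 21 -> PMD_SIZE   =   1UL << 21 =   2M
 *	PUD_SHIFT   = 30 -> PUD_SIZE   =   1UL << 30 =   1G
 *	PGDIR_SHIFT = 39 -> PGDIR_SIZE =   1UL << 39 = 512G
 *
 * and PTRS_PER_{PTE,PMD,PUD,PGD} = 512, since each level consumes 9 bits
 * of the virtual address.
 */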
struct pg_state {
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned long start_pa;
	unsigned long last_pa;
	unsigned int level;
	u64 current_flags;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

static struct addr_marker address_markers[] = {
	{ 0,	"Start of kernel VM" },
	{ 0,	"vmalloc() Area" },
	{ 0,	"vmalloc() End" },
#ifdef CONFIG_PPC64
	{ 0,	"isa I/O start" },
	{ 0,	"isa I/O end" },
	{ 0,	"phb I/O start" },
	{ 0,	"phb I/O end" },
	{ 0,	"I/O remap start" },
	{ 0,	"I/O remap end" },
	{ 0,	"vmemmap start" },
#else
	{ 0,	"Early I/O remap start" },
	{ 0,	"Early I/O remap end" },
#ifdef CONFIG_NOT_COHERENT_CACHE
	{ 0,	"Consistent mem start" },
	{ 0,	"Consistent mem end" },
#endif
#ifdef CONFIG_HIGHMEM
	{ 0,	"Highmem PTEs start" },
	{ 0,	"Highmem PTEs end" },
#endif
	{ 0,	"Fixmap start" },
	{ 0,	"Fixmap end" },
#endif
	{ -1,	NULL },
};
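/*
 * Each marker is emitted as a section header when the walk first crosses
 * its start_address, so the dump reads as (output sketch only; addresses
 * and ordering depend on the platform):
 *
 *	---[ Start of kernel VM ]---
 *	...ranges...
 *	---[ vmalloc() Area ]---
 *	...ranges...
 */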

struct flag_info {
	u64		mask;
	u64		val;
	const char	*set;
	const char	*clear;
	bool		is_val;
	int		shift;
};

static const struct flag_info flag_array[] = {
	{
		.mask	= _PAGE_USER | _PAGE_PRIVILEGED,
		.val	= _PAGE_USER,
		.set	= "user",
		.clear	= "    ",
	}, {
		.mask	= _PAGE_RW | _PAGE_RO | _PAGE_NA,
		.val	= _PAGE_RW,
		.set	= "rw",
	}, {
		.mask	= _PAGE_RW | _PAGE_RO | _PAGE_NA,
		.val	= _PAGE_RO,
		.set	= "ro",
	}, {
#if _PAGE_NA != 0
		.mask	= _PAGE_RW | _PAGE_RO | _PAGE_NA,
		.val	= _PAGE_NA,
		.set	= "na",
	}, {
#endif
		.mask	= _PAGE_EXEC,
		.val	= _PAGE_EXEC,
		.set	= " X ",
		.clear	= "   ",
	}, {
		.mask	= _PAGE_PTE,
		.val	= _PAGE_PTE,
		.set	= "pte",
		.clear	= "   ",
	}, {
		.mask	= _PAGE_PRESENT,
		.val	= _PAGE_PRESENT,
		.set	= "present",
		.clear	= "       ",
	}, {
#ifdef CONFIG_PPC_BOOK3S_64
		.mask	= H_PAGE_HASHPTE,
		.val	= H_PAGE_HASHPTE,
#else
		.mask	= _PAGE_HASHPTE,
		.val	= _PAGE_HASHPTE,
#endif
		.set	= "hpte",
		.clear	= "    ",
	}, {
#ifndef CONFIG_PPC_BOOK3S_64
		.mask	= _PAGE_GUARDED,
		.val	= _PAGE_GUARDED,
		.set	= "guarded",
		.clear	= "       ",
	}, {
#endif
		.mask	= _PAGE_DIRTY,
		.val	= _PAGE_DIRTY,
		.set	= "dirty",
		.clear	= "     ",
	}, {
		.mask	= _PAGE_ACCESSED,
		.val	= _PAGE_ACCESSED,
		.set	= "accessed",
		.clear	= "        ",
	}, {
#ifndef CONFIG_PPC_BOOK3S_64
		.mask	= _PAGE_WRITETHRU,
		.val	= _PAGE_WRITETHRU,
		.set	= "write through",
		.clear	= "             ",
	}, {
#endif
#ifndef CONFIG_PPC_BOOK3S_64
		.mask	= _PAGE_NO_CACHE,
		.val	= _PAGE_NO_CACHE,
		.set	= "no cache",
		.clear	= "        ",
	}, {
#else
		.mask	= _PAGE_NON_IDEMPOTENT,
		.val	= _PAGE_NON_IDEMPOTENT,
		.set	= "non-idempotent",
		.clear	= "              ",
	}, {
		.mask	= _PAGE_TOLERANT,
		.val	= _PAGE_TOLERANT,
		.set	= "tolerant",
		.clear	= "        ",
	}, {
#endif
#ifdef CONFIG_PPC_BOOK3S_64
		.mask	= H_PAGE_BUSY,
		.val	= H_PAGE_BUSY,
		.set	= "busy",
	}, {
#ifdef CONFIG_PPC_64K_PAGES
		.mask	= H_PAGE_COMBO,
		.val	= H_PAGE_COMBO,
		.set	= "combo",
	}, {
		.mask	= H_PAGE_4K_PFN,
		.val	= H_PAGE_4K_PFN,
		.set	= "4K_pfn",
	}, {
#else /* CONFIG_PPC_64K_PAGES */
		.mask	= H_PAGE_F_GIX,
		.val	= H_PAGE_F_GIX,
		.set	= "f_gix",
		.is_val	= true,
		.shift	= H_PAGE_F_GIX_SHIFT,
	}, {
		.mask	= H_PAGE_F_SECOND,
		.val	= H_PAGE_F_SECOND,
		.set	= "f_second",
	}, {
#endif /* CONFIG_PPC_64K_PAGES */
#endif
		.mask	= _PAGE_SPECIAL,
		.val	= _PAGE_SPECIAL,
		.set	= "special",
	}
};

struct pgtable_level {
	const struct flag_info *flag;
	size_t num;
	u64 mask;
};

static struct pgtable_level pg_level[] = {
	{
	}, { /* pgd */
		.flag	= flag_array,
		.num	= ARRAY_SIZE(flag_array),
	}, { /* pud */
		.flag	= flag_array,
		.num	= ARRAY_SIZE(flag_array),
	}, { /* pmd */
		.flag	= flag_array,
		.num	= ARRAY_SIZE(flag_array),
	}, { /* pte */
		.flag	= flag_array,
		.num	= ARRAY_SIZE(flag_array),
	},
};

static void dump_flag_info(struct pg_state *st, const struct flag_info
		*flag, u64 pte, int num)
{
	unsigned int i;

	for (i = 0; i < num; i++, flag++) {
		const char *s = NULL;
		u64 val;

		/* flag not defined so don't check it */
		if (flag->mask == 0)
			continue;
		/* Some 'flags' are actually values */
		if (flag->is_val) {
			val = pte & flag->val;
			if (flag->shift)
				val = val >> flag->shift;
			seq_printf(st->seq, "  %s:%llx", flag->set, val);
		} else {
			if ((pte & flag->mask) == flag->val)
				s = flag->set;
			else
				s = flag->clear;
			if (s)
				seq_printf(st->seq, "  %s", s);
		}
		st->current_flags &= ~flag->mask;
	}
	if (st->current_flags != 0)
		seq_printf(st->seq, "  unknown flags:%llx", st->current_flags);
}
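/*
 * For instance, a PTE with _PAGE_RW, _PAGE_PRESENT, _PAGE_DIRTY and
 * _PAGE_ACCESSED set renders roughly as below (a sketch only: set flags
 * print their .set string, cleared flags print their .clear padding, or
 * nothing at all when .clear is NULL):
 *
 *	  rw       present       dirty  accessed
 */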

static void dump_addr(struct pg_state *st, unsigned long addr)
{
	static const char units[] = "KMGTPE";
	const char *unit = units;
	unsigned long delta;

#ifdef CONFIG_PPC64
	seq_printf(st->seq, "0x%016lx-0x%016lx ", st->start_address, addr - 1);
	seq_printf(st->seq, "0x%016lx ", st->start_pa);
#else
	seq_printf(st->seq, "0x%08lx-0x%08lx ", st->start_address, addr - 1);
	seq_printf(st->seq, "0x%08lx ", st->start_pa);
#endif

	delta = (addr - st->start_address) >> 10;
	/* Work out what appropriate unit to use */
	while (!(delta & 1023) && unit[1]) {
		delta >>= 10;
		unit++;
	}
	seq_printf(st->seq, "%9lu%c", delta, *unit);
}
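/*
 * Combined with the flags, one merged range comes out looking like this
 * (illustrative values, not real output): a 2M span prints as 2048K
 * scaled up to "2M" by the loop above, giving
 *
 * 0xc000000000000000-0xc0000000001fffff 0x0000000000000000         2M  rw  present
 */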

static void note_page(struct pg_state *st, unsigned long addr,
	       unsigned int level, u64 val)
{
	u64 flag = val & pg_level[level].mask;
	u64 pa = val & PTE_RPN_MASK;

	/* At first no level is set */
	if (!st->level) {
		st->level = level;
		st->current_flags = flag;
		st->start_address = addr;
		st->start_pa = pa;
		st->last_pa = pa;
		seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	/*
	 * Dump the section of virtual memory when:
	 *   - the PTE flags from one entry to the next differ.
	 *   - we change levels in the tree.
	 *   - the address is in a different section of memory and is thus
	 *   used for a different purpose, regardless of the flags.
	 *   - the pa of this page is not adjacent to the last inspected page
	 */
	} else if (flag != st->current_flags || level != st->level ||
		   addr >= st->marker[1].start_address ||
		   pa != st->last_pa + PAGE_SIZE) {

		/* Check the PTE flags */
		if (st->current_flags) {
			dump_addr(st, addr);

			/* Dump all the flags */
			if (pg_level[st->level].flag)
				dump_flag_info(st, pg_level[st->level].flag,
					  st->current_flags,
					  pg_level[st->level].num);

			seq_putc(st->seq, '\n');
		}

		/*
		 * Address indicates we have passed the end of the
		 * current section of virtual memory
		 */
		while (addr >= st->marker[1].start_address) {
			st->marker++;
			seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
		}
		st->start_address = addr;
		st->start_pa = pa;
		st->last_pa = pa;
		st->current_flags = flag;
		st->level = level;
	} else {
		st->last_pa = pa;
	}
}

static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		addr = start + i * PAGE_SIZE;
		note_page(st, addr, 4, pte_val(*pte));
	}
}

static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
		if (!pmd_none(*pmd) && !pmd_huge(*pmd))
			/* pmd exists */
			walk_pte(st, pmd, addr);
		else
			note_page(st, addr, 3, pmd_val(*pmd));
	}
}

static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
	pud_t *pud = pud_offset(pgd, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		addr = start + i * PUD_SIZE;
		if (!pud_none(*pud) && !pud_huge(*pud))
			/* pud exists */
			walk_pmd(st, pud, addr);
		else
			note_page(st, addr, 2, pud_val(*pud));
	}
}

static void walk_pagetables(struct pg_state *st)
{
	pgd_t *pgd = pgd_offset_k(0UL);
	unsigned int i;
	unsigned long addr;

	addr = st->start_address;

	/*
	 * Traverse the linux pagetable structure and dump pages that are in
	 * the hash pagetable.
	 */
	for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
		if (!pgd_none(*pgd) && !pgd_huge(*pgd))
			/* pgd exists */
			walk_pud(st, pgd, addr);
		else
			note_page(st, addr, 1, pgd_val(*pgd));
	}
}

static void populate_markers(void)
{
	int i = 0;

	address_markers[i++].start_address = PAGE_OFFSET;
	address_markers[i++].start_address = VMALLOC_START;
	address_markers[i++].start_address = VMALLOC_END;
#ifdef CONFIG_PPC64
	address_markers[i++].start_address = ISA_IO_BASE;
	address_markers[i++].start_address = ISA_IO_END;
	address_markers[i++].start_address = PHB_IO_BASE;
	address_markers[i++].start_address = PHB_IO_END;
	address_markers[i++].start_address = IOREMAP_BASE;
	address_markers[i++].start_address = IOREMAP_END;
#ifdef CONFIG_PPC_BOOK3S_64
	address_markers[i++].start_address = H_VMEMMAP_BASE;
#else
	address_markers[i++].start_address = VMEMMAP_BASE;
#endif
#else /* !CONFIG_PPC64 */
	address_markers[i++].start_address = ioremap_bot;
	address_markers[i++].start_address = IOREMAP_TOP;
#ifdef CONFIG_NOT_COHERENT_CACHE
	address_markers[i++].start_address = IOREMAP_TOP;
	address_markers[i++].start_address = IOREMAP_TOP +
					     CONFIG_CONSISTENT_SIZE;
#endif
#ifdef CONFIG_HIGHMEM
	address_markers[i++].start_address = PKMAP_BASE;
	address_markers[i++].start_address = PKMAP_ADDR(LAST_PKMAP);
#endif
	address_markers[i++].start_address = FIXADDR_START;
	address_markers[i++].start_address = FIXADDR_TOP;
#endif /* CONFIG_PPC64 */
}

static int ptdump_show(struct seq_file *m, void *v)
{
	struct pg_state st = {
		.seq = m,
		.marker = address_markers,
	};

	if (radix_enabled())
		st.start_address = PAGE_OFFSET;
	else
		st.start_address = KERN_VIRT_START;

	/* Traverse kernel page tables */
	walk_pagetables(&st);
	note_page(&st, 0, 0, 0);
	return 0;
}

static int ptdump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
	.open		= ptdump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void build_pgtable_complete_mask(void)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].flag)
			for (j = 0; j < pg_level[i].num; j++)
				pg_level[i].mask |= pg_level[i].flag[j].mask;
}

static int ptdump_init(void)
{
	struct dentry *debugfs_file;

	populate_markers();
	build_pgtable_complete_mask();
	debugfs_file = debugfs_create_file("kernel_page_tables", 0400, NULL,
			NULL, &ptdump_fops);
	return debugfs_file ? 0 : -ENOMEM;
}
device_initcall(ptdump_init);
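
/*
 * Typical usage, assuming debugfs is mounted at /sys/kernel/debug and
 * the kernel was built with this dumper enabled:
 *
 *	# mount -t debugfs none /sys/kernel/debug	(if not already mounted)
 *	# cat /sys/kernel/debug/kernel_page_tables
 *
 * The file is created mode 0400, so reading it requires root.
 */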