#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}
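
/*
 * Example (editor's illustration, not part of this file): the fields that
 * task_mem() prints above surface in /proc/<pid>/status. A minimal
 * userspace sketch that reads back the peak and current RSS for the
 * calling process, assuming only the field names from the seq_printf()
 * format string above:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		char line[128];
 *		FILE *f = fopen("/proc/self/status", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f)) {
 *			// VmHWM is the hiwater_rss snapshot, VmRSS the
 *			// current total_rss, as computed in task_mem().
 *			if (!strncmp(line, "VmHWM:", 6) ||
 *			    !strncmp(line, "VmRSS:", 6))
 *				fputs(line, stdout);
 *		}
 *		fclose(f);
 *		return 0;
 *	}
 */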

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

#ifdef CONFIG_NUMA
/*
 * These functions are for numa_maps but called in generic **maps seq_file
 * ->start(), ->stop() ops.
 *
 * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
 * Each mempolicy object is controlled by reference counting. The problem here
 * is how to avoid accessing a dead mempolicy object.
 *
 * Because we're holding mmap_sem while reading seq_file, it's safe to access
 * each vma's mempolicy: no vma will drop its ref to a mempolicy.
 *
 * A task's mempolicy (task->mempolicy) has different behavior. task->mempolicy
 * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
 * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
 * guarantee that the task never exits under us. But taking task_lock() around
 * get_vma_policy() causes a lock-order problem.
 *
 * To access task->mempolicy without a lock, we take a reference count on the
 * object pointed to by task->mempolicy and remember it. This guarantees that
 * task->mempolicy points to a live object or NULL in numa_maps accesses.
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = task->mempolicy;
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
{
	const char __user *name = vma_get_anon_name(vma);
	struct mm_struct *mm = vma->vm_mm;

	unsigned long page_start_vaddr;
	unsigned long page_offset;
	unsigned long num_pages;
	unsigned long max_len = NAME_MAX;
	int i;

	page_start_vaddr = (unsigned long)name & PAGE_MASK;
	page_offset = (unsigned long)name - page_start_vaddr;
	num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);

	seq_puts(m, "[anon:");

	for (i = 0; i < num_pages; i++) {
		int len;
		int write_len;
		const char *kaddr;
		long pages_pinned;
		struct page *page;

		pages_pinned = get_user_pages(current, mm, page_start_vaddr,
				1, 0, 0, &page, NULL);
		if (pages_pinned < 1) {
			seq_puts(m, "<fault>]");
			return;
		}

		kaddr = (const char *)kmap(page);
		len = min(max_len, PAGE_SIZE - page_offset);
		write_len = strnlen(kaddr + page_offset, len);
		seq_write(m, kaddr + page_offset, write_len);
		kunmap(page);
		put_page(page);

		/* if strnlen hit a null terminator then we're done */
		if (write_len != len)
			break;

		max_len -= len;
		page_offset = 0;
		page_start_vaddr += PAGE_SIZE;
	}

	seq_putc(m, ']');
}

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		release_task_mempolicy(priv);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per-syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr so that we hit the
	 * mmap_cache most of the time. last_addr is zero at the beginning
	 * and also after an lseek, and is -1 after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_access(priv->task, PTRACE_MODE_READ);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task->mm);
	priv->tail_vma = tail_vma;
	hold_task_mempolicy(priv);
	/* Start with the last_addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check that the requested vma index is within range and do a
	 * sequential scan up to it.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	release_task_mempolicy(priv);
	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	if (!IS_ERR(vma))
		vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	int len;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	end = vma->vm_end;

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
		goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = vm_is_stack(task, vma, is_pid);

		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				pad_len_spaces(m, len);
				seq_printf(m, "[stack:%d]", tid);
			}
			goto done;
		}

		if (vma_get_anon_name(vma)) {
			pad_len_spaces(m, len);
			seq_print_vma_name(m, vma);
		}
	}

done:
	if (name) {
		pad_len_spaces(m, len);
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma, is_pid);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep accumulated division errors low, we use a 64-bit fixed-point
 * pss counter: (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12

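/*
 * Worked example (editor's sketch, not kernel code): with PSS_SHIFT = 12
 * and a 4K page, one page shared by three processes contributes
 * (4096 << 12) / 3 = 5592405 fixed-point units per mapper, about 1365
 * bytes each once shifted back down; the three shares sum to 16777215,
 * which shifts back to 4095 bytes, so the rounding error here is a
 * single byte. A standalone demonstration of the accumulation performed
 * in smaps_pte_entry() below:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define PSS_SHIFT 12
 *	#define PAGE_SIZE 4096ULL
 *
 *	int main(void)
 *	{
 *		uint64_t pss = 0;
 *		int i;
 *
 *		for (i = 0; i < 3; i++)	// three mappers, one page each
 *			pss += (PAGE_SIZE << PSS_SHIFT) / 3;
 *		// prints 4095: one byte of accumulated division error
 *		printf("pss = %llu bytes\n",
 *		       (unsigned long long)(pss >> PSS_SHIFT));
 *		return 0;
 *	}
 */
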
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	unsigned long nonlinear;
	u64 pss;
	u64 swap_pss;
};

static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pgoff_t pgoff = linear_page_index(vma, addr);
	struct page *page = NULL;
	int mapcount;

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	} else if (pte_file(ptent)) {
		if (pte_to_pgoff(ptent) != pgoff)
			mss->nonlinear += ptent_size;
	}

	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	if (page->index != pgoff)
		mss->nonlinear += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
		spin_unlock(&walk->mm->page_table_lock);
		mss->anonymous_thp += HPAGE_PMD_SIZE;
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_NONLINEAR)]	= "nl",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "SwapPss:        %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (vma->vm_flags & VM_NONLINEAR)
		seq_printf(m, "Nonlinear:      %8lu kB\n",
				mss.nonlinear >> 10);

	show_smap_vma_flags(m, vma);

	if (vma_get_anon_name(vma)) {
		seq_puts(m, "Name:           ");
		seq_print_vma_name(m, vma);
		seq_putc(m, '\n');
	}

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

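/*
 * Example (editor's sketch, not part of this file): a common consumer of
 * the smaps output produced by show_smap() above is a tool that sums the
 * Pss: lines to estimate a process's real memory footprint. A minimal
 * userspace version, assuming only the field layout printed above:
 *
 *	#include <stdio.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		char path[64], line[256];
 *		unsigned long kb, total = 0;
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path), "/proc/%s/smaps",
 *			 argc > 1 ? argv[1] : "self");
 *		f = fopen(path, "r");
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f)) {
 *			// one Pss: line per vma, in kB, as printed above
 *			if (sscanf(line, "Pss: %lu kB", &kb) == 1)
 *				total += kb;
 *		}
 *		fclose(f);
 *		printf("total PSS: %lu kB\n", total);
 *		return 0;
 *	}
 */
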
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(vma, addr, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3
#define CLEAR_REFS_MM_HIWATER_RSS 5

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int type;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &type);
	if (rv < 0)
		return rv;
	if ((type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED) &&
	    type != CLEAR_REFS_MM_HIWATER_RSS)
		return -EINVAL;
	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};

		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			down_write(&mm->mmap_sem);
			reset_mm_hiwater_rss(mm);
			up_write(&mm->mmap_sem);
			goto out_mm;
		}

		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects
			 * file-mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

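/*
 * Usage example (editor's sketch, not part of this file): the
 * referenced-bit clearing above is typically driven from userspace by
 * writing one of the CLEAR_REFS_* values, then re-reading the
 * Referenced: fields in smaps to see which pages a workload touched in
 * the interval. Assuming only the interface defined in this file:
 *
 *	#include <stdio.h>
 *
 *	// Reset the referenced bits for all pages of the current process
 *	// (equivalent to: echo 1 > /proc/self/clear_refs).
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/self/clear_refs", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("1", f);		// CLEAR_REFS_ALL
 *		fclose(f);
 *		return 0;
 *	}
 */
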
typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;
	pagemap_entry_t *buffer;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_FILE             PM_STATUS(1LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

static inline pagemap_entry_t make_pme(u64 val)
{
	return (pagemap_entry_t) { .pme = val };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	return err;
}

static void pte_to_pagemap_entry(pagemap_entry_t *pme,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame, flags;
	struct page *page = NULL;

	if (pte_present(pte)) {
		frame = pte_pfn(pte);
		flags = PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags = PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	} else {
		*pme = make_pme(PM_NOT_PRESENT);
		return;
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;

	*pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
					pmd_t pmd, int offset)
{
	/*
	 * Currently the pmd for a thp is always present because a thp cannot
	 * be swapped out, migrated, or HWPOISONed (it is split in such cases
	 * instead). This if-check is just to prepare for a future
	 * implementation.
	 */
	if (pmd_present(pmd))
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
						pmd_t pmd, int offset)
{
}
#endif

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
		for (; addr != end; addr += PAGE_SIZE) {
			unsigned long offset;

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			thp_pmd_to_pagemap_entry(&pme, *pmd, offset);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
		}
		spin_unlock(&walk->mm->page_table_lock);
		return err;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	for (; addr != end; addr += PAGE_SIZE) {

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end)) {
			vma = find_vma(walk->mm, addr);
			pme = make_pme(PM_NOT_PRESENT);
		}

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pte_to_pagemap_entry(&pme, vma, addr, *pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
					pte_t pte, int offset)
{
	if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}

/* This function walks within one hugetlb entry per call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	pagemap_entry_t pme;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		huge_pte_to_pagemap_entry(&pme, *pte, offset);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
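
/*
 * Example (editor's sketch, not part of this file): a userspace reader
 * following the llseek pattern described above. It seeks to the entry
 * for one virtual address and decodes it using the bit layout documented
 * in the preceding comment. Error handling is minimal; a 4K page size
 * and the 64-bit entry format above are assumed.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uint64_t entry;
 *		unsigned long vaddr = (unsigned long)&entry; // any mapped address
 *		int fd = open("/proc/self/pagemap", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;	// open needs CAP_SYS_ADMIN on this kernel
 *		// one 8-byte entry per virtual page, indexed by vaddr/PAGE_SIZE
 *		if (pread(fd, &entry, sizeof(entry),
 *			  (vaddr / 4096) * 8) != sizeof(entry))
 *			return 1;
 *		printf("present=%d swapped=%d pfn=0x%llx\n",
 *		       (int)(entry >> 63) & 1,	// bit 63: page present
 *		       (int)(entry >> 62) & 1,	// bit 62: page swapped
 *		       (unsigned long long)(entry & ((1ULL << 55) - 1)));
 *		close(fd);
 *		return 0;
 *	}
 */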
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_task;

	mm = mm_access(task, PTRACE_MODE_READ);
	ret = PTR_ERR(mm);
	if (!mm || IS_ERR(mm))
		goto out_free;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_mm;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_mm:
	mmput(mm);
out_free:
	kfree(pm.buffer);
out_task:
	put_task_struct(task);
out:
	return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
	/* do not disclose physical addresses: attack vector */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	struct vm_area_struct *vma;
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	md = walk->private;

	if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, md->vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(&walk->mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (pte_none(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct task_struct *task = proc_priv->task;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	md->vma = vma;

	walk.hugetlb_entry = gather_hugetbl_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	pol = get_vma_policy(task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else {
		pid_t tid = vm_is_stack(task, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_printf(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_printf(m, " huge");

	walk_page_range(vma->vm_start, vma->vm_end, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');

	if (m->count < m->size)
		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}

static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	struct numa_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->proc_maps.pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */