#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

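/*
 * Report memory usage for /proc/<pid>/status: peak/current virtual size,
 * locked/pinned pages, RSS high-water mark, and data/stack/text/lib,
 * page-table and swap sizes.  The counters are kept in pages; the
 * "<< (PAGE_SHIFT-10)" shifts convert pages to kB (with 4K pages,
 * PAGE_SHIFT - 10 == 2, so one page becomes 4 kB).
 */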
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

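/*
 * Print the user-supplied name of an anonymous vma as "[anon:<name>]".
 * The name string lives in the task's user address space, so it is
 * pinned one page at a time with get_user_pages(), kmap()ed and copied
 * out until a NUL terminator or NAME_MAX bytes, whichever comes first.
 */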
static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
{
	const char __user *name = vma_get_anon_name(vma);
	struct mm_struct *mm = vma->vm_mm;

	unsigned long page_start_vaddr;
	unsigned long page_offset;
	unsigned long num_pages;
	unsigned long max_len = NAME_MAX;
	int i;

	page_start_vaddr = (unsigned long)name & PAGE_MASK;
	page_offset = (unsigned long)name - page_start_vaddr;
	num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);

	seq_puts(m, "[anon:");

	for (i = 0; i < num_pages; i++) {
		int len;
		int write_len;
		const char *kaddr;
		long pages_pinned;
		struct page *page;

		pages_pinned = get_user_pages(current, mm, page_start_vaddr,
				1, 0, 0, &page, NULL);
		if (pages_pinned < 1) {
			seq_puts(m, "<fault>]");
			return;
		}

		kaddr = (const char *)kmap(page);
		len = min(max_len, PAGE_SIZE - page_offset);
		write_len = strnlen(kaddr + page_offset, len);
		seq_write(m, kaddr + page_offset, write_len);
		kunmap(page);
		put_page(page);

		/* if strnlen hit a null terminator then we're done */
		if (write_len != len)
			break;

		max_len -= len;
		page_offset = 0;
		page_start_vaddr += PAGE_SIZE;
	}

	seq_putc(m, ']');
}

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

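/*
 * seq_file iterator for the per-task map files.  m_start() takes the
 * target mm's mmap_sem and returns the vma at *pos, using m->version
 * (which caches the start address of the last vma shown) as a hint so
 * a sequential read does not rescan the vma list for every chunk; the
 * gate vma, if any, is emitted last.  m_next()/m_stop() advance the
 * cursor and drop the references.
 */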
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_for_maps(priv->task);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task->mm);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	if (!IS_ERR(vma))
		vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

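/*
 * Emit one line of /proc/<pid>/maps for @vma, e.g. (values are
 * illustrative only):
 *
 *   00400000-00452000 r-xp 00000000 08:02 173521   /usr/bin/foo
 *
 * i.e. start-end, rwx plus shared/private bits, file offset, device
 * major:minor, inode, then the pathname or a pseudo-name ([heap],
 * [stack], [stack:tid], [vdso], [anon:<name>]).  The %n in the format
 * records how many characters were written so pad_len_spaces() can
 * align the name column.
 */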
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	int len;
	const char *name = NULL;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
		goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = vm_is_stack(task, vma, is_pid);

		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				pad_len_spaces(m, len);
				seq_printf(m, "[stack:%d]", tid);
			}
			goto done;
		}

		if (vma_get_anon_name(vma)) {
			pad_len_spaces(m, len);
			seq_print_vma_name(m, vma);
		}
	}

done:
	if (name) {
		pad_len_spaces(m, len);
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma, is_pid);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64-bit
 * fixed-point pss counter, so (pss >> PSS_SHIFT) is the real byte
 * count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
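/*
 * Worked example of the fixed-point accounting (assuming 4K pages):
 * a page mapped by three processes contributes
 *   (4096 << PSS_SHIFT) / 3 = 5592405
 * to pss, and pss >> PSS_SHIFT recovers 1365 bytes, i.e. a third of
 * the page with under one byte of rounding error per page.
 */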

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	u64 pss;
	u64 swap_pss;
};

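/*
 * Account one pte (or, for a transparent huge page, one pmd treated as
 * a single ptent_size-sized entry) into mem_size_stats.  Pages mapped
 * by two or more processes are split between shared_clean/shared_dirty
 * and contribute size/mapcount to pss; swapped-out ptes are charged to
 * swap and, divided by swp_swapcount(), to swap_pss.
 */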
static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	struct page *page = NULL;
	int mapcount;

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	}

	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
		spin_unlock(&walk->mm->page_table_lock);
		mss->anonymous_thp += HPAGE_PMD_SIZE;
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

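/*
 * Emit one /proc/<pid>/smaps record: the maps line followed by the
 * accumulated statistics.  pss and swap_pss are fixed-point byte
 * counts, so ">> (10 + PSS_SHIFT)" converts them to kB in one shift.
 */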
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "SwapPss:        %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (vma_get_anon_name(vma)) {
		seq_puts(m, "Name:           ");
		seq_print_vma_name(m, vma);
		seq_putc(m, '\n');
	}

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

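/*
 * Walker for /proc/<pid>/clear_refs: drop the accessed/"young" bit from
 * every present pte in the range and clear PG_referenced on the backing
 * page, so that subsequent references show up again in smaps
 * "Referenced".  Huge pmds are split first so the loop only ever sees
 * normal ptes.
 */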
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(walk->mm, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3
#define CLEAR_REFS_MM_HIWATER_RSS 5

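/*
 * clear_refs_write() accepts "1" (all vmas), "2" (anonymous mappings
 * only), "3" (file-backed mappings only) or "5" (reset the RSS
 * high-water mark); any other value, including 4, is rejected with
 * -EINVAL.
 */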
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int type;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &type);
	if (rv < 0)
		return rv;
	if ((type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED) &&
	    type != CLEAR_REFS_MM_HIWATER_RSS)
		return -EINVAL;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};

		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			down_write(&mm->mmap_sem);
			reset_mm_hiwater_rss(mm);
			up_write(&mm->mmap_sem);
			goto out_mm;
		}

		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

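/*
 * A present page is thus encoded as
 *   PM_PFRAME(pfn) | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT
 * i.e. the pfn in bits 0-54, the page shift in bits 55-60 and the
 * status in bits 61-63 (PM_PRESENT sets bit 63, PM_SWAP bit 62).
 */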
static inline pagemap_entry_t make_pme(u64 val)
{
	return (pagemap_entry_t) { .pme = val };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	return err;
}

static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte)
{
	if (is_swap_pte(pte))
		*pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte))
				| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP);
	else if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte))
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
					pmd_t pmd, int offset)
{
	/*
	 * Currently the pmd for a thp is always present because a thp
	 * cannot be swapped out, migrated, or HWPOISONed (it is split in
	 * such cases instead).  This if-check just prepares for a future
	 * implementation.
	 */
	if (pmd_present(pmd))
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
						pmd_t pmd, int offset)
{
}
#endif

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
		for (; addr != end; addr += PAGE_SIZE) {
			unsigned long offset;

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			thp_pmd_to_pagemap_entry(&pme, *pmd, offset);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
		}
		spin_unlock(&walk->mm->page_table_lock);
		return err;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	for (; addr != end; addr += PAGE_SIZE) {

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end)) {
			vma = find_vma(walk->mm, addr);
			pme = make_pme(PM_NOT_PRESENT);
		}

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pte_to_pagemap_entry(&pme, *pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
					pte_t pte, int offset)
{
	if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}

/* This function walks within one hugetlb entry in a single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	pagemap_entry_t pme;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		huge_pte_to_pagemap_entry(&pme, *pte, offset);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
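/*
 * Illustrative userspace lookup (a sketch, not part of this file; pid
 * 1234 and the va/page_size variables are placeholders).  The entry for
 * virtual address va lives at byte offset (va / page_size) * 8, and
 * opening the file requires CAP_SYS_ADMIN, see pagemap_open() below:
 *
 *	uint64_t entry;
 *	int fd = open("/proc/1234/pagemap", O_RDONLY);
 *	pread(fd, &entry, 8, (va / page_size) * 8);
 *	if (entry >> 63)				// page present
 *		pfn = entry & ((1ULL << 55) - 1);	// PM_PFRAME_MASK
 */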
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_task;

	mm = mm_for_maps(task);
	ret = PTR_ERR(mm);
	if (!mm || IS_ERR(mm))
		goto out_free;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_mm;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_mm:
	mmput(mm);
out_free:
	kfree(pm.buffer);
out_task:
	put_task_struct(task);
out:
	return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
	/* do not disclose physical addresses: attack vector */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	struct vm_area_struct *vma;
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

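/*
 * Accumulate one page's worth of numa_maps statistics: per-state totals
 * (dirty, swapcache, active, writeback, anon), the largest mapcount
 * seen so far, and a per-node count of pages.
 */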
static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
		return NULL;

	return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	md = walk->private;

	if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, md->vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(&walk->mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (pte_none(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
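/*
 * Example /proc/<pid>/numa_maps line (values are illustrative only):
 *
 *   00400000 default file=/usr/bin/foo mapped=12 active=10 N0=12
 */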
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	md->vma = vma;

	walk.hugetlb_entry = gather_hugetbl_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	pol = get_vma_policy(proc_priv->task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol, 0);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else {
		pid_t tid = vm_is_stack(proc_priv->task, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_printf(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_printf(m, " huge");

	walk_page_range(vma->vm_start, vma->vm_end, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_HIGH_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');

	if (m->count < m->size)
		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}

static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	struct numa_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->proc_maps.pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */