#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
}

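/*
 * Report the total size of the address space: the sum of the extents
 * of all VMAs in the tree.
 */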
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		vsize += vma->vm_end - vma->vm_start;
	}
	up_read(&mm->mmap_sem);
	return vsize;
}

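/*
 * Compute the figures for /proc/pid/statm: kernel object overhead for
 * the mm, its VMAs and regions, plus the text and data segment sizes,
 * all expressed in pages.
 */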
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	up_read(&mm->mmap_sem);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}

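/*
 * Decide whether a VMA should be labelled "[stack]": it is if it
 * covers the address recorded as the initial process stack.
 */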
static int is_stack(struct proc_maps_private *priv,
		    struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack".  It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= mm->start_stack &&
		vma->vm_end >= mm->start_stack;
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
			  int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct proc_maps_private *priv = m->private;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "");
	} else if (mm && is_stack(priv, vma)) {
		seq_pad(m, ' ');
		seq_printf(m, "[stack]");
	}

	seq_putc(m, '\n');
	return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p, int is_pid)
{
	struct rb_node *p = _p;

	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb),
			      is_pid);
}

static int show_pid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 1);
}

static int show_tid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 0);
}

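/*
 * seq_file start operation: pin the task, take a reference on the mm
 * and its mmap semaphore, then walk forward to the VMA at position
 * *pos in the tree.
 */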
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;

	up_read(&mm->mmap_sem);
	mmput(mm);
	return NULL;
}

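/*
 * seq_file stop operation: drop the lock and references taken by
 * m_start().
 */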
static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(_vml)) {
		up_read(&priv->mm->mmap_sem);
		mmput(priv->mm);
	}
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

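/* seq_file next operation: advance to the next VMA in the tree */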
static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
	struct rb_node *p = _p;

	(*pos)++;
	return p ? rb_next(p) : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

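/*
 * Set up the seq_file private state and take a reference on the
 * target's mm, performing the ptrace access check on the way.
 */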
static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;

	priv = __seq_open_private(file, ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

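/*
 * Drop the mm reference taken at open time and tear down the seq_file
 * state.
 */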
static int map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_tid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};