/* drivers/misc/uid_sys_stats.c
 *
 * Copyright (C) 2014 - 2015 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/atomic.h>
#include <linux/cpufreq_times.h>
#include <linux/err.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rtmutex.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

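/*
 * Per-uid statistics live in a single global hash table keyed by uid.
 * uid_lock serializes every reader and writer of the table and of the
 * entries hanging off it.
 */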
#define UID_HASH_BITS	10
DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);

static DEFINE_RT_MUTEX(uid_lock);
static struct proc_dir_entry *cpu_parent;
static struct proc_dir_entry *io_parent;
static struct proc_dir_entry *proc_parent;

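/*
 * I/O counters, filled from a task's io accounting (task->ioac):
 * bytes read from / written to storage, characters moved through the
 * read/write syscalls, and the number of fsync calls.
 */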
struct io_stats {
	u64 read_bytes;
	u64 write_bytes;
	u64 rchar;
	u64 wchar;
	u64 fsync;
};

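/*
 * Five io_stats slots are kept per entry:
 *   UID_STATE_FOREGROUND/BACKGROUND - deltas banked while in that state
 *   UID_STATE_TOTAL_CURR            - snapshot of live tasks, rebuilt on
 *                                     every update
 *   UID_STATE_TOTAL_LAST            - TOTAL_CURR from the previous update
 *   UID_STATE_DEAD_TASKS            - I/O of tasks that exited since the
 *                                     previous update
 */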
#define UID_STATE_FOREGROUND	0
#define UID_STATE_BACKGROUND	1
#define UID_STATE_BUCKET_SIZE	2

#define UID_STATE_TOTAL_CURR	2
#define UID_STATE_TOTAL_LAST	3
#define UID_STATE_DEAD_TASKS	4
#define UID_STATE_SIZE		5

#define MAX_TASK_COMM_LEN 256

struct task_entry {
	char comm[MAX_TASK_COMM_LEN];
	pid_t pid;
	struct io_stats io[UID_STATE_SIZE];
	struct hlist_node hash;
};

struct uid_entry {
	uid_t uid;
	u64 utime;
	u64 stime;
	u64 active_utime;
	u64 active_stime;
	int state;
	struct io_stats io[UID_STATE_SIZE];
	struct hlist_node hash;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
	DECLARE_HASHTABLE(task_entries, UID_HASH_BITS);
#endif
};

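/*
 * cancelled_write_bytes can exceed write_bytes (e.g. when a task
 * truncates dirty pagecache before it is written back), so clamp the
 * difference at zero rather than letting the subtraction wrap.
 */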
static u64 compute_write_bytes(struct task_struct *task)
{
	if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes)
		return 0;

	return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
}

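/*
 * Fold the I/O performed since the previous update (the live-task delta
 * plus anything charged to dead tasks) into the bucket for the entry's
 * current state, then roll io_curr into io_last for the next round.
 */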
static void compute_io_bucket_stats(struct io_stats *io_bucket,
					struct io_stats *io_curr,
					struct io_stats *io_last,
					struct io_stats *io_dead)
{
	/* A task could have moved to another uid group, but its io_last
	 * in the previous uid group could still be positive.
	 * Therefore, do an overflow check before each update.
	 */
	int64_t delta;

	delta = io_curr->read_bytes + io_dead->read_bytes -
		io_last->read_bytes;
	io_bucket->read_bytes += delta > 0 ? delta : 0;
	delta = io_curr->write_bytes + io_dead->write_bytes -
		io_last->write_bytes;
	io_bucket->write_bytes += delta > 0 ? delta : 0;
	delta = io_curr->rchar + io_dead->rchar - io_last->rchar;
	io_bucket->rchar += delta > 0 ? delta : 0;
	delta = io_curr->wchar + io_dead->wchar - io_last->wchar;
	io_bucket->wchar += delta > 0 ? delta : 0;
	delta = io_curr->fsync + io_dead->fsync - io_last->fsync;
	io_bucket->fsync += delta > 0 ? delta : 0;

	io_last->read_bytes = io_curr->read_bytes;
	io_last->write_bytes = io_curr->write_bytes;
	io_last->rchar = io_curr->rchar;
	io_last->wchar = io_curr->wchar;
	io_last->fsync = io_curr->fsync;

	memset(io_dead, 0, sizeof(struct io_stats));
}

#ifdef CONFIG_UID_SYS_STATS_DEBUG
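/*
 * Debug-only per-task accounting: each uid_entry additionally tracks its
 * tasks in a pid-keyed hash of task_entry structures.  Build a name of
 * the form "<comm, space-padded to TASK_COMM_LEN><exe path> <cmdline>"
 * into task_entry->comm.
 */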
static void get_full_task_comm(struct task_entry *task_entry,
		struct task_struct *task)
{
	int i = 0, offset = 0, len = 0;
	/* save one byte for the terminating null character */
	int unused_len = MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1;
	char buf[unused_len];
	struct mm_struct *mm = task->mm;

	/* fill the first TASK_COMM_LEN bytes with the thread name */
	__get_task_comm(task_entry->comm, TASK_COMM_LEN, task);
	i = strlen(task_entry->comm);
	while (i < TASK_COMM_LEN)
		task_entry->comm[i++] = ' ';

	/* next, the executable file name */
	if (mm) {
		down_read(&mm->mmap_sem);
		if (mm->exe_file) {
			char *pathname = d_path(&mm->exe_file->f_path, buf,
					unused_len);

			if (!IS_ERR(pathname)) {
				len = strlcpy(task_entry->comm + i, pathname,
						unused_len);
				i += len;
				task_entry->comm[i++] = ' ';
				unused_len--;
			}
		}
		up_read(&mm->mmap_sem);
	}
	unused_len -= len;

	/* fill the rest with the command line, replacing each null
	 * or newline character between args in argv with a space
	 */
	len = get_cmdline(task, buf, unused_len);
	while (offset < len) {
		if (buf[offset] != '\0' && buf[offset] != '\n')
			task_entry->comm[i++] = buf[offset];
		else
			task_entry->comm[i++] = ' ';
		offset++;
	}

	/* strip trailing whitespace, in case an arg was memset to
	 * zero before being reset in userspace
	 */
	while (task_entry->comm[i-1] == ' ')
		i--;
	task_entry->comm[i] = '\0';
}

static struct task_entry *find_task_entry(struct uid_entry *uid_entry,
		struct task_struct *task)
{
	struct task_entry *task_entry;

	hash_for_each_possible(uid_entry->task_entries, task_entry, hash,
			task->pid) {
		if (task->pid == task_entry->pid) {
			/* if thread name changed, update the entire command */
			int len = strnchr(task_entry->comm, ' ', TASK_COMM_LEN)
				- task_entry->comm;

			if (strncmp(task_entry->comm, task->comm, len))
				get_full_task_comm(task_entry, task);
			return task_entry;
		}
	}
	return NULL;
}

static struct task_entry *find_or_register_task(struct uid_entry *uid_entry,
		struct task_struct *task)
{
	struct task_entry *task_entry;
	pid_t pid = task->pid;

	task_entry = find_task_entry(uid_entry, task);
	if (task_entry)
		return task_entry;

	task_entry = kzalloc(sizeof(struct task_entry), GFP_ATOMIC);
	if (!task_entry)
		return NULL;

	get_full_task_comm(task_entry, task);

	task_entry->pid = pid;
	hash_add(uid_entry->task_entries, &task_entry->hash, (unsigned int)pid);

	return task_entry;
}

static void remove_uid_tasks(struct uid_entry *uid_entry)
{
	struct task_entry *task_entry;
	unsigned long bkt_task;
	struct hlist_node *tmp_task;

	hash_for_each_safe(uid_entry->task_entries, bkt_task,
			tmp_task, task_entry, hash) {
		hash_del(&task_entry->hash);
		kfree(task_entry);
	}
}

static void set_io_uid_tasks_zero(struct uid_entry *uid_entry)
{
	struct task_entry *task_entry;
	unsigned long bkt_task;

	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
		memset(&task_entry->io[UID_STATE_TOTAL_CURR], 0,
			sizeof(struct io_stats));
	}
}

static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
		struct task_struct *task, int slot)
{
	struct task_entry *task_entry = find_or_register_task(uid_entry, task);
	struct io_stats *task_io_slot;

	/* find_or_register_task() allocates with GFP_ATOMIC and can fail */
	if (!task_entry)
		return;
	task_io_slot = &task_entry->io[slot];

	task_io_slot->read_bytes += task->ioac.read_bytes;
	task_io_slot->write_bytes += compute_write_bytes(task);
	task_io_slot->rchar += task->ioac.rchar;
	task_io_slot->wchar += task->ioac.wchar;
	task_io_slot->fsync += task->ioac.syscfs;
}

static void compute_io_uid_tasks(struct uid_entry *uid_entry)
{
	struct task_entry *task_entry;
	unsigned long bkt_task;

	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
		compute_io_bucket_stats(&task_entry->io[uid_entry->state],
					&task_entry->io[UID_STATE_TOTAL_CURR],
					&task_entry->io[UID_STATE_TOTAL_LAST],
					&task_entry->io[UID_STATE_DEAD_TASKS]);
	}
}

static void show_io_uid_tasks(struct seq_file *m, struct uid_entry *uid_entry)
{
	struct task_entry *task_entry;
	unsigned long bkt_task;

	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
		/* fields are comma-separated because a task comm can
		 * contain spaces
		 */
		seq_printf(m, "task,%s,%lu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n",
				task_entry->comm,
				(unsigned long)task_entry->pid,
				task_entry->io[UID_STATE_FOREGROUND].rchar,
				task_entry->io[UID_STATE_FOREGROUND].wchar,
				task_entry->io[UID_STATE_FOREGROUND].read_bytes,
				task_entry->io[UID_STATE_FOREGROUND].write_bytes,
				task_entry->io[UID_STATE_BACKGROUND].rchar,
				task_entry->io[UID_STATE_BACKGROUND].wchar,
				task_entry->io[UID_STATE_BACKGROUND].read_bytes,
				task_entry->io[UID_STATE_BACKGROUND].write_bytes,
				task_entry->io[UID_STATE_FOREGROUND].fsync,
				task_entry->io[UID_STATE_BACKGROUND].fsync);
	}
}
#else
static void remove_uid_tasks(struct uid_entry *uid_entry) {}
static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {}
static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
		struct task_struct *task, int slot) {}
static void compute_io_uid_tasks(struct uid_entry *uid_entry) {}
static void show_io_uid_tasks(struct seq_file *m,
		struct uid_entry *uid_entry) {}
#endif

static struct uid_entry *find_uid_entry(uid_t uid)
{
	struct uid_entry *uid_entry;

	hash_for_each_possible(hash_table, uid_entry, hash, uid) {
		if (uid_entry->uid == uid)
			return uid_entry;
	}
	return NULL;
}

static struct uid_entry *find_or_register_uid(uid_t uid)
{
	struct uid_entry *uid_entry;

	uid_entry = find_uid_entry(uid);
	if (uid_entry)
		return uid_entry;

	uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
	if (!uid_entry)
		return NULL;

	uid_entry->uid = uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
	hash_init(uid_entry->task_entries);
#endif
	hash_add(hash_table, &uid_entry->hash, uid);

	return uid_entry;
}

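/*
 * Emit one line per uid, "<uid>: <user time> <system time>", in
 * milliseconds: time already banked from exited tasks plus the time of
 * the uid's currently live threads.
 */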
static int uid_cputime_show(struct seq_file *m, void *v)
{
	struct uid_entry *uid_entry = NULL;
	struct task_struct *task, *temp;
	struct user_namespace *user_ns = current_user_ns();
	u64 utime;
	u64 stime;
	unsigned long bkt;
	uid_t uid;

	rt_mutex_lock(&uid_lock);

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		uid_entry->active_stime = 0;
		uid_entry->active_utime = 0;
	}

	rcu_read_lock();
	do_each_thread(temp, task) {
		uid = from_kuid_munged(user_ns, task_uid(task));
		if (!uid_entry || uid_entry->uid != uid)
			uid_entry = find_or_register_uid(uid);
		if (!uid_entry) {
			rcu_read_unlock();
			rt_mutex_unlock(&uid_lock);
			pr_err("%s: failed to find the uid_entry for uid %d\n",
				__func__, uid);
			return -ENOMEM;
		}
		/* avoid double accounting of dying threads */
		if (!(task->flags & PF_EXITING)) {
			task_cputime_adjusted(task, &utime, &stime);
			uid_entry->active_utime += utime;
			uid_entry->active_stime += stime;
		}
	} while_each_thread(temp, task);
	rcu_read_unlock();

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		u64 total_utime = uid_entry->utime +
							uid_entry->active_utime;
		u64 total_stime = uid_entry->stime +
							uid_entry->active_stime;
		seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
			ktime_to_ms(total_utime), ktime_to_ms(total_stime));
	}

	rt_mutex_unlock(&uid_lock);
	return 0;
}

static int uid_cputime_open(struct inode *inode, struct file *file)
{
	return single_open(file, uid_cputime_show, PDE_DATA(inode));
}

static const struct file_operations uid_cputime_fops = {
	.open		= uid_cputime_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uid_remove_open(struct inode *inode, struct file *file)
{
	return single_open(file, NULL, NULL);
}

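/*
 * Remove accounting for a closed range of uids.  Expects "<start>-<end>",
 * e.g.:
 *   echo "1000-1005" > /proc/uid_cputime/remove_uid_range
 */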
static ssize_t uid_remove_write(struct file *file,
			const char __user *buffer, size_t count, loff_t *ppos)
{
	struct uid_entry *uid_entry;
	struct hlist_node *tmp;
	char uids[128];
	char *start_uid, *end_uid = NULL;
	long int uid_start = 0, uid_end = 0;

	if (count >= sizeof(uids))
		count = sizeof(uids) - 1;

	if (copy_from_user(uids, buffer, count))
		return -EFAULT;

	uids[count] = '\0';
	end_uid = uids;
	start_uid = strsep(&end_uid, "-");

	if (!start_uid || !end_uid)
		return -EINVAL;

	if (kstrtol(start_uid, 10, &uid_start) != 0 ||
		kstrtol(end_uid, 10, &uid_end) != 0) {
		return -EINVAL;
	}

	/* Also remove uids from /proc/uid_time_in_state */
	cpufreq_task_times_remove_uids(uid_start, uid_end);

	rt_mutex_lock(&uid_lock);

	for (; uid_start <= uid_end; uid_start++) {
		hash_for_each_possible_safe(hash_table, uid_entry, tmp,
							hash, (uid_t)uid_start) {
			if (uid_start == uid_entry->uid) {
				remove_uid_tasks(uid_entry);
				hash_del(&uid_entry->hash);
				kfree(uid_entry);
			}
		}
	}

	rt_mutex_unlock(&uid_lock);
	return count;
}

static const struct file_operations uid_remove_fops = {
	.open		= uid_remove_open,
	.release	= single_release,
	.write		= uid_remove_write,
};

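/*
 * Charge a task's current I/O counters to one slot of its uid entry.
 * Dying tasks are only counted via UID_STATE_DEAD_TASKS (from the exit
 * notifier) so that their I/O is not accounted twice.
 */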
static void add_uid_io_stats(struct uid_entry *uid_entry,
			struct task_struct *task, int slot)
{
	struct io_stats *io_slot = &uid_entry->io[slot];

	/* avoid double accounting of dying threads */
	if (slot != UID_STATE_DEAD_TASKS && (task->flags & PF_EXITING))
		return;

	io_slot->read_bytes += task->ioac.read_bytes;
	io_slot->write_bytes += compute_write_bytes(task);
	io_slot->rchar += task->ioac.rchar;
	io_slot->wchar += task->ioac.wchar;
	io_slot->fsync += task->ioac.syscfs;

	add_uid_tasks_io_stats(uid_entry, task, slot);
}

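/* Rebuild the I/O stats of every uid.  Caller must hold uid_lock. */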
static void update_io_stats_all_locked(void)
{
	struct uid_entry *uid_entry = NULL;
	struct task_struct *task, *temp;
	struct user_namespace *user_ns = current_user_ns();
	unsigned long bkt;
	uid_t uid;

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
			sizeof(struct io_stats));
		set_io_uid_tasks_zero(uid_entry);
	}

	rcu_read_lock();
	do_each_thread(temp, task) {
		uid = from_kuid_munged(user_ns, task_uid(task));
		if (!uid_entry || uid_entry->uid != uid)
			uid_entry = find_or_register_uid(uid);
		if (!uid_entry)
			continue;
		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
	} while_each_thread(temp, task);
	rcu_read_unlock();

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
					&uid_entry->io[UID_STATE_TOTAL_CURR],
					&uid_entry->io[UID_STATE_TOTAL_LAST],
					&uid_entry->io[UID_STATE_DEAD_TASKS]);
		compute_io_uid_tasks(uid_entry);
	}
}

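/* As update_io_stats_all_locked(), but restricted to a single uid. */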
static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
{
	struct task_struct *task, *temp;
	struct user_namespace *user_ns = current_user_ns();

	memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
		sizeof(struct io_stats));
	set_io_uid_tasks_zero(uid_entry);

	rcu_read_lock();
	do_each_thread(temp, task) {
		if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid)
			continue;
		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
	} while_each_thread(temp, task);
	rcu_read_unlock();

	compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
				&uid_entry->io[UID_STATE_TOTAL_CURR],
				&uid_entry->io[UID_STATE_TOTAL_LAST],
				&uid_entry->io[UID_STATE_DEAD_TASKS]);
	compute_io_uid_tasks(uid_entry);
}

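/*
 * One line per uid:
 * "<uid> <fg rchar> <fg wchar> <fg read_bytes> <fg write_bytes>
 *  <bg rchar> <bg wchar> <bg read_bytes> <bg write_bytes>
 *  <fg fsync> <bg fsync>"
 * followed, in debug builds, by one "task,..." line per task.
 */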
static int uid_io_show(struct seq_file *m, void *v)
{
	struct uid_entry *uid_entry;
	unsigned long bkt;

	rt_mutex_lock(&uid_lock);

	update_io_stats_all_locked();

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
				uid_entry->uid,
				uid_entry->io[UID_STATE_FOREGROUND].rchar,
				uid_entry->io[UID_STATE_FOREGROUND].wchar,
				uid_entry->io[UID_STATE_FOREGROUND].read_bytes,
				uid_entry->io[UID_STATE_FOREGROUND].write_bytes,
				uid_entry->io[UID_STATE_BACKGROUND].rchar,
				uid_entry->io[UID_STATE_BACKGROUND].wchar,
				uid_entry->io[UID_STATE_BACKGROUND].read_bytes,
				uid_entry->io[UID_STATE_BACKGROUND].write_bytes,
				uid_entry->io[UID_STATE_FOREGROUND].fsync,
				uid_entry->io[UID_STATE_BACKGROUND].fsync);

		show_io_uid_tasks(m, uid_entry);
	}

	rt_mutex_unlock(&uid_lock);
	return 0;
}

static int uid_io_open(struct inode *inode, struct file *file)
{
	return single_open(file, uid_io_show, PDE_DATA(inode));
}

static const struct file_operations uid_io_fops = {
	.open		= uid_io_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uid_procstat_open(struct inode *inode, struct file *file)
{
	return single_open(file, NULL, NULL);
}

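/*
 * Switch a uid between foreground and background accounting.  Expects
 * "<uid> <state>", where state is 0 (foreground) or 1 (background),
 * e.g.:
 *   echo "1000 1" > /proc/uid_procstat/set
 * I/O accumulated so far is flushed into the old state's bucket before
 * the state changes.
 */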
static ssize_t uid_procstat_write(struct file *file,
			const char __user *buffer, size_t count, loff_t *ppos)
{
	struct uid_entry *uid_entry;
	uid_t uid;
	int argc, state;
	char input[128];

	if (count >= sizeof(input))
		return -EINVAL;

	if (copy_from_user(input, buffer, count))
		return -EFAULT;

	input[count] = '\0';

	argc = sscanf(input, "%u %d", &uid, &state);
	if (argc != 2)
		return -EINVAL;

	if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND)
		return -EINVAL;

	rt_mutex_lock(&uid_lock);

	uid_entry = find_or_register_uid(uid);
	if (!uid_entry) {
		rt_mutex_unlock(&uid_lock);
		return -EINVAL;
	}

	if (uid_entry->state == state) {
		rt_mutex_unlock(&uid_lock);
		return count;
	}

	update_io_stats_uid_locked(uid_entry);

	uid_entry->state = state;

	rt_mutex_unlock(&uid_lock);

	return count;
}

static const struct file_operations uid_procstat_fops = {
	.open		= uid_procstat_open,
	.release	= single_release,
	.write		= uid_procstat_write,
};

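/*
 * PROFILE_TASK_EXIT notifier: fold the exiting task's cputime and I/O
 * into its uid entry so the totals survive the task itself.
 */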
static int process_notifier(struct notifier_block *self,
			unsigned long cmd, void *v)
{
	struct task_struct *task = v;
	struct uid_entry *uid_entry;
	u64 utime, stime;
	uid_t uid;

	if (!task)
		return NOTIFY_OK;

	rt_mutex_lock(&uid_lock);
	uid = from_kuid_munged(current_user_ns(), task_uid(task));
	uid_entry = find_or_register_uid(uid);
	if (!uid_entry) {
		pr_err("%s: failed to find uid %d\n", __func__, uid);
		goto exit;
	}

	task_cputime_adjusted(task, &utime, &stime);
	uid_entry->utime += utime;
	uid_entry->stime += stime;

	add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);

exit:
	rt_mutex_unlock(&uid_lock);
	return NOTIFY_OK;
}

static struct notifier_block process_notifier_block = {
	.notifier_call	= process_notifier,
};

static int __init proc_uid_sys_stats_init(void)
{
	hash_init(hash_table);

	cpu_parent = proc_mkdir("uid_cputime", NULL);
	if (!cpu_parent) {
		pr_err("%s: failed to create uid_cputime proc entry\n",
			__func__);
		goto err;
	}

	proc_create_data("remove_uid_range", 0222, cpu_parent,
		&uid_remove_fops, NULL);
	proc_create_data("show_uid_stat", 0444, cpu_parent,
		&uid_cputime_fops, NULL);

	io_parent = proc_mkdir("uid_io", NULL);
	if (!io_parent) {
		pr_err("%s: failed to create uid_io proc entry\n",
			__func__);
		goto err;
	}

	proc_create_data("stats", 0444, io_parent,
		&uid_io_fops, NULL);

	proc_parent = proc_mkdir("uid_procstat", NULL);
	if (!proc_parent) {
		pr_err("%s: failed to create uid_procstat proc entry\n",
			__func__);
		goto err;
	}

	proc_create_data("set", 0222, proc_parent,
		&uid_procstat_fops, NULL);

	profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);

	return 0;

err:
	remove_proc_subtree("uid_cputime", NULL);
	remove_proc_subtree("uid_io", NULL);
	remove_proc_subtree("uid_procstat", NULL);
	return -ENOMEM;
}

early_initcall(proc_uid_sys_stats_init);