/* drivers/misc/uid_cputime.c
 *
 * Copyright (C) 2014 - 2015 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

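/*
 * Per-UID stats live in a hash table keyed by uid (2^UID_HASH_BITS
 * buckets). Entries outlive their tasks: totals for exited tasks are
 * folded in from the PROFILE_TASK_EXIT notifier below, and entries are
 * only freed by an explicit write to remove_uid_range. All access is
 * serialized by uid_lock.
 */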
#define UID_HASH_BITS	10
DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);

static DEFINE_RT_MUTEX(uid_lock);
static struct proc_dir_entry *cpu_parent;
static struct proc_dir_entry *io_parent;
static struct proc_dir_entry *proc_parent;

struct io_stats {
	u64 read_bytes;
	u64 write_bytes;
	u64 rchar;
	u64 wchar;
	u64 fsync;
};

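/*
 * Slots in uid_entry->io[]: FOREGROUND and BACKGROUND accumulate I/O
 * deltas attributed to whichever state the UID was in when the delta
 * was observed; TOTAL_CURR and TOTAL_LAST hold the current and previous
 * whole-UID snapshots used to compute those deltas; DEAD_TASKS collects
 * the I/O of tasks that exited since the last snapshot.
 */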
#define UID_STATE_FOREGROUND	0
#define UID_STATE_BACKGROUND	1
#define UID_STATE_BUCKET_SIZE	2

#define UID_STATE_TOTAL_CURR	2
#define UID_STATE_TOTAL_LAST	3
#define UID_STATE_DEAD_TASKS	4
#define UID_STATE_SIZE		5

struct uid_entry {
	uid_t uid;
	cputime_t utime;
	cputime_t stime;
	cputime_t active_utime;
	cputime_t active_stime;
	unsigned long long active_power;
	unsigned long long power;
	int state;
	struct io_stats io[UID_STATE_SIZE];
	struct hlist_node hash;
};

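/*
 * Look up an existing entry for @uid. Callers hold uid_lock, so a
 * plain (non-RCU) walk of the uid's hash bucket is safe here.
 */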
static struct uid_entry *find_uid_entry(uid_t uid)
{
	struct uid_entry *uid_entry;
	hash_for_each_possible(hash_table, uid_entry, hash, uid) {
		if (uid_entry->uid == uid)
			return uid_entry;
	}
	return NULL;
}

static struct uid_entry *find_or_register_uid(uid_t uid)
{
	struct uid_entry *uid_entry;

	uid_entry = find_uid_entry(uid);
	if (uid_entry)
		return uid_entry;

	uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
	if (!uid_entry)
		return NULL;

	uid_entry->uid = uid;

	hash_add(hash_table, &uid_entry->hash, uid);

	return uid_entry;
}

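/*
 * /proc/uid_cputime/show_uid_stat: one line per UID in the form
 * "<uid>: <utime> <stime> <power>", with times reported in
 * microseconds (millisecond granularity, scaled by USEC_PER_MSEC).
 * Totals combine the stored counts of dead tasks with a fresh walk
 * over live threads.
 */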
static int uid_cputime_show(struct seq_file *m, void *v)
{
	struct uid_entry *uid_entry;
	struct task_struct *task, *temp;
	struct user_namespace *user_ns = current_user_ns();
	cputime_t utime;
	cputime_t stime;
	unsigned long bkt;
	uid_t uid;

	rt_mutex_lock(&uid_lock);

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		uid_entry->active_stime = 0;
		uid_entry->active_utime = 0;
		uid_entry->active_power = 0;
	}

	read_lock(&tasklist_lock);
	do_each_thread(temp, task) {
		uid = from_kuid_munged(user_ns, task_uid(task));
		uid_entry = find_or_register_uid(uid);
		if (!uid_entry) {
			read_unlock(&tasklist_lock);
			rt_mutex_unlock(&uid_lock);
			pr_err("%s: failed to find the uid_entry for uid %d\n",
				__func__, uid);
			return -ENOMEM;
		}
		/* if this task is exiting, we have already accounted for the
		 * time and power.
		 */
		if (task->cpu_power == ULLONG_MAX)
			continue;
		task_cputime_adjusted(task, &utime, &stime);
		uid_entry->active_utime += utime;
		uid_entry->active_stime += stime;
		uid_entry->active_power += task->cpu_power;
	} while_each_thread(temp, task);
	read_unlock(&tasklist_lock);

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		cputime_t total_utime = uid_entry->utime +
							uid_entry->active_utime;
		cputime_t total_stime = uid_entry->stime +
							uid_entry->active_stime;
		unsigned long long total_power = uid_entry->power +
							uid_entry->active_power;
		seq_printf(m, "%d: %llu %llu %llu\n", uid_entry->uid,
			(unsigned long long)jiffies_to_msecs(
				cputime_to_jiffies(total_utime)) * USEC_PER_MSEC,
			(unsigned long long)jiffies_to_msecs(
				cputime_to_jiffies(total_stime)) * USEC_PER_MSEC,
			total_power);
	}

	rt_mutex_unlock(&uid_lock);
	return 0;
}

static int uid_cputime_open(struct inode *inode, struct file *file)
{
	return single_open(file, uid_cputime_show, PDE_DATA(inode));
}

static const struct file_operations uid_cputime_fops = {
	.open		= uid_cputime_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uid_remove_open(struct inode *inode, struct file *file)
{
	return single_open(file, NULL, NULL);
}

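/*
 * /proc/uid_cputime/remove_uid_range accepts a "start-end" range of
 * UIDs (e.g. "10050-10060") and drops any matching entries. Note that
 * a single UID must still be written as a range, e.g. "10050-10050".
 */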
static ssize_t uid_remove_write(struct file *file,
			const char __user *buffer, size_t count, loff_t *ppos)
{
	struct uid_entry *uid_entry;
	struct hlist_node *tmp;
	char uids[128];
	char *start_uid, *end_uid = NULL;
	long int uid_start = 0, uid_end = 0;

	if (count >= sizeof(uids))
		count = sizeof(uids) - 1;

	if (copy_from_user(uids, buffer, count))
		return -EFAULT;

	uids[count] = '\0';
	end_uid = uids;
	start_uid = strsep(&end_uid, "-");

	if (!start_uid || !end_uid)
		return -EINVAL;

	if (kstrtol(start_uid, 10, &uid_start) != 0 ||
		kstrtol(end_uid, 10, &uid_end) != 0) {
		return -EINVAL;
	}
	rt_mutex_lock(&uid_lock);

	for (; uid_start <= uid_end; uid_start++) {
		hash_for_each_possible_safe(hash_table, uid_entry, tmp,
							hash, (uid_t)uid_start) {
			if (uid_start == uid_entry->uid) {
				hash_del(&uid_entry->hash);
				kfree(uid_entry);
			}
		}
	}

	rt_mutex_unlock(&uid_lock);
	return count;
}

static const struct file_operations uid_remove_fops = {
	.open		= uid_remove_open,
	.release	= single_release,
	.write		= uid_remove_write,
};

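/*
 * write_bytes can trail cancelled_write_bytes (e.g. after a truncate
 * discards dirty pages the task was credited for writing), so clamp
 * the difference at zero rather than letting the unsigned subtraction
 * wrap to a huge value.
 */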
static u64 compute_write_bytes(struct task_struct *task)
{
	if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes)
		return 0;

	return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
}

static void add_uid_io_stats(struct uid_entry *uid_entry,
			struct task_struct *task, int slot)
{
	struct io_stats *io_slot = &uid_entry->io[slot];

	io_slot->read_bytes += task->ioac.read_bytes;
	io_slot->write_bytes += compute_write_bytes(task);
	io_slot->rchar += task->ioac.rchar;
	io_slot->wchar += task->ioac.wchar;
	io_slot->fsync += task->ioac.syscfs;
}

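/*
 * Fold the change since the last snapshot into the given fg/bg bucket:
 * delta = (current live totals + exited-task totals) - last snapshot.
 * Negative deltas, which can arise from accounting races, are dropped.
 * The snapshot then rolls forward and the dead-task totals are cleared.
 */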
static void compute_uid_io_bucket_stats(struct io_stats *io_bucket,
					struct io_stats *io_curr,
					struct io_stats *io_last,
					struct io_stats *io_dead)
{
	s64 delta;

	delta = io_curr->read_bytes + io_dead->read_bytes -
		io_last->read_bytes;
	if (delta > 0)
		io_bucket->read_bytes += delta;

	delta = io_curr->write_bytes + io_dead->write_bytes -
		io_last->write_bytes;
	if (delta > 0)
		io_bucket->write_bytes += delta;

	delta = io_curr->rchar + io_dead->rchar - io_last->rchar;
	if (delta > 0)
		io_bucket->rchar += delta;

	delta = io_curr->wchar + io_dead->wchar - io_last->wchar;
	if (delta > 0)
		io_bucket->wchar += delta;

	delta = io_curr->fsync + io_dead->fsync - io_last->fsync;
	if (delta > 0)
		io_bucket->fsync += delta;

	io_last->read_bytes = io_curr->read_bytes;
	io_last->write_bytes = io_curr->write_bytes;
	io_last->rchar = io_curr->rchar;
	io_last->wchar = io_curr->wchar;
	io_last->fsync = io_curr->fsync;

	memset(io_dead, 0, sizeof(struct io_stats));
}

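/*
 * Rebuild TOTAL_CURR for every UID by walking all live threads under
 * RCU, then fold each UID's delta into its current fg/bg bucket.
 * Callers hold uid_lock (hence "_locked").
 */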
static void update_io_stats_all_locked(void)
{
	struct uid_entry *uid_entry;
	struct task_struct *task, *temp;
	struct user_namespace *user_ns = current_user_ns();
	unsigned long bkt;
	uid_t uid;

	hash_for_each(hash_table, bkt, uid_entry, hash)
		memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
			sizeof(struct io_stats));

	rcu_read_lock();
	do_each_thread(temp, task) {
		uid = from_kuid_munged(user_ns, task_uid(task));
		uid_entry = find_or_register_uid(uid);
		if (!uid_entry)
			continue;
		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
	} while_each_thread(temp, task);
	rcu_read_unlock();

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
					&uid_entry->io[UID_STATE_TOTAL_CURR],
					&uid_entry->io[UID_STATE_TOTAL_LAST],
					&uid_entry->io[UID_STATE_DEAD_TASKS]);
	}
}

static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
{
	struct task_struct *task, *temp;
	struct user_namespace *user_ns = current_user_ns();

	memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
		sizeof(struct io_stats));

	rcu_read_lock();
	do_each_thread(temp, task) {
		if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid)
			continue;
		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
	} while_each_thread(temp, task);
	rcu_read_unlock();

	compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
				&uid_entry->io[UID_STATE_TOTAL_CURR],
				&uid_entry->io[UID_STATE_TOTAL_LAST],
				&uid_entry->io[UID_STATE_DEAD_TASKS]);
}

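/*
 * /proc/uid_io/stats: one line per UID with eleven columns: uid, then
 * foreground rchar/wchar/read_bytes/write_bytes, the same four for
 * background, and finally the foreground and background fsync counts.
 */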
static int uid_io_show(struct seq_file *m, void *v)
{
	struct uid_entry *uid_entry;
	unsigned long bkt;

	rt_mutex_lock(&uid_lock);

	update_io_stats_all_locked();

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
			uid_entry->uid,
			uid_entry->io[UID_STATE_FOREGROUND].rchar,
			uid_entry->io[UID_STATE_FOREGROUND].wchar,
			uid_entry->io[UID_STATE_FOREGROUND].read_bytes,
			uid_entry->io[UID_STATE_FOREGROUND].write_bytes,
			uid_entry->io[UID_STATE_BACKGROUND].rchar,
			uid_entry->io[UID_STATE_BACKGROUND].wchar,
			uid_entry->io[UID_STATE_BACKGROUND].read_bytes,
			uid_entry->io[UID_STATE_BACKGROUND].write_bytes,
			uid_entry->io[UID_STATE_FOREGROUND].fsync,
			uid_entry->io[UID_STATE_BACKGROUND].fsync);
	}

	rt_mutex_unlock(&uid_lock);

	return 0;
}

static int uid_io_open(struct inode *inode, struct file *file)
{
	return single_open(file, uid_io_show, PDE_DATA(inode));
}

static const struct file_operations uid_io_fops = {
	.open		= uid_io_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uid_procstat_open(struct inode *inode, struct file *file)
{
	return single_open(file, NULL, NULL);
}

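/*
 * /proc/uid_procstat/set takes "<uid> <state>" where state is 0
 * (foreground) or 1 (background). Pending I/O deltas are flushed into
 * the old state's bucket before the switch, so they are attributed to
 * the state in which they actually occurred.
 */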
static ssize_t uid_procstat_write(struct file *file,
			const char __user *buffer, size_t count, loff_t *ppos)
{
	struct uid_entry *uid_entry;
	uid_t uid;
	int argc, state;
	char input[128];

	if (count >= sizeof(input))
		return -EINVAL;

	if (copy_from_user(input, buffer, count))
		return -EFAULT;

	input[count] = '\0';

	argc = sscanf(input, "%u %d", &uid, &state);
	if (argc != 2)
		return -EINVAL;

	if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND)
		return -EINVAL;

	rt_mutex_lock(&uid_lock);

	uid_entry = find_or_register_uid(uid);
	if (!uid_entry) {
		rt_mutex_unlock(&uid_lock);
		return -EINVAL;
	}

	if (uid_entry->state == state) {
		rt_mutex_unlock(&uid_lock);
		return count;
	}

	update_io_stats_uid_locked(uid_entry);

	uid_entry->state = state;

	rt_mutex_unlock(&uid_lock);

	return count;
}

static const struct file_operations uid_procstat_fops = {
	.open		= uid_procstat_open,
	.release	= single_release,
	.write		= uid_procstat_write,
};

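/*
 * PROFILE_TASK_EXIT hook: fold the dying task's cputime, power and I/O
 * into its UID's persistent totals, then mark the task as accounted by
 * setting cpu_power to ULLONG_MAX so uid_cputime_show skips it.
 */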
static int process_notifier(struct notifier_block *self,
			unsigned long cmd, void *v)
{
	struct task_struct *task = v;
	struct uid_entry *uid_entry;
	cputime_t utime, stime;
	uid_t uid;

	if (!task)
		return NOTIFY_OK;

	rt_mutex_lock(&uid_lock);
	uid = from_kuid_munged(current_user_ns(), task_uid(task));
	uid_entry = find_or_register_uid(uid);
	if (!uid_entry) {
		pr_err("%s: failed to find uid %d\n", __func__, uid);
		goto exit;
	}

	task_cputime_adjusted(task, &utime, &stime);
	uid_entry->utime += utime;
	uid_entry->stime += stime;
	uid_entry->power += task->cpu_power;
	task->cpu_power = ULLONG_MAX;

	add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);

exit:
	rt_mutex_unlock(&uid_lock);
	return NOTIFY_OK;
}

static struct notifier_block process_notifier_block = {
	.notifier_call	= process_notifier,
};

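/*
 * Create /proc/uid_cputime, /proc/uid_io and /proc/uid_procstat and
 * register the task-exit notifier. On any failure all three subtrees
 * are removed; remove_proc_subtree() on a name that was never created
 * simply returns an error, so the cleanup is safe at any stage.
 */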
static int __init proc_uid_sys_stats_init(void)
{
	hash_init(hash_table);

	cpu_parent = proc_mkdir("uid_cputime", NULL);
	if (!cpu_parent) {
		pr_err("%s: failed to create uid_cputime proc entry\n",
			__func__);
		goto err;
	}

	proc_create_data("remove_uid_range", 0222, cpu_parent,
		&uid_remove_fops, NULL);
	proc_create_data("show_uid_stat", 0444, cpu_parent,
		&uid_cputime_fops, NULL);

	io_parent = proc_mkdir("uid_io", NULL);
	if (!io_parent) {
		pr_err("%s: failed to create uid_io proc entry\n",
			__func__);
		goto err;
	}

	proc_create_data("stats", 0444, io_parent,
		&uid_io_fops, NULL);

	proc_parent = proc_mkdir("uid_procstat", NULL);
	if (!proc_parent) {
		pr_err("%s: failed to create uid_procstat proc entry\n",
			__func__);
		goto err;
	}

	proc_create_data("set", 0222, proc_parent,
		&uid_procstat_fops, NULL);

	profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);

	return 0;

err:
	remove_proc_subtree("uid_cputime", NULL);
	remove_proc_subtree("uid_io", NULL);
	remove_proc_subtree("uid_procstat", NULL);
	return -ENOMEM;
}

early_initcall(proc_uid_sys_stats_init);
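
/*
 * Illustrative userspace usage, with paths as created above (UID
 * values are examples only):
 *
 *   cat /proc/uid_cputime/show_uid_stat      # per-UID utime/stime/power
 *   echo "10050 1" > /proc/uid_procstat/set  # move UID 10050 to background
 *   cat /proc/uid_io/stats                   # per-UID fg/bg I/O counters
 *   echo "10050-10060" > /proc/uid_cputime/remove_uid_range
 */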