/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 */
#include "ucollection_process_cpu.h"

#include <asm/div64.h>
#ifdef CONFIG_CPU_FREQ_TIMES
#include <linux/cpufreq_times.h>
#endif // CONFIG_CPU_FREQ_TIMES
#include <linux/sched/stat.h>
#include <linux/version.h>
#include <linux/uaccess.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
#include <linux/sched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/signal.h>
#endif // LINUX_VERSION_CODE
#ifdef CONFIG_SMT_MODE_GOV
#include <platform_include/cee/linux/time_in_state.h>
#endif // CONFIG_SMT_MODE_GOV

#include "unified_collection_data.h"

#define NS_TO_MS 1000000
static char dmips_values[DMIPS_NUM];

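/*
 * Weak default for the per-process CPU load hook. Platform code may
 * override this (presumably weighting cputime by the per-core DMIPS
 * table); this stub reports no load when no override is linked in.
 */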
unsigned long long __attribute__((weak)) get_proc_cpu_load(struct task_struct *task, char dmips[],
	unsigned int dmips_num)
{
	return 0;
}

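/*
 * Sum minor/major page faults over every live thread in the group, then
 * add the counts accumulated in the signal struct for threads that have
 * already exited. Also records the total thread count.
 * Caller must hold rcu_read_lock().
 */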
static void get_process_flt(struct task_struct *task, struct ucollection_process_cpu_item *proc_cpu_entry)
{
	unsigned long tmp_min_flt = 0;
	unsigned long tmp_maj_flt = 0;
	struct task_struct *t = task;
	struct signal_struct *sig = NULL;
	signed int thread_count = 0;

	do {
		tmp_min_flt += t->min_flt;
		tmp_maj_flt += t->maj_flt;
		++thread_count;
	} while_each_thread(task, t);

	sig = task->signal;
	if (sig != NULL) {
		tmp_min_flt += sig->min_flt;
		tmp_maj_flt += sig->maj_flt;
	}

	proc_cpu_entry->min_flt = tmp_min_flt;
	proc_cpu_entry->maj_flt = tmp_maj_flt;
	proc_cpu_entry->thread_total = thread_count;
}

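/* Per-process CPU load as reported by the platform hook (0 without an override). */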
static unsigned long long get_process_load_cputime(struct task_struct *task)
{
	return get_proc_cpu_load(task, dmips_values, DMIPS_NUM);
}

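/*
 * Adjusted user/system time for the whole thread group, converted from
 * nanoseconds to milliseconds.
 */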
static void get_process_usage_cputime(struct task_struct *task, unsigned long long *ut, unsigned long long *st)
{
	unsigned long long utime, stime;

	thread_group_cputime_adjusted(task, &utime, &stime);
	do_div(utime, NS_TO_MS);
	do_div(stime, NS_TO_MS);
	*ut = utime;
	*st = stime;
}

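/* Fill one process item and copy it to slot cur_count of the user buffer. */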
static void get_process_load(struct task_struct *task, int cur_count,
	struct ucollection_process_cpu_entry __user *entry)
{
	struct ucollection_process_cpu_item proc_cpu_entry;

	memset(&proc_cpu_entry, 0, sizeof(struct ucollection_process_cpu_item));
	proc_cpu_entry.pid = task->pid;
	get_process_flt(task, &proc_cpu_entry);
	proc_cpu_entry.cpu_load_time = get_process_load_cputime(task);
	get_process_usage_cputime(task, &proc_cpu_entry.cpu_usage_utime, &proc_cpu_entry.cpu_usage_stime);
	(void)copy_to_user(&entry->datas[cur_count], &proc_cpu_entry, sizeof(struct ucollection_process_cpu_item));
}

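/*
 * Fill one thread item from the task's raw (unadjusted) utime/stime and
 * copy it to slot cur_count of the user buffer.
 */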
static void get_thread_load(struct task_struct *task, int cur_count,
	struct ucollection_thread_cpu_entry __user *entry)
{
	struct ucollection_thread_cpu_item thread_cpu_item;
	unsigned long long utime, stime;

	memset(&thread_cpu_item, 0, sizeof(struct ucollection_thread_cpu_item));
	utime = task->utime;
	stime = task->stime;
	do_div(utime, NS_TO_MS);
	do_div(stime, NS_TO_MS);
	thread_cpu_item.tid = task->pid;
	thread_cpu_item.cpu_usage_utime = utime;
	thread_cpu_item.cpu_usage_stime = stime;
	thread_cpu_item.cpu_load_time = 0;
	(void)copy_to_user(&entry->datas[cur_count], &thread_cpu_item, sizeof(struct ucollection_thread_cpu_item));
}

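/*
 * Walk every thread-group leader under RCU and append its CPU stats to
 * the user entry array, up to the total_count supplied by userspace.
 */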
static long ioctrl_collect_process_cpu(void __user *argp)
{
	struct task_struct *task = NULL;
	struct ucollection_process_cpu_entry kentry;
	struct ucollection_process_cpu_entry __user *entry = argp;

	if (entry == NULL) {
		pr_err("cpu entry is null\n");
		return -EINVAL;
	}

	memset(&kentry, 0, sizeof(struct ucollection_process_cpu_entry));
	(void)copy_from_user(&kentry, entry, sizeof(struct ucollection_process_cpu_entry));

	rcu_read_lock();
	for_each_process(task) {
		if (task->pid != task->tgid)
			continue;

		if (kentry.cur_count >= kentry.total_count) {
			pr_err("process count exceeds total count\n");
			break;
		}

		get_process_load(task, kentry.cur_count, entry);
		kentry.cur_count++;
	}
	put_user(kentry.cur_count, &entry->cur_count);
	rcu_read_unlock();
	return 0;
}

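/*
 * Look up a task in the init PID namespace. Returns NULL if the pid is
 * not found or the task is no longer alive. Caller must hold
 * rcu_read_lock().
 */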
static struct task_struct *get_alive_task_by_pid(unsigned int pid)
{
	struct task_struct *task = find_task_by_pid_ns(pid, &init_pid_ns);

	if (task == NULL || !pid_alive(task))
		return NULL;
	return task;
}

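/* Count thread-group leaders (i.e. processes) and report the total to userspace. */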
static long ioctrl_collect_process_count(void __user *argp)
{
	struct task_struct *task = NULL;
	unsigned int process_count = 0;
	unsigned int __user *count = argp;

	rcu_read_lock();
	for_each_process(task) {
		if (task->pid != task->tgid)
			continue;
		++process_count;
	}
	rcu_read_unlock();
	put_user(process_count, count);
	return 0;
}

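/* Count the threads of the requested process and report the total to userspace. */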
static long read_thread_count_locked(struct ucollection_process_thread_count *kcount,
	struct ucollection_process_thread_count __user *count)
{
	struct task_struct *task = NULL;
	struct task_struct *t = NULL;
	unsigned int thread_count = 0;

	rcu_read_lock();
	task = get_alive_task_by_pid(kcount->pid);
	if (task == NULL) {
		pr_info("task for pid=%d is NULL or not alive\n", kcount->pid);
		rcu_read_unlock();
		return -EINVAL;
	}
	t = task;
	do {
		thread_count++;
	} while_each_thread(task, t);
	put_user(thread_count, &count->thread_count);
	rcu_read_unlock();
	return 0;
}

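/* Thread count for an arbitrary pid. */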
static long ioctrl_collect_thread_count(void __user *argp)
{
	struct ucollection_process_thread_count kcount;
	struct ucollection_process_thread_count __user *count = argp;

	if (count == NULL) {
		pr_err("thread count pointer is null\n");
		return -EINVAL;
	}
	memset(&kcount, 0, sizeof(struct ucollection_process_thread_count));
	(void)copy_from_user(&kcount, count, sizeof(struct ucollection_process_thread_count));
	return read_thread_count_locked(&kcount, count);
}

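/* Thread count restricted to the caller's own thread group. */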
static long ioctrl_collect_app_thread_count(void __user *argp)
{
	struct ucollection_process_thread_count kcount;
	struct ucollection_process_thread_count __user *count = argp;

	if (count == NULL) {
		pr_err("thread count pointer is null\n");
		return -EINVAL;
	}
	memset(&kcount, 0, sizeof(struct ucollection_process_thread_count));
	(void)copy_from_user(&kcount, count, sizeof(struct ucollection_process_thread_count));
	if (current->tgid != kcount.pid) {
		pr_err("pid=%d does not match the caller's tgid=%d\n", kcount.pid, current->tgid);
		return -EINVAL;
	}
	return read_thread_count_locked(&kcount, count);
}

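/*
 * Walk the threads of the requested process under RCU and append one
 * item per thread to the user entry array, up to total_count.
 */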
static long read_thread_info_locked(struct ucollection_thread_cpu_entry *kentry,
	struct ucollection_thread_cpu_entry __user *entry)
{
	struct task_struct *task = NULL;
	struct task_struct *t = NULL;
	unsigned int thread_count = 0;

	rcu_read_lock();
	task = get_alive_task_by_pid(kentry->filter.pid);
	if (task == NULL) {
		pr_info("task for pid=%d is NULL or not alive\n", kentry->filter.pid);
		rcu_read_unlock();
		return -EINVAL;
	}
	t = task;
	do {
		if (thread_count >= kentry->total_count) {
			pr_err("thread count exceeds total count\n");
			break;
		}
		get_thread_load(t, thread_count, entry);
		thread_count++;
	} while_each_thread(task, t);
	put_user(thread_count, &entry->cur_count);
	rcu_read_unlock();
	return 0;
}

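/* Per-thread CPU stats, restricted to the caller's own thread group. */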
static long ioctrl_collect_app_thread_cpu(void __user *argp)
{
	struct ucollection_thread_cpu_entry kentry;
	struct ucollection_thread_cpu_entry __user *entry = argp;

	if (entry == NULL) {
		pr_err("thread cpu entry is null\n");
		return -EINVAL;
	}
	memset(&kentry, 0, sizeof(struct ucollection_thread_cpu_entry));
	(void)copy_from_user(&kentry, entry, sizeof(struct ucollection_thread_cpu_entry));
	if (current->tgid != kentry.filter.pid || kentry.cur_count >= kentry.total_count) {
		pr_err("pid=%d does not match the caller's tgid=%d, or current count exceeds total count\n",
			kentry.filter.pid, current->tgid);
		return -EINVAL;
	}
	return read_thread_info_locked(&kentry, entry);
}

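/* Per-thread CPU stats for an arbitrary pid. */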
static long ioctrl_collect_the_thread_cpu(void __user *argp)
{
	struct ucollection_thread_cpu_entry kentry;
	struct ucollection_thread_cpu_entry __user *entry = argp;

	if (entry == NULL) {
		pr_err("thread cpu entry is null\n");
		return -EINVAL;
	}
	memset(&kentry, 0, sizeof(struct ucollection_thread_cpu_entry));
	(void)copy_from_user(&kentry, entry, sizeof(struct ucollection_thread_cpu_entry));
	if (kentry.cur_count >= kentry.total_count) {
		pr_err("current count exceeds total count for pid=%d\n", kentry.filter.pid);
		return -EINVAL;
	}
	return read_thread_info_locked(&kentry, entry);
}

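/* CPU stats for a single process identified by filter.pid. */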
static long ioctrl_collect_the_process_cpu(void __user *argp)
{
	struct task_struct *task = NULL;
	struct ucollection_process_cpu_entry kentry;
	struct ucollection_process_cpu_entry __user *entry = argp;

	if (entry == NULL) {
		pr_err("cpu entry is null\n");
		return -EINVAL;
	}

	memset(&kentry, 0, sizeof(struct ucollection_process_cpu_entry));
	(void)copy_from_user(&kentry, entry, sizeof(struct ucollection_process_cpu_entry));

	if (kentry.cur_count >= kentry.total_count) {
		pr_err("current count exceeds total count\n");
		return -EINVAL;
	}

	rcu_read_lock();
	task = get_alive_task_by_pid(kentry.filter.pid);
	if (task == NULL) {
		pr_info("task for pid=%d is NULL or not alive\n", kentry.filter.pid);
		rcu_read_unlock();
		return -EINVAL;
	}

	get_process_load(task, kentry.cur_count, entry);
	kentry.cur_count++;
	put_user(kentry.cur_count, &entry->cur_count);
	rcu_read_unlock();
	return 0;
}

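/* Dispatch the unified-collection CPU ioctl commands. */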
long unified_collection_collect_process_cpu(unsigned int cmd, void __user *argp)
{
	long ret = 0;

	switch (cmd) {
	case IOCTRL_COLLECT_ALL_PROC_CPU:
		ret = ioctrl_collect_process_cpu(argp);
		break;
	case IOCTRL_COLLECT_THE_PROC_CPU:
		ret = ioctrl_collect_the_process_cpu(argp);
		break;
	case IOCTRL_COLLECT_THREAD_COUNT:
		ret = ioctrl_collect_thread_count(argp);
		break;
	case IOCTRL_COLLECT_APP_THREAD_COUNT:
		ret = ioctrl_collect_app_thread_count(argp);
		break;
	case IOCTRL_COLLECT_APP_THREAD:
		ret = ioctrl_collect_app_thread_cpu(argp);
		break;
	case IOCTRL_COLLECT_THE_THREAD:
		ret = ioctrl_collect_the_thread_cpu(argp);
		break;
	case IOCTRL_COLLECT_PROC_COUNT:
		ret = ioctrl_collect_process_count(argp);
		break;
	default:
		pr_err("unknown ioctl cmd %u, _IOC_TYPE(cmd)=%d\n", cmd, _IOC_TYPE(cmd));
		ret = 0;
	}
	return ret;
}