1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Simple CPU accounting cgroup controller
4  */
5 #include <linux/cpufreq_times.h>
6 #include <trace/hooks/sched.h>
7 
8 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
9 
10 /*
11  * There are no locks covering percpu hardirq/softirq time.
12  * They are only modified in vtime_account, on corresponding CPU
13  * with interrupts disabled. So, writes are safe.
14  * They are read and saved off onto struct rq in update_rq_clock().
15  * This may result in another CPU reading this CPU's irq time and can
16  * race with irq/vtime_account on this CPU. We would either get the old
17  * or the new value, with a side effect of accounting a slice of irq time
18  * to the wrong task when an irq is in progress while we read rq->clock. That is a worthy
19  * compromise in place of having locks on each irq in account_system_time.
20  */
21 DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
22 EXPORT_PER_CPU_SYMBOL_GPL(cpu_irqtime);
23 
24 static int sched_clock_irqtime;
25 
26 void enable_sched_clock_irqtime(void)
27 {
28 	sched_clock_irqtime = 1;
29 }
30 
31 void disable_sched_clock_irqtime(void)
32 {
33 	sched_clock_irqtime = 0;
34 }
35 
36 static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
37 				  enum cpu_usage_stat idx)
38 {
39 	u64 *cpustat = kcpustat_this_cpu->cpustat;
40 
41 	u64_stats_update_begin(&irqtime->sync);
42 	cpustat[idx] += delta;
43 	irqtime->total += delta;
44 	irqtime->tick_delta += delta;
45 	u64_stats_update_end(&irqtime->sync);
46 }
47 
48 /*
49  * Called after incrementing preempt_count on {soft,}irq_enter
50  * and before decrementing preempt_count on {soft,}irq_exit.
51  */
52 void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
53 {
54 	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
55 	unsigned int pc;
56 	s64 delta;
57 	int cpu;
58 	bool irq_start = true;
59 
60 	if (!sched_clock_irqtime)
61 		return;
62 
63 	cpu = smp_processor_id();
64 	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
65 	irqtime->irq_start_time += delta;
66 	pc = irq_count() - offset;
67 
68 	/*
69 	 * We do not account for softirq time from ksoftirqd here.
70 	 * We want to continue accounting softirq time to the ksoftirqd thread
71 	 * in that case, so as not to confuse the scheduler with a special task
72 	 * that does not consume any time but still wants to run.
73 	 */
74 	if (pc & HARDIRQ_MASK) {
75 		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
76 		irq_start = false;
77 	} else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd()) {
78 		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
79 		irq_start = false;
80 	}
81 
82 	trace_android_rvh_account_irq(curr, cpu, delta, irq_start);
83 }
84 
85 static u64 irqtime_tick_accounted(u64 maxtime)
86 {
87 	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
88 	u64 delta;
89 
90 	delta = min(irqtime->tick_delta, maxtime);
91 	irqtime->tick_delta -= delta;
92 
93 	return delta;
94 }
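/*
 * For illustration, assuming HZ=250 so that TICK_NSEC == 4,000,000 ns
 * (the numbers below are examples, not from this file):
 *
 *   irqtime->tick_delta = 6,500,000;      // irq time since the last tick
 *   irqtime_tick_accounted(TICK_NSEC);    // returns 4,000,000,
 *                                         // tick_delta -> 2,500,000
 *   irqtime_tick_accounted(TICK_NSEC);    // returns 2,500,000,
 *                                         // tick_delta -> 0
 *
 * i.e. irq/softirq time larger than one tick is drained across subsequent
 * ticks rather than being accounted all at once.
 */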
95 
96 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
97 
98 #define sched_clock_irqtime	(0)
99 
100 static u64 irqtime_tick_accounted(u64 dummy)
101 {
102 	return 0;
103 }
104 
105 #endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
106 
107 static inline void task_group_account_field(struct task_struct *p, int index,
108 					    u64 tmp)
109 {
110 	/*
111 	 * Since all updates are sure to touch the root cgroup, we
112 	 * get ourselves ahead and touch it first. If the root cgroup
113 	 * is the only cgroup, then nothing else should be necessary.
114 	 *
115 	 */
116 	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);
117 
118 	cgroup_account_cputime_field(p, index, tmp);
119 }
120 
121 /*
122  * Account user CPU time to a process.
123  * @p: the process that the CPU time gets accounted to
124  * @cputime: the CPU time spent in user space since the last update
125  */
126 void account_user_time(struct task_struct *p, u64 cputime)
127 {
128 	int index;
129 
130 	/* Add user time to process. */
131 	p->utime += cputime;
132 	account_group_user_time(p, cputime);
133 
134 	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
135 
136 	/* Add user time to cpustat. */
137 	task_group_account_field(p, index, cputime);
138 
139 	/* Account for user time used */
140 	acct_account_cputime(p);
141 
142 	/* Account power usage for user time */
143 	cpufreq_acct_update_power(p, cputime);
144 }
145 
146 /*
147  * Account guest CPU time to a process.
148  * @p: the process that the CPU time gets accounted to
149  * @cputime: the CPU time spent in virtual machine since the last update
150  */
151 void account_guest_time(struct task_struct *p, u64 cputime)
152 {
153 	u64 *cpustat = kcpustat_this_cpu->cpustat;
154 
155 	/* Add guest time to process. */
156 	p->utime += cputime;
157 	account_group_user_time(p, cputime);
158 	p->gtime += cputime;
159 
160 	/* Add guest time to cpustat. */
161 	if (task_nice(p) > 0) {
162 		task_group_account_field(p, CPUTIME_NICE, cputime);
163 		cpustat[CPUTIME_GUEST_NICE] += cputime;
164 	} else {
165 		task_group_account_field(p, CPUTIME_USER, cputime);
166 		cpustat[CPUTIME_GUEST] += cputime;
167 	}
168 }
169 
170 /*
171  * Account system CPU time to a process and desired cpustat field
172  * @p: the process that the CPU time gets accounted to
173  * @cputime: the CPU time spent in kernel space since the last update
174  * @index: pointer to cpustat field that has to be updated
175  */
176 void account_system_index_time(struct task_struct *p,
177 			       u64 cputime, enum cpu_usage_stat index)
178 {
179 	/* Add system time to process. */
180 	p->stime += cputime;
181 	account_group_system_time(p, cputime);
182 
183 	/* Add system time to cpustat. */
184 	task_group_account_field(p, index, cputime);
185 
186 	/* Account for system time used */
187 	acct_account_cputime(p);
188 
189 	/* Account power usage for system time */
190 	cpufreq_acct_update_power(p, cputime);
191 }
192 
193 /*
194  * Account system CPU time to a process.
195  * @p: the process that the CPU time gets accounted to
196  * @hardirq_offset: the offset to subtract from hardirq_count()
197  * @cputime: the CPU time spent in kernel space since the last update
198  */
199 void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
200 {
201 	int index;
202 
203 	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
204 		account_guest_time(p, cputime);
205 		return;
206 	}
207 
208 	if (hardirq_count() - hardirq_offset)
209 		index = CPUTIME_IRQ;
210 	else if (in_serving_softirq())
211 		index = CPUTIME_SOFTIRQ;
212 	else
213 		index = CPUTIME_SYSTEM;
214 
215 	account_system_index_time(p, cputime, index);
216 }
217 
218 /*
219  * Account for involuntary wait time.
220  * @cputime: the CPU time spent in involuntary wait
221  */
222 void account_steal_time(u64 cputime)
223 {
224 	u64 *cpustat = kcpustat_this_cpu->cpustat;
225 
226 	cpustat[CPUTIME_STEAL] += cputime;
227 }
228 
229 /*
230  * Account for idle time.
231  * @cputime: the CPU time spent in idle wait
232  */
233 void account_idle_time(u64 cputime)
234 {
235 	u64 *cpustat = kcpustat_this_cpu->cpustat;
236 	struct rq *rq = this_rq();
237 
238 	if (atomic_read(&rq->nr_iowait) > 0)
239 		cpustat[CPUTIME_IOWAIT] += cputime;
240 	else
241 		cpustat[CPUTIME_IDLE] += cputime;
242 }
243 
244 
245 #ifdef CONFIG_SCHED_CORE
246 /*
247  * Account for forceidle time due to core scheduling.
248  *
249  * REQUIRES: schedstat is enabled.
250  */
251 void __account_forceidle_time(struct task_struct *p, u64 delta)
252 {
253 	__schedstat_add(p->stats.core_forceidle_sum, delta);
254 
255 	task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
256 }
257 #endif
258 
259 /*
260  * When a guest is interrupted for a longer amount of time, missed clock
261  * ticks are not redelivered later. Due to that, this function may on
262  * occasion account more time than the calling functions think has elapsed.
263  */
264 static __always_inline u64 steal_account_process_time(u64 maxtime)
265 {
266 #ifdef CONFIG_PARAVIRT
267 	if (static_key_false(&paravirt_steal_enabled)) {
268 		u64 steal;
269 
270 		steal = paravirt_steal_clock(smp_processor_id());
271 		steal -= this_rq()->prev_steal_time;
272 		steal = min(steal, maxtime);
273 		account_steal_time(steal);
274 		this_rq()->prev_steal_time += steal;
275 
276 		return steal;
277 	}
278 #endif
279 	return 0;
280 }
281 
282 /*
283  * Account how much elapsed time was spent in steal, irq, or softirq time.
284  */
285 static inline u64 account_other_time(u64 max)
286 {
287 	u64 accounted;
288 
289 	lockdep_assert_irqs_disabled();
290 
291 	accounted = steal_account_process_time(max);
292 
293 	if (accounted < max)
294 		accounted += irqtime_tick_accounted(max - accounted);
295 
296 	return accounted;
297 }
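/*
 * For illustration, with max == TICK_NSEC == 4,000,000 ns (assuming HZ=250):
 * if steal_account_process_time() reports 1,500,000 ns of steal time, then
 * irqtime_tick_accounted() is limited to the remaining 2,500,000 ns, so the
 * combined "other" time handed back to the caller never exceeds max.
 */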
298 
299 #ifdef CONFIG_64BIT
300 static inline u64 read_sum_exec_runtime(struct task_struct *t)
301 {
302 	return t->se.sum_exec_runtime;
303 }
304 #else
305 static u64 read_sum_exec_runtime(struct task_struct *t)
306 {
307 	u64 ns;
308 	struct rq_flags rf;
309 	struct rq *rq;
310 
311 	rq = task_rq_lock(t, &rf);
312 	ns = t->se.sum_exec_runtime;
313 	task_rq_unlock(rq, t, &rf);
314 
315 	return ns;
316 }
317 #endif
318 
319 /*
320  * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
321  * tasks (sum on group iteration) belonging to @tsk's group.
322  */
323 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
324 {
325 	struct signal_struct *sig = tsk->signal;
326 	u64 utime, stime;
327 	struct task_struct *t;
328 	unsigned int seq, nextseq;
329 	unsigned long flags;
330 
331 	/*
332 	 * Update current task runtime to account pending time since last
333 	 * scheduler action or thread_group_cputime() call. This thread group
334 	 * might have other running tasks on different CPUs, but updating
335 	 * their runtime can affect syscall performance, so we skip accounting
336 	 * those pending times and rely only on values updated on tick or
337 	 * other scheduler action.
338 	 */
339 	if (same_thread_group(current, tsk))
340 		(void) task_sched_runtime(current);
341 
342 	rcu_read_lock();
343 	/* Attempt a lockless read on the first round. */
344 	nextseq = 0;
345 	do {
346 		seq = nextseq;
347 		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
348 		times->utime = sig->utime;
349 		times->stime = sig->stime;
350 		times->sum_exec_runtime = sig->sum_sched_runtime;
351 
352 		for_each_thread(tsk, t) {
353 			task_cputime(t, &utime, &stime);
354 			times->utime += utime;
355 			times->stime += stime;
356 			times->sum_exec_runtime += read_sum_exec_runtime(t);
357 		}
358 		/* If lockless access failed, take the lock. */
359 		nextseq = 1;
360 	} while (need_seqretry(&sig->stats_lock, seq));
361 	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
362 	rcu_read_unlock();
363 }
364 
365 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
366 /*
367  * Account a tick to a process and cpustat
368  * @p: the process that the CPU time gets accounted to
369  * @user_tick: is the tick from userspace
370  * @rq: the pointer to rq
371  *
372  * Tick demultiplexing follows the order
373  * - pending hardirq update
374  * - pending softirq update
375  * - user_time
376  * - idle_time
377  * - system time
378  *   - check for guest_time
379  *   - else account as system_time
380  *
381  * The check for hardirq is done for both system and user time, as there is
382  * no timer going off while we are in hardirq and hence we may never get an
383  * opportunity to update it solely in system time.
384  * p->stime and friends are only updated on system time and not on irq or
385  * softirq time, as those do not count in task exec_runtime any more.
386  */
387 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
388 					 int ticks)
389 {
390 	u64 other, cputime = TICK_NSEC * ticks;
391 
392 	/*
393 	 * When returning from idle, many ticks can get accounted at
394 	 * once, including some ticks of steal, irq, and softirq time.
395 	 * Subtract those ticks from the amount of time accounted to
396 	 * idle, or potentially user or system time. Due to rounding,
397 	 * other time can exceed ticks occasionally.
398 	 */
399 	other = account_other_time(ULONG_MAX);
400 	if (other >= cputime)
401 		return;
402 
403 	cputime -= other;
404 
405 	if (this_cpu_ksoftirqd() == p) {
406 		/*
407 		 * ksoftirqd time does not get accounted in cpu_softirq_time,
408 		 * so we have to handle it separately here.
409 		 * Also, p->stime needs to be updated for ksoftirqd.
410 		 */
411 		account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
412 	} else if (user_tick) {
413 		account_user_time(p, cputime);
414 	} else if (p == this_rq()->idle) {
415 		account_idle_time(cputime);
416 	} else if (p->flags & PF_VCPU) { /* System time or guest time */
417 		account_guest_time(p, cputime);
418 	} else {
419 		account_system_index_time(p, cputime, CPUTIME_SYSTEM);
420 	}
421 	trace_android_vh_irqtime_account_process_tick(p, this_rq(), user_tick, ticks);
422 }
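/*
 * For illustration, assuming HZ=250: returning from idle with ticks == 3
 * gives cputime == 12,000,000 ns. If account_other_time() reports
 * 5,000,000 ns of steal/irq/softirq time, the remaining 7,000,000 ns are
 * charged according to the demultiplexing order above, e.g. to CPUTIME_IDLE
 * when p is the idle task, or to user/guest/system time otherwise.
 */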
423 
424 static void irqtime_account_idle_ticks(int ticks)
425 {
426 	irqtime_account_process_tick(current, 0, ticks);
427 }
428 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
429 static inline void irqtime_account_idle_ticks(int ticks) { }
430 static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
431 						int nr_ticks) { }
432 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
433 
434 /*
435  * Use precise platform statistics if available:
436  */
437 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
438 
439 # ifndef __ARCH_HAS_VTIME_TASK_SWITCH
440 void vtime_task_switch(struct task_struct *prev)
441 {
442 	if (is_idle_task(prev))
443 		vtime_account_idle(prev);
444 	else
445 		vtime_account_kernel(prev);
446 
447 	vtime_flush(prev);
448 	arch_vtime_task_switch(prev);
449 }
450 # endif
451 
452 void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
453 {
454 	unsigned int pc = irq_count() - offset;
455 
456 	if (pc & HARDIRQ_OFFSET) {
457 		vtime_account_hardirq(tsk);
458 	} else if (pc & SOFTIRQ_OFFSET) {
459 		vtime_account_softirq(tsk);
460 	} else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
461 		   is_idle_task(tsk)) {
462 		vtime_account_idle(tsk);
463 	} else {
464 		vtime_account_kernel(tsk);
465 	}
466 }
467 
468 void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
469 		    u64 *ut, u64 *st)
470 {
471 	*ut = curr->utime;
472 	*st = curr->stime;
473 }
474 
475 void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
476 {
477 	*ut = p->utime;
478 	*st = p->stime;
479 }
480 EXPORT_SYMBOL_GPL(task_cputime_adjusted);
481 
482 void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
483 {
484 	struct task_cputime cputime;
485 
486 	thread_group_cputime(p, &cputime);
487 
488 	*ut = cputime.utime;
489 	*st = cputime.stime;
490 }
491 EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);
492 
493 #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */
494 
495 /*
496  * Account a single tick or a few ticks of CPU time.
497  * @p: the process that the CPU time gets accounted to
498  * @user_tick: indicates if the tick is a user or a system tick
499  */
500 void account_process_tick(struct task_struct *p, int user_tick)
501 {
502 	u64 cputime, steal;
503 	int ticks = 1;
504 
505 	trace_android_vh_account_process_tick_gran(user_tick, &ticks);
506 	if (!ticks)
507 		return;
508 
509 	if (vtime_accounting_enabled_this_cpu())
510 		return;
511 	trace_android_vh_account_task_time(p, this_rq(), user_tick, ticks);
512 
513 	if (sched_clock_irqtime) {
514 		irqtime_account_process_tick(p, user_tick, ticks);
515 		return;
516 	}
517 
518 	cputime = TICK_NSEC * ticks;
519 	steal = steal_account_process_time(ULONG_MAX);
520 
521 	if (steal >= cputime)
522 		return;
523 
524 	cputime -= steal;
525 
526 	if (user_tick)
527 		account_user_time(p, cputime);
528 	else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
529 		account_system_time(p, HARDIRQ_OFFSET, cputime);
530 	else
531 		account_idle_time(cputime);
532 }
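/*
 * For illustration, assuming HZ=250 and ticks == 1: cputime starts at
 * 4,000,000 ns. If the hypervisor reports 4,200,000 ns of steal time for
 * this interval, steal >= cputime and the tick is dropped entirely (it has
 * already been charged to CPUTIME_STEAL inside steal_account_process_time());
 * if it reports 1,000,000 ns, the remaining 3,000,000 ns go to user, system
 * or idle time as appropriate.
 */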
533 
534 /*
535  * Account multiple ticks of idle time.
536  * @ticks: number of stolen ticks
537  */
538 void account_idle_ticks(unsigned long ticks)
539 {
540 	u64 cputime, steal;
541 
542 	if (sched_clock_irqtime) {
543 		irqtime_account_idle_ticks(ticks);
544 		return;
545 	}
546 
547 	cputime = ticks * TICK_NSEC;
548 	steal = steal_account_process_time(ULONG_MAX);
549 
550 	if (steal >= cputime)
551 		return;
552 
553 	cputime -= steal;
554 	account_idle_time(cputime);
555 }
556 
557 /*
558  * Adjust tick based cputime random precision against scheduler runtime
559  * accounting.
560  *
561  * Tick based cputime accounting depends on whether the random scheduling
562  * timeslices of a task happen to be interrupted by the timer.  Depending on
563  * these circumstances, the number of these interrupts may be over- or
564  * underestimated, matching the real user and system cputime only with a
565  * variable precision.
566  *
567  * Fix this by scaling these tick based values against the total runtime
568  * accounted by the CFS scheduler.
569  *
570  * This code provides the following guarantees:
571  *
572  *   stime + utime == rtime
573  *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
574  *
575  * Assuming that rtime_i+1 >= rtime_i.
576  */
577 void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
578 		    u64 *ut, u64 *st)
579 {
580 	u64 rtime, stime, utime;
581 	unsigned long flags;
582 
583 	/* Serialize concurrent callers such that we can honour our guarantees */
584 	raw_spin_lock_irqsave(&prev->lock, flags);
585 	rtime = curr->sum_exec_runtime;
586 
587 	/*
588 	 * This is possible under two circumstances:
589 	 *  - rtime isn't monotonic after all (a bug);
590 	 *  - we got reordered by the lock.
591 	 *
592 	 * In both cases this acts as a filter such that the rest of the code
593 	 * can assume it is monotonic regardless of anything else.
594 	 */
595 	if (prev->stime + prev->utime >= rtime)
596 		goto out;
597 
598 	stime = curr->stime;
599 	utime = curr->utime;
600 
601 	/*
602 	 * If either stime or utime are 0, assume all runtime is userspace.
603 	 * Once a task gets some ticks, the monotonicity code at 'update:'
604 	 * will ensure things converge to the observed ratio.
605 	 */
606 	if (stime == 0) {
607 		utime = rtime;
608 		goto update;
609 	}
610 
611 	if (utime == 0) {
612 		stime = rtime;
613 		goto update;
614 	}
615 
616 	stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
617 
618 update:
619 	/*
620 	 * Make sure stime doesn't go backwards; this preserves monotonicity
621 	 * for utime because rtime is monotonic.
622 	 *
623 	 *  utime_i+1 = rtime_i+1 - stime_i
624 	 *            = rtime_i+1 - (rtime_i - utime_i)
625 	 *            = (rtime_i+1 - rtime_i) + utime_i
626 	 *            >= utime_i
627 	 */
628 	if (stime < prev->stime)
629 		stime = prev->stime;
630 	utime = rtime - stime;
631 
632 	/*
633 	 * Make sure utime doesn't go backwards; this still preserves
634 	 * monotonicity for stime, analogous argument to above.
635 	 */
636 	if (utime < prev->utime) {
637 		utime = prev->utime;
638 		stime = rtime - utime;
639 	}
640 
641 	prev->stime = stime;
642 	prev->utime = utime;
643 out:
644 	*ut = prev->utime;
645 	*st = prev->stime;
646 	raw_spin_unlock_irqrestore(&prev->lock, flags);
647 }
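/*
 * Worked example of the scaling above (illustrative numbers, assuming HZ=250
 * so one tick is 4,000,000 ns):
 *
 *   curr->stime = 16,000,000 ns   (4 ticks sampled as system)
 *   curr->utime = 16,000,000 ns   (4 ticks sampled as user)
 *   rtime       = 24,000,000 ns   (precise runtime from the scheduler)
 *
 *   stime = mul_u64_u64_div_u64(16M, 24M, 16M + 16M) = 12,000,000 ns
 *   utime = rtime - stime                            = 12,000,000 ns
 *
 * If prev->stime were already 13,000,000 ns, stime would be clamped up to
 * 13,000,000 and utime reduced to 11,000,000, preserving
 * stime + utime == rtime and the monotonicity guarantees listed above.
 */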
648 
649 void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
650 {
651 	struct task_cputime cputime = {
652 		.sum_exec_runtime = p->se.sum_exec_runtime,
653 	};
654 
655 	if (task_cputime(p, &cputime.utime, &cputime.stime))
656 		cputime.sum_exec_runtime = task_sched_runtime(p);
657 	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
658 }
659 EXPORT_SYMBOL_GPL(task_cputime_adjusted);
660 
661 void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
662 {
663 	struct task_cputime cputime;
664 
665 	thread_group_cputime(p, &cputime);
666 	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
667 }
668 EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);
669 
670 #endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
671 
672 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
673 static u64 vtime_delta(struct vtime *vtime)
674 {
675 	unsigned long long clock;
676 
677 	clock = sched_clock();
678 	if (clock < vtime->starttime)
679 		return 0;
680 
681 	return clock - vtime->starttime;
682 }
683 
684 static u64 get_vtime_delta(struct vtime *vtime)
685 {
686 	u64 delta = vtime_delta(vtime);
687 	u64 other;
688 
689 	/*
690 	 * Unlike tick based timing, vtime based timing never has lost
691 	 * ticks, so there is no need for steal time accounting to make up
692 	 * for lost ticks. Vtime accounts a rounded version of actual
693 	 * elapsed time. Limit account_other_time to prevent rounding
694 	 * errors from causing elapsed vtime to go negative.
695 	 */
696 	other = account_other_time(delta);
697 	WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
698 	vtime->starttime += delta;
699 
700 	return delta - other;
701 }
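/*
 * For illustration: if 3,000,000 ns of sched_clock() time elapsed since
 * vtime->starttime and account_other_time() attributes 500,000 ns of that
 * to steal/irq/softirq, get_vtime_delta() advances starttime by the full
 * 3,000,000 ns but returns only the remaining 2,500,000 ns for the caller
 * to charge as system, user or guest time.
 */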
702 
703 static void vtime_account_system(struct task_struct *tsk,
704 				 struct vtime *vtime)
705 {
706 	vtime->stime += get_vtime_delta(vtime);
707 	if (vtime->stime >= TICK_NSEC) {
708 		account_system_time(tsk, irq_count(), vtime->stime);
709 		vtime->stime = 0;
710 	}
711 }
712 
713 static void vtime_account_guest(struct task_struct *tsk,
714 				struct vtime *vtime)
715 {
716 	vtime->gtime += get_vtime_delta(vtime);
717 	if (vtime->gtime >= TICK_NSEC) {
718 		account_guest_time(tsk, vtime->gtime);
719 		vtime->gtime = 0;
720 	}
721 }
722 
723 static void __vtime_account_kernel(struct task_struct *tsk,
724 				   struct vtime *vtime)
725 {
726 	/* We might have scheduled out from guest path */
727 	if (vtime->state == VTIME_GUEST)
728 		vtime_account_guest(tsk, vtime);
729 	else
730 		vtime_account_system(tsk, vtime);
731 }
732 
733 void vtime_account_kernel(struct task_struct *tsk)
734 {
735 	struct vtime *vtime = &tsk->vtime;
736 
737 	if (!vtime_delta(vtime))
738 		return;
739 
740 	write_seqcount_begin(&vtime->seqcount);
741 	__vtime_account_kernel(tsk, vtime);
742 	write_seqcount_end(&vtime->seqcount);
743 }
744 
745 void vtime_user_enter(struct task_struct *tsk)
746 {
747 	struct vtime *vtime = &tsk->vtime;
748 
749 	write_seqcount_begin(&vtime->seqcount);
750 	vtime_account_system(tsk, vtime);
751 	vtime->state = VTIME_USER;
752 	write_seqcount_end(&vtime->seqcount);
753 }
754 
755 void vtime_user_exit(struct task_struct *tsk)
756 {
757 	struct vtime *vtime = &tsk->vtime;
758 
759 	write_seqcount_begin(&vtime->seqcount);
760 	vtime->utime += get_vtime_delta(vtime);
761 	if (vtime->utime >= TICK_NSEC) {
762 		account_user_time(tsk, vtime->utime);
763 		vtime->utime = 0;
764 	}
765 	vtime->state = VTIME_SYS;
766 	write_seqcount_end(&vtime->seqcount);
767 }
768 
769 void vtime_guest_enter(struct task_struct *tsk)
770 {
771 	struct vtime *vtime = &tsk->vtime;
772 	/*
773 	 * The flags must be updated under the lock with
774 	 * the vtime_starttime flush and update.
775 	 * That enforces the right ordering and update sequence
776 	 * synchronization against the reader (task_gtime())
777 	 * that can thus safely catch up with a tickless delta.
778 	 */
779 	write_seqcount_begin(&vtime->seqcount);
780 	vtime_account_system(tsk, vtime);
781 	tsk->flags |= PF_VCPU;
782 	vtime->state = VTIME_GUEST;
783 	write_seqcount_end(&vtime->seqcount);
784 }
785 EXPORT_SYMBOL_GPL(vtime_guest_enter);
786 
787 void vtime_guest_exit(struct task_struct *tsk)
788 {
789 	struct vtime *vtime = &tsk->vtime;
790 
791 	write_seqcount_begin(&vtime->seqcount);
792 	vtime_account_guest(tsk, vtime);
793 	tsk->flags &= ~PF_VCPU;
794 	vtime->state = VTIME_SYS;
795 	write_seqcount_end(&vtime->seqcount);
796 }
797 EXPORT_SYMBOL_GPL(vtime_guest_exit);
798 
799 void vtime_account_idle(struct task_struct *tsk)
800 {
801 	account_idle_time(get_vtime_delta(&tsk->vtime));
802 }
803 
804 void vtime_task_switch_generic(struct task_struct *prev)
805 {
806 	struct vtime *vtime = &prev->vtime;
807 
808 	write_seqcount_begin(&vtime->seqcount);
809 	if (vtime->state == VTIME_IDLE)
810 		vtime_account_idle(prev);
811 	else
812 		__vtime_account_kernel(prev, vtime);
813 	vtime->state = VTIME_INACTIVE;
814 	vtime->cpu = -1;
815 	write_seqcount_end(&vtime->seqcount);
816 
817 	vtime = &current->vtime;
818 
819 	write_seqcount_begin(&vtime->seqcount);
820 	if (is_idle_task(current))
821 		vtime->state = VTIME_IDLE;
822 	else if (current->flags & PF_VCPU)
823 		vtime->state = VTIME_GUEST;
824 	else
825 		vtime->state = VTIME_SYS;
826 	vtime->starttime = sched_clock();
827 	vtime->cpu = smp_processor_id();
828 	write_seqcount_end(&vtime->seqcount);
829 }
830 
831 void vtime_init_idle(struct task_struct *t, int cpu)
832 {
833 	struct vtime *vtime = &t->vtime;
834 	unsigned long flags;
835 
836 	local_irq_save(flags);
837 	write_seqcount_begin(&vtime->seqcount);
838 	vtime->state = VTIME_IDLE;
839 	vtime->starttime = sched_clock();
840 	vtime->cpu = cpu;
841 	write_seqcount_end(&vtime->seqcount);
842 	local_irq_restore(flags);
843 }
844 
845 u64 task_gtime(struct task_struct *t)
846 {
847 	struct vtime *vtime = &t->vtime;
848 	unsigned int seq;
849 	u64 gtime;
850 
851 	if (!vtime_accounting_enabled())
852 		return t->gtime;
853 
854 	do {
855 		seq = read_seqcount_begin(&vtime->seqcount);
856 
857 		gtime = t->gtime;
858 		if (vtime->state == VTIME_GUEST)
859 			gtime += vtime->gtime + vtime_delta(vtime);
860 
861 	} while (read_seqcount_retry(&vtime->seqcount, seq));
862 
863 	return gtime;
864 }
865 
866 /*
867  * Fetch cputime raw values from fields of task_struct and
868  * add up the pending nohz execution time since the last
869  * cputime snapshot.
870  */
871 bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
872 {
873 	struct vtime *vtime = &t->vtime;
874 	unsigned int seq;
875 	u64 delta;
876 	int ret;
877 
878 	if (!vtime_accounting_enabled()) {
879 		*utime = t->utime;
880 		*stime = t->stime;
881 		return false;
882 	}
883 
884 	do {
885 		ret = false;
886 		seq = read_seqcount_begin(&vtime->seqcount);
887 
888 		*utime = t->utime;
889 		*stime = t->stime;
890 
891 		/* Task is sleeping or idle, nothing to add */
892 		if (vtime->state < VTIME_SYS)
893 			continue;
894 
895 		ret = true;
896 		delta = vtime_delta(vtime);
897 
898 		/*
899 		 * Task runs either in user (including guest) or kernel space,
900 		 * add pending nohz time to the right place.
901 		 */
902 		if (vtime->state == VTIME_SYS)
903 			*stime += vtime->stime + delta;
904 		else
905 			*utime += vtime->utime + delta;
906 	} while (read_seqcount_retry(&vtime->seqcount, seq));
907 
908 	return ret;
909 }
910 
911 static int vtime_state_fetch(struct vtime *vtime, int cpu)
912 {
913 	int state = READ_ONCE(vtime->state);
914 
915 	/*
916 	 * We raced against a context switch, fetch the
917 	 * kcpustat task again.
918 	 */
919 	if (vtime->cpu != cpu && vtime->cpu != -1)
920 		return -EAGAIN;
921 
922 	/*
923 	 * Two possible things here:
924 	 * 1) We are seeing the scheduling out task (prev) or any past one.
925 	 * 2) We are seeing the scheduling in task (next) but it hasn't
926 	 *    passed through vtime_task_switch() yet, so the pending
927 	 *    cputime of the prev task may not be flushed yet.
928 	 *
929 	 * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
930 	 */
931 	if (state == VTIME_INACTIVE)
932 		return -EAGAIN;
933 
934 	return state;
935 }
936 
937 static u64 kcpustat_user_vtime(struct vtime *vtime)
938 {
939 	if (vtime->state == VTIME_USER)
940 		return vtime->utime + vtime_delta(vtime);
941 	else if (vtime->state == VTIME_GUEST)
942 		return vtime->gtime + vtime_delta(vtime);
943 	return 0;
944 }
945 
946 static int kcpustat_field_vtime(u64 *cpustat,
947 				struct task_struct *tsk,
948 				enum cpu_usage_stat usage,
949 				int cpu, u64 *val)
950 {
951 	struct vtime *vtime = &tsk->vtime;
952 	unsigned int seq;
953 
954 	do {
955 		int state;
956 
957 		seq = read_seqcount_begin(&vtime->seqcount);
958 
959 		state = vtime_state_fetch(vtime, cpu);
960 		if (state < 0)
961 			return state;
962 
963 		*val = cpustat[usage];
964 
965 		/*
966 		 * Nice vs. unnice cputime accounting may be inaccurate if
967 		 * the nice value has changed since the last vtime update.
968 		 * But a proper fix would involve interrupting the target on
969 		 * nice updates, which is a no-go on nohz_full (although the
970 		 * scheduler may still interrupt the target if rescheduling is needed...)
971 		 */
972 		switch (usage) {
973 		case CPUTIME_SYSTEM:
974 			if (state == VTIME_SYS)
975 				*val += vtime->stime + vtime_delta(vtime);
976 			break;
977 		case CPUTIME_USER:
978 			if (task_nice(tsk) <= 0)
979 				*val += kcpustat_user_vtime(vtime);
980 			break;
981 		case CPUTIME_NICE:
982 			if (task_nice(tsk) > 0)
983 				*val += kcpustat_user_vtime(vtime);
984 			break;
985 		case CPUTIME_GUEST:
986 			if (state == VTIME_GUEST && task_nice(tsk) <= 0)
987 				*val += vtime->gtime + vtime_delta(vtime);
988 			break;
989 		case CPUTIME_GUEST_NICE:
990 			if (state == VTIME_GUEST && task_nice(tsk) > 0)
991 				*val += vtime->gtime + vtime_delta(vtime);
992 			break;
993 		default:
994 			break;
995 		}
996 	} while (read_seqcount_retry(&vtime->seqcount, seq));
997 
998 	return 0;
999 }
1000 
1001 u64 kcpustat_field(struct kernel_cpustat *kcpustat,
1002 		   enum cpu_usage_stat usage, int cpu)
1003 {
1004 	u64 *cpustat = kcpustat->cpustat;
1005 	u64 val = cpustat[usage];
1006 	struct rq *rq;
1007 	int err;
1008 
1009 	if (!vtime_accounting_enabled_cpu(cpu))
1010 		return val;
1011 
1012 	rq = cpu_rq(cpu);
1013 
1014 	for (;;) {
1015 		struct task_struct *curr;
1016 
1017 		rcu_read_lock();
1018 		curr = rcu_dereference(rq->curr);
1019 		if (WARN_ON_ONCE(!curr)) {
1020 			rcu_read_unlock();
1021 			return cpustat[usage];
1022 		}
1023 
1024 		err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
1025 		rcu_read_unlock();
1026 
1027 		if (!err)
1028 			return val;
1029 
1030 		cpu_relax();
1031 	}
1032 }
1033 EXPORT_SYMBOL_GPL(kcpustat_field);
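/*
 * A minimal usage sketch (hypothetical caller, not from this file): a
 * /proc-style reader that wants per-CPU idle time which stays accurate on
 * nohz_full CPUs could sum the vtime-aware field instead of reading the raw
 * kcpustat array directly:
 *
 *	u64 idle_ns = 0;
 *	int i;
 *
 *	for_each_possible_cpu(i)
 *		idle_ns += kcpustat_field(&kcpustat_cpu(i), CPUTIME_IDLE, i);
 */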
1034 
1035 static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
1036 				    const struct kernel_cpustat *src,
1037 				    struct task_struct *tsk, int cpu)
1038 {
1039 	struct vtime *vtime = &tsk->vtime;
1040 	unsigned int seq;
1041 
1042 	do {
1043 		u64 *cpustat;
1044 		u64 delta;
1045 		int state;
1046 
1047 		seq = read_seqcount_begin(&vtime->seqcount);
1048 
1049 		state = vtime_state_fetch(vtime, cpu);
1050 		if (state < 0)
1051 			return state;
1052 
1053 		*dst = *src;
1054 		cpustat = dst->cpustat;
1055 
1056 		/* Task is sleeping, dead or idle, nothing to add */
1057 		if (state < VTIME_SYS)
1058 			continue;
1059 
1060 		delta = vtime_delta(vtime);
1061 
1062 		/*
1063 		 * Task runs either in user (including guest) or kernel space,
1064 		 * add pending nohz time to the right place.
1065 		 */
1066 		if (state == VTIME_SYS) {
1067 			cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
1068 		} else if (state == VTIME_USER) {
1069 			if (task_nice(tsk) > 0)
1070 				cpustat[CPUTIME_NICE] += vtime->utime + delta;
1071 			else
1072 				cpustat[CPUTIME_USER] += vtime->utime + delta;
1073 		} else {
1074 			WARN_ON_ONCE(state != VTIME_GUEST);
1075 			if (task_nice(tsk) > 0) {
1076 				cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
1077 				cpustat[CPUTIME_NICE] += vtime->gtime + delta;
1078 			} else {
1079 				cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
1080 				cpustat[CPUTIME_USER] += vtime->gtime + delta;
1081 			}
1082 		}
1083 	} while (read_seqcount_retry(&vtime->seqcount, seq));
1084 
1085 	return 0;
1086 }
1087 
1088 void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
1089 {
1090 	const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
1091 	struct rq *rq;
1092 	int err;
1093 
1094 	if (!vtime_accounting_enabled_cpu(cpu)) {
1095 		*dst = *src;
1096 		return;
1097 	}
1098 
1099 	rq = cpu_rq(cpu);
1100 
1101 	for (;;) {
1102 		struct task_struct *curr;
1103 
1104 		rcu_read_lock();
1105 		curr = rcu_dereference(rq->curr);
1106 		if (WARN_ON_ONCE(!curr)) {
1107 			rcu_read_unlock();
1108 			*dst = *src;
1109 			return;
1110 		}
1111 
1112 		err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
1113 		rcu_read_unlock();
1114 
1115 		if (!err)
1116 			return;
1117 
1118 		cpu_relax();
1119 	}
1120 }
1121 EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);
1122 
1123 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
1124