/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 *
 * Window Assisted Load Tracking (WALT) implementation credits:
 * Srivatsa Vaddagiri, Steve Muckle, Syed Rameez Mustafa, Joonwoo Park,
 * Pavan Kumar Kondeti, Olav Haugan
 *
 * 2016-03-06: Integration with EAS/refactoring by Vikram Mulukutla
 *             and Todd Kjos
 */

#include <linux/syscore_ops.h>
#include <linux/cpufreq.h>
#include <trace/events/sched.h>
#include <clocksource/arm_arch_timer.h>
#include "sched.h"
#include "walt.h"

#define WINDOW_STATS_RECENT             0
#define WINDOW_STATS_MAX                1
#define WINDOW_STATS_MAX_RECENT_AVG     2
#define WINDOW_STATS_AVG                3
#define WINDOW_STATS_INVALID_POLICY     4

#define EXITING_TASK_MARKER     0xdeaddead

static __read_mostly unsigned int walt_ravg_hist_size = 5;
static __read_mostly unsigned int walt_window_stats_policy =
        WINDOW_STATS_MAX_RECENT_AVG;
static __read_mostly unsigned int walt_account_wait_time = 1;
static __read_mostly unsigned int walt_freq_account_wait_time = 0;
static __read_mostly unsigned int walt_io_is_busy = 0;

unsigned int sysctl_sched_walt_init_task_load_pct = 15;

/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
unsigned int __read_mostly walt_disabled = 0;

static unsigned int max_possible_efficiency = 1024;
static unsigned int min_possible_efficiency = 1024;

/*
 * Maximum possible frequency across all cpus. Task demand and cpu
 * capacity (cpu_power) metrics are scaled in reference to it.
 */
static unsigned int max_possible_freq = 1;

/*
 * Minimum possible max_freq across all cpus. This will be the same as
 * max_possible_freq on homogeneous systems and could be different from
 * max_possible_freq on heterogeneous systems. min_max_freq is used to derive
 * capacity (cpu_power) of cpus.
 */
static unsigned int min_max_freq = 1;
65
66 static unsigned int max_capacity = 1024;
67 static unsigned int min_capacity = 1024;
68 static unsigned int max_load_scale_factor = 1024;
69 static unsigned int max_possible_capacity = 1024;
70
71 /* Mask of all CPUs that have max_possible_capacity */
72 static cpumask_t mpc_mask = CPU_MASK_ALL;
73
74 /* Window size (in ns) */
75 __read_mostly unsigned int walt_ravg_window = 20000000;
76
77 /* Min window size (in ns) = 10ms */
78 #ifdef CONFIG_HZ_300
/*
 * The tick interval becomes 3333333 ns due to
 * rounding error when HZ=300.
 */
#define MIN_SCHED_RAVG_WINDOW (3333333 * 6)
#else
#define MIN_SCHED_RAVG_WINDOW 10000000
#endif

/* Max window size (in ns) = 1s */
#define MAX_SCHED_RAVG_WINDOW 1000000000

static unsigned int sync_cpu;
static ktime_t ktime_last;
static bool walt_ktime_suspended;

static unsigned int task_load(struct task_struct *p)
{
        return p->ravg.demand;
}

void
walt_inc_cumulative_runnable_avg(struct rq *rq,
                                 struct task_struct *p)
{
        rq->cumulative_runnable_avg += p->ravg.demand;
}

void
walt_dec_cumulative_runnable_avg(struct rq *rq,
                                 struct task_struct *p)
{
        rq->cumulative_runnable_avg -= p->ravg.demand;
        BUG_ON((s64)rq->cumulative_runnable_avg < 0);
}

static void
fixup_cumulative_runnable_avg(struct rq *rq,
                              struct task_struct *p, u64 new_task_load)
{
        s64 task_load_delta = (s64)new_task_load - task_load(p);

        rq->cumulative_runnable_avg += task_load_delta;
        if ((s64)rq->cumulative_runnable_avg < 0)
                panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
                        task_load_delta, task_load(p));
}

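/*
 * walt_ktime_clock() is the time base used for all WALT accounting. It
 * returns a monotonic nanosecond timestamp while the system is running and
 * reports a frozen value (ktime_last) once walt_suspend() has run, so that
 * windows do not appear to grow across a suspend/resume cycle.
 */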
u64 walt_ktime_clock(void)
{
        if (unlikely(walt_ktime_suspended))
                return ktime_to_ns(ktime_last);
        return ktime_get_ns();
}

static void walt_resume(void)
{
        walt_ktime_suspended = false;
}

static int walt_suspend(void)
{
        ktime_last = ktime_get();
        walt_ktime_suspended = true;
        return 0;
}

static struct syscore_ops walt_syscore_ops = {
        .resume = walt_resume,
        .suspend = walt_suspend
};

static int __init walt_init_ops(void)
{
        register_syscore_ops(&walt_syscore_ops);
        return 0;
}
late_initcall(walt_init_ops);

void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
                struct task_struct *p)
{
        cfs_rq->cumulative_runnable_avg += p->ravg.demand;
}

void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
                struct task_struct *p)
{
        cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
}

static int exiting_task(struct task_struct *p)
{
        if (p->flags & PF_EXITING) {
                if (p->ravg.sum_history[0] != EXITING_TASK_MARKER)
                        p->ravg.sum_history[0] = EXITING_TASK_MARKER;
                return 1;
        }
        return 0;
}

static int __init set_walt_ravg_window(char *str)
{
        get_option(&str, &walt_ravg_window);

        walt_disabled = (walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
                         walt_ravg_window > MAX_SCHED_RAVG_WINDOW);
        return 0;
}

early_param("walt_ravg_window", set_walt_ravg_window);

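/*
 * Advance rq->window_start to the most recent window boundary that is not
 * after 'wallclock'. Only whole windows are accounted; if less than a full
 * window has elapsed since the last update, window_start is left untouched.
 */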
static void
update_window_start(struct rq *rq, u64 wallclock)
{
        s64 delta;
        int nr_windows;

        delta = wallclock - rq->window_start;
        /*
         * If the MPM global timer has been cleared, treat delta as 0 so
         * that we do not trigger a kernel BUG.
         */
        if (delta < 0) {
                if (arch_timer_read_counter() == 0)
                        delta = 0;
                else
                        BUG_ON(1);
        }

        if (delta < walt_ravg_window)
                return;

        nr_windows = div64_u64(delta, walt_ravg_window);
        rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;
}

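/*
 * Scale raw execution time into the reference domain used by WALT:
 *
 *   scaled = delta * (cur_freq / max_possible_freq)
 *                  * (efficiency / max_possible_efficiency)
 *
 * so that time spent at a low frequency or on a less efficient CPU counts
 * for proportionally less demand. For example, 1 ms of execution at half of
 * max_possible_freq on a CPU with efficiency 512 (max 1024) is accounted as
 * roughly 0.25 ms.
 */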
static u64 scale_exec_time(u64 delta, struct rq *rq)
{
        unsigned int cur_freq = rq->cur_freq;
        int sf;

        if (unlikely(cur_freq > max_possible_freq))
                cur_freq = rq->max_possible_freq;

        /* round up div64 */
        delta = div64_u64(delta * cur_freq + max_possible_freq - 1,
                          max_possible_freq);

        sf = DIV_ROUND_UP(rq->efficiency * 1024, max_possible_efficiency);

        delta *= sf;
        delta >>= 10;

        return delta;
}

static int cpu_is_waiting_on_io(struct rq *rq)
{
        if (!walt_io_is_busy)
                return 0;

        return atomic_read(&rq->nr_iowait);
}

void walt_account_irqtime(int cpu, struct task_struct *curr,
                          u64 delta, u64 wallclock)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags, nr_windows;
        u64 cur_jiffies_ts;

        raw_spin_lock_irqsave(&rq->lock, flags);

        /*
         * cputime (wallclock) uses sched_clock so use the same here for
         * consistency.
         */
        delta += sched_clock() - wallclock;
        cur_jiffies_ts = get_jiffies_64();

        if (is_idle_task(curr))
                walt_update_task_ravg(curr, rq, IRQ_UPDATE, walt_ktime_clock(),
                                      delta);

        nr_windows = cur_jiffies_ts - rq->irqload_ts;

        if (nr_windows) {
                if (nr_windows < 10) {
                        /* Decay CPU's irqload by 3/4 for each window. */
                        rq->avg_irqload *= (3 * nr_windows);
                        rq->avg_irqload = div64_u64(rq->avg_irqload,
                                                    4 * nr_windows);
                } else {
                        rq->avg_irqload = 0;
                }
                rq->avg_irqload += rq->cur_irqload;
                rq->cur_irqload = 0;
        }

        rq->cur_irqload += delta;
        rq->irqload_ts = cur_jiffies_ts;
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#define WALT_HIGH_IRQ_TIMEOUT 3

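/*
 * Report the average irqload for a CPU, provided it was refreshed within the
 * last WALT_HIGH_IRQ_TIMEOUT jiffies; otherwise the data is considered stale
 * and the CPU is treated as having no irqload.
 */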
u64 walt_irqload(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        s64 delta;

        delta = get_jiffies_64() - rq->irqload_ts;

        /*
         * The current context can be preempted by an irq, and rq->irqload_ts
         * can be updated from irq context, so delta can be negative. This is
         * fine; we can safely return here, since it simply means an irq
         * occurred recently.
         */
        if (delta < WALT_HIGH_IRQ_TIMEOUT)
                return rq->avg_irqload;
        else
                return 0;
}

int walt_cpu_high_irqload(int cpu)
{
        return walt_irqload(cpu) >= sysctl_sched_walt_cpu_high_irqload;
}

static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
                                     u64 irqtime, int event)
{
        if (is_idle_task(p)) {
                /* TASK_WAKE && TASK_MIGRATE is not possible on idle task! */
                if (event == PICK_NEXT_TASK)
                        return 0;

                /* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */
                return irqtime || cpu_is_waiting_on_io(rq);
        }

        if (event == TASK_WAKE)
                return 0;

        if (event == PUT_PREV_TASK || event == IRQ_UPDATE ||
            event == TASK_UPDATE)
                return 1;

        /* Only TASK_MIGRATE && PICK_NEXT_TASK left */
        return walt_freq_account_wait_time;
}

/*
 * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
 */
static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
                                 int event, u64 wallclock, u64 irqtime)
{
        int new_window, nr_full_windows = 0;
        int p_is_curr_task = (p == rq->curr);
        u64 mark_start = p->ravg.mark_start;
        u64 window_start = rq->window_start;
        u32 window_size = walt_ravg_window;
        u64 delta;

        new_window = mark_start < window_start;
        if (new_window) {
                nr_full_windows = div64_u64((window_start - mark_start),
                                            window_size);
                if (p->ravg.active_windows < USHRT_MAX)
                        p->ravg.active_windows++;
        }

        /* Handle per-task window rollover. We don't care about the idle
         * task or exiting tasks. */
        if (new_window && !is_idle_task(p) && !exiting_task(p)) {
                u32 curr_window = 0;

                if (!nr_full_windows)
                        curr_window = p->ravg.curr_window;

                p->ravg.prev_window = curr_window;
                p->ravg.curr_window = 0;
        }

        if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
                /* account_busy_for_cpu_time() = 0, so no update to the
                 * task's current window needs to be made. This could be
                 * for example
                 *
                 *   - a wakeup event on a task within the current
                 *     window (!new_window below, no action required),
                 *   - switching to a new task from idle (PICK_NEXT_TASK)
                 *     in a new window where irqtime is 0 and we aren't
                 *     waiting on IO */

                if (!new_window)
                        return;

                /* A new window has started. The RQ demand must be rolled
                 * over if p is the current task. */
                if (p_is_curr_task) {
                        u64 prev_sum = 0;

                        /* p is either idle task or an exiting task */
                        if (!nr_full_windows)
                                prev_sum = rq->curr_runnable_sum;

                        rq->prev_runnable_sum = prev_sum;
                        rq->curr_runnable_sum = 0;
                }

                return;
        }

        if (!new_window) {
                /* account_busy_for_cpu_time() = 1 so busy time needs
                 * to be accounted to the current window. No rollover
                 * since we didn't start a new window. An example of this is
                 * when a task starts execution and then sleeps within the
                 * same window. */

                if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
                        delta = wallclock - mark_start;
                else
                        delta = irqtime;
                delta = scale_exec_time(delta, rq);
                rq->curr_runnable_sum += delta;
                if (!is_idle_task(p) && !exiting_task(p))
                        p->ravg.curr_window += delta;

                return;
        }

        if (!p_is_curr_task) {
                /* account_busy_for_cpu_time() = 1 so busy time needs
                 * to be accounted to the current window. A new window
                 * has also started, but p is not the current task, so the
                 * window is not rolled over - just split up and account
                 * as necessary into curr and prev. The window is only
                 * rolled over when a new window is processed for the current
                 * task.
                 *
                 * Irqtime can't be accounted by a task that isn't the
                 * currently running task. */

                if (!nr_full_windows) {
                        /* A full window hasn't elapsed, account partial
                         * contribution to previous completed window. */
                        delta = scale_exec_time(window_start - mark_start, rq);
                        if (!exiting_task(p))
                                p->ravg.prev_window += delta;
                } else {
                        /* Since at least one full window has elapsed,
                         * the contribution to the previous window is the
                         * full window (window_size). */
                        delta = scale_exec_time(window_size, rq);
                        if (!exiting_task(p))
                                p->ravg.prev_window = delta;
                }
                rq->prev_runnable_sum += delta;

                /* Account piece of busy time in the current window. */
                delta = scale_exec_time(wallclock - window_start, rq);
                rq->curr_runnable_sum += delta;
                if (!exiting_task(p))
                        p->ravg.curr_window = delta;

                return;
        }

        if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
                /* account_busy_for_cpu_time() = 1 so busy time needs
                 * to be accounted to the current window. A new window
                 * has started and p is the current task so rollover is
                 * needed. If any of these three above conditions are true
                 * then this busy time can't be accounted as irqtime.
                 *
                 * Busy time for the idle task or exiting tasks need not
                 * be accounted.
                 *
                 * An example of this would be a task that starts execution
                 * and then sleeps once a new window has begun. */

                if (!nr_full_windows) {
                        /* A full window hasn't elapsed, account partial
                         * contribution to previous completed window. */
                        delta = scale_exec_time(window_start - mark_start, rq);
                        if (!is_idle_task(p) && !exiting_task(p))
                                p->ravg.prev_window += delta;

                        delta += rq->curr_runnable_sum;
                } else {
                        /* Since at least one full window has elapsed,
                         * the contribution to the previous window is the
                         * full window (window_size). */
                        delta = scale_exec_time(window_size, rq);
                        if (!is_idle_task(p) && !exiting_task(p))
                                p->ravg.prev_window = delta;
                }
                /*
                 * Rollover for normal runnable sum is done here by overwriting
                 * the values in prev_runnable_sum and curr_runnable_sum.
                 * Rollover for new task runnable sum has completed by previous
                 * if-else statement.
                 */
                rq->prev_runnable_sum = delta;

                /* Account piece of busy time in the current window. */
                delta = scale_exec_time(wallclock - window_start, rq);
                rq->curr_runnable_sum = delta;
                if (!is_idle_task(p) && !exiting_task(p))
                        p->ravg.curr_window = delta;

                return;
        }

        if (irqtime) {
                /* account_busy_for_cpu_time() = 1 so busy time needs
                 * to be accounted to the current window. A new window
                 * has started and p is the current task so rollover is
                 * needed. The current task must be the idle task because
                 * irqtime is not accounted for any other task.
                 *
                 * Irqtime will be accounted each time we process IRQ activity
                 * after a period of idleness, so we know the IRQ busy time
                 * started at wallclock - irqtime. */

                BUG_ON(!is_idle_task(p));
                mark_start = wallclock - irqtime;

                /* Roll window over. If IRQ busy time was just in the current
                 * window then that is all that need be accounted. */
                rq->prev_runnable_sum = rq->curr_runnable_sum;
                if (mark_start > window_start) {
                        rq->curr_runnable_sum = scale_exec_time(irqtime, rq);
                        return;
                }

                /* The IRQ busy time spanned multiple windows. Process the
                 * busy time preceding the current window start first. */
                delta = window_start - mark_start;
                if (delta > window_size)
                        delta = window_size;
                delta = scale_exec_time(delta, rq);
                rq->prev_runnable_sum += delta;

                /* Process the remaining IRQ busy time in the current window. */
                delta = wallclock - window_start;
                rq->curr_runnable_sum = scale_exec_time(delta, rq);

                return;
        }

        BUG();
}

static int account_busy_for_task_demand(struct task_struct *p, int event)
{
        /* No need to bother updating task demand for exiting tasks
         * or the idle task. */
        if (exiting_task(p) || is_idle_task(p))
                return 0;

        /* When a task is waking up it is completing a segment of non-busy
         * time. Likewise, if wait time is not treated as busy time, then
         * when a task begins to run or is migrated, it is not running and
         * is completing a segment of non-busy time. */
        if (event == TASK_WAKE || (!walt_account_wait_time &&
            (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
                return 0;

        return 1;
}

/*
 * Called when new window is starting for a task, to record cpu usage over
 * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
 * when, say, a real-time task runs without preemption for several windows at a
 * stretch.
 */
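/*
 * Example: with walt_ravg_hist_size = 5, a history of {7, 6, 5, 4, 3} ms
 * (scaled) and a newly completed window of 2 ms, the history becomes
 * {2, 7, 6, 5, 4}. The reported demand is then 2 ms for WINDOW_STATS_RECENT,
 * 7 ms for WINDOW_STATS_MAX, 4.8 ms for WINDOW_STATS_AVG and
 * max(4.8, 2) = 4.8 ms for the default WINDOW_STATS_MAX_RECENT_AVG policy.
 */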
static void update_history(struct rq *rq, struct task_struct *p,
                           u32 runtime, int samples, int event)
{
        u32 *hist = &p->ravg.sum_history[0];
        int ridx, widx;
        u32 max = 0, avg, demand;
        u64 sum = 0;

        /* Ignore windows where task had no activity */
        if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
                goto done;

        /* Push new 'runtime' value onto stack */
        widx = walt_ravg_hist_size - 1;
        ridx = widx - samples;
        for (; ridx >= 0; --widx, --ridx) {
                hist[widx] = hist[ridx];
                sum += hist[widx];
                if (hist[widx] > max)
                        max = hist[widx];
        }

        for (widx = 0; widx < samples && widx < walt_ravg_hist_size; widx++) {
                hist[widx] = runtime;
                sum += hist[widx];
                if (hist[widx] > max)
                        max = hist[widx];
        }

        p->ravg.sum = 0;

        if (walt_window_stats_policy == WINDOW_STATS_RECENT) {
                demand = runtime;
        } else if (walt_window_stats_policy == WINDOW_STATS_MAX) {
                demand = max;
        } else {
                avg = div64_u64(sum, walt_ravg_hist_size);
                if (walt_window_stats_policy == WINDOW_STATS_AVG)
                        demand = avg;
                else
                        demand = max(avg, runtime);
        }

        /*
         * A throttled deadline sched class task gets dequeued without
         * changing p->on_rq. Since the dequeue decrements hmp stats
         * avoid decrementing it here again.
         */
        if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
                                     !p->dl.dl_throttled))
                fixup_cumulative_runnable_avg(rq, p, demand);

        p->ravg.demand = demand;

done:
        trace_walt_update_history(rq, p, runtime, samples, event);
        return;
}

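/*
 * Accumulate scaled execution time into p->ravg.sum for the window in
 * progress. The sum is clamped to one full window so that a task can never
 * report more than 100% demand for a single window.
 */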
static void add_to_task_demand(struct rq *rq, struct task_struct *p,
                               u64 delta)
{
        delta = scale_exec_time(delta, rq);
        p->ravg.sum += delta;
        if (unlikely(p->ravg.sum > walt_ravg_window))
                p->ravg.sum = walt_ravg_window;
}

/*
 * Account cpu demand of task and/or update task's cpu demand history
 *
 * ms = p->ravg.mark_start;
 * wc = wallclock
 * ws = rq->window_start
 *
 * Three possibilities:
 *
 * a) Task event is contained within one window.
 *    window_start < mark_start < wallclock
 *
 *    ws   ms  wc
 *    |    |   |
 *    V    V   V
 *    |---------------|
 *
 *    In this case, p->ravg.sum is updated *iff* event is appropriate
 *    (ex: event == PUT_PREV_TASK)
 *
 * b) Task event spans two windows.
 *    mark_start < window_start < wallclock
 *
 *    ms   ws   wc
 *    |    |    |
 *    V    V    V
 *    -----|-------------------
 *
 *    In this case, p->ravg.sum is updated with (ws - ms) *iff* event
 *    is appropriate, then a new window sample is recorded followed
 *    by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
 *
 * c) Task event spans more than two windows.
 *
 *    ms ws_tmp                          ws  wc
 *    |  |                               |   |
 *    V  V                               V   V
 *    ---|-------|-------|-------|-------|------
 *       |                               |
 *       |<------ nr_full_windows ------>|
 *
 *    In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
 *    event is appropriate, window sample of p->ravg.sum is recorded,
 *    'nr_full_window' samples of window_size is also recorded *iff*
 *    event is appropriate and finally p->ravg.sum is set to (wc - ws)
 *    *iff* event is appropriate.
 *
 * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
 * depends on it!
 */
static void update_task_demand(struct task_struct *p, struct rq *rq,
                               int event, u64 wallclock)
{
        u64 mark_start = p->ravg.mark_start;
        u64 delta, window_start = rq->window_start;
        int new_window, nr_full_windows;
        u32 window_size = walt_ravg_window;

        new_window = mark_start < window_start;
        if (!account_busy_for_task_demand(p, event)) {
                if (new_window)
                        /* If the time accounted isn't being accounted as
                         * busy time, and a new window started, only the
                         * previous window need be closed out with the
                         * pre-existing demand. Multiple windows may have
                         * elapsed, but since empty windows are dropped,
                         * it is not necessary to account those. */
                        update_history(rq, p, p->ravg.sum, 1, event);
                return;
        }

        if (!new_window) {
                /* The simple case - busy time contained within the existing
                 * window. */
                add_to_task_demand(rq, p, wallclock - mark_start);
                return;
        }

        /* Busy time spans at least two windows. Temporarily rewind
         * window_start to first window boundary after mark_start. */
        delta = window_start - mark_start;
        nr_full_windows = div64_u64(delta, window_size);
        window_start -= (u64)nr_full_windows * (u64)window_size;

        /* Process (window_start - mark_start) first */
        add_to_task_demand(rq, p, window_start - mark_start);

        /* Push new sample(s) into task's demand history */
        update_history(rq, p, p->ravg.sum, 1, event);
        if (nr_full_windows)
                update_history(rq, p, scale_exec_time(window_size, rq),
                               nr_full_windows, event);

        /* Roll window_start back to current to process any remainder
         * in current window. */
        window_start += (u64)nr_full_windows * (u64)window_size;

        /* Process (wallclock - window_start) next */
        mark_start = window_start;
        add_to_task_demand(rq, p, wallclock - mark_start);
}

/* Reflect task activity on its demand and cpu's busy time statistics */
void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
                           int event, u64 wallclock, u64 irqtime)
{
        if (walt_disabled || !rq->window_start)
                return;

        lockdep_assert_held(&rq->lock);

        update_window_start(rq, wallclock);

        if (!p->ravg.mark_start)
                goto done;

        update_task_demand(p, rq, event, wallclock);
        update_cpu_busy_time(p, rq, event, wallclock, irqtime);

done:
        trace_walt_update_task_ravg(p, rq, event, wallclock, irqtime);

        p->ravg.mark_start = wallclock;
}

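/*
 * Architectures that know the relative efficiency of their CPUs can override
 * this weak hook. The default reports every CPU at the reference efficiency
 * of SCHED_LOAD_SCALE (1024), i.e. a homogeneous system.
 */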
unsigned long __weak arch_get_cpu_efficiency(int cpu)
{
        return SCHED_LOAD_SCALE;
}

void walt_init_cpu_efficiency(void)
{
        int i, efficiency;
        unsigned int max = 0, min = UINT_MAX;

        for_each_possible_cpu(i) {
                efficiency = arch_get_cpu_efficiency(i);
                cpu_rq(i)->efficiency = efficiency;

                if (efficiency > max)
                        max = efficiency;
                if (efficiency < min)
                        min = efficiency;
        }

        if (max)
                max_possible_efficiency = max;

        if (min)
                min_possible_efficiency = min;
}

static void reset_task_stats(struct task_struct *p)
{
        u32 sum = 0;

        if (exiting_task(p))
                sum = EXITING_TASK_MARKER;

        memset(&p->ravg, 0, sizeof(struct ravg));
        /* Retain EXITING_TASK marker */
        p->ravg.sum_history[0] = sum;
}

void walt_mark_task_starting(struct task_struct *p)
{
        u64 wallclock;
        struct rq *rq = task_rq(p);

        if (!rq->window_start) {
                reset_task_stats(p);
                return;
        }

        wallclock = walt_ktime_clock();
        p->ravg.mark_start = wallclock;
}

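/*
 * Initialize this rq's window_start. The designated sync_cpu provides the
 * reference window boundary; every other CPU copies sync_cpu's window_start
 * so that all runqueues share aligned window boundaries.
 */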
void walt_set_window_start(struct rq *rq)
{
        int cpu = cpu_of(rq);
        struct rq *sync_rq = cpu_rq(sync_cpu);

        if (rq->window_start)
                return;

        if (cpu == sync_cpu) {
                rq->window_start = walt_ktime_clock();
        } else {
                raw_spin_unlock(&rq->lock);
                double_rq_lock(rq, sync_rq);
                rq->window_start = cpu_rq(sync_cpu)->window_start;
                rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
                raw_spin_unlock(&sync_rq->lock);
        }

        rq->curr->ravg.mark_start = rq->window_start;
}

void walt_migrate_sync_cpu(int cpu)
{
        if (cpu == sync_cpu)
                sync_cpu = smp_processor_id();
}

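/*
 * On migration, move the task's contribution to the current and previous
 * window busy sums from the source runqueue to the destination runqueue, so
 * that per-CPU busy time (used for frequency guidance) follows the task.
 */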
void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
{
        struct rq *src_rq = task_rq(p);
        struct rq *dest_rq = cpu_rq(new_cpu);
        u64 wallclock;

        if (!p->on_rq && p->state != TASK_WAKING)
                return;

        if (exiting_task(p))
                return;

        if (p->state == TASK_WAKING)
                double_rq_lock(src_rq, dest_rq);

        wallclock = walt_ktime_clock();

        walt_update_task_ravg(task_rq(p)->curr, task_rq(p),
                              TASK_UPDATE, wallclock, 0);
        walt_update_task_ravg(dest_rq->curr, dest_rq,
                              TASK_UPDATE, wallclock, 0);

        walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);

        if (p->ravg.curr_window) {
                src_rq->curr_runnable_sum -= p->ravg.curr_window;
                dest_rq->curr_runnable_sum += p->ravg.curr_window;
        }

        if (p->ravg.prev_window) {
                src_rq->prev_runnable_sum -= p->ravg.prev_window;
                dest_rq->prev_runnable_sum += p->ravg.prev_window;
        }

        if ((s64)src_rq->prev_runnable_sum < 0) {
                src_rq->prev_runnable_sum = 0;
                WARN_ON(1);
        }
        if ((s64)src_rq->curr_runnable_sum < 0) {
                src_rq->curr_runnable_sum = 0;
                WARN_ON(1);
        }

        trace_walt_migration_update_sum(src_rq, p);
        trace_walt_migration_update_sum(dest_rq, p);

        if (p->state == TASK_WAKING)
                double_rq_unlock(src_rq, dest_rq);
}

/* Keep track of max/min capacity possible across CPUs "currently" */
static void __update_min_max_capacity(void)
{
        int i;
        int max = 0, min = INT_MAX;

        for_each_online_cpu(i) {
                if (cpu_rq(i)->capacity > max)
                        max = cpu_rq(i)->capacity;
                if (cpu_rq(i)->capacity < min)
                        min = cpu_rq(i)->capacity;
        }

        max_capacity = max;
        min_capacity = min;
}

static void update_min_max_capacity(void)
{
        unsigned long flags;
        int i;

        local_irq_save(flags);
        for_each_possible_cpu(i)
                raw_spin_lock(&cpu_rq(i)->lock);

        __update_min_max_capacity();

        for_each_possible_cpu(i)
                raw_spin_unlock(&cpu_rq(i)->lock);
        local_irq_restore(flags);
}

/*
 * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
 * least efficient cpu gets capacity of 1024
 */
static unsigned long capacity_scale_cpu_efficiency(int cpu)
{
        return (1024 * cpu_rq(cpu)->efficiency) / min_possible_efficiency;
}

/*
 * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
 * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
 */
static unsigned long capacity_scale_cpu_freq(int cpu)
{
        return (1024 * cpu_rq(cpu)->max_freq) / min_max_freq;
}

/*
 * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
 * that "most" efficient cpu gets a load_scale_factor of 1
 */
static unsigned long load_scale_cpu_efficiency(int cpu)
{
        return DIV_ROUND_UP(1024 * max_possible_efficiency,
                            cpu_rq(cpu)->efficiency);
}

/*
 * Return load_scale_factor of a cpu in reference to cpu with best max_freq
 * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
 * of 1.
 */
static unsigned long load_scale_cpu_freq(int cpu)
{
        return DIV_ROUND_UP(1024 * max_possible_freq, cpu_rq(cpu)->max_freq);
}

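/*
 * Combine the efficiency and max_freq scale factors into a single capacity
 * value relative to the weakest CPU:
 *
 *   capacity = 1024 * (efficiency / min_possible_efficiency)
 *                   * (max_freq / min_max_freq)
 *
 * e.g. a CPU twice as efficient as the weakest one, with the same max_freq,
 * ends up with a capacity of 2048.
 */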
static int compute_capacity(int cpu)
{
        int capacity = 1024;

        capacity *= capacity_scale_cpu_efficiency(cpu);
        capacity >>= 10;

        capacity *= capacity_scale_cpu_freq(cpu);
        capacity >>= 10;

        return capacity;
}

static int compute_load_scale_factor(int cpu)
{
        int load_scale = 1024;

        /*
         * load_scale_factor accounts for the fact that task load
         * is in reference to "best" performing cpu. Task's load will need to be
         * scaled (up) by a factor to determine suitability to be placed on a
         * (little) cpu.
         */
        load_scale *= load_scale_cpu_efficiency(cpu);
        load_scale >>= 10;

        load_scale *= load_scale_cpu_freq(cpu);
        load_scale >>= 10;

        return load_scale;
}

static int cpufreq_notifier_policy(struct notifier_block *nb,
                unsigned long val, void *data)
{
        struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
        int i, update_max = 0;
        u64 highest_mpc = 0, highest_mplsf = 0;
        const struct cpumask *cpus = policy->related_cpus;
        unsigned int orig_min_max_freq = min_max_freq;
        unsigned int orig_max_possible_freq = max_possible_freq;
        /* Initialized to policy->max in case policy->related_cpus is empty! */
        unsigned int orig_max_freq = policy->max;

        if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
            val != CPUFREQ_CREATE_POLICY)
                return 0;

        if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
                update_min_max_capacity();
                return 0;
        }

        for_each_cpu(i, policy->related_cpus) {
                cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
                             policy->related_cpus);
                orig_max_freq = cpu_rq(i)->max_freq;
                cpu_rq(i)->min_freq = policy->min;
                cpu_rq(i)->max_freq = policy->max;
                cpu_rq(i)->cur_freq = policy->cur;
                cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq;
        }

        max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
        if (min_max_freq == 1)
                min_max_freq = UINT_MAX;
        min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
        BUG_ON(!min_max_freq);
        BUG_ON(!policy->max);

        /* Changes to policy other than max_freq don't require any updates */
        if (orig_max_freq == policy->max)
                return 0;

        /*
         * A changed min_max_freq or max_possible_freq (possible during bootup)
         * needs to trigger re-computation of load_scale_factor and capacity for
         * all possible cpus (even those offline). It also needs to trigger
         * re-computation of nr_big_task count on all online cpus.
         *
         * A changed rq->max_freq otoh needs to trigger re-computation of
         * load_scale_factor and capacity for just the cluster of cpus involved.
         * Since small task definition depends on max_load_scale_factor, a
         * changed load_scale_factor of one cluster could influence
         * classification of tasks in another cluster. Hence a changed
         * rq->max_freq will need to trigger re-computation of nr_big_task
         * count on all online cpus.
         *
         * While it should be sufficient for nr_big_tasks to be
         * re-computed for only online cpus, we have inadequate context
         * information here (in policy notifier) with regard to hotplug-safety
         * context in which notification is issued. As a result, we can't use
         * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
         * fixed up to issue notification always in hotplug-safe context,
         * re-compute nr_big_task for all possible cpus.
         */

        if (orig_min_max_freq != min_max_freq ||
            orig_max_possible_freq != max_possible_freq) {
                cpus = cpu_possible_mask;
                update_max = 1;
        }

        /*
         * Changed load_scale_factor can trigger reclassification of tasks as
         * big or small. Make this change "atomic" so that tasks are accounted
         * properly due to changed load_scale_factor
         */
        for_each_cpu(i, cpus) {
                struct rq *rq = cpu_rq(i);

                rq->capacity = compute_capacity(i);
                rq->load_scale_factor = compute_load_scale_factor(i);

                if (update_max) {
                        u64 mpc, mplsf;

                        mpc = div_u64(((u64) rq->capacity) *
                                rq->max_possible_freq, rq->max_freq);
                        rq->max_possible_capacity = (int) mpc;

                        mplsf = div_u64(((u64) rq->load_scale_factor) *
                                rq->max_possible_freq, rq->max_freq);

                        if (mpc > highest_mpc) {
                                highest_mpc = mpc;
                                cpumask_clear(&mpc_mask);
                                cpumask_set_cpu(i, &mpc_mask);
                        } else if (mpc == highest_mpc) {
                                cpumask_set_cpu(i, &mpc_mask);
                        }

                        if (mplsf > highest_mplsf)
                                highest_mplsf = mplsf;
                }
        }

        if (update_max) {
                max_possible_capacity = highest_mpc;
                max_load_scale_factor = highest_mplsf;
        }

        __update_min_max_capacity();

        return 0;
}

static int cpufreq_notifier_trans(struct notifier_block *nb,
                unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
        unsigned int cpu = freq->cpu, new_freq = freq->new;
        unsigned long flags;
        int i;

        if (val != CPUFREQ_POSTCHANGE)
                return 0;

        BUG_ON(!new_freq);

        if (cpu_rq(cpu)->cur_freq == new_freq)
                return 0;

        for_each_cpu(i, &cpu_rq(cpu)->freq_domain_cpumask) {
                struct rq *rq = cpu_rq(i);

                raw_spin_lock_irqsave(&rq->lock, flags);
                walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
                                      walt_ktime_clock(), 0);
                rq->cur_freq = new_freq;
                raw_spin_unlock_irqrestore(&rq->lock, flags);
        }

        return 0;
}

static struct notifier_block notifier_policy_block = {
        .notifier_call = cpufreq_notifier_policy
};

static struct notifier_block notifier_trans_block = {
        .notifier_call = cpufreq_notifier_trans
};

static int register_sched_callback(void)
{
        int ret;

        ret = cpufreq_register_notifier(&notifier_policy_block,
                                        CPUFREQ_POLICY_NOTIFIER);

        if (!ret)
                ret = cpufreq_register_notifier(&notifier_trans_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);

        return 0;
}

/*
 * cpufreq callbacks can be registered at core_initcall or later time.
 * Any registration done prior to that is "forgotten" by cpufreq. See
 * initialization of variable init_cpufreq_transition_notifier_list_called
 * for further information.
 */
core_initcall(register_sched_callback);

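/*
 * Seed the demand and demand history of a newly forked task. The initial
 * demand defaults to sysctl_sched_walt_init_task_load_pct percent of a full
 * window, unless the forking task (current) has a nonzero init_load_pct, in
 * which case that percentage is used instead. The child's own init_load_pct
 * is cleared, so the setting is not inherited further.
 */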
void walt_init_new_task_load(struct task_struct *p)
{
        int i;
        u32 init_load_windows =
                        div64_u64((u64)sysctl_sched_walt_init_task_load_pct *
                                  (u64)walt_ravg_window, 100);
        u32 init_load_pct = current->init_load_pct;

        p->init_load_pct = 0;
        memset(&p->ravg, 0, sizeof(struct ravg));

        if (init_load_pct) {
                init_load_windows = div64_u64((u64)init_load_pct *
                                              (u64)walt_ravg_window, 100);
        }

        p->ravg.demand = init_load_windows;
        for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
                p->ravg.sum_history[i] = init_load_windows;
}