// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012, 2015-2021, The Linux Foundation. All rights reserved.
 */
/*
 * Scheduler hook for average runqueue determination
 */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/sched.h>
#include <linux/math64.h>

#include "sched.h"
#include "walt.h"
#include <trace/events/sched.h>

static DEFINE_PER_CPU(u64, nr_prod_sum);
static DEFINE_PER_CPU(u64, last_time);
static DEFINE_PER_CPU(u64, nr_big_prod_sum);
static DEFINE_PER_CPU(u64, nr);
static DEFINE_PER_CPU(u64, nr_max);

static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
static s64 last_get_time;

static DEFINE_PER_CPU(atomic64_t, last_busy_time) = ATOMIC64_INIT(0);

#define NR_THRESHOLD_PCT		15

/**
 * sched_get_nr_running_avg
 * @stats: Output array indexed by CPU id, filled for each possible CPU.
 * @return: Average nr_running, iowait and nr_big_tasks value since last poll.
 *	    Returns the avg * 100 to provide up to two decimal places
 *	    of accuracy.
 *
 * Obtains the average nr_running value since the last poll.
 * This function may not be called concurrently with itself.
 */
void sched_get_nr_running_avg(struct sched_avg_stats *stats)
{
	int cpu;
	u64 curr_time = sched_clock();
	u64 period = curr_time - last_get_time;
	u64 tmp_nr, tmp_misfit;

	if (!period)
		return;

	/* read and reset nr_running counts */
	for_each_possible_cpu(cpu) {
		unsigned long flags;
		u64 diff;

		spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
		curr_time = sched_clock();
		diff = curr_time - per_cpu(last_time, cpu);
		BUG_ON((s64)diff < 0);

		tmp_nr = per_cpu(nr_prod_sum, cpu);
		tmp_nr += per_cpu(nr, cpu) * diff;
		tmp_nr = div64_u64((tmp_nr * 100), period);

		tmp_misfit = per_cpu(nr_big_prod_sum, cpu);
		tmp_misfit = div64_u64((tmp_misfit * 100), period);

		/*
		 * NR_THRESHOLD_PCT is to make sure that the task ran
		 * at least 85% in the last window to compensate for any
		 * overestimation being done.
		 */
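		/*
		 * Illustrative arithmetic: over a full window with two
		 * tasks runnable the whole time, tmp_nr comes out at 200
		 * and the integer division (200 + 15) / 100 yields 2. A
		 * single task runnable for only 84% of the window gives
		 * tmp_nr = 84, and (84 + 15) / 100 truncates to 0; at 85%
		 * it counts as 1.
		 */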
		stats[cpu].nr = (int)div64_u64((tmp_nr + NR_THRESHOLD_PCT),
								100);
		stats[cpu].nr_misfit = (int)div64_u64((tmp_misfit +
						NR_THRESHOLD_PCT), 100);
		stats[cpu].nr_max = per_cpu(nr_max, cpu);

		trace_sched_get_nr_running_avg(cpu, stats[cpu].nr,
				stats[cpu].nr_misfit, stats[cpu].nr_max);

		per_cpu(last_time, cpu) = curr_time;
		per_cpu(nr_prod_sum, cpu) = 0;
		per_cpu(nr_big_prod_sum, cpu) = 0;
		per_cpu(iowait_prod_sum, cpu) = 0;
		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);

		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
	}

	last_get_time = curr_time;
}
EXPORT_SYMBOL(sched_get_nr_running_avg);
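
/*
 * Usage sketch (illustrative, not part of the original file): a periodic
 * consumer such as a core-control governor is expected to be the single
 * poller of this interface. The helper name example_poll_nr_avg() and the
 * pr_debug() reporting below are assumptions made purely for illustration.
 */
#if 0
static void example_poll_nr_avg(void)
{
	static struct sched_avg_stats stats[NR_CPUS];
	int cpu;

	/* Fills one entry per possible CPU and resets the tracked window. */
	sched_get_nr_running_avg(stats);

	for_each_possible_cpu(cpu)
		pr_debug("cpu%d: nr=%d misfit=%d max=%d\n", cpu,
			 stats[cpu].nr, stats[cpu].nr_misfit,
			 (int)stats[cpu].nr_max);
}
#endif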

#define BUSY_NR_RUN		3
#define BUSY_LOAD_FACTOR	10
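
/*
 * Record the point at which a CPU stops looking busy so that
 * sched_get_cpu_last_busy_time() can report it. Only non-minimum-capacity
 * CPUs on HMP-capable systems are tracked; a CPU is taken to have just left
 * the busy state when its runnable count drops below BUSY_NR_RUN, or when a
 * task is dequeued while utilization still exceeds 1/BUSY_LOAD_FACTOR of the
 * CPU's original capacity.
 */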
static inline void update_last_busy_time(int cpu, bool dequeue,
				unsigned long prev_nr_run, u64 curr_time)
{
	bool nr_run_trigger = false, load_trigger = false;

	if (!hmp_capable() || is_min_capacity_cpu(cpu))
		return;

	if (prev_nr_run >= BUSY_NR_RUN && per_cpu(nr, cpu) < BUSY_NR_RUN)
		nr_run_trigger = true;

	if (dequeue && (cpu_util(cpu) * BUSY_LOAD_FACTOR) >
			capacity_orig_of(cpu))
		load_trigger = true;

	if (nr_run_trigger || load_trigger)
		atomic64_set(&per_cpu(last_busy_time, cpu), curr_time);
}

/**
 * sched_update_nr_prod
 * @cpu: The CPU whose nr_running count is being updated.
 * @delta: Adjust nr by 'delta' amount.
 * @inc: Whether we are increasing or decreasing the count.
 * @return: N/A
 *
 * Update the average with the latest nr_running value for the CPU.
 */
void sched_update_nr_prod(int cpu, long delta, bool inc)
{
	u64 diff;
	u64 curr_time;
	unsigned long flags, nr_running;

	spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
	nr_running = per_cpu(nr, cpu);
	curr_time = sched_clock();
	diff = curr_time - per_cpu(last_time, cpu);
	BUG_ON((s64)diff < 0);
	per_cpu(last_time, cpu) = curr_time;
	per_cpu(nr, cpu) = nr_running + (inc ? delta : -delta);

	BUG_ON((s64)per_cpu(nr, cpu) < 0);

	if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);

	update_last_busy_time(cpu, !inc, nr_running, curr_time);

	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
	spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
}
EXPORT_SYMBOL(sched_update_nr_prod);
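
/*
 * Usage sketch (illustrative, not part of the original file): the
 * scheduler's enqueue/dequeue paths are expected to report each
 * runnable-count change with a delta of 1. The hook names below are
 * hypothetical and not the actual call sites.
 */
#if 0
static inline void example_note_enqueue(struct rq *rq)
{
	sched_update_nr_prod(cpu_of(rq), 1, true);	/* one more runnable task */
}

static inline void example_note_dequeue(struct rq *rq)
{
	sched_update_nr_prod(cpu_of(rq), 1, false);	/* one less runnable task */
}
#endif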

/*
 * Returns the CPU utilization % in the last window.
 */
unsigned int sched_get_cpu_util(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	u64 util;
	unsigned long capacity, flags;
	unsigned int busy;

	raw_spin_lock_irqsave(&rq->lock, flags);

	util = rq->cfs.avg.util_avg;
	capacity = capacity_orig_of(cpu);

#ifdef CONFIG_SCHED_WALT
	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
		util = rq->prev_runnable_sum;
		util = div64_u64(util,
				 sched_ravg_window >> SCHED_CAPACITY_SHIFT);
	}
#endif
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	util = (util >= capacity) ? capacity : util;
	busy = div64_ul((util * 100), capacity);
	return busy;
}
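
/*
 * Worked example for sched_get_cpu_util() above (illustrative numbers: a
 * 20ms WALT window and a maximum capacity of 1024 are assumptions, not
 * values fixed by this file): with sched_ravg_window = 20000000 ns the
 * divisor is 20000000 >> 10 = 19531 ns per capacity unit, so a
 * prev_runnable_sum of 10ms maps to roughly 512 capacity units and
 * busy = 512 * 100 / 1024 = 50, i.e. the CPU was about 50% busy in the
 * last window.
 */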

u64 sched_get_cpu_last_busy_time(int cpu)
{
	return atomic64_read(&per_cpu(last_busy_time, cpu));
}