/* SPDX-License-Identifier: GPL-2.0 */
/*
 * walt.h
 *
 * header file for Window Assisted Load Tracking (WALT)
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#ifndef __WALT_H
#define __WALT_H

#ifdef CONFIG_SCHED_WALT

#include <linux/sched/sysctl.h>

#define WINDOW_STATS_RECENT		0
#define WINDOW_STATS_MAX		1
#define WINDOW_STATS_MAX_RECENT_AVG	2
#define WINDOW_STATS_AVG		3
#define WINDOW_STATS_INVALID_POLICY	4

#define EXITING_TASK_MARKER	0xdeaddead

#define SCHED_NEW_TASK_WINDOWS 5

extern unsigned int sched_ravg_window;
extern unsigned int sysctl_sched_walt_init_task_load_pct;

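/* Exiting tasks have sum_history[0] poisoned with EXITING_TASK_MARKER. */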
static inline int exiting_task(struct task_struct *p)
{
	return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
}

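/*
 * Each runqueue carries a pointer to its sched_cluster; two CPUs belong
 * to the same cluster iff their runqueues point at the same one.
 */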
static inline struct sched_cluster *cpu_cluster(int cpu)
{
	return cpu_rq(cpu)->cluster;
}

static inline int same_cluster(int src_cpu, int dst_cpu)
{
	return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
}

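/*
 * Scale a raw execution-time delta by the current capacity of @rq's CPU,
 * so that accumulated busy time is comparable across CPUs and frequencies.
 */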
static inline u64 scale_exec_time(u64 delta, struct rq *rq)
{
	unsigned long capcurr = capacity_curr_of(cpu_of(rq));

	delta = (delta * capcurr) >> SCHED_CAPACITY_SHIFT;

	return delta;
}

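/* A task is treated as new for its first SCHED_NEW_TASK_WINDOWS windows. */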
static inline bool is_new_task(struct task_struct *p)
{
	return p->ravg.active_windows < SCHED_NEW_TASK_WINDOWS;
}

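/* A task can accumulate at most one full window of demand. */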
static inline unsigned int max_task_load(void)
{
	return sched_ravg_window;
}

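/*
 * Move the entries of @src over to @dst. With @sync_rcu, @src is
 * reinitialised first and a grace period observed so that concurrent
 * RCU readers stop seeing the old entries before they are re-linked.
 */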
static inline void
move_list(struct list_head *dst, struct list_head *src, bool sync_rcu)
{
	struct list_head *first, *last;

	first = src->next;
	last = src->prev;

	if (sync_rcu) {
		INIT_LIST_HEAD_RCU(src);
		synchronize_rcu();
	}

	first->prev = dst;
	dst->prev = last;
	last->next = dst;

	/* Ensure list sanity before making the head visible to all CPUs. */
	smp_mb();
	dst->next = first;
}

extern void reset_task_stats(struct task_struct *p);
extern void update_cluster_topology(void);
extern void init_clusters(void);
extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
						u64 wallclock, u64 irqtime);

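/*
 * Apply a signed change in scaled demand to @stats; the accumulated
 * value must never go negative.
 */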
static inline void
fixup_cumulative_runnable_avg(struct walt_sched_stats *stats,
			      s64 demand_scaled_delta)
{
	if (sched_disable_window_stats)
		return;

	stats->cumulative_runnable_avg_scaled += demand_scaled_delta;
	BUG_ON((s64)stats->cumulative_runnable_avg_scaled < 0);
}

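/* Account @p's scaled demand into @rq's WALT stats when it is enqueued. */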
static inline void
walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
{
	if (sched_disable_window_stats)
		return;

	fixup_cumulative_runnable_avg(&rq->walt_stats, p->ravg.demand_scaled);

	/*
	 * Add a task's contribution to the cumulative window demand when
	 *
	 * (1) the task is enqueued with on_rq = 1, i.e. migration or a
	 *     prio/cgroup/class change, or
	 * (2) the task is waking for the first time in this window.
	 */
	if (p->on_rq || (p->last_sleep_ts < rq->window_start))
		walt_fixup_cum_window_demand(rq, p->ravg.demand_scaled);
}

static inline void
walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
{
	if (sched_disable_window_stats)
		return;

	fixup_cumulative_runnable_avg(&rq->walt_stats,
				      -(s64)p->ravg.demand_scaled);

	/*
	 * on_rq will be 1 for sleeping tasks, so check instead whether the
	 * task is migrating or being dequeued in RUNNING state for a
	 * prio/cgroup/class change.
	 */
	if (task_on_rq_migrating(p) || p->state == TASK_RUNNING)
		walt_fixup_cum_window_demand(rq, -(s64)p->ravg.demand_scaled);
}
extern void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
					  u16 updated_demand_scaled);
extern void inc_rq_walt_stats(struct rq *rq, struct task_struct *p);
extern void dec_rq_walt_stats(struct rq *rq, struct task_struct *p);
extern void fixup_busy_time(struct task_struct *p, int new_cpu);
extern void init_new_task_load(struct task_struct *p);
extern void mark_task_starting(struct task_struct *p);
extern void set_window_start(struct rq *rq);
void account_irqtime(int cpu, struct task_struct *curr, u64 delta, u64 wallclock);

void walt_irq_work(struct irq_work *irq_work);

void walt_sched_init_rq(struct rq *rq);

extern void sched_account_irqtime(int cpu, struct task_struct *curr,
				  u64 delta, u64 wallclock);

#define SCHED_HIGH_IRQ_TIMEOUT 3
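/*
 * Report the average irq load of @cpu, unless it has not been updated
 * within the last SCHED_HIGH_IRQ_TIMEOUT jiffies.
 */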
static inline u64 sched_irqload(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	s64 delta;

	delta = get_jiffies_64() - rq->irqload_ts;
	/*
	 * The current context can be preempted by an irq, and rq->irqload_ts
	 * can be updated from irq context, so delta can be negative. That is
	 * fine: a negative delta simply means there was a recent irq, so we
	 * can safely return the average irq load.
	 */

	if (delta < SCHED_HIGH_IRQ_TIMEOUT)
		return rq->avg_irqload;
	else
		return 0;
}

static inline int sched_cpu_high_irqload(int cpu)
{
	return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
}

extern int
sysctl_sched_walt_init_task_load_pct_sysctl_handler(struct ctl_table *table,
	int write, void __user *buffer, size_t *length, loff_t *ppos);

static inline unsigned int cpu_cur_freq(int cpu)
{
	return cpu_rq(cpu)->cluster->cur_freq;
}

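/*
 * Hand out sequential ids to the clusters on @head and record each one
 * in the sched_cluster[] lookup array.
 */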
static inline void assign_cluster_ids(struct list_head *head)
{
	struct sched_cluster *cluster;
	int pos = 0;

	list_for_each_entry(cluster, head, list) {
		cluster->id = pos;
		sched_cluster[pos++] = cluster;
	}
}

extern void update_cluster_load_subtractions(struct task_struct *p,
		int cpu, u64 ws, bool new_task);
#else /* CONFIG_SCHED_WALT */
static inline void walt_sched_init_rq(struct rq *rq) { }

static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
				int event, u64 wallclock, u64 irqtime) { }

static inline void walt_inc_cumulative_runnable_avg(struct rq *rq,
		struct task_struct *p) { }

static inline void walt_dec_cumulative_runnable_avg(struct rq *rq,
		struct task_struct *p) { }

static inline void
inc_rq_walt_stats(struct rq *rq, struct task_struct *p) { }

static inline void
dec_rq_walt_stats(struct rq *rq, struct task_struct *p) { }

static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
static inline void init_new_task_load(struct task_struct *p) { }
static inline void mark_task_starting(struct task_struct *p) { }
static inline void set_window_start(struct rq *rq) { }
static inline void update_cluster_topology(void) { }
static inline void init_clusters(void) { }

static inline void
fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
			      u16 updated_demand_scaled) { }

static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
					 u64 delta, u64 wallclock) { }

static inline u64 sched_irqload(int cpu)
{
	return 0;
}
static inline int sched_cpu_high_irqload(int cpu)
{
	return 0;
}
static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
#endif /* CONFIG_SCHED_WALT */

#endif /* __WALT_H */