// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/cpupri.c
 *
 * CPU priority management
 *
 * Copyright (C) 2007-2008 Novell
 *
 * Author: Gregory Haskins <ghaskins@novell.com>
 *
 * This code tracks the priority of each CPU so that global migration
 * decisions are easy to calculate. Each CPU can be in a state as follows:
 *
 *                 (INVALID), NORMAL, RT1, ... RT99, HIGHER
 *
 * going from the lowest priority to the highest. CPUs in the INVALID state
 * are not eligible for routing. The system maintains this state with
 * a 2 dimensional bitmap (the first for priority class, the second for CPUs
 * in that class). Therefore a typical application without affinity
 * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 * searches). For tasks with affinity restrictions, the algorithm has a
 * worst case complexity of O(min(101, nr_domcpus)), though the scenario that
 * yields the worst case search is fairly contrived.
 */
#include "sched.h"

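/*
 * The backing data structures live in kernel/sched/cpupri.h; a minimal
 * sketch of their shape, reproduced here for orientation only (not
 * verbatim):
 *
 *	struct cpupri_vec {
 *		atomic_t	count;     (nr of CPUs currently at this prio)
 *		cpumask_var_t	mask;      (which CPUs those are)
 *	};
 *
 *	struct cpupri {
 *		struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
 *		int		 *cpu_to_pri;
 *	};
 *
 * pri_to_cpu[] holds one vec per priority class (NORMAL through HIGHER)
 * and is the "2 dimensional bitmap" described above; cpu_to_pri[] is the
 * reverse map used by cpupri_set().
 */
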
/*
 * p->rt_priority   p->prio   newpri   cpupri
 *
 *                                -1       -1  (CPUPRI_INVALID)
 *
 *                                99        0  (CPUPRI_NORMAL)
 *
 *             1        98       98        1
 *           ...
 *            49        50       50       49
 *            50        49       49       50
 *           ...
 *            99         0        0       99
 *
 *                               100      100  (CPUPRI_HIGHER)
 */
static int convert_prio(int prio)
{
	int cpupri;

	switch (prio) {
	case CPUPRI_INVALID:
		cpupri = CPUPRI_INVALID;	/* -1 */
		break;

	case 0 ... 98:
		cpupri = MAX_RT_PRIO-1 - prio;	/* 1 ... 99 */
		break;

	case MAX_RT_PRIO-1:
		cpupri = CPUPRI_NORMAL;		/* 0 */
		break;

	case MAX_RT_PRIO:
		cpupri = CPUPRI_HIGHER;		/* 100 */
		break;
	}

	return cpupri;
}
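
/*
 * Worked example of the mapping above: a SCHED_FIFO task with
 * rt_priority 50 has p->prio == 49, and convert_prio(49) returns
 * MAX_RT_PRIO-1 - 49 == 50, i.e. a higher rt_priority maps to a higher
 * cpupri index. A newpri of MAX_RT_PRIO-1 (no RT priority) maps to
 * CPUPRI_NORMAL, and MAX_RT_PRIO (something above all RT priorities)
 * maps to CPUPRI_HIGHER.
 */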

#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
/**
 * drop_nopreempt_cpus - remove likely non-preemptible CPUs from the mask
 * @lowest_mask: mask with selected CPUs (non-NULL)
 */
static void
drop_nopreempt_cpus(struct cpumask *lowest_mask)
{
	unsigned int cpu = cpumask_first(lowest_mask);

	while (cpu < nr_cpu_ids) {
		/* unlocked access */
		struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr);
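
		/*
		 * Under this optimization, task_may_not_preempt() (see
		 * rt.c under the same config) roughly reports whether the
		 * CPU is busy with softirq work (e.g. ksoftirqd) that an
		 * RT task could not usefully preempt.
		 */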
		if (task_may_not_preempt(task, cpu))
			cpumask_clear_cpu(cpu, lowest_mask);

		cpu = cpumask_next(cpu, lowest_mask);
	}
}
#endif

static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
				struct cpumask *lowest_mask, int idx,
				bool drop_nopreempts)
{
	struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
	int skip = 0;

	if (!atomic_read(&(vec)->count))
		skip = 1;
	/*
	 * When looking at the vector, we need to read the counter,
	 * do a memory barrier, then read the mask.
	 *
	 * Note: This is still all racy, but we can deal with it.
	 * Ideally, we only want to look at masks that are set.
	 *
	 * If a mask is not set, then the only thing wrong is that we
	 * did a little more work than necessary.
	 *
	 * If we read a zero count but the mask is set, because of the
	 * memory barriers, that can only happen when the highest prio
	 * task for a run queue has left the run queue, in which case,
	 * it will be followed by a pull. If the task we are processing
	 * fails to find a proper place to go, that pull request will
	 * pull this task if the run queue is running at a lower
	 * priority.
	 */
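	/*
	 * The read side here (count, then rmb, then mask) is assumed to
	 * pair with the smp_mb__before_atomic()/smp_mb__after_atomic()
	 * barriers around the count updates in cpupri_set().
	 */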
	smp_rmb();

	/* Need to do the rmb for every iteration */
	if (skip)
		return 0;

	if (cpumask_any_and(&p->cpus_mask, vec->mask) >= nr_cpu_ids)
		return 0;

	if (lowest_mask) {
		cpumask_and(lowest_mask, &p->cpus_mask, vec->mask);
		cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);

#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
		if (drop_nopreempts)
			drop_nopreempt_cpus(lowest_mask);
#endif

		/*
		 * We have to ensure that we have at least one bit
		 * still set in the array, since the map could have
		 * been concurrently emptied between the first and
		 * second reads of vec->mask. If we hit this
		 * condition, simply act as though we never hit this
		 * priority level and continue on.
		 */
		if (cpumask_empty(lowest_mask))
			return 0;
	}

	return 1;
}

int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	return cpupri_find_fitness(cp, p, lowest_mask, NULL);
}

/**
 * cpupri_find_fitness - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 * @fitness_fn: A pointer to a function that does custom checks of whether
 *              the CPU fits specific criteria, so that we only return those
 *              CPUs.
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation. By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times. While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - 1 if suitable CPUs were found, otherwise 0
 */
int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
			struct cpumask *lowest_mask,
			bool (*fitness_fn)(struct task_struct *p, int cpu))
{
	int task_pri = convert_prio(p->prio);
	int idx, cpu;
	bool drop_nopreempts = task_pri <= MAX_RT_PRIO;

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
retry:
#endif
	for (idx = 0; idx < task_pri; idx++) {

		if (!__cpupri_find(cp, p, lowest_mask, idx, drop_nopreempts))
			continue;

		if (!lowest_mask || !fitness_fn)
			return 1;

		/* Ensure the capacity of the CPUs fits the task */
		for_each_cpu(cpu, lowest_mask) {
			if (!fitness_fn(p, cpu))
				cpumask_clear_cpu(cpu, lowest_mask);
		}

		/*
		 * If no CPU at the current priority can fit the task,
		 * continue looking.
		 */
		if (cpumask_empty(lowest_mask))
			continue;

		return 1;
	}

	/*
	 * If we could not find any CPUs after dropping the likely
	 * non-preemptible ones, retry without dropping them so we can
	 * still find the lowest-priority target and avoid priority
	 * inversion.
	 */
#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
	if (drop_nopreempts) {
		drop_nopreempts = false;
		goto retry;
	}
#endif

	/*
	 * If we failed to find a fitting lowest_mask, kick off a new search
	 * but without taking into account any fitness criteria this time.
	 *
	 * This rule favours honouring priority over fitting the task in the
	 * correct CPU (Capacity Awareness being the only user now).
	 * The idea is that if a higher priority task can run, then it should
	 * run even if this ends up being on an unfitting CPU.
	 *
	 * The cost of this trade-off is not entirely clear and will probably
	 * be good for some workloads and bad for others.
	 *
	 * The main idea here is that if some CPUs were over-committed, we try
	 * to spread the load, which is what the scheduler traditionally did.
	 * Sys admins must do proper RT planning to avoid overloading the
	 * system if they really care.
	 */
	if (fitness_fn)
		return cpupri_find(cp, p, lowest_mask);

	return 0;
}
EXPORT_SYMBOL_GPL(cpupri_find_fitness);
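
/*
 * A sketch of how the RT scheduler's find_lowest_rq() (kernel/sched/rt.c)
 * is expected to use this interface; the names and the capacity-fitness
 * callback come from rt.c and are shown here for illustration only:
 *
 *	if (sched_asym_cpucap_active())
 *		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
 *					  task, lowest_mask,
 *					  rt_task_fits_capacity);
 *	else
 *		ret = cpupri_find(&task_rq(task)->rd->cpupri,
 *				  task, lowest_mask);
 */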

/**
 * cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
 * @cpu: The target CPU
 * @newpri: The priority (INVALID,NORMAL,RT1-RT99,HIGHER) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	int do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the CPU was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being missed by the priority loop in cpupri_find.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic();
		atomic_inc(&(vec)->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic();

		/*
		 * When removing from the vector, we decrement the counter
		 * first, do a memory barrier, and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
		smp_mb__after_atomic();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}
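
/*
 * Illustrative scenario for the ordering above: suppose CPU 3 was running
 * at p->prio 50 (cpupri 49) and its highest queued priority rises to
 * p->prio 20 (cpupri 79). cpupri_set() first sets CPU 3's bit in
 * pri_to_cpu[79] and increments that count, and only then removes the CPU
 * from pri_to_cpu[49], so a concurrent cpupri_find() never misses the CPU
 * entirely.
 */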

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}
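
/*
 * Typical lifecycle (sketch; the root-domain code in
 * kernel/sched/topology.c does roughly this when building and tearing
 * down a root domain; the label and error handling below are illustrative
 * only):
 *
 *	if (cpupri_init(&rd->cpupri) != 0)
 *		goto out_free;
 *	...
 *	cpupri_cleanup(&rd->cpupri);
 */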

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	kfree(cp->cpu_to_pri);
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}

#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
/*
 * cpupri_check_rt - check whether this CPU currently has an RT task
 * (and the RT runqueue is not throttled).
 * Should be called from an rcu-sched read-side section.
 */
bool cpupri_check_rt(void)
{
	int cpu = raw_smp_processor_id();

	return (cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL) &&
	       (cpu_rq(cpu)->rt.rt_throttled == 0);
}
#endif