// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/cpupri.c
 *
 * CPU priority management
 *
 * Copyright (C) 2007-2008 Novell
 *
 * Author: Gregory Haskins <ghaskins@novell.com>
 *
 * This code tracks the priority of each CPU so that global migration
 * decisions are easy to calculate. Each CPU can be in a state as follows:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 * going from the lowest priority to the highest. CPUs in the INVALID state
 * are not eligible for routing. The system maintains this state with
 * a 2 dimensional bitmap (the first for priority class, the second for CPUs
 * in that class). Therefore a typical application without affinity
 * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 * searches). For tasks with affinity restrictions, the algorithm has a
 * worst case complexity of O(min(102, nr_domcpus)), though the scenario that
 * yields the worst case search is fairly contrived.
 */
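/*
 * For illustration (an assumed example, not part of the original text): on a
 * 4-CPU system where CPU0 is idle, CPU1 and CPU2 run normal CFS tasks and
 * CPU3 runs an RT task, the per-priority vectors would look roughly like:
 *
 *      pri_to_cpu[CPUPRI_IDLE].mask          = 0b0001
 *      pri_to_cpu[CPUPRI_NORMAL].mask        = 0b0110
 *      pri_to_cpu[<RT level of CPU3>].mask   = 0b1000
 *
 * A waking RT task then only needs to scan the vectors below its own level
 * for a non-zero count (first bit search) and AND that vector's mask with
 * its affinity mask (second bit search), which is the O(1) / "two bit
 * searches" claim above.
 */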
#include "sched.h"

/* Convert between a 140 based task->prio, and our 102 based cpupri */
static int convert_prio(int prio)
{
        int cpupri;

        if (prio == CPUPRI_INVALID)
                cpupri = CPUPRI_INVALID;
        else if (prio == MAX_PRIO)
                cpupri = CPUPRI_IDLE;
        else if (prio >= MAX_RT_PRIO)
                cpupri = CPUPRI_NORMAL;
        else
                cpupri = MAX_RT_PRIO - prio + 1;

        return cpupri;
}
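/*
 * For example, assuming the usual MAX_PRIO == 140 and MAX_RT_PRIO == 100,
 * the mapping above works out to:
 *
 *      p->prio == 140 (idle task)        -> CPUPRI_IDLE
 *      p->prio == 100..139 (CFS)         -> CPUPRI_NORMAL
 *      p->prio ==  99..0   (RT)          -> 2..101
 *
 * Note that the sense is inverted: task->prio is "lower is more important",
 * while cpupri is "higher is more important".
 */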

static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
                                struct cpumask *lowest_mask, int idx)
{
        struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
        int skip = 0;

        if (!atomic_read(&(vec)->count))
                skip = 1;
        /*
         * When looking at the vector, we need to read the counter,
         * do a memory barrier, then read the mask.
         *
         * Note: This is still all racy, but we can deal with it.
         * Ideally, we only want to look at masks that are set.
         *
         * If a mask is not set, then the only thing wrong is that we
         * did a little more work than necessary.
         *
         * If we read a zero count but the mask is set, because of the
         * memory barriers, that can only happen when the highest prio
         * task for a run queue has left the run queue, in which case,
         * it will be followed by a pull. If the task we are processing
         * fails to find a proper place to go, that pull request will
         * pull this task if the run queue is running at a lower
         * priority.
         */
        smp_rmb();

        /* Need to do the rmb for every iteration */
        if (skip)
                return 0;

        if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
                return 0;

        if (lowest_mask) {
                cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);

#ifdef CONFIG_CPU_ISOLATION_OPT
                cpumask_andnot(lowest_mask, lowest_mask, cpu_isolated_mask);
#endif
                /*
                 * We have to ensure that we have at least one bit
                 * still set in the array, since the map could have
                 * been concurrently emptied between the first and
                 * second reads of vec->mask. If we hit this
                 * condition, simply act as though we never hit this
                 * priority level and continue on.
                 */
                if (cpumask_empty(lowest_mask))
                        return 0;
        }

        return 1;
}
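/*
 * For reference, the read side above (read vec->count, smp_rmb(), read
 * vec->mask) is intended to pair with the write side in cpupri_set():
 *
 *      cpupri_set() (writer)           __cpupri_find() (reader)
 *      ---------------------           ------------------------
 *      set bit in vec->mask            read vec->count
 *      smp_mb__before_atomic()         smp_rmb()
 *      atomic_inc(&vec->count)         read vec->mask
 *
 * so a reader that observes a non-zero count also observes the mask bit
 * that was set before the count was raised.
 */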

int cpupri_find(struct cpupri *cp, struct task_struct *p,
                struct cpumask *lowest_mask)
{
        return cpupri_find_fitness(cp, p, lowest_mask, NULL);
}
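/*
 * A minimal usage sketch (hypothetical caller code; the RT scheduler's
 * find_lowest_rq() works along these lines, using a per-CPU scratch mask):
 *
 *      struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
 *
 *      if (cpupri_find(&task_rq(p)->rd->cpupri, p, lowest_mask))
 *              target = cpumask_any(lowest_mask);
 *
 * On success, @lowest_mask holds the CPUs running at a lower priority than
 * @p, and the caller picks one of them as the migration target.
 */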

/**
 * cpupri_find_fitness - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 * @fitness_fn: A pointer to a function that does custom checks on whether
 *              the CPU fits specific criteria, so that we only return those
 *              CPUs.
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation. By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times. While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
                        struct cpumask *lowest_mask,
                        bool (*fitness_fn)(struct task_struct *p, int cpu))
{
        int task_pri = convert_prio(p->prio);
        int idx, cpu;

        BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

        for (idx = 0; idx < task_pri; idx++) {

                if (!__cpupri_find(cp, p, lowest_mask, idx))
                        continue;

                if (!lowest_mask || !fitness_fn)
                        return 1;

                /* Ensure the capacity of the CPUs fits the task */
                for_each_cpu(cpu, lowest_mask) {
                        if (!fitness_fn(p, cpu))
                                cpumask_clear_cpu(cpu, lowest_mask);
                }

                /*
                 * If no CPU at the current priority can fit the task,
                 * continue looking.
                 */
                if (cpumask_empty(lowest_mask))
                        continue;

                return 1;
        }

        /*
         * If we failed to find a fitting lowest_mask, kick off a new search
         * but without taking into account any fitness criteria this time.
         *
         * This rule favours honouring priority over fitting the task on the
         * correct CPU (Capacity Awareness being the only user now).
         * The idea is that if a higher priority task can run, then it should
         * run even if this ends up being on an unfitting CPU.
         *
         * The cost of this trade-off is not entirely clear and will probably
         * be good for some workloads and bad for others.
         *
         * The main idea here is that if some CPUs were overcommitted, we try
         * to spread, which is what the scheduler traditionally did. Sys admins
         * must do proper RT planning to avoid overloading the system if they
         * really care.
         */
        if (fitness_fn)
                return cpupri_find(cp, p, lowest_mask);

        return 0;
}
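/*
 * For illustration, the Capacity Awareness user mentioned above is the RT
 * scheduler, which (on asymmetric CPU capacity systems) passes its
 * rt_task_fits_capacity() helper as @fitness_fn, roughly:
 *
 *      ret = cpupri_find_fitness(&task_rq(p)->rd->cpupri, p, lowest_mask,
 *                                rt_task_fits_capacity);
 *
 * Any function matching bool (*)(struct task_struct *p, int cpu) can be
 * used; returning false for a CPU simply drops it from @lowest_mask at
 * that priority level.
 */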

/**
 * cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
 * @cpu: The target CPU
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
        int *currpri = &cp->cpu_to_pri[cpu];
        int oldpri = *currpri;
        int do_mb = 0;

        newpri = convert_prio(newpri);

        BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

        if (newpri == oldpri)
                return;

        /*
         * If the CPU was currently mapped to a different value, we
         * need to map it to the new value then remove the old value.
         * Note, we must add the new value first, otherwise we risk the
         * cpu being missed by the priority loop in cpupri_find.
         */
        if (likely(newpri != CPUPRI_INVALID)) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

                cpumask_set_cpu(cpu, vec->mask);
                /*
                 * When adding a new vector, we update the mask first,
                 * do a write memory barrier, and then update the count, to
                 * make sure the vector is visible when count is set.
                 */
                smp_mb__before_atomic();
                atomic_inc(&(vec)->count);
                do_mb = 1;
        }
        if (likely(oldpri != CPUPRI_INVALID)) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

                /*
                 * Because the order of modification of the vec->count
                 * is important, we must make sure that the update
                 * of the new prio is seen before we decrement the
                 * old prio. This makes sure that the loop sees
                 * one or the other when we raise the priority of
                 * the run queue. We don't care about when we lower the
                 * priority, as that will trigger an rt pull anyway.
                 *
                 * We only need to do a memory barrier if we updated
                 * the new priority vec.
                 */
                if (do_mb)
                        smp_mb__after_atomic();

                /*
                 * When removing from the vector, we decrement the counter
                 * first, do a memory barrier, and then clear the mask.
                 */
                atomic_dec(&(vec)->count);
                smp_mb__after_atomic();
                cpumask_clear_cpu(cpu, vec->mask);
        }

        *currpri = newpri;
}
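/*
 * For example, raising a CPU from CPUPRI_NORMAL to some RT level proceeds
 * in this order:
 *
 *      1. set the CPU in the new vector's mask, barrier, bump its count
 *      2. barrier (only needed because a new vector was updated)
 *      3. drop the old vector's count, barrier, clear the CPU in its mask
 *      4. record the new level in cpu_to_pri[]
 *
 * so, as the comments above explain, the priority loop in cpupri_find()
 * sees the CPU in one vector or the other while its priority is being
 * raised.
 */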

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
        int i;

        for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[i];

                atomic_set(&vec->count, 0);
                if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
                        goto cleanup;
        }

        cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
        if (!cp->cpu_to_pri)
                goto cleanup;

        for_each_possible_cpu(i)
                cp->cpu_to_pri[i] = CPUPRI_INVALID;

        return 0;

cleanup:
        for (i--; i >= 0; i--)
                free_cpumask_var(cp->pri_to_cpu[i].mask);
        return -ENOMEM;
}
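/*
 * Callers are expected to check the return value and to pair a successful
 * cpupri_init() with cpupri_cleanup(). The root-domain setup code, for
 * instance, does roughly (sketch, error unwinding omitted):
 *
 *      if (cpupri_init(&rd->cpupri) != 0)
 *              return -ENOMEM;
 *      ...
 *      cpupri_cleanup(&rd->cpupri);
 */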

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
        int i;

        kfree(cp->cpu_to_pri);
        for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
                free_cpumask_var(cp->pri_to_cpu[i].mask);
}