// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/cpupri.c
 *
 * CPU priority management
 *
 * Copyright (C) 2007-2008 Novell
 *
 * Author: Gregory Haskins <ghaskins@novell.com>
 *
 * This code tracks the priority of each CPU so that global migration
 * decisions are easy to calculate. Each CPU can be in a state as follows:
 *
 * (INVALID), NORMAL, RT1, ... RT99, HIGHER
 *
 * going from the lowest priority to the highest. CPUs in the INVALID state
 * are not eligible for routing. The system maintains this state with
 * a 2 dimensional bitmap (the first for priority class, the second for CPUs
 * in that class). Therefore a typical application without affinity
 * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 * searches). For tasks with affinity restrictions, the algorithm has a
 * worst case complexity of O(min(101, nr_domcpus)), though the scenario that
 * yields the worst case search is fairly contrived.
 */
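
/*
 * Illustrative usage sketch: the RT scheduler drives this interface by
 * calling cpupri_set() whenever a runqueue's highest RT priority changes
 * and cpupri_find() when it needs a push target (see find_lowest_rq() in
 * kernel/sched/rt.c).  The variable names below are hypothetical:
 *
 *	cpupri_set(&rq->rd->cpupri, rq->cpu, new_highest_prio);
 *
 *	if (cpupri_find(&task_rq(p)->rd->cpupri, p, p, lowest_mask))
 *		target = cpumask_any(lowest_mask);
 */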

/*
 *  p->rt_priority   p->prio   newpri   cpupri
 *
 *                        -1       -1   (CPUPRI_INVALID)
 *
 *                        99        0   (CPUPRI_NORMAL)
 *
 *               1        98       98        1
 *             ...
 *              49        50       50       49
 *              50        49       49       50
 *             ...
 *              99         0        0       99
 *
 *                       100      100   (CPUPRI_HIGHER)
 */
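/*
 * For example, an RT task with rt_priority 50 has p->prio 49, and
 * convert_prio(49) returns cpupri 50; the highest RT priority
 * (rt_priority 99, p->prio 0) maps to cpupri 99, as in the table above.
 */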
static int convert_prio(int prio)
{
	int cpupri;

	switch (prio) {
	case CPUPRI_INVALID:
		cpupri = CPUPRI_INVALID;	/* -1 */
		break;

	case 0 ... 98:
		cpupri = MAX_RT_PRIO-1 - prio;	/* 1 ... 99 */
		break;

	case MAX_RT_PRIO-1:
		cpupri = CPUPRI_NORMAL;		/* 0 */
		break;

	case MAX_RT_PRIO:
		cpupri = CPUPRI_HIGHER;		/* 100 */
		break;
	}

	return cpupri;
}

static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
				struct cpumask *lowest_mask, int idx)
{
	struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
	int skip = 0;

	if (!atomic_read(&(vec)->count))
		skip = 1;
	/*
	 * When looking at the vector, we need to read the counter,
	 * do a memory barrier, then read the mask.
	 *
	 * Note: This is still all racy, but we can deal with it.
	 * Ideally, we only want to look at masks that are set.
	 *
	 * If a mask is not set, then the only thing wrong is that we
	 * did a little more work than necessary.
	 *
	 * If we read a zero count but the mask is set, because of the
	 * memory barriers, that can only happen when the highest prio
	 * task for a run queue has left the run queue, in which case,
	 * it will be followed by a pull. If the task we are processing
	 * fails to find a proper place to go, that pull request will
	 * pull this task if the run queue is running at a lower
	 * priority.
	 */
	smp_rmb();

	/* Need to do the rmb for every iteration */
	if (skip)
		return 0;

	if ((p && cpumask_any_and(&p->cpus_mask, vec->mask) >= nr_cpu_ids) ||
	    (!p && cpumask_any(vec->mask) >= nr_cpu_ids))
		return 0;

	if (lowest_mask) {
		if (p) {
			cpumask_and(lowest_mask, &p->cpus_mask, vec->mask);
			cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
		} else {
			cpumask_copy(lowest_mask, vec->mask);
		}

		/*
		 * We have to ensure that we have at least one bit
		 * still set in the array, since the map could have
		 * been concurrently emptied between the first and
		 * second reads of vec->mask. If we hit this
		 * condition, simply act as though we never hit this
		 * priority level and continue on.
		 */
		if (cpumask_empty(lowest_mask))
			return 0;
	}

	return 1;
}

int cpupri_find(struct cpupri *cp, struct task_struct *sched_ctx,
		struct task_struct *exec_ctx,
		struct cpumask *lowest_mask)
{
	return cpupri_find_fitness(cp, sched_ctx, exec_ctx, lowest_mask, NULL);
}

/**
 * cpupri_find_fitness - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @sched_ctx: The task whose priority drives the search
 * @exec_ctx: The task whose CPU affinity (cpus_mask) constrains the search
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 * @fitness_fn: A pointer to a function to do custom checks of whether the CPU
 *              fits specific criteria, so that we only return those CPUs.
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation. By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times. While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find_fitness(struct cpupri *cp, struct task_struct *sched_ctx,
			struct task_struct *exec_ctx,
			struct cpumask *lowest_mask,
			bool (*fitness_fn)(struct task_struct *p, int cpu))
{
	int task_pri = convert_prio(sched_ctx->prio);
	int idx, cpu;

	WARN_ON_ONCE(task_pri >= CPUPRI_NR_PRIORITIES);

	for (idx = 0; idx < task_pri; idx++) {

		if (!__cpupri_find(cp, exec_ctx, lowest_mask, idx))
			continue;

		if (!lowest_mask || !fitness_fn)
			return 1;

		/* Ensure the capacity of the CPUs fits the task */
		for_each_cpu(cpu, lowest_mask) {
			if (!fitness_fn(sched_ctx, cpu))
				cpumask_clear_cpu(cpu, lowest_mask);
		}

		/*
		 * If no CPU at the current priority can fit the task,
		 * continue looking.
		 */
		if (cpumask_empty(lowest_mask))
			continue;

		return 1;
	}

	/*
	 * If we failed to find a fitting lowest_mask, kick off a new search
	 * but without taking into account any fitness criteria this time.
	 *
	 * This rule favours honouring priority over fitting the task on the
	 * correct CPU (Capacity Awareness being the only user now).
	 * The idea is that if a higher priority task can run, then it should
	 * run even if this ends up being on an unfitting CPU.
	 *
	 * The cost of this trade-off is not entirely clear and will probably
	 * be good for some workloads and bad for others.
	 *
	 * The main idea here is that if some CPUs were over-committed, we try
	 * to spread, which is what the scheduler traditionally did. Sys admins
	 * must do proper RT planning to avoid overloading the system if they
	 * really care.
	 */
	if (fitness_fn)
		return cpupri_find(cp, sched_ctx, exec_ctx, lowest_mask);

	return 0;
}
EXPORT_SYMBOL_GPL(cpupri_find_fitness);
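
/*
 * Illustrative sketch of a fitness callback: @fitness_fn must return true
 * when @p can be placed on @cpu.  The in-tree user is the capacity check
 * in kernel/sched/rt.c (rt_task_fits_capacity()); a caller, with
 * hypothetical names, looks roughly like:
 *
 *	static bool my_fits_cpu(struct task_struct *p, int cpu)
 *	{
 *		return true;	// trivially accept every CPU
 *	}
 *
 *	found = cpupri_find_fitness(&rd->cpupri, p, p, lowest_mask,
 *				    my_fits_cpu);
 */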

/**
 * cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
 * @cpu: The target CPU
 * @newpri: The priority (INVALID,NORMAL,RT1-RT99,HIGHER) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	int do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the CPU was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being missed by the priority loop in cpupri_find.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic();
		atomic_inc(&(vec)->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic();

		/*
		 * When removing from the vector, we decrement the counter
		 * first, do a memory barrier, and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
		smp_mb__after_atomic();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	kfree(cp->cpu_to_pri);
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}
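
/*
 * Illustrative pairing: a cpupri context is embedded in the root domain
 * and is set up and torn down roughly like this (see init_rootdomain()
 * and free_rootdomain() in kernel/sched/topology.c); the label name here
 * is hypothetical:
 *
 *	if (cpupri_init(&rd->cpupri))
 *		goto free_other_state;	// propagates -ENOMEM
 *	...
 *	cpupri_cleanup(&rd->cpupri);
 */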