// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/cpupri.c
 *
 *  CPU priority management
 *
 *  Copyright (C) 2007-2008 Novell
 *
 *  Author: Gregory Haskins <ghaskins@novell.com>
 *
 *  This code tracks the priority of each CPU so that global migration
 *  decisions are easy to calculate.  Each CPU can be in a state as follows:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 *  going from the lowest priority to the highest.  CPUs in the INVALID state
 *  are not eligible for routing.  The system maintains this state with
 *  a 2 dimensional bitmap (the first for priority class, the second for CPUs
 *  in that class).  Therefore a typical application without affinity
 *  restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 *  searches).  For tasks with affinity restrictions, the algorithm has a
 *  worst case complexity of O(min(102, nr_domcpus)), though the scenario that
 *  yields the worst case search is fairly contrived.
 */
#include "sched.h"
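
/*
 * Illustrative vector layout, assuming MAX_RT_PRIO == 100 (so 102 vectors
 * in total, matching the 102-based cpupri range described above):
 *
 *   pri_to_cpu[0]   - CPUs whose runqueue is running the idle task
 *   pri_to_cpu[1]   - CPUs running a normal (non-RT) task
 *   pri_to_cpu[2]   - CPUs running an RT task at the lowest RT priority
 *   ...
 *   pri_to_cpu[101] - CPUs running an RT task at the highest RT priority
 *
 * A search scans these vectors from index 0 upwards, so the first non-empty
 * vector that intersects the task's affinity mask yields the lowest-priority
 * CPUs that task could be routed to.
 */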

/* Convert between a 140-based task->prio and our 102-based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}
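
/*
 * Worked examples of the mapping above, assuming the usual kernel values
 * MAX_RT_PRIO == 100 and MAX_PRIO == 140:
 *
 *   p->prio == 140 (idle)          -> CPUPRI_IDLE   (0)
 *   p->prio in 100..139 (normal)   -> CPUPRI_NORMAL (1)
 *   p->prio == 99 (lowest RT)      -> 2
 *   p->prio == 0  (highest RT)     -> 101
 */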

#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
/**
 * drop_nopreempt_cpus - remove likely non-preemptible CPUs from the mask
 * @lowest_mask: mask with selected CPUs (non-NULL)
 */
static void
drop_nopreempt_cpus(struct cpumask *lowest_mask)
{
	unsigned int cpu = cpumask_first(lowest_mask);

	while (cpu < nr_cpu_ids) {
		/* unlocked access */
		struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr);

		if (task_may_not_preempt(task, cpu))
			cpumask_clear_cpu(cpu, lowest_mask);

		cpu = cpumask_next(cpu, lowest_mask);
	}
}
#endif

static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
				struct cpumask *lowest_mask, int idx,
				bool drop_nopreempts)
{
	struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
	int skip = 0;

	if (!atomic_read(&vec->count))
		skip = 1;
	/*
	 * When looking at the vector, we need to read the counter,
	 * do a memory barrier, then read the mask.
	 *
	 * Note: This is still all racy, but we can deal with it.
	 *  Ideally, we only want to look at masks that are set.
	 *
	 *  If a mask is not set, then the only thing wrong is that we
	 *  did a little more work than necessary.
	 *
	 *  If we read a zero count but the mask is set, because of the
	 *  memory barriers, that can only happen when the highest prio
	 *  task for a run queue has left the run queue, in which case,
	 *  it will be followed by a pull. If the task we are processing
	 *  fails to find a proper place to go, that pull request will
	 *  pull this task if the run queue is running at a lower
	 *  priority.
	 */
	smp_rmb();

	/* Need to do the rmb for every iteration */
	if (skip)
		return 0;

	if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
		return 0;

	if (lowest_mask) {
		cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
		cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);

#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
		if (drop_nopreempts)
			drop_nopreempt_cpus(lowest_mask);
#endif

		/*
		 * We have to ensure that we have at least one bit
		 * still set in the array, since the map could have
		 * been concurrently emptied between the first and
		 * second reads of vec->mask.  If we hit this
		 * condition, simply act as though we never hit this
		 * priority level and continue on.
		 */
		if (cpumask_empty(lowest_mask))
			return 0;
	}

	return 1;
}

int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	return cpupri_find_fitness(cp, p, lowest_mask, NULL);
}

/**
 * cpupri_find_fitness - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 * @fitness_fn: A pointer to a function that performs custom checks on
 *              whether a CPU fits specific criteria, so that we only
 *              return those CPUs.
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask,
		bool (*fitness_fn)(struct task_struct *p, int cpu))
{
	int task_pri = convert_prio(p->prio);
	int idx, cpu;
	bool drop_nopreempts = task_pri <= MAX_RT_PRIO;

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
retry:
#endif
	for (idx = 0; idx < task_pri; idx++) {

		if (!__cpupri_find(cp, p, lowest_mask, idx, drop_nopreempts))
			continue;

		if (!lowest_mask || !fitness_fn)
			return 1;

		/* Ensure the capacity of the CPUs fits the task */
		for_each_cpu(cpu, lowest_mask) {
			if (!fitness_fn(p, cpu))
				cpumask_clear_cpu(cpu, lowest_mask);
		}

		/*
		 * If no CPU at the current priority can fit the task,
		 * continue looking.
		 */
		if (cpumask_empty(lowest_mask))
			continue;

		return 1;
	}

	/*
	 * If we couldn't find any preemptible CPUs, retry without dropping
	 * the non-preemptible ones, so we can still find the lowest-priority
	 * target and avoid priority inversion.
	 */
#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
	if (drop_nopreempts) {
		drop_nopreempts = false;
		goto retry;
	}
#endif

	/*
	 * If we failed to find a fitting lowest_mask, kick off a new search
	 * but without taking into account any fitness criteria this time.
	 *
	 * This rule favours honouring priority over fitting the task on the
	 * correct CPU (Capacity Awareness being the only user now).
	 * The idea is that if a higher priority task can run, then it should
	 * run even if this ends up being on an unfitting CPU.
	 *
	 * The cost of this trade-off is not entirely clear and will probably
	 * be good for some workloads and bad for others.
	 *
	 * The main idea here is that if some CPUs were overcommitted, we try
	 * to spread, which is what the scheduler traditionally did.  Sysadmins
	 * must do proper RT planning to avoid overloading the system if they
	 * really care.
	 */
	if (fitness_fn)
		return cpupri_find(cp, p, lowest_mask);

	return 0;
}
EXPORT_SYMBOL_GPL(cpupri_find_fitness);
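
/*
 * Illustrative caller sketch (hypothetical helper; the real users live in
 * the RT scheduler's push/pull path, e.g. find_lowest_rq() in
 * kernel/sched/rt.c):
 *
 *	struct cpumask *lowest_mask = ...;  // caller-owned scratch mask
 *
 *	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
 *		return -1;  // no CPU runs at a lower prio than @task
 *	// pick a CPU from lowest_mask, preferring task_cpu(task) if set
 */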

/**
 * cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
 * @cpu: The target CPU
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	int do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the CPU was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being missed by the priority loop in cpupri_find.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic();
		atomic_inc(&vec->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic();

		/*
		 * When removing from the vector, we decrement the counter
		 * first, do a memory barrier, and then clear the mask.
		 */
		atomic_dec(&vec->count);
		smp_mb__after_atomic();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}
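
/*
 * Ordering summary: the barriers in cpupri_set() pair with the smp_rmb()
 * in __cpupri_find() as follows:
 *
 *   writer (cpupri_set)              reader (__cpupri_find)
 *   -------------------              ----------------------
 *   cpumask_set_cpu(cpu, mask)       skip = !atomic_read(&count)
 *   smp_mb__before_atomic()          smp_rmb()
 *   atomic_inc(&count)               read mask (cpumask_and/any_and)
 *
 * A reader that observes the incremented count is thus guaranteed to also
 * observe the mask bit.  On removal the count is decremented before the
 * mask bit is cleared, so a zero count may briefly accompany a set mask
 * bit; see the comment in __cpupri_find() for why that race is benign.
 */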

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	kfree(cp->cpu_to_pri);
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}
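
/*
 * Usage sketch (illustrative; "out_err" is a hypothetical label):
 * cpupri_init()/cpupri_cleanup() bracket the lifetime of the root domain
 * that embeds this cpupri context, roughly:
 *
 *	if (cpupri_init(&rd->cpupri))
 *		goto out_err;           // propagate -ENOMEM
 *	...
 *	cpupri_cleanup(&rd->cpupri);    // on root-domain teardown
 */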

#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
/*
 * cpupri_check_rt - check whether the current CPU is running an RT task.
 * Should be called from an rcu-sched read-side section.
 */
bool cpupri_check_rt(void)
{
	int cpu = raw_smp_processor_id();

	return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
}
#endif