/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H

/*
 * Cpumasks provide a bitmap suitable for representing the
 * set of CPUs in a system, one bit position per CPU number. In general,
 * only nr_cpu_ids (<= NR_CPUS) bits are valid.
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/bug.h>

/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;

/**
 * cpumask_bits - get the bits in a cpumask
 * @maskp: the struct cpumask *
 *
 * You should only assume nr_cpu_ids bits of this mask are valid. This is
 * a macro so it's const-correct.
 */
#define cpumask_bits(maskp) ((maskp)->bits)

/**
 * cpumask_pr_args - printf args to output a cpumask
 * @maskp: cpumask to be printed
 *
 * Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
 */
#define cpumask_pr_args(maskp)		nr_cpu_ids, cpumask_bits(maskp)
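
/*
 * Example (illustrative sketch, not part of this header): printing the
 * online mask through the '%*pb[l]' printk extension. The pr_info() call
 * and the "online cpus" label are assumptions made up for this example.
 *
 *	pr_info("online cpus: %*pbl\n", cpumask_pr_args(cpu_online_mask));
 *
 * '%*pbl' prints a ranged list such as "0-3,5"; '%*pb' prints hex words.
 */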

#if NR_CPUS == 1
#define nr_cpu_ids		1U
#else
extern unsigned int nr_cpu_ids;
#endif

#ifdef CONFIG_CPUMASK_OFFSTACK
/* Assuming NR_CPUS is huge, a runtime limit is more efficient.  Also,
 * not all bits may be allocated. */
#define nr_cpumask_bits	nr_cpu_ids
#else
#define nr_cpumask_bits	((unsigned int)NR_CPUS)
#endif

/*
 * The following particular system cpumasks and operations manage
 * possible, present, active and online cpus.
 *
 *     cpu_possible_mask - has bit 'cpu' set iff cpu is populatable
 *     cpu_present_mask  - has bit 'cpu' set iff cpu is populated
 *     cpu_online_mask   - has bit 'cpu' set iff cpu available to scheduler
 *     cpu_active_mask   - has bit 'cpu' set iff cpu available to migration
 *     cpu_isolated_mask - has bit 'cpu' set iff cpu isolated
 *
 *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
 *
 *  The cpu_possible_mask is fixed at boot time, as the set of CPU ids
 *  that could ever be plugged in at any time during the life of that
 *  system boot.  The cpu_present_mask is dynamic(*), representing which
 *  CPUs are currently plugged in.  And cpu_online_mask is the dynamic
 *  subset of cpu_present_mask, indicating those CPUs available for
 *  scheduling.
 *
 *  If HOTPLUG is enabled, then cpu_possible_mask is forced to have
 *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
 *  ACPI reports present at boot.
 *
 *  If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
 *  depending on what ACPI reports as currently plugged in, otherwise
 *  cpu_present_mask is just a copy of cpu_possible_mask.
 *
 *  (*) Well, cpu_present_mask is dynamic in the hotplug case.  If not
 *      hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
 *
 * Subtleties:
 * 1) UP archs (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
 *    assumption that their single CPU is online.  The UP
 *    cpu_{online,possible,present}_masks are placebos.  Changing them
 *    will have no useful effect on the following num_*_cpus()
 *    and cpu_*() macros in the UP case.  This ugliness is a UP
 *    optimization - don't waste any instructions or memory references
 *    asking if you're online or how many CPUs there are if there is
 *    only one CPU.
 */

extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask   ((const struct cpumask *)&__cpu_online_mask)
#define cpu_present_mask  ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask   ((const struct cpumask *)&__cpu_active_mask)

#ifdef CONFIG_CPU_ISOLATION_OPT
extern struct cpumask __cpu_isolated_mask;
#define cpu_isolated_mask ((const struct cpumask *)&__cpu_isolated_mask)
#endif

extern atomic_t __num_online_cpus;

#if NR_CPUS > 1
/**
 * num_online_cpus() - Read the number of online CPUs
 *
 * Despite the fact that __num_online_cpus is of type atomic_t, this
 * interface gives only a momentary snapshot and is not protected against
 * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
 * region.
 */
static inline unsigned int num_online_cpus(void)
{
	return atomic_read(&__num_online_cpus);
}
#define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
#define num_present_cpus()	cpumask_weight(cpu_present_mask)
#define num_active_cpus()	cpumask_weight(cpu_active_mask)
#define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
#define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
#define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
#define cpu_active(cpu)		cpumask_test_cpu((cpu), cpu_active_mask)
#else
#define num_online_cpus()	1U
#define num_possible_cpus()	1U
#define num_present_cpus()	1U
#define num_active_cpus()	1U
#define cpu_online(cpu)		((cpu) == 0)
#define cpu_possible(cpu)	((cpu) == 0)
#define cpu_present(cpu)	((cpu) == 0)
#define cpu_active(cpu)		((cpu) == 0)
#endif
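
/*
 * Example (illustrative sketch): a driver deciding whether to spread work
 * across CPUs. my_driver_can_parallelize() is an assumption made up for
 * this example.
 *
 *	static bool my_driver_can_parallelize(void)
 *	{
 *		// Snapshot only: CPUs may come or go right after this reads.
 *		return num_online_cpus() > 1;
 *	}
 *
 * Hold the hotplug lock (cpus_read_lock()) if the answer must stay stable
 * while it is being acted upon.
 */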

#if defined(CONFIG_CPU_ISOLATION_OPT) && NR_CPUS > 1
#define num_isolated_cpus()	cpumask_weight(cpu_isolated_mask)
#define num_online_uniso_cpus()						\
({									\
	cpumask_t mask;							\
									\
	cpumask_andnot(&mask, cpu_online_mask, cpu_isolated_mask);	\
	cpumask_weight(&mask);						\
})
#define cpu_isolated(cpu)	cpumask_test_cpu((cpu), cpu_isolated_mask)
#else /* !CONFIG_CPU_ISOLATION_OPT || NR_CPUS == 1 */
#define num_isolated_cpus()	0U
#define num_online_uniso_cpus()	num_online_cpus()
#define cpu_isolated(cpu)	0U
#endif

extern cpumask_t cpus_booted_once_mask;

static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	WARN_ON_ONCE(cpu >= bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
}

/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
{
	cpu_max_bits_warn(cpu, nr_cpumask_bits);
	return cpu;
}

#if NR_CPUS == 1
/* Uniprocessor.  Assume all masks are "1". */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
	return 0;
}

static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
	return 0;
}

/* Valid inputs for n are -1 and 0. */
static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	return n+1;
}

static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
	return n+1;
}

static inline unsigned int cpumask_next_and(int n,
					    const struct cpumask *srcp,
					    const struct cpumask *andp)
{
	return n+1;
}

static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
					     int start, bool wrap)
{
	/* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */
	return (wrap && n == 0);
}

/* cpu must be a valid cpu, ie 0, so there's no other choice. */
static inline unsigned int cpumask_any_but(const struct cpumask *mask,
					   unsigned int cpu)
{
	return 1;
}

static inline unsigned int cpumask_local_spread(unsigned int i, int node)
{
	return 0;
}

static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
					     const struct cpumask *src2p)
{
	return cpumask_next_and(-1, src1p, src2p);
}

#define for_each_cpu(cpu, mask)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_not(cpu, mask)		\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_wrap(cpu, mask, start)	\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
#define for_each_cpu_and(cpu, mask1, mask2)	\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
#else
/**
 * cpumask_first - get the first cpu in a cpumask
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
	return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_last - get the last CPU in a cpumask
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpumask_bits if no CPUs set.
 */
static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
	return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits);
}

unsigned int cpumask_next(int n, const struct cpumask *srcp);

/**
 * cpumask_next_zero - get the next unset cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus unset.
 */
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
}

int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
unsigned int cpumask_local_spread(unsigned int i, int node);
int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p);

/**
 * for_each_cpu - iterate over every cpu in a mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu(cpu, mask)				\
	for ((cpu) = -1;				\
		(cpu) = cpumask_next((cpu), (mask)),	\
		(cpu) < nr_cpu_ids;)
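
/*
 * Example (illustrative sketch): summing a per-CPU counter over all CPUs
 * in a mask. The per-CPU variable 'my_hits' and my_total_hits() are
 * assumptions made up for this example.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_hits);
 *
 *	static unsigned long my_total_hits(const struct cpumask *mask)
 *	{
 *		unsigned long sum = 0;
 *		int cpu;
 *
 *		for_each_cpu(cpu, mask)
 *			sum += per_cpu(my_hits, cpu);
 *		return sum;
 *	}
 */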

/**
 * for_each_cpu_not - iterate over every cpu in a complemented mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_not(cpu, mask)				\
	for ((cpu) = -1;					\
		(cpu) = cpumask_next_zero((cpu), (mask)),	\
		(cpu) < nr_cpu_ids;)

extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);

/**
 * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 * @start: the start location
 *
 * The implementation does not assume any bit in @mask is set (including @start).
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_wrap(cpu, mask, start)					\
	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);	\
	     (cpu) < nr_cpumask_bits;						\
	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
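
/*
 * Example (illustrative sketch): round-robin work placement that starts
 * scanning after the previously used CPU instead of always favouring the
 * low-numbered CPUs. 'my_last_cpu' and my_pick_next_cpu() are assumptions
 * made up for this example.
 *
 *	static int my_last_cpu;
 *
 *	static int my_pick_next_cpu(const struct cpumask *mask)
 *	{
 *		int cpu;
 *
 *		for_each_cpu_wrap(cpu, mask, (my_last_cpu + 1) % nr_cpu_ids) {
 *			my_last_cpu = cpu;
 *			return cpu;
 *		}
 *		return -1;	// mask was empty
 *	}
 */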

/**
 * for_each_cpu_and - iterate over every cpu in both masks
 * @cpu: the (optionally unsigned) integer iterator
 * @mask1: the first cpumask pointer
 * @mask2: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places.  It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_and(&tmp, &mask1, &mask2);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_and(cpu, mask1, mask2)				\
	for ((cpu) = -1;						\
		(cpu) = cpumask_next_and((cpu), (mask1), (mask2)),	\
		(cpu) < nr_cpu_ids;)
#endif /* SMP */

#define CPU_BITS_NONE						\
{								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL			\
}

#define CPU_BITS_CPU0						\
{								\
	[0] = 1UL						\
}

/**
 * cpumask_set_cpu - set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
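
/*
 * Example (illustrative sketch): cpumask_set_cpu() is atomic and safe
 * against concurrent updaters; __cpumask_set_cpu() is not, and suits
 * masks no one else can see yet. 'new_mask' is an assumption made up
 * for this example.
 *
 *	struct cpumask new_mask;
 *	int cpu;
 *
 *	cpumask_clear(&new_mask);
 *	for_each_online_cpu(cpu)
 *		__cpumask_set_cpu(cpu, &new_mask);	// private, non-atomic OK
 */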

/**
 * cpumask_clear_cpu - clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * cpumask_test_cpu - test for a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns 1 if @cpu is set in @cpumask, else returns 0
 */
static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
	return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}

/**
 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
 *
 * test_and_set_bit wrapper for cpumasks.
 */
static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}

/**
 * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
 *
 * test_and_clear_bit wrapper for cpumasks.
 */
static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
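
/*
 * Example (illustrative sketch): using the atomic test-and-set as a
 * one-shot per-CPU "claim" flag, e.g. for one-time initialization per
 * CPU. 'my_claimed' and my_claim_cpu() are assumptions made up for this
 * example.
 *
 *	static struct cpumask my_claimed;
 *
 *	static bool my_claim_cpu(int cpu)
 *	{
 *		// True only for the first caller to claim @cpu.
 *		return !cpumask_test_and_set_cpu(cpu, &my_claimed);
 *	}
 */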

/**
 * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_setall(struct cpumask *dstp)
{
	bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_clear(struct cpumask *dstp)
{
	bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_and - *dstp = *src1p & *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 *
 * If *@dstp is empty, returns 0, else returns 1
 */
static inline int cpumask_and(struct cpumask *dstp,
			      const struct cpumask *src1p,
			      const struct cpumask *src2p)
{
	return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
			  cpumask_bits(src2p), nr_cpumask_bits);
}
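
/*
 * Example (illustrative sketch): intersecting a policy mask with the
 * online mask before use, falling back when the result is empty.
 * my_effective_cpus() and its parameters are assumptions made up for
 * this example.
 *
 *	static void my_effective_cpus(const struct cpumask *requested,
 *				      struct cpumask *effective)
 *	{
 *		// cpumask_and() returns 0 when the intersection is empty.
 *		if (!cpumask_and(effective, requested, cpu_online_mask))
 *			cpumask_copy(effective, cpu_online_mask);
 *	}
 */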

/**
 * cpumask_or - *dstp = *src1p | *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
			      const struct cpumask *src2p)
{
	bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
		  cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_xor - *dstp = *src1p ^ *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_xor(struct cpumask *dstp,
			       const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
		   cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_andnot - *dstp = *src1p & ~*src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 *
 * If *@dstp is empty, returns 0, else returns 1
 */
static inline int cpumask_andnot(struct cpumask *dstp,
				 const struct cpumask *src1p,
				 const struct cpumask *src2p)
{
	return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
			     cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_complement - *dstp = ~*srcp
 * @dstp: the cpumask result
 * @srcp: the input to invert
 */
static inline void cpumask_complement(struct cpumask *dstp,
				      const struct cpumask *srcp)
{
	bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
			  nr_cpumask_bits);
}

/**
 * cpumask_equal - *src1p == *src2p
 * @src1p: the first input
 * @src2p: the second input
 */
static inline bool cpumask_equal(const struct cpumask *src1p,
				 const struct cpumask *src2p)
{
	return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
			    nr_cpumask_bits);
}

/**
 * cpumask_or_equal - *src1p | *src2p == *src3p
 * @src1p: the first input
 * @src2p: the second input
 * @src3p: the third input
 */
static inline bool cpumask_or_equal(const struct cpumask *src1p,
				    const struct cpumask *src2p,
				    const struct cpumask *src3p)
{
	return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
			       cpumask_bits(src3p), nr_cpumask_bits);
}

/**
 * cpumask_intersects - (*src1p & *src2p) != 0
 * @src1p: the first input
 * @src2p: the second input
 */
static inline bool cpumask_intersects(const struct cpumask *src1p,
				      const struct cpumask *src2p)
{
	return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
				 nr_cpumask_bits);
}

/**
 * cpumask_subset - (*src1p & ~*src2p) == 0
 * @src1p: the first input
 * @src2p: the second input
 *
 * Returns 1 if *@src1p is a subset of *@src2p, else returns 0
 */
static inline int cpumask_subset(const struct cpumask *src1p,
				 const struct cpumask *src2p)
{
	return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
			     nr_cpumask_bits);
}

/**
 * cpumask_empty - *srcp == 0
 * @srcp: the cpumask to check; true iff all cpus < nr_cpu_ids are clear.
 */
static inline bool cpumask_empty(const struct cpumask *srcp)
{
	return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_full - *srcp == 0xFFFFFFFF...
 * @srcp: the cpumask to check; true iff all cpus < nr_cpu_ids are set.
 */
static inline bool cpumask_full(const struct cpumask *srcp)
{
	return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_weight - Count of bits in *srcp
 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
 */
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
	return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_shift_right - *dstp = *srcp >> n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static inline void cpumask_shift_right(struct cpumask *dstp,
				       const struct cpumask *srcp, int n)
{
	bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
			   nr_cpumask_bits);
}

/**
 * cpumask_shift_left - *dstp = *srcp << n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static inline void cpumask_shift_left(struct cpumask *dstp,
				      const struct cpumask *srcp, int n)
{
	bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
			  nr_cpumask_bits);
}

/**
 * cpumask_copy - *dstp = *srcp
 * @dstp: the result
 * @srcp: the input cpumask
 */
static inline void cpumask_copy(struct cpumask *dstp,
				const struct cpumask *srcp)
{
	bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_any - pick a "random" cpu from *srcp
 * @srcp: the input cpumask
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any(srcp) cpumask_first(srcp)

/**
 * cpumask_first_and - return the first cpu from *src1p & *src2p
 * @src1p: the first input
 * @src2p: the second input
 *
 * Returns >= nr_cpu_ids if no cpus set in both.  See also cpumask_next_and().
 */
#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))

/**
 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
 * @mask1: the first input cpumask
 * @mask2: the second input cpumask
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
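
/*
 * Example (illustrative sketch): choosing a CPU from the intersection of
 * a preferred mask and the online mask, with a fallback. 'preferred' is
 * an assumption made up for this example.
 *
 *	int cpu = cpumask_any_and(preferred, cpu_online_mask);
 *
 *	if (cpu >= nr_cpu_ids)			// intersection was empty
 *		cpu = cpumask_any(cpu_online_mask);
 */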

/**
 * cpumask_of - the cpumask containing just a given cpu
 * @cpu: the cpu (< nr_cpu_ids)
 */
#define cpumask_of(cpu) (get_cpu_mask(cpu))

/**
 * cpumask_parse_user - extract a cpumask from a user string
 * @buf: the buffer to extract from
 * @len: the length of the buffer
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpumask_parse_user(const char __user *buf, int len,
				     struct cpumask *dstp)
{
	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_parselist_user - extract a cpumask from a user string of ranges
 * @buf: the buffer to extract from
 * @len: the length of the buffer
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpumask_parselist_user(const char __user *buf, int len,
					 struct cpumask *dstp)
{
	return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
				     nr_cpumask_bits);
}

/**
 * cpumask_parse - extract a cpumask from a string
 * @buf: the buffer to extract from
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
	return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpulist_parse - extract a cpumask from a user string of ranges
 * @buf: the buffer to extract from
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
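
/*
 * Example (illustrative sketch): parsing a range string such as "0-3,8"
 * into a cpumask and sanity-checking the result. 'my_cpus_str' and
 * my_parse_cpus() are assumptions made up for this example.
 *
 *	static char *my_cpus_str = "0-3";
 *
 *	static int my_parse_cpus(struct cpumask *dstp)
 *	{
 *		int err = cpulist_parse(my_cpus_str, dstp);
 *
 *		if (err)
 *			return err;	// -errno on malformed input
 *		return cpumask_intersects(dstp, cpu_possible_mask) ? 0 : -EINVAL;
 *	}
 */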

/**
 * cpumask_size - size to allocate for a 'struct cpumask' in bytes
 */
static inline unsigned int cpumask_size(void)
{
	return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
}

/*
 * cpumask_var_t: struct cpumask for stack usage.
 *
 * Oh, the wicked games we play!  In order to make kernel coding a
 * little more difficult, we typedef cpumask_var_t to an array or a
 * pointer: doing &mask on an array is a noop, so it still works.
 *
 * ie.
 *	cpumask_var_t tmpmask;
 *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	  ... use 'tmpmask' like a normal struct cpumask * ...
 *
 *	free_cpumask_var(tmpmask);
 *
 *
 * However, there is one notable exception: alloc_cpumask_var() allocates
 * only nr_cpumask_bits bits (whereas the real cpumask_t always has
 * NR_CPUS bits).  Therefore you must not dereference cpumask_var_t:
 *
 *	cpumask_var_t tmpmask;
 *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	var = *tmpmask;
 *
 * This code performs an NR_CPUS-sized memcpy and can corrupt memory.
 * cpumask_copy() provides safe copy functionality.
 *
 * Note that there is another evil here: If you define a cpumask_var_t
 * as a percpu variable then the way to obtain the address of the cpumask
 * structure differs between configurations, which influences which
 * this_cpu_* operation must be used.  Please use this_cpu_cpumask_var_ptr()
 * in those cases.  The direct use of this_cpu_ptr() or this_cpu_read()
 * will lead to failures when the other type of cpumask_var_t
 * implementation is configured.
 *
 * Please also note that __cpumask_var_read_mostly can be used to declare
 * a cpumask_var_t variable itself (not its content) as read mostly.
 */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;

#define this_cpu_cpumask_var_ptr(x)	this_cpu_read(x)
#define __cpumask_var_read_mostly	__read_mostly

bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);

static inline bool cpumask_available(cpumask_var_t mask)
{
	return mask != NULL;
}

#else
typedef struct cpumask cpumask_var_t[1];

#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
#define __cpumask_var_read_mostly

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return true;
}

static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					  int node)
{
	return true;
}

static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	cpumask_clear(*mask);
	return true;
}

static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					   int node)
{
	cpumask_clear(*mask);
	return true;
}

static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
}

static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}

static inline bool cpumask_available(cpumask_var_t mask)
{
	return true;
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
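
/*
 * Example (illustrative sketch): a percpu cpumask_var_t accessed with
 * this_cpu_cpumask_var_ptr(), so it works with both the offstack and the
 * array implementation. 'my_local_mask' and my_use_local_mask() are
 * assumptions made up for this example.
 *
 *	static DEFINE_PER_CPU(cpumask_var_t, my_local_mask);
 *
 *	static void my_use_local_mask(void)
 *	{
 *		struct cpumask *mask = this_cpu_cpumask_var_ptr(my_local_mask);
 *
 *		// Each cpumask_var_t must have been allocated beforehand
 *		// (e.g. zalloc_cpumask_var() per CPU) when OFFSTACK is on.
 *		cpumask_copy(mask, cpu_online_mask);
 *	}
 */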

/* It's common to want to use cpu_all_mask in struct member initializers,
 * so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define cpu_all_mask to_cpumask(cpu_all_bits)

/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])

#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
#define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)
#ifdef CONFIG_CPU_ISOLATION_OPT
#define for_each_isolated_cpu(cpu) for_each_cpu((cpu), cpu_isolated_mask)
#endif
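
/*
 * Example (illustrative sketch): initializing per-CPU state for every CPU
 * that could ever come online. Iterating the possible mask (rather than
 * the online mask) avoids missing CPUs hotplugged in later. 'my_state'
 * and my_init_all() are assumptions made up for this example.
 *
 *	static DEFINE_PER_CPU(int, my_state);
 *
 *	static void my_init_all(void)
 *	{
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			per_cpu(my_state, cpu) = -1;
 *	}
 */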

/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);

static inline void reset_cpu_possible_mask(void)
{
	bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);
}

static inline void
set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, &__cpu_possible_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_possible_mask);
}

static inline void
set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, &__cpu_present_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_present_mask);
}

void set_cpu_online(unsigned int cpu, bool online);

static inline void
set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, &__cpu_active_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_active_mask);
}

#ifdef CONFIG_CPU_ISOLATION_OPT
static inline void
set_cpu_isolated(unsigned int cpu, bool isolated)
{
	if (isolated)
		cpumask_set_cpu(cpu, &__cpu_isolated_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_isolated_mask);
}
#endif

/**
 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
 * @bitmap: the bitmap
 *
 * There are a few places where cpumask_var_t isn't appropriate and
 * static cpumasks must be used (eg. very early boot), yet we don't
 * expose the definition of 'struct cpumask'.
 *
 * This does the conversion, and can be used as a constant initializer.
 */
#define to_cpumask(bitmap)						\
	((struct cpumask *)(1 ? (bitmap)				\
			    : (void *)sizeof(__check_is_bitmap(bitmap))))

static inline int __check_is_bitmap(const unsigned long *bitmap)
{
	return 1;
}

/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 */
extern const unsigned long
	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
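
/*
 * Example (illustrative sketch): cpumask_of() builds on get_cpu_mask() to
 * yield a constant single-CPU mask without any allocation, handy for
 * mask-based APIs, e.g.:
 *
 *	set_cpus_allowed_ptr(task, cpumask_of(2));	// pin @task to CPU 2
 *
 * 'task' and the choice of CPU 2 are assumptions made up for this example.
 */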

#define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))

#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL							\
{									\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}

#else /* NR_CPUS > BITS_PER_LONG */

#define CPU_BITS_ALL							\
{									\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}
#endif /* NR_CPUS > BITS_PER_LONG */

/**
 * cpumap_print_to_pagebuf - copies the cpumask into the buffer either
 *	as comma-separated list of cpus or hex values of cpumask
 * @list: whether the cpumask should be printed as a list (true) or as
 *	hex values (false)
 * @mask: the cpumask to copy
 * @buf: the buffer to copy into
 *
 * Returns the length of the (null-terminated) @buf string, zero if
 * nothing is copied.
 */
static inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
	return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
				       nr_cpu_ids);
}

#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#else
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#endif /* NR_CPUS > BITS_PER_LONG */

#define CPU_MASK_NONE							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL				\
} }

#define CPU_MASK_CPU0							\
(cpumask_t) { {								\
	[0] = 1UL							\
} }

#endif /* __LINUX_CPUMASK_H */