1 #include <linux/kernel.h>
2 #include <linux/bitops.h>
3 #include <linux/cpumask.h>
4 #include <linux/module.h>
5 #include <linux/bootmem.h>
6
__first_cpu(const cpumask_t * srcp)7 int __first_cpu(const cpumask_t *srcp)
8 {
9 return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
10 }
11 EXPORT_SYMBOL(__first_cpu);
12
__next_cpu(int n,const cpumask_t * srcp)13 int __next_cpu(int n, const cpumask_t *srcp)
14 {
15 return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
16 }
17 EXPORT_SYMBOL(__next_cpu);
18
#if NR_CPUS > 64
/*
 * Like __next_cpu(), but bounded by nr_cpu_ids rather than NR_CPUS.
 * Only needed on configs large enough that the two differ materially.
 */
int __next_cpu_nr(int n, const cpumask_t *srcp)
{
	int cpu = find_next_bit(srcp->bits, nr_cpu_ids, n + 1);

	return cpu < nr_cpu_ids ? cpu : nr_cpu_ids;
}
EXPORT_SYMBOL(__next_cpu_nr);
#endif
27
__any_online_cpu(const cpumask_t * mask)28 int __any_online_cpu(const cpumask_t *mask)
29 {
30 int cpu;
31
32 for_each_cpu_mask(cpu, *mask) {
33 if (cpu_online(cpu))
34 break;
35 }
36 return cpu;
37 }
38 EXPORT_SYMBOL(__any_online_cpu);
39
40 /**
41 * cpumask_next_and - get the next cpu in *src1p & *src2p
42 * @n: the cpu prior to the place to search (ie. return will be > @n)
43 * @src1p: the first cpumask pointer
44 * @src2p: the second cpumask pointer
45 *
46 * Returns >= nr_cpu_ids if no further cpus set in both.
47 */
cpumask_next_and(int n,const struct cpumask * src1p,const struct cpumask * src2p)48 int cpumask_next_and(int n, const struct cpumask *src1p,
49 const struct cpumask *src2p)
50 {
51 while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
52 if (cpumask_test_cpu(n, src2p))
53 break;
54 return n;
55 }
56 EXPORT_SYMBOL(cpumask_next_and);
57
58 /**
59 * cpumask_any_but - return a "random" in a cpumask, but not this one.
60 * @mask: the cpumask to search
61 * @cpu: the cpu to ignore.
62 *
63 * Often used to find any cpu but smp_processor_id() in a mask.
64 * Returns >= nr_cpu_ids if no cpus set.
65 */
cpumask_any_but(const struct cpumask * mask,unsigned int cpu)66 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
67 {
68 unsigned int i;
69
70 cpumask_check(cpu);
71 for_each_cpu(i, mask)
72 if (i != cpu)
73 break;
74 return i;
75 }
76
77 /* These are not inline because of header tangles. */
78 #ifdef CONFIG_CPUMASK_OFFSTACK
79 /**
80 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
81 * @mask: pointer to cpumask_var_t where the cpumask is returned
82 * @flags: GFP_ flags
83 *
84 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
85 * a nop returning a constant 1 (in <linux/cpumask.h>)
86 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
87 *
88 * In addition, mask will be NULL if this fails. Note that gcc is
89 * usually smart enough to know that mask can never be NULL if
90 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
91 * too.
92 */
alloc_cpumask_var_node(cpumask_var_t * mask,gfp_t flags,int node)93 bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
94 {
95 if (likely(slab_is_available()))
96 *mask = kmalloc_node(cpumask_size(), flags, node);
97 else {
98 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
99 printk(KERN_ERR
100 "=> alloc_cpumask_var: kmalloc not available!\n");
101 #endif
102 *mask = NULL;
103 }
104 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
105 if (!*mask) {
106 printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
107 dump_stack();
108 }
109 #endif
110 /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
111 if (*mask) {
112 unsigned int tail;
113 tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
114 memset(cpumask_bits(*mask) + cpumask_size() - tail,
115 0, tail);
116 }
117
118 return *mask != NULL;
119 }
120 EXPORT_SYMBOL(alloc_cpumask_var_node);
121
122 /**
123 * alloc_cpumask_var - allocate a struct cpumask
124 * @mask: pointer to cpumask_var_t where the cpumask is returned
125 * @flags: GFP_ flags
126 *
127 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
128 * a nop returning a constant 1 (in <linux/cpumask.h>).
129 *
130 * See alloc_cpumask_var_node.
131 */
alloc_cpumask_var(cpumask_var_t * mask,gfp_t flags)132 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
133 {
134 return alloc_cpumask_var_node(mask, flags, numa_node_id());
135 }
136 EXPORT_SYMBOL(alloc_cpumask_var);
137
138 /**
139 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
140 * @mask: pointer to cpumask_var_t where the cpumask is returned
141 *
142 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
143 * a nop (in <linux/cpumask.h>).
144 * Either returns an allocated (zero-filled) cpumask, or causes the
145 * system to panic.
146 */
alloc_bootmem_cpumask_var(cpumask_var_t * mask)147 void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
148 {
149 *mask = alloc_bootmem(cpumask_size());
150 }
151
152 /**
153 * free_cpumask_var - frees memory allocated for a struct cpumask.
154 * @mask: cpumask to free
155 *
156 * This is safe on a NULL mask.
157 */
free_cpumask_var(cpumask_var_t mask)158 void free_cpumask_var(cpumask_var_t mask)
159 {
160 kfree(mask);
161 }
162 EXPORT_SYMBOL(free_cpumask_var);
163
164 /**
165 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
166 * @mask: cpumask to free
167 */
free_bootmem_cpumask_var(cpumask_var_t mask)168 void __init free_bootmem_cpumask_var(cpumask_var_t mask)
169 {
170 free_bootmem((unsigned long)mask, cpumask_size());
171 }
172 #endif
173