#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/bootmem.h>

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (i.e. the return value will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
		if (cpumask_test_cpu(n, src2p))
			break;
	return n;
}
EXPORT_SYMBOL(cpumask_next_and);
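
/*
 * Illustrative sketch (hypothetical helper, names are assumptions): walk the
 * intersection of an arbitrary affinity mask and cpu_online_mask by feeding
 * the previous result back in as @n, starting from -1.
 */
static unsigned int count_online_in(const struct cpumask *affinity)
{
	int cpu = -1;
	unsigned int count = 0;

	while ((cpu = cpumask_next_and(cpu, affinity, cpu_online_mask)) < nr_cpu_ids)
		count++;

	return count;
}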

/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}
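
/*
 * Illustrative sketch (hypothetical caller, names are assumptions): pick some
 * online CPU other than the one we are running on, falling back to the local
 * CPU when it is the only one online.  Assumes preemption is already disabled
 * so smp_processor_id() is stable.
 */
static unsigned int pick_remote_cpu(void)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int target = cpumask_any_but(cpu_online_mask, this_cpu);

	return target < nr_cpu_ids ? target : this_cpu;
}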

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
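
/*
 * Illustrative sketch (hypothetical helper, names are assumptions): visit
 * every CPU in @mask exactly once, starting at @start and wrapping past the
 * end, i.e. the loop shape that for_each_cpu_wrap() expands to.
 */
static void visit_all_wrapped(const struct cpumask *mask, int start)
{
	int cpu;

	for (cpu = cpumask_next_wrap(start - 1, mask, start, false);
	     cpu < nr_cpumask_bits;
	     cpu = cpumask_next_wrap(cpu, mask, start, true))
		pr_debug("visiting cpu %d\n", cpu);
}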

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
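
/*
 * Illustrative sketch (hypothetical caller, names are assumptions): the usual
 * allocate/use/free pattern.  The zeroing variant is used so the mask starts
 * empty; with CONFIG_CPUMASK_OFFSTACK=n the allocation is a nop that always
 * succeeds, but the caller still checks the result so the code works either way.
 */
static int build_example_mask(void)
{
	cpumask_var_t tmp_mask;

	if (!zalloc_cpumask_var(&tmp_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(tmp_mask, cpu_online_mask);
	cpumask_clear_cpu(0, tmp_mask);		/* e.g. leave CPU 0 alone */

	/* ... use tmp_mask ... */

	free_cpumask_var(tmp_mask);
	return 0;
}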

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_virt_alloc(cpumask_size(), 0);
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free_early(__pa(mask), cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu, local numa cpus first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a numa aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	if (node == -1) {
		for_each_cpu(cpu, cpu_online_mask)
			if (i-- == 0)
				return cpu;
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
			if (i-- == 0)
				return cpu;

		for_each_cpu(cpu, cpu_online_mask) {
			/* Skip NUMA nodes, done above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
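
/*
 * Illustrative sketch (hypothetical caller, names are assumptions): hand out
 * one CPU per queue, preferring CPUs on the device's NUMA node and wrapping
 * around once every online CPU has been used.
 */
static void spread_queue_affinity(unsigned int nr_queues, int dev_node)
{
	unsigned int q;

	for (q = 0; q < nr_queues; q++)
		pr_debug("queue %u -> cpu %u\n", q,
			 cpumask_local_spread(q, dev_node));
}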