// SPDX-License-Identifier: GPL-2.0-only
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
#include <assert.h>

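/*
 * Note: RC_STRUCT()/RC_CHK_ACCESS()/ADD_RC_CHK() and friends are
 * libperf's reference-count checking helpers; with checking disabled
 * they reduce to plain struct and pointer accesses.
 */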
void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
	RC_CHK_ACCESS(map)->nr = nr_cpus;
}

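/*
 * Allocate a map with room for nr_cpus entries; the CPU array is stored
 * inline after the header and the reference count starts at one.
 */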
struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
{
	RC_STRUCT(perf_cpu_map) *cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);
	struct perf_cpu_map *result;

	if (ADD_RC_CHK(result, cpus)) {
		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}
	return result;
}

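/*
 * The "dummy" map holds the single value -1, the convention for
 * "any CPU" (e.g. per-thread events that are not bound to one CPU).
 */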
struct perf_cpu_map *perf_cpu_map__dummy_new(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		RC_CHK_ACCESS(cpus)->map[0].cpu = -1;

	return cpus;
}

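/* Free the map, warning once if its refcount is not already zero. */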
static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
			  "cpu_map refcnt unbalanced\n");
		RC_CHK_FREE(map);
	}
}

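/* Take an extra reference on @map; a NULL map is passed through unchanged. */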
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	struct perf_cpu_map *result;

	if (RC_CHK_GET(result, map))
		refcount_inc(perf_cpu_map__refcnt(map));

	return result;
}

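/* Drop one reference; the map is freed when the count reaches zero. */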
void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map) {
		if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
			cpu_map__delete(map);
		else
			RC_CHK_PUT(map);
	}
}

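/*
 * Build the default map: CPUs 0..N-1, where N is the number of online
 * processors reported by sysconf(_SC_NPROCESSORS_ONLN).
 */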
static struct perf_cpu_map *cpu_map__default_new(void)
{
	struct perf_cpu_map *cpus;
	int nr_cpus;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	cpus = perf_cpu_map__alloc(nr_cpus);
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			RC_CHK_ACCESS(cpus)->map[i].cpu = i;
	}

	return cpus;
}

struct perf_cpu_map *perf_cpu_map__default_new(void)
{
	return cpu_map__default_new();
}

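/* qsort() comparator that orders CPUs by ascending number. */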
static int cmp_cpu(const void *a, const void *b)
{
	const struct perf_cpu *cpu_a = a, *cpu_b = b;

	return cpu_a->cpu - cpu_b->cpu;
}

static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	return RC_CHK_ACCESS(cpus)->map[idx];
}

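/*
 * Build a map from the first nr_cpus entries of tmp_cpus, sorting them
 * and dropping duplicates so the result is always ordered and unique.
 */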
static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
	int i, j;

	if (cpus != NULL) {
		memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
		qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
		/* Remove dups */
		j = 0;
		for (i = 0; i < nr_cpus; i++) {
			if (i == 0 ||
			    __perf_cpu_map__cpu(cpus, i).cpu !=
			    __perf_cpu_map__cpu(cpus, i - 1).cpu) {
				RC_CHK_ACCESS(cpus)->map[j++].cpu =
					__perf_cpu_map__cpu(cpus, i).cpu;
			}
		}
		perf_cpu_map__set_nr(cpus, j);
		assert(j <= nr_cpus);
	}
	return cpus;
}

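/*
 * Parse a CPU list such as "0-2,5" from @file: ranges are expanded and
 * the result is sorted and de-duplicated, so "0-2,5" yields the map
 * {0, 1, 2, 5}. An empty file falls back to the default online map.
 */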
struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
	struct perf_cpu_map *cpus = NULL;
	int nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			int new_max = nr_cpus + cpu - prev - 1;

			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
							  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			while (++prev < cpu)
				tmp_cpus[nr_cpus++].cpu = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++].cpu = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}

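/*
 * Read the kernel's online CPU list from /sys/devices/system/cpu/online,
 * falling back to the default map if the file cannot be opened.
 */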
static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
{
	struct perf_cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (!onlnf)
		return cpu_map__default_new();

	cpus = perf_cpu_map__read(onlnf);
	fclose(onlnf);
	return cpus;
}

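/*
 * Parse a user-supplied CPU list string. A NULL @cpu_list means "all
 * online CPUs" and an empty string yields the dummy (any-CPU) map.
 * A minimal usage sketch (not part of this file):
 *
 *	struct perf_cpu_map *map = perf_cpu_map__new("0-3,7");
 *
 *	// map holds {0, 1, 2, 3, 7}; drop the reference when done:
 *	perf_cpu_map__put(map);
 */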
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return cpu_map__read_all_cpu_map();

	/*
	 * Must handle the case of an empty cpumap to cover the
	 * TOPOLOGY header for NUMA nodes with no CPU
	 * (e.g., because of CPU hotplug).
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
						  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* Check for duplicates. */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i].cpu == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++].cpu = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0')
		cpus = cpu_map__default_new();
	else
		cpus = perf_cpu_map__dummy_new();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}

static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return RC_CHK_ACCESS(cpus)->nr;
}

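/* Return the CPU at @idx, or -1 (as a struct perf_cpu) when out of range. */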
struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	if (cpus && idx < __perf_cpu_map__nr(cpus))
		return __perf_cpu_map__cpu(cpus, idx);

	return result;
}

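/* Number of entries; a NULL map counts as a single (any-CPU) entry. */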
int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? __perf_cpu_map__nr(cpus) : 1;
}

bool perf_cpu_map__empty(const struct perf_cpu_map *map)
{
	return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
}

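/*
 * Binary search for @cpu: maps are kept sorted, so return its index in
 * @cpus, or -1 when absent.
 */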
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	int low, high;

	if (!cpus)
		return -1;

	low = 0;
	high = __perf_cpu_map__nr(cpus);
	while (low < high) {
		int idx = (low + high) / 2;
		struct perf_cpu cpu_at_idx = __perf_cpu_map__cpu(cpus, idx);

		if (cpu_at_idx.cpu == cpu.cpu)
			return idx;

		if (cpu_at_idx.cpu > cpu.cpu)
			high = idx;
		else
			low = idx + 1;
	}

	return -1;
}

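/* True when @cpu is present in @cpus. */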
bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	return perf_cpu_map__idx(cpus, cpu) != -1;
}

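/* Maps are equal when identical, or of equal length with pairwise-equal CPUs. */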
bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
{
	int nr;

	if (lhs == rhs)
		return true;

	if (!lhs || !rhs)
		return false;

	nr = __perf_cpu_map__nr(lhs);
	if (nr != __perf_cpu_map__nr(rhs))
		return false;

	for (int idx = 0; idx < nr; idx++) {
		if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
			return false;
	}
	return true;
}

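/*
 * True when @map contains the -1 any-CPU value; since maps are sorted,
 * -1 can only appear as the first entry.
 */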
bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map)
{
	return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	// cpu_map__trim_new() qsort()s it, cpu_map__default_new() sorts it as well.
	return __perf_cpu_map__nr(map) > 0
		? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1)
		: result;
}

/** Is 'b' a subset of 'a'? */
bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
{
	if (a == b || !b)
		return true;
	if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
		return false;

	for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
		if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
			return false;
		if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
			j++;
			if (j == __perf_cpu_map__nr(b))
				return true;
		}
	}
	return false;
}

/*
 * Merge two cpumaps.
 *
 * orig either gets freed and replaced with a new map, or reused
 * with no reference count change (similar to "realloc").
 * other has its reference count increased.
 */

struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
					 struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged;

	if (perf_cpu_map__is_subset(orig, other))
		return orig;
	if (perf_cpu_map__is_subset(other, orig)) {
		perf_cpu_map__put(orig);
		return perf_cpu_map__get(other);
	}

	tmp_len = __perf_cpu_map__nr(orig) + __perf_cpu_map__nr(other);
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	/* Standard merge algorithm from Wikipedia */
	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
			if (__perf_cpu_map__cpu(orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
				j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		} else
			tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	}

	while (i < __perf_cpu_map__nr(orig))
		tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);

	while (j < __perf_cpu_map__nr(other))
		tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	assert(k <= tmp_len);

	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(orig);
	return merged;
}

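/*
 * Intersect two cpumaps: keep only CPUs present in both. If one map is
 * a subset of the other, that map is returned with a new reference;
 * NULL is returned when the intersection is empty.
 */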
struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
					     struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged = NULL;

	if (perf_cpu_map__is_subset(other, orig))
		return perf_cpu_map__get(orig);
	if (perf_cpu_map__is_subset(orig, other))
		return perf_cpu_map__get(other);

	tmp_len = max(__perf_cpu_map__nr(orig), __perf_cpu_map__nr(other));
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
			i++;
		else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
			j++;
		else {
			j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		}
	}
	if (k)
		merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	return merged;
}