Lines Matching +full:cpu +full:- +full:nr

1 // SPDX-License-Identifier: GPL-2.0
25 map = perf_cpu_map__empty_new(cpus->nr); in cpu_map__from_entries()
29 for (i = 0; i < cpus->nr; i++) { in cpu_map__from_entries()
31 * Special treatment for -1, which is not real cpu number, in cpu_map__from_entries()
32 * and we need to use (int) -1 to initialize map[i], in cpu_map__from_entries()
35 if (cpus->cpu[i] == (u16) -1) in cpu_map__from_entries()
36 map->map[i] = -1; in cpu_map__from_entries()
38 map->map[i] = (int) cpus->cpu[i]; in cpu_map__from_entries()
48 int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE; in cpu_map__from_mask() local
50 nr = bitmap_weight(mask->mask, nbits); in cpu_map__from_mask()
52 map = perf_cpu_map__empty_new(nr); in cpu_map__from_mask()
54 int cpu, i = 0; in cpu_map__from_mask() local
56 for_each_set_bit(cpu, mask->mask, nbits) in cpu_map__from_mask()
57 map->map[i++] = cpu; in cpu_map__from_mask()
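
cpu_map__from_mask() sizes the new map with bitmap_weight() and then copies each set bit index into map->map[] via for_each_set_bit(). The standalone sketch below is not the perf code; it walks a plain array of unsigned longs the same way, with the bit position standing in for the cpu number, and the helper name is made up for illustration.

#include <stdio.h>

/*
 * Minimal sketch, not the perf implementation: collect the indices of set
 * bits from an array of unsigned longs, the way cpu_map__from_mask() walks
 * mask->mask with for_each_set_bit(). Names are illustrative only.
 */
static int bits_to_cpus(const unsigned long *mask, int nbits, int *out)
{
        int nr = 0;

        for (int bit = 0; bit < nbits; bit++)
                if (mask[bit / (8 * sizeof(long))] & (1UL << (bit % (8 * sizeof(long)))))
                        out[nr++] = bit;        /* the bit index is the cpu number */
        return nr;                              /* plays the role of bitmap_weight() */
}

int main(void)
{
        unsigned long mask[1] = { 0x13 };       /* cpus 0, 1 and 4 */
        int cpus[64];
        int nr = bits_to_cpus(mask, 8 * sizeof(long), cpus);

        for (int i = 0; i < nr; i++)
                printf("%d%s", cpus[i], i + 1 < nr ? "," : "\n");  /* prints 0,1,4 */
        return 0;
}
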
65 if (data->type == PERF_CPU_MAP__CPUS) in cpu_map__new_data()
66 return cpu_map__from_entries((struct cpu_map_entries *)data->data); in cpu_map__new_data()
68 return cpu_map__from_mask((struct perf_record_record_cpu_map *)data->data); in cpu_map__new_data()
81 struct perf_cpu_map *perf_cpu_map__empty_new(int nr) in perf_cpu_map__empty_new() argument
83 struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr); in perf_cpu_map__empty_new()
88 cpus->nr = nr; in perf_cpu_map__empty_new()
89 for (i = 0; i < nr; i++) in perf_cpu_map__empty_new()
90 cpus->map[i] = -1; in perf_cpu_map__empty_new()
92 refcount_set(&cpus->refcnt, 1); in perf_cpu_map__empty_new()
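
perf_cpu_map__empty_new() makes a single allocation that covers the header plus nr map slots and marks every slot as -1 ("no cpu") before setting the refcount. A minimal sketch of that allocation pattern, assuming an illustrative struct layout rather than the real libperf one and leaving the refcount out:

#include <stdlib.h>

/* Illustrative layout only; the real struct perf_cpu_map lives in libperf. */
struct cpu_map_sketch {
        int nr;
        int map[];              /* flexible array member, sized at malloc time */
};

static struct cpu_map_sketch *cpu_map_sketch__new(int nr)
{
        /* one allocation for the header plus nr slots, as in the original */
        struct cpu_map_sketch *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);

        if (cpus) {
                cpus->nr = nr;
                for (int i = 0; i < nr; i++)
                        cpus->map[i] = -1;      /* -1 means "no cpu assigned yet" */
        }
        return cpus;
}

int main(void)
{
        struct cpu_map_sketch *cpus = cpu_map_sketch__new(4);

        if (!cpus)
                return 1;
        free(cpus);
        return 0;
}
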
98 static int cpu__get_topology_int(int cpu, const char *name, int *value) in cpu__get_topology_int() argument
103 "devices/system/cpu/cpu%d/topology/%s", cpu, name); in cpu__get_topology_int()
108 int cpu_map__get_socket_id(int cpu) in cpu_map__get_socket_id() argument
110 int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value); in cpu_map__get_socket_id()
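
cpu_map__get_socket_id() is a thin wrapper over cpu__get_topology_int(), which formats the per-cpu sysfs topology path and reads a single integer from it. A hedged, self-contained approximation using plain stdio instead of perf's sysfs helpers (the function names here are illustrative):

#include <stdio.h>

/*
 * Hedged approximation of cpu__get_topology_int(): read one integer from a
 * per-cpu topology file in sysfs. Error handling is reduced to "-1 on any
 * failure".
 */
static int topology_int(int cpu, const char *name, int *value)
{
        char path[256];
        FILE *fp;
        int ok;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%d/topology/%s", cpu, name);
        fp = fopen(path, "r");
        if (!fp)
                return -1;
        ok = fscanf(fp, "%d", value) == 1;
        fclose(fp);
        return ok ? 0 : -1;
}

static int socket_id(int cpu)
{
        int value;

        return topology_int(cpu, "physical_package_id", &value) ? -1 : value;
}

int main(void)
{
        printf("cpu0 socket: %d\n", socket_id(0));      /* -1 if sysfs is unavailable */
        return 0;
}
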
116 int cpu; in cpu_map__get_socket() local
118 if (idx > map->nr) in cpu_map__get_socket()
119 return -1; in cpu_map__get_socket()
121 cpu = map->map[idx]; in cpu_map__get_socket()
123 return cpu_map__get_socket_id(cpu); in cpu_map__get_socket()
128 return *(int *)a - *(int *)b; in cmp_ids()
132 int (*f)(struct perf_cpu_map *map, int cpu, void *data), in cpu_map__build_map()
136 int nr = cpus->nr; in cpu_map__build_map() local
137 int cpu, s1, s2; in cpu_map__build_map() local
140 c = calloc(1, sizeof(*c) + nr * sizeof(int)); in cpu_map__build_map()
142 return -1; in cpu_map__build_map()
144 for (cpu = 0; cpu < nr; cpu++) { in cpu_map__build_map()
145 s1 = f(cpus, cpu, data); in cpu_map__build_map()
146 for (s2 = 0; s2 < c->nr; s2++) { in cpu_map__build_map()
147 if (s1 == c->map[s2]) in cpu_map__build_map()
150 if (s2 == c->nr) { in cpu_map__build_map()
151 c->map[c->nr] = s1; in cpu_map__build_map()
152 c->nr++; in cpu_map__build_map()
156 qsort(c->map, c->nr, sizeof(int), cmp_ids); in cpu_map__build_map()
158 refcount_set(&c->refcnt, 1); in cpu_map__build_map()
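
cpu_map__build_map() collapses a per-cpu attribute (socket, die, ...) returned by the callback f into a deduplicated, sorted id list, sorting with the cmp_ids() comparator shown above. An equivalent standalone sketch over plain int arrays, with illustrative names:

#include <stdlib.h>

/* Same comparison as cmp_ids() in the listing. */
static int cmp_ids(const void *a, const void *b)
{
        return *(const int *)a - *(const int *)b;
}

/*
 * Illustrative sketch of what cpu_map__build_map() does with the ids the
 * callback returns: keep the first occurrence of each id, then sort. 'ids'
 * is caller-provided scratch space at least 'nr' entries long.
 */
static int build_unique_sorted(const int *per_cpu_id, int nr, int *ids)
{
        int n = 0;

        for (int cpu = 0; cpu < nr; cpu++) {
                int s1 = per_cpu_id[cpu], s2;

                for (s2 = 0; s2 < n; s2++)
                        if (s1 == ids[s2])
                                break;
                if (s2 == n)
                        ids[n++] = s1;          /* unseen id, append it */
        }
        qsort(ids, n, sizeof(int), cmp_ids);
        return n;
}

int main(void)
{
        int socket_of_cpu[] = { 1, 0, 1, 0 };   /* hypothetical per-cpu socket ids */
        int ids[4];
        int n = build_unique_sorted(socket_of_cpu, 4, ids);    /* ids = {0, 1} */

        return n == 2 ? 0 : 1;
}
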
163 int cpu_map__get_die_id(int cpu) in cpu_map__get_die_id() argument
165 int value, ret = cpu__get_topology_int(cpu, "die_id", &value); in cpu_map__get_die_id()
172 int cpu, die_id, s; in cpu_map__get_die() local
174 if (idx > map->nr) in cpu_map__get_die()
175 return -1; in cpu_map__get_die()
177 cpu = map->map[idx]; in cpu_map__get_die()
179 die_id = cpu_map__get_die_id(cpu); in cpu_map__get_die()
181 if (die_id == -1) in cpu_map__get_die()
185 if (s == -1) in cpu_map__get_die()
186 return -1; in cpu_map__get_die()
195 return -1; in cpu_map__get_die()
198 return -1; in cpu_map__get_die()
203 int cpu_map__get_core_id(int cpu) in cpu_map__get_core_id() argument
205 int value, ret = cpu__get_topology_int(cpu, "core_id", &value); in cpu_map__get_core_id()
209 int cpu_map__get_node_id(int cpu) in cpu_map__get_node_id() argument
211 return cpu__get_node(cpu); in cpu_map__get_node_id()
216 int cpu, s_die; in cpu_map__get_core() local
218 if (idx > map->nr) in cpu_map__get_core()
219 return -1; in cpu_map__get_core()
221 cpu = map->map[idx]; in cpu_map__get_core()
223 cpu = cpu_map__get_core_id(cpu); in cpu_map__get_core()
227 if (s_die == -1) in cpu_map__get_core()
228 return -1; in cpu_map__get_core()
237 if (WARN_ONCE(cpu >> 16, "The core id number is too big.\n")) in cpu_map__get_core()
238 return -1; in cpu_map__get_core()
240 return (s_die << 16) | (cpu & 0xffff); in cpu_map__get_core()
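
cpu_map__get_core() returns both identifiers packed into one int: the socket/die value in the upper 16 bits and the core id in the lower 16, which is why the WARN_ONCE above rejects core ids wider than 16 bits. Small illustrative helpers expressing the encoding and its inverse:

/*
 * Illustrative helpers for the encoding at the end of cpu_map__get_core():
 * socket/die in the upper 16 bits, core id in the lower 16.
 */
static inline int pack_core(int s_die, int core_id)
{
        return (s_die << 16) | (core_id & 0xffff);
}

static inline int unpack_core_id(int packed)
{
        return packed & 0xffff;
}

static inline int unpack_s_die(int packed)
{
        return packed >> 16;
}

int main(void)
{
        int packed = pack_core(2, 5);   /* hypothetical socket/die 2, core 5 */

        return unpack_s_die(packed) == 2 && unpack_core_id(packed) == 5 ? 0 : 1;
}
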
245 if (idx < 0 || idx >= map->nr) in cpu_map__get_node()
246 return -1; in cpu_map__get_node()
248 return cpu_map__get_node_id(map->map[idx]); in cpu_map__get_node()
271 /* setup simple routines to easily access node numbers given a cpu number */
279 return -1; in get_max_num()
284 while (--num) { in get_max_num()
285 if ((buf[num] == ',') || (buf[num] == '-')) { in get_max_num()
291 err = -1; in get_max_num()
295 /* convert from 0-based to 1-based */ in get_max_num()
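
get_max_num() reads a sysfs cpu/node list such as "0-7" or "0,2-5"; since those lists are ascending, the maximum is the last field, found by scanning backwards to the final ',' or '-' and then converting from 0-based to 1-based. A hedged standalone version of that parse (the helper name is made up):

#include <stdio.h>
#include <string.h>

/*
 * Hedged sketch of the parse in get_max_num(). 'list' is the content of
 * e.g. /sys/devices/system/cpu/possible.
 */
static int max_from_cpulist(const char *list, int *max)
{
        int i = (int)strlen(list);

        /* walk back to the character after the last ',' or '-' (or index 0) */
        while (--i > 0) {
                if (list[i] == ',' || list[i] == '-') {
                        i++;
                        break;
                }
        }
        if (i < 0 || sscanf(&list[i], "%d", max) != 1)
                return -1;
        *max += 1;      /* convert from 0-based to 1-based, like the original */
        return 0;
}

int main(void)
{
        int max;

        if (max_from_cpulist("0-7", &max) == 0)
                printf("%d\n", max);    /* prints 8 */
        return 0;
}
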
303 /* Determine highest possible cpu in the system for sparse allocation */
308 int ret = -1; in set_max_cpu_num()
318 /* get the highest possible cpu number for a sparse allocation */ in set_max_cpu_num()
319 ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt); in set_max_cpu_num()
329 /* get the highest present cpu number for a sparse allocation */ in set_max_cpu_num()
330 ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt); in set_max_cpu_num()
348 int ret = -1; in set_max_node_num()
357 /* get the highest possible cpu number for a sparse allocation */ in set_max_node_num()
396 int cpu__get_node(int cpu) in cpu__get_node() argument
400 return -1; in cpu__get_node()
403 return cpunode_map[cpu]; in cpu__get_node()
416 return -1; in init_cpunode_map()
420 cpunode_map[i] = -1; in init_cpunode_map()
429 unsigned int cpu, mem; in cpu__setup_cpunode_map() local
437 return -1; in cpu__setup_cpunode_map()
446 return -1; in cpu__setup_cpunode_map()
455 if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1) in cpu__setup_cpunode_map()
458 n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name); in cpu__setup_cpunode_map()
468 if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1) in cpu__setup_cpunode_map()
470 cpunode_map[cpu] = mem; in cpu__setup_cpunode_map()
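
cpu__setup_cpunode_map() builds the cpu-to-node table by walking /sys/devices/system/node: each node<N> directory holds cpu<M> entries, and every match records that cpu M belongs to node N. The sketch below follows the same two-level readdir() walk, but takes the destination array and its size as parameters instead of perf's globals, and skips the d_type checks the original performs:

#include <dirent.h>
#include <stdio.h>

/*
 * Sketch of the walk in cpu__setup_cpunode_map(); 'cpunode_map' here is a
 * caller-provided array, not the perf global of that name.
 */
static int setup_cpunode_map(int *cpunode_map, int max_cpu)
{
        DIR *dir = opendir("/sys/devices/system/node");
        struct dirent *dent;

        if (!dir)
                return -1;

        while ((dent = readdir(dir)) != NULL) {
                char path[512];
                DIR *node_dir;
                struct dirent *cpu_dent;
                unsigned int node, cpu;

                if (sscanf(dent->d_name, "node%u", &node) < 1)
                        continue;
                snprintf(path, sizeof(path), "/sys/devices/system/node/%s",
                         dent->d_name);
                node_dir = opendir(path);
                if (!node_dir)
                        continue;
                while ((cpu_dent = readdir(node_dir)) != NULL) {
                        /* "cpu0", "cpu1", ... are links to the node's cpus */
                        if (sscanf(cpu_dent->d_name, "cpu%u", &cpu) < 1)
                                continue;
                        if ((int)cpu < max_cpu)
                                cpunode_map[cpu] = node;
                }
                closedir(node_dir);
        }
        closedir(dir);
        return 0;
}

int main(void)
{
        int cpunode_map[1024];

        for (int i = 0; i < 1024; i++)
                cpunode_map[i] = -1;
        if (setup_cpunode_map(cpunode_map, 1024) == 0)
                printf("cpu0 is on node %d\n", cpunode_map[0]);
        return 0;
}
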
478 bool cpu_map__has(struct perf_cpu_map *cpus, int cpu) in cpu_map__has() argument
480 return perf_cpu_map__idx(cpus, cpu) != -1; in cpu_map__has()
485 return cpus->map[idx]; in cpu_map__cpu()
490 int i, cpu, start = -1; in cpu_map__snprint() local
496 for (i = 0; i < map->nr + 1; i++) { in cpu_map__snprint()
497 bool last = i == map->nr; in cpu_map__snprint()
499 cpu = last ? INT_MAX : map->map[i]; in cpu_map__snprint()
501 if (start == -1) { in cpu_map__snprint()
504 ret += snprintf(buf + ret, size - ret, in cpu_map__snprint()
506 map->map[i]); in cpu_map__snprint()
508 } else if (((i - start) != (cpu - map->map[start])) || last) { in cpu_map__snprint()
509 int end = i - 1; in cpu_map__snprint()
512 ret += snprintf(buf + ret, size - ret, in cpu_map__snprint()
514 map->map[start]); in cpu_map__snprint()
516 ret += snprintf(buf + ret, size - ret, in cpu_map__snprint()
517 "%s%d-%d", COMMA, in cpu_map__snprint()
518 map->map[start], map->map[end]); in cpu_map__snprint()
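
cpu_map__snprint() folds consecutive cpu numbers into ranges: a run of one prints as a lone number, longer runs print as "start-end", and the extra iteration with i == map->nr closes the final run. A compact standalone version of the same folding (illustrative names, assumes a sorted map and a large enough buffer):

#include <stdio.h>

/*
 * Standalone sketch of the range folding in cpu_map__snprint(), not the
 * perf function itself. 'map' must be sorted ascending.
 */
static int snprint_ranges(char *buf, size_t size, const int *map, int nr)
{
        int ret = 0, start = 0;

        if (size)
                buf[0] = '\0';
        for (int i = 1; i <= nr; i++) {
                /* close the current run when the sequence breaks or the map ends */
                if (i == nr || map[i] != map[i - 1] + 1) {
                        const char *comma = start ? "," : "";

                        if (i - 1 == start)
                                ret += snprintf(buf + ret, size - ret, "%s%d",
                                                comma, map[start]);
                        else
                                ret += snprintf(buf + ret, size - ret, "%s%d-%d",
                                                comma, map[start], map[i - 1]);
                        start = i;
                }
        }
        return ret;
}

int main(void)
{
        int cpus[] = { 0, 1, 2, 3, 7 };
        char buf[64];

        snprint_ranges(buf, sizeof(buf), cpus, 5);
        printf("%s\n", buf);    /* prints "0-3,7" */
        return 0;
}
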
536 return val - 10 + 'a'; in hex_char()
542 int i, cpu; in cpu_map__snprint_mask() local
545 int last_cpu = cpu_map__cpu(map, map->nr - 1); in cpu_map__snprint_mask()
556 for (i = 0; i < map->nr; i++) { in cpu_map__snprint_mask()
557 cpu = cpu_map__cpu(map, i); in cpu_map__snprint_mask()
558 bitmap[cpu / 8] |= 1 << (cpu % 8); in cpu_map__snprint_mask()
561 for (cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) { in cpu_map__snprint_mask()
562 unsigned char bits = bitmap[cpu / 8]; in cpu_map__snprint_mask()
564 if (cpu % 8) in cpu_map__snprint_mask()
570 if ((cpu % 32) == 0 && cpu > 0) in cpu_map__snprint_mask()
576 buf[size - 1] = '\0'; in cpu_map__snprint_mask()
577 return ptr - buf; in cpu_map__snprint_mask()
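
cpu_map__snprint_mask() renders the map as a hex bitmask: it sets one bit per cpu in a byte array, then emits nibbles from the one holding the highest cpu down to bit 0, using hex_char() for each digit. The simplified sketch below omits the ',' the original inserts every 32 cpus and assumes a sorted, non-empty cpu list with numbers below 256:

#include <stdio.h>

/* Same role as hex_char() in the listing: one nibble to a hex digit. */
static char hex_char(unsigned char val)
{
        if (val < 10)
                return val + '0';
        return val - 10 + 'a';
}

/* Simplified sketch of cpu_map__snprint_mask(), not the perf function. */
static void snprint_mask(char *buf, size_t size, const int *cpus, int nr)
{
        unsigned char bitmap[32] = { 0 };
        int last_cpu = cpus[nr - 1];
        char *ptr = buf;

        for (int i = 0; i < nr; i++)
                bitmap[cpus[i] / 8] |= 1 << (cpus[i] % 8);

        /* emit nibbles from the one holding the highest cpu down to bit 0 */
        for (int cpu = last_cpu / 4 * 4;
             cpu >= 0 && (size_t)(ptr - buf) + 1 < size; cpu -= 4) {
                unsigned char bits = bitmap[cpu / 8];

                if (cpu % 8)
                        bits >>= 4;     /* high nibble of this byte */
                else
                        bits &= 0xf;    /* low nibble */
                *ptr++ = hex_char(bits);
        }
        *ptr = '\0';
}

int main(void)
{
        int cpus[] = { 0, 1, 2, 3, 6 };
        char buf[64];

        snprint_mask(buf, sizeof(buf), cpus, 5);
        printf("%s\n", buf);    /* prints "4f": bit 6 plus bits 0-3 */
        return 0;
}
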
585 online = perf_cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */ in cpu_map__online()