Lines Matching +full:int +full:- +full:map +full:- +full:mask

1	// SPDX-License-Identifier: GPL-2.0-only
3	 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
5	 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
29	void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
34		len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
36		pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
39	size_t mmap__mmap_len(struct mmap *map)
41		return perf_mmap__mmap_len(&map->core);
44	int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
47				       int fd __maybe_unused)
58				       unsigned int auxtrace_pages __maybe_unused,
65				       int idx __maybe_unused,
71	static int perf_mmap__aio_enabled(struct mmap *map)
73		return map->aio.nr_cblocks > 0;
77	static int perf_mmap__aio_alloc(struct mmap *map, int idx)
79		map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
81		if (map->aio.data[idx] == MAP_FAILED) {
82			map->aio.data[idx] = NULL;
83			return -1;
89	static void perf_mmap__aio_free(struct mmap *map, int idx)
91		if (map->aio.data[idx]) {
92			munmap(map->aio.data[idx], mmap__mmap_len(map));
93			map->aio.data[idx] = NULL;
97	static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
103		int err = 0;
106			data = map->aio.data[idx];
107			mmap_len = mmap__mmap_len(map);
111				pr_err("Failed to allocate node mask for mbind: error %m\n");
112				return -1;
116				pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
118				err = -1;
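
The error strings above show that perf_mmap__aio_bind() places each AIO buffer on a chosen NUMA node with mbind(2). A minimal, self-contained sketch of that pattern, illustrative only and not the perf code itself (assumes libnuma's mbind wrapper, so link with -lnuma; the helper name and the fixed two-word mask are assumptions):

#include <stdio.h>
#include <string.h>
#include <numaif.h>	/* mbind(), MPOL_BIND */

/* Sketch: bind an already-mmapped buffer to NUMA node 'node'. */
static int bind_buf_to_node(void *buf, size_t len, unsigned long node)
{
	const unsigned long bits = 8 * sizeof(unsigned long);
	unsigned long nodemask[2] = { 0, 0 };

	if (node >= 2 * bits)
		return -1;
	nodemask[node / bits] |= 1UL << (node % bits);

	/* maxnode counts mask bits; node + 2 keeps the set bit in range */
	if (mbind(buf, len, MPOL_BIND, nodemask, node + 2, 0)) {
		perror("mbind");
		return -1;
	}
	return 0;
}

MPOL_BIND restricts future page allocations for that address range to the given node, so the buffer's pages end up on the memory node the caller asked for.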
126	static int perf_mmap__aio_alloc(struct mmap *map, int idx)
128		map->aio.data[idx] = malloc(mmap__mmap_len(map));
129		if (map->aio.data[idx] == NULL)
130			return -1;
135	static void perf_mmap__aio_free(struct mmap *map, int idx)
137		zfree(&(map->aio.data[idx]));
140	static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
141				       int cpu __maybe_unused, int affinity __maybe_unused)
147	static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
149		int delta_max, i, prio, ret;
151		map->aio.nr_cblocks = mp->nr_cblocks;
152		if (map->aio.nr_cblocks) {
153			map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
154			if (!map->aio.aiocb) {
156				return -1;
158			map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
159			if (!map->aio.cblocks) {
161				return -1;
163			map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
164			if (!map->aio.data) {
166				return -1;
169			for (i = 0; i < map->aio.nr_cblocks; ++i) {
170				ret = perf_mmap__aio_alloc(map, i);
171				if (ret == -1) {
173					return -1;
175				ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
176				if (ret == -1)
177					return -1;
179				 * Use cblock.aio_fildes value different from -1
184				map->aio.cblocks[i].aio_fildes = -1;
188				 * are kept in separate per-prio queues and adding
189				 * a new request will iterate thru shorter per-prio
193				prio = delta_max - i;
194				map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
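
The loop above spreads the control blocks across POSIX AIO priority levels, as the comment at lines 188-189 explains: aio_reqprio lowers a request's priority relative to the submitting thread and, per POSIX, must lie between 0 and sysconf(_SC_AIO_PRIO_DELTA_MAX), hence the clamp on prio. A hedged, self-contained sketch of submitting one write at a reduced priority (the helper and its parameters are illustrative; link with -lrt):

#include <aio.h>	/* struct aiocb, aio_write() */
#include <string.h>
#include <unistd.h>

/* Sketch: start one async write 'delta' priority steps below the caller. */
static int queue_async_write(int fd, void *buf, size_t len, int delta)
{
	static struct aiocb cb;	/* must stay valid until the request completes */
	long delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);

	if (delta < 0)
		delta = 0;
	if (delta > delta_max)
		delta = (int)delta_max;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes  = fd;
	cb.aio_buf     = buf;
	cb.aio_nbytes  = len;
	cb.aio_reqprio = delta;

	return aio_write(&cb);	/* completion is polled via aio_error()/aio_return() */
}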
201	static void perf_mmap__aio_munmap(struct mmap *map)
203		int i;
205		for (i = 0; i < map->aio.nr_cblocks; ++i)
206			perf_mmap__aio_free(map, i);
207		if (map->aio.data)
208			zfree(&map->aio.data);
209		zfree(&map->aio.cblocks);
210		zfree(&map->aio.aiocb);
213	static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
218	static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
224	static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
229	void mmap__munmap(struct mmap *map)
231		bitmap_free(map->affinity_mask.bits);
233		perf_mmap__aio_munmap(map);
234		if (map->data != NULL) {
235			munmap(map->data, mmap__mmap_len(map));
236			map->data = NULL;
238		auxtrace_mmap__munmap(&map->auxtrace_mmap);
241	static void build_node_mask(int node, struct mmap_cpu_mask *mask)
243		int c, cpu, nr_cpus;
252			cpu = cpu_map->map[c]; /* map c index to online cpu index */
254				set_bit(cpu, mask->bits);
258	static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
260		map->affinity_mask.nbits = cpu__max_cpu();
261		map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
262		if (!map->affinity_mask.bits)
263			return -1;
265		if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
266			build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
267		else if (mp->affinity == PERF_AFFINITY_CPU)
268			set_bit(map->core.cpu, map->affinity_mask.bits);
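
The mask assembled by perf_mmap__setup_affinity_mask() is an ordinary CPU bitmap; masks like this are typically applied to a thread with sched_setaffinity(2). A generic sketch of that call, not the perf helper itself (the function and its parameters are illustrative):

#define _GNU_SOURCE	/* sched_setaffinity(), CPU_SET() */
#include <sched.h>
#include <stdio.h>

/* Sketch: pin the calling thread to the CPUs listed in 'cpus'. */
static int pin_to_cpus(const int *cpus, int nr_cpus)
{
	cpu_set_t set;
	int i;

	CPU_ZERO(&set);
	for (i = 0; i < nr_cpus; i++)
		CPU_SET(cpus[i], &set);

	if (sched_setaffinity(0, sizeof(set), &set)) {	/* pid 0 == calling thread */
		perror("sched_setaffinity");
		return -1;
	}
	return 0;
}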
273	int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
275		if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
278			return -1;
281		if (mp->affinity != PERF_AFFINITY_SYS &&
282		    perf_mmap__setup_affinity_mask(map, mp)) {
283			pr_debug2("failed to alloc mmap affinity mask, error %d\n",
285			return -1;
289			mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");
291		map->core.flush = mp->flush;
293		map->comp_level = mp->comp_level;
295		if (map->comp_level && !perf_mmap__aio_enabled(map)) {
296			map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
298			if (map->data == MAP_FAILED) {
301				map->data = NULL;
302				return -1;
306		if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
307					&mp->auxtrace_mp, map->core.base, fd))
308			return -1;
310		return perf_mmap__aio_mmap(map, mp);
313	int perf_mmap__push(struct mmap *md, void *to,
314			    int push(struct mmap *map, void *to, void *buf, size_t size))
316		u64 head = perf_mmap__read_head(&md->core);
317		unsigned char *data = md->core.base + page_size;
320		int rc = 0;
322		rc = perf_mmap__read_init(&md->core);
324			return (rc == -EAGAIN) ? 1 : -1;
326		size = md->core.end - md->core.start;
328		if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
329			buf = &data[md->core.start & md->core.mask];
330			size = md->core.mask + 1 - (md->core.start & md->core.mask);
331			md->core.start += size;
334				rc = -1;
339		buf = &data[md->core.start & md->core.mask];
340		size = md->core.end - md->core.start;
341		md->core.start += size;
344			rc = -1;
348		md->core.prev = head;
349		perf_mmap__consume(&md->core);
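
The test at line 328 is the heart of perf_mmap__push(): when the readable range [start, end) crosses the end of the power-of-two ring buffer, it is handed to push() as two chunks, the tail of the buffer first. A self-contained sketch of the same split, with an 'emit' callback standing in for push() (illustrative only, not the perf code):

#include <stddef.h>
#include <stdint.h>

/* Sketch: drain [start, end) from a ring buffer of size mask + 1. */
static void drain_ring(unsigned char *data, uint64_t mask,
		       uint64_t start, uint64_t end,
		       void (*emit)(void *buf, size_t size))
{
	size_t size = end - start;

	if ((start & mask) + size != (end & mask)) {
		/* The range wraps: emit from 'start' to the physical end first. */
		size = mask + 1 - (start & mask);
		emit(&data[start & mask], size);
		start += size;
	}

	/* Remainder, or the whole range if it did not wrap. */
	emit(&data[start & mask], end - start);
}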