
Searched +full:cpu +full:- +full:map (Results 1 – 25 of 1091) sorted by relevance


/kernel/linux/linux-6.6/tools/perf/tests/
topology.c
1 // SPDX-License-Identifier: GPL-2.0
14 #define TEMPL "/tmp/perf-test-XXXXXX"
26 return -1; in get_temp()
44 session->evlist = evlist__new_default(); in session_write_header()
45 TEST_ASSERT_VAL("can't get evlist", session->evlist); in session_write_header()
47 perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY); in session_write_header()
48 perf_header__set_feat(&session->header, HEADER_NRCPUS); in session_write_header()
49 perf_header__set_feat(&session->header, HEADER_ARCH); in session_write_header()
51 session->header.data_size += DATA_SIZE; in session_write_header()
54 !perf_session__write_header(session, session->evlist, data.file.fd, true)); in session_write_header()
[all …]
cpumap.c
1 // SPDX-License-Identifier: GPL-2.0
6 #include "util/synthetic-events.h"
19 struct perf_record_cpu_map *map_event = &event->cpu_map; in process_event_mask()
21 struct perf_cpu_map *map; in process_event_mask() local
24 data = &map_event->data; in process_event_mask()
26 TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__MASK); in process_event_mask()
28 long_size = data->mask32_data.long_size; in process_event_mask()
32 TEST_ASSERT_VAL("wrong nr", data->mask32_data.nr == 1); in process_event_mask()
34 TEST_ASSERT_VAL("wrong cpu", perf_record_cpu_map_data__test_bit(0, data)); in process_event_mask()
35 TEST_ASSERT_VAL("wrong cpu", !perf_record_cpu_map_data__test_bit(1, data)); in process_event_mask()
[all …]
/kernel/linux/linux-5.10/tools/perf/util/
cpumap.c
1 // SPDX-License-Identifier: GPL-2.0
23 struct perf_cpu_map *map; in cpu_map__from_entries() local
25 map = perf_cpu_map__empty_new(cpus->nr); in cpu_map__from_entries()
26 if (map) { in cpu_map__from_entries()
29 for (i = 0; i < cpus->nr; i++) { in cpu_map__from_entries()
31 * Special treatment for -1, which is not real cpu number, in cpu_map__from_entries()
32 * and we need to use (int) -1 to initialize map[i], in cpu_map__from_entries()
35 if (cpus->cpu[i] == (u16) -1) in cpu_map__from_entries()
36 map->map[i] = -1; in cpu_map__from_entries()
38 map->map[i] = (int) cpus->cpu[i]; in cpu_map__from_entries()
[all …]
mmap.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
5 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
34 len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE); in mmap_cpu_mask__scnprintf()
36 pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf); in mmap_cpu_mask__scnprintf()
39 size_t mmap__mmap_len(struct mmap *map) in mmap__mmap_len() argument
41 return perf_mmap__mmap_len(&map->core); in mmap__mmap_len()
71 static int perf_mmap__aio_enabled(struct mmap *map) in perf_mmap__aio_enabled() argument
73 return map->aio.nr_cblocks > 0; in perf_mmap__aio_enabled()
77 static int perf_mmap__aio_alloc(struct mmap *map, int idx) in perf_mmap__aio_alloc() argument
[all …]
/kernel/linux/linux-5.10/arch/powerpc/platforms/cell/
cbe_regs.c
1 // SPDX-License-Identifier: GPL-2.0-only
20 #include <asm/cell-regs.h>
23 * Current implementation uses "cpu" nodes. We build our own mapping
24 * array of cpu numbers to cpu nodes locally for now to allow interrupt
26 * we implement cpu hotplug, we'll have to install an appropriate notifier
27 * in order to release references to the cpu going away
49 static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
65 if (np->data) in cbe_find_map()
66 return np->data; in cbe_find_map()
68 /* walk up path until cpu or be node was found */ in cbe_find_map()
[all …]
/kernel/linux/linux-6.6/arch/powerpc/platforms/cell/
cbe_regs.c
1 // SPDX-License-Identifier: GPL-2.0-only
19 #include <asm/cell-regs.h>
22 * Current implementation uses "cpu" nodes. We build our own mapping
23 * array of cpu numbers to cpu nodes locally for now to allow interrupt
25 * we implement cpu hotplug, we'll have to install an appropriate notifier
26 * in order to release references to the cpu going away
48 static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
64 if (np->data) in cbe_find_map()
65 return np->data; in cbe_find_map()
67 /* walk up path until cpu or be node was found */ in cbe_find_map()
[all …]
/kernel/linux/linux-6.6/tools/perf/util/
cpumap.c
1 // SPDX-License-Identifier: GPL-2.0
22 * CPU number.
34 return (data->mask32_data.long_size == 4) in perf_record_cpu_map_data__test_bit()
35 ? (bit_word32 < data->mask32_data.nr) && in perf_record_cpu_map_data__test_bit()
36 (data->mask32_data.mask[bit_word32] & bit_mask32) != 0 in perf_record_cpu_map_data__test_bit()
37 : (bit_word64 < data->mask64_data.nr) && in perf_record_cpu_map_data__test_bit()
38 (data->mask64_data.mask[bit_word64] & bit_mask64) != 0; in perf_record_cpu_map_data__test_bit()
41 /* Read ith mask value from data into the given 64-bit sized bitmap */
46 if (data->mask32_data.long_size == 4) in perf_record_cpu_map_data__read_one_mask()
47 bitmap[0] = data->mask32_data.mask[i]; in perf_record_cpu_map_data__read_one_mask()
[all …]
mmap.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
5 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
34 len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE); in mmap_cpu_mask__scnprintf()
36 pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf); in mmap_cpu_mask__scnprintf()
39 size_t mmap__mmap_len(struct mmap *map) in mmap__mmap_len() argument
41 return perf_mmap__mmap_len(&map->core); in mmap__mmap_len()
71 static int perf_mmap__aio_enabled(struct mmap *map) in perf_mmap__aio_enabled() argument
73 return map->aio.nr_cblocks > 0; in perf_mmap__aio_enabled()
77 static int perf_mmap__aio_alloc(struct mmap *map, int idx) in perf_mmap__aio_alloc() argument
[all …]
/kernel/linux/linux-6.6/tools/lib/perf/
cpumap.c
1 // SPDX-License-Identifier: GPL-2.0-only
13 void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus) in perf_cpu_map__set_nr() argument
15 RC_CHK_ACCESS(map)->nr = nr_cpus; in perf_cpu_map__set_nr()
24 cpus->nr = nr_cpus; in perf_cpu_map__alloc()
25 refcount_set(&cpus->refcnt, 1); in perf_cpu_map__alloc()
35 RC_CHK_ACCESS(cpus)->map[0].cpu = -1; in perf_cpu_map__dummy_new()
40 static void cpu_map__delete(struct perf_cpu_map *map) in cpu_map__delete() argument
42 if (map) { in cpu_map__delete()
43 WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0, in cpu_map__delete()
45 RC_CHK_FREE(map); in cpu_map__delete()
[all …]
evlist.c
1 // SPDX-License-Identifier: GPL-2.0
30 INIT_LIST_HEAD(&evlist->entries); in perf_evlist__init()
31 evlist->nr_entries = 0; in perf_evlist__init()
32 fdarray__init(&evlist->pollfd, 64); in perf_evlist__init()
39 if (evsel->system_wide) { in __perf_evlist__propagate_maps()
40 /* System wide: set the cpu map of the evsel to all online CPUs. */ in __perf_evlist__propagate_maps()
41 perf_cpu_map__put(evsel->cpus); in __perf_evlist__propagate_maps()
42 evsel->cpus = perf_cpu_map__new(NULL); in __perf_evlist__propagate_maps()
43 } else if (evlist->has_user_cpus && evsel->is_pmu_core) { in __perf_evlist__propagate_maps()
48 perf_cpu_map__put(evsel->cpus); in __perf_evlist__propagate_maps()
[all …]
/kernel/linux/linux-5.10/block/
blk-mq-cpumap.c
1 // SPDX-License-Identifier: GPL-2.0
3 * CPU <-> hardware queue mapping helpers
5 * Copyright (C) 2013-2014 Jens Axboe
12 #include <linux/cpu.h>
14 #include <linux/blk-mq.h>
16 #include "blk-mq.h"
21 return qmap->queue_offset + (q % nr_queues); in queue_index()
24 static int get_first_sibling(unsigned int cpu) in get_first_sibling() argument
28 ret = cpumask_first(topology_sibling_cpumask(cpu)); in get_first_sibling()
32 return cpu; in get_first_sibling()
[all …]
/kernel/linux/linux-6.6/Documentation/devicetree/bindings/cpu/
cpu-topology.txt
2 CPU topology binding description
6 1 - Introduction
12 - socket
13 - cluster
14 - core
15 - thread
18 symmetric multi-threading (SMT) is supported or not.
20 For instance in a system where CPUs support SMT, "cpu" nodes represent all
21 threads existing in the system and map to the hierarchy level "thread" above.
22 In systems where SMT is not supported "cpu" nodes represent all cores present
[all …]
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/cpu/
cpu-topology.txt
2 CPU topology binding description
6 1 - Introduction
12 - socket
13 - cluster
14 - core
15 - thread
18 symmetric multi-threading (SMT) is supported or not.
20 For instance in a system where CPUs support SMT, "cpu" nodes represent all
21 threads existing in the system and map to the hierarchy level "thread" above.
22 In systems where SMT is not supported "cpu" nodes represent all cores present
[all …]
/kernel/linux/linux-5.10/tools/perf/tests/
cpumap.c
1 // SPDX-License-Identifier: GPL-2.0
6 #include "util/synthetic-events.h"
19 struct perf_record_cpu_map *map_event = &event->cpu_map; in process_event_mask()
22 struct perf_cpu_map *map; in process_event_mask() local
25 data = &map_event->data; in process_event_mask()
27 TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__MASK); in process_event_mask()
29 mask = (struct perf_record_record_cpu_map *)data->data; in process_event_mask()
31 TEST_ASSERT_VAL("wrong nr", mask->nr == 1); in process_event_mask()
34 TEST_ASSERT_VAL("wrong cpu", test_bit(i, mask->mask)); in process_event_mask()
37 map = cpu_map__new_data(data); in process_event_mask()
[all …]
topology.c
1 // SPDX-License-Identifier: GPL-2.0
13 #define TEMPL "/tmp/perf-test-XXXXXX"
25 return -1; in get_temp()
43 session->evlist = perf_evlist__new_default(); in session_write_header()
44 TEST_ASSERT_VAL("can't get evlist", session->evlist); in session_write_header()
46 perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY); in session_write_header()
47 perf_header__set_feat(&session->header, HEADER_NRCPUS); in session_write_header()
48 perf_header__set_feat(&session->header, HEADER_ARCH); in session_write_header()
50 session->header.data_size += DATA_SIZE; in session_write_header()
53 !perf_session__write_header(session, session->evlist, data.file.fd, true)); in session_write_header()
[all …]
/kernel/linux/linux-6.6/kernel/bpf/
cpumap.c
1 // SPDX-License-Identifier: GPL-2.0-only
8 * DOC: cpu map
9 * The 'cpumap' is primarily used as a backend map for XDP BPF helper
13 * this map type redirects raw XDP frames to another CPU. The remote
14 * CPU will do SKB-allocation and call the normal network stack.
20 * basically allows for 10G wirespeed pre-filtering via bpf.
38 /* General idea: XDP packets getting XDP redirected to another CPU,
39 * will maximum be stored/queued for one driver ->poll() call. It is
41 * same CPU. Thus, cpu_map_flush operation can deduct via this_cpu_ptr()
45 #define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */
[all …]
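For orientation, here is a minimal sketch of how the cpumap type implemented by this file is typically used, following the DOC comment above: an XDP program hands raw frames to another CPU via bpf_redirect_map(), and that CPU builds the SKB and runs the normal stack. The map name, destination CPU and program name below are illustrative assumptions, not taken from this file.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Sketch: a cpumap whose entries are filled from user space with
 * bpf_map_update_elem(); the value type is struct bpf_cpumap_val. */
struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, struct bpf_cpumap_val);
} cpu_map SEC(".maps");

SEC("xdp")
int xdp_redirect_to_cpu(struct xdp_md *ctx)
{
	__u32 dest_cpu = 1;	/* illustrative fixed destination CPU */

	/* Queue the frame for the chosen CPU; the lower bits of the
	 * flags argument (XDP_PASS here) are the fallback return code
	 * if the map lookup fails. */
	return bpf_redirect_map(&cpu_map, dest_cpu, XDP_PASS);
}

char _license[] SEC("license") = "GPL";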
/kernel/linux/linux-6.6/tools/lib/perf/include/perf/
cpumap.h
1 /* SPDX-License-Identifier: GPL-2.0 */
9 /** A wrapper around a CPU to avoid confusion with the perf_cpu_map's map's indices. */
11 int cpu; member
22 * perf_cpu_map__dummy_new - a map with a singular "any CPU"/dummy -1 value.
28 LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
33 LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
37 * perf_cpu_map__empty - is map either empty or the "any CPU"/dummy value.
39 LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);
40 LIBPERF_API struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map);
41 LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu);
[all …]
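As a quick orientation to this API, a hedged user-space sketch using libperf (link with -lperf). The behaviour of perf_cpu_map__new(NULL) returning all online CPUs follows libperf's documentation rather than this header excerpt, and the program itself is illustrative.

#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
	struct perf_cpu_map *cpus;
	struct perf_cpu cpu;
	int idx;

	/* NULL asks libperf for a map covering all online CPUs. */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		return 1;

	/* Walk the map; idx is the map index, cpu the wrapped CPU number. */
	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
		printf("map index %d -> cpu %d\n", idx, cpu.cpu);

	perf_cpu_map__put(cpus);	/* drop our reference */
	return 0;
}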
/kernel/linux/linux-5.10/kernel/bpf/
cpumap.c
1 // SPDX-License-Identifier: GPL-2.0-only
7 /* The 'cpumap' is primarily used as a backend map for XDP BPF helper
11 * this map type redirects raw XDP frames to another CPU. The remote
12 * CPU will do SKB-allocation and call the normal network stack.
17 * basically allows for 10G wirespeed pre-filtering via bpf.
33 /* General idea: XDP packets getting XDP redirected to another CPU,
34 * will maximum be stored/queued for one driver ->poll() call. It is
36 * same CPU. Thus, cpu_map_flush operation can deduct via this_cpu_ptr()
40 #define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */
51 /* Struct for every remote "destination" CPU in map */
[all …]
/kernel/linux/linux-6.6/lib/
cpu_rmap.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * cpu_rmap.c: CPU affinity reverse-map support
13 * objects with CPU affinities. This can be seen as a reverse-map of
14 * CPU affinity. However, we do not assume that the object affinities
17 * CPU topology.
21 * alloc_cpu_rmap - allocate CPU affinity reverse-map
28 unsigned int cpu; in alloc_cpu_rmap() local
39 rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags); in alloc_cpu_rmap()
43 kref_init(&rmap->refcount); in alloc_cpu_rmap()
44 rmap->obj = (void **)((char *)rmap + obj_offset); in alloc_cpu_rmap()
[all …]
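A hedged in-kernel sketch of the reverse-map API documented above: allocate an rmap, register objects, declare their CPU affinities, then look up the nearest object for the running CPU. The function example_rmap_setup() and its queues/nr_queues parameters are hypothetical, and tying object i to CPU i is purely illustrative.

#include <linux/cpu_rmap.h>
#include <linux/smp.h>
#include <linux/printk.h>

static int example_rmap_setup(void **queues, unsigned int nr_queues)
{
	struct cpu_rmap *rmap;
	unsigned int i, cpu;
	int index;

	rmap = alloc_cpu_rmap(nr_queues, GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;

	for (i = 0; i < nr_queues; i++) {
		index = cpu_rmap_add(rmap, queues[i]);
		if (index < 0) {
			cpu_rmap_put(rmap);
			return index;
		}
		/* Declare which CPUs this object is "near"; here object i
		 * is arbitrarily tied to CPU i for illustration. */
		cpu_rmap_update(rmap, index, cpumask_of(i));
	}

	cpu = get_cpu();
	pr_info("nearest object to cpu %u: %p\n",
		cpu, cpu_rmap_lookup_obj(rmap, cpu));
	put_cpu();
	return 0;
}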
/kernel/linux/linux-6.6/drivers/clocksource/
ingenic-timer.c
1 // SPDX-License-Identifier: GPL-2.0
14 #include <linux/mfd/ingenic-tcu.h>
23 #include <dt-bindings/clock/ingenic,tcu.h>
32 unsigned int cpu; member
40 struct regmap *map; member
56 regmap_read(tcu->map, TCU_REG_TCNTc(tcu->cs_channel), &count); in ingenic_tcu_timer_read()
69 return container_of(timer, struct ingenic_tcu, timers[timer->cpu]); in to_ingenic_tcu()
83 regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel)); in ingenic_tcu_cevt_set_state_shutdown()
95 return -EINVAL; in ingenic_tcu_cevt_set_next()
97 regmap_write(tcu->map, TCU_REG_TDFRc(timer->channel), next); in ingenic_tcu_cevt_set_next()
[all …]
/kernel/linux/linux-5.10/drivers/clocksource/
ingenic-timer.c
1 // SPDX-License-Identifier: GPL-2.0
13 #include <linux/mfd/ingenic-tcu.h>
24 #include <dt-bindings/clock/ingenic,tcu.h>
33 unsigned int cpu; member
41 struct regmap *map; member
57 regmap_read(tcu->map, TCU_REG_TCNTc(tcu->cs_channel), &count); in ingenic_tcu_timer_read()
70 return container_of(timer, struct ingenic_tcu, timers[timer->cpu]); in to_ingenic_tcu()
84 regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel)); in ingenic_tcu_cevt_set_state_shutdown()
96 return -EINVAL; in ingenic_tcu_cevt_set_next()
98 regmap_write(tcu->map, TCU_REG_TDFRc(timer->channel), next); in ingenic_tcu_cevt_set_next()
[all …]
/kernel/linux/linux-6.6/Documentation/bpf/
map_hash.rst
1 .. SPDX-License-Identifier: GPL-2.0-only
3 .. Copyright (C) 2022-2023 Isovalent, Inc.
10 - ``BPF_MAP_TYPE_HASH`` was introduced in kernel version 3.19
11 - ``BPF_MAP_TYPE_PERCPU_HASH`` was introduced in version 4.6
12 - Both ``BPF_MAP_TYPE_LRU_HASH`` and ``BPF_MAP_TYPE_LRU_PERCPU_HASH``
16 purpose hash map storage. Both the key and the value can be structs,
20 to the max_entries limit that you specify. Hash maps use pre-allocation
22 used to disable pre-allocation when it is too memory expensive.
25 CPU. The per-cpu values are stored internally in an array.
32 shared across CPUs but it is possible to request a per CPU LRU list with
[all …]
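A hedged BPF-side sketch of the hash-map usage described above: a pre-allocated BPF_MAP_TYPE_HASH keyed by PID that counts execve calls per process. The map and program names are illustrative assumptions, not taken from the document.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, __u32);	/* pid */
	__type(value, __u64);	/* event count */
} counts SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_execve")
int count_execve(void *ctx)
{
	__u32 pid = bpf_get_current_pid_tgid() >> 32;
	__u64 init = 1, *val;

	/* Look up the existing counter, or insert a fresh one. */
	val = bpf_map_lookup_elem(&counts, &pid);
	if (val)
		__sync_fetch_and_add(val, 1);
	else
		bpf_map_update_elem(&counts, &pid, &init, BPF_NOEXIST);
	return 0;
}

char _license[] SEC("license") = "GPL";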
/kernel/linux/linux-6.6/samples/bpf/
map_perf_test_user.c
1 // SPDX-License-Identifier: GPL-2.0-only
82 static void test_hash_prealloc(int cpu) in test_hash_prealloc() argument
90 printf("%d:hash_map_perf pre-alloc %lld events per sec\n", in test_hash_prealloc()
91 cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time)); in test_hash_prealloc()
106 * It is fine that the user requests for a map with in pre_test_lru_hash_lookup()
108 * may return not found. For LRU map, we are not interested in pre_test_lru_hash_lookup()
109 * in such small map performance. in pre_test_lru_hash_lookup()
120 static void do_test_lru(enum test_type test, int cpu) in do_test_lru() argument
129 if (test == INNER_LRU_HASH_PREALLOC && cpu) { in do_test_lru()
130 /* If CPU is not 0, create inner_lru hash map and insert the fd in do_test_lru()
[all …]
/kernel/linux/linux-5.10/samples/bpf/
map_perf_test_user.c
1 // SPDX-License-Identifier: GPL-2.0-only
83 static void test_hash_prealloc(int cpu) in test_hash_prealloc() argument
91 printf("%d:hash_map_perf pre-alloc %lld events per sec\n", in test_hash_prealloc()
92 cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time)); in test_hash_prealloc()
107 * It is fine that the user requests for a map with in pre_test_lru_hash_lookup()
109 * may return not found. For LRU map, we are not interested in pre_test_lru_hash_lookup()
110 * in such small map performance. in pre_test_lru_hash_lookup()
121 static void do_test_lru(enum test_type test, int cpu) in do_test_lru() argument
130 if (test == INNER_LRU_HASH_PREALLOC && cpu) { in do_test_lru()
131 /* If CPU is not 0, create inner_lru hash map and insert the fd in do_test_lru()
[all …]
/kernel/linux/linux-5.10/tools/lib/perf/
cpumap.c
1 // SPDX-License-Identifier: GPL-2.0-only
18 cpus->nr = 1; in perf_cpu_map__dummy_new()
19 cpus->map[0] = -1; in perf_cpu_map__dummy_new()
20 refcount_set(&cpus->refcnt, 1); in perf_cpu_map__dummy_new()
26 static void cpu_map__delete(struct perf_cpu_map *map) in cpu_map__delete() argument
28 if (map) { in cpu_map__delete()
29 WARN_ONCE(refcount_read(&map->refcnt) != 0, in cpu_map__delete()
31 free(map); in cpu_map__delete()
35 struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map) in perf_cpu_map__get() argument
37 if (map) in perf_cpu_map__get()
[all …]
