| /tools/testing/selftests/rseq/ |
| D | basic_test.c |
|     18  cpu_set_t affinity, test_affinity;                   in test_cpu_pointer()  local
|     21  sched_getaffinity(0, sizeof(affinity), &affinity);   in test_cpu_pointer()
|     24  if (CPU_ISSET(i, &affinity)) {                        in test_cpu_pointer()
|     39  sched_setaffinity(0, sizeof(affinity), &affinity);   in test_cpu_pointer()
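The test_cpu_pointer() fragments above follow the standard glibc CPU-affinity walk. A minimal sketch of that pattern, with the per-CPU rseq checks omitted and the helper name invented:

    #define _GNU_SOURCE
    #include <sched.h>

    /* Sketch: read the current affinity mask, pin the thread to each
     * allowed CPU in turn, then restore the original mask. */
    static void walk_allowed_cpus(void)
    {
            cpu_set_t affinity, test_affinity;
            int i;

            sched_getaffinity(0, sizeof(affinity), &affinity);
            for (i = 0; i < CPU_SETSIZE; i++) {
                    if (!CPU_ISSET(i, &affinity))
                            continue;
                    CPU_ZERO(&test_affinity);
                    CPU_SET(i, &test_affinity);
                    sched_setaffinity(0, sizeof(test_affinity), &test_affinity);
                    /* ...per-CPU checks would run here... */
            }
            sched_setaffinity(0, sizeof(affinity), &affinity);
    }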
|
| /tools/perf/util/ |
| D | affinity.h |
|      7  struct affinity {  struct
|     13  void affinity__cleanup(struct affinity *a);  argument
|     14  void affinity__set(struct affinity *a, int cpu);
|     15  int affinity__setup(struct affinity *a);
|
| D | affinity.c |
|     24  int affinity__setup(struct affinity *a)                in affinity__setup()
|     48  void affinity__set(struct affinity *a, int cpu)        in affinity__set()
|     71  static void __affinity__cleanup(struct affinity *a)    in __affinity__cleanup()
|     81  void affinity__cleanup(struct affinity *a)             in affinity__cleanup()
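affinity.h and affinity.c form a small save/set/restore API. A minimal usage sketch, assuming perf's internal util/affinity.h header; the caller visit_cpus() is hypothetical, not code from the tree:

    #include "util/affinity.h"  /* struct affinity, affinity__setup/__set/__cleanup */

    /* Hypothetical caller: migrate the calling thread to each CPU in turn,
     * then put the original scheduler affinity back. */
    static int visit_cpus(const int *cpus, int nr_cpus)
    {
            struct affinity affinity;
            int i;

            if (affinity__setup(&affinity) < 0)        /* remember the current mask */
                    return -1;

            for (i = 0; i < nr_cpus; i++)
                    affinity__set(&affinity, cpus[i]); /* migrate to cpus[i] */

            affinity__cleanup(&affinity);              /* restore the remembered mask */
            return 0;
    }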
|
| D | mmap.c |
|     97  static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)  in perf_mmap__aio_bind()  argument
|    105  if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {   in perf_mmap__aio_bind()
|    141  struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused)  in perf_mmap__aio_bind()
|    175  ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);   in perf_mmap__aio_mmap()
|    268  if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)    in perf_mmap__setup_affinity_mask()
|    270  else if (mp->affinity == PERF_AFFINITY_CPU)                       in perf_mmap__setup_affinity_mask()
|    284  if (mp->affinity != PERF_AFFINITY_SYS &&                          in mmap__mmap()
|
| D | evlist.h |
|    201  int affinity, int flush, int comp_level);
|    368  struct affinity *affinity;  member
|    381  #define evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) \  argument
|    382  for ((evlist_cpu_itr) = evlist__cpu_begin(evlist, affinity); \
|    387  struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity);
|
| D | evlist.c |
|    406  struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)  in evlist__cpu_begin()  argument
|    415  .affinity = affinity,                                            in evlist__cpu_begin()
|    423  if (itr.affinity) {                                              in evlist__cpu_begin()
|    425  affinity__set(itr.affinity, itr.cpu.cpu);                        in evlist__cpu_begin()
|    454  if (evlist_cpu_itr->affinity)                                    in evlist_cpu_iterator__next()
|    455  affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu);  in evlist_cpu_iterator__next()
|    500  struct affinity saved_affinity, *affinity = NULL;                in __evlist__disable()  local
|    507  affinity = &saved_affinity;                                      in __evlist__disable()
|    512  evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {         in __evlist__disable()
|    530  affinity__cleanup(affinity);                                     in __evlist__disable()
|    [all …]
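The __evlist__disable() fragments combine the iterator from evlist.h with the affinity API: the struct affinity argument is optional, and when present the iterator keeps migrating the calling thread to whichever CPU it is visiting. A sketch of that pattern, assuming perf's internal headers; the function and its loop body are hypothetical:

    #include <stdbool.h>
    #include "util/evlist.h"
    #include "util/affinity.h"

    /* Hypothetical walk over every (evsel, cpu) pair of an evlist, optionally
     * pinning the calling thread to the CPU currently being visited. */
    static void walk_evlist_cpus(struct evlist *evlist, bool pin)
    {
            struct evlist_cpu_iterator evlist_cpu_itr;
            struct affinity saved_affinity, *affinity = NULL;

            if (pin && affinity__setup(&saved_affinity) == 0)
                    affinity = &saved_affinity;

            evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
                    /* ...operate on evlist_cpu_itr.evsel at evlist_cpu_itr.cpu... */
            }

            if (affinity)
                    affinity__cleanup(affinity);
    }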
|
| D | mmap.h | 48 int nr_cblocks, affinity, flush, comp_level; member
|
| D | record.h | 72 int affinity; member
|
| D | Build | 96 perf-util-y += affinity.o
|
| /tools/virtio/ringtest/ |
| D | run-on-all.sh |
|     20  "$@" --host-affinity $HOST_AFFINITY --guest-affinity $cpu
|     24  "$@" --host-affinity $HOST_AFFINITY
|
| /tools/testing/selftests/rcutorture/bin/ |
| D | kvm-test-1-run-batch.sh |
|     63  print "echo No CPU-affinity information, so no taskset command.";
|     69  print "echo " scenario ": Bogus CPU-affinity information, so no taskset command.";
|
| /tools/perf/ |
| D | builtin-record.c |
|     98  struct mmap_cpu_mask affinity;  member
|   1304  if (opts->affinity != PERF_AFFINITY_SYS)                                  in record__mmap_evlist()
|   1310  opts->nr_cblocks, opts->affinity,                                         in record__mmap_evlist()
|   1517  if (rec->opts.affinity != PERF_AFFINITY_SYS &&                            in record__adjust_affinity()
|   1518  !bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits,      in record__adjust_affinity()
|   1519  thread->mask->affinity.nbits)) {                                          in record__adjust_affinity()
|   1520  bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits);  in record__adjust_affinity()
|   1521  bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits,      in record__adjust_affinity()
|   1522  map->affinity_mask.bits, thread->mask->affinity.nbits);                  in record__adjust_affinity()
|   1523  sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),       in record__adjust_affinity()
|   [all …]
|
| D | builtin-stat.c |
|    369  struct affinity saved_affinity, *affinity;                      in read_affinity_counters()  local
|    375  affinity = NULL;                                                 in read_affinity_counters()
|    379  affinity = &saved_affinity;                                      in read_affinity_counters()
|    381  evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {    in read_affinity_counters()
|    390  if (affinity)                                                    in read_affinity_counters()
|    706  struct affinity saved_affinity, *affinity = NULL;               in __run_perf_stat()  local
|    723  affinity = &saved_affinity;                                      in __run_perf_stat()
|    738  evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {    in __run_perf_stat()
|    795  evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {    in __run_perf_stat()
|    804  evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {    in __run_perf_stat()
|    [all …]
|
| /tools/testing/selftests/bpf/ |
| D | bench.c |
|     19  .affinity = false,
|    344  env.affinity = true;  in parse_arg()
|    350  env.affinity = true;  in parse_arg()
|    358  env.affinity = true;  in parse_arg()
|    665  if (env.affinity)     in setup_benchmark()
|    688  if (env.affinity)     in setup_benchmark()
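bench.c only records the request in env.affinity; how setup_benchmark() then pins its producer/consumer threads is not visible in this excerpt. One common way to act on such a flag is pthread_setaffinity_np(); a hedged sketch with an invented helper name, not the harness's actual code:

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>

    /* Hypothetical helper: pin an already-created benchmark thread to one CPU. */
    static int pin_thread_to_cpu(pthread_t thread, int cpu)
    {
            cpu_set_t set;

            CPU_ZERO(&set);
            CPU_SET(cpu, &set);
            return pthread_setaffinity_np(thread, sizeof(set), &set);
    }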
|
| D | bench.h | 27 bool affinity; member
|
| /tools/testing/selftests/seccomp/ |
| D | seccomp_benchmark.c |
|    141  void affinity(void)  in affinity()  function
|    207  affinity();          in main()
|
| /tools/perf/Documentation/ |
| D | perf-record.txt |
|    560  --affinity=mode::
|    561  Set affinity mask of trace reading thread according to the policy defined by 'mode' value:
|    563  - node - thread affinity mask is set to NUMA node cpu mask of the processed mmap buffer
|    564  - cpu - thread affinity mask is set to cpu of the processed mmap buffer
|    782  define CPUs to be monitored by a thread and affinity mask of that thread
|    785  <cpus mask 1>/<affinity mask 1>:<cpus mask 2>/<affinity mask 2>:...
|    787  CPUs or affinity masks must not overlap with other corresponding masks.
|    796  the first thread monitors CPUs 0 and 2-4 with the affinity mask 2-4,
|    797  the second monitors CPUs 1 and 5-7 with the affinity mask 5-7.
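A hedged usage sketch of the two non-default --affinity modes documented above; the event and workload are placeholders:

    perf record --affinity=node -e cycles -- ./workload
    perf record --affinity=cpu  -e cycles -- ./workload

For the <cpus mask>/<affinity mask> spec, the two-thread example wording above would correspond to a value such as 0,2-4/2-4:1,5-7/5-7; the option that takes this value is outside this excerpt.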
|
| /tools/testing/selftests/bpf/benchs/ |
| D | bench_bpf_hashmap_lookup.c | 255 if (env.affinity) in hashmap_report_final()
|