/tools/perf/util/ |
D | thread_map.c |
     52  struct thread_map *threads;  in thread_map__new_by_pid() local
     63  threads = thread_map__alloc(items);  in thread_map__new_by_pid()
     64  if (threads != NULL) {  in thread_map__new_by_pid()
     66  thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));  in thread_map__new_by_pid()
     67  threads->nr = items;  in thread_map__new_by_pid()
     68  atomic_set(&threads->refcnt, 1);  in thread_map__new_by_pid()
     75  return threads;  in thread_map__new_by_pid()
     80  struct thread_map *threads = thread_map__alloc(1);  in thread_map__new_by_tid() local
     82  if (threads != NULL) {  in thread_map__new_by_tid()
     83  thread_map__set_pid(threads, 0, tid);  in thread_map__new_by_tid()
    [all …]
|
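Taken together, the thread_map.c hits above show the constructor pattern both thread_map__new_by_pid() and thread_map__new_by_tid() follow: allocate, fill in the pid slots, set nr, and initialize the refcount to 1 so the caller owns the first reference. A minimal standalone sketch of that pattern; the struct layout and helper names here are simplified stand-ins, not the real perf definitions:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Simplified stand-in for perf's thread_map: a refcounted,
       flexible-array list of thread ids (perf's refcnt is an atomic_t). */
    struct thread_map {
            int refcnt;
            int nr;                 /* valid entries in map[] */
            pid_t map[];
    };

    static struct thread_map *thread_map_alloc(int nr)
    {
            return malloc(sizeof(struct thread_map) + nr * sizeof(pid_t));
    }

    /* Mirrors thread_map__new_by_tid(): a one-entry map for a single tid. */
    static struct thread_map *thread_map_new_by_tid(pid_t tid)
    {
            struct thread_map *threads = thread_map_alloc(1);

            if (threads != NULL) {
                    threads->map[0] = tid;
                    threads->nr = 1;
                    threads->refcnt = 1;    /* the caller owns this reference */
            }
            return threads;
    }

    int main(void)
    {
            struct thread_map *threads = thread_map_new_by_tid(getpid());

            if (threads)
                    printf("map of %d thread(s), first tid %d\n",
                           threads->nr, (int)threads->map[0]);
            free(threads);
            return 0;
    }
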
D | thread_map.h |
     31  size_t thread_map__fprintf(struct thread_map *threads, FILE *fp);
     33  static inline int thread_map__nr(struct thread_map *threads)  in thread_map__nr() argument
     35  return threads ? threads->nr : 1;  in thread_map__nr()
     54  void thread_map__read_comms(struct thread_map *threads);
|
D | values.c |
     14  values->threads = 0;  in perf_read_values_init()
     33  for (i = 0; i < values->threads; i++)  in perf_read_values_destroy()
     62  for (i = 0; i < values->threads; i++)  in perf_read_values__findnew_thread()
     66  if (values->threads == values->threads_max)  in perf_read_values__findnew_thread()
     69  i = values->threads++;  in perf_read_values__findnew_thread()
     91  for (i = 0; i < values->threads; i++) {  in perf_read_values__enlarge_counters()
    144  for (i = 0; i < values->threads; i++) {  in perf_read_values__display_pretty()
    165  for (i = 0; i < values->threads; i++) {  in perf_read_values__display_pretty()
    188  for (i = 0; i < values->threads; i++) {  in perf_read_values__display_raw()
    204  for (i = 0; i < values->threads; i++) {  in perf_read_values__display_raw()
    [all …]
|
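The perf_read_values__findnew_thread() hits (lines 62, 66, 69) outline a classic findnew idiom: scan the table for the pid, grow the table when it is full, and otherwise claim the next row. A condensed sketch of that idiom with hypothetical names and the grow step stubbed out:

    #include <sys/types.h>

    struct read_values {
            int threads;            /* rows in use */
            int threads_max;        /* allocated rows */
            pid_t *pid;             /* one pid per row; counter columns omitted */
    };

    /* Return the row index for pid, appending a new row when it is missing. */
    int findnew_thread(struct read_values *values, pid_t pid)
    {
            int i;

            for (i = 0; i < values->threads; i++)
                    if (values->pid[i] == pid)
                            return i;                   /* found: reuse the row */

            if (values->threads == values->threads_max)
                    return -1;      /* the real code reallocs the tables here */

            i = values->threads++;                      /* claim the next row */
            values->pid[i] = pid;
            return i;
    }
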
D | evlist.c |
     37  struct thread_map *threads)  in perf_evlist__init() argument
     44  perf_evlist__set_maps(evlist, cpus, threads);  in perf_evlist__init()
    120  thread_map__put(evlist->threads);  in perf_evlist__delete()
    122  evlist->threads = NULL;  in perf_evlist__delete()
    143  thread_map__put(evsel->threads);  in __perf_evlist__propagate_maps()
    144  evsel->threads = thread_map__get(evlist->threads);  in __perf_evlist__propagate_maps()
    334  return thread_map__nr(evlist->threads);  in perf_evlist__nr_threads()
    476  int nr_threads = thread_map__nr(evlist->threads);  in perf_evlist__alloc_pollfd()
    604  if (!evsel->system_wide && evlist->threads && thread >= 0)  in perf_evlist__set_sid_idx()
    605  sid->tid = thread_map__pid(evlist->threads, thread);  in perf_evlist__set_sid_idx()
    [all …]
|
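Lines 143-144 show the refcount discipline behind map propagation: drop the evsel's old reference, then take a fresh one on the evlist's map, so every evsel ends up sharing a single thread_map. A sketch of the get/put pair on a stand-in struct (again simplified names, not perf's API):

    #include <stdlib.h>

    struct thread_map { int refcnt; /* nr and map[] omitted */ };

    static struct thread_map *thread_map_get(struct thread_map *map)
    {
            if (map)
                    map->refcnt++;          /* perf increments this atomically */
            return map;
    }

    static void thread_map_put(struct thread_map *map)
    {
            if (map && --map->refcnt == 0)
                    free(map);
    }

    struct evsel  { struct thread_map *threads; };
    struct evlist { struct thread_map *threads; };

    /* Mirrors __perf_evlist__propagate_maps(): drop the evsel's old map,
       then share the evlist's map through a fresh reference. */
    void propagate_maps(struct evlist *evlist, struct evsel *evsel)
    {
            thread_map_put(evsel->threads);
            evsel->threads = thread_map_get(evlist->threads);
    }
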
D | python.c |
    481  struct thread_map *threads;  member
    494  pthreads->threads = thread_map__new(pid, tid, uid);  in pyrf_thread_map__init()
    495  if (pthreads->threads == NULL)  in pyrf_thread_map__init()
    502  thread_map__put(pthreads->threads);  in pyrf_thread_map__delete()
    510  return pthreads->threads->nr;  in pyrf_thread_map__length()
    517  if (i >= pthreads->threads->nr)  in pyrf_thread_map__item()
    520  return Py_BuildValue("i", pthreads->threads->map[i]);  in pyrf_thread_map__item()
    672  struct thread_map *threads = NULL;  in pyrf_evsel__open() local
    682  threads = ((struct pyrf_thread_map *)pthreads)->threads;  in pyrf_evsel__open()
    692  if (perf_evsel__open(evsel, cpus, threads) < 0) {  in pyrf_evsel__open()
    [all …]
|
D | values.h |
      7  int threads;  member
|
D | machine.h |
     32  struct rb_root threads;  member
    233  struct target *target, struct thread_map *threads,
    238  struct thread_map *threads, bool data_mmap,  in machine__synthesize_threads() argument
    241  return __machine__synthesize_threads(machine, NULL, target, threads,  in machine__synthesize_threads()
|
/tools/perf/tests/ |
D | openat-syscall.c |
     13  struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);  in test__openat_syscall_event() local
     17  if (threads == NULL) {  in test__openat_syscall_event()
     29  if (perf_evsel__open_per_thread(evsel, threads) < 0) {  in test__openat_syscall_event()
     54  perf_evsel__close_fd(evsel, 1, threads->nr);  in test__openat_syscall_event()
     58  thread_map__put(threads);  in test__openat_syscall_event()
|
D | task-exit.c |
     47  struct thread_map *threads;  in test__task_exit() local
     64  threads = thread_map__new_by_tid(-1);  in test__task_exit()
     65  if (!cpus || !threads) {  in test__task_exit()
     71  perf_evlist__set_maps(evlist, cpus, threads);  in test__task_exit()
     74  threads = NULL;  in test__task_exit()
    126  thread_map__put(threads);  in test__task_exit()
|
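task-exit.c also illustrates the ownership idiom most of these tests share: once perf_evlist__set_maps() has been called (line 71), the evlist owns the map references, so the test clears its local pointer (line 74) and the thread_map__put() on the exit path (line 126) only frees anything when the hand-off never happened. A hypothetical miniature of that hand-off, using the stand-in types from the sketches above:

    #include <stdlib.h>

    struct thread_map { int refcnt; };
    struct evlist     { struct thread_map *threads; };

    static void thread_map_put(struct thread_map *map)
    {
            if (map && --map->refcnt == 0)
                    free(map);
    }

    /* Takes over the caller's reference, like perf_evlist__set_maps(). */
    static void evlist_set_maps(struct evlist *evlist, struct thread_map *threads)
    {
            evlist->threads = threads;
    }

    void test_body(struct evlist *evlist, struct thread_map *threads)
    {
            evlist_set_maps(evlist, threads);
            threads = NULL;         /* the evlist owns the reference now */

            /* ... exercise the evlist ... */

            thread_map_put(threads);        /* no-op here; it only frees on
                                               error paths taken before the
                                               set_maps() hand-off */
    }
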
D | sw-clock.c |
     38  struct thread_map *threads;  in __test__sw_clock_freq() local
     56  threads = thread_map__new_by_tid(getpid());  in __test__sw_clock_freq()
     57  if (!cpus || !threads) {  in __test__sw_clock_freq()
     63  perf_evlist__set_maps(evlist, cpus, threads);  in __test__sw_clock_freq()
     66  threads = NULL;  in __test__sw_clock_freq()
    119  thread_map__put(threads);  in __test__sw_clock_freq()
|
D | mmap-basic.c |
     23  struct thread_map *threads;  in test__basic_mmap() local
     35  threads = thread_map__new(-1, getpid(), UINT_MAX);  in test__basic_mmap()
     36  if (threads == NULL) {  in test__basic_mmap()
     62  perf_evlist__set_maps(evlist, cpus, threads);  in test__basic_mmap()
     79  if (perf_evsel__open(evsels[i], cpus, threads) < 0) {  in test__basic_mmap()
    142  threads = NULL;  in test__basic_mmap()
    146  thread_map__put(threads);  in test__basic_mmap()
|
D | openat-syscall-all-cpus.c |
     17  struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);  in test__openat_syscall_event_on_all_cpus() local
     21  if (threads == NULL) {  in test__openat_syscall_event_on_all_cpus()
     41  if (perf_evsel__open(evsel, cpus, threads) < 0) {  in test__openat_syscall_event_on_all_cpus()
    109  perf_evsel__close_fd(evsel, 1, threads->nr);  in test__openat_syscall_event_on_all_cpus()
    115  thread_map__put(threads);  in test__openat_syscall_event_on_all_cpus()
|
D | mmap-thread-lookup.c |
     26  static struct thread_data threads[THREADS];  variable
     75  struct thread_data *td = &threads[i];  in thread_create()
     95  struct thread_data *td0 = &threads[0];  in threads_create()
    112  struct thread_data *td0 = &threads[0];  in threads_destroy()
    121  err = pthread_join(threads[i].pt, NULL);  in threads_destroy()
    182  struct thread_data *td = &threads[i];  in mmap_events()
|
D | keep-tracking.c |
     63  struct thread_map *threads = NULL;  in test__keep_tracking() local
     70  threads = thread_map__new(-1, getpid(), UINT_MAX);  in test__keep_tracking()
     71  CHECK_NOT_NULL__(threads);  in test__keep_tracking()
     79  perf_evlist__set_maps(evlist, cpus, threads);  in test__keep_tracking()
    148  thread_map__put(threads);  in test__keep_tracking()
|
D | code-reading.c |
    451  struct thread_map *threads = NULL;  in do_test_code_reading() local
    493  threads = thread_map__new_by_tid(pid);  in do_test_code_reading()
    494  if (!threads) {  in do_test_code_reading()
    499  ret = perf_event__synthesize_thread_map(NULL, threads,  in do_test_code_reading()
    527  perf_evlist__set_maps(evlist, cpus, threads);  in do_test_code_reading()
    595  thread_map__put(threads);  in do_test_code_reading()
|
D | switch-tracking.c |
    321  struct thread_map *threads = NULL;  in test__switch_tracking() local
    329  threads = thread_map__new(-1, getpid(), UINT_MAX);  in test__switch_tracking()
    330  if (!threads) {  in test__switch_tracking()
    347  perf_evlist__set_maps(evlist, cpus, threads);  in test__switch_tracking()
    564  thread_map__put(threads);  in test__switch_tracking()
|
/tools/perf/python/ |
D | twatch.py |
     20  threads = perf.thread_map(thread)
     35  evsel.open(cpus = cpus, threads = threads);
     36  evlist = perf.evlist(cpus, threads)
|
/tools/perf/bench/ |
D | sched-pipe.c |
     80  struct thread_data threads[2], *td;  in bench_sched_pipe() local
    103  td = threads + t;  in bench_sched_pipe()
    120  td = threads + t;  in bench_sched_pipe()
    127  td = threads + t;  in bench_sched_pipe()
    138  worker_thread(threads + 0);  in bench_sched_pipe()
    141  worker_thread(threads + 1);  in bench_sched_pipe()
|
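bench/sched-pipe's two thread_data entries each hold one end of a pipe pair, and worker_thread() bounces a token between them so that every hop forces a scheduler switch. A self-contained miniature of that ping-pong, with a fixed loop count and thread mode only (the real bench also times the run, parses options, and supports a process mode):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    #define LOOPS 10000

    struct thread_data {
            int nr;                 /* 0 sends first, 1 echoes */
            int pipe_read;
            int pipe_write;
    };

    static void *worker_thread(void *arg)
    {
            struct thread_data *td = arg;
            char m = 0;
            int i;

            for (i = 0; i < LOOPS; i++) {
                    if (td->nr == 0) {
                            if (write(td->pipe_write, &m, 1) != 1 ||
                                read(td->pipe_read, &m, 1) != 1)
                                    break;
                    } else {
                            if (read(td->pipe_read, &m, 1) != 1 ||
                                write(td->pipe_write, &m, 1) != 1)
                                    break;
                    }
            }
            return NULL;
    }

    int main(void)
    {
            struct thread_data threads[2];
            int p1[2], p2[2];
            pthread_t pt[2];
            int t;

            if (pipe(p1) || pipe(p2))
                    return 1;

            threads[0] = (struct thread_data){ 0, p2[0], p1[1] };
            threads[1] = (struct thread_data){ 1, p1[0], p2[1] };

            for (t = 0; t < 2; t++)
                    pthread_create(&pt[t], NULL, worker_thread, threads + t);
            for (t = 0; t < 2; t++)
                    pthread_join(pt[t], NULL);

            printf("%d round trips completed\n", LOOPS);
            return 0;
    }
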
D | numa.c |
    144  struct thread_data *threads;  member
    573  td = g->threads + t;  in parse_setup_cpu_list()
    695  td = g->threads + t;  in parse_setup_node_list()
    854  g->threads[task_nr].curr_cpu = cpu;  in update_curr_cpu()
    880  td = g->threads + task_nr;  in count_process_nodes()
    916  td = g->threads + task_nr;  in count_node_processes()
    983  struct thread_data *td = g->threads + t;  in calc_convergence()
   1215  this_cpu = g->threads[task_nr].curr_cpu;  in worker_thread()
   1293  td = g->threads + task_nr;  in worker_process()
   1308  td = g->threads + task_nr;  in worker_process()
    [all …]
|
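bench/numa keeps every worker's state in one flat g->threads array and addresses it by a global task number, as the g->threads + task_nr hits above show. A sketch of that flat indexing; the process-major layout formula below is an assumption for illustration, not taken from the hits:

    struct thread_data { int curr_cpu; /* ... */ };

    struct global_info {
            int nr_proc;                    /* worker processes */
            int nr_threads;                 /* threads per process */
            struct thread_data *threads;    /* nr_proc * nr_threads entries */
    };

    /* Assumed process-major layout; the real bench's mapping may differ. */
    struct thread_data *task_data(struct global_info *g,
                                  int process_nr, int thread_nr)
    {
            int task_nr = process_nr * g->nr_threads + thread_nr;

            return g->threads + task_nr;
    }
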
/tools/usb/ |
D | ffs-test.c |
    322  } threads[] = {  variable
    364  if (t != threads) {  in cleanup_thread()
    631  init_thread(threads);  in main()
    632  ep0_init(threads, legacy_descriptors);  in main()
    634  for (i = 1; i < sizeof threads / sizeof *threads; ++i)  in main()
    635  init_thread(threads + i);  in main()
    637  for (i = 1; i < sizeof threads / sizeof *threads; ++i)  in main()
    638  start_thread(threads + i);  in main()
    640  start_thread_helper(threads);  in main()
    642  for (i = 1; i < sizeof threads / sizeof *threads; ++i)  in main()
    [all …]
|
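The ffs-test.c loops compute the element count with the classic sizeof threads / sizeof *threads idiom, and start from index 1 because threads[0] is the ep0 entry handled separately. A tiny illustration of the idiom; the entry names below are made up:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof(*(a)))

    static struct { const char *name; } threads[] = {
            { "ep0" }, { "ep1in" }, { "ep2out" },   /* hypothetical entries */
    };

    int main(void)
    {
            size_t i;

            /* Start workers from index 1; slot 0 is handled separately,
               as ffs-test does for its ep0 thread. */
            for (i = 1; i < ARRAY_SIZE(threads); ++i)
                    printf("would start %s\n", threads[i].name);
            return 0;
    }
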
/tools/testing/selftests/powerpc/dscr/ |
D | dscr_default_test.c |
     65  pthread_t threads[THREADS];  in dscr_default() local
     77  if (pthread_create(&threads[i], NULL, do_test, (void *)i)) {  in dscr_default()
    106  if (pthread_join(threads[i], (void **)&(status[i]))) {  in dscr_default()
|
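dscr_default_test.c passes the loop index through pthread_create()'s void * argument ((void *)i at line 77) and collects each thread's result through pthread_join() (line 106). A minimal standalone version of that pattern, with a single status variable instead of the test's status array:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define THREADS 8

    /* The loop index travels inside the void * argument and the
       per-thread result comes back through pthread_join(). */
    static void *do_test(void *arg)
    {
            uintptr_t i = (uintptr_t)arg;           /* recover the index */

            printf("thread %lu running\n", (unsigned long)i);
            return (void *)0;                       /* 0 means success */
    }

    int main(void)
    {
            pthread_t threads[THREADS];
            void *status;
            uintptr_t i;

            for (i = 0; i < THREADS; i++)
                    if (pthread_create(&threads[i], NULL, do_test, (void *)i))
                            return 1;

            for (i = 0; i < THREADS; i++) {
                    if (pthread_join(threads[i], &status))
                            return 1;
                    if (status)                     /* non-zero: thread failed */
                            return 1;
            }
            return 0;
    }
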
/tools/perf/arch/x86/tests/ |
D | perf-time-to-tsc.c |
     50  struct thread_map *threads = NULL;  in test__perf_time_to_tsc() local
     62  threads = thread_map__new(-1, getpid(), UINT_MAX);  in test__perf_time_to_tsc()
     63  CHECK_NOT_NULL__(threads);  in test__perf_time_to_tsc()
     71  perf_evlist__set_maps(evlist, cpus, threads);  in test__perf_time_to_tsc()
|
/tools/virtio/virtio-trace/ |
D | README |
     14  The read/write threads hold it.
     17  the controller wake read/write threads.
     18  5) The read/write threads start to read trace data from ring-buffers and
     20  6) If the controller receives a stop order from a host, the read/write threads
     31  trace-agent-ctl.c: includes controller function for read/write threads
     32  trace-agent-rw.c: includes read/write threads function
    103  read/write threads in the agent wait for start order from host. If you add -o
|
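The README lines describe a simple control protocol: the controller receives start/stop orders from the host and wakes or parks the read/write threads accordingly. A hedged sketch of one way to express that wake-up in pthreads; the real agent's mechanism lives in trace-agent-ctl.c and may differ:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t ctl_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  ctl_wake = PTHREAD_COND_INITIALIZER;
    static bool tracing_on;

    /* Controller side: flip the flag on a start/stop order and wake
       every read/write thread. */
    void controller_order(bool start)
    {
            pthread_mutex_lock(&ctl_lock);
            tracing_on = start;
            pthread_cond_broadcast(&ctl_wake);
            pthread_mutex_unlock(&ctl_lock);
    }

    /* Worker side: park until a start order arrives. */
    void rw_thread_wait_for_start(void)
    {
            pthread_mutex_lock(&ctl_lock);
            while (!tracing_on)
                    pthread_cond_wait(&ctl_wake, &ctl_lock);
            pthread_mutex_unlock(&ctl_lock);
    }
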
/tools/perf/Documentation/ |
D | perf-lock.txt |
     27  'perf lock info' shows metadata like threads or addresses
     57  --threads::
|
D | perf-sched.txt |
     27  via perf sched record. (this is done by starting up mockup threads
     29  threads can then replay the timings (CPU runtime and sleep patterns)
|