Lines Matching refs:sched (tools/perf/builtin-sched.c)

109 	int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
112 int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
115 int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
119 int (*fork_event)(struct perf_sched *sched, union perf_event *event,
122 int (*migrate_task_event)(struct perf_sched *sched,
206 static void burn_nsecs(struct perf_sched *sched, u64 nsecs) in burn_nsecs() argument
212 } while (T1 + sched->run_measurement_overhead < T0 + nsecs); in burn_nsecs()
225 static void calibrate_run_measurement_overhead(struct perf_sched *sched) in calibrate_run_measurement_overhead() argument
232 burn_nsecs(sched, 0); in calibrate_run_measurement_overhead()
237 sched->run_measurement_overhead = min_delta; in calibrate_run_measurement_overhead()
242 static void calibrate_sleep_measurement_overhead(struct perf_sched *sched) in calibrate_sleep_measurement_overhead() argument
255 sched->sleep_measurement_overhead = min_delta; in calibrate_sleep_measurement_overhead()
288 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task, in add_sched_event_run() argument
298 sched->nr_run_events_optimized++; in add_sched_event_run()
308 sched->nr_run_events++; in add_sched_event_run()
311 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task, in add_sched_event_wakeup() argument
322 sched->targetless_wakeups++; in add_sched_event_wakeup()
326 sched->multitarget_wakeups++; in add_sched_event_wakeup()
335 sched->nr_wakeup_events++; in add_sched_event_wakeup()
338 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task, in add_sched_event_sleep() argument
345 sched->nr_sleep_events++; in add_sched_event_sleep()
348 static struct task_desc *register_pid(struct perf_sched *sched, in register_pid() argument
354 if (sched->pid_to_task == NULL) { in register_pid()
357 BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL); in register_pid()
360 BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) * in register_pid()
363 sched->pid_to_task[pid_max++] = NULL; in register_pid()
366 task = sched->pid_to_task[pid]; in register_pid()
373 task->nr = sched->nr_tasks; in register_pid()
379 add_sched_event_sleep(sched, task, 0, 0); in register_pid()
381 sched->pid_to_task[pid] = task; in register_pid()
382 sched->nr_tasks++; in register_pid()
383 sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *)); in register_pid()
384 BUG_ON(!sched->tasks); in register_pid()
385 sched->tasks[task->nr] = task; in register_pid()
388 printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm); in register_pid()
394 static void print_task_traces(struct perf_sched *sched) in print_task_traces() argument
399 for (i = 0; i < sched->nr_tasks; i++) { in print_task_traces()
400 task = sched->tasks[i]; in print_task_traces()
406 static void add_cross_task_wakeups(struct perf_sched *sched) in add_cross_task_wakeups() argument
411 for (i = 0; i < sched->nr_tasks; i++) { in add_cross_task_wakeups()
412 task1 = sched->tasks[i]; in add_cross_task_wakeups()
414 if (j == sched->nr_tasks) in add_cross_task_wakeups()
416 task2 = sched->tasks[j]; in add_cross_task_wakeups()
417 add_sched_event_wakeup(sched, task1, 0, task2); in add_cross_task_wakeups()
421 static void perf_sched__process_event(struct perf_sched *sched, in perf_sched__process_event() argument
428 burn_nsecs(sched, atom->duration); in perf_sched__process_event()
462 static int self_open_counters(struct perf_sched *sched, unsigned long cur_task) in self_open_counters() argument
481 if (sched->force) { in self_open_counters()
483 limit.rlim_cur += sched->nr_tasks - cur_task; in self_open_counters()
517 struct perf_sched *sched; member
525 struct perf_sched *sched = parms->sched; in thread_func() local
540 ret = pthread_mutex_lock(&sched->start_work_mutex); in thread_func()
542 ret = pthread_mutex_unlock(&sched->start_work_mutex); in thread_func()
549 perf_sched__process_event(sched, this_task->atoms[i]); in thread_func()
557 ret = pthread_mutex_lock(&sched->work_done_wait_mutex); in thread_func()
559 ret = pthread_mutex_unlock(&sched->work_done_wait_mutex); in thread_func()
565 static void create_tasks(struct perf_sched *sched) in create_tasks() argument
577 err = pthread_mutex_lock(&sched->start_work_mutex); in create_tasks()
579 err = pthread_mutex_lock(&sched->work_done_wait_mutex); in create_tasks()
581 for (i = 0; i < sched->nr_tasks; i++) { in create_tasks()
584 parms->task = task = sched->tasks[i]; in create_tasks()
585 parms->sched = sched; in create_tasks()
586 parms->fd = self_open_counters(sched, i); in create_tasks()
596 static void wait_for_tasks(struct perf_sched *sched) in wait_for_tasks() argument
602 sched->start_time = get_nsecs(); in wait_for_tasks()
603 sched->cpu_usage = 0; in wait_for_tasks()
604 pthread_mutex_unlock(&sched->work_done_wait_mutex); in wait_for_tasks()
606 for (i = 0; i < sched->nr_tasks; i++) { in wait_for_tasks()
607 task = sched->tasks[i]; in wait_for_tasks()
612 ret = pthread_mutex_lock(&sched->work_done_wait_mutex); in wait_for_tasks()
617 pthread_mutex_unlock(&sched->start_work_mutex); in wait_for_tasks()
619 for (i = 0; i < sched->nr_tasks; i++) { in wait_for_tasks()
620 task = sched->tasks[i]; in wait_for_tasks()
624 sched->cpu_usage += task->cpu_usage; in wait_for_tasks()
629 if (!sched->runavg_cpu_usage) in wait_for_tasks()
630 sched->runavg_cpu_usage = sched->cpu_usage; in wait_for_tasks()
631 sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage… in wait_for_tasks()
633 sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0; in wait_for_tasks()
634 if (!sched->runavg_parent_cpu_usage) in wait_for_tasks()
635 sched->runavg_parent_cpu_usage = sched->parent_cpu_usage; in wait_for_tasks()
636 sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) + in wait_for_tasks()
637 sched->parent_cpu_usage)/sched->replay_repeat; in wait_for_tasks()
639 ret = pthread_mutex_lock(&sched->start_work_mutex); in wait_for_tasks()
642 for (i = 0; i < sched->nr_tasks; i++) { in wait_for_tasks()
643 task = sched->tasks[i]; in wait_for_tasks()
649 static void run_one_test(struct perf_sched *sched) in run_one_test() argument
654 wait_for_tasks(sched); in run_one_test()
658 sched->sum_runtime += delta; in run_one_test()
659 sched->nr_runs++; in run_one_test()
661 avg_delta = sched->sum_runtime / sched->nr_runs; in run_one_test()
666 sched->sum_fluct += fluct; in run_one_test()
667 if (!sched->run_avg) in run_one_test()
668 sched->run_avg = delta; in run_one_test()
669 sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat; in run_one_test()
671 printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC); in run_one_test()
673 printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC); in run_one_test()
676 (double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC); in run_one_test()
684 (double)sched->parent_cpu_usage / NSEC_PER_MSEC, in run_one_test()
685 (double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC); in run_one_test()
690 if (sched->nr_sleep_corrections) in run_one_test()
691 printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections); in run_one_test()
692 sched->nr_sleep_corrections = 0; in run_one_test()
695 static void test_calibrations(struct perf_sched *sched) in test_calibrations() argument
700 burn_nsecs(sched, NSEC_PER_MSEC); in test_calibrations()
713 replay_wakeup_event(struct perf_sched *sched, in replay_wakeup_event() argument
727 waker = register_pid(sched, sample->tid, "<unknown>"); in replay_wakeup_event()
728 wakee = register_pid(sched, pid, comm); in replay_wakeup_event()
730 add_sched_event_wakeup(sched, waker, sample->time, wakee); in replay_wakeup_event()
734 static int replay_switch_event(struct perf_sched *sched, in replay_switch_event() argument
755 timestamp0 = sched->cpu_last_switched[cpu]; in replay_switch_event()
769 prev = register_pid(sched, prev_pid, prev_comm); in replay_switch_event()
770 next = register_pid(sched, next_pid, next_comm); in replay_switch_event()
772 sched->cpu_last_switched[cpu] = timestamp; in replay_switch_event()
774 add_sched_event_run(sched, prev, timestamp, delta); in replay_switch_event()
775 add_sched_event_sleep(sched, prev, timestamp, prev_state); in replay_switch_event()
780 static int replay_fork_event(struct perf_sched *sched, in replay_fork_event() argument
803 register_pid(sched, parent->tid, thread__comm_str(parent)); in replay_fork_event()
804 register_pid(sched, child->tid, thread__comm_str(child)); in replay_fork_event()
885 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) in thread_atoms_insert() argument
895 __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid); in thread_atoms_insert()
973 static int latency_switch_event(struct perf_sched *sched, in latency_switch_event() argument
989 timestamp0 = sched->cpu_last_switched[cpu]; in latency_switch_event()
990 sched->cpu_last_switched[cpu] = timestamp; in latency_switch_event()
1006 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); in latency_switch_event()
1008 if (thread_atoms_insert(sched, sched_out)) in latency_switch_event()
1010 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); in latency_switch_event()
1019 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); in latency_switch_event()
1021 if (thread_atoms_insert(sched, sched_in)) in latency_switch_event()
1023 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); in latency_switch_event()
1043 static int latency_runtime_event(struct perf_sched *sched, in latency_runtime_event() argument
1051 struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); in latency_runtime_event()
1060 if (thread_atoms_insert(sched, thread)) in latency_runtime_event()
1062 atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); in latency_runtime_event()
1078 static int latency_wakeup_event(struct perf_sched *sched, in latency_wakeup_event() argument
1093 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); in latency_wakeup_event()
1095 if (thread_atoms_insert(sched, wakee)) in latency_wakeup_event()
1097 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); in latency_wakeup_event()
1121 if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING) in latency_wakeup_event()
1124 sched->nr_timestamps++; in latency_wakeup_event()
1126 sched->nr_unordered_timestamps++; in latency_wakeup_event()
1139 static int latency_migrate_task_event(struct perf_sched *sched, in latency_migrate_task_event() argument
1154 if (sched->profile_cpu == -1) in latency_migrate_task_event()
1160 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); in latency_migrate_task_event()
1162 if (thread_atoms_insert(sched, migrant)) in latency_migrate_task_event()
1164 register_pid(sched, migrant->tid, thread__comm_str(migrant)); in latency_migrate_task_event()
1165 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); in latency_migrate_task_event()
1179 sched->nr_timestamps++; in latency_migrate_task_event()
1182 sched->nr_unordered_timestamps++; in latency_migrate_task_event()
1189 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list) in output_lat_thread() argument
1203 sched->all_runtime += work_list->total_runtime; in output_lat_thread()
1204 sched->all_count += work_list->nb_atoms; in output_lat_thread()
1327 static void perf_sched__sort_lat(struct perf_sched *sched) in perf_sched__sort_lat() argument
1330 struct rb_root *root = &sched->atom_root; in perf_sched__sort_lat()
1340 __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list); in perf_sched__sort_lat()
1342 if (root == &sched->atom_root) { in perf_sched__sort_lat()
1343 root = &sched->merged_atom_root; in perf_sched__sort_lat()
1353 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); in process_sched_wakeup_event() local
1355 if (sched->tp_handler->wakeup_event) in process_sched_wakeup_event()
1356 return sched->tp_handler->wakeup_event(sched, evsel, sample, machine); in process_sched_wakeup_event()
1376 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid) in map__findnew_thread() argument
1383 if (!sched->map.color_pids || !thread || thread__priv(thread)) in map__findnew_thread()
1386 if (thread_map__has(sched->map.color_pids, tid)) in map__findnew_thread()
1393 static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, in map_switch_event() argument
1408 if (this_cpu > sched->max_cpu) in map_switch_event()
1409 sched->max_cpu = this_cpu; in map_switch_event()
1411 if (sched->map.comp) { in map_switch_event()
1412 cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS); in map_switch_event()
1413 if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) { in map_switch_event()
1414 sched->map.comp_cpus[cpus_nr++] = this_cpu; in map_switch_event()
1418 cpus_nr = sched->max_cpu; in map_switch_event()
1420 timestamp0 = sched->cpu_last_switched[this_cpu]; in map_switch_event()
1421 sched->cpu_last_switched[this_cpu] = timestamp; in map_switch_event()
1432 sched_in = map__findnew_thread(sched, machine, -1, next_pid); in map_switch_event()
1436 sched->curr_thread[this_cpu] = thread__get(sched_in); in map_switch_event()
1450 sched_in->shortname[0] = sched->next_shortname1; in map_switch_event()
1451 sched_in->shortname[1] = sched->next_shortname2; in map_switch_event()
1453 if (sched->next_shortname1 < 'Z') { in map_switch_event()
1454 sched->next_shortname1++; in map_switch_event()
1456 sched->next_shortname1 = 'A'; in map_switch_event()
1457 if (sched->next_shortname2 < '9') in map_switch_event()
1458 sched->next_shortname2++; in map_switch_event()
1460 sched->next_shortname2 = '0'; in map_switch_event()
1467 int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i; in map_switch_event()
1468 struct thread *curr_thread = sched->curr_thread[cpu]; in map_switch_event()
1475 if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu)) in map_switch_event()
1478 if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu)) in map_switch_event()
1486 if (sched->curr_thread[cpu]) in map_switch_event()
1487 color_fprintf(stdout, pid_color, "%2s ", sched->curr_thread[cpu]->shortname); in map_switch_event()
1492 if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu)) in map_switch_event()
1506 if (sched->map.comp && new_cpu) in map_switch_event()
1522 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); in process_sched_switch_event() local
1527 if (sched->curr_pid[this_cpu] != (u32)-1) { in process_sched_switch_event()
1532 if (sched->curr_pid[this_cpu] != prev_pid) in process_sched_switch_event()
1533 sched->nr_context_switch_bugs++; in process_sched_switch_event()
1536 if (sched->tp_handler->switch_event) in process_sched_switch_event()
1537 err = sched->tp_handler->switch_event(sched, evsel, sample, machine); in process_sched_switch_event()
1539 sched->curr_pid[this_cpu] = next_pid; in process_sched_switch_event()
1548 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); in process_sched_runtime_event() local
1550 if (sched->tp_handler->runtime_event) in process_sched_runtime_event()
1551 return sched->tp_handler->runtime_event(sched, evsel, sample, machine); in process_sched_runtime_event()
1561 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); in perf_sched__process_fork_event() local
1567 if (sched->tp_handler->fork_event) in perf_sched__process_fork_event()
1568 return sched->tp_handler->fork_event(sched, event, machine); in perf_sched__process_fork_event()
1578 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); in process_sched_migrate_task_event() local
1580 if (sched->tp_handler->migrate_task_event) in process_sched_migrate_task_event()
1581 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine); in process_sched_migrate_task_event()
1607 static int perf_sched__read_events(struct perf_sched *sched) in perf_sched__read_events() argument
1620 .force = sched->force, in perf_sched__read_events()
1624 session = perf_session__new(&file, false, &sched->tool); in perf_sched__read_events()
1642 sched->nr_events = session->evlist->stats.nr_events[0]; in perf_sched__read_events()
1643 sched->nr_lost_events = session->evlist->stats.total_lost; in perf_sched__read_events()
1644 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST]; in perf_sched__read_events()
1653 static void print_bad_events(struct perf_sched *sched) in print_bad_events() argument
1655 if (sched->nr_unordered_timestamps && sched->nr_timestamps) { in print_bad_events()
1657 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0, in print_bad_events()
1658 sched->nr_unordered_timestamps, sched->nr_timestamps); in print_bad_events()
1660 if (sched->nr_lost_events && sched->nr_events) { in print_bad_events()
1662 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0, in print_bad_events()
1663 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks); in print_bad_events()
1665 if (sched->nr_context_switch_bugs && sched->nr_timestamps) { in print_bad_events()
1667 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0, in print_bad_events()
1668 sched->nr_context_switch_bugs, sched->nr_timestamps); in print_bad_events()
1669 if (sched->nr_lost_events) in print_bad_events()
1713 static void perf_sched__merge_lat(struct perf_sched *sched) in perf_sched__merge_lat() argument
1718 if (sched->skip_merge) in perf_sched__merge_lat()
1721 while ((node = rb_first(&sched->atom_root))) { in perf_sched__merge_lat()
1722 rb_erase(node, &sched->atom_root); in perf_sched__merge_lat()
1724 __merge_work_atoms(&sched->merged_atom_root, data); in perf_sched__merge_lat()
1728 static int perf_sched__lat(struct perf_sched *sched) in perf_sched__lat() argument
1734 if (perf_sched__read_events(sched)) in perf_sched__lat()
1737 perf_sched__merge_lat(sched); in perf_sched__lat()
1738 perf_sched__sort_lat(sched); in perf_sched__lat()
1744 next = rb_first(&sched->sorted_atom_root); in perf_sched__lat()
1750 output_lat_thread(sched, work_list); in perf_sched__lat()
1757 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count); in perf_sched__lat()
1761 print_bad_events(sched); in perf_sched__lat()
1767 static int setup_map_cpus(struct perf_sched *sched) in setup_map_cpus() argument
1771 sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF); in setup_map_cpus()
1773 if (sched->map.comp) { in setup_map_cpus()
1774 sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int)); in setup_map_cpus()
1775 if (!sched->map.comp_cpus) in setup_map_cpus()
1779 if (!sched->map.cpus_str) in setup_map_cpus()
1782 map = cpu_map__new(sched->map.cpus_str); in setup_map_cpus()
1784 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str); in setup_map_cpus()
1788 sched->map.cpus = map; in setup_map_cpus()
1792 static int setup_color_pids(struct perf_sched *sched) in setup_color_pids() argument
1796 if (!sched->map.color_pids_str) in setup_color_pids()
1799 map = thread_map__new_by_tid_str(sched->map.color_pids_str); in setup_color_pids()
1801 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str); in setup_color_pids()
1805 sched->map.color_pids = map; in setup_color_pids()
1809 static int setup_color_cpus(struct perf_sched *sched) in setup_color_cpus() argument
1813 if (!sched->map.color_cpus_str) in setup_color_cpus()
1816 map = cpu_map__new(sched->map.color_cpus_str); in setup_color_cpus()
1818 pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str); in setup_color_cpus()
1822 sched->map.color_cpus = map; in setup_color_cpus()
1826 static int perf_sched__map(struct perf_sched *sched) in perf_sched__map() argument
1828 if (setup_map_cpus(sched)) in perf_sched__map()
1831 if (setup_color_pids(sched)) in perf_sched__map()
1834 if (setup_color_cpus(sched)) in perf_sched__map()
1838 if (perf_sched__read_events(sched)) in perf_sched__map()
1840 print_bad_events(sched); in perf_sched__map()
1844 static int perf_sched__replay(struct perf_sched *sched) in perf_sched__replay() argument
1848 calibrate_run_measurement_overhead(sched); in perf_sched__replay()
1849 calibrate_sleep_measurement_overhead(sched); in perf_sched__replay()
1851 test_calibrations(sched); in perf_sched__replay()
1853 if (perf_sched__read_events(sched)) in perf_sched__replay()
1856 printf("nr_run_events: %ld\n", sched->nr_run_events); in perf_sched__replay()
1857 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events); in perf_sched__replay()
1858 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events); in perf_sched__replay()
1860 if (sched->targetless_wakeups) in perf_sched__replay()
1861 printf("target-less wakeups: %ld\n", sched->targetless_wakeups); in perf_sched__replay()
1862 if (sched->multitarget_wakeups) in perf_sched__replay()
1863 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups); in perf_sched__replay()
1864 if (sched->nr_run_events_optimized) in perf_sched__replay()
1866 sched->nr_run_events_optimized); in perf_sched__replay()
1868 print_task_traces(sched); in perf_sched__replay()
1869 add_cross_task_wakeups(sched); in perf_sched__replay()
1871 create_tasks(sched); in perf_sched__replay()
1873 for (i = 0; i < sched->replay_repeat; i++) in perf_sched__replay()
1874 run_one_test(sched); in perf_sched__replay()
1879 static void setup_sorting(struct perf_sched *sched, const struct option *options, in setup_sorting() argument
1882 char *tmp, *tok, *str = strdup(sched->sort_order); in setup_sorting()
1886 if (sort_dimension__add(tok, &sched->sort_list) < 0) { in setup_sorting()
1894 sort_dimension__add("pid", &sched->cmp_pid); in setup_sorting()
1938 struct perf_sched sched = { in cmd_sched() local
1946 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid), in cmd_sched()
1947 .sort_list = LIST_HEAD_INIT(sched.sort_list), in cmd_sched()
1958 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]", in cmd_sched()
1962 OPT_INTEGER('C', "CPU", &sched.profile_cpu, in cmd_sched()
1966 OPT_BOOLEAN('p', "pids", &sched.skip_merge, in cmd_sched()
1971 OPT_UINTEGER('r', "repeat", &sched.replay_repeat, in cmd_sched()
1977 OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"), in cmd_sched()
1990 OPT_BOOLEAN(0, "compact", &sched.map.comp, in cmd_sched()
1992 OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids", in cmd_sched()
1994 OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus", in cmd_sched()
1996 OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus", in cmd_sched()
2034 for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++) in cmd_sched()
2035 sched.curr_pid[i] = -1; in cmd_sched()
2051 sched.tp_handler = &lat_ops; in cmd_sched()
2057 setup_sorting(&sched, latency_options, latency_usage); in cmd_sched()
2058 return perf_sched__lat(&sched); in cmd_sched()
2065 sched.tp_handler = &map_ops; in cmd_sched()
2066 setup_sorting(&sched, latency_options, latency_usage); in cmd_sched()
2067 return perf_sched__map(&sched); in cmd_sched()
2069 sched.tp_handler = &replay_ops; in cmd_sched()
2075 return perf_sched__replay(&sched); in cmd_sched()
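
The five handler prototypes at lines 109-122, together with the dispatch calls at lines 1356, 1537, 1551, 1568 and 1581, describe the per-subcommand ops table that sched->tp_handler points at. A minimal sketch of that table, reconstructed from those matches (the struct name trace_sched_handler and the comments are assumptions; the member signatures follow the call sites above):

	struct trace_sched_handler {
		/* sched_switch tracepoint, dispatched from process_sched_switch_event() (line 1537) */
		int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
				    struct perf_sample *sample, struct machine *machine);

		/* sched_stat_runtime tracepoint, dispatched from process_sched_runtime_event() (line 1551) */
		int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
				     struct perf_sample *sample, struct machine *machine);

		/* sched_wakeup tracepoint, dispatched from process_sched_wakeup_event() (line 1356) */
		int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
				    struct perf_sample *sample, struct machine *machine);

		/* PERF_RECORD_FORK side-band event, hence union perf_event rather than a sample (line 1568) */
		int (*fork_event)(struct perf_sched *sched, union perf_event *event,
				  struct machine *machine);

		/* sched_migrate_task tracepoint, dispatched from process_sched_migrate_task_event() (line 1581) */
		int (*migrate_task_event)(struct perf_sched *sched,
					  struct perf_evsel *evsel,
					  struct perf_sample *sample,
					  struct machine *machine);
	};

Each subcommand installs its own instance of this table (lat_ops at line 2051, map_ops at line 2065, replay_ops at line 2069), which keeps the process_sched_*_event() wrappers at lines 1353-1581 generic across the latency, map and replay modes.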
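
The option table in cmd_sched() (lines 1958-1996) and the subcommand dispatch at lines 2051-2075 correspond to invocations along the following lines. A usage sketch, assuming a perf.data previously produced by perf sched record; the sort key and CPU lists are illustrative, not taken from this page:

	perf sched latency -s max -p        # sort the latency table by a key such as 'max'; -p keeps per-pid entries unmerged
	perf sched map --compact --cpus 0-3 --color-cpus 0,1
	perf sched replay -r 10 -f          # replay the traced workload 10 times; -f lets self_open_counters() raise RLIMIT_NOFILE

The map options at lines 1990-1996 only shape the display (--compact limits output to CPUs that ever ran a task, the --color-* options highlight the chosen pids and cpus), while -r and -f feed sched.replay_repeat and sched.force, which are used throughout run_one_test(), wait_for_tasks() and self_open_counters().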