Lines Matching refs:x86_pmu

47 struct x86_pmu x86_pmu __read_mostly;
62 DEFINE_STATIC_CALL_NULL(x86_pmu_handle_irq, *x86_pmu.handle_irq);
63 DEFINE_STATIC_CALL_NULL(x86_pmu_disable_all, *x86_pmu.disable_all);
64 DEFINE_STATIC_CALL_NULL(x86_pmu_enable_all, *x86_pmu.enable_all);
65 DEFINE_STATIC_CALL_NULL(x86_pmu_enable, *x86_pmu.enable);
66 DEFINE_STATIC_CALL_NULL(x86_pmu_disable, *x86_pmu.disable);
68 DEFINE_STATIC_CALL_NULL(x86_pmu_add, *x86_pmu.add);
69 DEFINE_STATIC_CALL_NULL(x86_pmu_del, *x86_pmu.del);
70 DEFINE_STATIC_CALL_NULL(x86_pmu_read, *x86_pmu.read);
72 DEFINE_STATIC_CALL_NULL(x86_pmu_schedule_events, *x86_pmu.schedule_events);
73 DEFINE_STATIC_CALL_NULL(x86_pmu_get_event_constraints, *x86_pmu.get_event_constraints);
74 DEFINE_STATIC_CALL_NULL(x86_pmu_put_event_constraints, *x86_pmu.put_event_constraints);
76 DEFINE_STATIC_CALL_NULL(x86_pmu_start_scheduling, *x86_pmu.start_scheduling);
77 DEFINE_STATIC_CALL_NULL(x86_pmu_commit_scheduling, *x86_pmu.commit_scheduling);
78 DEFINE_STATIC_CALL_NULL(x86_pmu_stop_scheduling, *x86_pmu.stop_scheduling);
80 DEFINE_STATIC_CALL_NULL(x86_pmu_sched_task, *x86_pmu.sched_task);
81 DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
83 DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
84 DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
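
The block above declares one static call per x86_pmu method, each initialized to a no-op trampoline; x86_pmu_static_call_update() (lines 1914-1936 below) later patches every one of them to the vendor callback, turning what would be an indirect call through x86_pmu into a direct call on the hot path. A minimal userspace model of the same declare-then-patch dispatch, with an ordinary function pointer standing in for the kernel's patched call site (all names here are illustrative):

    #include <stdio.h>

    /* Model of DEFINE_STATIC_CALL_NULL + static_call_update: a hook
     * starts out as a no-op and is patched once the vendor driver is
     * known. In the kernel the call site itself is patched; here an
     * ordinary function pointer plays that role. */
    struct pmu_ops {
        void (*enable_all)(int added);
    };

    static void nop_enable_all(int added) { (void)added; }  /* NULL default */
    static void intel_enable_all(int added) { printf("enable_all(%d)\n", added); }

    static void (*pmu_enable_all_call)(int) = nop_enable_all;

    static void pmu_static_call_update(const struct pmu_ops *ops)
    {
        /* analogous to static_call_update(x86_pmu_enable_all, ...) */
        pmu_enable_all_call = ops->enable_all ? ops->enable_all : nop_enable_all;
    }

    int main(void)
    {
        pmu_enable_all_call(0);          /* safe no-op before patching */
        struct pmu_ops intel = { .enable_all = intel_enable_all };
        pmu_static_call_update(&intel);
        pmu_enable_all_call(1);          /* now reaches the vendor hook */
        return 0;
    }
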
103 int shift = 64 - x86_pmu.cntval_bits; in x86_perf_event_update()
110 if (unlikely(is_topdown_count(event)) && x86_pmu.update_topdown_event) in x86_perf_event_update()
111 return x86_pmu.update_topdown_event(event); in x86_perf_event_update()
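
Lines 103-111 are the heart of x86_perf_event_update(): hardware counters are only cntval_bits wide, so the previous and new raw values are shifted up to bit 63 and back down arithmetically, which makes the subtraction wrap correctly across a counter rollover. A self-contained sketch of that arithmetic, assuming a 48-bit counter:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the delta computation behind x86_perf_event_update(),
     * assuming cntval_bits = 48. Shifting both raw values up to bit 63
     * and back down as signed quantities sign-extends them, so the
     * difference stays correct when the hardware counter rolls over. */
    int main(void)
    {
        const int cntval_bits = 48;
        const int shift = 64 - cntval_bits;

        uint64_t prev_raw = 0xFFFFFFFFFFF0ULL;  /* just before the 48-bit wrap */
        uint64_t new_raw  = 0x000000000010ULL;  /* just after it */

        int64_t delta = ((int64_t)(new_raw << shift) -
                         (int64_t)(prev_raw << shift)) >> shift;

        printf("delta = %lld events\n", (long long)delta);  /* 32 */
        return 0;
    }
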
155 if (!x86_pmu.extra_regs) in x86_pmu_extra_regs()
158 for (er = x86_pmu.extra_regs; er->msr; er++) { in x86_pmu_extra_regs()
185 for (i = 0; i < x86_pmu.num_counters; i++) { in reserve_pmc_hardware()
190 for (i = 0; i < x86_pmu.num_counters; i++) { in reserve_pmc_hardware()
201 i = x86_pmu.num_counters; in reserve_pmc_hardware()
214 for (i = 0; i < x86_pmu.num_counters; i++) { in release_pmc_hardware()
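
reserve_pmc_hardware() (lines 185-201) acquires the perfctr and eventsel MSRs counter by counter and unwinds on conflict; the `i = x86_pmu.num_counters` at line 201 primes that unwind when the second loop fails partway. A userspace model of the acquire-with-rollback loop (names and the simulated conflict are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the reserve/rollback pattern in reserve_pmc_hardware():
     * take N resources in order; on any failure, release everything
     * acquired so far. Names are illustrative. */
    #define NUM_COUNTERS 4

    static bool reserved[NUM_COUNTERS];

    static bool reserve_one(int i)
    {
        if (i == 2)                 /* simulate a conflict on counter 2 */
            return false;
        reserved[i] = true;         /* stand-in for reserve_perfctr_nmi() */
        return true;
    }

    static void release_one(int i)
    {
        reserved[i] = false;        /* stand-in for release_perfctr_nmi() */
    }

    static bool reserve_all(void)
    {
        for (int i = 0; i < NUM_COUNTERS; i++) {
            if (!reserve_one(i)) {
                while (--i >= 0)    /* unwind the ones already taken */
                    release_one(i);
                return false;
            }
        }
        return true;
    }

    int main(void)
    {
        printf("reserve_all: %s\n", reserve_all() ? "ok" : "failed");
        return 0;
    }
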
238 for (i = 0; i < x86_pmu.num_counters; i++) { in check_hw_exists()
252 if (x86_pmu.num_counters_fixed) { in check_hw_exists()
257 for (i = 0; i < x86_pmu.num_counters_fixed; i++) { in check_hw_exists()
330 return x86_pmu.handle_irq != NULL; in x86_pmu_initialized()
414 if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) in x86_add_exclusive()
417 if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) { in x86_add_exclusive()
419 for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) { in x86_add_exclusive()
420 if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i])) in x86_add_exclusive()
423 atomic_inc(&x86_pmu.lbr_exclusive[what]); in x86_add_exclusive()
443 if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) in x86_del_exclusive()
446 atomic_dec(&x86_pmu.lbr_exclusive[what]); in x86_del_exclusive()
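
x86_add_exclusive()/x86_del_exclusive() (lines 414-446) arbitrate between mutually exclusive LBR users: the lock-free fast path atomic_inc_not_zero() succeeds when this class already holds the hardware, and only the first user of a class walks the other classes looking for conflicts (PT is exempted when lbr_pt_coexist is set). A userspace model of the pattern using C11 atomics, with a pthread mutex standing in for the kernel's slow-path lock (names are illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the exclusive-ownership pattern: several user classes
     * (in the kernel: LBR, PT, BTS) may not drive the hardware at the
     * same time. Names are illustrative. */
    enum { EXCL_LBR, EXCL_PT, EXCL_BTS, EXCL_MAX };

    static atomic_int excl_count[EXCL_MAX];
    static pthread_mutex_t excl_lock = PTHREAD_MUTEX_INITIALIZER;

    static bool add_exclusive(int what)
    {
        /* fast path, like atomic_inc_not_zero(): join an existing owner */
        int old = atomic_load(&excl_count[what]);
        while (old > 0)
            if (atomic_compare_exchange_weak(&excl_count[what], &old, old + 1))
                return true;

        /* slow path: the first user of this class checks the others */
        pthread_mutex_lock(&excl_lock);
        for (int i = 0; i < EXCL_MAX; i++) {
            if (i != what && atomic_load(&excl_count[i]) != 0) {
                pthread_mutex_unlock(&excl_lock);
                return false;          /* a conflicting class owns it */
            }
        }
        atomic_fetch_add(&excl_count[what], 1);
        pthread_mutex_unlock(&excl_lock);
        return true;
    }

    static void del_exclusive(int what)
    {
        atomic_fetch_sub(&excl_count[what], 1);
    }

    int main(void)
    {
        printf("LBR: %s\n", add_exclusive(EXCL_LBR) ? "granted" : "busy");
        printf("PT:  %s\n", add_exclusive(EXCL_PT)  ? "granted" : "busy");
        del_exclusive(EXCL_LBR);
        return 0;
    }
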
456 hwc->sample_period = x86_pmu.max_period; in x86_setup_perfctr()
467 if (attr->config >= x86_pmu.max_events) in x86_setup_perfctr()
470 attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events); in x86_setup_perfctr()
475 config = x86_pmu.event_map(attr->config); in x86_setup_perfctr()
523 if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { in x86_pmu_max_precise()
527 if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2) in x86_pmu_max_precise()
530 if (x86_pmu.pebs_prec_dist) in x86_pmu_max_precise()
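
Lines 523-530 assemble the maximum supported precise_ip level: one level for working PEBS, one more when the instruction pointer can be fixed up (via LBR or PEBS format >= 2), and one more for precise distribution. A self-contained reconstruction of that logic, with a pared-down stand-in for struct x86_pmu and illustrative capability values:

    #include <stdio.h>

    /* Reconstruction of the precise_ip level logic visible at lines
     * 523-530; the struct below is a pared-down stand-in for the
     * kernel's struct x86_pmu. */
    struct pmu_caps {
        int pebs_active, pebs_broken, pebs_prec_dist;
        int lbr_nr, pebs_format;
    };

    static int max_precise(const struct pmu_caps *p)
    {
        int precise = 0;

        if (p->pebs_active && !p->pebs_broken) {
            precise++;                              /* constant skid */
            if (p->lbr_nr || p->pebs_format >= 2)
                precise++;                          /* IP fixup */
            if (p->pebs_prec_dist)
                precise++;                          /* precise distribution */
        }
        return precise;
    }

    int main(void)
    {
        /* illustrative values for a Skylake-class PMU */
        struct pmu_caps skylake_like = {
            .pebs_active = 1, .pebs_format = 3,
            .pebs_prec_dist = 1, .lbr_nr = 32,
        };
        printf("max precise_ip = %d\n", max_precise(&skylake_like));  /* 3 */
        return 0;
    }
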
552 if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) { in x86_pmu_hw_config()
599 if (event->attr.sample_period && x86_pmu.limit_period) { in x86_pmu_hw_config()
600 if (x86_pmu.limit_period(event, event->attr.sample_period) > in x86_pmu_hw_config()
648 return x86_pmu.hw_config(event); in __x86_pmu_event_init()
656 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_disable_all()
707 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_enable_all()
1011 int gpmax = x86_pmu.num_counters; in x86_schedule_events()
1031 if (x86_pmu.flags & PMU_FL_PAIR) { in x86_schedule_events()
1032 gpmax = x86_pmu.num_counters - cpuc->n_pair; in x86_schedule_events()
1097 if (x86_pmu.intel_cap.perf_metrics && add_nr_metric_event(cpuc, event)) in collect_event()
1121 max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed; in collect_events()
1326 x86_pmu.set_topdown_event_period) in x86_perf_event_set_period()
1327 return x86_pmu.set_topdown_event_period(event); in x86_perf_event_set_period()
1351 if (left > x86_pmu.max_period) in x86_perf_event_set_period()
1352 left = x86_pmu.max_period; in x86_perf_event_set_period()
1354 if (x86_pmu.limit_period) in x86_perf_event_set_period()
1355 left = x86_pmu.limit_period(event, left); in x86_perf_event_set_period()
1365 wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); in x86_perf_event_set_period()
1379 if (x86_pmu.perfctr_second_write) { in x86_perf_event_set_period()
1381 (u64)(-left) & x86_pmu.cntval_mask); in x86_perf_event_set_period()
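
Lines 1326-1381 show how x86_perf_event_set_period() programs a counter: the remaining period is clamped to max_period (and optionally trimmed by limit_period), then its two's complement, masked to the counter width, is written so the counter overflows after exactly `left` events. A worked example of the write at line 1365, assuming a 48-bit counter:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the counter programming at line 1365: writing the
     * two's complement of `left`, masked to the counter width, makes
     * the counter overflow after `left` increments. Assumes a 48-bit
     * counter for illustration. */
    int main(void)
    {
        const uint64_t cntval_mask = (1ULL << 48) - 1;
        int64_t left = 100000;               /* events until next overflow */

        uint64_t programmed = (uint64_t)(-left) & cntval_mask;
        printf("write 0x%012llx\n", (unsigned long long)programmed);

        /* counting `left` events from the programmed value wraps to 0 */
        uint64_t after = (programmed + (uint64_t)left) & cntval_mask;
        printf("after %lld events: 0x%llx\n",
               (long long)left, (unsigned long long)after);  /* 0x0 */
        return 0;
    }
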
1493 if (!x86_pmu.num_counters) in perf_event_print_debug()
1501 if (x86_pmu.version >= 2) { in perf_event_print_debug()
1512 if (x86_pmu.pebs_constraints) { in perf_event_print_debug()
1516 if (x86_pmu.lbr_nr) { in perf_event_print_debug()
1523 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in perf_event_print_debug()
1536 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { in perf_event_print_debug()
1610 if (x86_pmu.intel_cap.perf_metrics) in x86_pmu_del()
1644 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_handle_irq()
1651 if (val & (1ULL << (x86_pmu.cntval_bits - 1))) in x86_pmu_handle_irq()
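
Line 1651 is the overflow test in the generic IRQ handler: a counter programmed with a negative start value keeps its top bit (bit cntval_bits - 1) set while events remain, so a cleared top bit marks the counters that actually overflowed. A small demonstration, again assuming 48-bit counters:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the overflow check at line 1651: bit cntval_bits - 1
     * stays set until the counter wraps past zero. */
    int main(void)
    {
        const int cntval_bits = 48;
        const uint64_t mask = (1ULL << 48) - 1;
        uint64_t val = (uint64_t)(-1000LL) & mask;   /* 1000 events to go */

        printf("pending:  top bit %d\n",
               !!(val & (1ULL << (cntval_bits - 1))));   /* 1: no overflow */
        val = (val + 1000) & mask;                   /* counter wrapped */
        printf("overflow: top bit %d\n",
               !!(val & (1ULL << (cntval_bits - 1))));   /* 0: overflowed */
        return 0;
    }
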
1675 if (!x86_pmu.apic || !x86_pmu_initialized()) in perf_events_lapic_init()
1718 if (x86_pmu.cpu_prepare) in x86_pmu_prepare_cpu()
1719 return x86_pmu.cpu_prepare(cpu); in x86_pmu_prepare_cpu()
1725 if (x86_pmu.cpu_dead) in x86_pmu_dead_cpu()
1726 x86_pmu.cpu_dead(cpu); in x86_pmu_dead_cpu()
1744 if (x86_pmu.cpu_starting) in x86_pmu_starting_cpu()
1745 x86_pmu.cpu_starting(cpu); in x86_pmu_starting_cpu()
1751 if (x86_pmu.cpu_dying) in x86_pmu_dying_cpu()
1752 x86_pmu.cpu_dying(cpu); in x86_pmu_dying_cpu()
1761 x86_pmu.apic = 0; in pmu_check_apic()
1786 if (pmu_attr->id < x86_pmu.max_events) in events_sysfs_show()
1787 config = x86_pmu.event_map(pmu_attr->id); in events_sysfs_show()
1793 return x86_pmu.events_sysfs_show(page, config); in events_sysfs_show()
1856 if (idx >= x86_pmu.max_events) in is_visible()
1861 return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0; in is_visible()
1914 static_call_update(x86_pmu_handle_irq, x86_pmu.handle_irq); in x86_pmu_static_call_update()
1915 static_call_update(x86_pmu_disable_all, x86_pmu.disable_all); in x86_pmu_static_call_update()
1916 static_call_update(x86_pmu_enable_all, x86_pmu.enable_all); in x86_pmu_static_call_update()
1917 static_call_update(x86_pmu_enable, x86_pmu.enable); in x86_pmu_static_call_update()
1918 static_call_update(x86_pmu_disable, x86_pmu.disable); in x86_pmu_static_call_update()
1920 static_call_update(x86_pmu_add, x86_pmu.add); in x86_pmu_static_call_update()
1921 static_call_update(x86_pmu_del, x86_pmu.del); in x86_pmu_static_call_update()
1922 static_call_update(x86_pmu_read, x86_pmu.read); in x86_pmu_static_call_update()
1924 static_call_update(x86_pmu_schedule_events, x86_pmu.schedule_events); in x86_pmu_static_call_update()
1925 static_call_update(x86_pmu_get_event_constraints, x86_pmu.get_event_constraints); in x86_pmu_static_call_update()
1926 static_call_update(x86_pmu_put_event_constraints, x86_pmu.put_event_constraints); in x86_pmu_static_call_update()
1928 static_call_update(x86_pmu_start_scheduling, x86_pmu.start_scheduling); in x86_pmu_static_call_update()
1929 static_call_update(x86_pmu_commit_scheduling, x86_pmu.commit_scheduling); in x86_pmu_static_call_update()
1930 static_call_update(x86_pmu_stop_scheduling, x86_pmu.stop_scheduling); in x86_pmu_static_call_update()
1932 static_call_update(x86_pmu_sched_task, x86_pmu.sched_task); in x86_pmu_static_call_update()
1933 static_call_update(x86_pmu_swap_task_ctx, x86_pmu.swap_task_ctx); in x86_pmu_static_call_update()
1935 static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs); in x86_pmu_static_call_update()
1936 static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases); in x86_pmu_static_call_update()
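
x86_pmu_static_call_update() completes the pattern started by the DEFINE_STATIC_CALL_NULL block at lines 62-84: once the vendor driver has populated x86_pmu, each trampoline is retargeted at the real callback, and hot-path code dispatches through static_call() (or static_call_cond() for hooks that may legitimately remain NULL). A hedged kernel-style sketch of the consumer side; these are not verbatim core.c call sites:

    /* Sketch only, assuming the kernel's static_call API. */
    static void sketch_reprogram(void)
    {
        static_call(x86_pmu_disable_all)();   /* patched direct call */
        /* ... rewrite event/counter configuration ... */
        static_call(x86_pmu_enable_all)(0);
    }
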
1960 x86_pmu.name = "HYGON"; in init_hw_perf_events()
1980 pr_cont("%s PMU driver.\n", x86_pmu.name); in init_hw_perf_events()
1982 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ in init_hw_perf_events()
1984 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) in init_hw_perf_events()
1987 if (!x86_pmu.intel_ctrl) in init_hw_perf_events()
1988 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; in init_hw_perf_events()
1994 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, in init_hw_perf_events()
1995 0, x86_pmu.num_counters, 0, 0); in init_hw_perf_events()
1997 x86_pmu_format_group.attrs = x86_pmu.format_attrs; in init_hw_perf_events()
1999 if (!x86_pmu.events_sysfs_show) in init_hw_perf_events()
2002 pmu.attr_update = x86_pmu.attr_update; in init_hw_perf_events()
2004 pr_info("... version: %d\n", x86_pmu.version); in init_hw_perf_events()
2005 pr_info("... bit width: %d\n", x86_pmu.cntval_bits); in init_hw_perf_events()
2006 pr_info("... generic registers: %d\n", x86_pmu.num_counters); in init_hw_perf_events()
2007 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask); in init_hw_perf_events()
2008 pr_info("... max period: %016Lx\n", x86_pmu.max_period); in init_hw_perf_events()
2009 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed); in init_hw_perf_events()
2010 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl); in init_hw_perf_events()
2012 if (!x86_pmu.read) in init_hw_perf_events()
2013 x86_pmu.read = _x86_pmu_read; in init_hw_perf_events()
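
During init_hw_perf_events(), a missing intel_ctrl defaults to one enable bit per general-purpose counter (line 1988), and the same all-counters mask feeds the unconstrained event constraint at lines 1994-1995. A one-line worked example of that mask construction:

    #include <stdio.h>

    /* Worked example of the default event mask at line 1988:
     * one enable bit per general-purpose counter. */
    int main(void)
    {
        int num_counters = 4;                       /* illustrative */
        unsigned long long intel_ctrl = (1ULL << num_counters) - 1;
        printf("event mask: %#llx\n", intel_ctrl);  /* 0xf: counters 0-3 */
        return 0;
    }
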
2196 c = x86_pmu.get_event_constraints(fake_cpuc, 0, event); in validate_event()
2201 if (x86_pmu.put_event_constraints) in validate_event()
2202 x86_pmu.put_event_constraints(fake_cpuc, event); in validate_event()
2245 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL); in validate_group()
2290 if (READ_ONCE(x86_pmu.attr_rdpmc) && in x86_pmu_event_init()
2345 return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc); in get_attr_rdpmc()
2362 if (x86_pmu.attr_rdpmc_broken) in set_attr_rdpmc()
2365 if (val != x86_pmu.attr_rdpmc) { in set_attr_rdpmc()
2373 else if (x86_pmu.attr_rdpmc == 0) in set_attr_rdpmc()
2378 else if (x86_pmu.attr_rdpmc == 2) in set_attr_rdpmc()
2382 x86_pmu.attr_rdpmc = val; in set_attr_rdpmc()
2439 if (x86_pmu.check_microcode) in perf_check_microcode()
2440 x86_pmu.check_microcode(); in perf_check_microcode()
2445 if (x86_pmu.check_period && x86_pmu.check_period(event, value)) in x86_pmu_check_period()
2448 if (value && x86_pmu.limit_period) { in x86_pmu_check_period()
2449 if (x86_pmu.limit_period(event, value) > value) in x86_pmu_check_period()
2461 if (x86_pmu.aux_output_match) in x86_pmu_aux_output_match()
2462 return x86_pmu.aux_output_match(event); in x86_pmu_aux_output_match()
2506 userpg->pmc_width = x86_pmu.cntval_bits; in arch_perf_update_userpage()
2766 cap->version = x86_pmu.version; in perf_get_x86_pmu_capability()
2767 cap->num_counters_gp = x86_pmu.num_counters; in perf_get_x86_pmu_capability()
2768 cap->num_counters_fixed = x86_pmu.num_counters_fixed; in perf_get_x86_pmu_capability()
2769 cap->bit_width_gp = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
2770 cap->bit_width_fixed = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
2771 cap->events_mask = (unsigned int)x86_pmu.events_maskl; in perf_get_x86_pmu_capability()
2772 cap->events_mask_len = x86_pmu.events_mask_len; in perf_get_x86_pmu_capability()
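
perf_get_x86_pmu_capability() (lines 2766-2772) exports the detected PMU parameters to other subsystems, KVM being a prominent consumer for sizing its guest vPMU. A pared-down, self-contained model of the structure being filled; the field names follow the excerpt, and the sample values are illustrative:

    #include <stdio.h>

    /* Pared-down model of the capability export at lines 2766-2772.
     * Field names follow the excerpt; values are illustrative. */
    struct pmu_capability {
        int version;
        int num_counters_gp;
        int num_counters_fixed;
        int bit_width_gp;
        int bit_width_fixed;
        unsigned int events_mask;
        int events_mask_len;
    };

    int main(void)
    {
        /* e.g. what a Skylake-class PMU might report */
        struct pmu_capability cap = {
            .version = 4,
            .num_counters_gp = 4,    .bit_width_gp = 48,
            .num_counters_fixed = 3, .bit_width_fixed = 48,
            .events_mask = 0x7f,     .events_mask_len = 7,
        };
        printf("PMU v%d: %d GP x %d-bit, %d fixed\n",
               cap.version, cap.num_counters_gp, cap.bit_width_gp,
               cap.num_counters_fixed);
        return 0;
    }
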