
Lines matching refs: cpuc

558 static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, in…  in sparc_pmu_enable_event()  argument
562 enc = perf_event_get_enc(cpuc->events[idx]); in sparc_pmu_enable_event()
564 val = cpuc->pcr; in sparc_pmu_enable_event()
567 cpuc->pcr = val; in sparc_pmu_enable_event()
569 pcr_ops->write(cpuc->pcr); in sparc_pmu_enable_event()
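
These matches look like the sparc perf_events PMU driver (arch/sparc/kernel/perf_event.c in older kernels). A hedged reconstruction of the enable path, built around the matched lines: the counter's event encoding is merged into the cached %pcr image and then written back through pcr_ops. The helpers mask_for_index() and event_encoding() do not appear in this listing and are assumptions.

/* Sketch, not verbatim kernel source: mask_for_index() and
 * event_encoding() are assumed helpers that isolate and fill the
 * PCR field belonging to counter slot 'idx'. */
static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc,
					  struct hw_perf_event *hwc, int idx)
{
	u64 val, mask = mask_for_index(idx);
	u64 enc;

	enc = perf_event_get_enc(cpuc->events[idx]);

	val = cpuc->pcr;
	val &= ~mask;				/* clear this counter's PCR field */
	val |= event_encoding(enc, idx);	/* install the event encoding     */
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);		/* push the cached image to %pcr  */
}
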
572 static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, i… in sparc_pmu_disable_event() argument
578 val = cpuc->pcr; in sparc_pmu_disable_event()
581 cpuc->pcr = val; in sparc_pmu_disable_event()
583 pcr_ops->write(cpuc->pcr); in sparc_pmu_disable_event()
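
The disable path is plausibly the mirror image: the same PCR field is cleared and replaced with a "count nothing" encoding. nop_for_index() is an assumed helper, not shown in these matches.

/* Sketch only: nop_for_index() is assumed to return an encoding that
 * makes counter slot 'idx' count nothing. */
static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc,
					   struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val;

	val = cpuc->pcr;
	val &= ~mask;
	val |= nop;
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}
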
674 static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr) in maybe_change_configuration() argument
678 if (!cpuc->n_added) in maybe_change_configuration()
682 for (i = 0; i < cpuc->n_events; i++) { in maybe_change_configuration()
683 struct perf_event *cp = cpuc->event[i]; in maybe_change_configuration()
685 if (cpuc->current_idx[i] != PIC_NO_INDEX && in maybe_change_configuration()
686 cpuc->current_idx[i] != cp->hw.idx) { in maybe_change_configuration()
688 cpuc->current_idx[i]); in maybe_change_configuration()
689 cpuc->current_idx[i] = PIC_NO_INDEX; in maybe_change_configuration()
694 for (i = 0; i < cpuc->n_events; i++) { in maybe_change_configuration()
695 struct perf_event *cp = cpuc->event[i]; in maybe_change_configuration()
700 if (cpuc->current_idx[i] != PIC_NO_INDEX) in maybe_change_configuration()
704 cpuc->current_idx[i] = idx; in maybe_change_configuration()
706 enc = perf_event_get_enc(cpuc->events[i]); in maybe_change_configuration()
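
Reading these matches together, maybe_change_configuration() appears to be the lazy rescheduling step: if events were added while the PMU was disabled, counters whose events are about to move slots are drained first, then every unassigned event is bound to its slot and folded into the PCR image. The sketch below is a reconstruction under those assumptions; sparc_perf_event_update() and sparc_perf_event_set_period() are not visible in this listing.

/* Sketch: reassign counters after sparc_pmu_add() deferred the work. */
static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
{
	int i;

	if (!cpuc->n_added)		/* nothing changed while disabled */
		goto out;

	/* Drain counters whose events are about to move slots. */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];

		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
		    cpuc->current_idx[i] != cp->hw.idx) {
			sparc_perf_event_update(cp, &cp->hw,
						cpuc->current_idx[i]);
			cpuc->current_idx[i] = PIC_NO_INDEX;
		}
	}

	/* Bind every unassigned event to its counter and encode it. */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		int idx = cp->hw.idx;
		u64 enc;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		sparc_perf_event_set_period(cp, &cp->hw, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		pcr &= ~mask_for_index(idx);
		pcr |= event_encoding(enc, idx);
	}
out:
	return pcr;
}
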
719 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in sparc_pmu_enable() local
722 if (cpuc->enabled) in sparc_pmu_enable()
725 cpuc->enabled = 1; in sparc_pmu_enable()
728 pcr = cpuc->pcr; in sparc_pmu_enable()
729 if (!cpuc->n_events) { in sparc_pmu_enable()
732 pcr = maybe_change_configuration(cpuc, pcr); in sparc_pmu_enable()
738 cpuc->pcr = pcr | cpuc->event[0]->hw.config_base; in sparc_pmu_enable()
741 pcr_ops->write(cpuc->pcr); in sparc_pmu_enable()
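
A hedged sketch of the enable half of the pmu_enable/pmu_disable pairing suggested by the matches: nothing is written unless the PMU was actually disabled, and with no scheduled events the PCR image is simply zeroed. The config_base OR at line 738 implies all scheduled events share the same base configuration bits, so the first event's value is used.

/* Sketch reconstructed around the matched lines. */
static void sparc_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 pcr;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	pcr = cpuc->pcr;
	if (!cpuc->n_events) {
		pcr = 0;			/* nothing to count */
	} else {
		pcr = maybe_change_configuration(cpuc, pcr);

		/* All events share the same user/system settings, so the
		 * first event's config_base is representative. */
		cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
	}

	pcr_ops->write(cpuc->pcr);
}
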
746 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in sparc_pmu_disable() local
749 if (!cpuc->enabled) in sparc_pmu_disable()
752 cpuc->enabled = 0; in sparc_pmu_disable()
753 cpuc->n_added = 0; in sparc_pmu_disable()
755 val = cpuc->pcr; in sparc_pmu_disable()
758 cpuc->pcr = val; in sparc_pmu_disable()
760 pcr_ops->write(cpuc->pcr); in sparc_pmu_disable()
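
The disable half apparently clears the enable/trace bits from the cached PCR and resets n_added so the next enable re-runs the configuration step. Which bits are masked out (user/system trace, hypervisor, interrupt enable) is an assumption; the listing only shows the read-modify-write.

/* Sketch only: the exact bit mask cleared from %pcr is assumed. */
static void sparc_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;		/* force a reschedule on next enable */

	val = cpuc->pcr;
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);	/* assumed bits */
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}
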
763 static int active_event_index(struct cpu_hw_events *cpuc, in active_event_index() argument
768 for (i = 0; i < cpuc->n_events; i++) { in active_event_index()
769 if (cpuc->event[i] == event) in active_event_index()
772 BUG_ON(i == cpuc->n_events); in active_event_index()
773 return cpuc->current_idx[i]; in active_event_index()
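
This helper is almost fully visible in the matches: a linear scan of the per-CPU event[] array that maps a perf_event back to its current counter index, with a BUG_ON() if the event is not scheduled on this CPU.

static int active_event_index(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		if (cpuc->event[i] == event)
			break;
	}
	BUG_ON(i == cpuc->n_events);
	return cpuc->current_idx[i];
}
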
778 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in sparc_pmu_start() local
779 int idx = active_event_index(cpuc, event); in sparc_pmu_start()
788 sparc_pmu_enable_event(cpuc, &event->hw, idx); in sparc_pmu_start()
793 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in sparc_pmu_stop() local
794 int idx = active_event_index(cpuc, event); in sparc_pmu_stop()
797 sparc_pmu_disable_event(cpuc, &event->hw, idx); in sparc_pmu_stop()
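
sparc_pmu_start() and sparc_pmu_stop() both resolve the event to its counter slot via active_event_index() and then call the enable/disable helpers above. A hedged sketch; the PERF_EF_RELOAD/PERF_EF_UPDATE handling is assumed from the generic perf pmu callbacks rather than from this listing.

static void sparc_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);

	if (flags & PERF_EF_RELOAD)	/* assumed: re-arm the sample period */
		sparc_perf_event_set_period(event, &event->hw, idx);

	event->hw.state = 0;
	sparc_pmu_enable_event(cpuc, &event->hw, idx);
}

static void sparc_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		sparc_pmu_disable_event(cpuc, &event->hw, idx);
		event->hw.state |= PERF_HES_STOPPED;
	}
	if (flags & PERF_EF_UPDATE) {	/* assumed: fold in the final count */
		sparc_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}
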
809 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in sparc_pmu_del() local
816 for (i = 0; i < cpuc->n_events; i++) { in sparc_pmu_del()
817 if (event == cpuc->event[i]) { in sparc_pmu_del()
826 while (++i < cpuc->n_events) { in sparc_pmu_del()
827 cpuc->event[i - 1] = cpuc->event[i]; in sparc_pmu_del()
828 cpuc->events[i - 1] = cpuc->events[i]; in sparc_pmu_del()
829 cpuc->current_idx[i - 1] = in sparc_pmu_del()
830 cpuc->current_idx[i]; in sparc_pmu_del()
835 cpuc->n_events--; in sparc_pmu_del()
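
The del path scans for the event and then compacts the three parallel per-CPU arrays (event[], events[], current_idx[]) by shifting the remaining entries down one slot before decrementing n_events. A hedged reconstruction; the stop-and-update call and the locking around the loop are assumptions.

/* Sketch: remove 'event' from the per-CPU arrays, keeping them packed. */
static void sparc_pmu_del(struct perf_event *event, int _flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event[i]) {
			/* Fold in the final count and stop the counter. */
			sparc_pmu_stop(event, PERF_EF_UPDATE);

			/* Shift the remaining entries down into the freed
			 * slot so the arrays stay contiguous. */
			while (++i < cpuc->n_events) {
				cpuc->event[i - 1] = cpuc->event[i];
				cpuc->events[i - 1] = cpuc->events[i];
				cpuc->current_idx[i - 1] =
					cpuc->current_idx[i];
			}

			cpuc->n_events--;
			break;
		}
	}

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
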
846 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in sparc_pmu_read() local
847 int idx = active_event_index(cpuc, event); in sparc_pmu_read()
858 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in perf_stop_nmi_watchdog() local
861 cpuc->pcr = pcr_ops->read(); in perf_stop_nmi_watchdog()
1056 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in sparc_pmu_add() local
1063 n0 = cpuc->n_events; in sparc_pmu_add()
1067 cpuc->event[n0] = event; in sparc_pmu_add()
1068 cpuc->events[n0] = event->hw.event_base; in sparc_pmu_add()
1069 cpuc->current_idx[n0] = PIC_NO_INDEX; in sparc_pmu_add()
1080 if (cpuc->group_flag & PERF_EVENT_TXN) in sparc_pmu_add()
1083 if (check_excludes(cpuc->event, n0, 1)) in sparc_pmu_add()
1085 if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1)) in sparc_pmu_add()
1089 cpuc->n_events++; in sparc_pmu_add()
1090 cpuc->n_added++; in sparc_pmu_add()
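
The add path appends the event to slot n0 of the three parallel arrays with current_idx set to PIC_NO_INDEX, so maybe_change_configuration() will assign a real counter later, and it skips the schedulability check when a group transaction is open, deferring it to commit_txn. Hedged sketch; the MAX_HWEVENTS bound, the locking, and the return codes are assumptions.

/* Sketch: add an event to the per-CPU set without touching hardware. */
static int sparc_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n0, ret = -EAGAIN;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	n0 = cpuc->n_events;
	if (n0 >= MAX_HWEVENTS)		/* assumed capacity check */
		goto out;

	cpuc->event[n0] = event;
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;	/* counter assigned lazily */

	/* Inside a group transaction the constraint check is deferred to
	 * sparc_pmu_commit_txn(). */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuc->event, n0, 1))
		goto out;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
		goto out;

nocheck:
	cpuc->n_events++;
	cpuc->n_added++;	/* tells sparc_pmu_enable() to reconfigure */
	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}
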
1224 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in sparc_pmu_commit_txn() local
1230 cpuc = &__get_cpu_var(cpu_hw_events); in sparc_pmu_commit_txn()
1231 n = cpuc->n_events; in sparc_pmu_commit_txn()
1232 if (check_excludes(cpuc->event, 0, n)) in sparc_pmu_commit_txn()
1234 if (sparc_check_constraints(cpuc->event, cpuc->events, n)) in sparc_pmu_commit_txn()
1237 cpuc->group_flag &= ~PERF_EVENT_TXN; in sparc_pmu_commit_txn()
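
commit_txn validates the whole group at once: the exclude settings and counter constraints are checked over all n queued events, and only then is the transaction flag cleared. A sketch with assumed error codes and assumed re-enable of the pmu:

static int sparc_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n;

	n = cpuc->n_events;
	if (check_excludes(cpuc->event, 0, n))
		return -EINVAL;			/* assumed error code */
	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
		return -EAGAIN;			/* assumed error code */

	cpuc->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}
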
1284 struct cpu_hw_events *cpuc; in perf_event_nmi_handler() local
1303 cpuc = &__get_cpu_var(cpu_hw_events); in perf_event_nmi_handler()
1313 pcr_ops->write(cpuc->pcr); in perf_event_nmi_handler()
1315 for (i = 0; i < cpuc->n_events; i++) { in perf_event_nmi_handler()
1316 struct perf_event *event = cpuc->event[i]; in perf_event_nmi_handler()
1317 int idx = cpuc->current_idx[i]; in perf_event_nmi_handler()
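
Finally, the NMI handler re-writes the cached PCR (apparently to acknowledge the counter-overflow interrupt) and then walks every scheduled event, reading the hardware count and pushing an overflow sample where one occurred. The sketch below is heavily abridged, and the overflow-detection and sampling details are assumptions; notifier registration, the DIE_NMI check, and the active_events test are omitted.

/* Abridged sketch of the body of perf_event_nmi_handler(). */
static int perf_event_nmi_handler(struct notifier_block *self,
				  unsigned long cmd, void *__args)
{
	struct pt_regs *regs = ((struct die_args *)__args)->regs;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int i;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/* Dummy %pcr write, assumed to ack the counter-overflow interrupt. */
	pcr_ops->write(cpuc->pcr);

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *event = cpuc->event[i];
		int idx = cpuc->current_idx[i];
		struct hw_perf_event *hwc = &event->hw;
		u64 val;

		val = sparc_perf_event_update(event, hwc, idx);
		if (!counter_overflowed(val))	/* assumed helper */
			continue;

		perf_sample_data_init(&data, 0);
		data.period = hwc->last_period;

		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			sparc_pmu_stop(event, 0);
	}

	return NOTIFY_STOP;
}
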