
Lines Matching refs:cpuc
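
For orientation, here is a sketch of the cpu_hw_events fields the matches below touch. It is reconstructed from the references themselves plus mainline kernels of the same era (roughly v3.x); the field ordering and the X86_PMC_IDX_MAX bound are assumptions, only the referenced members are shown (the real structure also carries BTS/PEBS, LBR, and AMD northbridge state), and the authoritative definition lives in arch/x86/kernel/cpu/perf_event.{c,h} depending on revision:

struct cpu_hw_events {
	struct perf_event	*events[X86_PMC_IDX_MAX];	/* counter index -> event (line 1067) */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; /* counters in use */
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; /* catches in-flight NMIs (line 1210) */
	int			enabled;			/* PMU globally enabled on this CPU */

	int			n_events;			/* events in event_list[] */
	int			n_added;			/* added since the last enable */
	int			n_txn;				/* added inside the current transaction */
	int			assign[X86_PMC_IDX_MAX];	/* event -> counter assignment */
	u64			tags[X86_PMC_IDX_MAX];		/* generation tags for counter reuse */
	struct perf_event	*event_list[X86_PMC_IDX_MAX];	/* enabled events, in enable order */

	unsigned int		group_flag;			/* carries PERF_EVENT_TXN */
	struct intel_shared_regs *shared_regs;			/* extra regs shared between HT siblings */
	void			*kfree_on_online;		/* deferred free, released at CPU_ONLINE */
};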

497 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in x86_pmu_disable_all() local
503 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_disable_all()
515 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in x86_pmu_disable() local
520 if (!cpuc->enabled) in x86_pmu_disable()
523 cpuc->n_added = 0; in x86_pmu_disable()
524 cpuc->enabled = 0; in x86_pmu_disable()
532 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in x86_pmu_enable_all() local
536 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; in x86_pmu_enable_all()
538 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_enable_all()
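
Lines 497-538 are the global disable/enable paths: each walks the hardware counters and skips any index not set in cpuc->active_mask, and x86_pmu_disable() additionally resets the enabled/n_added bookkeeping before handing off to the model-specific disable_all. A sketch of the disable side, reconstructed from mainline kernels of that era (the MSR helper names are assumptions tied to that era and may differ by revision):

void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		/* skip counters that have no active event */
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		rdmsrl(x86_pmu_config_addr(idx), val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu_config_addr(idx), val);
	}
}

static void x86_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;
	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}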
727 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) in x86_schedule_events() argument
737 c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]); in x86_schedule_events()
747 hwc = &cpuc->event_list[i]->hw; in x86_schedule_events()
778 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]); in x86_schedule_events()
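
Lines 727-778 are the counter scheduler. The constraint bracketing is visible in the matches: get_event_constraints() is called once per collected event, and put_event_constraints() releases the constraints again if scheduling fails or was only a dry run. A reconstruction from mainline kernels of that era; the exact revision behind this listing may differ in detail:

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
		constraints[i] = c;
	}

	/* fastpath: try to reuse the previous counter assignments */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		if (hwc->idx == -1)			/* never assigned */
			break;
		if (!test_bit(hwc->idx, c->idxmsk))	/* constraint no longer honored */
			break;
		if (test_bit(hwc->idx, used_mask))	/* counter already taken */
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/* slowpath: place the most constrained (lowest-weight) events first */
	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
	wmax = x86_pmu.num_counters;
	if (x86_pmu.num_counters_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}
			if (j == X86_PMC_IDX_MAX)
				break;

			__set_bit(j, used_mask);
			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/* scheduling failed, or this was only a simulation: release constraints */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -EINVAL : 0;
}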
788 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp) in collect_events() argument
796 n = cpuc->n_events; in collect_events()
801 cpuc->event_list[n] = leader; in collect_events()
815 cpuc->event_list[n] = event; in collect_events()
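
Lines 788-815 collect events into cpuc->event_list[]: the leader is appended first, then (when dogrp is set) its siblings, with a bounds check against the total counter count. A sketch under the same caveat:

static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	n = cpuc->n_events;	/* events already accepted on this CPU */

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -EINVAL;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF)
			continue;
		if (n >= max_count)
			return -EINVAL;
		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}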
822 struct cpu_hw_events *cpuc, int i) in x86_assign_hw_event() argument
826 hwc->idx = cpuc->assign[i]; in x86_assign_hw_event()
828 hwc->last_tag = ++cpuc->tags[i]; in x86_assign_hw_event()
843 struct cpu_hw_events *cpuc, in match_prev_assignment() argument
846 return hwc->idx == cpuc->assign[i] && in match_prev_assignment()
848 hwc->last_tag == cpuc->tags[i]; in match_prev_assignment()
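
Lines 822-848 implement the counter-reuse check: x86_assign_hw_event() records which counter an event received along with a per-slot generation tag, and match_prev_assignment() later tells x86_pmu_enable() whether the old programming is still valid. The middle condition (last_cpu) does not mention cpuc, so it is absent from the matches above and is reconstructed here:

static void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];
	/* the rest of the function derives config_base/event_base from hwc->idx */
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}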
855 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in x86_pmu_enable() local
858 int i, added = cpuc->n_added; in x86_pmu_enable()
863 if (cpuc->enabled) in x86_pmu_enable()
866 if (cpuc->n_added) { in x86_pmu_enable()
867 int n_running = cpuc->n_events - cpuc->n_added; in x86_pmu_enable()
876 event = cpuc->event_list[i]; in x86_pmu_enable()
886 match_prev_assignment(hwc, cpuc, i)) in x86_pmu_enable()
899 for (i = 0; i < cpuc->n_events; i++) { in x86_pmu_enable()
900 event = cpuc->event_list[i]; in x86_pmu_enable()
903 if (!match_prev_assignment(hwc, cpuc, i)) in x86_pmu_enable()
904 x86_assign_hw_event(event, cpuc, i); in x86_pmu_enable()
913 cpuc->n_added = 0; in x86_pmu_enable()
917 cpuc->enabled = 1; in x86_pmu_enable()
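
Lines 855-917 are x86_pmu_enable(), which applies a new schedule in two passes: pass one stops every already-running event whose counter assignment changed, pass two (re)programs and starts events on their new counters, and only then is the PMU globally re-enabled. Reconstructed under the same caveat:

static void x86_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;
	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;

		/* step 1: stop events whose assignment changed */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * Reprogramming can be avoided if the event kept the
			 * same counter, on the same CPU, and nobody else used
			 * that counter in between (the tag still matches).
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			/* don't re-enable a counter that was stopped on purpose */
			if (hwc->state & PERF_HES_STOPPED)
				hwc->state |= PERF_HES_ARCH;

			x86_pmu_stop(event, PERF_EF_UPDATE);
		}

		/* step 2: program and start events on their new counters */
		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			if (hwc->state & PERF_HES_ARCH)
				continue;

			x86_pmu_start(event, PERF_EF_RELOAD);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all(added);
}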
1004 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in x86_pmu_add() local
1012 n0 = cpuc->n_events; in x86_pmu_add()
1013 ret = n = collect_events(cpuc, event, false); in x86_pmu_add()
1026 if (cpuc->group_flag & PERF_EVENT_TXN) in x86_pmu_add()
1029 ret = x86_pmu.schedule_events(cpuc, n, assign); in x86_pmu_add()
1036 memcpy(cpuc->assign, assign, n*sizeof(int)); in x86_pmu_add()
1039 cpuc->n_events = n; in x86_pmu_add()
1040 cpuc->n_added += n - n0; in x86_pmu_add()
1041 cpuc->n_txn += n - n0; in x86_pmu_add()
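
Lines 1004-1041 add an event (or group) to the CPU context: collect into event_list[], dry-run the scheduler unless inside a transaction (where validation is deferred to commit), then update the n_events/n_added/n_txn counts. Sketch, same caveat:

static int x86_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	perf_pmu_disable(event->pmu);

	n0 = cpuc->n_events;
	ret = n = collect_events(cpuc, event, false);
	if (ret < 0)
		goto out;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	/*
	 * Inside a transaction the events are only collected here;
	 * scheduling is validated once, at commit time.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto done_collect;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		goto out;
	memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
	cpuc->n_events = n;
	cpuc->n_added += n - n0;
	cpuc->n_txn += n - n0;

	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	return ret;
}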
1051 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in x86_pmu_start() local
1067 cpuc->events[idx] = event; in x86_pmu_start()
1068 __set_bit(idx, cpuc->active_mask); in x86_pmu_start()
1069 __set_bit(idx, cpuc->running); in x86_pmu_start()
1078 struct cpu_hw_events *cpuc; in perf_event_print_debug() local
1088 cpuc = &per_cpu(cpu_hw_events, cpu); in perf_event_print_debug()
1104 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); in perf_event_print_debug()
1130 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in x86_pmu_stop() local
1133 if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { in x86_pmu_stop()
1135 cpuc->events[hwc->idx] = NULL; in x86_pmu_stop()
1152 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in x86_pmu_del() local
1160 if (cpuc->group_flag & PERF_EVENT_TXN) in x86_pmu_del()
1165 for (i = 0; i < cpuc->n_events; i++) { in x86_pmu_del()
1166 if (event == cpuc->event_list[i]) { in x86_pmu_del()
1169 x86_pmu.put_event_constraints(cpuc, event); in x86_pmu_del()
1171 while (++i < cpuc->n_events) in x86_pmu_del()
1172 cpuc->event_list[i-1] = cpuc->event_list[i]; in x86_pmu_del()
1174 --cpuc->n_events; in x86_pmu_del()
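
Lines 1152-1174 remove an event: inside a transaction the removal is deferred, otherwise the event is stopped, its constraints are released, and event_list[] is compacted by shifting the tail down one slot. Sketch, same caveat:

static void x86_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	/* inside a transaction the whole txn is torn down on cancel */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		return;

	x86_pmu_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			/* shift the remaining events down to fill the hole */
			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}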
1184 struct cpu_hw_events *cpuc; in x86_pmu_handle_irq() local
1191 cpuc = &__get_cpu_var(cpu_hw_events); in x86_pmu_handle_irq()
1204 if (!test_bit(idx, cpuc->active_mask)) { in x86_pmu_handle_irq()
1210 if (__test_and_clear_bit(idx, cpuc->running)) in x86_pmu_handle_irq()
1215 event = cpuc->events[idx]; in x86_pmu_handle_irq()
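
Lines 1184-1215 are the NMI handler loop. The cpuc->running bitmap exists for exactly the case visible at line 1210: a counter was just deactivated but its interrupt was already in flight, and claiming it keeps the NMI from being reported as unknown. A sketch of the loop body, same caveat:

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask)) {
			/*
			 * The counter was deactivated, but an interrupt may
			 * still be in flight; catch it so the NMI is not
			 * flagged as unknown.
			 */
			if (__test_and_clear_bit(idx, cpuc->running))
				handled++;
			continue;
		}

		event = cpuc->events[idx];
		/* read the counter; on overflow, run perf_event_overflow()
		 * and stop the event if the handler asks for it */
	}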
1267 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in x86_pmu_notifier() local
1272 cpuc->kfree_on_online = NULL; in x86_pmu_notifier()
1285 kfree(cpuc->kfree_on_online); in x86_pmu_notifier()
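
Lines 1267-1285 are the CPU hotplug notifier. kfree_on_online is a deferred-free slot: the CPU_STARTING callbacks run with interrupts off, where kfree() is unsafe, so the slot is cleared at CPU_UP_PREPARE, memory to discard is parked there during CPU_STARTING, and it is freed once the CPU reaches CPU_ONLINE. A sketch of the two cases grounded in the matches above; the remaining cases of the switch are omitted:

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		cpuc->kfree_on_online = NULL;
		if (x86_pmu.cpu_prepare)
			ret = x86_pmu.cpu_prepare(cpu);
		break;

	case CPU_ONLINE:
		/* safe to free what CPU_STARTING parked here */
		kfree(cpuc->kfree_on_online);
		break;
	}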
1452 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); in x86_pmu_commit_txn() local
1456 n = cpuc->n_events; in x86_pmu_commit_txn()
1461 ret = x86_pmu.schedule_events(cpuc, n, assign); in x86_pmu_commit_txn()
1469 memcpy(cpuc->assign, assign, n*sizeof(int)); in x86_pmu_commit_txn()
1471 cpuc->group_flag &= ~PERF_EVENT_TXN; in x86_pmu_commit_txn()
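
Lines 1452-1471 commit a transaction: the scheduler runs once over everything collected while the transaction was open, and only on success is the assignment copied and the TXN flag cleared. Sketch, same caveat:

static int x86_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int assign[X86_PMC_IDX_MAX];
	int n, ret;

	n = cpuc->n_events;

	if (!x86_pmu_initialized())
		return -EAGAIN;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		return ret;

	/* copy the validated assignment; x86_pmu_enable() will apply it */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}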
1483 static void free_fake_cpuc(struct cpu_hw_events *cpuc) in free_fake_cpuc() argument
1485 kfree(cpuc->shared_regs); in free_fake_cpuc()
1486 kfree(cpuc); in free_fake_cpuc()
1491 struct cpu_hw_events *cpuc; in allocate_fake_cpuc() local
1494 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL); in allocate_fake_cpuc()
1495 if (!cpuc) in allocate_fake_cpuc()
1500 cpuc->shared_regs = allocate_shared_regs(cpu); in allocate_fake_cpuc()
1501 if (!cpuc->shared_regs) in allocate_fake_cpuc()
1504 return cpuc; in allocate_fake_cpuc()
1506 free_fake_cpuc(cpuc); in allocate_fake_cpuc()
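
Lines 1483-1506 build a throwaway cpu_hw_events used to validate new event groups against the scheduler without touching a live CPU context. Sketch, same caveat (the extra_regs guard does not mention cpuc and so is absent from the matches; it is reconstructed here):

static void free_fake_cpuc(struct cpu_hw_events *cpuc)
{
	kfree(cpuc->shared_regs);
	kfree(cpuc);
}

static struct cpu_hw_events *allocate_fake_cpuc(void)
{
	struct cpu_hw_events *cpuc;
	int cpu = raw_smp_processor_id();

	cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
	if (!cpuc)
		return ERR_PTR(-ENOMEM);

	/* only needed when the PMU has extra (shared) registers */
	if (x86_pmu.extra_regs) {
		cpuc->shared_regs = allocate_shared_regs(cpu);
		if (!cpuc->shared_regs)
			goto error;
	}
	return cpuc;

error:
	free_fake_cpuc(cpuc);
	return ERR_PTR(-ENOMEM);
}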