Lines Matching refs:event
169 static bool is_kernel_event(struct perf_event *event) in is_kernel_event() argument
171 return READ_ONCE(event->owner) == TASK_TOMBSTONE; in is_kernel_event()
197 struct perf_event *event; member
205 struct perf_event *event = efs->event; in event_function() local
206 struct perf_event_context *ctx = event->ctx; in event_function()
241 efs->func(event, cpuctx, ctx, efs->data); in event_function()
248 static void event_function_call(struct perf_event *event, event_f func, void *data) in event_function_call() argument
250 struct perf_event_context *ctx = event->ctx; in event_function_call()
253 .event = event, in event_function_call()
258 if (!event->parent) { in event_function_call()
268 cpu_function_call(event->cpu, event_function, &efs); in event_function_call()
293 func(event, NULL, ctx, data); in event_function_call()
301 static void event_function_local(struct perf_event *event, event_f func, void *data) in event_function_local() argument
303 struct perf_event_context *ctx = event->ctx; in event_function_local()
340 func(event, cpuctx, ctx, data); in event_function_local()
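
The event_function() / event_function_call() / event_function_local() lines above all follow one dispatch pattern: bundle the event, a callback, and opaque data into a struct, then run the callback on whichever CPU currently owns the event's context. A minimal user-space sketch of that pattern, with simplified stand-in types (the kernel's event_f also receives a cpu context argument, and the remote invocation goes through cpu_function_call() or task_function_call()):

#include <stdio.h>

struct ctx { int cpu; };
struct event { struct ctx *ctx; };

typedef void (*event_f)(struct event *ev, struct ctx *ctx, void *data);

struct event_function_struct {
	struct event *event;
	event_f func;
	void *data;
};

static void show(struct event *ev, struct ctx *ctx, void *data)
{
	(void)ev;
	printf("callback for ctx on cpu %d, data=%s\n", ctx->cpu, (char *)data);
}

int main(void)
{
	struct ctx c = { .cpu = 0 };
	struct event ev = { .ctx = &c };
	struct event_function_struct efs = { &ev, show, "payload" };

	/* the kernel ships &efs to the owning CPU; call directly here */
	efs.func(efs.event, efs.event->ctx, efs.data);
	return 0;
}
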
567 static u64 perf_event_time(struct perf_event *event);
581 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
583 return event->clock(); in perf_event_clock()
609 __perf_effective_state(struct perf_event *event) in __perf_effective_state() argument
611 struct perf_event *leader = event->group_leader; in __perf_effective_state()
616 return event->state; in __perf_effective_state()
620 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
622 enum perf_event_state state = __perf_effective_state(event); in __perf_update_times()
623 u64 delta = now - event->tstamp; in __perf_update_times()
625 *enabled = event->total_time_enabled; in __perf_update_times()
629 *running = event->total_time_running; in __perf_update_times()
634 static void perf_event_update_time(struct perf_event *event) in perf_event_update_time() argument
636 u64 now = perf_event_time(event); in perf_event_update_time()
638 __perf_update_times(event, now, &event->total_time_enabled, in perf_event_update_time()
639 &event->total_time_running); in perf_event_update_time()
640 event->tstamp = now; in perf_event_update_time()
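
A sketch of the accounting performed by __perf_update_times() and perf_event_update_time() above, with a simplified event struct whose state values mirror the kernel's ordering (INACTIVE is 0, ACTIVE is 1): enabled time accrues whenever the effective state is at least INACTIVE, running time only while ACTIVE, both as deltas from the last stored timestamp, which is then advanced.

#include <stdint.h>

enum state { STATE_OFF = -1, STATE_INACTIVE = 0, STATE_ACTIVE = 1 };

struct ev {
	enum state state;
	uint64_t tstamp;
	uint64_t total_time_enabled;
	uint64_t total_time_running;
};

static void update_time(struct ev *e, uint64_t now)
{
	uint64_t delta = now - e->tstamp;

	if (e->state >= STATE_INACTIVE)		/* enabled: INACTIVE or ACTIVE */
		e->total_time_enabled += delta;
	if (e->state == STATE_ACTIVE)		/* running: ACTIVE only */
		e->total_time_running += delta;
	e->tstamp = now;
}
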
652 perf_event_set_state(struct perf_event *event, enum perf_event_state state) in perf_event_set_state() argument
654 if (event->state == state) in perf_event_set_state()
657 perf_event_update_time(event); in perf_event_set_state()
662 if ((event->state < 0) ^ (state < 0)) in perf_event_set_state()
663 perf_event_update_sibling_time(event); in perf_event_set_state()
665 WRITE_ONCE(event->state, state); in perf_event_set_state()
671 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
673 struct perf_event_context *ctx = event->ctx; in perf_cgroup_match()
677 if (!event->cgrp) in perf_cgroup_match()
691 event->cgrp->css.cgroup); in perf_cgroup_match()
694 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
696 css_put(&event->cgrp->css); in perf_detach_cgroup()
697 event->cgrp = NULL; in perf_detach_cgroup()
700 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
702 return event->cgrp != NULL; in is_cgroup_event()
705 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
709 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
739 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
747 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
750 cgrp = perf_cgroup_from_task(current, event->ctx); in update_cgrp_time_from_event()
754 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) in update_cgrp_time_from_event()
755 __update_cgrp_time(event->cgrp); in update_cgrp_time_from_event()
894 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
914 event->cgrp = cgrp; in perf_cgroup_connect()
922 perf_detach_cgroup(event); in perf_cgroup_connect()
931 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) in perf_cgroup_set_shadow_time() argument
934 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_set_shadow_time()
935 event->shadow_ctx_time = now - t->timestamp; in perf_cgroup_set_shadow_time()
943 list_update_cgroup_event(struct perf_event *event, in list_update_cgroup_event() argument
949 if (!is_cgroup_event(event)) in list_update_cgroup_event()
967 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) in list_update_cgroup_event()
990 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
995 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
998 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
1003 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
1021 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
1040 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) in perf_cgroup_set_shadow_time() argument
1044 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
1050 list_update_cgroup_event(struct perf_event *event, in list_update_cgroup_event() argument
1265 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
1271 ctx = READ_ONCE(event->ctx); in perf_event_ctx_lock_nested()
1279 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
1289 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
1291 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
1294 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
1320 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, in perf_event_pid_type() argument
1327 if (event->parent) in perf_event_pid_type()
1328 event = event->parent; in perf_event_pid_type()
1330 nr = __task_pid_nr_ns(p, type, event->ns); in perf_event_pid_type()
1337 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1339 return perf_event_pid_type(event, p, PIDTYPE_TGID); in perf_event_pid()
1342 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1344 return perf_event_pid_type(event, p, PIDTYPE_PID); in perf_event_tid()
1351 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1353 u64 id = event->id; in primary_event_id()
1355 if (event->parent) in primary_event_id()
1356 id = event->parent->id; in primary_event_id()
1457 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1459 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1461 if (is_cgroup_event(event)) in perf_event_time()
1462 return perf_cgroup_event_time(event); in perf_event_time()
1467 static enum event_type_t get_event_type(struct perf_event *event) in get_event_type() argument
1469 struct perf_event_context *ctx = event->ctx; in get_event_type()
1478 if (event->group_leader != event) in get_event_type()
1479 event = event->group_leader; in get_event_type()
1481 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; in get_event_type()
1491 static void init_event_group(struct perf_event *event) in init_event_group() argument
1493 RB_CLEAR_NODE(&event->group_node); in init_event_group()
1494 event->group_index = 0; in init_event_group()
1502 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) in get_event_groups() argument
1504 if (event->attr.pinned) in get_event_groups()
1548 struct perf_event *event) in perf_event_groups_insert() argument
1554 event->group_index = ++groups->index; in perf_event_groups_insert()
1563 if (perf_event_groups_less(event, node_event)) in perf_event_groups_insert()
1569 rb_link_node(&event->group_node, parent, node); in perf_event_groups_insert()
1570 rb_insert_color(&event->group_node, &groups->tree); in perf_event_groups_insert()
1577 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) in add_event_to_groups() argument
1581 groups = get_event_groups(event, ctx); in add_event_to_groups()
1582 perf_event_groups_insert(groups, event); in add_event_to_groups()
1590 struct perf_event *event) in perf_event_groups_delete() argument
1592 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || in perf_event_groups_delete()
1595 rb_erase(&event->group_node, &groups->tree); in perf_event_groups_delete()
1596 init_event_group(event); in perf_event_groups_delete()
1603 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) in del_event_from_groups() argument
1607 groups = get_event_groups(event, ctx); in del_event_from_groups()
1608 perf_event_groups_delete(groups, event); in del_event_from_groups()
1640 perf_event_groups_next(struct perf_event *event) in perf_event_groups_next() argument
1644 next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node); in perf_event_groups_next()
1645 if (next && next->cpu == event->cpu) in perf_event_groups_next()
1654 #define perf_event_groups_for_each(event, groups) \ argument
1655 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1656 typeof(*event), group_node); event; \
1657 event = rb_entry_safe(rb_next(&event->group_node), \
1658 typeof(*event), group_node))
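
The group-tree operations above hinge on a two-level sort key: events order first by CPU, then by the monotonically increasing group_index assigned at insertion (the ++groups->index line), so each CPU's events form a contiguous, age-ordered range of the rb-tree; this is what lets perf_event_groups_next() stop as soon as the neighbour's cpu differs. A sketch of the comparator with stand-in types:

#include <stdbool.h>
#include <stdint.h>

struct gev { int cpu; uint64_t group_index; };

static bool groups_less(const struct gev *left, const struct gev *right)
{
	if (left->cpu != right->cpu)
		return left->cpu < right->cpu;
	return left->group_index < right->group_index;	/* older first */
}
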
1665 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1669 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1670 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1672 event->tstamp = perf_event_time(event); in list_add_event()
1679 if (event->group_leader == event) { in list_add_event()
1680 event->group_caps = event->event_caps; in list_add_event()
1681 add_event_to_groups(event, ctx); in list_add_event()
1684 list_update_cgroup_event(event, ctx, true); in list_add_event()
1686 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1688 if (event->attr.inherit_stat) in list_add_event()
1697 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1699 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1703 static void __perf_event_read_size(struct perf_event *event, int nr_siblings) in __perf_event_read_size() argument
1709 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) in __perf_event_read_size()
1712 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) in __perf_event_read_size()
1715 if (event->attr.read_format & PERF_FORMAT_ID) in __perf_event_read_size()
1718 if (event->attr.read_format & PERF_FORMAT_GROUP) { in __perf_event_read_size()
1724 event->read_size = size; in __perf_event_read_size()
1727 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) in __perf_event_header_size() argument
1745 size += event->read_size; in __perf_event_header_size()
1756 event->header_size = size; in __perf_event_header_size()
1763 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
1765 __perf_event_read_size(event, in perf_event__header_size()
1766 event->group_leader->nr_siblings); in perf_event__header_size()
1767 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
1770 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
1773 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
1794 event->id_header_size = size; in perf_event__id_header_size()
1797 static bool perf_event_validate_size(struct perf_event *event) in perf_event_validate_size() argument
1803 __perf_event_read_size(event, event->group_leader->nr_siblings + 1); in perf_event_validate_size()
1804 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); in perf_event_validate_size()
1805 perf_event__id_header_size(event); in perf_event_validate_size()
1811 if (event->read_size + event->header_size + in perf_event_validate_size()
1812 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) in perf_event_validate_size()
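
read_size here is the byte count a read() on the event will return, and perf_event_validate_size() rejects configurations whose combined record sizes reach 16KiB. A sketch of the arithmetic, using the real PERF_FORMAT_* bits from the UAPI header: each counter contributes one value, optionally paired with an id, and the group format prepends a counter count and multiplies the per-entry cost by the leader plus its siblings.

#include <stdint.h>
#include <linux/perf_event.h>

static uint64_t read_size(uint64_t read_format, int nr_siblings)
{
	int entry = sizeof(uint64_t);		/* the counter value itself */
	int size = 0;
	int nr = 1;				/* the leader */

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(uint64_t);	/* the leading nr field */
	}
	return size + (uint64_t)entry * nr;
}
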
1818 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
1820 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
1822 lockdep_assert_held(&event->ctx->lock); in perf_group_attach()
1827 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
1830 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
1832 if (group_leader == event) in perf_group_attach()
1835 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
1837 group_leader->group_caps &= event->event_caps; in perf_group_attach()
1839 list_add_tail(&event->sibling_list, &group_leader->sibling_list); in perf_group_attach()
1853 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
1855 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
1861 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
1864 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
1866 list_update_cgroup_event(event, ctx, false); in list_del_event()
1869 if (event->attr.inherit_stat) in list_del_event()
1872 list_del_rcu(&event->event_entry); in list_del_event()
1874 if (event->group_leader == event) in list_del_event()
1875 del_event_from_groups(event, ctx); in list_del_event()
1884 if (event->state > PERF_EVENT_STATE_OFF) in list_del_event()
1885 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in list_del_event()
1891 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) in perf_aux_output_match() argument
1896 if (!event->pmu->aux_output_match) in perf_aux_output_match()
1899 return event->pmu->aux_output_match(aux_event); in perf_aux_output_match()
1902 static void put_event(struct perf_event *event);
1903 static void event_sched_out(struct perf_event *event,
1907 static void perf_put_aux_event(struct perf_event *event) in perf_put_aux_event() argument
1909 struct perf_event_context *ctx = event->ctx; in perf_put_aux_event()
1916 if (event->aux_event) { in perf_put_aux_event()
1917 iter = event->aux_event; in perf_put_aux_event()
1918 event->aux_event = NULL; in perf_put_aux_event()
1927 for_each_sibling_event(iter, event->group_leader) { in perf_put_aux_event()
1928 if (iter->aux_event != event) in perf_put_aux_event()
1932 put_event(event); in perf_put_aux_event()
1940 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_put_aux_event()
1944 static int perf_get_aux_event(struct perf_event *event, in perf_get_aux_event() argument
1956 if (!perf_aux_output_match(event, group_leader)) in perf_get_aux_event()
1968 event->aux_event = group_leader; in perf_get_aux_event()
1973 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
1976 struct perf_event_context *ctx = event->ctx; in perf_group_detach()
1983 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
1986 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
1988 perf_put_aux_event(event); in perf_group_detach()
1993 if (event->group_leader != event) { in perf_group_detach()
1994 list_del_init(&event->sibling_list); in perf_group_detach()
1995 event->group_leader->nr_siblings--; in perf_group_detach()
2004 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { in perf_group_detach()
2010 sibling->group_caps = event->group_caps; in perf_group_detach()
2012 if (!RB_EMPTY_NODE(&event->group_node)) { in perf_group_detach()
2013 add_event_to_groups(sibling, event->ctx); in perf_group_detach()
2023 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
2027 perf_event__header_size(event->group_leader); in perf_group_detach()
2029 for_each_sibling_event(tmp, event->group_leader) in perf_group_detach()
2033 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
2035 return event->state == PERF_EVENT_STATE_DEAD; in is_orphaned_event()
2038 static inline int __pmu_filter_match(struct perf_event *event) in __pmu_filter_match() argument
2040 struct pmu *pmu = event->pmu; in __pmu_filter_match()
2041 return pmu->filter_match ? pmu->filter_match(event) : 1; in __pmu_filter_match()
2050 static inline int pmu_filter_match(struct perf_event *event) in pmu_filter_match() argument
2054 if (!__pmu_filter_match(event)) in pmu_filter_match()
2057 for_each_sibling_event(sibling, event) { in pmu_filter_match()
2066 event_filter_match(struct perf_event *event) in event_filter_match() argument
2068 return (event->cpu == -1 || event->cpu == smp_processor_id()) && in event_filter_match()
2069 perf_cgroup_match(event) && pmu_filter_match(event); in event_filter_match()
2073 event_sched_out(struct perf_event *event, in event_sched_out() argument
2079 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
2082 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
2090 list_del_init(&event->active_list); in event_sched_out()
2092 perf_pmu_disable(event->pmu); in event_sched_out()
2094 event->pmu->del(event, 0); in event_sched_out()
2095 event->oncpu = -1; in event_sched_out()
2097 if (READ_ONCE(event->pending_disable) >= 0) { in event_sched_out()
2098 WRITE_ONCE(event->pending_disable, -1); in event_sched_out()
2101 perf_event_set_state(event, state); in event_sched_out()
2103 if (!is_software_event(event)) in event_sched_out()
2107 if (event->attr.freq && event->attr.sample_freq) in event_sched_out()
2109 if (event->attr.exclusive || !cpuctx->active_oncpu) in event_sched_out()
2112 perf_pmu_enable(event->pmu); in event_sched_out()
2120 struct perf_event *event; in group_sched_out() local
2132 for_each_sibling_event(event, group_event) in group_sched_out()
2133 event_sched_out(event, cpuctx, ctx); in group_sched_out()
2150 __perf_remove_from_context(struct perf_event *event, in __perf_remove_from_context() argument
2162 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
2164 perf_group_detach(event); in __perf_remove_from_context()
2165 list_del_event(event, ctx); in __perf_remove_from_context()
2186 static void perf_remove_from_context(struct perf_event *event, unsigned long flags) in perf_remove_from_context() argument
2188 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
2192 event_function_call(event, __perf_remove_from_context, (void *)flags); in perf_remove_from_context()
2200 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in perf_remove_from_context()
2202 (event->attach_state & PERF_ATTACH_GROUP)) { in perf_remove_from_context()
2208 perf_group_detach(event); in perf_remove_from_context()
2216 static void __perf_event_disable(struct perf_event *event, in __perf_event_disable() argument
2221 if (event->state < PERF_EVENT_STATE_INACTIVE) in __perf_event_disable()
2226 update_cgrp_time_from_event(event); in __perf_event_disable()
2229 if (event == event->group_leader) in __perf_event_disable()
2230 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2232 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2234 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in __perf_event_disable()
2251 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
2253 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
2256 if (event->state <= PERF_EVENT_STATE_OFF) { in _perf_event_disable()
2262 event_function_call(event, __perf_event_disable, NULL); in _perf_event_disable()
2265 void perf_event_disable_local(struct perf_event *event) in perf_event_disable_local() argument
2267 event_function_local(event, __perf_event_disable, NULL); in perf_event_disable_local()
2274 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
2278 ctx = perf_event_ctx_lock(event); in perf_event_disable()
2279 _perf_event_disable(event); in perf_event_disable()
2280 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
2284 void perf_event_disable_inatomic(struct perf_event *event) in perf_event_disable_inatomic() argument
2286 WRITE_ONCE(event->pending_disable, smp_processor_id()); in perf_event_disable_inatomic()
2288 irq_work_queue(&event->pending); in perf_event_disable_inatomic()
2291 static void perf_set_shadow_time(struct perf_event *event, in perf_set_shadow_time() argument
2319 if (is_cgroup_event(event)) in perf_set_shadow_time()
2320 perf_cgroup_set_shadow_time(event, event->tstamp); in perf_set_shadow_time()
2322 event->shadow_ctx_time = event->tstamp - ctx->timestamp; in perf_set_shadow_time()
2327 static void perf_log_throttle(struct perf_event *event, int enable);
2328 static void perf_log_itrace_start(struct perf_event *event);
2331 event_sched_in(struct perf_event *event, in event_sched_in() argument
2339 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
2342 WRITE_ONCE(event->oncpu, smp_processor_id()); in event_sched_in()
2349 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); in event_sched_in()
2356 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { in event_sched_in()
2357 perf_log_throttle(event, 1); in event_sched_in()
2358 event->hw.interrupts = 0; in event_sched_in()
2361 perf_pmu_disable(event->pmu); in event_sched_in()
2363 perf_set_shadow_time(event, ctx); in event_sched_in()
2365 perf_log_itrace_start(event); in event_sched_in()
2367 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
2368 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_sched_in()
2369 event->oncpu = -1; in event_sched_in()
2374 if (!is_software_event(event)) in event_sched_in()
2378 if (event->attr.freq && event->attr.sample_freq) in event_sched_in()
2381 if (event->attr.exclusive) in event_sched_in()
2385 perf_pmu_enable(event->pmu); in event_sched_in()
2395 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
2412 for_each_sibling_event(event, group_event) { in group_sched_in()
2413 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
2414 partial_group = event; in group_sched_in()
2428 for_each_sibling_event(event, group_event) { in group_sched_in()
2429 if (event == partial_group) in group_sched_in()
2432 event_sched_out(event, cpuctx, ctx); in group_sched_in()
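
group_sched_in() above is all-or-nothing: the leader and every sibling must start, and the first failure is remembered as partial_group so the rollback pass stops exactly the members that had already started. A minimal self-contained sketch of that pattern; member_start() and member_stop() are hypothetical helpers standing in for event_sched_in()/event_sched_out():

struct member { int fails; int started; };

static int member_start(struct member *m)
{
	if (m->fails)
		return -1;
	m->started = 1;
	return 0;
}

static void member_stop(struct member *m)
{
	m->started = 0;
}

static int group_start(struct member **members, int n)
{
	int i, failed = n;

	for (i = 0; i < n; i++) {
		if (member_start(members[i])) {
			failed = i;	/* this one never started */
			break;
		}
	}
	if (failed == n)
		return 0;

	for (i = 0; i < failed; i++)	/* undo the ones that did start */
		member_stop(members[i]);
	return -1;
}
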
2446 static int group_can_go_on(struct perf_event *event, in group_can_go_on() argument
2453 if (event->group_caps & PERF_EV_CAP_SOFTWARE) in group_can_go_on()
2465 if (event->attr.exclusive && cpuctx->active_oncpu) in group_can_go_on()
2474 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2477 list_add_event(event, ctx); in add_event_to_ctx()
2478 perf_group_attach(event); in add_event_to_ctx()
2584 struct perf_event *event = info; in __perf_install_in_context() local
2585 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2616 if (is_cgroup_event(event)) { in __perf_install_in_context()
2623 event->cgrp->css.cgroup); in __perf_install_in_context()
2629 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2630 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_install_in_context()
2632 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2641 static bool exclusive_event_installable(struct perf_event *event,
2651 struct perf_event *event, in perf_install_in_context() argument
2658 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); in perf_install_in_context()
2660 if (event->cpu != -1) in perf_install_in_context()
2661 event->cpu = cpu; in perf_install_in_context()
2667 smp_store_release(&event->ctx, ctx); in perf_install_in_context()
2670 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
2712 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
2734 add_event_to_ctx(event, ctx); in perf_install_in_context()
2741 static void __perf_event_enable(struct perf_event *event, in __perf_event_enable() argument
2746 struct perf_event *leader = event->group_leader; in __perf_event_enable()
2749 if (event->state >= PERF_EVENT_STATE_INACTIVE || in __perf_event_enable()
2750 event->state <= PERF_EVENT_STATE_ERROR) in __perf_event_enable()
2756 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in __perf_event_enable()
2761 if (!event_filter_match(event)) { in __perf_event_enable()
2770 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { in __perf_event_enable()
2779 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_event_enable()
2791 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
2793 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
2796 if (event->state >= PERF_EVENT_STATE_INACTIVE || in _perf_event_enable()
2797 event->state < PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
2809 if (event->state == PERF_EVENT_STATE_ERROR) in _perf_event_enable()
2810 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
2813 event_function_call(event, __perf_event_enable, NULL); in _perf_event_enable()
2819 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
2823 ctx = perf_event_ctx_lock(event); in perf_event_enable()
2824 _perf_event_enable(event); in perf_event_enable()
2825 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
2830 struct perf_event *event; member
2837 struct perf_event *event = sd->event; in __perf_event_stop() local
2840 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in __perf_event_stop()
2850 if (READ_ONCE(event->oncpu) != smp_processor_id()) in __perf_event_stop()
2853 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_stop()
2865 event->pmu->start(event, 0); in __perf_event_stop()
2870 static int perf_event_stop(struct perf_event *event, int restart) in perf_event_stop() argument
2873 .event = event, in perf_event_stop()
2879 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in perf_event_stop()
2890 ret = cpu_function_call(READ_ONCE(event->oncpu), in perf_event_stop()
2919 void perf_event_addr_filters_sync(struct perf_event *event) in perf_event_addr_filters_sync() argument
2921 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_sync()
2923 if (!has_addr_filter(event)) in perf_event_addr_filters_sync()
2927 if (event->addr_filters_gen != event->hw.addr_filters_gen) { in perf_event_addr_filters_sync()
2928 event->pmu->addr_filters_sync(event); in perf_event_addr_filters_sync()
2929 event->hw.addr_filters_gen = event->addr_filters_gen; in perf_event_addr_filters_sync()
2935 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
2940 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
2943 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
2944 _perf_event_enable(event); in _perf_event_refresh()
2952 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
2957 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
2958 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
2959 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
2980 static int perf_event_modify_attr(struct perf_event *event, in perf_event_modify_attr() argument
2983 if (event->attr.type != attr->type) in perf_event_modify_attr()
2986 switch (event->attr.type) { in perf_event_modify_attr()
2988 return perf_event_modify_breakpoint(event, attr); in perf_event_modify_attr()
2999 struct perf_event *event, *tmp; in ctx_sched_out() local
3053 list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) in ctx_sched_out()
3054 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3058 list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) in ctx_sched_out()
3059 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3102 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
3107 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
3117 if (event->state == PERF_EVENT_STATE_ACTIVE) in __perf_event_sync_stat()
3118 event->pmu->read(event); in __perf_event_sync_stat()
3120 perf_event_update_time(event); in __perf_event_sync_stat()
3127 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
3130 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
3131 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
3136 perf_event_update_userpage(event); in __perf_event_sync_stat()
3143 struct perf_event *event, *next_event; in perf_event_sync_stat() local
3150 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
3156 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
3159 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
3161 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
3382 static int pinned_sched_in(struct perf_event *event, void *data) in pinned_sched_in() argument
3386 if (event->state <= PERF_EVENT_STATE_OFF) in pinned_sched_in()
3389 if (!event_filter_match(event)) in pinned_sched_in()
3392 if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) { in pinned_sched_in()
3393 if (!group_sched_in(event, sid->cpuctx, sid->ctx)) in pinned_sched_in()
3394 list_add_tail(&event->active_list, &sid->ctx->pinned_active); in pinned_sched_in()
3401 if (event->state == PERF_EVENT_STATE_INACTIVE) in pinned_sched_in()
3402 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in pinned_sched_in()
3407 static int flexible_sched_in(struct perf_event *event, void *data) in flexible_sched_in() argument
3411 if (event->state <= PERF_EVENT_STATE_OFF) in flexible_sched_in()
3414 if (!event_filter_match(event)) in flexible_sched_in()
3417 if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) { in flexible_sched_in()
3418 int ret = group_sched_in(event, sid->cpuctx, sid->ctx); in flexible_sched_in()
3424 list_add_tail(&event->active_list, &sid->ctx->flexible_active); in flexible_sched_in()
3589 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
3591 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
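
A sketch of the arithmetic behind perf_calculate_period(), derived from its inputs rather than copied, so treat it as an assumption: with count events observed over nsec nanoseconds, the event rate is count * 1e9 / nsec per second, and the period that yields attr.sample_freq samples per second is that rate divided by the frequency. The kernel version adds shift-based overflow protection; this plain form assumes the products fit in 64 bits.

#include <stdint.h>

static uint64_t calc_period(uint64_t freq, uint64_t nsec, uint64_t count)
{
	if (!freq || !nsec)
		return 0;
	return (count * 1000000000ULL) / (freq * nsec);
}
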
3665 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
3667 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
3671 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
3685 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
3690 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
3702 struct perf_event *event; in perf_adjust_freq_unthr_context() local
3718 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_adjust_freq_unthr_context()
3719 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_context()
3722 if (!event_filter_match(event)) in perf_adjust_freq_unthr_context()
3725 perf_pmu_disable(event->pmu); in perf_adjust_freq_unthr_context()
3727 hwc = &event->hw; in perf_adjust_freq_unthr_context()
3731 perf_log_throttle(event, 1); in perf_adjust_freq_unthr_context()
3732 event->pmu->start(event, 0); in perf_adjust_freq_unthr_context()
3735 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_context()
3741 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_context()
3743 now = local64_read(&event->count); in perf_adjust_freq_unthr_context()
3755 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_context()
3757 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_context()
3759 perf_pmu_enable(event->pmu); in perf_adjust_freq_unthr_context()
3769 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) in rotate_ctx() argument
3778 perf_event_groups_delete(&ctx->flexible_groups, event); in rotate_ctx()
3779 perf_event_groups_insert(&ctx->flexible_groups, event); in rotate_ctx()
3786 struct perf_event *event; in ctx_event_to_rotate() local
3789 event = list_first_entry_or_null(&ctx->flexible_active, in ctx_event_to_rotate()
3793 if (!event) { in ctx_event_to_rotate()
3794 event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree), in ctx_event_to_rotate()
3795 typeof(*event), group_node); in ctx_event_to_rotate()
3798 return event; in ctx_event_to_rotate()
3865 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
3868 if (!event->attr.enable_on_exec) in event_enable_on_exec()
3871 event->attr.enable_on_exec = 0; in event_enable_on_exec()
3872 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
3875 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_enable_on_exec()
3889 struct perf_event *event; in perf_event_enable_on_exec() local
3901 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
3902 enabled |= event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
3903 event_type |= get_event_type(event); in perf_event_enable_on_exec()
3925 struct perf_event *event; member
3930 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) in __perf_event_read_cpu() argument
3934 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { in __perf_event_read_cpu()
3953 struct perf_event *sub, *event = data->event; in __perf_event_read() local
3954 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
3956 struct pmu *pmu = event->pmu; in __perf_event_read()
3971 update_cgrp_time_from_event(event); in __perf_event_read()
3974 perf_event_update_time(event); in __perf_event_read()
3976 perf_event_update_sibling_time(event); in __perf_event_read()
3978 if (event->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
3982 pmu->read(event); in __perf_event_read()
3989 pmu->read(event); in __perf_event_read()
3991 for_each_sibling_event(sub, event) { in __perf_event_read()
4007 static inline u64 perf_event_count(struct perf_event *event) in perf_event_count() argument
4009 return local64_read(&event->count) + atomic64_read(&event->child_count); in perf_event_count()
4020 int perf_event_read_local(struct perf_event *event, u64 *value, in perf_event_read_local() argument
4036 if (event->attr.inherit) { in perf_event_read_local()
4042 if ((event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4043 event->hw.target != current) { in perf_event_read_local()
4049 if (!(event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4050 event->cpu != smp_processor_id()) { in perf_event_read_local()
4056 if (event->attr.pinned && event->oncpu != smp_processor_id()) { in perf_event_read_local()
4066 if (event->oncpu == smp_processor_id()) in perf_event_read_local()
4067 event->pmu->read(event); in perf_event_read_local()
4069 *value = local64_read(&event->count); in perf_event_read_local()
4071 u64 now = event->shadow_ctx_time + perf_clock(); in perf_event_read_local()
4074 __perf_update_times(event, now, &__enabled, &__running); in perf_event_read_local()
4086 static int perf_event_read(struct perf_event *event, bool group) in perf_event_read() argument
4088 enum perf_event_state state = READ_ONCE(event->state); in perf_event_read()
4107 event_cpu = READ_ONCE(event->oncpu); in perf_event_read()
4112 .event = event, in perf_event_read()
4118 event_cpu = __perf_event_read_cpu(event, event_cpu); in perf_event_read()
4135 struct perf_event_context *ctx = event->ctx; in perf_event_read()
4139 state = event->state; in perf_event_read()
4151 update_cgrp_time_from_event(event); in perf_event_read()
4154 perf_event_update_time(event); in perf_event_read()
4156 perf_event_update_sibling_time(event); in perf_event_read()
4221 struct perf_event *event) in find_get_context() argument
4228 int cpu = event->cpu; in find_get_context()
4232 err = perf_allow_cpu(&event->attr); in find_get_context()
4249 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in find_get_context()
4316 static void perf_event_free_filter(struct perf_event *event);
4317 static void perf_event_free_bpf_prog(struct perf_event *event);
4321 struct perf_event *event; in free_event_rcu() local
4323 event = container_of(head, struct perf_event, rcu_head); in free_event_rcu()
4324 if (event->ns) in free_event_rcu()
4325 put_pid_ns(event->ns); in free_event_rcu()
4326 perf_event_free_filter(event); in free_event_rcu()
4327 kfree(event); in free_event_rcu()
4330 static void ring_buffer_attach(struct perf_event *event,
4333 static void detach_sb_event(struct perf_event *event) in detach_sb_event() argument
4335 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in detach_sb_event()
4338 list_del_rcu(&event->sb_list); in detach_sb_event()
4342 static bool is_sb_event(struct perf_event *event) in is_sb_event() argument
4344 struct perf_event_attr *attr = &event->attr; in is_sb_event()
4346 if (event->parent) in is_sb_event()
4349 if (event->attach_state & PERF_ATTACH_TASK) in is_sb_event()
4361 static void unaccount_pmu_sb_event(struct perf_event *event) in unaccount_pmu_sb_event() argument
4363 if (is_sb_event(event)) in unaccount_pmu_sb_event()
4364 detach_sb_event(event); in unaccount_pmu_sb_event()
4367 static void unaccount_event_cpu(struct perf_event *event, int cpu) in unaccount_event_cpu() argument
4369 if (event->parent) in unaccount_event_cpu()
4372 if (is_cgroup_event(event)) in unaccount_event_cpu()
4398 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
4402 if (event->parent) in unaccount_event()
4405 if (event->attach_state & PERF_ATTACH_TASK) in unaccount_event()
4407 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
4409 if (event->attr.comm) in unaccount_event()
4411 if (event->attr.namespaces) in unaccount_event()
4413 if (event->attr.task) in unaccount_event()
4415 if (event->attr.freq) in unaccount_event()
4417 if (event->attr.context_switch) { in unaccount_event()
4421 if (is_cgroup_event(event)) in unaccount_event()
4423 if (has_branch_stack(event)) in unaccount_event()
4425 if (event->attr.ksymbol) in unaccount_event()
4427 if (event->attr.bpf_event) in unaccount_event()
4435 unaccount_event_cpu(event, event->cpu); in unaccount_event()
4437 unaccount_pmu_sb_event(event); in unaccount_event()
4460 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
4462 struct pmu *pmu = event->pmu; in exclusive_event_init()
4480 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
4491 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
4493 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
4499 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
4515 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
4519 struct pmu *pmu = event->pmu; in exclusive_event_installable()
4527 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
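
The exclusive_event_* lines rely on one atomic counter per PMU used in two directions, counting per-task events upward and CPU-wide events downward, so the two kinds can never coexist on a PMU that demands exclusivity. A user-space sketch of the increment-unless-negative half in C11 atomics (the kernel uses atomic_inc_unless_negative() and its mirror):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int exclusive_cnt;	/* >0: per-task users, <0: cpu-wide */

static bool get_task_slot(void)
{
	int v = atomic_load(&exclusive_cnt);

	while (v >= 0) {
		if (atomic_compare_exchange_weak(&exclusive_cnt, &v, v + 1))
			return true;	/* claimed a per-task slot */
	}
	return false;	/* cpu-wide events already hold the PMU */
}
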
4534 static void perf_addr_filters_splice(struct perf_event *event,
4537 static void _free_event(struct perf_event *event) in _free_event() argument
4539 irq_work_sync(&event->pending); in _free_event()
4541 unaccount_event(event); in _free_event()
4543 security_perf_event_free(event); in _free_event()
4545 if (event->rb) { in _free_event()
4552 mutex_lock(&event->mmap_mutex); in _free_event()
4553 ring_buffer_attach(event, NULL); in _free_event()
4554 mutex_unlock(&event->mmap_mutex); in _free_event()
4557 if (is_cgroup_event(event)) in _free_event()
4558 perf_detach_cgroup(event); in _free_event()
4560 if (!event->parent) { in _free_event()
4561 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in _free_event()
4565 perf_event_free_bpf_prog(event); in _free_event()
4566 perf_addr_filters_splice(event, NULL); in _free_event()
4567 kfree(event->addr_filter_ranges); in _free_event()
4569 if (event->destroy) in _free_event()
4570 event->destroy(event); in _free_event()
4576 if (event->hw.target) in _free_event()
4577 put_task_struct(event->hw.target); in _free_event()
4583 if (event->ctx) in _free_event()
4584 put_ctx(event->ctx); in _free_event()
4586 exclusive_event_destroy(event); in _free_event()
4587 module_put(event->pmu->module); in _free_event()
4589 call_rcu(&event->rcu_head, free_event_rcu); in _free_event()
4596 static void free_event(struct perf_event *event) in free_event() argument
4598 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
4600 atomic_long_read(&event->refcount), event)) { in free_event()
4605 _free_event(event); in free_event()
4611 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
4622 owner = READ_ONCE(event->owner); in perf_remove_from_owner()
4650 if (event->owner) { in perf_remove_from_owner()
4651 list_del_init(&event->owner_entry); in perf_remove_from_owner()
4652 smp_store_release(&event->owner, NULL); in perf_remove_from_owner()
4659 static void put_event(struct perf_event *event) in put_event() argument
4661 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
4664 _free_event(event); in put_event()
4672 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
4674 struct perf_event_context *ctx = event->ctx; in perf_event_release_kernel()
4683 WARN_ON_ONCE(event->attach_state & in perf_event_release_kernel()
4688 if (!is_kernel_event(event)) in perf_event_release_kernel()
4689 perf_remove_from_owner(event); in perf_event_release_kernel()
4691 ctx = perf_event_ctx_lock(event); in perf_event_release_kernel()
4693 perf_remove_from_context(event, DETACH_GROUP); in perf_event_release_kernel()
4707 event->state = PERF_EVENT_STATE_DEAD; in perf_event_release_kernel()
4710 perf_event_ctx_unlock(event, ctx); in perf_event_release_kernel()
4713 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
4714 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_release_kernel()
4736 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
4738 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
4745 tmp = list_first_entry_or_null(&event->child_list, in perf_event_release_kernel()
4754 put_event(event); in perf_event_release_kernel()
4757 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
4762 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
4779 put_event(event); /* Must be the 'last' reference */ in perf_event_release_kernel()
4793 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
4801 mutex_lock(&event->child_mutex); in __perf_event_read_value()
4803 (void)perf_event_read(event, false); in __perf_event_read_value()
4804 total += perf_event_count(event); in __perf_event_read_value()
4806 *enabled += event->total_time_enabled + in __perf_event_read_value()
4807 atomic64_read(&event->child_total_time_enabled); in __perf_event_read_value()
4808 *running += event->total_time_running + in __perf_event_read_value()
4809 atomic64_read(&event->child_total_time_running); in __perf_event_read_value()
4811 list_for_each_entry(child, &event->child_list, child_list) { in __perf_event_read_value()
4817 mutex_unlock(&event->child_mutex); in __perf_event_read_value()
4822 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
4827 ctx = perf_event_ctx_lock(event); in perf_event_read_value()
4828 count = __perf_event_read_value(event, enabled, running); in perf_event_read_value()
4829 perf_event_ctx_unlock(event, ctx); in perf_event_read_value()
4882 static int perf_read_group(struct perf_event *event, in perf_read_group() argument
4885 struct perf_event *leader = event->group_leader, *child; in perf_read_group()
4892 values = kzalloc(event->read_size, GFP_KERNEL); in perf_read_group()
4916 ret = event->read_size; in perf_read_group()
4917 if (copy_to_user(buf, values, event->read_size)) in perf_read_group()
4928 static int perf_read_one(struct perf_event *event, in perf_read_one() argument
4935 values[n++] = __perf_event_read_value(event, &enabled, &running); in perf_read_one()
4941 values[n++] = primary_event_id(event); in perf_read_one()
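
perf_read_one() emits, in order: the value, then the enabled time, running time, and ID when the corresponding read_format bits are set. A user-space counterpart that opens a software counter with all three bits and parses exactly that layout (error handling kept minimal):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

struct read_one {
	uint64_t value, time_enabled, time_running, id;
};

int main(void)
{
	struct perf_event_attr attr;
	struct read_one r;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING |
			   PERF_FORMAT_ID;

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0 || read(fd, &r, sizeof(r)) != sizeof(r))
		return 1;

	printf("value=%llu enabled=%llu running=%llu id=%llu\n",
	       (unsigned long long)r.value,
	       (unsigned long long)r.time_enabled,
	       (unsigned long long)r.time_running,
	       (unsigned long long)r.id);
	return 0;
}
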
4949 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
4953 if (event->state > PERF_EVENT_STATE_EXIT) in is_event_hup()
4956 mutex_lock(&event->child_mutex); in is_event_hup()
4957 no_children = list_empty(&event->child_list); in is_event_hup()
4958 mutex_unlock(&event->child_mutex); in is_event_hup()
4966 __perf_read(struct perf_event *event, char __user *buf, size_t count) in __perf_read() argument
4968 u64 read_format = event->attr.read_format; in __perf_read()
4976 if (event->state == PERF_EVENT_STATE_ERROR) in __perf_read()
4979 if (count < event->read_size) in __perf_read()
4982 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
4984 ret = perf_read_group(event, read_format, buf); in __perf_read()
4986 ret = perf_read_one(event, read_format, buf); in __perf_read()
4994 struct perf_event *event = file->private_data; in perf_read() local
4998 ret = security_perf_event_read(event); in perf_read()
5002 ctx = perf_event_ctx_lock(event); in perf_read()
5003 ret = __perf_read(event, buf, count); in perf_read()
5004 perf_event_ctx_unlock(event, ctx); in perf_read()
5011 struct perf_event *event = file->private_data; in perf_poll() local
5015 poll_wait(file, &event->waitq, wait); in perf_poll()
5017 if (is_event_hup(event)) in perf_poll()
5024 mutex_lock(&event->mmap_mutex); in perf_poll()
5025 rb = event->rb; in perf_poll()
5028 mutex_unlock(&event->mmap_mutex); in perf_poll()
5032 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
5034 (void)perf_event_read(event, false); in _perf_event_reset()
5035 local64_set(&event->count, 0); in _perf_event_reset()
5036 perf_event_update_userpage(event); in _perf_event_reset()
5045 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
5050 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
5052 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
5053 func(event); in perf_event_for_each_child()
5054 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
5056 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
5059 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
5062 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
5067 event = event->group_leader; in perf_event_for_each()
5069 perf_event_for_each_child(event, func); in perf_event_for_each()
5070 for_each_sibling_event(sibling, event) in perf_event_for_each()
5074 static void __perf_event_period(struct perf_event *event, in __perf_event_period() argument
5082 if (event->attr.freq) { in __perf_event_period()
5083 event->attr.sample_freq = value; in __perf_event_period()
5085 event->attr.sample_period = value; in __perf_event_period()
5086 event->hw.sample_period = value; in __perf_event_period()
5089 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
5096 if (event->hw.interrupts == MAX_INTERRUPTS) { in __perf_event_period()
5097 event->hw.interrupts = 0; in __perf_event_period()
5098 perf_log_throttle(event, 1); in __perf_event_period()
5100 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
5103 local64_set(&event->hw.period_left, 0); in __perf_event_period()
5106 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
5111 static int perf_event_check_period(struct perf_event *event, u64 value) in perf_event_check_period() argument
5113 return event->pmu->check_period(event, value); in perf_event_check_period()
5116 static int perf_event_period(struct perf_event *event, u64 __user *arg) in perf_event_period() argument
5120 if (!is_sampling_event(event)) in perf_event_period()
5129 if (event->attr.freq && value > sysctl_perf_event_sample_rate) in perf_event_period()
5132 if (perf_event_check_period(event, value)) in perf_event_period()
5135 if (!event->attr.freq && (value & (1ULL << 63))) in perf_event_period()
5138 event_function_call(event, __perf_event_period, &value); in perf_event_period()
5159 static int perf_event_set_output(struct perf_event *event,
5161 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
5162 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
5166 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
5183 return _perf_event_refresh(event, arg); in _perf_ioctl()
5186 return perf_event_period(event, (u64 __user *)arg); in _perf_ioctl()
5190 u64 id = primary_event_id(event); in _perf_ioctl()
5207 ret = perf_event_set_output(event, output_event); in _perf_ioctl()
5210 ret = perf_event_set_output(event, NULL); in _perf_ioctl()
5216 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
5219 return perf_event_set_bpf_prog(event, arg); in _perf_ioctl()
5225 rb = rcu_dereference(event->rb); in _perf_ioctl()
5236 return perf_event_query_prog_array(event, (void __user *)arg); in _perf_ioctl()
5246 return perf_event_modify_attr(event, &new_attr); in _perf_ioctl()
5253 perf_event_for_each(event, func); in _perf_ioctl()
5255 perf_event_for_each_child(event, func); in _perf_ioctl()
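
From user space the dispatch in _perf_ioctl() is driven by the PERF_EVENT_IOC_* ioctls. A fragment assuming fd came from perf_event_open(); note that PERF_EVENT_IOC_PERIOD takes a pointer to the u64 (matching the (u64 __user *)arg above), and PERF_IOC_FLAG_GROUP routes the enable through perf_event_for_each() so it applies to the whole group:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void restart_with_period(int fd, uint64_t period)
{
	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
}
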

5262 struct perf_event *event = file->private_data; in perf_ioctl() local
5267 ret = security_perf_event_write(event); in perf_ioctl()
5271 ctx = perf_event_ctx_lock(event); in perf_ioctl()
5272 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
5273 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
5303 struct perf_event *event; in perf_event_task_enable() local
5306 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
5307 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
5308 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
5309 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
5319 struct perf_event *event; in perf_event_task_disable() local
5322 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
5323 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
5324 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
5325 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
5332 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
5334 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
5337 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
5340 return event->pmu->event_idx(event); in perf_event_index()
5343 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
5351 ctx_time = event->shadow_ctx_time + *now; in calc_timer_values()
5352 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
5355 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
5361 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
5378 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
5387 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
5394 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
5407 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
5417 userpg->index = perf_event_index(event); in perf_event_update_userpage()
5418 userpg->offset = perf_event_count(event); in perf_event_update_userpage()
5420 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
5423 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
5426 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
5428 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
5440 struct perf_event *event = vmf->vma->vm_file->private_data; in perf_mmap_fault() local
5451 rb = rcu_dereference(event->rb); in perf_mmap_fault()
5473 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
5479 if (event->rb) { in ring_buffer_attach()
5484 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
5486 old_rb = event->rb; in ring_buffer_attach()
5488 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
5491 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
5492 event->rcu_pending = 1; in ring_buffer_attach()
5496 if (event->rcu_pending) { in ring_buffer_attach()
5497 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
5498 event->rcu_pending = 0; in ring_buffer_attach()
5502 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
5516 if (has_aux(event)) in ring_buffer_attach()
5517 perf_event_stop(event, 0); in ring_buffer_attach()
5519 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
5528 wake_up_all(&event->waitq); in ring_buffer_attach()
5532 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
5537 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
5539 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
5540 wake_up_all(&event->waitq); in ring_buffer_wakeup()
5545 struct ring_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
5550 rb = rcu_dereference(event->rb); in ring_buffer_get()
5572 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
5574 atomic_inc(&event->mmap_count); in perf_mmap_open()
5575 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
5578 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
5580 if (event->pmu->event_mapped) in perf_mmap_open()
5581 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
5584 static void perf_pmu_output_stop(struct perf_event *event);
5596 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
5598 struct ring_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
5603 if (event->pmu->event_unmapped) in perf_mmap_close()
5604 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
5612 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { in perf_mmap_close()
5619 perf_pmu_output_stop(event); in perf_mmap_close()
5629 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
5634 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
5637 ring_buffer_attach(event, NULL); in perf_mmap_close()
5638 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
5651 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
5652 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
5661 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
5672 if (event->rb == rb) in perf_mmap_close()
5673 ring_buffer_attach(event, NULL); in perf_mmap_close()
5675 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
5676 put_event(event); in perf_mmap_close()
5713 struct perf_event *event = file->private_data; in perf_mmap() local
5728 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
5734 ret = security_perf_event_read(event); in perf_mmap()
5750 if (!event->rb) in perf_mmap()
5755 mutex_lock(&event->mmap_mutex); in perf_mmap()
5758 rb = event->rb; in perf_mmap()
5810 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
5812 mutex_lock(&event->mmap_mutex); in perf_mmap()
5813 if (event->rb) { in perf_mmap()
5814 if (event->rb->nr_pages != nr_pages) { in perf_mmap()
5819 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
5825 mutex_unlock(&event->mmap_mutex); in perf_mmap()
5869 WARN_ON(!rb && event->rb); in perf_mmap()
5876 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap()
5877 event->cpu, flags); in perf_mmap()
5888 ring_buffer_attach(event, rb); in perf_mmap()
5890 perf_event_init_userpage(event); in perf_mmap()
5891 perf_event_update_userpage(event); in perf_mmap()
5893 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
5894 event->attr.aux_watermark, flags); in perf_mmap()
5904 atomic_inc(&event->mmap_count); in perf_mmap()
5909 mutex_unlock(&event->mmap_mutex); in perf_mmap()
5918 if (event->pmu->event_mapped) in perf_mmap()
5919 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap()
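When perf_mmap() finds an existing buffer (event->rb), the new mapping must describe the same size and must pin the buffer against concurrent tear-down. A hedged sketch of that check under event->mmap_mutex; the unlock and again labels are assumptions about the surrounding control flow, which the listing elides:

	if (event->rb) {
		if (event->rb->nr_pages != nr_pages) {
			ret = -EINVAL;		/* size must match the live buffer */
			goto unlock;
		}
		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
			/* buffer is being torn down; drop the lock and retry */
			mutex_unlock(&event->mmap_mutex);
			goto again;
		}
	}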
5927 struct perf_event *event = filp->private_data; in perf_fasync() local
5931 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
5958 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) in perf_event_fasync() argument
5961 if (event->parent) in perf_event_fasync()
5962 event = event->parent; in perf_event_fasync()
5963 return &event->fasync; in perf_event_fasync()
5966 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
5968 ring_buffer_wakeup(event); in perf_event_wakeup()
5970 if (event->pending_kill) { in perf_event_wakeup()
5971 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
5972 event->pending_kill = 0; in perf_event_wakeup()
5976 static void perf_pending_event_disable(struct perf_event *event) in perf_pending_event_disable() argument
5978 int cpu = READ_ONCE(event->pending_disable); in perf_pending_event_disable()
5984 WRITE_ONCE(event->pending_disable, -1); in perf_pending_event_disable()
5985 perf_event_disable_local(event); in perf_pending_event_disable()
6009 irq_work_queue_on(&event->pending, cpu); in perf_pending_event_disable()
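perf_pending_event_disable() has to cope with an event that is active on a different CPU: the disable cannot be done remotely, so the irq_work is re-queued on the owning CPU. Sketched from the fragments above:

	int cpu = READ_ONCE(event->pending_disable);

	if (cpu < 0)
		return;				/* nothing pending */

	if (cpu == smp_processor_id()) {
		WRITE_ONCE(event->pending_disable, -1);
		perf_event_disable_local(event);
		return;
	}

	irq_work_queue_on(&event->pending, cpu);	/* retry over there */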
6014 struct perf_event *event = container_of(entry, struct perf_event, pending); in perf_pending_event() local
6023 perf_pending_event_disable(event); in perf_pending_event()
6025 if (event->pending_wakeup) { in perf_pending_event()
6026 event->pending_wakeup = 0; in perf_pending_event()
6027 perf_event_wakeup(event); in perf_pending_event()
6195 struct perf_event *event) in __perf_event_header__init_id() argument
6197 u64 sample_type = event->attr.sample_type; in __perf_event_header__init_id()
6200 header->size += event->id_header_size; in __perf_event_header__init_id()
6204 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
6205 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
6209 data->time = perf_event_clock(event); in __perf_event_header__init_id()
6212 data->id = primary_event_id(event); in __perf_event_header__init_id()
6215 data->stream_id = event->id; in __perf_event_header__init_id()
6225 struct perf_event *event) in perf_event_header__init_id() argument
6227 if (event->attr.sample_id_all) in perf_event_header__init_id()
6228 __perf_event_header__init_id(header, data, event); in perf_event_header__init_id()
6255 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
6259 if (event->attr.sample_id_all) in perf_event__output_id_sample()
6264 struct perf_event *event, in perf_output_read_one() argument
6267 u64 read_format = event->attr.read_format; in perf_output_read_one()
6271 values[n++] = perf_event_count(event); in perf_output_read_one()
6274 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
6278 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
6281 values[n++] = primary_event_id(event); in perf_output_read_one()
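perf_output_read_one() packs the counter values into a flat u64 array in the fixed order in which the read_format bits are defined, so userspace can decode the record with the same bit tests. A minimal sketch, assuming the usual perf_output_copy() tail that the listing elides:

	u64 values[4];
	int n = 0;

	values[n++] = perf_event_count(event);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled +
			atomic64_read(&event->child_total_time_enabled);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running +
			atomic64_read(&event->child_total_time_running);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	perf_output_copy(handle, values, n * sizeof(u64));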
6287 struct perf_event *event, in perf_output_read_group() argument
6290 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
6291 u64 read_format = event->attr.read_format; in perf_output_read_group()
6303 if ((leader != event) && in perf_output_read_group()
6316 if ((sub != event) && in perf_output_read_group()
6339 struct perf_event *event) in perf_output_read() argument
6342 u64 read_format = event->attr.read_format; in perf_output_read()
6354 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
6356 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
6357 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
6359 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
6365 struct perf_event *event) in perf_output_sample() argument
6399 perf_output_read(handle, event); in perf_output_sample()
6470 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
6501 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
6512 if (!event->attr.watermark) { in perf_output_sample()
6513 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
6562 perf_callchain(struct perf_event *event, struct pt_regs *regs) in perf_callchain() argument
6564 bool kernel = !event->attr.exclude_callchain_kernel; in perf_callchain()
6565 bool user = !event->attr.exclude_callchain_user; in perf_callchain()
6567 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
6568 const u32 max_stack = event->attr.sample_max_stack; in perf_callchain()
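perf_callchain() turns the attr exclude bits into arguments for the generic unwinder and refuses to walk a foreign task's user stack. Reconstructed from the fragments; the empty-callchain fallback is the likely elided part:

	bool kernel = !event->attr.exclude_callchain_kernel;
	bool user   = !event->attr.exclude_callchain_user;
	/* never unwind another task's user stack */
	bool crosstask = event->ctx->task && event->ctx->task != current;

	if (!kernel && !user)
		return &__empty_callchain;

	return get_perf_callchain(regs, 0, kernel, user,
				  event->attr.sample_max_stack,
				  crosstask, true);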
6581 struct perf_event *event, in perf_prepare_sample() argument
6584 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
6587 header->size = sizeof(*header) + event->header_size; in perf_prepare_sample()
6592 __perf_event_header__init_id(header, data, event); in perf_prepare_sample()
6601 data->callchain = perf_callchain(event, regs); in perf_prepare_sample()
6651 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
6665 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
6690 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
6703 __perf_event_output(struct perf_event *event, in __perf_event_output() argument
6717 perf_prepare_sample(&header, data, event, regs); in __perf_event_output()
6719 err = output_begin(&handle, event, header.size); in __perf_event_output()
6723 perf_output_sample(&handle, &header, data, event); in __perf_event_output()
6733 perf_event_output_forward(struct perf_event *event, in perf_event_output_forward() argument
6737 __perf_event_output(event, data, regs, perf_output_begin_forward); in perf_event_output_forward()
6741 perf_event_output_backward(struct perf_event *event, in perf_event_output_backward() argument
6745 __perf_event_output(event, data, regs, perf_output_begin_backward); in perf_event_output_backward()
6749 perf_event_output(struct perf_event *event, in perf_event_output() argument
6753 return __perf_event_output(event, data, regs, perf_output_begin); in perf_event_output()
6768 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
6777 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
6779 .pid = perf_event_pid(event, task), in perf_event_read_event()
6780 .tid = perf_event_tid(event, task), in perf_event_read_event()
6784 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
6785 ret = perf_output_begin(&handle, event, read_event.header.size); in perf_event_read_event()
6790 perf_output_read(&handle, event); in perf_event_read_event()
6791 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
6796 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
6803 struct perf_event *event; in perf_iterate_ctx() local
6805 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_iterate_ctx()
6807 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_ctx()
6809 if (!event_filter_match(event)) in perf_iterate_ctx()
6813 output(event, data); in perf_iterate_ctx()
6820 struct perf_event *event; in perf_iterate_sb_cpu() local
6822 list_for_each_entry_rcu(event, &pel->list, sb_list) { in perf_iterate_sb_cpu()
6828 if (!smp_load_acquire(&event->ctx)) in perf_iterate_sb_cpu()
6831 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_sb_cpu()
6833 if (!event_filter_match(event)) in perf_iterate_sb_cpu()
6835 output(event, data); in perf_iterate_sb_cpu()
6881 static void perf_event_addr_filters_exec(struct perf_event *event, void *data) in perf_event_addr_filters_exec() argument
6883 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_exec()
6888 if (!has_addr_filter(event)) in perf_event_addr_filters_exec()
6894 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_exec()
6895 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_exec()
6903 event->addr_filters_gen++; in perf_event_addr_filters_exec()
6907 perf_event_stop(event, 1); in perf_event_addr_filters_exec()
6934 static void __perf_event_output_stop(struct perf_event *event, void *data) in __perf_event_output_stop() argument
6936 struct perf_event *parent = event->parent; in __perf_event_output_stop()
6940 .event = event, in __perf_event_output_stop()
6943 if (!has_aux(event)) in __perf_event_output_stop()
6947 parent = event; in __perf_event_output_stop()
6965 struct perf_event *event = info; in __perf_pmu_output_stop() local
6966 struct pmu *pmu = event->ctx->pmu; in __perf_pmu_output_stop()
6969 .rb = event->rb, in __perf_pmu_output_stop()
6982 static void perf_pmu_output_stop(struct perf_event *event) in perf_pmu_output_stop() argument
6989 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { in perf_pmu_output_stop()
7003 err = cpu_function_call(cpu, __perf_pmu_output_stop, event); in perf_pmu_output_stop()
7033 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
7035 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
7036 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
7037 event->attr.task; in perf_event_task_match()
7040 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
7049 if (!perf_event_task_match(event)) in perf_event_task_output()
7052 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
7054 ret = perf_output_begin(&handle, event, in perf_event_task_output()
7059 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
7060 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
7062 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
7063 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
7065 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
7069 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
7132 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
7134 return event->attr.comm; in perf_event_comm_match()
7137 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
7146 if (!perf_event_comm_match(event)) in perf_event_comm_output()
7149 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
7150 ret = perf_output_begin(&handle, event, in perf_event_comm_output()
7156 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
7157 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
7163 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
7231 static int perf_event_namespaces_match(struct perf_event *event) in perf_event_namespaces_match() argument
7233 return event->attr.namespaces; in perf_event_namespaces_match()
7236 static void perf_event_namespaces_output(struct perf_event *event, in perf_event_namespaces_output() argument
7245 if (!perf_event_namespaces_match(event)) in perf_event_namespaces_output()
7249 &sample, event); in perf_event_namespaces_output()
7250 ret = perf_output_begin(&handle, event, in perf_event_namespaces_output()
7255 namespaces_event->event_id.pid = perf_event_pid(event, in perf_event_namespaces_output()
7257 namespaces_event->event_id.tid = perf_event_tid(event, in perf_event_namespaces_output()
7262 perf_event__output_id_sample(event, &handle, &sample); in perf_event_namespaces_output()
7369 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
7376 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
7377 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
7380 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
7390 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
7393 if (event->attr.mmap2) { in perf_event_mmap_output()
7403 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
7404 ret = perf_output_begin(&handle, event, in perf_event_mmap_output()
7409 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
7410 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
7414 if (event->attr.mmap2) { in perf_event_mmap_output()
7426 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
7599 static void __perf_addr_filters_adjust(struct perf_event *event, void *data) in __perf_addr_filters_adjust() argument
7601 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in __perf_addr_filters_adjust()
7607 if (!has_addr_filter(event)) in __perf_addr_filters_adjust()
7616 &event->addr_filter_ranges[count])) in __perf_addr_filters_adjust()
7623 event->addr_filters_gen++; in __perf_addr_filters_adjust()
7627 perf_event_stop(event, 1); in __perf_addr_filters_adjust()
7691 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
7713 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
7714 ret = perf_output_begin(&handle, event, rec.header.size); in perf_event_aux_event()
7720 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
7728 void perf_log_lost_samples(struct perf_event *event, u64 lost) in perf_log_lost_samples() argument
7746 perf_event_header__init_id(&lost_samples_event.header, &sample, event); in perf_log_lost_samples()
7748 ret = perf_output_begin(&handle, event, in perf_log_lost_samples()
7754 perf_event__output_id_sample(event, &handle, &sample); in perf_log_lost_samples()
7773 static int perf_event_switch_match(struct perf_event *event) in perf_event_switch_match() argument
7775 return event->attr.context_switch; in perf_event_switch_match()
7778 static void perf_event_switch_output(struct perf_event *event, void *data) in perf_event_switch_output() argument
7785 if (!perf_event_switch_match(event)) in perf_event_switch_output()
7789 if (event->ctx->task) { in perf_event_switch_output()
7796 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
7798 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
7801 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
7803 ret = perf_output_begin(&handle, event, se->event_id.header.size); in perf_event_switch_output()
7807 if (event->ctx->task) in perf_event_switch_output()
7812 perf_event__output_id_sample(event, &handle, &sample); in perf_event_switch_output()
7851 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
7868 .time = perf_event_clock(event), in perf_log_throttle()
7869 .id = primary_event_id(event), in perf_log_throttle()
7870 .stream_id = event->id, in perf_log_throttle()
7876 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
7878 ret = perf_output_begin(&handle, event, in perf_log_throttle()
7884 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
7904 static int perf_event_ksymbol_match(struct perf_event *event) in perf_event_ksymbol_match() argument
7906 return event->attr.ksymbol; in perf_event_ksymbol_match()
7909 static void perf_event_ksymbol_output(struct perf_event *event, void *data) in perf_event_ksymbol_output() argument
7916 if (!perf_event_ksymbol_match(event)) in perf_event_ksymbol_output()
7920 &sample, event); in perf_event_ksymbol_output()
7921 ret = perf_output_begin(&handle, event, in perf_event_ksymbol_output()
7928 perf_event__output_id_sample(event, &handle, &sample); in perf_event_ksymbol_output()
7994 static int perf_event_bpf_match(struct perf_event *event) in perf_event_bpf_match() argument
7996 return event->attr.bpf_event; in perf_event_bpf_match()
7999 static void perf_event_bpf_output(struct perf_event *event, void *data) in perf_event_bpf_output() argument
8006 if (!perf_event_bpf_match(event)) in perf_event_bpf_output()
8010 &sample, event); in perf_event_bpf_output()
8011 ret = perf_output_begin(&handle, event, in perf_event_bpf_output()
8017 perf_event__output_id_sample(event, &handle, &sample); in perf_event_bpf_output()
8089 void perf_event_itrace_started(struct perf_event *event) in perf_event_itrace_started() argument
8091 event->attach_state |= PERF_ATTACH_ITRACE; in perf_event_itrace_started()
8094 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
8105 if (event->parent) in perf_log_itrace_start()
8106 event = event->parent; in perf_log_itrace_start()
8108 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
8109 event->attach_state & PERF_ATTACH_ITRACE) in perf_log_itrace_start()
8115 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
8116 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
8118 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
8119 ret = perf_output_begin(&handle, event, rec.header.size); in perf_log_itrace_start()
8125 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
8131 __perf_event_account_interrupt(struct perf_event *event, int throttle) in __perf_event_account_interrupt() argument
8133 struct hw_perf_event *hwc = &event->hw; in __perf_event_account_interrupt()
8148 perf_log_throttle(event, 0); in __perf_event_account_interrupt()
8153 if (event->attr.freq) { in __perf_event_account_interrupt()
8160 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_account_interrupt()
8166 int perf_event_account_interrupt(struct perf_event *event) in perf_event_account_interrupt() argument
8168 return __perf_event_account_interrupt(event, 1); in perf_event_account_interrupt()
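__perf_event_account_interrupt() enforces a per-tick interrupt budget; once an event exceeds it, the event is marked throttled and a PERF_RECORD_THROTTLE record is logged. A heavily simplified sketch, with the per-CPU sequence bookkeeping elided (max_samples_per_tick is the sysctl-derived limit):

	if (throttle && ++hwc->interrupts >= max_samples_per_tick) {
		hwc->interrupts = MAX_INTERRUPTS;	/* marks it throttled */
		perf_log_throttle(event, 0);		/* "throttle off" record */
		ret = 1;				/* caller must stop sampling */
	}

For freq-based events the same path also feeds the observed period into perf_adjust_period(), so the sampling rate converges on attr.sample_freq.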
8175 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
8179 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
8186 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
8189 ret = __perf_event_account_interrupt(event, throttle); in __perf_event_overflow()
8196 event->pending_kill = POLL_IN; in __perf_event_overflow()
8197 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
8199 event->pending_kill = POLL_HUP; in __perf_event_overflow()
8201 perf_event_disable_inatomic(event); in __perf_event_overflow()
8204 READ_ONCE(event->overflow_handler)(event, data, regs); in __perf_event_overflow()
8206 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
8207 event->pending_wakeup = 1; in __perf_event_overflow()
8208 irq_work_queue(&event->pending); in __perf_event_overflow()
8214 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
8218 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
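The event_limit counter gives userspace a self-disarming event: each overflow decrements it, and the decrement that hits zero flips pending_kill from POLL_IN to POLL_HUP and disables the event in-atomic. From userspace the counter is re-armed with the refresh ioctl; an illustrative call (perf_fd is a hypothetical fd from perf_event_open):

	/* allow one more overflow, then self-disable and signal POLL_HUP */
	ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, 1);

The wakeup itself is deferred through the irq_work in event->pending, since kill_fasync() cannot always run from the overflow (possibly NMI) context.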
8243 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
8245 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
8266 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
8270 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
8274 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
8280 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
8292 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
8296 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
8298 local64_add(nr, &event->count); in perf_swevent_event()
8303 if (!is_sampling_event(event)) in perf_swevent_event()
8306 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
8308 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
8310 data->period = event->hw.last_period; in perf_swevent_event()
8312 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
8313 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
8318 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
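perf_swevent_event() is the accumulate-and-fire core of software events: the count always advances, but a sample fires only when the increment consumes what is left of the period. The tail of the listed function reduces to:

	local64_add(nr, &event->count);

	/* sampling-only special cases elided */

	if (local64_add_negative(nr, &hwc->period_left))
		return;			/* period not yet consumed */

	perf_swevent_overflow(event, 0, data, regs);

perf_swevent_overflow() then uses perf_swevent_set_period() to work out how many whole periods were crossed and emits one overflow per period, with throttling applied from the second one on.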
8321 static int perf_exclude_event(struct perf_event *event, in perf_exclude_event() argument
8324 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
8328 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
8331 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
8338 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
8344 if (event->attr.type != type) in perf_swevent_match()
8347 if (event->attr.config != event_id) in perf_swevent_match()
8350 if (perf_exclude_event(event, regs)) in perf_swevent_match()
8386 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
8389 u32 event_id = event->attr.config; in find_swevent_head()
8390 u64 type = event->attr.type; in find_swevent_head()
8398 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
8411 struct perf_event *event; in do_perf_sw_event() local
8419 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
8420 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
8421 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
8471 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
8475 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
8478 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
8481 if (is_sampling_event(event)) { in perf_swevent_add()
8483 perf_swevent_set_period(event); in perf_swevent_add()
8488 head = find_swevent_head(swhash, event); in perf_swevent_add()
8492 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
8493 perf_event_update_userpage(event); in perf_swevent_add()
8498 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
8500 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
8503 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
8505 event->hw.state = 0; in perf_swevent_start()
8508 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
8510 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
8602 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
8604 u64 event_id = event->attr.config; in sw_perf_event_destroy()
8606 WARN_ON(event->parent); in sw_perf_event_destroy()
8612 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
8614 u64 event_id = event->attr.config; in perf_swevent_init()
8616 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
8622 if (has_branch_stack(event)) in perf_swevent_init()
8637 if (!event->parent) { in perf_swevent_init()
8645 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
8666 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
8672 if (event->parent) in perf_tp_filter_match()
8673 event = event->parent; in perf_tp_filter_match()
8675 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
8680 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
8684 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
8689 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_tp_event_match()
8692 if (!perf_tp_filter_match(event, data)) in perf_tp_event_match()
8710 perf_tp_event(call->event.type, count, raw_data, size, regs, head, in perf_trace_run_bpf_submit()
8720 struct perf_event *event; in perf_tp_event() local
8734 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
8735 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
8736 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
8752 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_tp_event()
8753 if (event->cpu != smp_processor_id()) in perf_tp_event()
8755 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event()
8757 if (event->attr.config != entry->type) in perf_tp_event()
8759 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
8760 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
8770 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
8772 perf_trace_destroy(event); in tp_perf_event_destroy()
8775 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
8779 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
8785 if (has_branch_stack(event)) in perf_tp_event_init()
8788 err = perf_trace_init(event); in perf_tp_event_init()
8792 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
8848 static int perf_kprobe_event_init(struct perf_event *event);
8860 static int perf_kprobe_event_init(struct perf_event *event) in perf_kprobe_event_init() argument
8865 if (event->attr.type != perf_kprobe.type) in perf_kprobe_event_init()
8874 if (has_branch_stack(event)) in perf_kprobe_event_init()
8877 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_kprobe_event_init()
8878 err = perf_kprobe_init(event, is_retprobe); in perf_kprobe_event_init()
8882 event->destroy = perf_kprobe_destroy; in perf_kprobe_event_init()
8907 static int perf_uprobe_event_init(struct perf_event *event);
8919 static int perf_uprobe_event_init(struct perf_event *event) in perf_uprobe_event_init() argument
8925 if (event->attr.type != perf_uprobe.type) in perf_uprobe_event_init()
8934 if (has_branch_stack(event)) in perf_uprobe_event_init()
8937 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_uprobe_event_init()
8938 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; in perf_uprobe_event_init()
8939 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); in perf_uprobe_event_init()
8943 event->destroy = perf_uprobe_destroy; in perf_uprobe_event_init()
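Both probe PMUs decode their flavour out of attr.config: bit 0 (PERF_PROBE_CONFIG_IS_RETPROBE) selects a return probe, and for uprobes the upper bits carry the reference-counter offset. In short:

	bool is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
	u64 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT;

Because perf_kprobe.type and perf_uprobe.type are allocated dynamically, userspace discovers them via /sys/bus/event_source/devices/{kprobe,uprobe}/type rather than through a fixed PERF_TYPE_* constant.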
8960 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
8962 ftrace_profile_free_filter(event); in perf_event_free_filter()
8966 static void bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
8972 .event = event, in bpf_overflow_handler()
8981 ret = BPF_PROG_RUN(event->prog, &ctx); in bpf_overflow_handler()
8989 event->orig_overflow_handler(event, data, regs); in bpf_overflow_handler()
8992 static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_handler() argument
8996 if (event->overflow_handler_context) in perf_event_set_bpf_handler()
9000 if (event->prog) in perf_event_set_bpf_handler()
9007 event->prog = prog; in perf_event_set_bpf_handler()
9008 event->orig_overflow_handler = READ_ONCE(event->overflow_handler); in perf_event_set_bpf_handler()
9009 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); in perf_event_set_bpf_handler()
9013 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
9015 struct bpf_prog *prog = event->prog; in perf_event_free_bpf_handler()
9020 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); in perf_event_free_bpf_handler()
9021 event->prog = NULL; in perf_event_free_bpf_handler()
9025 static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_handler() argument
9029 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
9038 static inline bool perf_event_is_tracing(struct perf_event *event) in perf_event_is_tracing() argument
9040 if (event->pmu == &perf_tracepoint) in perf_event_is_tracing()
9043 if (event->pmu == &perf_kprobe) in perf_event_is_tracing()
9047 if (event->pmu == &perf_uprobe) in perf_event_is_tracing()
9053 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_prog() argument
9059 if (!perf_event_is_tracing(event)) in perf_event_set_bpf_prog()
9060 return perf_event_set_bpf_handler(event, prog_fd); in perf_event_set_bpf_prog()
9062 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE; in perf_event_set_bpf_prog()
9063 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; in perf_event_set_bpf_prog()
9064 is_syscall_tp = is_syscall_trace_event(event->tp_event); in perf_event_set_bpf_prog()
9083 !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) { in perf_event_set_bpf_prog()
9089 int off = trace_event_get_offsets(event->tp_event); in perf_event_set_bpf_prog()
9097 ret = perf_event_attach_bpf_prog(event, prog); in perf_event_set_bpf_prog()
9103 static void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
9105 if (!perf_event_is_tracing(event)) { in perf_event_free_bpf_prog()
9106 perf_event_free_bpf_handler(event); in perf_event_free_bpf_prog()
9109 perf_event_detach_bpf_prog(event); in perf_event_free_bpf_prog()
9118 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
9122 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_prog() argument
9127 static void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
9149 perf_addr_filter_new(struct perf_event *event, struct list_head *filters) in perf_addr_filter_new() argument
9151 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); in perf_addr_filter_new()
9178 static void perf_addr_filters_splice(struct perf_event *event, in perf_addr_filters_splice() argument
9184 if (!has_addr_filter(event)) in perf_addr_filters_splice()
9188 if (event->parent) in perf_addr_filters_splice()
9191 raw_spin_lock_irqsave(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
9193 list_splice_init(&event->addr_filters.list, &list); in perf_addr_filters_splice()
9195 list_splice(head, &event->addr_filters.list); in perf_addr_filters_splice()
9197 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
9226 static void perf_event_addr_filters_apply(struct perf_event *event) in perf_event_addr_filters_apply() argument
9228 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_apply()
9229 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply()
9243 mm = get_task_mm(event->ctx->task); in perf_event_addr_filters_apply()
9257 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_apply()
9258 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_apply()
9260 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); in perf_event_addr_filters_apply()
9262 event->addr_filter_ranges[count].start = filter->offset; in perf_event_addr_filters_apply()
9263 event->addr_filter_ranges[count].size = filter->size; in perf_event_addr_filters_apply()
9269 event->addr_filters_gen++; in perf_event_addr_filters_apply()
9279 perf_event_stop(event, 1); in perf_event_addr_filters_apply()
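perf_event_addr_filters_apply() recomputes each filter's resolved address range and bumps addr_filters_gen so the PMU picks the change up on restart. File-backed filters are resolved against the task's mm; kernel-address filters are taken verbatim. Sketched from the fragments, with locking and the !mm case elided:

	list_for_each_entry(filter, &ifh->list, entry) {
		if (filter->path.dentry) {
			/* file-backed: resolve offset against task mappings */
			event->addr_filter_ranges[count].start = 0;
			event->addr_filter_ranges[count].size  = 0;
			perf_addr_filter_apply(filter, mm,
					&event->addr_filter_ranges[count]);
		} else {
			/* kernel address filter: used as given */
			event->addr_filter_ranges[count].start = filter->offset;
			event->addr_filter_ranges[count].size  = filter->size;
		}
		count++;
	}

	event->addr_filters_gen++;

perf_event_stop(event, 1) then restarts the event so the new ranges take effect immediately.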
9333 perf_event_parse_addr_filter(struct perf_event *event, char *fstr, in perf_event_parse_addr_filter() argument
9360 filter = perf_addr_filter_new(event, filters); in perf_event_parse_addr_filter()
9423 if (kernel && event->attr.exclude_kernel) in perf_event_parse_addr_filter()
9447 if (!event->ctx->task) in perf_event_parse_addr_filter()
9465 event->addr_filters.nr_file_filters++; in perf_event_parse_addr_filter()
9491 perf_event_set_addr_filter(struct perf_event *event, char *filter_str) in perf_event_set_addr_filter() argument
9500 lockdep_assert_held(&event->ctx->mutex); in perf_event_set_addr_filter()
9502 if (WARN_ON_ONCE(event->parent)) in perf_event_set_addr_filter()
9505 ret = perf_event_parse_addr_filter(event, filter_str, &filters); in perf_event_set_addr_filter()
9509 ret = event->pmu->addr_filters_validate(&filters); in perf_event_set_addr_filter()
9514 perf_addr_filters_splice(event, &filters); in perf_event_set_addr_filter()
9517 perf_event_for_each_child(event, perf_event_addr_filters_apply); in perf_event_set_addr_filter()
9525 event->addr_filters.nr_file_filters = 0; in perf_event_set_addr_filter()
9530 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
9540 if (perf_event_is_tracing(event)) { in perf_event_set_filter()
9541 struct perf_event_context *ctx = event->ctx; in perf_event_set_filter()
9555 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
9559 if (has_addr_filter(event)) in perf_event_set_filter()
9560 ret = perf_event_set_addr_filter(event, filter_str); in perf_event_set_filter()
9575 struct perf_event *event; in perf_swevent_hrtimer() local
9578 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
9580 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_swevent_hrtimer()
9583 event->pmu->read(event); in perf_swevent_hrtimer()
9585 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
9588 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
9589 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
9590 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
9594 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
9600 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
9602 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
9605 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
9621 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
9623 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
9625 if (is_sampling_event(event)) { in perf_swevent_cancel_hrtimer()
9633 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
9635 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
9637 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
9647 if (event->attr.freq) { in perf_swevent_init_hrtimer()
9648 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
9650 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
9651 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
9654 event->attr.freq = 0; in perf_swevent_init_hrtimer()
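For hrtimer-backed software events a frequency request is converted into a fixed nanosecond period once at init time, and attr.freq is cleared so the generic frequency-adjustment machinery stays out of the way:

	if (event->attr.freq) {
		long freq = event->attr.sample_freq;

		event->attr.sample_period = NSEC_PER_SEC / freq;
		hwc->sample_period = event->attr.sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
		hwc->last_period = hwc->sample_period;

		event->attr.freq = 0;	/* now a plain period-based event */
	}

The hrtimer handler later clamps the period with max_t(u64, 10000, ...), so an over-eager sample_period cannot re-arm the timer faster than every 10 microseconds.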
9662 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
9668 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
9669 local64_add(now - prev, &event->count); in cpu_clock_event_update()
9672 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
9674 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
9675 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
9678 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
9680 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
9681 cpu_clock_event_update(event); in cpu_clock_event_stop()
9684 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
9687 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
9688 perf_event_update_userpage(event); in cpu_clock_event_add()
9693 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
9695 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
9698 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
9700 cpu_clock_event_update(event); in cpu_clock_event_read()
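The cpu-clock PMU is the canonical free-running counter: each read banks the delta since the previous observation with an atomic exchange, so concurrent readers never double-count. The listed lines reduce to:

	static void cpu_clock_event_update(struct perf_event *event)
	{
		u64 now = local_clock();	/* elided in the listing */
		u64 prev = local64_xchg(&event->hw.prev_count, now);

		local64_add(now - prev, &event->count);
	}

task_clock below uses the same shape, but measures event->ctx->time (task runtime) instead of the CPU clock.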
9703 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
9705 if (event->attr.type != PERF_TYPE_SOFTWARE) in cpu_clock_event_init()
9708 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
9714 if (has_branch_stack(event)) in cpu_clock_event_init()
9717 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
9739 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
9744 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
9746 local64_add(delta, &event->count); in task_clock_event_update()
9749 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
9751 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
9752 perf_swevent_start_hrtimer(event); in task_clock_event_start()
9755 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
9757 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
9758 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
9761 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
9764 task_clock_event_start(event, flags); in task_clock_event_add()
9765 perf_event_update_userpage(event); in task_clock_event_add()
9770 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
9772 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
9775 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
9778 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
9779 u64 time = event->ctx->time + delta; in task_clock_event_read()
9781 task_clock_event_update(event, time); in task_clock_event_read()
9784 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
9786 if (event->attr.type != PERF_TYPE_SOFTWARE) in task_clock_event_init()
9789 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
9795 if (has_branch_stack(event)) in task_clock_event_init()
9798 perf_swevent_init_hrtimer(event); in task_clock_event_init()
9829 static int perf_event_nop_int(struct perf_event *event, u64 value) in perf_event_nop_int() argument
9871 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
10194 static inline bool has_extended_regs(struct perf_event *event) in has_extended_regs() argument
10196 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || in has_extended_regs()
10197 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); in has_extended_regs()
10200 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
10214 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { in perf_try_init_event()
10219 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
10224 event->pmu = pmu; in perf_try_init_event()
10225 ret = pmu->event_init(event); in perf_try_init_event()
10228 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
10232 has_extended_regs(event)) in perf_try_init_event()
10236 event_has_any_exclude_flag(event)) in perf_try_init_event()
10239 if (ret && event->destroy) in perf_try_init_event()
10240 event->destroy(event); in perf_try_init_event()
10249 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
10258 if (event->parent && event->parent->pmu) { in perf_init_event()
10259 pmu = event->parent->pmu; in perf_init_event()
10260 ret = perf_try_init_event(pmu, event); in perf_init_event()
10266 pmu = idr_find(&pmu_idr, event->attr.type); in perf_init_event()
10269 ret = perf_try_init_event(pmu, event); in perf_init_event()
10276 ret = perf_try_init_event(pmu, event); in perf_init_event()
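perf_init_event() resolves the PMU in three stages, each probed with perf_try_init_event(): the parent's PMU (inherited events reuse it), then the idr keyed by attr.type, then a walk of every registered PMU. A compressed sketch; the srcu locking and the real error propagation (a failed idr hit does not fall through to the list walk) are simplified away:

	struct pmu *pmu;

	/* 1. inherited events reuse the parent's pmu */
	if (event->parent && event->parent->pmu &&
	    !perf_try_init_event(event->parent->pmu, event))
		return event->parent->pmu;

	/* 2. fast path: attr.type indexes pmu_idr */
	pmu = idr_find(&pmu_idr, event->attr.type);
	if (pmu && !perf_try_init_event(pmu, event))
		return pmu;

	/* 3. slow path: offer the event to every pmu in turn */
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		if (!perf_try_init_event(pmu, event))
			return pmu;
	}

	return ERR_PTR(-ENOENT);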
10292 static void attach_sb_event(struct perf_event *event) in attach_sb_event() argument
10294 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in attach_sb_event()
10297 list_add_rcu(&event->sb_list, &pel->list); in attach_sb_event()
10308 static void account_pmu_sb_event(struct perf_event *event) in account_pmu_sb_event() argument
10310 if (is_sb_event(event)) in account_pmu_sb_event()
10311 attach_sb_event(event); in account_pmu_sb_event()
10314 static void account_event_cpu(struct perf_event *event, int cpu) in account_event_cpu() argument
10316 if (event->parent) in account_event_cpu()
10319 if (is_cgroup_event(event)) in account_event_cpu()
10344 static void account_event(struct perf_event *event) in account_event() argument
10348 if (event->parent) in account_event()
10351 if (event->attach_state & PERF_ATTACH_TASK) in account_event()
10353 if (event->attr.mmap || event->attr.mmap_data) in account_event()
10355 if (event->attr.comm) in account_event()
10357 if (event->attr.namespaces) in account_event()
10359 if (event->attr.task) in account_event()
10361 if (event->attr.freq) in account_event()
10363 if (event->attr.context_switch) { in account_event()
10367 if (has_branch_stack(event)) in account_event()
10369 if (is_cgroup_event(event)) in account_event()
10371 if (event->attr.ksymbol) in account_event()
10373 if (event->attr.bpf_event) in account_event()
10404 account_event_cpu(event, event->cpu); in account_event()
10406 account_pmu_sb_event(event); in account_event()
10421 struct perf_event *event; in perf_event_alloc() local
10430 event = kzalloc(sizeof(*event), GFP_KERNEL); in perf_event_alloc()
10431 if (!event) in perf_event_alloc()
10439 group_leader = event; in perf_event_alloc()
10441 mutex_init(&event->child_mutex); in perf_event_alloc()
10442 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
10444 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
10445 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
10446 INIT_LIST_HEAD(&event->active_list); in perf_event_alloc()
10447 init_event_group(event); in perf_event_alloc()
10448 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
10449 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
10450 INIT_LIST_HEAD(&event->addr_filters.list); in perf_event_alloc()
10451 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
10454 init_waitqueue_head(&event->waitq); in perf_event_alloc()
10455 event->pending_disable = -1; in perf_event_alloc()
10456 init_irq_work(&event->pending, perf_pending_event); in perf_event_alloc()
10458 mutex_init(&event->mmap_mutex); in perf_event_alloc()
10459 raw_spin_lock_init(&event->addr_filters.lock); in perf_event_alloc()
10461 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
10462 event->cpu = cpu; in perf_event_alloc()
10463 event->attr = *attr; in perf_event_alloc()
10464 event->group_leader = group_leader; in perf_event_alloc()
10465 event->pmu = NULL; in perf_event_alloc()
10466 event->oncpu = -1; in perf_event_alloc()
10468 event->parent = parent_event; in perf_event_alloc()
10470 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
10471 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
10473 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
10476 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
10482 event->hw.target = get_task_struct(task); in perf_event_alloc()
10485 event->clock = &local_clock; in perf_event_alloc()
10487 event->clock = parent_event->clock; in perf_event_alloc()
10500 event->prog = prog; in perf_event_alloc()
10501 event->orig_overflow_handler = in perf_event_alloc()
10508 event->overflow_handler = overflow_handler; in perf_event_alloc()
10509 event->overflow_handler_context = context; in perf_event_alloc()
10510 } else if (is_write_backward(event)) { in perf_event_alloc()
10511 event->overflow_handler = perf_event_output_backward; in perf_event_alloc()
10512 event->overflow_handler_context = NULL; in perf_event_alloc()
10514 event->overflow_handler = perf_event_output_forward; in perf_event_alloc()
10515 event->overflow_handler_context = NULL; in perf_event_alloc()
10518 perf_event__state_init(event); in perf_event_alloc()
10522 hwc = &event->hw; in perf_event_alloc()
10537 if (!has_branch_stack(event)) in perf_event_alloc()
10538 event->attr.branch_sample_type = 0; in perf_event_alloc()
10541 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
10546 pmu = perf_init_event(event); in perf_event_alloc()
10561 if (event->attr.aux_output && in perf_event_alloc()
10567 err = exclusive_event_init(event); in perf_event_alloc()
10571 if (has_addr_filter(event)) { in perf_event_alloc()
10572 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, in perf_event_alloc()
10575 if (!event->addr_filter_ranges) { in perf_event_alloc()
10584 if (event->parent) { in perf_event_alloc()
10585 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_alloc()
10588 memcpy(event->addr_filter_ranges, in perf_event_alloc()
10589 event->parent->addr_filter_ranges, in perf_event_alloc()
10595 event->addr_filters_gen = 1; in perf_event_alloc()
10598 if (!event->parent) { in perf_event_alloc()
10599 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
10606 err = security_perf_event_alloc(event); in perf_event_alloc()
10611 account_event(event); in perf_event_alloc()
10613 return event; in perf_event_alloc()
10616 if (!event->parent) { in perf_event_alloc()
10617 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in perf_event_alloc()
10621 kfree(event->addr_filter_ranges); in perf_event_alloc()
10624 exclusive_event_destroy(event); in perf_event_alloc()
10627 if (event->destroy) in perf_event_alloc()
10628 event->destroy(event); in perf_event_alloc()
10631 if (is_cgroup_event(event)) in perf_event_alloc()
10632 perf_detach_cgroup(event); in perf_event_alloc()
10633 if (event->ns) in perf_event_alloc()
10634 put_pid_ns(event->ns); in perf_event_alloc()
10635 if (event->hw.target) in perf_event_alloc()
10636 put_task_struct(event->hw.target); in perf_event_alloc()
10637 kfree(event); in perf_event_alloc()
10751 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
10760 if (event == output_event) in perf_event_set_output()
10766 if (output_event->cpu != event->cpu) in perf_event_set_output()
10772 if (output_event->cpu == -1 && output_event->ctx != event->ctx) in perf_event_set_output()
10778 if (output_event->clock != event->clock) in perf_event_set_output()
10785 if (is_write_backward(output_event) != is_write_backward(event)) in perf_event_set_output()
10791 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
10792 event->pmu != output_event->pmu) in perf_event_set_output()
10796 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
10798 if (atomic_read(&event->mmap_count)) in perf_event_set_output()
10808 ring_buffer_attach(event, rb); in perf_event_set_output()
10812 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
10827 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
10833 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
10838 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
10843 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
10847 event->clock = &ktime_get_boottime_ns; in perf_event_set_clock()
10851 event->clock = &ktime_get_clocktai_ns; in perf_event_set_clock()
10858 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
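perf_event_set_clock() maps the user's clockid onto an in-kernel clock function; only the *_fast_ns variants are NMI-safe, so a PMU that can raise NMIs (one without PERF_PMU_CAP_NO_NMI) rejects the others. Condensed from the fragments:

	bool nmi_safe = false;

	switch (clk_id) {
	case CLOCK_MONOTONIC:
		event->clock = &ktime_get_mono_fast_ns;
		nmi_safe = true;
		break;
	case CLOCK_MONOTONIC_RAW:
		event->clock = &ktime_get_raw_fast_ns;
		nmi_safe = true;
		break;
	case CLOCK_REALTIME:
		event->clock = &ktime_get_real_ns;
		break;
	case CLOCK_BOOTTIME:
		event->clock = &ktime_get_boottime_ns;
		break;
	case CLOCK_TAI:
		event->clock = &ktime_get_clocktai_ns;
		break;
	default:
		return -EINVAL;
	}

	if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
		return -EINVAL;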
10908 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
11029 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
11031 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
11032 err = PTR_ERR(event); in SYSCALL_DEFINE5()
11036 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
11037 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
11047 pmu = event->pmu; in SYSCALL_DEFINE5()
11050 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
11056 event->event_caps |= PERF_EV_CAP_SOFTWARE; in SYSCALL_DEFINE5()
11059 if (is_software_event(event) && in SYSCALL_DEFINE5()
11070 } else if (!is_software_event(event) && in SYSCALL_DEFINE5()
11085 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
11105 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
11113 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
11139 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
11144 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, in SYSCALL_DEFINE5()
11199 if (!perf_event_validate_size(event)) { in SYSCALL_DEFINE5()
11220 if (event->attr.aux_output && !perf_get_aux_event(event, group_leader)) { in SYSCALL_DEFINE5()
11229 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
11292 perf_event__header_size(event); in SYSCALL_DEFINE5()
11293 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
11295 event->owner = current; in SYSCALL_DEFINE5()
11297 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
11310 list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
11338 free_event(event); in SYSCALL_DEFINE5()
11366 struct perf_event *event; in perf_event_create_kernel_counter() local
11376 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
11378 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
11379 err = PTR_ERR(event); in perf_event_create_kernel_counter()
11384 event->owner = TASK_TOMBSTONE; in perf_event_create_kernel_counter()
11389 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
11417 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
11422 perf_install_in_context(ctx, event, event->cpu); in perf_event_create_kernel_counter()
11426 return event; in perf_event_create_kernel_counter()
11433 free_event(event); in perf_event_create_kernel_counter()
11443 struct perf_event *event, *tmp; in perf_pmu_migrate_context() local
11454 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, in perf_pmu_migrate_context()
11456 perf_remove_from_context(event, 0); in perf_pmu_migrate_context()
11457 unaccount_event_cpu(event, src_cpu); in perf_pmu_migrate_context()
11459 list_add(&event->migrate_entry, &events); in perf_pmu_migrate_context()
11475 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
11476 if (event->group_leader == event) in perf_pmu_migrate_context()
11479 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
11480 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
11481 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
11482 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
11483 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
11491 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
11492 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
11493 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
11494 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
11495 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
11496 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
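perf_pmu_migrate_context() re-installs the collected events in two passes, and that ordering is the only difference between the two near-identical loops above: siblings first, group leaders last, so that by the time a leader is enabled on the destination CPU every sibling it will enable is already in dst_ctx. Sketched with the accounting calls elided:

	/* pass 1: siblings only; a leader would enable the group too early */
	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
		if (event->group_leader == event)
			continue;
		list_del(&event->migrate_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
			event->state = PERF_EVENT_STATE_INACTIVE;
		perf_install_in_context(dst_ctx, event, dst_cpu);
	}

	/* pass 2: the leaders, which then enable their present siblings */
	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
		list_del(&event->migrate_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
			event->state = PERF_EVENT_STATE_INACTIVE;
		perf_install_in_context(dst_ctx, event, dst_cpu);
	}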
11651 struct perf_event *event, *tmp; in perf_event_exit_task() local
11655 list_for_each_entry_safe(event, tmp, &child->perf_event_list, in perf_event_exit_task()
11657 list_del_init(&event->owner_entry); in perf_event_exit_task()
11664 smp_store_release(&event->owner, NULL); in perf_event_exit_task()
11680 static void perf_free_event(struct perf_event *event, in perf_free_event() argument
11683 struct perf_event *parent = event->parent; in perf_free_event()
11689 list_del_init(&event->child_list); in perf_free_event()
11695 perf_group_detach(event); in perf_free_event()
11696 list_del_event(event, ctx); in perf_free_event()
11698 free_event(event); in perf_free_event()
11711 struct perf_event *event, *tmp; in perf_event_free_task() local
11732 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) in perf_event_free_task()
11733 perf_free_event(event, ctx); in perf_event_free_task()
11786 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) in perf_event_attrs() argument
11788 if (!event) in perf_event_attrs()
11791 return &event->attr; in perf_event_attrs()
11962 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
11970 if (!event->attr.inherit) { in inherit_task_group()
11990 ret = inherit_group(event, parent, parent_ctx, in inherit_task_group()
12006 struct perf_event *event; in perf_event_init_context() local
12040 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { in perf_event_init_context()
12041 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
12056 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { in perf_event_init_context()
12057 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
12161 struct perf_event *event; in __perf_event_exit_context() local
12165 list_for_each_entry(event, &ctx->event_list, event_entry) in __perf_event_exit_context()
12166 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()