
Lines Matching full:event

177 static bool is_kernel_event(struct perf_event *event)  in is_kernel_event()  argument
179 return READ_ONCE(event->owner) == TASK_TOMBSTONE; in is_kernel_event()
191 * - removing the last event from a task ctx; this is relatively straight
194 * - adding the first event to a task ctx; this is tricky because we cannot
205 struct perf_event *event; member
213 struct perf_event *event = efs->event; in event_function() local
214 struct perf_event_context *ctx = event->ctx; in event_function()
249 efs->func(event, cpuctx, ctx, efs->data); in event_function()
256 static void event_function_call(struct perf_event *event, event_f func, void *data) in event_function_call() argument
258 struct perf_event_context *ctx = event->ctx; in event_function_call()
261 .event = event, in event_function_call()
266 if (!event->parent) { in event_function_call()
268 * If this is a !child event, we must hold ctx::mutex to in event_function_call()
269 * stabilize the event->ctx relation. See in event_function_call()
276 cpu_function_call(event->cpu, event_function, &efs); in event_function_call()
301 func(event, NULL, ctx, data); in event_function_call()
309 static void event_function_local(struct perf_event *event, event_f func, void *data) in event_function_local() argument
311 struct perf_event_context *ctx = event->ctx; in event_function_local()
348 func(event, cpuctx, ctx, data); in event_function_local()
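
The event_function_call() fragments above show how operations on an event are routed to the CPU that owns its context: the event, the callback and its data are bundled into an on-stack event_function_struct and dispatched with cpu_function_call()/task_function_call(), with a locked local fallback when no IPI is needed. A stripped-down userspace model of that bundling follows; the names fake_event and print_cpu are illustrative, the real cross-CPU call is replaced by a direct invocation, and the kernel callback actually returns int.

#include <stdio.h>

struct fake_event { int cpu; };                 /* stand-in for struct perf_event */

typedef void (*event_f)(struct fake_event *ev, void *data);

struct event_function_struct {                  /* mirrors the on-stack efs bundle */
        struct fake_event *event;
        event_f func;
        void *data;
};

/* In the kernel this callback runs on event->cpu via cpu_function_call();
 * here it is simply invoked in place. */
static void event_function(void *info)
{
        struct event_function_struct *efs = info;

        efs->func(efs->event, efs->data);
}

static void print_cpu(struct fake_event *ev, void *data)
{
        printf("running %s on event bound to cpu %d\n", (char *)data, ev->cpu);
}

int main(void)
{
        struct fake_event ev = { .cpu = 3 };
        struct event_function_struct efs = {
                .event = &ev, .func = print_cpu, .data = "disable",
        };

        event_function(&efs);
        return 0;
}
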
406 * perf event paranoia level:
418 * max perf event sample rate
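
The two matches above refer to the kernel.perf_event_paranoid and kernel.perf_event_max_sample_rate sysctls. As a rough userspace illustration (not part of this file), both can be read back through their standard procfs paths:

#include <stdio.h>

static long read_sysctl(const char *path)
{
        FILE *f = fopen(path, "r");
        long val = -1;

        if (f) {
                if (fscanf(f, "%ld", &val) != 1)
                        val = -1;
                fclose(f);
        }
        return val;
}

int main(void)
{
        printf("perf_event_paranoid        = %ld\n",
               read_sysctl("/proc/sys/kernel/perf_event_paranoid"));
        printf("perf_event_max_sample_rate = %ld\n",
               read_sysctl("/proc/sys/kernel/perf_event_max_sample_rate"));
        return 0;
}
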
575 static u64 perf_event_time(struct perf_event *event);
589 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
591 return event->clock(); in perf_event_clock()
595 * State based event timekeeping...
597 * The basic idea is to use event->state to determine which (if any) time
602 * Event groups make things a little more complicated, but not terribly so. The
617 __perf_effective_state(struct perf_event *event) in __perf_effective_state() argument
619 struct perf_event *leader = event->group_leader; in __perf_effective_state()
624 return event->state; in __perf_effective_state()
628 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
630 enum perf_event_state state = __perf_effective_state(event); in __perf_update_times()
631 u64 delta = now - event->tstamp; in __perf_update_times()
633 *enabled = event->total_time_enabled; in __perf_update_times()
637 *running = event->total_time_running; in __perf_update_times()
642 static void perf_event_update_time(struct perf_event *event) in perf_event_update_time() argument
644 u64 now = perf_event_time(event); in perf_event_update_time()
646 __perf_update_times(event, now, &event->total_time_enabled, in perf_event_update_time()
647 &event->total_time_running); in perf_event_update_time()
648 event->tstamp = now; in perf_event_update_time()
660 perf_event_set_state(struct perf_event *event, enum perf_event_state state) in perf_event_set_state() argument
662 if (event->state == state) in perf_event_set_state()
665 perf_event_update_time(event); in perf_event_set_state()
670 if ((event->state < 0) ^ (state < 0)) in perf_event_set_state()
671 perf_event_update_sibling_time(event); in perf_event_set_state()
673 WRITE_ONCE(event->state, state); in perf_event_set_state()
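
__perf_update_times() and perf_event_set_state() above implement the "State based event timekeeping" described in the comment block before them: the delta since event->tstamp is added to total_time_enabled whenever the group-effective state is at least INACTIVE and to total_time_running only while ACTIVE, and every state change first folds in the elapsed time. A self-contained sketch of that accounting, with simplified states and illustrative names (sketch_event, set_state):

#include <stdio.h>

typedef unsigned long long u64;

enum state { STATE_OFF = -1, STATE_INACTIVE = 0, STATE_ACTIVE = 1 };

struct sketch_event {
        enum state state;
        u64 tstamp;                  /* time of the last state change */
        u64 total_time_enabled;
        u64 total_time_running;
};

/* Fold the time since the last transition into the totals, mirroring the
 * enabled/running split done by __perf_update_times(). */
static void update_times(struct sketch_event *ev, u64 now)
{
        u64 delta = now - ev->tstamp;

        if (ev->state >= STATE_INACTIVE)
                ev->total_time_enabled += delta;
        if (ev->state == STATE_ACTIVE)
                ev->total_time_running += delta;
        ev->tstamp = now;
}

static void set_state(struct sketch_event *ev, enum state state, u64 now)
{
        update_times(ev, now);       /* account the old state first */
        ev->state = state;
}

int main(void)
{
        struct sketch_event ev = { .state = STATE_INACTIVE, .tstamp = 100 };

        set_state(&ev, STATE_ACTIVE, 150);    /* 50ns enabled, not running */
        set_state(&ev, STATE_INACTIVE, 400);  /* +250ns enabled and running */
        update_times(&ev, 500);               /* +100ns enabled only */

        printf("enabled=%llu running=%llu\n",
               ev.total_time_enabled, ev.total_time_running);  /* 400 / 250 */
        return 0;
}
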
696 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
698 struct perf_event_context *ctx = event->ctx; in perf_cgroup_match()
701 /* @event doesn't care about cgroup */ in perf_cgroup_match()
702 if (!event->cgrp) in perf_cgroup_match()
710 * Cgroup scoping is recursive. An event enabled for a cgroup is in perf_cgroup_match()
712 * cgroup is a descendant of @event's (the test covers identity in perf_cgroup_match()
716 event->cgrp->css.cgroup); in perf_cgroup_match()
719 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
721 css_put(&event->cgrp->css); in perf_detach_cgroup()
722 event->cgrp = NULL; in perf_detach_cgroup()
725 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
727 return event->cgrp != NULL; in is_cgroup_event()
730 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
734 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
738 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
742 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time_now()
780 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
789 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
792 cgrp = perf_cgroup_from_task(current, event->ctx); in update_cgrp_time_from_event()
796 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) { in update_cgrp_time_from_event()
797 info = this_cpu_ptr(event->cgrp->info); in update_cgrp_time_from_event()
830 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
939 static int perf_cgroup_ensure_storage(struct perf_event *event, in perf_cgroup_ensure_storage() argument
954 cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu); in perf_cgroup_ensure_storage()
980 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
999 ret = perf_cgroup_ensure_storage(event, css); in perf_cgroup_connect()
1004 event->cgrp = cgrp; in perf_cgroup_connect()
1012 perf_detach_cgroup(event); in perf_cgroup_connect()
1021 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1025 if (!is_cgroup_event(event)) in perf_cgroup_event_enable()
1036 * matching the event's cgroup, we must do this for every new event, in perf_cgroup_event_enable()
1043 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) in perf_cgroup_event_enable()
1051 per_cpu_ptr(&cgrp_cpuctx_list, event->cpu)); in perf_cgroup_event_enable()
1055 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1059 if (!is_cgroup_event(event)) in perf_cgroup_event_disable()
1080 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
1085 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
1088 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
1093 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
1112 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
1130 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
1135 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
1141 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1146 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1338 * because the sys_perf_event_open() case will install a new event and break
1349 * quiesce the event, after which we can install it in the new location. This
1350 * means that only external vectors (perf_fops, prctl) can perturb the event
1354 * However; because event->ctx can change while we're waiting to acquire
1373 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
1379 ctx = READ_ONCE(event->ctx); in perf_event_ctx_lock_nested()
1387 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
1397 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
1399 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
1402 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
1428 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, in perf_event_pid_type() argument
1435 if (event->parent) in perf_event_pid_type()
1436 event = event->parent; in perf_event_pid_type()
1438 nr = __task_pid_nr_ns(p, type, event->ns); in perf_event_pid_type()
1445 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1447 return perf_event_pid_type(event, p, PIDTYPE_TGID); in perf_event_pid()
1450 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1452 return perf_event_pid_type(event, p, PIDTYPE_PID); in perf_event_tid()
1456 * If we inherit events we want to return the parent event id
1459 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1461 u64 id = event->id; in primary_event_id()
1463 if (event->parent) in primary_event_id()
1464 id = event->parent->id; in primary_event_id()
1582 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1584 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1589 if (is_cgroup_event(event)) in perf_event_time()
1590 return perf_cgroup_event_time(event); in perf_event_time()
1595 static u64 perf_event_time_now(struct perf_event *event, u64 now) in perf_event_time_now() argument
1597 struct perf_event_context *ctx = event->ctx; in perf_event_time_now()
1602 if (is_cgroup_event(event)) in perf_event_time_now()
1603 return perf_cgroup_event_time_now(event, now); in perf_event_time_now()
1612 static enum event_type_t get_event_type(struct perf_event *event) in get_event_type() argument
1614 struct perf_event_context *ctx = event->ctx; in get_event_type()
1623 if (event->group_leader != event) in get_event_type()
1624 event = event->group_leader; in get_event_type()
1626 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; in get_event_type()
1634 * Helper function to initialize event group nodes.
1636 static void init_event_group(struct perf_event *event) in init_event_group() argument
1638 RB_CLEAR_NODE(&event->group_node); in init_event_group()
1639 event->group_index = 0; in init_event_group()
1644 * based on event attrs bits.
1647 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) in get_event_groups() argument
1649 if (event->attr.pinned) in get_event_groups()
1665 * Compare function for event groups;
1711 * Insert @event into @groups' tree; using {@event->cpu, ++@groups->index} for
1717 struct perf_event *event) in perf_event_groups_insert() argument
1723 event->group_index = ++groups->index; in perf_event_groups_insert()
1732 if (perf_event_groups_less(event, node_event)) in perf_event_groups_insert()
1738 rb_link_node(&event->group_node, parent, node); in perf_event_groups_insert()
1739 rb_insert_color(&event->group_node, &groups->tree); in perf_event_groups_insert()
1743 * Helper function to insert event into the pinned or flexible groups.
1746 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) in add_event_to_groups() argument
1750 groups = get_event_groups(event, ctx); in add_event_to_groups()
1751 perf_event_groups_insert(groups, event); in add_event_to_groups()
1759 struct perf_event *event) in perf_event_groups_delete() argument
1761 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || in perf_event_groups_delete()
1764 rb_erase(&event->group_node, &groups->tree); in perf_event_groups_delete()
1765 init_event_group(event); in perf_event_groups_delete()
1769 * Helper function to delete event from its groups.
1772 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) in del_event_from_groups() argument
1776 groups = get_event_groups(event, ctx); in del_event_from_groups()
1777 perf_event_groups_delete(groups, event); in del_event_from_groups()
1781 * Get the leftmost event in the cpu/cgroup subtree.
1832 perf_event_groups_next(struct perf_event *event) in perf_event_groups_next() argument
1840 next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node); in perf_event_groups_next()
1841 if (next == NULL || next->cpu != event->cpu) in perf_event_groups_next()
1845 if (event->cgrp && event->cgrp->css.cgroup) in perf_event_groups_next()
1846 curr_cgrp_id = event->cgrp->css.cgroup->kn->id; in perf_event_groups_next()
1860 #define perf_event_groups_for_each(event, groups) \ argument
1861 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1862 typeof(*event), group_node); event; \
1863 event = rb_entry_safe(rb_next(&event->group_node), \
1864 typeof(*event), group_node))
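
perf_event_groups_insert() above keys the pinned/flexible red-black trees on {event->cpu, cgroup, ++groups->index}, so iteration visits all groups of one CPU (and cgroup) in insertion order before moving on, which is what perf_event_groups_next() relies on when it stops at a CPU or cgroup change. The ordering can be modelled with an ordinary comparator; the struct below is an illustrative reduction of that key, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative subset of the sort key used by the groups trees. */
struct group_key {
        int cpu;
        unsigned long cgroup_id;     /* 0 when the event has no cgroup */
        unsigned long group_index;   /* ++groups->index at insert time */
};

static int group_cmp(const void *a, const void *b)
{
        const struct group_key *l = a, *r = b;

        if (l->cpu != r->cpu)
                return l->cpu < r->cpu ? -1 : 1;
        if (l->cgroup_id != r->cgroup_id)
                return l->cgroup_id < r->cgroup_id ? -1 : 1;
        if (l->group_index != r->group_index)
                return l->group_index < r->group_index ? -1 : 1;
        return 0;
}

int main(void)
{
        struct group_key k[] = {
                { 1, 0, 7 }, { -1, 0, 3 }, { 0, 42, 5 }, { 0, 0, 9 },
        };

        qsort(k, 4, sizeof(k[0]), group_cmp);
        for (int i = 0; i < 4; i++)
                printf("cpu=%d cgrp=%lu idx=%lu\n",
                       k[i].cpu, k[i].cgroup_id, k[i].group_index);
        return 0;
}
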
1867 * Add an event to the lists for its context.
1871 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1875 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1876 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1878 event->tstamp = perf_event_time(event); in list_add_event()
1881 * If we're a stand alone event or group leader, we go to the context in list_add_event()
1885 if (event->group_leader == event) { in list_add_event()
1886 event->group_caps = event->event_caps; in list_add_event()
1887 add_event_to_groups(event, ctx); in list_add_event()
1890 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1892 if (event->attr.inherit_stat) in list_add_event()
1895 if (event->state > PERF_EVENT_STATE_OFF) in list_add_event()
1896 perf_cgroup_event_enable(event, ctx); in list_add_event()
1902 * Initialize event state based on the perf_event_attr::disabled.
1904 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1906 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1910 static void __perf_event_read_size(struct perf_event *event, int nr_siblings) in __perf_event_read_size() argument
1916 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) in __perf_event_read_size()
1919 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) in __perf_event_read_size()
1922 if (event->attr.read_format & PERF_FORMAT_ID) in __perf_event_read_size()
1925 if (event->attr.read_format & PERF_FORMAT_GROUP) { in __perf_event_read_size()
1931 event->read_size = size; in __perf_event_read_size()
1934 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) in __perf_event_header_size() argument
1952 size += event->read_size; in __perf_event_header_size()
1966 event->header_size = size; in __perf_event_header_size()
1973 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
1975 __perf_event_read_size(event, in perf_event__header_size()
1976 event->group_leader->nr_siblings); in perf_event__header_size()
1977 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
1980 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
1983 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
2004 event->id_header_size = size; in perf_event__id_header_size()
2007 static bool perf_event_validate_size(struct perf_event *event) in perf_event_validate_size() argument
2011 * attach the event. in perf_event_validate_size()
2013 __perf_event_read_size(event, event->group_leader->nr_siblings + 1); in perf_event_validate_size()
2014 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); in perf_event_validate_size()
2015 perf_event__id_header_size(event); in perf_event_validate_size()
2021 if (event->read_size + event->header_size + in perf_event_validate_size()
2022 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) in perf_event_validate_size()
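
__perf_event_read_size() above precomputes how many bytes a read() of the event will return from attr.read_format, and perf_event_validate_size() rejects configurations whose combined record would reach 16 KiB. The layout mirrors the documented perf_event_open() read format; a userspace helper computing the same size could look like the sketch below (perf_read_size() and nr_members are illustrative names, and newer read_format bits such as PERF_FORMAT_LOST are ignored):

#include <stdio.h>
#include <linux/perf_event.h>

/* Size in bytes that read() on a perf event fd returns, derived from
 * read_format the same way __perf_event_read_size() derives it. */
static size_t perf_read_size(unsigned long long read_format, int nr_members)
{
        size_t entry = sizeof(unsigned long long);   /* the counter value */
        size_t size = 0;
        int nr = 1;                                  /* the leader's value */

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                size += sizeof(unsigned long long);
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                size += sizeof(unsigned long long);
        if (read_format & PERF_FORMAT_ID)
                entry += sizeof(unsigned long long);

        if (read_format & PERF_FORMAT_GROUP) {
                nr += nr_members;                    /* plus the siblings */
                size += sizeof(unsigned long long);  /* the 'nr' field */
        }

        return size + nr * entry;
}

int main(void)
{
        printf("%zu\n", perf_read_size(PERF_FORMAT_TOTAL_TIME_ENABLED |
                                       PERF_FORMAT_ID, 0));          /* 24 */
        printf("%zu\n", perf_read_size(PERF_FORMAT_GROUP |
                                       PERF_FORMAT_ID, 2));          /* 56 */
        return 0;
}
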
2028 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
2030 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
2032 lockdep_assert_held(&event->ctx->lock); in perf_group_attach()
2037 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
2040 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
2042 if (group_leader == event) in perf_group_attach()
2045 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
2047 group_leader->group_caps &= event->event_caps; in perf_group_attach()
2049 list_add_tail(&event->sibling_list, &group_leader->sibling_list); in perf_group_attach()
2059 * Remove an event from the lists for its context.
2063 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
2065 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
2071 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
2074 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
2077 if (event->attr.inherit_stat) in list_del_event()
2080 list_del_rcu(&event->event_entry); in list_del_event()
2082 if (event->group_leader == event) in list_del_event()
2083 del_event_from_groups(event, ctx); in list_del_event()
2086 * If event was in error state, then keep it in list_del_event()
2090 * of the event in list_del_event()
2092 if (event->state > PERF_EVENT_STATE_OFF) { in list_del_event()
2093 perf_cgroup_event_disable(event, ctx); in list_del_event()
2094 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in list_del_event()
2101 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) in perf_aux_output_match() argument
2106 if (!event->pmu->aux_output_match) in perf_aux_output_match()
2109 return event->pmu->aux_output_match(aux_event); in perf_aux_output_match()
2112 static void put_event(struct perf_event *event);
2113 static void event_sched_out(struct perf_event *event,
2117 static void perf_put_aux_event(struct perf_event *event) in perf_put_aux_event() argument
2119 struct perf_event_context *ctx = event->ctx; in perf_put_aux_event()
2124 * If event uses aux_event tear down the link in perf_put_aux_event()
2126 if (event->aux_event) { in perf_put_aux_event()
2127 iter = event->aux_event; in perf_put_aux_event()
2128 event->aux_event = NULL; in perf_put_aux_event()
2134 * If the event is an aux_event, tear down all links to in perf_put_aux_event()
2137 for_each_sibling_event(iter, event->group_leader) { in perf_put_aux_event()
2138 if (iter->aux_event != event) in perf_put_aux_event()
2142 put_event(event); in perf_put_aux_event()
2150 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_put_aux_event()
2154 static bool perf_need_aux_event(struct perf_event *event) in perf_need_aux_event() argument
2156 return !!event->attr.aux_output || !!event->attr.aux_sample_size; in perf_need_aux_event()
2159 static int perf_get_aux_event(struct perf_event *event, in perf_get_aux_event() argument
2163 * Our group leader must be an aux event if we want to be in perf_get_aux_event()
2164 * an aux_output. This way, the aux event will precede its in perf_get_aux_event()
2174 if (event->attr.aux_output && event->attr.aux_sample_size) in perf_get_aux_event()
2177 if (event->attr.aux_output && in perf_get_aux_event()
2178 !perf_aux_output_match(event, group_leader)) in perf_get_aux_event()
2181 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) in perf_get_aux_event()
2188 * Link aux_outputs to their aux event; this is undone in in perf_get_aux_event()
2193 event->aux_event = group_leader; in perf_get_aux_event()
2198 static inline struct list_head *get_event_list(struct perf_event *event) in get_event_list() argument
2200 struct perf_event_context *ctx = event->ctx; in get_event_list()
2201 return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active; in get_event_list()
2210 static inline void perf_remove_sibling_event(struct perf_event *event) in perf_remove_sibling_event() argument
2212 struct perf_event_context *ctx = event->ctx; in perf_remove_sibling_event()
2215 event_sched_out(event, cpuctx, ctx); in perf_remove_sibling_event()
2216 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_remove_sibling_event()
2219 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
2221 struct perf_event *leader = event->group_leader; in perf_group_detach()
2223 struct perf_event_context *ctx = event->ctx; in perf_group_detach()
2230 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
2233 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
2235 perf_put_aux_event(event); in perf_group_detach()
2240 if (leader != event) { in perf_group_detach()
2241 list_del_init(&event->sibling_list); in perf_group_detach()
2242 event->group_leader->nr_siblings--; in perf_group_detach()
2247 * If this was a group event with sibling events then in perf_group_detach()
2251 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { in perf_group_detach()
2260 sibling->group_caps = event->group_caps; in perf_group_detach()
2262 if (!RB_EMPTY_NODE(&event->group_node)) { in perf_group_detach()
2263 add_event_to_groups(sibling, event->ctx); in perf_group_detach()
2269 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
2279 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
2281 return event->state == PERF_EVENT_STATE_DEAD; in is_orphaned_event()
2284 static inline int __pmu_filter_match(struct perf_event *event) in __pmu_filter_match() argument
2286 struct pmu *pmu = event->pmu; in __pmu_filter_match()
2287 return pmu->filter_match ? pmu->filter_match(event) : 1; in __pmu_filter_match()
2291 * Check whether we should attempt to schedule an event group based on
2292 * PMU-specific filtering. An event group can consist of HW and SW events,
2296 static inline int pmu_filter_match(struct perf_event *event) in pmu_filter_match() argument
2300 if (!__pmu_filter_match(event)) in pmu_filter_match()
2303 for_each_sibling_event(sibling, event) { in pmu_filter_match()
2312 event_filter_match(struct perf_event *event) in event_filter_match() argument
2314 return (event->cpu == -1 || event->cpu == smp_processor_id()) && in event_filter_match()
2315 perf_cgroup_match(event) && pmu_filter_match(event); in event_filter_match()
2319 event_sched_out(struct perf_event *event, in event_sched_out() argument
2325 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
2328 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
2336 list_del_init(&event->active_list); in event_sched_out()
2338 perf_pmu_disable(event->pmu); in event_sched_out()
2340 event->pmu->del(event, 0); in event_sched_out()
2341 event->oncpu = -1; in event_sched_out()
2343 if (READ_ONCE(event->pending_disable) >= 0) { in event_sched_out()
2344 WRITE_ONCE(event->pending_disable, -1); in event_sched_out()
2345 perf_cgroup_event_disable(event, ctx); in event_sched_out()
2348 perf_event_set_state(event, state); in event_sched_out()
2350 if (!is_software_event(event)) in event_sched_out()
2354 if (event->attr.freq && event->attr.sample_freq) in event_sched_out()
2356 if (event->attr.exclusive || !cpuctx->active_oncpu) in event_sched_out()
2359 perf_pmu_enable(event->pmu); in event_sched_out()
2367 struct perf_event *event; in group_sched_out() local
2379 for_each_sibling_event(event, group_event) in group_sched_out()
2380 event_sched_out(event, cpuctx, ctx); in group_sched_out()
2388 * Cross CPU call to remove a performance event
2390 * We disable the event on the hardware level first. After that we
2394 __perf_remove_from_context(struct perf_event *event, in __perf_remove_from_context() argument
2406 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
2408 perf_group_detach(event); in __perf_remove_from_context()
2409 list_del_event(event, ctx); in __perf_remove_from_context()
2425 * Remove the event from a task's (or a CPU's) list of events.
2427 * If event->ctx is a cloned context, callers must make sure that
2428 * every task struct that event->ctx->task could possibly point to
2434 static void perf_remove_from_context(struct perf_event *event, unsigned long flags) in perf_remove_from_context() argument
2436 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
2440 event_function_call(event, __perf_remove_from_context, (void *)flags); in perf_remove_from_context()
2448 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in perf_remove_from_context()
2450 (event->attach_state & PERF_ATTACH_GROUP)) { in perf_remove_from_context()
2456 perf_group_detach(event); in perf_remove_from_context()
2462 * Cross CPU call to disable a performance event
2464 static void __perf_event_disable(struct perf_event *event, in __perf_event_disable() argument
2469 if (event->state < PERF_EVENT_STATE_INACTIVE) in __perf_event_disable()
2474 update_cgrp_time_from_event(event); in __perf_event_disable()
2477 if (event == event->group_leader) in __perf_event_disable()
2478 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2480 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2482 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in __perf_event_disable()
2483 perf_cgroup_event_disable(event, ctx); in __perf_event_disable()
2487 * Disable an event.
2489 * If event->ctx is a cloned context, callers must make sure that
2490 * every task struct that event->ctx->task could possibly point to
2493 * hold the top-level event's child_mutex, so any descendant that
2496 * When called from perf_pending_event it's OK because event->ctx
2500 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
2502 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
2505 if (event->state <= PERF_EVENT_STATE_OFF) { in _perf_event_disable()
2511 event_function_call(event, __perf_event_disable, NULL); in _perf_event_disable()
2514 void perf_event_disable_local(struct perf_event *event) in perf_event_disable_local() argument
2516 event_function_local(event, __perf_event_disable, NULL); in perf_event_disable_local()
2523 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
2527 ctx = perf_event_ctx_lock(event); in perf_event_disable()
2528 _perf_event_disable(event); in perf_event_disable()
2529 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
2533 void perf_event_disable_inatomic(struct perf_event *event) in perf_event_disable_inatomic() argument
2535 WRITE_ONCE(event->pending_disable, smp_processor_id()); in perf_event_disable_inatomic()
2537 irq_work_queue(&event->pending); in perf_event_disable_inatomic()
2542 static void perf_log_throttle(struct perf_event *event, int enable);
2543 static void perf_log_itrace_start(struct perf_event *event);
2546 event_sched_in(struct perf_event *event, in event_sched_in() argument
2552 WARN_ON_ONCE(event->ctx != ctx); in event_sched_in()
2556 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
2559 WRITE_ONCE(event->oncpu, smp_processor_id()); in event_sched_in()
2561 * Order event::oncpu write to happen before the ACTIVE state is in event_sched_in()
2566 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); in event_sched_in()
2573 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { in event_sched_in()
2574 perf_log_throttle(event, 1); in event_sched_in()
2575 event->hw.interrupts = 0; in event_sched_in()
2578 perf_pmu_disable(event->pmu); in event_sched_in()
2580 perf_log_itrace_start(event); in event_sched_in()
2582 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
2583 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_sched_in()
2584 event->oncpu = -1; in event_sched_in()
2589 if (!is_software_event(event)) in event_sched_in()
2593 if (event->attr.freq && event->attr.sample_freq) in event_sched_in()
2596 if (event->attr.exclusive) in event_sched_in()
2600 perf_pmu_enable(event->pmu); in event_sched_in()
2610 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
2624 for_each_sibling_event(event, group_event) { in group_sched_in()
2625 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
2626 partial_group = event; in group_sched_in()
2638 * The events up to the failed event are scheduled out normally. in group_sched_in()
2640 for_each_sibling_event(event, group_event) { in group_sched_in()
2641 if (event == partial_group) in group_sched_in()
2644 event_sched_out(event, cpuctx, ctx); in group_sched_in()
2654 * Work out whether we can put this event group on the CPU now.
2656 static int group_can_go_on(struct perf_event *event, in group_can_go_on() argument
2663 if (event->group_caps & PERF_EV_CAP_SOFTWARE) in group_can_go_on()
2675 if (event->attr.exclusive && !list_empty(get_event_list(event))) in group_can_go_on()
2684 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2687 list_add_event(event, ctx); in add_event_to_ctx()
2688 perf_group_attach(event); in add_event_to_ctx()
2733 * time an event is added, only do it for the groups of equal priority and
2787 * Cross CPU call to install and enable a performance event
2794 struct perf_event *event = info; in __perf_install_in_context() local
2795 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2826 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { in __perf_install_in_context()
2828 * If the current cgroup doesn't match the event's in __perf_install_in_context()
2833 event->cgrp->css.cgroup); in __perf_install_in_context()
2839 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2840 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_install_in_context()
2842 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2851 static bool exclusive_event_installable(struct perf_event *event,
2855 * Attach a performance event to a context.
2861 struct perf_event *event, in perf_install_in_context() argument
2868 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); in perf_install_in_context()
2870 if (event->cpu != -1) in perf_install_in_context()
2871 event->cpu = cpu; in perf_install_in_context()
2874 * Ensures that if we can observe event->ctx, both the event and ctx in perf_install_in_context()
2877 smp_store_release(&event->ctx, ctx); in perf_install_in_context()
2881 * without IPI. Except when this is the first event for the context, in in perf_install_in_context()
2885 * event will issue the IPI and reprogram the hardware. in perf_install_in_context()
2887 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && ctx->nr_events) { in perf_install_in_context()
2893 add_event_to_ctx(event, ctx); in perf_install_in_context()
2899 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
2941 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
2957 * thus we can safely install the event. in perf_install_in_context()
2963 add_event_to_ctx(event, ctx); in perf_install_in_context()
2968 * Cross CPU call to enable a performance event
2970 static void __perf_event_enable(struct perf_event *event, in __perf_event_enable() argument
2975 struct perf_event *leader = event->group_leader; in __perf_event_enable()
2978 if (event->state >= PERF_EVENT_STATE_INACTIVE || in __perf_event_enable()
2979 event->state <= PERF_EVENT_STATE_ERROR) in __perf_event_enable()
2985 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in __perf_event_enable()
2986 perf_cgroup_event_enable(event, ctx); in __perf_event_enable()
2991 if (!event_filter_match(event)) { in __perf_event_enable()
2997 * If the event is in a group and isn't the group leader, in __perf_event_enable()
3000 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { in __perf_event_enable()
3009 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_event_enable()
3013 * Enable an event.
3015 * If event->ctx is a cloned context, callers must make sure that
3016 * every task struct that event->ctx->task could possibly point to
3021 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
3023 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
3026 if (event->state >= PERF_EVENT_STATE_INACTIVE || in _perf_event_enable()
3027 event->state < PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3034 * If the event is in error state, clear that first. in _perf_event_enable()
3036 * That way, if we see the event in error state below, we know that it in _perf_event_enable()
3040 if (event->state == PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3044 if (event->event_caps & PERF_EV_CAP_SIBLING && in _perf_event_enable()
3045 event->group_leader == event) in _perf_event_enable()
3048 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
3052 event_function_call(event, __perf_event_enable, NULL); in _perf_event_enable()
3058 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
3062 ctx = perf_event_ctx_lock(event); in perf_event_enable()
3063 _perf_event_enable(event); in perf_event_enable()
3064 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
3069 struct perf_event *event; member
3076 struct perf_event *event = sd->event; in __perf_event_stop() local
3079 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in __perf_event_stop()
3087 * so we need to check again lest we try to stop another CPU's event. in __perf_event_stop()
3089 if (READ_ONCE(event->oncpu) != smp_processor_id()) in __perf_event_stop()
3092 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_stop()
3100 * Since this is happening on an event-local CPU, no trace is lost in __perf_event_stop()
3104 event->pmu->start(event, 0); in __perf_event_stop()
3109 static int perf_event_stop(struct perf_event *event, int restart) in perf_event_stop() argument
3112 .event = event, in perf_event_stop()
3118 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in perf_event_stop()
3125 * We only want to restart ACTIVE events, so if the event goes in perf_event_stop()
3126 * inactive here (event->oncpu==-1), there's nothing more to do; in perf_event_stop()
3129 ret = cpu_function_call(READ_ONCE(event->oncpu), in perf_event_stop()
3142 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
3143 * (p2) when an event is scheduled in (pmu::add), it calls
3147 * If (p1) happens while the event is active, we restart it to force (p2).
3158 void perf_event_addr_filters_sync(struct perf_event *event) in perf_event_addr_filters_sync() argument
3160 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_sync()
3162 if (!has_addr_filter(event)) in perf_event_addr_filters_sync()
3166 if (event->addr_filters_gen != event->hw.addr_filters_gen) { in perf_event_addr_filters_sync()
3167 event->pmu->addr_filters_sync(event); in perf_event_addr_filters_sync()
3168 event->hw.addr_filters_gen = event->addr_filters_gen; in perf_event_addr_filters_sync()
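
perf_event_addr_filters_sync() above is the consumer half of a generation-counter handshake: updates to event->addr_filter_ranges bump event->addr_filters_gen, and the PMU reprograms its hardware filters only when that differs from hw.addr_filters_gen. The pattern in isolation, as a toy model with illustrative names and the locking omitted:

#include <stdio.h>

/* Toy model of the addr_filters_gen handshake: the updater bumps 'gen',
 * the consumer reprograms only when its snapshot is stale. */
struct filters {
        unsigned long gen;          /* bumped by the writer (p1) */
        unsigned long hw_gen;       /* last generation pushed to hardware (p2) */
};

static void filters_update(struct filters *f)
{
        /* ...rewrite the ranges here, then publish a new generation... */
        f->gen++;
}

static void filters_sync(struct filters *f)
{
        if (f->hw_gen != f->gen) {
                printf("reprogramming hardware for gen %lu\n", f->gen);
                f->hw_gen = f->gen;
        }
}

int main(void)
{
        struct filters f = { 0, 0 };

        filters_sync(&f);            /* nothing to do */
        filters_update(&f);
        filters_update(&f);
        filters_sync(&f);            /* one reprogram covers both updates */
        return 0;
}
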
3174 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
3179 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
3182 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
3183 _perf_event_enable(event); in _perf_event_refresh()
3191 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
3196 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
3197 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
3198 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
3219 static int perf_event_modify_attr(struct perf_event *event, in perf_event_modify_attr() argument
3222 if (event->attr.type != attr->type) in perf_event_modify_attr()
3225 switch (event->attr.type) { in perf_event_modify_attr()
3227 return perf_event_modify_breakpoint(event, attr); in perf_event_modify_attr()
3238 struct perf_event *event, *tmp; in ctx_sched_out() local
3291 list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) in ctx_sched_out()
3292 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3296 list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) in ctx_sched_out()
3297 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3347 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
3352 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
3356 * Update the event value, we cannot use perf_event_read() in __perf_event_sync_stat()
3359 * we know the event must be on the current CPU, therefore we in __perf_event_sync_stat()
3362 if (event->state == PERF_EVENT_STATE_ACTIVE) in __perf_event_sync_stat()
3363 event->pmu->read(event); in __perf_event_sync_stat()
3365 perf_event_update_time(event); in __perf_event_sync_stat()
3368 * In order to keep per-task stats reliable we need to flip the event in __perf_event_sync_stat()
3372 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
3375 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
3376 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
3381 perf_event_update_userpage(event); in __perf_event_sync_stat()
3388 struct perf_event *event, *next_event; in perf_event_sync_stat() local
3395 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
3401 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
3404 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
3406 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
3536 * This callback is relevant even to per-cpu events; for example multi event
3586 * We stop each event and update the event value in event->count.
3589 * sets the disabled bit in the control field of event _before_
3590 * accessing the event control register. If a NMI hits, then it will
3591 * not restart the event.
3610 * cgroup events are system-wide mode only in __perf_event_task_sched_out()
3646 static void __heap_add(struct min_heap *heap, struct perf_event *event) in __heap_add() argument
3650 if (event) { in __heap_add()
3651 itrs[heap->nr] = event; in __heap_add()
3664 /* Space for per CPU and/or any CPU event iterators. */ in visit_groups_merge()
3719 * Because the userpage is strictly per-event (there is no concept of context,
3725 static inline bool event_update_userpage(struct perf_event *event) in event_update_userpage() argument
3727 if (likely(!atomic_read(&event->mmap_count))) in event_update_userpage()
3730 perf_event_update_time(event); in event_update_userpage()
3731 perf_event_update_userpage(event); in event_update_userpage()
3738 struct perf_event *event; in group_update_userpage() local
3743 for_each_sibling_event(event, group_event) in group_update_userpage()
3744 event_update_userpage(event); in group_update_userpage()
3747 static int merge_sched_in(struct perf_event *event, void *data) in merge_sched_in() argument
3749 struct perf_event_context *ctx = event->ctx; in merge_sched_in()
3753 if (event->state <= PERF_EVENT_STATE_OFF) in merge_sched_in()
3756 if (!event_filter_match(event)) in merge_sched_in()
3759 if (group_can_go_on(event, cpuctx, *can_add_hw)) { in merge_sched_in()
3760 if (!group_sched_in(event, cpuctx, ctx)) in merge_sched_in()
3761 list_add_tail(&event->active_list, get_event_list(event)); in merge_sched_in()
3764 if (event->state == PERF_EVENT_STATE_INACTIVE) { in merge_sched_in()
3766 if (event->attr.pinned) { in merge_sched_in()
3767 perf_cgroup_event_disable(event, ctx); in merge_sched_in()
3768 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in merge_sched_in()
3772 group_update_userpage(event); in merge_sched_in()
3909 * We restore the event value and then enable it.
3912 * sets the enabled bit in the control field of event _before_
3913 * accessing the event control register. If a NMI hits, then it will
3914 * keep the event running.
3924 * to switch in PMU state; cgroup events are system-wide mode only. in __perf_event_task_sched_in()
3947 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
3949 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
4023 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
4025 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
4029 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
4043 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
4048 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
4060 struct perf_event *event; in perf_adjust_freq_unthr_context() local
4076 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_adjust_freq_unthr_context()
4077 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_context()
4080 if (!event_filter_match(event)) in perf_adjust_freq_unthr_context()
4083 perf_pmu_disable(event->pmu); in perf_adjust_freq_unthr_context()
4085 hwc = &event->hw; in perf_adjust_freq_unthr_context()
4089 perf_log_throttle(event, 1); in perf_adjust_freq_unthr_context()
4090 event->pmu->start(event, 0); in perf_adjust_freq_unthr_context()
4093 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_context()
4097 * stop the event and update event->count in perf_adjust_freq_unthr_context()
4099 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_context()
4101 now = local64_read(&event->count); in perf_adjust_freq_unthr_context()
4106 * restart the event in perf_adjust_freq_unthr_context()
4108 * we have stopped the event so tell that in perf_adjust_freq_unthr_context()
4113 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_context()
4115 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_context()
4117 perf_pmu_enable(event->pmu); in perf_adjust_freq_unthr_context()
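
perf_adjust_freq_unthr_context() above periodically turns attr.sample_freq back into a hardware period: if the counter advanced by count events in nsec nanoseconds, the observed rate is count * 1e9 / nsec events per second, and dividing that by the requested frequency gives the period that would have produced roughly sample_freq samples. perf_calculate_period() does this with overflow-aware fixed-point; the plain arithmetic, as a sketch (calc_period() is an illustrative name, and __int128 stands in for the kernel's shift-based scaling):

#include <stdio.h>

typedef unsigned long long u64;

#define NSEC_PER_SEC 1000000000ULL

/* Naive freq->period computation: how many events per sample so that
 * 'freq' samples/sec would have been produced by a counter that counted
 * 'count' events in 'nsec' nanoseconds. */
static u64 calc_period(u64 freq, u64 nsec, u64 count)
{
        if (!nsec || !freq)
                return 0;

        return (u64)(((unsigned __int128)count * NSEC_PER_SEC) /
                     ((unsigned __int128)nsec * freq));
}

int main(void)
{
        /* 2,000,000 events in 10ms => 200M events/sec; at 4000 samples/sec
         * the period comes out to ~50,000 events per sample. */
        printf("period = %llu\n", calc_period(4000, 10000000, 2000000));
        return 0;
}
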
4125 * Move @event to the tail of the @ctx's eligible events.
4127 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) in rotate_ctx() argument
4136 perf_event_groups_delete(&ctx->flexible_groups, event); in rotate_ctx()
4137 perf_event_groups_insert(&ctx->flexible_groups, event); in rotate_ctx()
4140 /* pick an event from the flexible_groups to rotate */
4144 struct perf_event *event; in ctx_event_to_rotate() local
4146 /* pick the first active flexible event */ in ctx_event_to_rotate()
4147 event = list_first_entry_or_null(&ctx->flexible_active, in ctx_event_to_rotate()
4150 /* if no active flexible event, pick the first event */ in ctx_event_to_rotate()
4151 if (!event) { in ctx_event_to_rotate()
4152 event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree), in ctx_event_to_rotate()
4153 typeof(*event), group_node); in ctx_event_to_rotate()
4162 return event; in ctx_event_to_rotate()
4173 * events, thus the event count values are stable. in perf_rotate_context()
4229 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
4232 if (!event->attr.enable_on_exec) in event_enable_on_exec()
4235 event->attr.enable_on_exec = 0; in event_enable_on_exec()
4236 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
4239 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_enable_on_exec()
4253 struct perf_event *event; in perf_event_enable_on_exec() local
4265 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
4266 enabled |= event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
4267 event_type |= get_event_type(event); in perf_event_enable_on_exec()
4271 * Unclone and reschedule this context if we enabled any event. in perf_event_enable_on_exec()
4289 struct perf_event *event; member
4294 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) in __perf_event_read_cpu() argument
4298 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { in __perf_event_read_cpu()
4312 * Cross CPU call to read the hardware event
4317 struct perf_event *sub, *event = data->event; in __perf_event_read() local
4318 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
4320 struct pmu *pmu = event->pmu; in __perf_event_read()
4326 * event->count would have been updated to a recent sample in __perf_event_read()
4327 * when the event was scheduled out. in __perf_event_read()
4335 update_cgrp_time_from_event(event); in __perf_event_read()
4338 perf_event_update_time(event); in __perf_event_read()
4340 perf_event_update_sibling_time(event); in __perf_event_read()
4342 if (event->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
4346 pmu->read(event); in __perf_event_read()
4353 pmu->read(event); in __perf_event_read()
4355 for_each_sibling_event(sub, event) { in __perf_event_read()
4358 * Use sibling's PMU rather than @event's since in __perf_event_read()
4371 static inline u64 perf_event_count(struct perf_event *event) in perf_event_count() argument
4373 return local64_read(&event->count) + atomic64_read(&event->child_count); in perf_event_count()
4376 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
4384 ctx_time = perf_event_time_now(event, *now); in calc_timer_values()
4385 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
4389 * NMI-safe method to read a local event, that is an event that
4396 int perf_event_read_local(struct perf_event *event, u64 *value, in perf_event_read_local() argument
4409 * It must not be an event with inherit set, we cannot read in perf_event_read_local()
4412 if (event->attr.inherit) { in perf_event_read_local()
4417 /* If this is a per-task event, it must be for current */ in perf_event_read_local()
4418 if ((event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4419 event->hw.target != current) { in perf_event_read_local()
4424 /* If this is a per-CPU event, it must be for this CPU */ in perf_event_read_local()
4425 if (!(event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4426 event->cpu != smp_processor_id()) { in perf_event_read_local()
4431 /* If this is a pinned event it must be running on this CPU */ in perf_event_read_local()
4432 if (event->attr.pinned && event->oncpu != smp_processor_id()) { in perf_event_read_local()
4438 * If the event is currently on this CPU, it's either a per-task event, in perf_event_read_local()
4442 if (event->oncpu == smp_processor_id()) in perf_event_read_local()
4443 event->pmu->read(event); in perf_event_read_local()
4445 *value = local64_read(&event->count); in perf_event_read_local()
4449 calc_timer_values(event, &__now, &__enabled, &__running); in perf_event_read_local()
4461 static int perf_event_read(struct perf_event *event, bool group) in perf_event_read() argument
4463 enum perf_event_state state = READ_ONCE(event->state); in perf_event_read()
4467 * If event is enabled and currently active on a CPU, update the in perf_event_read()
4468 * value in the event structure: in perf_event_read()
4482 event_cpu = READ_ONCE(event->oncpu); in perf_event_read()
4487 .event = event, in perf_event_read()
4493 event_cpu = __perf_event_read_cpu(event, event_cpu); in perf_event_read()
4499 * If event_cpu isn't a valid CPU it means the event got in perf_event_read()
4500 * scheduled out and that will have updated the event count. in perf_event_read()
4502 * Therefore, either way, we'll have an up-to-date event count in perf_event_read()
4510 struct perf_event_context *ctx = event->ctx; in perf_event_read()
4514 state = event->state; in perf_event_read()
4526 update_cgrp_time_from_event(event); in perf_event_read()
4529 perf_event_update_time(event); in perf_event_read()
4531 perf_event_update_sibling_time(event); in perf_event_read()
4596 struct perf_event *event) in find_get_context() argument
4603 int cpu = event->cpu; in find_get_context()
4606 /* Must be root to operate on a CPU event: */ in find_get_context()
4607 err = perf_allow_cpu(&event->attr); in find_get_context()
4626 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in find_get_context()
4693 static void perf_event_free_filter(struct perf_event *event);
4694 static void perf_event_free_bpf_prog(struct perf_event *event);
4698 struct perf_event *event; in free_event_rcu() local
4700 event = container_of(head, struct perf_event, rcu_head); in free_event_rcu()
4701 if (event->ns) in free_event_rcu()
4702 put_pid_ns(event->ns); in free_event_rcu()
4703 perf_event_free_filter(event); in free_event_rcu()
4704 kfree(event); in free_event_rcu()
4707 static void ring_buffer_attach(struct perf_event *event,
4710 static void detach_sb_event(struct perf_event *event) in detach_sb_event() argument
4712 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in detach_sb_event()
4715 list_del_rcu(&event->sb_list); in detach_sb_event()
4719 static bool is_sb_event(struct perf_event *event) in is_sb_event() argument
4721 struct perf_event_attr *attr = &event->attr; in is_sb_event()
4723 if (event->parent) in is_sb_event()
4726 if (event->attach_state & PERF_ATTACH_TASK) in is_sb_event()
4738 static void unaccount_pmu_sb_event(struct perf_event *event) in unaccount_pmu_sb_event() argument
4740 if (is_sb_event(event)) in unaccount_pmu_sb_event()
4741 detach_sb_event(event); in unaccount_pmu_sb_event()
4744 static void unaccount_event_cpu(struct perf_event *event, int cpu) in unaccount_event_cpu() argument
4746 if (event->parent) in unaccount_event_cpu()
4749 if (is_cgroup_event(event)) in unaccount_event_cpu()
4775 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
4779 if (event->parent) in unaccount_event()
4782 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in unaccount_event()
4784 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
4786 if (event->attr.comm) in unaccount_event()
4788 if (event->attr.namespaces) in unaccount_event()
4790 if (event->attr.cgroup) in unaccount_event()
4792 if (event->attr.task) in unaccount_event()
4794 if (event->attr.freq) in unaccount_event()
4796 if (event->attr.context_switch) { in unaccount_event()
4800 if (is_cgroup_event(event)) in unaccount_event()
4802 if (has_branch_stack(event)) in unaccount_event()
4804 if (event->attr.ksymbol) in unaccount_event()
4806 if (event->attr.bpf_event) in unaccount_event()
4808 if (event->attr.text_poke) in unaccount_event()
4816 unaccount_event_cpu(event, event->cpu); in unaccount_event()
4818 unaccount_pmu_sb_event(event); in unaccount_event()
4831 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
4841 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
4843 struct pmu *pmu = event->pmu; in exclusive_event_init()
4856 * Since this is called in perf_event_alloc() path, event::ctx in exclusive_event_init()
4858 * to mean "per-task event", because unlike other attach states it in exclusive_event_init()
4861 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
4872 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
4874 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
4880 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
4896 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
4900 struct pmu *pmu = event->pmu; in exclusive_event_installable()
4908 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
4915 static void perf_addr_filters_splice(struct perf_event *event,
4918 static void _free_event(struct perf_event *event) in _free_event() argument
4920 irq_work_sync(&event->pending); in _free_event()
4922 unaccount_event(event); in _free_event()
4924 security_perf_event_free(event); in _free_event()
4926 if (event->rb) { in _free_event()
4928 * Can happen when we close an event with re-directed output. in _free_event()
4933 mutex_lock(&event->mmap_mutex); in _free_event()
4934 ring_buffer_attach(event, NULL); in _free_event()
4935 mutex_unlock(&event->mmap_mutex); in _free_event()
4938 if (is_cgroup_event(event)) in _free_event()
4939 perf_detach_cgroup(event); in _free_event()
4941 if (!event->parent) { in _free_event()
4942 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in _free_event()
4946 perf_event_free_bpf_prog(event); in _free_event()
4947 perf_addr_filters_splice(event, NULL); in _free_event()
4948 kfree(event->addr_filter_ranges); in _free_event()
4950 if (event->destroy) in _free_event()
4951 event->destroy(event); in _free_event()
4957 if (event->hw.target) in _free_event()
4958 put_task_struct(event->hw.target); in _free_event()
4964 if (event->ctx) in _free_event()
4965 put_ctx(event->ctx); in _free_event()
4967 exclusive_event_destroy(event); in _free_event()
4968 module_put(event->pmu->module); in _free_event()
4970 call_rcu(&event->rcu_head, free_event_rcu); in _free_event()
4975 * where the event isn't exposed yet and inherited events.
4977 static void free_event(struct perf_event *event) in free_event() argument
4979 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
4980 "unexpected event refcount: %ld; ptr=%p\n", in free_event()
4981 atomic_long_read(&event->refcount), event)) { in free_event()
4986 _free_event(event); in free_event()
4990 * Remove user event from the owner task.
4992 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
5000 * indeed free this event, otherwise we need to serialize on in perf_remove_from_owner()
5003 owner = READ_ONCE(event->owner); in perf_remove_from_owner()
5026 * We have to re-check the event->owner field, if it is cleared in perf_remove_from_owner()
5029 * event. in perf_remove_from_owner()
5031 if (event->owner) { in perf_remove_from_owner()
5032 list_del_init(&event->owner_entry); in perf_remove_from_owner()
5033 smp_store_release(&event->owner, NULL); in perf_remove_from_owner()
5040 static void put_event(struct perf_event *event) in put_event() argument
5042 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
5045 _free_event(event); in put_event()
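
put_event() above is a conventional last-reference-frees drop: atomic_long_dec_and_test() on event->refcount and, only when it reaches zero, _free_event(); free_event() additionally asserts that the count is exactly one for events never exposed to userspace. The same pattern in isolation with C11 atomics (obj, obj_get, obj_put are illustrative names):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-alone model of the put_event() pattern: the last reference frees. */
struct obj {
        atomic_long refcount;
        const char *name;
};

static struct obj *obj_new(const char *name)
{
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refcount, 1);
        o->name = name;
        return o;
}

static void obj_get(struct obj *o)
{
        atomic_fetch_add(&o->refcount, 1);
}

static void obj_put(struct obj *o)
{
        /* fetch_sub returns the old value: 1 means we dropped the last ref */
        if (atomic_fetch_sub(&o->refcount, 1) != 1)
                return;
        printf("freeing %s\n", o->name);
        free(o);
}

int main(void)
{
        struct obj *o = obj_new("event");

        obj_get(o);
        obj_put(o);     /* still referenced */
        obj_put(o);     /* last reference: freed */
        return 0;
}
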
5049 * Kill an event dead; while event:refcount will preserve the event
5053 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
5055 struct perf_event_context *ctx = event->ctx; in perf_event_release_kernel()
5064 WARN_ON_ONCE(event->attach_state & in perf_event_release_kernel()
5069 if (!is_kernel_event(event)) in perf_event_release_kernel()
5070 perf_remove_from_owner(event); in perf_event_release_kernel()
5072 ctx = perf_event_ctx_lock(event); in perf_event_release_kernel()
5074 perf_remove_from_context(event, DETACH_GROUP); in perf_event_release_kernel()
5078 * Mark this event as STATE_DEAD, there is no external reference to it in perf_event_release_kernel()
5081 * Anybody acquiring event->child_mutex after the below loop _must_ in perf_event_release_kernel()
5088 event->state = PERF_EVENT_STATE_DEAD; in perf_event_release_kernel()
5091 perf_event_ctx_unlock(event, ctx); in perf_event_release_kernel()
5094 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5095 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_release_kernel()
5106 * Since the event cannot get freed while we hold the in perf_event_release_kernel()
5117 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5119 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5126 tmp = list_first_entry_or_null(&event->child_list, in perf_event_release_kernel()
5135 put_event(event); in perf_event_release_kernel()
5138 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5143 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5152 * Wake any perf_event_free_task() waiting for this event to be in perf_event_release_kernel()
5160 put_event(event); /* Must be the 'last' reference */ in perf_event_release_kernel()
5174 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
5182 mutex_lock(&event->child_mutex); in __perf_event_read_value()
5184 (void)perf_event_read(event, false); in __perf_event_read_value()
5185 total += perf_event_count(event); in __perf_event_read_value()
5187 *enabled += event->total_time_enabled + in __perf_event_read_value()
5188 atomic64_read(&event->child_total_time_enabled); in __perf_event_read_value()
5189 *running += event->total_time_running + in __perf_event_read_value()
5190 atomic64_read(&event->child_total_time_running); in __perf_event_read_value()
5192 list_for_each_entry(child, &event->child_list, child_list) { in __perf_event_read_value()
5198 mutex_unlock(&event->child_mutex); in __perf_event_read_value()
5203 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
5208 ctx = perf_event_ctx_lock(event); in perf_event_read_value()
5209 count = __perf_event_read_value(event, enabled, running); in perf_event_read_value()
5210 perf_event_ctx_unlock(event, ctx); in perf_event_read_value()
5263 static int perf_read_group(struct perf_event *event, in perf_read_group() argument
5266 struct perf_event *leader = event->group_leader, *child; in perf_read_group()
5273 values = kzalloc(event->read_size, GFP_KERNEL); in perf_read_group()
5297 ret = event->read_size; in perf_read_group()
5298 if (copy_to_user(buf, values, event->read_size)) in perf_read_group()
5309 static int perf_read_one(struct perf_event *event, in perf_read_one() argument
5316 values[n++] = __perf_event_read_value(event, &enabled, &running); in perf_read_one()
5322 values[n++] = primary_event_id(event); in perf_read_one()
5330 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
5334 if (event->state > PERF_EVENT_STATE_EXIT) in is_event_hup()
5337 mutex_lock(&event->child_mutex); in is_event_hup()
5338 no_children = list_empty(&event->child_list); in is_event_hup()
5339 mutex_unlock(&event->child_mutex); in is_event_hup()
5344 * Read the performance event - simple non-blocking version for now
5347 __perf_read(struct perf_event *event, char __user *buf, size_t count) in __perf_read() argument
5349 u64 read_format = event->attr.read_format; in __perf_read()
5353 * Return end-of-file for a read on an event that is in in __perf_read()
5357 if (event->state == PERF_EVENT_STATE_ERROR) in __perf_read()
5360 if (count < event->read_size) in __perf_read()
5363 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
5365 ret = perf_read_group(event, read_format, buf); in __perf_read()
5367 ret = perf_read_one(event, read_format, buf); in __perf_read()
5375 struct perf_event *event = file->private_data; in perf_read() local
5379 ret = security_perf_event_read(event); in perf_read()
5383 ctx = perf_event_ctx_lock(event); in perf_read()
5384 ret = __perf_read(event, buf, count); in perf_read()
5385 perf_event_ctx_unlock(event, ctx); in perf_read()
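The read path above ends in a plain read(2) on the event file descriptor; perf_read_one() lays the answer out as value, time_enabled, time_running, id according to attr.read_format. A minimal userspace sketch, not part of this file, using the enabled/running times accumulated by __perf_event_read_value() to compensate for counter multiplexing; the perf_event_open() wrapper is the usual raw-syscall idiom since glibc has no wrapper, and the busy loop is only there to give the counter something to count.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        /* layout matches perf_read_one(): value, time_enabled, time_running, id */
        struct { uint64_t value, time_enabled, time_running, id; } rf;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_HARDWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                           PERF_FORMAT_TOTAL_TIME_RUNNING |
                           PERF_FORMAT_ID;
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        fd = perf_event_open(&attr, 0 /* self */, -1 /* any cpu */, -1, 0);
        if (fd < 0)
                return 1;

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        for (volatile int i = 0; i < 1000000; i++)
                ;
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running) {
                /* scale for time the event spent descheduled (multiplexing) */
                double scaled = (double)rf.value *
                                rf.time_enabled / rf.time_running;
                printf("instructions: %.0f (id %llu)\n", scaled,
                       (unsigned long long)rf.id);
        }
        close(fd);
        return 0;
}

With PERF_FORMAT_GROUP set, perf_read_group() instead returns the member count and the shared time_enabled/time_running once, followed by one value (and id, if requested) per group member.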
5392 struct perf_event *event = file->private_data; in perf_poll() local
5396 poll_wait(file, &event->waitq, wait); in perf_poll()
5398 if (is_event_hup(event)) in perf_poll()
5402 * Pin the event->rb by taking event->mmap_mutex; otherwise in perf_poll()
5405 mutex_lock(&event->mmap_mutex); in perf_poll()
5406 rb = event->rb; in perf_poll()
5409 mutex_unlock(&event->mmap_mutex); in perf_poll()
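perf_poll() above only reports EPOLLIN once the ring buffer has crossed the wakeup threshold configured via attr.wakeup_events or attr.wakeup_watermark, and EPOLLHUP once is_event_hup() holds, so a consumer normally blocks in poll(2). A small sketch of that consumer side, assuming fd is a sampling event that has already been mmap'ed as in perf_mmap() below; the helper name is illustrative.

#include <poll.h>

/* Block until the kernel signals that wakeup_events samples (or the
 * wakeup_watermark) have been written, or the event is in a HUP state. */
static int wait_for_samples(int fd)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        int ret = poll(&pfd, 1, -1 /* no timeout */);

        if (ret > 0 && (pfd.revents & POLLHUP))
                return -1;      /* is_event_hup(): EXIT state, no children left */
        return ret;
}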
5413 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
5415 (void)perf_event_read(event, false); in _perf_event_reset()
5416 local64_set(&event->count, 0); in _perf_event_reset()
5417 perf_event_update_userpage(event); in _perf_event_reset()
5420 /* Assume it's not an event with inherit set. */
5421 u64 perf_event_pause(struct perf_event *event, bool reset) in perf_event_pause() argument
5426 ctx = perf_event_ctx_lock(event); in perf_event_pause()
5427 WARN_ON_ONCE(event->attr.inherit); in perf_event_pause()
5428 _perf_event_disable(event); in perf_event_pause()
5429 count = local64_read(&event->count); in perf_event_pause()
5431 local64_set(&event->count, 0); in perf_event_pause()
5432 perf_event_ctx_unlock(event, ctx); in perf_event_pause()
5439 * Holding the top-level event's child_mutex means that any
5440 * descendant process that has inherited this event will block
5444 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
5449 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
5451 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
5452 func(event); in perf_event_for_each_child()
5453 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
5455 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
5458 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
5461 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
5466 event = event->group_leader; in perf_event_for_each()
5468 perf_event_for_each_child(event, func); in perf_event_for_each()
5469 for_each_sibling_event(sibling, event) in perf_event_for_each()
5473 static void __perf_event_period(struct perf_event *event, in __perf_event_period() argument
5481 if (event->attr.freq) { in __perf_event_period()
5482 event->attr.sample_freq = value; in __perf_event_period()
5484 event->attr.sample_period = value; in __perf_event_period()
5485 event->hw.sample_period = value; in __perf_event_period()
5488 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
5493 * trying to unthrottle while we already re-started the event. in __perf_event_period()
5495 if (event->hw.interrupts == MAX_INTERRUPTS) { in __perf_event_period()
5496 event->hw.interrupts = 0; in __perf_event_period()
5497 perf_log_throttle(event, 1); in __perf_event_period()
5499 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
5502 local64_set(&event->hw.period_left, 0); in __perf_event_period()
5505 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
5510 static int perf_event_check_period(struct perf_event *event, u64 value) in perf_event_check_period() argument
5512 return event->pmu->check_period(event, value); in perf_event_check_period()
5515 static int _perf_event_period(struct perf_event *event, u64 value) in _perf_event_period() argument
5517 if (!is_sampling_event(event)) in _perf_event_period()
5523 if (event->attr.freq && value > sysctl_perf_event_sample_rate) in _perf_event_period()
5526 if (perf_event_check_period(event, value)) in _perf_event_period()
5529 if (!event->attr.freq && (value & (1ULL << 63))) in _perf_event_period()
5532 event_function_call(event, __perf_event_period, &value); in _perf_event_period()
5537 int perf_event_period(struct perf_event *event, u64 value) in perf_event_period() argument
5542 ctx = perf_event_ctx_lock(event); in perf_event_period()
5543 ret = _perf_event_period(event, value); in perf_event_period()
5544 perf_event_ctx_unlock(event, ctx); in perf_event_period()
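__perf_event_period() above is reached from the PERF_EVENT_IOC_PERIOD command handled in _perf_ioctl() below; the ioctl argument is a pointer to a 64-bit value, matching the get_user() on the kernel side. A sketch of the userspace call, with an illustrative helper name and fd assumed to be an existing sampling event.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* Retune a sampling event at runtime; for attr.freq events the value is a
 * frequency in Hz, otherwise it is a raw sample period. */
static int set_sample_period(int fd, uint64_t value)
{
        return ioctl(fd, PERF_EVENT_IOC_PERIOD, &value);
}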
5566 static int perf_event_set_output(struct perf_event *event,
5568 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
5569 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
5573 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
5590 return _perf_event_refresh(event, arg); in _perf_ioctl()
5599 return _perf_event_period(event, value); in _perf_ioctl()
5603 u64 id = primary_event_id(event); in _perf_ioctl()
5620 ret = perf_event_set_output(event, output_event); in _perf_ioctl()
5623 ret = perf_event_set_output(event, NULL); in _perf_ioctl()
5629 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
5632 return perf_event_set_bpf_prog(event, arg); in _perf_ioctl()
5638 rb = rcu_dereference(event->rb); in _perf_ioctl()
5649 return perf_event_query_prog_array(event, (void __user *)arg); in _perf_ioctl()
5659 return perf_event_modify_attr(event, &new_attr); in _perf_ioctl()
5666 perf_event_for_each(event, func); in _perf_ioctl()
5668 perf_event_for_each_child(event, func); in _perf_ioctl()
5675 struct perf_event *event = file->private_data; in perf_ioctl() local
5680 ret = security_perf_event_write(event); in perf_ioctl()
5684 ctx = perf_event_ctx_lock(event); in perf_ioctl()
5685 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
5686 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
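Most of the remaining _perf_ioctl() commands are one-liners from userspace. A sketch of the common ones; PERF_IOC_FLAG_GROUP routes the request through perf_event_for_each() so the whole group is affected, and the helper name is illustrative.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <stdint.h>

static void restart_group(int group_leader_fd)
{
        uint64_t id;

        /* zero all counters in the group, then (re)enable them together */
        ioctl(group_leader_fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
        ioctl(group_leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);

        /* PERF_EVENT_IOC_ID reports primary_event_id(), useful for matching
         * PERF_FORMAT_GROUP read() entries back to file descriptors */
        ioctl(group_leader_fd, PERF_EVENT_IOC_ID, &id);
}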
5716 struct perf_event *event; in perf_event_task_enable() local
5719 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
5720 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
5721 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
5722 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
5732 struct perf_event *event; in perf_event_task_disable() local
5735 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
5736 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
5737 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
5738 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
5745 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
5747 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
5750 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
5753 return event->pmu->event_idx(event); in perf_event_index()
5756 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
5762 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
5779 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
5788 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
5795 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
5801 * based on snapshot values taken when the event in perf_event_update_userpage()
5808 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
5818 userpg->index = perf_event_index(event); in perf_event_update_userpage()
5819 userpg->offset = perf_event_count(event); in perf_event_update_userpage()
5821 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
5824 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
5827 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
5829 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
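perf_event_update_userpage() publishes its snapshot under userpg->lock, which it bumps before and after the update, so a self-monitoring reader retries until it observes the same sequence value twice. A sketch of that read sequence, x86-only because of the rdpmc instruction; the helper names are illustrative and pg is assumed to point at the first mmap'ed page of the event.

#include <linux/perf_event.h>
#include <stdint.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static uint64_t rdpmc(uint32_t counter)            /* x86 only */
{
        uint32_t lo, hi;

        __asm__ __volatile__("rdpmc" : "=a"(lo), "=d"(hi) : "c"(counter));
        return lo | ((uint64_t)hi << 32);
}

/* Lock-free self-monitoring read following the sequence count that
 * perf_event_update_userpage() maintains in userpg->lock. */
static uint64_t read_self(volatile struct perf_event_mmap_page *pg,
                          uint64_t *enabled, uint64_t *running)
{
        uint64_t count;
        uint32_t seq, idx;

        do {
                seq = pg->lock;
                barrier();
                *enabled = pg->time_enabled;
                *running = pg->time_running;
                count = pg->offset;
                idx = pg->index;        /* perf_event_index(): 0 means "use read(2)" */
                if (pg->cap_user_rdpmc && idx)
                        count += rdpmc(idx - 1);
                barrier();
        } while (pg->lock != seq);

        return count;
}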
5841 struct perf_event *event = vmf->vma->vm_file->private_data; in perf_mmap_fault() local
5852 rb = rcu_dereference(event->rb); in perf_mmap_fault()
5874 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
5880 if (event->rb) { in ring_buffer_attach()
5883 * event->rb_entry and wait/clear when adding event->rb_entry. in ring_buffer_attach()
5885 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
5887 old_rb = event->rb; in ring_buffer_attach()
5889 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
5892 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
5893 event->rcu_pending = 1; in ring_buffer_attach()
5897 if (event->rcu_pending) { in ring_buffer_attach()
5898 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
5899 event->rcu_pending = 0; in ring_buffer_attach()
5903 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
5908 * Avoid racing with perf_mmap_close(AUX): stop the event in ring_buffer_attach()
5909 * before swizzling the event::rb pointer; if it's getting in ring_buffer_attach()
5917 if (has_aux(event)) in ring_buffer_attach()
5918 perf_event_stop(event, 0); in ring_buffer_attach()
5920 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
5929 wake_up_all(&event->waitq); in ring_buffer_attach()
5933 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
5938 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
5940 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
5941 wake_up_all(&event->waitq); in ring_buffer_wakeup()
5946 struct perf_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
5951 rb = rcu_dereference(event->rb); in ring_buffer_get()
5973 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
5975 atomic_inc(&event->mmap_count); in perf_mmap_open()
5976 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
5979 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
5981 if (event->pmu->event_mapped) in perf_mmap_open()
5982 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
5985 static void perf_pmu_output_stop(struct perf_event *event);
5989 * event, or through other events by use of perf_event_set_output().
5997 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
5998 struct perf_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
6004 if (event->pmu->event_unmapped) in perf_mmap_close()
6005 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
6009 * event->mmap_count, so it is ok to use event->mmap_mutex to in perf_mmap_close()
6013 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { in perf_mmap_close()
6020 perf_pmu_output_stop(event); in perf_mmap_close()
6030 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6036 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
6039 ring_buffer_attach(event, NULL); in perf_mmap_close()
6040 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6053 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
6054 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
6056 * This event is en-route to free_event() which will in perf_mmap_close()
6063 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
6069 * If we find a different rb; ignore this event, a next in perf_mmap_close()
6074 if (event->rb == rb) in perf_mmap_close()
6075 ring_buffer_attach(event, NULL); in perf_mmap_close()
6077 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6078 put_event(event); in perf_mmap_close()
6115 struct perf_event *event = file->private_data; in perf_mmap() local
6130 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
6136 ret = security_perf_event_read(event); in perf_mmap()
6152 if (!event->rb) in perf_mmap()
6157 mutex_lock(&event->mmap_mutex); in perf_mmap()
6160 rb = event->rb; in perf_mmap()
6212 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
6214 mutex_lock(&event->mmap_mutex); in perf_mmap()
6215 if (event->rb) { in perf_mmap()
6216 if (event->rb->nr_pages != nr_pages) { in perf_mmap()
6221 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
6227 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6273 WARN_ON(!rb && event->rb); in perf_mmap()
6280 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap()
6281 event->cpu, flags); in perf_mmap()
6292 ring_buffer_attach(event, rb); in perf_mmap()
6294 perf_event_update_time(event); in perf_mmap()
6295 perf_event_init_userpage(event); in perf_mmap()
6296 perf_event_update_userpage(event); in perf_mmap()
6298 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
6299 event->attr.aux_watermark, flags); in perf_mmap()
6309 atomic_inc(&event->mmap_count); in perf_mmap()
6314 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6323 if (event->pmu->event_mapped) in perf_mmap()
6324 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap()
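perf_mmap() accepts one metadata page plus a power-of-two number of data pages, and the writable mapping is what later lets the consumer publish its progress through data_tail. A sketch of the matching userspace setup and of draining records with the data_head/data_tail protocol; struct ring and the helper names are illustrative, and record decoding (including records that wrap around the buffer end) is only indicated by a comment.

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <stdint.h>
#include <unistd.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

struct ring {
        struct perf_event_mmap_page *meta;      /* page 0 */
        unsigned char *data;                    /* pages 1..N */
        uint64_t mask;                          /* data size - 1 */
};

static int ring_map(struct ring *r, int fd, int data_pages /* power of two */)
{
        size_t ps = sysconf(_SC_PAGESIZE);
        void *base = mmap(NULL, (1 + data_pages) * ps,
                          PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

        if (base == MAP_FAILED)
                return -1;
        r->meta = base;
        r->data = (unsigned char *)base + ps;
        r->mask = (uint64_t)data_pages * ps - 1;
        return 0;
}

static void ring_drain(struct ring *r)
{
        uint64_t head = r->meta->data_head;
        uint64_t tail = r->meta->data_tail;

        barrier();                      /* pairs with the kernel's write barrier */
        while (tail < head) {
                struct perf_event_header *hdr =
                        (void *)(r->data + (tail & r->mask));
                /* ... decode PERF_RECORD_* here; beware of records that
                 *     wrap past the end of the data area ... */
                tail += hdr->size;
        }
        barrier();
        r->meta->data_tail = tail;      /* tell the kernel the space is free */
}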
6332 struct perf_event *event = filp->private_data; in perf_fasync() local
6336 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
6357 * Perf event wakeup
6363 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) in perf_event_fasync() argument
6366 if (event->parent) in perf_event_fasync()
6367 event = event->parent; in perf_event_fasync()
6368 return &event->fasync; in perf_event_fasync()
6371 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
6373 ring_buffer_wakeup(event); in perf_event_wakeup()
6375 if (event->pending_kill) { in perf_event_wakeup()
6376 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
6377 event->pending_kill = 0; in perf_event_wakeup()
6381 static void perf_pending_event_disable(struct perf_event *event) in perf_pending_event_disable() argument
6383 int cpu = READ_ONCE(event->pending_disable); in perf_pending_event_disable()
6389 WRITE_ONCE(event->pending_disable, -1); in perf_pending_event_disable()
6390 perf_event_disable_local(event); in perf_pending_event_disable()
6412 * But the event runs on CPU-B and wants disabling there. in perf_pending_event_disable()
6414 irq_work_queue_on(&event->pending, cpu); in perf_pending_event_disable()
6419 struct perf_event *event = container_of(entry, struct perf_event, pending); in perf_pending_event() local
6428 perf_pending_event_disable(event); in perf_pending_event()
6430 if (event->pending_wakeup) { in perf_pending_event()
6431 event->pending_wakeup = 0; in perf_pending_event()
6432 perf_event_wakeup(event); in perf_pending_event()
6603 static unsigned long perf_prepare_sample_aux(struct perf_event *event, in perf_prepare_sample_aux() argument
6607 struct perf_event *sampler = event->aux_event; in perf_prepare_sample_aux()
6642 struct perf_event *event, in perf_pmu_snapshot_aux() argument
6652 * the IRQ ones, that is, for example, re-starting an event that's just in perf_pmu_snapshot_aux()
6654 * doesn't change the event state. in perf_pmu_snapshot_aux()
6666 ret = event->pmu->snapshot_aux(event, handle, size); in perf_pmu_snapshot_aux()
6675 static void perf_aux_sample_output(struct perf_event *event, in perf_aux_sample_output() argument
6679 struct perf_event *sampler = event->aux_event; in perf_aux_sample_output()
6721 struct perf_event *event) in __perf_event_header__init_id() argument
6723 u64 sample_type = event->attr.sample_type; in __perf_event_header__init_id()
6726 header->size += event->id_header_size; in __perf_event_header__init_id()
6730 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
6731 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
6735 data->time = perf_event_clock(event); in __perf_event_header__init_id()
6738 data->id = primary_event_id(event); in __perf_event_header__init_id()
6741 data->stream_id = event->id; in __perf_event_header__init_id()
6751 struct perf_event *event) in perf_event_header__init_id() argument
6753 if (event->attr.sample_id_all) in perf_event_header__init_id()
6754 __perf_event_header__init_id(header, data, event); in perf_event_header__init_id()
6781 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
6785 if (event->attr.sample_id_all) in perf_event__output_id_sample()
6790 struct perf_event *event, in perf_output_read_one() argument
6793 u64 read_format = event->attr.read_format; in perf_output_read_one()
6797 values[n++] = perf_event_count(event); in perf_output_read_one()
6800 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
6804 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
6807 values[n++] = primary_event_id(event); in perf_output_read_one()
6813 struct perf_event *event, in perf_output_read_group() argument
6816 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
6817 u64 read_format = event->attr.read_format; in perf_output_read_group()
6829 if ((leader != event) && in perf_output_read_group()
6842 if ((sub != event) && in perf_output_read_group()
6865 struct perf_event *event) in perf_output_read() argument
6868 u64 read_format = event->attr.read_format; in perf_output_read()
6872 * based on snapshot values taken when the event in perf_output_read()
6880 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
6882 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
6883 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
6885 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
6888 static inline bool perf_sample_save_hw_index(struct perf_event *event) in perf_sample_save_hw_index() argument
6890 return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX; in perf_sample_save_hw_index()
6896 struct perf_event *event) in perf_output_sample() argument
6930 perf_output_read(handle, event); in perf_output_sample()
6981 if (perf_sample_save_hw_index(event)) in perf_output_sample()
7003 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
7034 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
7052 perf_aux_sample_output(event, handle, data); in perf_output_sample()
7055 if (!event->attr.watermark) { in perf_output_sample()
7056 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
7108 perf_callchain(struct perf_event *event, struct pt_regs *regs) in perf_callchain() argument
7110 bool kernel = !event->attr.exclude_callchain_kernel; in perf_callchain()
7111 bool user = !event->attr.exclude_callchain_user; in perf_callchain()
7113 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
7114 const u32 max_stack = event->attr.sample_max_stack; in perf_callchain()
7127 struct perf_event *event, in perf_prepare_sample() argument
7130 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
7133 header->size = sizeof(*header) + event->header_size; in perf_prepare_sample()
7138 __perf_event_header__init_id(header, data, event); in perf_prepare_sample()
7147 data->callchain = perf_callchain(event, regs); in perf_prepare_sample()
7182 if (perf_sample_save_hw_index(event)) in perf_prepare_sample()
7199 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
7213 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
7238 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
7271 event->attr.aux_sample_size); in perf_prepare_sample()
7273 size = perf_prepare_sample_aux(event, data, size); in perf_prepare_sample()
7290 __perf_event_output(struct perf_event *event, in __perf_event_output() argument
7305 perf_prepare_sample(&header, data, event, regs); in __perf_event_output()
7307 err = output_begin(&handle, data, event, header.size); in __perf_event_output()
7311 perf_output_sample(&handle, &header, data, event); in __perf_event_output()
7321 perf_event_output_forward(struct perf_event *event, in perf_event_output_forward() argument
7325 __perf_event_output(event, data, regs, perf_output_begin_forward); in perf_event_output_forward()
7329 perf_event_output_backward(struct perf_event *event, in perf_event_output_backward() argument
7333 __perf_event_output(event, data, regs, perf_output_begin_backward); in perf_event_output_backward()
7337 perf_event_output(struct perf_event *event, in perf_event_output() argument
7341 return __perf_event_output(event, data, regs, perf_output_begin); in perf_event_output()
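perf_output_sample() writes the sample fields in the fixed order of the PERF_SAMPLE_* bits, so a decoder has to mirror exactly the attr.sample_type it requested. An illustrative, non-UAPI struct for one common combination; other bits insert their fields at the corresponding position in the same enum order.

#include <linux/perf_event.h>
#include <stdint.h>

/* Body of a PERF_RECORD_SAMPLE when attr.sample_type ==
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD */
struct sample_ip_tid_time_period {
        struct perf_event_header header;        /* header.type == PERF_RECORD_SAMPLE */
        uint64_t ip;
        uint32_t pid, tid;
        uint64_t time;
        uint64_t period;
};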
7356 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
7365 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
7367 .pid = perf_event_pid(event, task), in perf_event_read_event()
7368 .tid = perf_event_tid(event, task), in perf_event_read_event()
7372 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
7373 ret = perf_output_begin(&handle, &sample, event, read_event.header.size); in perf_event_read_event()
7378 perf_output_read(&handle, event); in perf_event_read_event()
7379 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
7384 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
7391 struct perf_event *event; in perf_iterate_ctx() local
7393 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_iterate_ctx()
7395 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_ctx()
7397 if (!event_filter_match(event)) in perf_iterate_ctx()
7401 output(event, data); in perf_iterate_ctx()
7408 struct perf_event *event; in perf_iterate_sb_cpu() local
7410 list_for_each_entry_rcu(event, &pel->list, sb_list) { in perf_iterate_sb_cpu()
7413 * if we observe event->ctx, both event and ctx will be in perf_iterate_sb_cpu()
7416 if (!smp_load_acquire(&event->ctx)) in perf_iterate_sb_cpu()
7419 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_sb_cpu()
7421 if (!event_filter_match(event)) in perf_iterate_sb_cpu()
7423 output(event, data); in perf_iterate_sb_cpu()
7431 * your event, otherwise it might not get delivered.
7469 static void perf_event_addr_filters_exec(struct perf_event *event, void *data) in perf_event_addr_filters_exec() argument
7471 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_exec()
7476 if (!has_addr_filter(event)) in perf_event_addr_filters_exec()
7482 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_exec()
7483 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_exec()
7491 event->addr_filters_gen++; in perf_event_addr_filters_exec()
7495 perf_event_stop(event, 1); in perf_event_addr_filters_exec()
7522 static void __perf_event_output_stop(struct perf_event *event, void *data) in __perf_event_output_stop() argument
7524 struct perf_event *parent = event->parent; in __perf_event_output_stop()
7528 .event = event, in __perf_event_output_stop()
7531 if (!has_aux(event)) in __perf_event_output_stop()
7535 parent = event; in __perf_event_output_stop()
7541 * We are using event::rb to determine if the event should be stopped, in __perf_event_output_stop()
7543 * which will make us skip the event that actually needs to be stopped. in __perf_event_output_stop()
7544 * So ring_buffer_attach() has to stop an aux event before re-assigning in __perf_event_output_stop()
7553 struct perf_event *event = info; in __perf_pmu_output_stop() local
7554 struct pmu *pmu = event->ctx->pmu; in __perf_pmu_output_stop()
7557 .rb = event->rb, in __perf_pmu_output_stop()
7570 static void perf_pmu_output_stop(struct perf_event *event) in perf_pmu_output_stop() argument
7577 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { in perf_pmu_output_stop()
7581 * sufficient to stop the event itself if it's active, since in perf_pmu_output_stop()
7591 err = cpu_function_call(cpu, __perf_pmu_output_stop, event); in perf_pmu_output_stop()
7621 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
7623 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
7624 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
7625 event->attr.task; in perf_event_task_match()
7628 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
7637 if (!perf_event_task_match(event)) in perf_event_task_output()
7640 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
7642 ret = perf_output_begin(&handle, &sample, event, in perf_event_task_output()
7647 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
7648 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
7651 task_event->event_id.ppid = perf_event_pid(event, in perf_event_task_output()
7653 task_event->event_id.ptid = perf_event_pid(event, in perf_event_task_output()
7656 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
7657 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
7660 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
7664 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
7727 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
7729 return event->attr.comm; in perf_event_comm_match()
7732 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
7741 if (!perf_event_comm_match(event)) in perf_event_comm_output()
7744 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
7745 ret = perf_output_begin(&handle, &sample, event, in perf_event_comm_output()
7751 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
7752 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
7758 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
7826 static int perf_event_namespaces_match(struct perf_event *event) in perf_event_namespaces_match() argument
7828 return event->attr.namespaces; in perf_event_namespaces_match()
7831 static void perf_event_namespaces_output(struct perf_event *event, in perf_event_namespaces_output() argument
7840 if (!perf_event_namespaces_match(event)) in perf_event_namespaces_output()
7844 &sample, event); in perf_event_namespaces_output()
7845 ret = perf_output_begin(&handle, &sample, event, in perf_event_namespaces_output()
7850 namespaces_event->event_id.pid = perf_event_pid(event, in perf_event_namespaces_output()
7852 namespaces_event->event_id.tid = perf_event_tid(event, in perf_event_namespaces_output()
7857 perf_event__output_id_sample(event, &handle, &sample); in perf_event_namespaces_output()
7954 static int perf_event_cgroup_match(struct perf_event *event) in perf_event_cgroup_match() argument
7956 return event->attr.cgroup; in perf_event_cgroup_match()
7959 static void perf_event_cgroup_output(struct perf_event *event, void *data) in perf_event_cgroup_output() argument
7967 if (!perf_event_cgroup_match(event)) in perf_event_cgroup_output()
7971 &sample, event); in perf_event_cgroup_output()
7972 ret = perf_output_begin(&handle, &sample, event, in perf_event_cgroup_output()
7980 perf_event__output_id_sample(event, &handle, &sample); in perf_event_cgroup_output()
8063 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
8070 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
8071 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
8074 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
8084 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
8087 if (event->attr.mmap2) { in perf_event_mmap_output()
8097 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
8098 ret = perf_output_begin(&handle, &sample, event, in perf_event_mmap_output()
8103 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
8104 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
8108 if (event->attr.mmap2) { in perf_event_mmap_output()
8120 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
8293 static void __perf_addr_filters_adjust(struct perf_event *event, void *data) in __perf_addr_filters_adjust() argument
8295 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in __perf_addr_filters_adjust()
8301 if (!has_addr_filter(event)) in __perf_addr_filters_adjust()
8310 &event->addr_filter_ranges[count])) in __perf_addr_filters_adjust()
8317 event->addr_filters_gen++; in __perf_addr_filters_adjust()
8321 perf_event_stop(event, 1); in __perf_addr_filters_adjust()
8385 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
8407 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
8408 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_event_aux_event()
8414 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
8422 void perf_log_lost_samples(struct perf_event *event, u64 lost) in perf_log_lost_samples() argument
8440 perf_event_header__init_id(&lost_samples_event.header, &sample, event); in perf_log_lost_samples()
8442 ret = perf_output_begin(&handle, &sample, event, in perf_log_lost_samples()
8448 perf_event__output_id_sample(event, &handle, &sample); in perf_log_lost_samples()
8467 static int perf_event_switch_match(struct perf_event *event) in perf_event_switch_match() argument
8469 return event->attr.context_switch; in perf_event_switch_match()
8472 static void perf_event_switch_output(struct perf_event *event, void *data) in perf_event_switch_output() argument
8479 if (!perf_event_switch_match(event)) in perf_event_switch_output()
8483 if (event->ctx->task) { in perf_event_switch_output()
8490 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
8492 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
8495 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
8497 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size); in perf_event_switch_output()
8501 if (event->ctx->task) in perf_event_switch_output()
8506 perf_event__output_id_sample(event, &handle, &sample); in perf_event_switch_output()
8545 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
8562 .time = perf_event_clock(event), in perf_log_throttle()
8563 .id = primary_event_id(event), in perf_log_throttle()
8564 .stream_id = event->id, in perf_log_throttle()
8570 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
8572 ret = perf_output_begin(&handle, &sample, event, in perf_log_throttle()
8578 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
8598 static int perf_event_ksymbol_match(struct perf_event *event) in perf_event_ksymbol_match() argument
8600 return event->attr.ksymbol; in perf_event_ksymbol_match()
8603 static void perf_event_ksymbol_output(struct perf_event *event, void *data) in perf_event_ksymbol_output() argument
8610 if (!perf_event_ksymbol_match(event)) in perf_event_ksymbol_output()
8614 &sample, event); in perf_event_ksymbol_output()
8615 ret = perf_output_begin(&handle, &sample, event, in perf_event_ksymbol_output()
8622 perf_event__output_id_sample(event, &handle, &sample); in perf_event_ksymbol_output()
8688 static int perf_event_bpf_match(struct perf_event *event) in perf_event_bpf_match() argument
8690 return event->attr.bpf_event; in perf_event_bpf_match()
8693 static void perf_event_bpf_output(struct perf_event *event, void *data) in perf_event_bpf_output() argument
8700 if (!perf_event_bpf_match(event)) in perf_event_bpf_output()
8704 &sample, event); in perf_event_bpf_output()
8705 ret = perf_output_begin(&handle, data, event, in perf_event_bpf_output()
8711 perf_event__output_id_sample(event, &handle, &sample); in perf_event_bpf_output()
8796 static int perf_event_text_poke_match(struct perf_event *event) in perf_event_text_poke_match() argument
8798 return event->attr.text_poke; in perf_event_text_poke_match()
8801 static void perf_event_text_poke_output(struct perf_event *event, void *data) in perf_event_text_poke_output() argument
8809 if (!perf_event_text_poke_match(event)) in perf_event_text_poke_output()
8812 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); in perf_event_text_poke_output()
8814 ret = perf_output_begin(&handle, &sample, event, in perf_event_text_poke_output()
8829 perf_event__output_id_sample(event, &handle, &sample); in perf_event_text_poke_output()
8866 void perf_event_itrace_started(struct perf_event *event) in perf_event_itrace_started() argument
8868 event->attach_state |= PERF_ATTACH_ITRACE; in perf_event_itrace_started()
8871 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
8882 if (event->parent) in perf_log_itrace_start()
8883 event = event->parent; in perf_log_itrace_start()
8885 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
8886 event->attach_state & PERF_ATTACH_ITRACE) in perf_log_itrace_start()
8892 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
8893 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
8895 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
8896 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_log_itrace_start()
8902 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
8908 __perf_event_account_interrupt(struct perf_event *event, int throttle) in __perf_event_account_interrupt() argument
8910 struct hw_perf_event *hwc = &event->hw; in __perf_event_account_interrupt()
8925 perf_log_throttle(event, 0); in __perf_event_account_interrupt()
8930 if (event->attr.freq) { in __perf_event_account_interrupt()
8937 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_account_interrupt()
8943 int perf_event_account_interrupt(struct perf_event *event) in perf_event_account_interrupt() argument
8945 return __perf_event_account_interrupt(event, 1); in perf_event_account_interrupt()
8949 * Generic event overflow handling, sampling.
8952 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
8956 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
8963 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
8966 ret = __perf_event_account_interrupt(event, throttle); in __perf_event_overflow()
8973 event->pending_kill = POLL_IN; in __perf_event_overflow()
8974 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
8976 event->pending_kill = POLL_HUP; in __perf_event_overflow()
8978 perf_event_disable_inatomic(event); in __perf_event_overflow()
8981 READ_ONCE(event->overflow_handler)(event, data, regs); in __perf_event_overflow() local
8983 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
8984 event->pending_wakeup = 1; in __perf_event_overflow()
8985 irq_work_queue(&event->pending); in __perf_event_overflow()
8991 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
8995 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
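__perf_event_overflow() is also where the event_limit set by PERF_EVENT_IOC_REFRESH is consumed and where pending_kill selects POLL_IN or POLL_HUP for the fasync path. A sketch of the classic signal-per-overflow setup built on that behaviour; the helper names are illustrative and fd is assumed to be a sampling event created with attr.disabled = 1.

#define _GNU_SOURCE             /* F_SETSIG */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void on_overflow(int sig, siginfo_t *info, void *uc)
{
        /* With F_SETSIG in effect, si_code is POLL_IN for a normal overflow
         * and POLL_HUP once the refresh count is exhausted and the event
         * has been disabled (see event->pending_kill above). */
        (void)sig; (void)info; (void)uc;
}

static int arm_overflow_signal(int fd)
{
        struct sigaction sa = { .sa_sigaction = on_overflow,
                                .sa_flags = SA_SIGINFO };

        if (sigaction(SIGIO, &sa, NULL))
                return -1;
        if (fcntl(fd, F_SETSIG, SIGIO))
                return -1;
        if (fcntl(fd, F_SETOWN, getpid()))
                return -1;
        if (fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC))
                return -1;
        /* enable the event for one more overflow; re-arm from the handler
         * with another IOC_REFRESH if continuous notification is wanted */
        return ioctl(fd, PERF_EVENT_IOC_REFRESH, 1);
}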
8999 * Generic software event infrastructure
9014 * We directly increment event->count and keep a second value in
9015 * event->hw.period_left to count intervals. This period event
9020 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
9022 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
9043 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
9047 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
9051 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
9057 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
9069 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
9073 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
9075 local64_add(nr, &event->count); in perf_swevent_event()
9080 if (!is_sampling_event(event)) in perf_swevent_event()
9083 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
9085 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
9087 data->period = event->hw.last_period; in perf_swevent_event()
9089 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
9090 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
9095 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
9098 static int perf_exclude_event(struct perf_event *event, in perf_exclude_event() argument
9101 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
9105 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
9108 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
9115 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
9121 if (event->attr.type != type) in perf_swevent_match()
9124 if (event->attr.config != event_id) in perf_swevent_match()
9127 if (perf_exclude_event(event, regs)) in perf_swevent_match()
9161 /* For the event head insertion and removal in the hlist */
9163 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
9166 u32 event_id = event->attr.config; in find_swevent_head()
9167 u64 type = event->attr.type; in find_swevent_head()
9170 * Event scheduling is always serialized against hlist allocation in find_swevent_head()
9175 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
9188 struct perf_event *event; in do_perf_sw_event() local
9196 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
9197 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
9198 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
9248 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
9252 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
9255 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
9258 if (is_sampling_event(event)) { in perf_swevent_add()
9260 perf_swevent_set_period(event); in perf_swevent_add()
9265 head = find_swevent_head(swhash, event); in perf_swevent_add()
9269 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
9270 perf_event_update_userpage(event); in perf_swevent_add()
9275 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
9277 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
9280 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
9282 event->hw.state = 0; in perf_swevent_start()
9285 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
9287 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
9379 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
9381 u64 event_id = event->attr.config; in sw_perf_event_destroy()
9383 WARN_ON(event->parent); in sw_perf_event_destroy()
9389 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
9391 u64 event_id = event->attr.config; in perf_swevent_init()
9393 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
9399 if (has_branch_stack(event)) in perf_swevent_init()
9414 if (!event->parent) { in perf_swevent_init()
9422 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
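perf_swevent_init() above is what a PERF_TYPE_SOFTWARE event such as page faults or context switches goes through; the counts are then fed in by perf_swevent_event() from the software-event hooks around the kernel rather than by PMU hardware. A minimal sketch, with an illustrative helper name and the usual raw-syscall wrapper, opening a per-task page-fault counter.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static int open_page_fault_counter(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_PAGE_FAULTS;   /* handled by the swevent code above */
        attr.disabled = 1;
        /* count this task on any CPU */
        return perf_event_open(&attr, 0, -1, -1, 0);
}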
9443 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
9449 if (event->parent) in perf_tp_filter_match()
9450 event = event->parent; in perf_tp_filter_match()
9452 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
9457 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
9461 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
9466 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_tp_event_match()
9469 if (!perf_tp_filter_match(event, data)) in perf_tp_event_match()
9487 perf_tp_event(call->event.type, count, raw_data, size, regs, head, in perf_trace_run_bpf_submit()
9497 struct perf_event *event; in perf_tp_event() local
9511 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
9512 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
9513 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
9518 * deliver this event there too. in perf_tp_event()
9529 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_tp_event()
9530 if (event->cpu != smp_processor_id()) in perf_tp_event()
9532 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event()
9534 if (event->attr.config != entry->type) in perf_tp_event()
9536 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
9537 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
9547 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
9549 perf_trace_destroy(event); in tp_perf_event_destroy()
9552 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
9556 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
9562 if (has_branch_stack(event)) in perf_tp_event_init()
9565 err = perf_trace_init(event); in perf_tp_event_init()
9569 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
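perf_tp_event_init() requires attr.type == PERF_TYPE_TRACEPOINT and takes the tracepoint to attach to from attr.config, the numeric id that tracefs exposes per event. A sketch, assuming tracefs is mounted at /sys/kernel/tracing and the caller is privileged enough; the helper name is illustrative.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static int open_tracepoint(const char *subsys, const char *name)
{
        struct perf_event_attr attr;
        unsigned long long id;
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/kernel/tracing/events/%s/%s/id", subsys, name);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (fscanf(f, "%llu", &id) != 1) {
                fclose(f);
                return -1;
        }
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.size = sizeof(attr);
        attr.config = id;               /* compared against entry->type in perf_tp_event() */
        attr.sample_period = 1;
        attr.sample_type = PERF_SAMPLE_RAW;
        attr.disabled = 1;
        return perf_event_open(&attr, 0, -1, -1, 0);
}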
9625 static int perf_kprobe_event_init(struct perf_event *event);
9637 static int perf_kprobe_event_init(struct perf_event *event) in perf_kprobe_event_init() argument
9642 if (event->attr.type != perf_kprobe.type) in perf_kprobe_event_init()
9651 if (has_branch_stack(event)) in perf_kprobe_event_init()
9654 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_kprobe_event_init()
9655 err = perf_kprobe_init(event, is_retprobe); in perf_kprobe_event_init()
9659 event->destroy = perf_kprobe_destroy; in perf_kprobe_event_init()
9684 static int perf_uprobe_event_init(struct perf_event *event);
9696 static int perf_uprobe_event_init(struct perf_event *event) in perf_uprobe_event_init() argument
9702 if (event->attr.type != perf_uprobe.type) in perf_uprobe_event_init()
9711 if (has_branch_stack(event)) in perf_uprobe_event_init()
9714 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_uprobe_event_init()
9715 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; in perf_uprobe_event_init()
9716 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); in perf_uprobe_event_init()
9720 event->destroy = perf_uprobe_destroy; in perf_uprobe_event_init()
9737 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
9739 ftrace_profile_free_filter(event); in perf_event_free_filter()
9743 static void bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
9749 .event = event, in bpf_overflow_handler()
9757 ret = BPF_PROG_RUN(event->prog, &ctx); in bpf_overflow_handler()
9764 event->orig_overflow_handler(event, data, regs); in bpf_overflow_handler()
9767 static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_handler() argument
9771 if (event->overflow_handler_context) in perf_event_set_bpf_handler()
9775 if (event->prog) in perf_event_set_bpf_handler()
9782 if (event->attr.precise_ip && in perf_event_set_bpf_handler()
9784 (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY) || in perf_event_set_bpf_handler()
9785 event->attr.exclude_callchain_kernel || in perf_event_set_bpf_handler()
9786 event->attr.exclude_callchain_user)) { in perf_event_set_bpf_handler()
9800 event->prog = prog; in perf_event_set_bpf_handler()
9801 event->orig_overflow_handler = READ_ONCE(event->overflow_handler); in perf_event_set_bpf_handler()
9802 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); in perf_event_set_bpf_handler()
9806 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
9808 struct bpf_prog *prog = event->prog; in perf_event_free_bpf_handler()
9813 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); in perf_event_free_bpf_handler()
9814 event->prog = NULL; in perf_event_free_bpf_handler()
9818 static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_handler() argument
9822 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
9828 * returns true if the event is a tracepoint, or a kprobe/uprobe created
9831 static inline bool perf_event_is_tracing(struct perf_event *event) in perf_event_is_tracing() argument
9833 if (event->pmu == &perf_tracepoint) in perf_event_is_tracing()
9836 if (event->pmu == &perf_kprobe) in perf_event_is_tracing()
9840 if (event->pmu == &perf_uprobe) in perf_event_is_tracing()
9846 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_prog() argument
9852 if (!perf_event_is_tracing(event)) in perf_event_set_bpf_prog()
9853 return perf_event_set_bpf_handler(event, prog_fd); in perf_event_set_bpf_prog()
9855 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE; in perf_event_set_bpf_prog()
9856 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; in perf_event_set_bpf_prog()
9857 is_syscall_tp = is_syscall_trace_event(event->tp_event); in perf_event_set_bpf_prog()
9876 !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) { in perf_event_set_bpf_prog()
9882 int off = trace_event_get_offsets(event->tp_event); in perf_event_set_bpf_prog()
9890 ret = perf_event_attach_bpf_prog(event, prog); in perf_event_set_bpf_prog()
9896 static void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
9898 if (!perf_event_is_tracing(event)) { in perf_event_free_bpf_prog()
9899 perf_event_free_bpf_handler(event); in perf_event_free_bpf_prog()
9902 perf_event_detach_bpf_prog(event); in perf_event_free_bpf_prog()
9911 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
9915 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_prog() argument
9920 static void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
9942 perf_addr_filter_new(struct perf_event *event, struct list_head *filters) in perf_addr_filter_new() argument
9944 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); in perf_addr_filter_new()
9971 static void perf_addr_filters_splice(struct perf_event *event, in perf_addr_filters_splice() argument
9977 if (!has_addr_filter(event)) in perf_addr_filters_splice()
9981 if (event->parent) in perf_addr_filters_splice()
9984 raw_spin_lock_irqsave(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
9986 list_splice_init(&event->addr_filters.list, &list); in perf_addr_filters_splice()
9988 list_splice(head, &event->addr_filters.list); in perf_addr_filters_splice()
9990 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10016 * Update event's address range filters based on the
10019 static void perf_event_addr_filters_apply(struct perf_event *event) in perf_event_addr_filters_apply() argument
10021 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_apply()
10022 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply()
10029 * We may observe TASK_TOMBSTONE, which means that the event tear-down in perf_event_addr_filters_apply()
10050 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_apply()
10051 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_apply()
10053 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); in perf_event_addr_filters_apply()
10055 event->addr_filter_ranges[count].start = filter->offset; in perf_event_addr_filters_apply()
10056 event->addr_filter_ranges[count].size = filter->size; in perf_event_addr_filters_apply()
10062 event->addr_filters_gen++; in perf_event_addr_filters_apply()
10072 perf_event_stop(event, 1); in perf_event_addr_filters_apply()
10126 perf_event_parse_addr_filter(struct perf_event *event, char *fstr, in perf_event_parse_addr_filter() argument
10153 filter = perf_addr_filter_new(event, filters); in perf_event_parse_addr_filter()
10212 * Make sure that it doesn't contradict itself or the event's in perf_event_parse_addr_filter()
10217 if (kernel && event->attr.exclude_kernel) in perf_event_parse_addr_filter()
10241 if (!event->ctx->task) in perf_event_parse_addr_filter()
10256 event->addr_filters.nr_file_filters++; in perf_event_parse_addr_filter()
10282 perf_event_set_addr_filter(struct perf_event *event, char *filter_str) in perf_event_set_addr_filter() argument
10291 lockdep_assert_held(&event->ctx->mutex); in perf_event_set_addr_filter()
10293 if (WARN_ON_ONCE(event->parent)) in perf_event_set_addr_filter()
10296 ret = perf_event_parse_addr_filter(event, filter_str, &filters); in perf_event_set_addr_filter()
10300 ret = event->pmu->addr_filters_validate(&filters); in perf_event_set_addr_filter()
10305 perf_addr_filters_splice(event, &filters); in perf_event_set_addr_filter()
10308 perf_event_for_each_child(event, perf_event_addr_filters_apply); in perf_event_set_addr_filter()
10316 event->addr_filters.nr_file_filters = 0; in perf_event_set_addr_filter()
10321 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
10331 if (perf_event_is_tracing(event)) { in perf_event_set_filter()
10332 struct perf_event_context *ctx = event->ctx; in perf_event_set_filter()
10342 * This can result in event getting moved to a different ctx, in perf_event_set_filter()
10346 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
10350 if (has_addr_filter(event)) in perf_event_set_filter()
10351 ret = perf_event_set_addr_filter(event, filter_str); in perf_event_set_filter()
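perf_event_set_filter() accepts two unrelated filter languages: for tracing events the string is handed to ftrace_profile_set_filter() and uses the tracefs predicate syntax, while PMUs with address filters get the range syntax parsed by perf_event_parse_addr_filter(). A sketch of the tracing case, assuming tracepoint_fd refers to a sched:sched_switch event opened as in the earlier tracepoint sketch.

#include <linux/perf_event.h>
#include <sys/ioctl.h>

/* Only count sched_switch records whose previous task is not the idle
 * task; the string is parsed by the trace-event filter code. */
static int filter_out_idle(int tracepoint_fd)
{
        return ioctl(tracepoint_fd, PERF_EVENT_IOC_SET_FILTER,
                     "prev_pid != 0");
}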
10366 struct perf_event *event; in perf_swevent_hrtimer() local
10369 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
10371 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_swevent_hrtimer()
10374 event->pmu->read(event); in perf_swevent_hrtimer()
10376 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
10379 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
10380 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
10381 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
10385 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
10391 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
10393 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
10396 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
10412 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
10414 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
10416 if (is_sampling_event(event)) { in perf_swevent_cancel_hrtimer()
10424 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
10426 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
10428 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
10438 if (event->attr.freq) { in perf_swevent_init_hrtimer()
10439 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
10441 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
10442 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
10445 event->attr.freq = 0; in perf_swevent_init_hrtimer()
10450 * Software event: cpu wall time clock
10453 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
10459 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
10460 local64_add(now - prev, &event->count); in cpu_clock_event_update()
10463 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
10465 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
10466 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
10469 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
10471 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
10472 cpu_clock_event_update(event); in cpu_clock_event_stop()
10475 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
10478 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
10479 perf_event_update_userpage(event); in cpu_clock_event_add()
10484 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
10486 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
10489 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
10491 cpu_clock_event_update(event); in cpu_clock_event_read()
10494 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
10496 if (event->attr.type != PERF_TYPE_SOFTWARE) in cpu_clock_event_init()
10499 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
10505 if (has_branch_stack(event)) in cpu_clock_event_init()
10508 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
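
The cpu-clock callbacks above are assembled into a software pmu and registered with perf_pmu_register(); the sketch below mirrors that wiring, though the exact field list should be read as an approximation of the in-tree definition rather than a verbatim copy:

/* Approximate wiring of the cpu-clock software PMU (kernel-side sketch). */
static struct pmu perf_cpu_clock = {
	.task_ctx_nr	= perf_sw_context,
	.capabilities	= PERF_PMU_CAP_NO_NMI,	/* hrtimer-driven, no NMI sampling */

	.event_init	= cpu_clock_event_init,
	.add		= cpu_clock_event_add,
	.del		= cpu_clock_event_del,
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
	.read		= cpu_clock_event_read,
};
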
10527 * Software event: task time clock
10530 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
10535 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
10537 local64_add(delta, &event->count); in task_clock_event_update()
10540 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
10542 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
10543 perf_swevent_start_hrtimer(event); in task_clock_event_start()
10546 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
10548 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
10549 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
10552 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
10555 task_clock_event_start(event, flags); in task_clock_event_add()
10556 perf_event_update_userpage(event); in task_clock_event_add()
10561 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
10563 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
10566 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
10569 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
10570 u64 time = event->ctx->time + delta; in task_clock_event_read()
10572 task_clock_event_update(event, time); in task_clock_event_read()
10575 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
10577 if (event->attr.type != PERF_TYPE_SOFTWARE) in task_clock_event_init()
10580 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
10586 if (has_branch_stack(event)) in task_clock_event_init()
10589 perf_swevent_init_hrtimer(event); in task_clock_event_init()
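
Both clock events keep their count with the same lockless bookkeeping: atomically swap the previous timestamp for "now" and add the difference to the accumulated count, so a concurrent update never applies the same delta twice. A minimal standalone illustration of that pattern using C11 atomics (the struct and function names are mine):

/* Standalone illustration of the "xchg previous timestamp, accumulate delta" pattern. */
#include <stdatomic.h>
#include <stdint.h>

struct sw_clock {
	_Atomic uint64_t prev;   /* plays the role of event->hw.prev_count */
	_Atomic uint64_t count;  /* plays the role of event->count */
};

static void sw_clock_update(struct sw_clock *c, uint64_t now)
{
	uint64_t prev = atomic_exchange(&c->prev, now);

	atomic_fetch_add(&c->count, now - prev);
}
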
10620 static int perf_event_nop_int(struct perf_event *event, u64 value) in perf_event_nop_int() argument
10662 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
10948 	 * is fast, provided a valid software event is passed in. in perf_pmu_register()
11002 static inline bool has_extended_regs(struct perf_event *event) in has_extended_regs() argument
11004 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || in has_extended_regs()
11005 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); in has_extended_regs()
11008 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
11019 * if this is a sibling event, acquire the ctx->mutex to protect in perf_try_init_event()
11022 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { in perf_try_init_event()
11027 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
11032 event->pmu = pmu; in perf_try_init_event()
11033 ret = pmu->event_init(event); in perf_try_init_event()
11036 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
11040 has_extended_regs(event)) in perf_try_init_event()
11044 event_has_any_exclude_flag(event)) in perf_try_init_event()
11047 if (ret && event->destroy) in perf_try_init_event()
11048 event->destroy(event); in perf_try_init_event()
11057 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
11065 if (event->parent && event->parent->pmu) { in perf_init_event()
11066 pmu = event->parent->pmu; in perf_init_event()
11067 ret = perf_try_init_event(pmu, event); in perf_init_event()
11076 type = event->attr.type; in perf_init_event()
11085 ret = perf_try_init_event(pmu, event); in perf_init_event()
11086 if (ret == -ENOENT && event->attr.type != type) { in perf_init_event()
11087 type = event->attr.type; in perf_init_event()
11098 ret = perf_try_init_event(pmu, event); in perf_init_event()
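
The lookup above depends on a convention every pmu::event_init() implementation follows: return -ENOENT for events the PMU does not own so perf_init_event() can keep searching, and reserve other error codes for events it does own but cannot support (those abort the lookup). A hypothetical driver-side stub illustrating the convention; the names and the sample_period check are mine:

/* Hypothetical PMU driver stub (kernel-side sketch). */
#include <linux/perf_event.h>
#include <linux/errno.h>

static int my_pmu_event_init(struct perf_event *event)
{
	if (event->attr.type != event->pmu->type)
		return -ENOENT;		/* not ours: let perf_init_event() keep looking */

	if (!event->attr.sample_period)
		return -EINVAL;		/* ours, but unsupported: stop the search */

	return 0;
}
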
11114 static void attach_sb_event(struct perf_event *event) in attach_sb_event() argument
11116 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in attach_sb_event()
11119 list_add_rcu(&event->sb_list, &pel->list); in attach_sb_event()
11130 static void account_pmu_sb_event(struct perf_event *event) in account_pmu_sb_event() argument
11132 if (is_sb_event(event)) in account_pmu_sb_event()
11133 attach_sb_event(event); in account_pmu_sb_event()
11136 static void account_event_cpu(struct perf_event *event, int cpu) in account_event_cpu() argument
11138 if (event->parent) in account_event_cpu()
11141 if (is_cgroup_event(event)) in account_event_cpu()
11166 static void account_event(struct perf_event *event) in account_event() argument
11170 if (event->parent) in account_event()
11173 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in account_event()
11175 if (event->attr.mmap || event->attr.mmap_data) in account_event()
11177 if (event->attr.comm) in account_event()
11179 if (event->attr.namespaces) in account_event()
11181 if (event->attr.cgroup) in account_event()
11183 if (event->attr.task) in account_event()
11185 if (event->attr.freq) in account_event()
11187 if (event->attr.context_switch) { in account_event()
11191 if (has_branch_stack(event)) in account_event()
11193 if (is_cgroup_event(event)) in account_event()
11195 if (event->attr.ksymbol) in account_event()
11197 if (event->attr.bpf_event) in account_event()
11199 if (event->attr.text_poke) in account_event()
11230 account_event_cpu(event, event->cpu); in account_event()
11232 account_pmu_sb_event(event); in account_event()
11236 * Allocate and initialize an event structure
11247 struct perf_event *event; in perf_event_alloc() local
11256 event = kzalloc(sizeof(*event), GFP_KERNEL); in perf_event_alloc()
11257 if (!event) in perf_event_alloc()
11265 group_leader = event; in perf_event_alloc()
11267 mutex_init(&event->child_mutex); in perf_event_alloc()
11268 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
11270 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
11271 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
11272 INIT_LIST_HEAD(&event->active_list); in perf_event_alloc()
11273 init_event_group(event); in perf_event_alloc()
11274 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
11275 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
11276 INIT_LIST_HEAD(&event->addr_filters.list); in perf_event_alloc()
11277 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
11280 init_waitqueue_head(&event->waitq); in perf_event_alloc()
11281 event->pending_disable = -1; in perf_event_alloc()
11282 init_irq_work(&event->pending, perf_pending_event); in perf_event_alloc()
11284 mutex_init(&event->mmap_mutex); in perf_event_alloc()
11285 raw_spin_lock_init(&event->addr_filters.lock); in perf_event_alloc()
11287 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
11288 event->cpu = cpu; in perf_event_alloc()
11289 event->attr = *attr; in perf_event_alloc()
11290 event->group_leader = group_leader; in perf_event_alloc()
11291 event->pmu = NULL; in perf_event_alloc()
11292 event->oncpu = -1; in perf_event_alloc()
11294 event->parent = parent_event; in perf_event_alloc()
11296 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
11297 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
11299 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
11302 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
11308 event->hw.target = get_task_struct(task); in perf_event_alloc()
11311 event->clock = &local_clock; in perf_event_alloc()
11313 event->clock = parent_event->clock; in perf_event_alloc()
11323 event->prog = prog; in perf_event_alloc()
11324 event->orig_overflow_handler = in perf_event_alloc()
11331 event->overflow_handler = overflow_handler; in perf_event_alloc()
11332 event->overflow_handler_context = context; in perf_event_alloc()
11333 	} else if (is_write_backward(event)) { in perf_event_alloc()
11334 event->overflow_handler = perf_event_output_backward; in perf_event_alloc()
11335 event->overflow_handler_context = NULL; in perf_event_alloc()
11337 event->overflow_handler = perf_event_output_forward; in perf_event_alloc()
11338 event->overflow_handler_context = NULL; in perf_event_alloc()
11341 perf_event__state_init(event); in perf_event_alloc()
11345 hwc = &event->hw; in perf_event_alloc()
11360 if (!has_branch_stack(event)) in perf_event_alloc()
11361 event->attr.branch_sample_type = 0; in perf_event_alloc()
11363 pmu = perf_init_event(event); in perf_event_alloc()
11378 if (event->attr.aux_output && in perf_event_alloc()
11385 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
11390 err = exclusive_event_init(event); in perf_event_alloc()
11394 if (has_addr_filter(event)) { in perf_event_alloc()
11395 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, in perf_event_alloc()
11398 if (!event->addr_filter_ranges) { in perf_event_alloc()
11407 if (event->parent) { in perf_event_alloc()
11408 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_alloc()
11411 memcpy(event->addr_filter_ranges, in perf_event_alloc()
11412 event->parent->addr_filter_ranges, in perf_event_alloc()
11418 event->addr_filters_gen = 1; in perf_event_alloc()
11421 if (!event->parent) { in perf_event_alloc()
11422 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
11429 err = security_perf_event_alloc(event); in perf_event_alloc()
11434 account_event(event); in perf_event_alloc()
11436 return event; in perf_event_alloc()
11439 if (!event->parent) { in perf_event_alloc()
11440 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in perf_event_alloc()
11444 kfree(event->addr_filter_ranges); in perf_event_alloc()
11447 exclusive_event_destroy(event); in perf_event_alloc()
11450 if (is_cgroup_event(event)) in perf_event_alloc()
11451 perf_detach_cgroup(event); in perf_event_alloc()
11452 if (event->destroy) in perf_event_alloc()
11453 event->destroy(event); in perf_event_alloc()
11456 if (event->ns) in perf_event_alloc()
11457 put_pid_ns(event->ns); in perf_event_alloc()
11458 if (event->hw.target) in perf_event_alloc()
11459 put_task_struct(event->hw.target); in perf_event_alloc()
11460 kfree(event); in perf_event_alloc()
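
The overflow-handler selection inside perf_event_alloc() is driven by attr.write_backward; userspace opts into the backward-writing ring buffer (usually paired with a read-only, overwritable mmap of the buffer) with a single attribute bit. A hedged fragment showing just that bit:

/* Sketch: request the backward-writing ring buffer so the kernel selects
 * perf_event_output_backward as the overflow handler. */
#include <linux/perf_event.h>

static void request_backward_buffer(struct perf_event_attr *attr)
{
	attr->write_backward = 1;
}
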
11580 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
11589 if (event == output_event) in perf_event_set_output()
11595 if (output_event->cpu != event->cpu) in perf_event_set_output()
11601 if (output_event->cpu == -1 && output_event->ctx != event->ctx) in perf_event_set_output()
11607 if (output_event->clock != event->clock) in perf_event_set_output()
11614 if (is_write_backward(output_event) != is_write_backward(event)) in perf_event_set_output()
11620 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
11621 event->pmu != output_event->pmu) in perf_event_set_output()
11625 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
11627 if (atomic_read(&event->mmap_count)) in perf_event_set_output()
11637 ring_buffer_attach(event, rb); in perf_event_set_output()
11641 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
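
Userspace normally triggers the redirection validated above through the PERF_EVENT_IOC_SET_OUTPUT ioctl, after which the second event emits its records into the first event's ring buffer instead of needing an mmap() of its own. A hedged sketch; both descriptors are assumed to come from perf_event_open() on the same CPU/context:

/* Sketch: route fd_other's samples into fd_leader's ring buffer. */
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int share_ring_buffer(int fd_leader, int fd_other)
{
	return ioctl(fd_other, PERF_EVENT_IOC_SET_OUTPUT, fd_leader);
}
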
11656 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
11662 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
11667 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
11672 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
11676 event->clock = &ktime_get_boottime_ns; in perf_event_set_clock()
11680 event->clock = &ktime_get_clocktai_ns; in perf_event_set_clock()
11687 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
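
The clock installed here is chosen by userspace via attr.use_clockid and attr.clockid; a minimal hedged example requesting CLOCK_MONOTONIC_RAW so sample timestamps can be compared directly with clock_gettime() readings:

/* Sketch: request CLOCK_MONOTONIC_RAW timestamps for this event. */
#include <linux/perf_event.h>
#include <time.h>
#include <string.h>

static void request_raw_clock(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->use_clockid = 1;
	attr->clockid = CLOCK_MONOTONIC_RAW;	/* maps to ktime_get_raw_fast_ns above */
}
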
11725 * sys_perf_event_open - open a performance event, associate it to a task/cpu
11730 * @group_fd: group leader event fd
11737 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
11840 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
11842 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
11843 err = PTR_ERR(event); in SYSCALL_DEFINE5()
11847 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
11848 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
11858 pmu = event->pmu; in SYSCALL_DEFINE5()
11861 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
11867 event->event_caps |= PERF_EV_CAP_SOFTWARE; in SYSCALL_DEFINE5()
11870 if (is_software_event(event) && in SYSCALL_DEFINE5()
11873 * If the event is a sw event, but the group_leader in SYSCALL_DEFINE5()
11881 } else if (!is_software_event(event) && in SYSCALL_DEFINE5()
11886 * try to add a hardware event, move the whole group to in SYSCALL_DEFINE5()
11896 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
11903 * Look up the group leader (we will attach this event to it): in SYSCALL_DEFINE5()
11916 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
11924 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
11953 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
11958 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, in SYSCALL_DEFINE5()
11975 * perf_install_in_context() call for this new event to in SYSCALL_DEFINE5()
11999 * if this new event wound up on the same ctx, if so in SYSCALL_DEFINE5()
12042 if (!perf_event_validate_size(event)) { in SYSCALL_DEFINE5()
12049 * Check if the @cpu we're creating an event for is online. in SYSCALL_DEFINE5()
12063 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) { in SYSCALL_DEFINE5()
12070 * because we need to serialize with concurrent event creation. in SYSCALL_DEFINE5()
12072 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
12121 * event. What we want here is event in the initial in SYSCALL_DEFINE5()
12132 * perf_install_in_context() which is the point the event is active and in SYSCALL_DEFINE5()
12135 perf_event__header_size(event); in SYSCALL_DEFINE5()
12136 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
12138 event->owner = current; in SYSCALL_DEFINE5()
12140 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
12153 list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
12158 * new event on the sibling_list. This ensures destruction in SYSCALL_DEFINE5()
12181 * and that will take care of freeing the event. in SYSCALL_DEFINE5()
12184 free_event(event); in SYSCALL_DEFINE5()
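
An end-to-end userspace example of the syscall path above: open a task-clock software counter on the calling thread, enable it around a workload, and read back the accumulated time. Error handling is kept minimal and the workload is a placeholder:

/* Minimal perf_event_open() usage example (userspace sketch). */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.disabled = 1;

	fd = perf_event_open(&attr, 0 /* this task */, -1 /* any cpu */,
			     -1 /* no group */, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("task clock: %llu ns\n", (unsigned long long)count);

	close(fd);
	return 0;
}
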
12209 struct perf_event *event; in perf_event_create_kernel_counter() local
12219 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
12221 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
12222 err = PTR_ERR(event); in perf_event_create_kernel_counter()
12227 event->owner = TASK_TOMBSTONE; in perf_event_create_kernel_counter()
12232 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
12247 * Check if the @cpu we're creating an event for is online. in perf_event_create_kernel_counter()
12260 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
12265 perf_install_in_context(ctx, event, event->cpu); in perf_event_create_kernel_counter()
12269 return event; in perf_event_create_kernel_counter()
12276 free_event(event); in perf_event_create_kernel_counter()
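
In-kernel users (the hardlockup watchdog, for instance) take perf_event_create_kernel_counter() instead of the syscall and get their results through an overflow callback rather than a file descriptor. A kernel-side sketch of such a caller; the attribute values, the period, and the callback body are illustrative choices of mine:

/* Kernel-side sketch: create a pinned per-CPU counter with an overflow callback. */
#include <linux/perf_event.h>

static void my_overflow_handler(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	/* illustrative: called each time the counter crosses its sample period */
}

static struct perf_event *create_cpu_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.sample_period	= 1000000,	/* illustrative */
		.pinned		= 1,
	};

	/* task == NULL makes this a per-CPU counter owned by the kernel. */
	return perf_event_create_kernel_counter(&attr, cpu, NULL,
						my_overflow_handler, NULL);
}
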
12286 struct perf_event *event, *tmp; in perf_pmu_migrate_context() local
12297 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, in perf_pmu_migrate_context()
12299 perf_remove_from_context(event, 0); in perf_pmu_migrate_context()
12300 unaccount_event_cpu(event, src_cpu); in perf_pmu_migrate_context()
12302 list_add(&event->migrate_entry, &events); in perf_pmu_migrate_context()
12318 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
12319 if (event->group_leader == event) in perf_pmu_migrate_context()
12322 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
12323 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
12324 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
12325 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
12326 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
12334 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
12335 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
12336 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
12337 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
12338 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
12339 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
12410 * Remove this event from the parent's list in perf_event_exit_event()
12487 * When a child task exits, feed back event values to parent events.
12494 struct perf_event *event, *tmp; in perf_event_exit_task() local
12498 list_for_each_entry_safe(event, tmp, &child->perf_event_list, in perf_event_exit_task()
12500 list_del_init(&event->owner_entry); in perf_event_exit_task()
12507 smp_store_release(&event->owner, NULL); in perf_event_exit_task()
12523 static void perf_free_event(struct perf_event *event, in perf_free_event() argument
12526 struct perf_event *parent = event->parent; in perf_free_event()
12532 list_del_init(&event->child_list); in perf_free_event()
12538 perf_group_detach(event); in perf_free_event()
12539 list_del_event(event, ctx); in perf_free_event()
12541 free_event(event); in perf_free_event()
12554 struct perf_event *event, *tmp; in perf_event_free_task() local
12575 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) in perf_event_free_task()
12576 perf_free_event(event, ctx); in perf_event_free_task()
12589 * _free_event()'s put_task_struct(event->hw.target) will be a in perf_event_free_task()
12629 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) in perf_event_attrs() argument
12631 if (!event) in perf_event_attrs()
12634 return &event->attr; in perf_event_attrs()
12638 * Inherit an event from parent task to child task.
12704 * Make the child state follow the state of the parent event, in inherit_event()
12742 * Link this into the parent event's child list in inherit_event()
12751 * Inherits an event group.
12793 * Creates the child task context and tries to inherit the event-group.
12796 * inherited_all set when we 'fail' to inherit an orphaned event; this is
12804 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
12812 if (!event->attr.inherit) { in inherit_task_group()
12832 ret = inherit_group(event, parent, parent_ctx, in inherit_task_group()
12848 struct perf_event *event; in perf_event_init_context() local
12882 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { in perf_event_init_context()
12883 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
12898 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { in perf_event_init_context()
12899 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
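
All of the inheritance machinery above is gated on the attribute the parent opened with: without attr.inherit the groups are simply skipped, and with it each fork() clones the counters into the child, whose totals are fed back to the parent when it exits. A hedged userspace fragment showing the relevant bit (the surrounding attribute values are illustrative):

/* Sketch: make a counter follow fork()ed children as well. */
#include <linux/perf_event.h>
#include <string.h>

static void make_inheritable(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_SOFTWARE;
	attr->config = PERF_COUNT_SW_TASK_CLOCK;
	attr->inherit = 1;	/* drives inherit_task_group()/inherit_group() above */
}
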
13003 struct perf_event *event; in __perf_event_exit_context() local
13007 list_for_each_entry(event, &ctx->event_list, event_entry) in __perf_event_exit_context()
13008 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()