
Lines Matching full:event

176 static bool is_kernel_event(struct perf_event *event)  in is_kernel_event()  argument
178 return READ_ONCE(event->owner) == TASK_TOMBSTONE; in is_kernel_event()
190 * - removing the last event from a task ctx; this is relatively straight
193 * - adding the first event to a task ctx; this is tricky because we cannot
204 struct perf_event *event; member
212 struct perf_event *event = efs->event; in event_function() local
213 struct perf_event_context *ctx = event->ctx; in event_function()
248 efs->func(event, cpuctx, ctx, efs->data); in event_function()
255 static void event_function_call(struct perf_event *event, event_f func, void *data) in event_function_call() argument
257 struct perf_event_context *ctx = event->ctx; in event_function_call()
260 .event = event, in event_function_call()
265 if (!event->parent) { in event_function_call()
267 * If this is a !child event, we must hold ctx::mutex to in event_function_call()
268 * stabilize the event->ctx relation. See in event_function_call()
275 cpu_function_call(event->cpu, event_function, &efs); in event_function_call()
300 func(event, NULL, ctx, data); in event_function_call()
308 static void event_function_local(struct perf_event *event, event_f func, void *data) in event_function_local() argument
310 struct perf_event_context *ctx = event->ctx; in event_function_local()
347 func(event, cpuctx, ctx, data); in event_function_local()
401 * perf event paranoia level:
413 * max perf event sample rate
572 static u64 perf_event_time(struct perf_event *event);
586 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
588 return event->clock(); in perf_event_clock()
592 * State based event timekeeping...
594 * The basic idea is to use event->state to determine which (if any) time
599 * Event groups make things a little more complicated, but not terribly so. The
614 __perf_effective_state(struct perf_event *event) in __perf_effective_state() argument
616 struct perf_event *leader = event->group_leader; in __perf_effective_state()
621 return event->state; in __perf_effective_state()
625 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
627 enum perf_event_state state = __perf_effective_state(event); in __perf_update_times()
628 u64 delta = now - event->tstamp; in __perf_update_times()
630 *enabled = event->total_time_enabled; in __perf_update_times()
634 *running = event->total_time_running; in __perf_update_times()
639 static void perf_event_update_time(struct perf_event *event) in perf_event_update_time() argument
641 u64 now = perf_event_time(event); in perf_event_update_time()
643 __perf_update_times(event, now, &event->total_time_enabled, in perf_event_update_time()
644 &event->total_time_running); in perf_event_update_time()
645 event->tstamp = now; in perf_event_update_time()
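
The fragments above (__perf_effective_state(), __perf_update_times(), perf_event_update_time()) implement the state-based timekeeping described around source line 592. A simplified standalone sketch of that accounting follows; the type and field names (sample_event, sample_update_time, ST_*) are illustrative, not kernel identifiers: time since the last tstamp is folded into total_time_enabled whenever the event is at least INACTIVE, and into total_time_running only while it is ACTIVE.

#include <stdint.h>

enum sample_state { ST_OFF = -1, ST_INACTIVE = 0, ST_ACTIVE = 1 };

struct sample_event {
	enum sample_state state;
	uint64_t tstamp;              /* last time the accumulators were updated */
	uint64_t total_time_enabled;
	uint64_t total_time_running;
};

/* Fold the time since ->tstamp into the two accumulators, then advance
 * ->tstamp, mirroring the arithmetic in perf_event_update_time(). */
static void sample_update_time(struct sample_event *e, uint64_t now)
{
	uint64_t delta = now - e->tstamp;

	if (e->state >= ST_INACTIVE)
		e->total_time_enabled += delta;   /* event is on an active context */
	if (e->state >= ST_ACTIVE)
		e->total_time_running += delta;   /* event is actually counting on a PMU */

	e->tstamp = now;
}
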
657 perf_event_set_state(struct perf_event *event, enum perf_event_state state) in perf_event_set_state() argument
659 if (event->state == state) in perf_event_set_state()
662 perf_event_update_time(event); in perf_event_set_state()
667 if ((event->state < 0) ^ (state < 0)) in perf_event_set_state()
668 perf_event_update_sibling_time(event); in perf_event_set_state()
670 WRITE_ONCE(event->state, state); in perf_event_set_state()
676 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
678 struct perf_event_context *ctx = event->ctx; in perf_cgroup_match()
681 /* @event doesn't care about cgroup */ in perf_cgroup_match()
682 if (!event->cgrp) in perf_cgroup_match()
690 * Cgroup scoping is recursive. An event enabled for a cgroup is in perf_cgroup_match()
692 * cgroup is a descendant of @event's (the test covers identity in perf_cgroup_match()
696 event->cgrp->css.cgroup); in perf_cgroup_match()
699 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
701 css_put(&event->cgrp->css); in perf_detach_cgroup()
702 event->cgrp = NULL; in perf_detach_cgroup()
705 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
707 return event->cgrp != NULL; in is_cgroup_event()
710 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
714 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
744 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
752 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
755 cgrp = perf_cgroup_from_task(current, event->ctx); in update_cgrp_time_from_event()
759 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) in update_cgrp_time_from_event()
760 __update_cgrp_time(event->cgrp); in update_cgrp_time_from_event()
790 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
899 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
919 event->cgrp = cgrp; in perf_cgroup_connect()
927 perf_detach_cgroup(event); in perf_cgroup_connect()
936 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) in perf_cgroup_set_shadow_time() argument
939 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_set_shadow_time()
940 event->shadow_ctx_time = now - t->timestamp; in perf_cgroup_set_shadow_time()
944 * Update cpuctx->cgrp so that it is set when first cgroup event is added and
945 * cleared when last cgroup event is removed.
948 list_update_cgroup_event(struct perf_event *event, in list_update_cgroup_event() argument
954 if (!is_cgroup_event(event)) in list_update_cgroup_event()
965 * matching the event's cgroup, we must do this for every new event, in list_update_cgroup_event()
972 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) in list_update_cgroup_event()
995 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
1000 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
1003 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
1008 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
1026 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
1045 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) in perf_cgroup_set_shadow_time() argument
1049 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
1055 list_update_cgroup_event(struct perf_event *event, in list_update_cgroup_event() argument
1235 * because the sys_perf_event_open() case will install a new event and break
1246 * quiesce the event, after which we can install it in the new location. This
1247 * means that only external vectors (perf_fops, prctl) can perturb the event
1251 * However; because event->ctx can change while we're waiting to acquire
1270 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
1276 ctx = READ_ONCE(event->ctx); in perf_event_ctx_lock_nested()
1284 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
1294 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
1296 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
1299 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
1325 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, in perf_event_pid_type() argument
1332 if (event->parent) in perf_event_pid_type()
1333 event = event->parent; in perf_event_pid_type()
1335 nr = __task_pid_nr_ns(p, type, event->ns); in perf_event_pid_type()
1342 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1344 return perf_event_pid_type(event, p, PIDTYPE_TGID); in perf_event_pid()
1347 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1349 return perf_event_pid_type(event, p, PIDTYPE_PID); in perf_event_tid()
1353 * If we inherit events we want to return the parent event id
1356 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1358 u64 id = event->id; in primary_event_id()
1360 if (event->parent) in primary_event_id()
1361 id = event->parent->id; in primary_event_id()
1462 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1464 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1466 if (is_cgroup_event(event)) in perf_event_time()
1467 return perf_cgroup_event_time(event); in perf_event_time()
1472 static enum event_type_t get_event_type(struct perf_event *event) in get_event_type() argument
1474 struct perf_event_context *ctx = event->ctx; in get_event_type()
1483 if (event->group_leader != event) in get_event_type()
1484 event = event->group_leader; in get_event_type()
1486 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; in get_event_type()
1494 * Helper function to initialize event group nodes.
1496 static void init_event_group(struct perf_event *event) in init_event_group() argument
1498 RB_CLEAR_NODE(&event->group_node); in init_event_group()
1499 event->group_index = 0; in init_event_group()
1504 * based on event attrs bits.
1507 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) in get_event_groups() argument
1509 if (event->attr.pinned) in get_event_groups()
1525 * Compare function for event groups;
1547 * Insert @event into @groups' tree; using {@event->cpu, ++@groups->index} for
1553 struct perf_event *event) in perf_event_groups_insert() argument
1559 event->group_index = ++groups->index; in perf_event_groups_insert()
1568 if (perf_event_groups_less(event, node_event)) in perf_event_groups_insert()
1574 rb_link_node(&event->group_node, parent, node); in perf_event_groups_insert()
1575 rb_insert_color(&event->group_node, &groups->tree); in perf_event_groups_insert()
1579 * Helper function to insert event into the pinned or flexible groups.
1582 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) in add_event_to_groups() argument
1586 groups = get_event_groups(event, ctx); in add_event_to_groups()
1587 perf_event_groups_insert(groups, event); in add_event_to_groups()
1595 struct perf_event *event) in perf_event_groups_delete() argument
1597 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || in perf_event_groups_delete()
1600 rb_erase(&event->group_node, &groups->tree); in perf_event_groups_delete()
1601 init_event_group(event); in perf_event_groups_delete()
1605 * Helper function to delete event from its groups.
1608 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) in del_event_from_groups() argument
1612 groups = get_event_groups(event, ctx); in del_event_from_groups()
1613 perf_event_groups_delete(groups, event); in del_event_from_groups()
1617 * Get the leftmost event in the @cpu subtree.
1645 perf_event_groups_next(struct perf_event *event) in perf_event_groups_next() argument
1649 next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node); in perf_event_groups_next()
1650 if (next && next->cpu == event->cpu) in perf_event_groups_next()
1659 #define perf_event_groups_for_each(event, groups) \ argument
1660 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1661 typeof(*event), group_node); event; \
1662 event = rb_entry_safe(rb_next(&event->group_node), \
1663 typeof(*event), group_node))
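
perf_event_groups_insert() keys the pinned/flexible RB-trees on {event->cpu, ++groups->index} (source lines 1547-1575). A minimal sketch of the ordering this implies is below; the key struct and comparator name are made up for illustration and are not the kernel's representation.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative key; the real tree nodes are struct perf_event::group_node. */
struct group_key {
	int cpu;               /* primary key: CPU the event is bound to (-1 for any) */
	uint64_t group_index;  /* secondary key: ++groups->index at insertion time */
};

/* Mirrors the ordering implied by perf_event_groups_less(): sort by CPU first
 * so a single CPU's sub-tree can be iterated, then by the monotonically
 * growing insertion index. rotate_ctx() (source lines 3679-3680) exploits the
 * second key: deleting and re-inserting a group gives it a larger index,
 * which moves it to the tail. */
static bool group_less(const struct group_key *l, const struct group_key *r)
{
	if (l->cpu != r->cpu)
		return l->cpu < r->cpu;
	return l->group_index < r->group_index;
}
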
1666 * Add an event to the lists for its context.
1670 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1674 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1675 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1677 event->tstamp = perf_event_time(event); in list_add_event()
1680 * If we're a stand alone event or group leader, we go to the context in list_add_event()
1684 if (event->group_leader == event) { in list_add_event()
1685 event->group_caps = event->event_caps; in list_add_event()
1686 add_event_to_groups(event, ctx); in list_add_event()
1689 list_update_cgroup_event(event, ctx, true); in list_add_event()
1691 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1693 if (event->attr.inherit_stat) in list_add_event()
1700 * Initialize event state based on the perf_event_attr::disabled.
1702 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1704 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1708 static void __perf_event_read_size(struct perf_event *event, int nr_siblings) in __perf_event_read_size() argument
1714 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) in __perf_event_read_size()
1717 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) in __perf_event_read_size()
1720 if (event->attr.read_format & PERF_FORMAT_ID) in __perf_event_read_size()
1723 if (event->attr.read_format & PERF_FORMAT_GROUP) { in __perf_event_read_size()
1729 event->read_size = size; in __perf_event_read_size()
1732 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) in __perf_event_header_size() argument
1750 size += event->read_size; in __perf_event_header_size()
1761 event->header_size = size; in __perf_event_header_size()
1768 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
1770 __perf_event_read_size(event, in perf_event__header_size()
1771 event->group_leader->nr_siblings); in perf_event__header_size()
1772 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
1775 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
1778 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
1799 event->id_header_size = size; in perf_event__id_header_size()
1802 static bool perf_event_validate_size(struct perf_event *event) in perf_event_validate_size() argument
1806 * attach the event. in perf_event_validate_size()
1808 __perf_event_read_size(event, event->group_leader->nr_siblings + 1); in perf_event_validate_size()
1809 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); in perf_event_validate_size()
1810 perf_event__id_header_size(event); in perf_event_validate_size()
1816 if (event->read_size + event->header_size + in perf_event_validate_size()
1817 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) in perf_event_validate_size()
1823 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
1825 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
1827 lockdep_assert_held(&event->ctx->lock); in perf_group_attach()
1832 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
1835 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
1837 if (group_leader == event) in perf_group_attach()
1840 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
1842 group_leader->group_caps &= event->event_caps; in perf_group_attach()
1844 list_add_tail(&event->sibling_list, &group_leader->sibling_list); in perf_group_attach()
1854 * Remove an event from the lists for its context.
1858 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
1860 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
1866 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
1869 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
1871 list_update_cgroup_event(event, ctx, false); in list_del_event()
1874 if (event->attr.inherit_stat) in list_del_event()
1877 list_del_rcu(&event->event_entry); in list_del_event()
1879 if (event->group_leader == event) in list_del_event()
1880 del_event_from_groups(event, ctx); in list_del_event()
1883 * If event was in error state, then keep it in list_del_event()
1887 * of the event in list_del_event()
1889 if (event->state > PERF_EVENT_STATE_OFF) in list_del_event()
1890 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in list_del_event()
1895 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
1898 struct perf_event_context *ctx = event->ctx; in perf_group_detach()
1905 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
1908 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
1913 if (event->group_leader != event) { in perf_group_detach()
1914 list_del_init(&event->sibling_list); in perf_group_detach()
1915 event->group_leader->nr_siblings--; in perf_group_detach()
1920 * If this was a group event with sibling events then in perf_group_detach()
1924 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { in perf_group_detach()
1930 sibling->group_caps = event->group_caps; in perf_group_detach()
1932 if (!RB_EMPTY_NODE(&event->group_node)) { in perf_group_detach()
1933 add_event_to_groups(sibling, event->ctx); in perf_group_detach()
1943 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
1947 perf_event__header_size(event->group_leader); in perf_group_detach()
1949 for_each_sibling_event(tmp, event->group_leader) in perf_group_detach()
1953 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
1955 return event->state == PERF_EVENT_STATE_DEAD; in is_orphaned_event()
1958 static inline int __pmu_filter_match(struct perf_event *event) in __pmu_filter_match() argument
1960 struct pmu *pmu = event->pmu; in __pmu_filter_match()
1961 return pmu->filter_match ? pmu->filter_match(event) : 1; in __pmu_filter_match()
1965 * Check whether we should attempt to schedule an event group based on
1966 * PMU-specific filtering. An event group can consist of HW and SW events,
1970 static inline int pmu_filter_match(struct perf_event *event) in pmu_filter_match() argument
1974 if (!__pmu_filter_match(event)) in pmu_filter_match()
1977 for_each_sibling_event(sibling, event) { in pmu_filter_match()
1986 event_filter_match(struct perf_event *event) in event_filter_match() argument
1988 return (event->cpu == -1 || event->cpu == smp_processor_id()) && in event_filter_match()
1989 perf_cgroup_match(event) && pmu_filter_match(event); in event_filter_match()
1993 event_sched_out(struct perf_event *event, in event_sched_out() argument
1999 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
2002 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
2010 list_del_init(&event->active_list); in event_sched_out()
2012 perf_pmu_disable(event->pmu); in event_sched_out()
2014 event->pmu->del(event, 0); in event_sched_out()
2015 event->oncpu = -1; in event_sched_out()
2017 if (READ_ONCE(event->pending_disable) >= 0) { in event_sched_out()
2018 WRITE_ONCE(event->pending_disable, -1); in event_sched_out()
2021 perf_event_set_state(event, state); in event_sched_out()
2023 if (!is_software_event(event)) in event_sched_out()
2027 if (event->attr.freq && event->attr.sample_freq) in event_sched_out()
2029 if (event->attr.exclusive || !cpuctx->active_oncpu) in event_sched_out()
2032 perf_pmu_enable(event->pmu); in event_sched_out()
2040 struct perf_event *event; in group_sched_out() local
2052 for_each_sibling_event(event, group_event) in group_sched_out()
2053 event_sched_out(event, cpuctx, ctx); in group_sched_out()
2064 * Cross CPU call to remove a performance event
2066 * We disable the event on the hardware level first. After that we
2070 __perf_remove_from_context(struct perf_event *event, in __perf_remove_from_context() argument
2082 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
2084 perf_group_detach(event); in __perf_remove_from_context()
2085 list_del_event(event, ctx); in __perf_remove_from_context()
2097 * Remove the event from a task's (or a CPU's) list of events.
2099 * If event->ctx is a cloned context, callers must make sure that
2100 * every task struct that event->ctx->task could possibly point to
2106 static void perf_remove_from_context(struct perf_event *event, unsigned long flags) in perf_remove_from_context() argument
2108 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
2112 event_function_call(event, __perf_remove_from_context, (void *)flags); in perf_remove_from_context()
2120 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in perf_remove_from_context()
2122 (event->attach_state & PERF_ATTACH_GROUP)) { in perf_remove_from_context()
2128 perf_group_detach(event); in perf_remove_from_context()
2134 * Cross CPU call to disable a performance event
2136 static void __perf_event_disable(struct perf_event *event, in __perf_event_disable() argument
2141 if (event->state < PERF_EVENT_STATE_INACTIVE) in __perf_event_disable()
2146 update_cgrp_time_from_event(event); in __perf_event_disable()
2149 if (event == event->group_leader) in __perf_event_disable()
2150 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2152 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2154 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in __perf_event_disable()
2158 * Disable an event.
2160 * If event->ctx is a cloned context, callers must make sure that
2161 * every task struct that event->ctx->task could possibly point to
2164 * hold the top-level event's child_mutex, so any descendant that
2167 * When called from perf_pending_event it's OK because event->ctx
2171 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
2173 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
2176 if (event->state <= PERF_EVENT_STATE_OFF) { in _perf_event_disable()
2182 event_function_call(event, __perf_event_disable, NULL); in _perf_event_disable()
2185 void perf_event_disable_local(struct perf_event *event) in perf_event_disable_local() argument
2187 event_function_local(event, __perf_event_disable, NULL); in perf_event_disable_local()
2194 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
2198 ctx = perf_event_ctx_lock(event); in perf_event_disable()
2199 _perf_event_disable(event); in perf_event_disable()
2200 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
2204 void perf_event_disable_inatomic(struct perf_event *event) in perf_event_disable_inatomic() argument
2206 WRITE_ONCE(event->pending_disable, smp_processor_id()); in perf_event_disable_inatomic()
2208 irq_work_queue(&event->pending); in perf_event_disable_inatomic()
2211 static void perf_set_shadow_time(struct perf_event *event, in perf_set_shadow_time() argument
2229 * - event is guaranteed scheduled in in perf_set_shadow_time()
2239 if (is_cgroup_event(event)) in perf_set_shadow_time()
2240 perf_cgroup_set_shadow_time(event, event->tstamp); in perf_set_shadow_time()
2242 event->shadow_ctx_time = event->tstamp - ctx->timestamp; in perf_set_shadow_time()
2247 static void perf_log_throttle(struct perf_event *event, int enable);
2248 static void perf_log_itrace_start(struct perf_event *event);
2251 event_sched_in(struct perf_event *event, in event_sched_in() argument
2259 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
2262 WRITE_ONCE(event->oncpu, smp_processor_id()); in event_sched_in()
2264 * Order event::oncpu write to happen before the ACTIVE state is in event_sched_in()
2269 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); in event_sched_in()
2276 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { in event_sched_in()
2277 perf_log_throttle(event, 1); in event_sched_in()
2278 event->hw.interrupts = 0; in event_sched_in()
2281 perf_pmu_disable(event->pmu); in event_sched_in()
2283 perf_set_shadow_time(event, ctx); in event_sched_in()
2285 perf_log_itrace_start(event); in event_sched_in()
2287 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
2288 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_sched_in()
2289 event->oncpu = -1; in event_sched_in()
2294 if (!is_software_event(event)) in event_sched_in()
2298 if (event->attr.freq && event->attr.sample_freq) in event_sched_in()
2301 if (event->attr.exclusive) in event_sched_in()
2305 perf_pmu_enable(event->pmu); in event_sched_in()
2315 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
2332 for_each_sibling_event(event, group_event) { in group_sched_in()
2333 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
2334 partial_group = event; in group_sched_in()
2346 * The events up to the failed event are scheduled out normally. in group_sched_in()
2348 for_each_sibling_event(event, group_event) { in group_sched_in()
2349 if (event == partial_group) in group_sched_in()
2352 event_sched_out(event, cpuctx, ctx); in group_sched_in()
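
group_sched_in() (source lines 2315-2352) schedules a group all-or-nothing: members go on one by one, and on the first failure everything already scheduled is backed out. A condensed sketch of that pattern, with try_sched_in()/sched_out() standing in for event_sched_in()/event_sched_out() and members identified by index for brevity:

/* All-or-nothing group scheduling: either every member of the group ends up
 * on the PMU, or none of them do. */
static int sched_in_group(int nr_members,
			  int (*try_sched_in)(int member),
			  void (*sched_out)(int member))
{
	int i, failed = -1;

	for (i = 0; i < nr_members; i++) {
		if (try_sched_in(i)) {
			failed = i;             /* first member that would not fit */
			break;
		}
	}
	if (failed < 0)
		return 0;                       /* whole group is on */

	/* Roll back the members that were scheduled before the failure. */
	for (i = 0; i < failed; i++)
		sched_out(i);

	return -1;
}
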
2364 * Work out whether we can put this event group on the CPU now.
2366 static int group_can_go_on(struct perf_event *event, in group_can_go_on() argument
2373 if (event->group_caps & PERF_EV_CAP_SOFTWARE) in group_can_go_on()
2385 if (event->attr.exclusive && cpuctx->active_oncpu) in group_can_go_on()
2394 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2397 list_add_event(event, ctx); in add_event_to_ctx()
2398 perf_group_attach(event); in add_event_to_ctx()
2443 * time an event is added, only do it for the groups of equal priority and
2487 * Cross CPU call to install and enable a performance event
2494 struct perf_event *event = info; in __perf_install_in_context() local
2495 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2526 if (is_cgroup_event(event)) { in __perf_install_in_context()
2528 * If the current cgroup doesn't match the event's in __perf_install_in_context()
2533 event->cgrp->css.cgroup); in __perf_install_in_context()
2539 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2540 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_install_in_context()
2542 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2551 static bool exclusive_event_installable(struct perf_event *event,
2555 * Attach a performance event to a context.
2561 struct perf_event *event, in perf_install_in_context() argument
2568 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); in perf_install_in_context()
2570 if (event->cpu != -1) in perf_install_in_context()
2571 event->cpu = cpu; in perf_install_in_context()
2574 * Ensures that if we can observe event->ctx, both the event and ctx in perf_install_in_context()
2577 smp_store_release(&event->ctx, ctx); in perf_install_in_context()
2580 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
2622 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
2638 * thus we can safely install the event. in perf_install_in_context()
2644 add_event_to_ctx(event, ctx); in perf_install_in_context()
2649 * Cross CPU call to enable a performance event
2651 static void __perf_event_enable(struct perf_event *event, in __perf_event_enable() argument
2656 struct perf_event *leader = event->group_leader; in __perf_event_enable()
2659 if (event->state >= PERF_EVENT_STATE_INACTIVE || in __perf_event_enable()
2660 event->state <= PERF_EVENT_STATE_ERROR) in __perf_event_enable()
2666 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in __perf_event_enable()
2671 if (!event_filter_match(event)) { in __perf_event_enable()
2677 * If the event is in a group and isn't the group leader, in __perf_event_enable()
2680 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { in __perf_event_enable()
2689 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_event_enable()
2693 * Enable an event.
2695 * If event->ctx is a cloned context, callers must make sure that
2696 * every task struct that event->ctx->task could possibly point to
2701 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
2703 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
2706 if (event->state >= PERF_EVENT_STATE_INACTIVE || in _perf_event_enable()
2707 event->state < PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
2713 * If the event is in error state, clear that first. in _perf_event_enable()
2715 * That way, if we see the event in error state below, we know that it in _perf_event_enable()
2719 if (event->state == PERF_EVENT_STATE_ERROR) in _perf_event_enable()
2720 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
2723 event_function_call(event, __perf_event_enable, NULL); in _perf_event_enable()
2729 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
2733 ctx = perf_event_ctx_lock(event); in perf_event_enable()
2734 _perf_event_enable(event); in perf_event_enable()
2735 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
2740 struct perf_event *event; member
2747 struct perf_event *event = sd->event; in __perf_event_stop() local
2750 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in __perf_event_stop()
2758 * so we need to check again lest we try to stop another CPU's event. in __perf_event_stop()
2760 if (READ_ONCE(event->oncpu) != smp_processor_id()) in __perf_event_stop()
2763 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_stop()
2771 * Since this is happening on an event-local CPU, no trace is lost in __perf_event_stop()
2775 event->pmu->start(event, 0); in __perf_event_stop()
2780 static int perf_event_stop(struct perf_event *event, int restart) in perf_event_stop() argument
2783 .event = event, in perf_event_stop()
2789 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in perf_event_stop()
2796 * We only want to restart ACTIVE events, so if the event goes in perf_event_stop()
2797 * inactive here (event->oncpu==-1), there's nothing more to do; in perf_event_stop()
2800 ret = cpu_function_call(READ_ONCE(event->oncpu), in perf_event_stop()
2813 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
2814 * (p2) when an event is scheduled in (pmu::add), it calls
2818 * If (p1) happens while the event is active, we restart it to force (p2).
2829 void perf_event_addr_filters_sync(struct perf_event *event) in perf_event_addr_filters_sync() argument
2831 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_sync()
2833 if (!has_addr_filter(event)) in perf_event_addr_filters_sync()
2837 if (event->addr_filters_gen != event->hw.addr_filters_gen) { in perf_event_addr_filters_sync()
2838 event->pmu->addr_filters_sync(event); in perf_event_addr_filters_sync()
2839 event->hw.addr_filters_gen = event->addr_filters_gen; in perf_event_addr_filters_sync()
2845 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
2850 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
2853 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
2854 _perf_event_enable(event); in _perf_event_refresh()
2862 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
2867 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
2868 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
2869 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
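
perf_event_refresh() backs the PERF_EVENT_IOC_REFRESH ioctl: the argument is added to event_limit and the event is enabled, giving it a budget of overflows before it is disabled again. A minimal userspace sketch; the helper name is made up and fd is assumed to come from perf_event_open() for a sampling event.

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Re-arm the event for up to n further overflows. */
static int arm_for_n_overflows(int fd, int n)
{
	return ioctl(fd, PERF_EVENT_IOC_REFRESH, n);
}
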
2890 static int perf_event_modify_attr(struct perf_event *event, in perf_event_modify_attr() argument
2893 if (event->attr.type != attr->type) in perf_event_modify_attr()
2896 switch (event->attr.type) { in perf_event_modify_attr()
2898 return perf_event_modify_breakpoint(event, attr); in perf_event_modify_attr()
2909 struct perf_event *event, *tmp; in ctx_sched_out() local
2957 list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) in ctx_sched_out()
2958 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
2962 list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) in ctx_sched_out()
2963 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3006 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
3011 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
3015 * Update the event value, we cannot use perf_event_read() in __perf_event_sync_stat()
3018 * we know the event must be on the current CPU, therefore we in __perf_event_sync_stat()
3021 if (event->state == PERF_EVENT_STATE_ACTIVE) in __perf_event_sync_stat()
3022 event->pmu->read(event); in __perf_event_sync_stat()
3024 perf_event_update_time(event); in __perf_event_sync_stat()
3027 * In order to keep per-task stats reliable we need to flip the event in __perf_event_sync_stat()
3031 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
3034 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
3035 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
3040 perf_event_update_userpage(event); in __perf_event_sync_stat()
3047 struct perf_event *event, *next_event; in perf_event_sync_stat() local
3054 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
3060 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
3063 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
3065 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
3170 * This callback is relevant even to per-cpu events; for example multi event
3210 * We stop each event and update the event value in event->count.
3213 * sets the disabled bit in the control field of event _before_
3214 * accessing the event control register. If a NMI hits, then it will
3215 * not restart the event.
3234 * cgroup events are system-wide mode only in __perf_event_task_sched_out()
3286 static int pinned_sched_in(struct perf_event *event, void *data) in pinned_sched_in() argument
3290 if (event->state <= PERF_EVENT_STATE_OFF) in pinned_sched_in()
3293 if (!event_filter_match(event)) in pinned_sched_in()
3296 if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) { in pinned_sched_in()
3297 if (!group_sched_in(event, sid->cpuctx, sid->ctx)) in pinned_sched_in()
3298 list_add_tail(&event->active_list, &sid->ctx->pinned_active); in pinned_sched_in()
3305 if (event->state == PERF_EVENT_STATE_INACTIVE) in pinned_sched_in()
3306 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in pinned_sched_in()
3311 static int flexible_sched_in(struct perf_event *event, void *data) in flexible_sched_in() argument
3315 if (event->state <= PERF_EVENT_STATE_OFF) in flexible_sched_in()
3318 if (!event_filter_match(event)) in flexible_sched_in()
3321 if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) { in flexible_sched_in()
3322 if (!group_sched_in(event, sid->cpuctx, sid->ctx)) in flexible_sched_in()
3323 list_add_tail(&event->active_list, &sid->ctx->flexible_active); in flexible_sched_in()
3452 * We restore the event value and then enable it.
3455 * sets the enabled bit in the control field of event _before_
3456 * accessing the event control register. If a NMI hits, then it will
3457 * keep the event running.
3467 * to switch in PMU state; cgroup events are system-wide mode only. in __perf_event_task_sched_in()
3490 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
3492 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
3566 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
3568 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
3572 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
3586 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
3591 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
3603 struct perf_event *event; in perf_adjust_freq_unthr_context() local
3619 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_adjust_freq_unthr_context()
3620 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_context()
3623 if (!event_filter_match(event)) in perf_adjust_freq_unthr_context()
3626 perf_pmu_disable(event->pmu); in perf_adjust_freq_unthr_context()
3628 hwc = &event->hw; in perf_adjust_freq_unthr_context()
3632 perf_log_throttle(event, 1); in perf_adjust_freq_unthr_context()
3633 event->pmu->start(event, 0); in perf_adjust_freq_unthr_context()
3636 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_context()
3640 * stop the event and update event->count in perf_adjust_freq_unthr_context()
3642 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_context()
3644 now = local64_read(&event->count); in perf_adjust_freq_unthr_context()
3649 * restart the event in perf_adjust_freq_unthr_context()
3651 * we have stopped the event so tell that in perf_adjust_freq_unthr_context()
3656 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_context()
3658 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_context()
3660 perf_pmu_enable(event->pmu); in perf_adjust_freq_unthr_context()
3668 * Move @event to the tail of the @ctx's eligible events.
3670 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) in rotate_ctx() argument
3679 perf_event_groups_delete(&ctx->flexible_groups, event); in rotate_ctx()
3680 perf_event_groups_insert(&ctx->flexible_groups, event); in rotate_ctx()
3698 * events, thus the event count values are stable. in perf_rotate_context()
3761 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
3764 if (!event->attr.enable_on_exec) in event_enable_on_exec()
3767 event->attr.enable_on_exec = 0; in event_enable_on_exec()
3768 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
3771 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_enable_on_exec()
3785 struct perf_event *event; in perf_event_enable_on_exec() local
3797 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
3798 enabled |= event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
3799 event_type |= get_event_type(event); in perf_event_enable_on_exec()
3803 * Unclone and reschedule this context if we enabled any event. in perf_event_enable_on_exec()
3821 struct perf_event *event; member
3826 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) in __perf_event_read_cpu() argument
3830 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { in __perf_event_read_cpu()
3844 * Cross CPU call to read the hardware event
3849 struct perf_event *sub, *event = data->event; in __perf_event_read() local
3850 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
3852 struct pmu *pmu = event->pmu; in __perf_event_read()
3858 * event->count would have been updated to a recent sample in __perf_event_read()
3859 * when the event was scheduled out. in __perf_event_read()
3867 update_cgrp_time_from_event(event); in __perf_event_read()
3870 perf_event_update_time(event); in __perf_event_read()
3872 perf_event_update_sibling_time(event); in __perf_event_read()
3874 if (event->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
3878 pmu->read(event); in __perf_event_read()
3885 pmu->read(event); in __perf_event_read()
3887 for_each_sibling_event(sub, event) { in __perf_event_read()
3890 * Use sibling's PMU rather than @event's since in __perf_event_read()
3903 static inline u64 perf_event_count(struct perf_event *event) in perf_event_count() argument
3905 return local64_read(&event->count) + atomic64_read(&event->child_count); in perf_event_count()
3909 * NMI-safe method to read a local event, that is an event that
3916 int perf_event_read_local(struct perf_event *event, u64 *value, in perf_event_read_local() argument
3929 * It must not be an event with inherit set, we cannot read in perf_event_read_local()
3932 if (event->attr.inherit) { in perf_event_read_local()
3937 /* If this is a per-task event, it must be for current */ in perf_event_read_local()
3938 if ((event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
3939 event->hw.target != current) { in perf_event_read_local()
3944 /* If this is a per-CPU event, it must be for this CPU */ in perf_event_read_local()
3945 if (!(event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
3946 event->cpu != smp_processor_id()) { in perf_event_read_local()
3951 /* If this is a pinned event it must be running on this CPU */ in perf_event_read_local()
3952 if (event->attr.pinned && event->oncpu != smp_processor_id()) { in perf_event_read_local()
3958 * If the event is currently on this CPU, its either a per-task event, in perf_event_read_local()
3962 if (event->oncpu == smp_processor_id()) in perf_event_read_local()
3963 event->pmu->read(event); in perf_event_read_local()
3965 *value = local64_read(&event->count); in perf_event_read_local()
3967 u64 now = event->shadow_ctx_time + perf_clock(); in perf_event_read_local()
3970 __perf_update_times(event, now, &__enabled, &__running); in perf_event_read_local()
3982 static int perf_event_read(struct perf_event *event, bool group) in perf_event_read() argument
3984 enum perf_event_state state = READ_ONCE(event->state); in perf_event_read()
3988 * If event is enabled and currently active on a CPU, update the in perf_event_read()
3989 * value in the event structure: in perf_event_read()
4003 event_cpu = READ_ONCE(event->oncpu); in perf_event_read()
4008 .event = event, in perf_event_read()
4014 event_cpu = __perf_event_read_cpu(event, event_cpu); in perf_event_read()
4020 * If event_cpu isn't a valid CPU it means the event got in perf_event_read()
4021 * scheduled out and that will have updated the event count. in perf_event_read()
4023 * Therefore, either way, we'll have an up-to-date event count in perf_event_read()
4031 struct perf_event_context *ctx = event->ctx; in perf_event_read()
4035 state = event->state; in perf_event_read()
4047 update_cgrp_time_from_event(event); in perf_event_read()
4050 perf_event_update_time(event); in perf_event_read()
4052 perf_event_update_sibling_time(event); in perf_event_read()
4119 struct perf_event *event) in find_get_context() argument
4126 int cpu = event->cpu; in find_get_context()
4129 /* Must be root to operate on a CPU event: */ in find_get_context()
4146 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in find_get_context()
4213 static void perf_event_free_filter(struct perf_event *event);
4214 static void perf_event_free_bpf_prog(struct perf_event *event);
4218 struct perf_event *event; in free_event_rcu() local
4220 event = container_of(head, struct perf_event, rcu_head); in free_event_rcu()
4221 if (event->ns) in free_event_rcu()
4222 put_pid_ns(event->ns); in free_event_rcu()
4223 perf_event_free_filter(event); in free_event_rcu()
4224 kfree(event); in free_event_rcu()
4227 static void ring_buffer_attach(struct perf_event *event,
4230 static void detach_sb_event(struct perf_event *event) in detach_sb_event() argument
4232 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in detach_sb_event()
4235 list_del_rcu(&event->sb_list); in detach_sb_event()
4239 static bool is_sb_event(struct perf_event *event) in is_sb_event() argument
4241 struct perf_event_attr *attr = &event->attr; in is_sb_event()
4243 if (event->parent) in is_sb_event()
4246 if (event->attach_state & PERF_ATTACH_TASK) in is_sb_event()
4257 static void unaccount_pmu_sb_event(struct perf_event *event) in unaccount_pmu_sb_event() argument
4259 if (is_sb_event(event)) in unaccount_pmu_sb_event()
4260 detach_sb_event(event); in unaccount_pmu_sb_event()
4263 static void unaccount_event_cpu(struct perf_event *event, int cpu) in unaccount_event_cpu() argument
4265 if (event->parent) in unaccount_event_cpu()
4268 if (is_cgroup_event(event)) in unaccount_event_cpu()
4294 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
4298 if (event->parent) in unaccount_event()
4301 if (event->attach_state & PERF_ATTACH_TASK) in unaccount_event()
4303 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
4305 if (event->attr.comm) in unaccount_event()
4307 if (event->attr.namespaces) in unaccount_event()
4309 if (event->attr.task) in unaccount_event()
4311 if (event->attr.freq) in unaccount_event()
4313 if (event->attr.context_switch) { in unaccount_event()
4317 if (is_cgroup_event(event)) in unaccount_event()
4319 if (has_branch_stack(event)) in unaccount_event()
4327 unaccount_event_cpu(event, event->cpu); in unaccount_event()
4329 unaccount_pmu_sb_event(event); in unaccount_event()
4342 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
4352 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
4354 struct pmu *pmu = event->pmu; in exclusive_event_init()
4367 * Since this is called in perf_event_alloc() path, event::ctx in exclusive_event_init()
4369 * to mean "per-task event", because unlike other attach states it in exclusive_event_init()
4372 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
4383 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
4385 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
4391 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
4407 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
4411 struct pmu *pmu = event->pmu; in exclusive_event_installable()
4419 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
4426 static void perf_addr_filters_splice(struct perf_event *event,
4429 static void _free_event(struct perf_event *event) in _free_event() argument
4431 irq_work_sync(&event->pending); in _free_event()
4433 unaccount_event(event); in _free_event()
4435 if (event->rb) { in _free_event()
4437 * Can happen when we close an event with re-directed output. in _free_event()
4442 mutex_lock(&event->mmap_mutex); in _free_event()
4443 ring_buffer_attach(event, NULL); in _free_event()
4444 mutex_unlock(&event->mmap_mutex); in _free_event()
4447 if (is_cgroup_event(event)) in _free_event()
4448 perf_detach_cgroup(event); in _free_event()
4450 if (!event->parent) { in _free_event()
4451 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in _free_event()
4455 perf_event_free_bpf_prog(event); in _free_event()
4456 perf_addr_filters_splice(event, NULL); in _free_event()
4457 kfree(event->addr_filter_ranges); in _free_event()
4459 if (event->destroy) in _free_event()
4460 event->destroy(event); in _free_event()
4466 if (event->hw.target) in _free_event()
4467 put_task_struct(event->hw.target); in _free_event()
4473 if (event->ctx) in _free_event()
4474 put_ctx(event->ctx); in _free_event()
4476 exclusive_event_destroy(event); in _free_event()
4477 module_put(event->pmu->module); in _free_event()
4479 call_rcu(&event->rcu_head, free_event_rcu); in _free_event()
4484 * where the event isn't exposed yet and inherited events.
4486 static void free_event(struct perf_event *event) in free_event() argument
4488 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
4489 "unexpected event refcount: %ld; ptr=%p\n", in free_event()
4490 atomic_long_read(&event->refcount), event)) { in free_event()
4495 _free_event(event); in free_event()
4499 * Remove user event from the owner task.
4501 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
4509 * indeed free this event, otherwise we need to serialize on in perf_remove_from_owner()
4512 owner = READ_ONCE(event->owner); in perf_remove_from_owner()
4535 * We have to re-check the event->owner field, if it is cleared in perf_remove_from_owner()
4538 * event. in perf_remove_from_owner()
4540 if (event->owner) { in perf_remove_from_owner()
4541 list_del_init(&event->owner_entry); in perf_remove_from_owner()
4542 smp_store_release(&event->owner, NULL); in perf_remove_from_owner()
4549 static void put_event(struct perf_event *event) in put_event() argument
4551 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
4554 _free_event(event); in put_event()
4558 * Kill an event dead; while event:refcount will preserve the event
4562 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
4564 struct perf_event_context *ctx = event->ctx; in perf_event_release_kernel()
4573 WARN_ON_ONCE(event->attach_state & in perf_event_release_kernel()
4578 if (!is_kernel_event(event)) in perf_event_release_kernel()
4579 perf_remove_from_owner(event); in perf_event_release_kernel()
4581 ctx = perf_event_ctx_lock(event); in perf_event_release_kernel()
4583 perf_remove_from_context(event, DETACH_GROUP); in perf_event_release_kernel()
4587 * Mark this event as STATE_DEAD, there is no external reference to it in perf_event_release_kernel()
4590 * Anybody acquiring event->child_mutex after the below loop _must_ in perf_event_release_kernel()
4597 event->state = PERF_EVENT_STATE_DEAD; in perf_event_release_kernel()
4600 perf_event_ctx_unlock(event, ctx); in perf_event_release_kernel()
4603 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
4604 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_release_kernel()
4615 * Since the event cannot get freed while we hold the in perf_event_release_kernel()
4626 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
4628 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
4635 tmp = list_first_entry_or_null(&event->child_list, in perf_event_release_kernel()
4644 put_event(event); in perf_event_release_kernel()
4647 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
4652 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
4661 * Wake any perf_event_free_task() waiting for this event to be in perf_event_release_kernel()
4669 put_event(event); /* Must be the 'last' reference */ in perf_event_release_kernel()
4683 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
4691 mutex_lock(&event->child_mutex); in __perf_event_read_value()
4693 (void)perf_event_read(event, false); in __perf_event_read_value()
4694 total += perf_event_count(event); in __perf_event_read_value()
4696 *enabled += event->total_time_enabled + in __perf_event_read_value()
4697 atomic64_read(&event->child_total_time_enabled); in __perf_event_read_value()
4698 *running += event->total_time_running + in __perf_event_read_value()
4699 atomic64_read(&event->child_total_time_running); in __perf_event_read_value()
4701 list_for_each_entry(child, &event->child_list, child_list) { in __perf_event_read_value()
4707 mutex_unlock(&event->child_mutex); in __perf_event_read_value()
4712 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
4717 ctx = perf_event_ctx_lock(event); in perf_event_read_value()
4718 count = __perf_event_read_value(event, enabled, running); in perf_event_read_value()
4719 perf_event_ctx_unlock(event, ctx); in perf_event_read_value()
4772 static int perf_read_group(struct perf_event *event, in perf_read_group() argument
4775 struct perf_event *leader = event->group_leader, *child; in perf_read_group()
4782 values = kzalloc(event->read_size, GFP_KERNEL); in perf_read_group()
4806 ret = event->read_size; in perf_read_group()
4807 if (copy_to_user(buf, values, event->read_size)) in perf_read_group()
4818 static int perf_read_one(struct perf_event *event, in perf_read_one() argument
4825 values[n++] = __perf_event_read_value(event, &enabled, &running); in perf_read_one()
4831 values[n++] = primary_event_id(event); in perf_read_one()
4839 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
4843 if (event->state > PERF_EVENT_STATE_EXIT) in is_event_hup()
4846 mutex_lock(&event->child_mutex); in is_event_hup()
4847 no_children = list_empty(&event->child_list); in is_event_hup()
4848 mutex_unlock(&event->child_mutex); in is_event_hup()
4853 * Read the performance event - simple non blocking version for now
4856 __perf_read(struct perf_event *event, char __user *buf, size_t count) in __perf_read() argument
4858 u64 read_format = event->attr.read_format; in __perf_read()
4862 * Return end-of-file for a read on an event that is in in __perf_read()
4866 if (event->state == PERF_EVENT_STATE_ERROR) in __perf_read()
4869 if (count < event->read_size) in __perf_read()
4872 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
4874 ret = perf_read_group(event, read_format, buf); in __perf_read()
4876 ret = perf_read_one(event, read_format, buf); in __perf_read()
4884 struct perf_event *event = file->private_data; in perf_read() local
4888 ctx = perf_event_ctx_lock(event); in perf_read()
4889 ret = __perf_read(event, buf, count); in perf_read()
4890 perf_event_ctx_unlock(event, ctx); in perf_read()
4897 struct perf_event *event = file->private_data; in perf_poll() local
4901 poll_wait(file, &event->waitq, wait); in perf_poll()
4903 if (is_event_hup(event)) in perf_poll()
4907 * Pin the event->rb by taking event->mmap_mutex; otherwise in perf_poll()
4910 mutex_lock(&event->mmap_mutex); in perf_poll()
4911 rb = event->rb; in perf_poll()
4914 mutex_unlock(&event->mmap_mutex); in perf_poll()
4918 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
4920 (void)perf_event_read(event, false); in _perf_event_reset()
4921 local64_set(&event->count, 0); in _perf_event_reset()
4922 perf_event_update_userpage(event); in _perf_event_reset()
4926 * Holding the top-level event's child_mutex means that any
4927 * descendant process that has inherited this event will block
4931 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
4936 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
4938 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
4939 func(event); in perf_event_for_each_child()
4940 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
4942 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
4945 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
4948 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
4953 event = event->group_leader; in perf_event_for_each()
4955 perf_event_for_each_child(event, func); in perf_event_for_each()
4956 for_each_sibling_event(sibling, event) in perf_event_for_each()
4960 static void __perf_event_period(struct perf_event *event, in __perf_event_period() argument
4968 if (event->attr.freq) { in __perf_event_period()
4969 event->attr.sample_freq = value; in __perf_event_period()
4971 event->attr.sample_period = value; in __perf_event_period()
4972 event->hw.sample_period = value; in __perf_event_period()
4975 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
4980 * trying to unthrottle while we already re-started the event. in __perf_event_period()
4982 if (event->hw.interrupts == MAX_INTERRUPTS) { in __perf_event_period()
4983 event->hw.interrupts = 0; in __perf_event_period()
4984 perf_log_throttle(event, 1); in __perf_event_period()
4986 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
4989 local64_set(&event->hw.period_left, 0); in __perf_event_period()
4992 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
4997 static int perf_event_check_period(struct perf_event *event, u64 value) in perf_event_check_period() argument
4999 return event->pmu->check_period(event, value); in perf_event_check_period()
5002 static int perf_event_period(struct perf_event *event, u64 __user *arg) in perf_event_period() argument
5006 if (!is_sampling_event(event)) in perf_event_period()
5015 if (event->attr.freq && value > sysctl_perf_event_sample_rate) in perf_event_period()
5018 if (perf_event_check_period(event, value)) in perf_event_period()
5021 if (!event->attr.freq && (value & (1ULL << 63))) in perf_event_period()
5024 event_function_call(event, __perf_event_period, &value); in perf_event_period()
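
perf_event_period() is the kernel-side handler for PERF_EVENT_IOC_PERIOD, which takes a pointer to a u64 holding the new sample period (or sample frequency when the event was opened with attr.freq set). A minimal userspace sketch; the helper name is made up and fd is assumed to come from perf_event_open() for a sampling event.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Returns -1 with errno set (e.g. EINVAL) if the value fails the checks in
 * perf_event_period(), such as a non-sampling event or an over-limit freq. */
static int set_sample_period(int fd, uint64_t new_period)
{
	return ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period);
}
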
5045 static int perf_event_set_output(struct perf_event *event,
5047 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
5048 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
5052 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
5069 return _perf_event_refresh(event, arg); in _perf_ioctl()
5072 return perf_event_period(event, (u64 __user *)arg); in _perf_ioctl()
5076 u64 id = primary_event_id(event); in _perf_ioctl()
5093 ret = perf_event_set_output(event, output_event); in _perf_ioctl()
5096 ret = perf_event_set_output(event, NULL); in _perf_ioctl()
5102 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
5105 return perf_event_set_bpf_prog(event, arg); in _perf_ioctl()
5111 rb = rcu_dereference(event->rb); in _perf_ioctl()
5122 return perf_event_query_prog_array(event, (void __user *)arg); in _perf_ioctl()
5132 return perf_event_modify_attr(event, &new_attr); in _perf_ioctl()
5139 perf_event_for_each(event, func); in _perf_ioctl()
5141 perf_event_for_each_child(event, func); in _perf_ioctl()
5148 struct perf_event *event = file->private_data; in perf_ioctl() local
5152 ctx = perf_event_ctx_lock(event); in perf_ioctl()
5153 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
5154 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
5184 struct perf_event *event; in perf_event_task_enable() local
5187 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
5188 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
5189 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
5190 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
5200 struct perf_event *event; in perf_event_task_disable() local
5203 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
5204 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
5205 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
5206 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
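
perf_event_task_enable()/perf_event_task_disable() are reached via the PR_TASK_PERF_EVENTS_ENABLE/PR_TASK_PERF_EVENTS_DISABLE prctls. A usage sketch (helper name is illustrative):

#include <sys/prctl.h>
#include <linux/prctl.h>

/* Enable or disable every counter the calling task owns, i.e. the
 * events on current->perf_event_list walked above. */
static int toggle_own_counters(int enable)
{
        return prctl(enable ? PR_TASK_PERF_EVENTS_ENABLE
                            : PR_TASK_PERF_EVENTS_DISABLE, 0, 0, 0, 0);
}
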
5213 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
5215 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
5218 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
5221 return event->pmu->event_idx(event); in perf_event_index()
5224 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
5232 ctx_time = event->shadow_ctx_time + *now; in calc_timer_values()
5233 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
5236 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
5242 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
5259 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
5268 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
5275 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
5281 * based on snapshot values taken when the event in perf_event_update_userpage()
5288 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
5298 userpg->index = perf_event_index(event); in perf_event_update_userpage()
5299 userpg->offset = perf_event_count(event); in perf_event_update_userpage()
5301 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
5304 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
5307 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
5309 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
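
The fields published here are read lock-free from userspace with the seqcount-style loop documented in include/uapi/linux/perf_event.h. A condensed sketch; the arch-specific rdpmc step is only noted in a comment, and the helper name is illustrative:

#include <linux/perf_event.h>

/* Read the count snapshot that perf_event_update_userpage() writes
 * into the first (metadata) page of the event's mmap()ed buffer. */
static __s64 read_count_snapshot(volatile struct perf_event_mmap_page *pc)
{
        __u32 seq;
        __s64 count;

        do {
                seq = pc->lock;
                __sync_synchronize();
                count = pc->offset;
                /* With pc->cap_user_rdpmc and a non-zero pc->index, the
                 * live PMC value (rdpmc(pc->index - 1), sign-extended to
                 * pc->pmc_width bits) would be added here. */
                __sync_synchronize();
        } while (pc->lock != seq);

        return count;
}
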
5321 struct perf_event *event = vmf->vma->vm_file->private_data; in perf_mmap_fault() local
5332 rb = rcu_dereference(event->rb); in perf_mmap_fault()
5354 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
5360 if (event->rb) { in ring_buffer_attach()
5363 * event->rb_entry and wait/clear when adding event->rb_entry. in ring_buffer_attach()
5365 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
5367 old_rb = event->rb; in ring_buffer_attach()
5369 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
5372 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
5373 event->rcu_pending = 1; in ring_buffer_attach()
5377 if (event->rcu_pending) { in ring_buffer_attach()
5378 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
5379 event->rcu_pending = 0; in ring_buffer_attach()
5383 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
5388 * Avoid racing with perf_mmap_close(AUX): stop the event in ring_buffer_attach()
5389 * before swizzling the event::rb pointer; if it's getting in ring_buffer_attach()
5397 if (has_aux(event)) in ring_buffer_attach()
5398 perf_event_stop(event, 0); in ring_buffer_attach()
5400 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
5409 wake_up_all(&event->waitq); in ring_buffer_attach()
5413 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
5418 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
5420 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
5421 wake_up_all(&event->waitq); in ring_buffer_wakeup()
5426 struct ring_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
5431 rb = rcu_dereference(event->rb); in ring_buffer_get()
5453 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
5455 atomic_inc(&event->mmap_count); in perf_mmap_open()
5456 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
5459 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
5461 if (event->pmu->event_mapped) in perf_mmap_open()
5462 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
5465 static void perf_pmu_output_stop(struct perf_event *event);
5469 * event, or through other events by use of perf_event_set_output().
5477 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
5478 struct ring_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
5484 if (event->pmu->event_unmapped) in perf_mmap_close()
5485 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
5489 * event->mmap_count, so it is ok to use event->mmap_mutex to in perf_mmap_close()
5493 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { in perf_mmap_close()
5500 perf_pmu_output_stop(event); in perf_mmap_close()
5510 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
5516 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
5519 ring_buffer_attach(event, NULL); in perf_mmap_close()
5520 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
5533 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
5534 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
5536 * This event is en-route to free_event() which will in perf_mmap_close()
5543 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
5549 * If we find a different rb, ignore this event, a next in perf_mmap_close()
5554 if (event->rb == rb) in perf_mmap_close()
5555 ring_buffer_attach(event, NULL); in perf_mmap_close()
5557 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
5558 put_event(event); in perf_mmap_close()
5594 struct perf_event *event = file->private_data; in perf_mmap() local
5609 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
5627 if (!event->rb) in perf_mmap()
5632 mutex_lock(&event->mmap_mutex); in perf_mmap()
5635 rb = event->rb; in perf_mmap()
5687 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
5689 mutex_lock(&event->mmap_mutex); in perf_mmap()
5690 if (event->rb) { in perf_mmap()
5691 if (event->rb->nr_pages != nr_pages) { in perf_mmap()
5696 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
5702 mutex_unlock(&event->mmap_mutex); in perf_mmap()
5742 WARN_ON(!rb && event->rb); in perf_mmap()
5749 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap()
5750 event->cpu, flags); in perf_mmap()
5761 ring_buffer_attach(event, rb); in perf_mmap()
5763 perf_event_init_userpage(event); in perf_mmap()
5764 perf_event_update_userpage(event); in perf_mmap()
5766 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
5767 event->attr.aux_watermark, flags); in perf_mmap()
5777 atomic_inc(&event->mmap_count); in perf_mmap()
5782 mutex_unlock(&event->mmap_mutex); in perf_mmap()
5791 if (event->pmu->event_mapped) in perf_mmap()
5792 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap()
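
perf_mmap() expects one metadata page followed by a power-of-two number of data pages (AUX space is mapped at a separate offset). A sketch of the basic mapping, assuming fd came from perf_event_open() (helper name is illustrative):

#include <sys/mman.h>
#include <unistd.h>

/* Map the ring buffer: 1 metadata page + 2^n data pages. */
static void *map_ring_buffer(int fd, size_t data_pages)
{
        size_t len = (data_pages + 1) * sysconf(_SC_PAGESIZE);

        return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}
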
5800 struct perf_event *event = filp->private_data; in perf_fasync() local
5804 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
5825 * Perf event wakeup
5831 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) in perf_event_fasync() argument
5834 if (event->parent) in perf_event_fasync()
5835 event = event->parent; in perf_event_fasync()
5836 return &event->fasync; in perf_event_fasync()
5839 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
5841 ring_buffer_wakeup(event); in perf_event_wakeup()
5843 if (event->pending_kill) { in perf_event_wakeup()
5844 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
5845 event->pending_kill = 0; in perf_event_wakeup()
5849 static void perf_pending_event_disable(struct perf_event *event) in perf_pending_event_disable() argument
5851 int cpu = READ_ONCE(event->pending_disable); in perf_pending_event_disable()
5857 WRITE_ONCE(event->pending_disable, -1); in perf_pending_event_disable()
5858 perf_event_disable_local(event); in perf_pending_event_disable()
5880 * But the event runs on CPU-B and wants disabling there. in perf_pending_event_disable()
5882 irq_work_queue_on(&event->pending, cpu); in perf_pending_event_disable()
5887 struct perf_event *event = container_of(entry, struct perf_event, pending); in perf_pending_event() local
5896 perf_pending_event_disable(event); in perf_pending_event()
5898 if (event->pending_wakeup) { in perf_pending_event()
5899 event->pending_wakeup = 0; in perf_pending_event()
5900 perf_event_wakeup(event); in perf_pending_event()
6068 struct perf_event *event) in __perf_event_header__init_id() argument
6070 u64 sample_type = event->attr.sample_type; in __perf_event_header__init_id()
6073 header->size += event->id_header_size; in __perf_event_header__init_id()
6077 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
6078 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
6082 data->time = perf_event_clock(event); in __perf_event_header__init_id()
6085 data->id = primary_event_id(event); in __perf_event_header__init_id()
6088 data->stream_id = event->id; in __perf_event_header__init_id()
6098 struct perf_event *event) in perf_event_header__init_id() argument
6100 if (event->attr.sample_id_all) in perf_event_header__init_id()
6101 __perf_event_header__init_id(header, data, event); in perf_event_header__init_id()
6128 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
6132 if (event->attr.sample_id_all) in perf_event__output_id_sample()
6137 struct perf_event *event, in perf_output_read_one() argument
6140 u64 read_format = event->attr.read_format; in perf_output_read_one()
6144 values[n++] = perf_event_count(event); in perf_output_read_one()
6147 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
6151 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
6154 values[n++] = primary_event_id(event); in perf_output_read_one()
6160 struct perf_event *event, in perf_output_read_group() argument
6163 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
6164 u64 read_format = event->attr.read_format; in perf_output_read_group()
6176 if ((leader != event) && in perf_output_read_group()
6189 if ((sub != event) && in perf_output_read_group()
6212 struct perf_event *event) in perf_output_read() argument
6215 u64 read_format = event->attr.read_format; in perf_output_read()
6219 * based on snapshot values taken when the event in perf_output_read()
6227 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
6229 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
6230 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
6232 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
6238 struct perf_event *event) in perf_output_sample() argument
6272 perf_output_read(handle, event); in perf_output_sample()
6343 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
6374 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
6385 if (!event->attr.watermark) { in perf_output_sample()
6386 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
6438 perf_callchain(struct perf_event *event, struct pt_regs *regs) in perf_callchain() argument
6440 bool kernel = !event->attr.exclude_callchain_kernel; in perf_callchain()
6441 bool user = !event->attr.exclude_callchain_user; in perf_callchain()
6443 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
6444 const u32 max_stack = event->attr.sample_max_stack; in perf_callchain()
6457 struct perf_event *event, in perf_prepare_sample() argument
6460 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
6463 header->size = sizeof(*header) + event->header_size; in perf_prepare_sample()
6468 __perf_event_header__init_id(header, data, event); in perf_prepare_sample()
6477 data->callchain = perf_callchain(event, regs); in perf_prepare_sample()
6527 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
6541 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
6566 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
6579 __perf_event_output(struct perf_event *event, in __perf_event_output() argument
6592 perf_prepare_sample(&header, data, event, regs); in __perf_event_output()
6594 if (output_begin(&handle, event, header.size)) in __perf_event_output()
6597 perf_output_sample(&handle, &header, data, event); in __perf_event_output()
6606 perf_event_output_forward(struct perf_event *event, in perf_event_output_forward() argument
6610 __perf_event_output(event, data, regs, perf_output_begin_forward); in perf_event_output_forward()
6614 perf_event_output_backward(struct perf_event *event, in perf_event_output_backward() argument
6618 __perf_event_output(event, data, regs, perf_output_begin_backward); in perf_event_output_backward()
6622 perf_event_output(struct perf_event *event, in perf_event_output() argument
6626 __perf_event_output(event, data, regs, perf_output_begin); in perf_event_output()
6641 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
6650 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
6652 .pid = perf_event_pid(event, task), in perf_event_read_event()
6653 .tid = perf_event_tid(event, task), in perf_event_read_event()
6657 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
6658 ret = perf_output_begin(&handle, event, read_event.header.size); in perf_event_read_event()
6663 perf_output_read(&handle, event); in perf_event_read_event()
6664 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
6669 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
6676 struct perf_event *event; in perf_iterate_ctx() local
6678 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_iterate_ctx()
6680 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_ctx()
6682 if (!event_filter_match(event)) in perf_iterate_ctx()
6686 output(event, data); in perf_iterate_ctx()
6693 struct perf_event *event; in perf_iterate_sb_cpu() local
6695 list_for_each_entry_rcu(event, &pel->list, sb_list) { in perf_iterate_sb_cpu()
6698 * if we observe event->ctx, both event and ctx will be in perf_iterate_sb_cpu()
6701 if (!smp_load_acquire(&event->ctx)) in perf_iterate_sb_cpu()
6704 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_sb_cpu()
6706 if (!event_filter_match(event)) in perf_iterate_sb_cpu()
6708 output(event, data); in perf_iterate_sb_cpu()
6716 * your event, otherwise it might not get delivered.
6754 static void perf_event_addr_filters_exec(struct perf_event *event, void *data) in perf_event_addr_filters_exec() argument
6756 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_exec()
6761 if (!has_addr_filter(event)) in perf_event_addr_filters_exec()
6767 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_exec()
6768 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_exec()
6776 event->addr_filters_gen++; in perf_event_addr_filters_exec()
6780 perf_event_stop(event, 1); in perf_event_addr_filters_exec()
6807 static void __perf_event_output_stop(struct perf_event *event, void *data) in __perf_event_output_stop() argument
6809 struct perf_event *parent = event->parent; in __perf_event_output_stop()
6813 .event = event, in __perf_event_output_stop()
6816 if (!has_aux(event)) in __perf_event_output_stop()
6820 parent = event; in __perf_event_output_stop()
6826 * We are using event::rb to determine if the event should be stopped, in __perf_event_output_stop()
6828 * which will make us skip the event that actually needs to be stopped. in __perf_event_output_stop()
6829 * So ring_buffer_attach() has to stop an aux event before re-assigning in __perf_event_output_stop()
6838 struct perf_event *event = info; in __perf_pmu_output_stop() local
6839 struct pmu *pmu = event->ctx->pmu; in __perf_pmu_output_stop()
6842 .rb = event->rb, in __perf_pmu_output_stop()
6855 static void perf_pmu_output_stop(struct perf_event *event) in perf_pmu_output_stop() argument
6862 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { in perf_pmu_output_stop()
6866 * sufficient to stop the event itself if it's active, since in perf_pmu_output_stop()
6876 err = cpu_function_call(cpu, __perf_pmu_output_stop, event); in perf_pmu_output_stop()
6906 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
6908 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
6909 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
6910 event->attr.task; in perf_event_task_match()
6913 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
6922 if (!perf_event_task_match(event)) in perf_event_task_output()
6925 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
6927 ret = perf_output_begin(&handle, event, in perf_event_task_output()
6932 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
6933 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
6936 task_event->event_id.ppid = perf_event_pid(event, in perf_event_task_output()
6938 task_event->event_id.ptid = perf_event_pid(event, in perf_event_task_output()
6941 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
6942 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
6945 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
6949 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
7012 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
7014 return event->attr.comm; in perf_event_comm_match()
7017 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
7026 if (!perf_event_comm_match(event)) in perf_event_comm_output()
7029 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
7030 ret = perf_output_begin(&handle, event, in perf_event_comm_output()
7036 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
7037 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
7043 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
7111 static int perf_event_namespaces_match(struct perf_event *event) in perf_event_namespaces_match() argument
7113 return event->attr.namespaces; in perf_event_namespaces_match()
7116 static void perf_event_namespaces_output(struct perf_event *event, in perf_event_namespaces_output() argument
7125 if (!perf_event_namespaces_match(event)) in perf_event_namespaces_output()
7129 &sample, event); in perf_event_namespaces_output()
7130 ret = perf_output_begin(&handle, event, in perf_event_namespaces_output()
7135 namespaces_event->event_id.pid = perf_event_pid(event, in perf_event_namespaces_output()
7137 namespaces_event->event_id.tid = perf_event_tid(event, in perf_event_namespaces_output()
7142 perf_event__output_id_sample(event, &handle, &sample); in perf_event_namespaces_output()
7249 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
7256 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
7257 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
7260 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
7270 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
7273 if (event->attr.mmap2) { in perf_event_mmap_output()
7283 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
7284 ret = perf_output_begin(&handle, event, in perf_event_mmap_output()
7289 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
7290 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
7294 if (event->attr.mmap2) { in perf_event_mmap_output()
7306 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
7479 static void __perf_addr_filters_adjust(struct perf_event *event, void *data) in __perf_addr_filters_adjust() argument
7481 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in __perf_addr_filters_adjust()
7487 if (!has_addr_filter(event)) in __perf_addr_filters_adjust()
7496 &event->addr_filter_ranges[count])) in __perf_addr_filters_adjust()
7503 event->addr_filters_gen++; in __perf_addr_filters_adjust()
7507 perf_event_stop(event, 1); in __perf_addr_filters_adjust()
7571 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
7593 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
7594 ret = perf_output_begin(&handle, event, rec.header.size); in perf_event_aux_event()
7600 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
7608 void perf_log_lost_samples(struct perf_event *event, u64 lost) in perf_log_lost_samples() argument
7626 perf_event_header__init_id(&lost_samples_event.header, &sample, event); in perf_log_lost_samples()
7628 ret = perf_output_begin(&handle, event, in perf_log_lost_samples()
7634 perf_event__output_id_sample(event, &handle, &sample); in perf_log_lost_samples()
7653 static int perf_event_switch_match(struct perf_event *event) in perf_event_switch_match() argument
7655 return event->attr.context_switch; in perf_event_switch_match()
7658 static void perf_event_switch_output(struct perf_event *event, void *data) in perf_event_switch_output() argument
7665 if (!perf_event_switch_match(event)) in perf_event_switch_output()
7669 if (event->ctx->task) { in perf_event_switch_output()
7676 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
7678 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
7681 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
7683 ret = perf_output_begin(&handle, event, se->event_id.header.size); in perf_event_switch_output()
7687 if (event->ctx->task) in perf_event_switch_output()
7692 perf_event__output_id_sample(event, &handle, &sample); in perf_event_switch_output()
7731 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
7748 .time = perf_event_clock(event), in perf_log_throttle()
7749 .id = primary_event_id(event), in perf_log_throttle()
7750 .stream_id = event->id, in perf_log_throttle()
7756 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
7758 ret = perf_output_begin(&handle, event, in perf_log_throttle()
7764 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
7768 void perf_event_itrace_started(struct perf_event *event) in perf_event_itrace_started() argument
7770 event->attach_state |= PERF_ATTACH_ITRACE; in perf_event_itrace_started()
7773 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
7784 if (event->parent) in perf_log_itrace_start()
7785 event = event->parent; in perf_log_itrace_start()
7787 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
7788 event->attach_state & PERF_ATTACH_ITRACE) in perf_log_itrace_start()
7794 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
7795 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
7797 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
7798 ret = perf_output_begin(&handle, event, rec.header.size); in perf_log_itrace_start()
7804 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
7810 __perf_event_account_interrupt(struct perf_event *event, int throttle) in __perf_event_account_interrupt() argument
7812 struct hw_perf_event *hwc = &event->hw; in __perf_event_account_interrupt()
7827 perf_log_throttle(event, 0); in __perf_event_account_interrupt()
7832 if (event->attr.freq) { in __perf_event_account_interrupt()
7839 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_account_interrupt()
7845 int perf_event_account_interrupt(struct perf_event *event) in perf_event_account_interrupt() argument
7847 return __perf_event_account_interrupt(event, 1); in perf_event_account_interrupt()
7851 * Generic event overflow handling, sampling.
7854 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
7858 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
7865 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
7868 ret = __perf_event_account_interrupt(event, throttle); in __perf_event_overflow()
7875 event->pending_kill = POLL_IN; in __perf_event_overflow()
7876 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
7878 event->pending_kill = POLL_HUP; in __perf_event_overflow()
7880 perf_event_disable_inatomic(event); in __perf_event_overflow()
7883 READ_ONCE(event->overflow_handler)(event, data, regs); in __perf_event_overflow() local
7885 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
7886 event->pending_wakeup = 1; in __perf_event_overflow()
7887 irq_work_queue(&event->pending); in __perf_event_overflow()
7893 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
7897 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
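
The event_limit countdown above is armed from userspace with PERF_EVENT_IOC_REFRESH; when it reaches zero the event is disabled and POLL_HUP is delivered. A sketch (helper name is illustrative):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Allow 'n' more overflows before the kernel disables the event
 * (the atomic_dec_and_test() of event_limit above). */
static int refresh_event(int fd, int n)
{
        return ioctl(fd, PERF_EVENT_IOC_REFRESH, n);
}
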
7901 * Generic software event infrastructure
7916 * We directly increment event->count and keep a second value in
7917 * event->hw.period_left to count intervals. This period event
7922 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
7924 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
7945 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
7949 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
7953 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
7959 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
7971 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
7975 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
7977 local64_add(nr, &event->count); in perf_swevent_event()
7982 if (!is_sampling_event(event)) in perf_swevent_event()
7985 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
7987 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
7989 data->period = event->hw.last_period; in perf_swevent_event()
7991 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
7992 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
7997 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
8000 static int perf_exclude_event(struct perf_event *event, in perf_exclude_event() argument
8003 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
8007 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
8010 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
8017 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
8023 if (event->attr.type != type) in perf_swevent_match()
8026 if (event->attr.config != event_id) in perf_swevent_match()
8029 if (perf_exclude_event(event, regs)) in perf_swevent_match()
8063 /* For the event head insertion and removal in the hlist */
8065 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
8068 u32 event_id = event->attr.config; in find_swevent_head()
8069 u64 type = event->attr.type; in find_swevent_head()
8072 * Event scheduling is always serialized against hlist allocation in find_swevent_head()
8077 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
8090 struct perf_event *event; in do_perf_sw_event() local
8098 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
8099 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
8100 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
8150 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
8154 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
8157 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
8160 if (is_sampling_event(event)) { in perf_swevent_add()
8162 perf_swevent_set_period(event); in perf_swevent_add()
8167 head = find_swevent_head(swhash, event); in perf_swevent_add()
8171 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
8172 perf_event_update_userpage(event); in perf_swevent_add()
8177 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
8179 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
8182 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
8184 event->hw.state = 0; in perf_swevent_start()
8187 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
8189 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
8281 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
8283 u64 event_id = event->attr.config; in sw_perf_event_destroy()
8285 WARN_ON(event->parent); in sw_perf_event_destroy()
8291 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
8293 u64 event_id = event->attr.config; in perf_swevent_init()
8295 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
8301 if (has_branch_stack(event)) in perf_swevent_init()
8316 if (!event->parent) { in perf_swevent_init()
8324 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
8345 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
8351 if (event->parent) in perf_tp_filter_match()
8352 event = event->parent; in perf_tp_filter_match()
8354 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
8359 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
8363 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
8368 if (event->attr.exclude_kernel) in perf_tp_event_match()
8371 if (!perf_tp_filter_match(event, data)) in perf_tp_event_match()
8389 perf_tp_event(call->event.type, count, raw_data, size, regs, head, in perf_trace_run_bpf_submit()
8399 struct perf_event *event; in perf_tp_event() local
8413 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
8414 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
8415 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
8420 * deliver this event there too. in perf_tp_event()
8431 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_tp_event()
8432 if (event->cpu != smp_processor_id()) in perf_tp_event()
8434 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event()
8436 if (event->attr.config != entry->type) in perf_tp_event()
8438 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
8439 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
8449 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
8451 perf_trace_destroy(event); in tp_perf_event_destroy()
8454 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
8458 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
8464 if (has_branch_stack(event)) in perf_tp_event_init()
8467 err = perf_trace_init(event); in perf_tp_event_init()
8471 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
8518 static int perf_kprobe_event_init(struct perf_event *event);
8530 static int perf_kprobe_event_init(struct perf_event *event) in perf_kprobe_event_init() argument
8535 if (event->attr.type != perf_kprobe.type) in perf_kprobe_event_init()
8544 if (has_branch_stack(event)) in perf_kprobe_event_init()
8547 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_kprobe_event_init()
8548 err = perf_kprobe_init(event, is_retprobe); in perf_kprobe_event_init()
8552 event->destroy = perf_kprobe_destroy; in perf_kprobe_event_init()
8559 static int perf_uprobe_event_init(struct perf_event *event);
8571 static int perf_uprobe_event_init(struct perf_event *event) in perf_uprobe_event_init() argument
8576 if (event->attr.type != perf_uprobe.type) in perf_uprobe_event_init()
8585 if (has_branch_stack(event)) in perf_uprobe_event_init()
8588 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_uprobe_event_init()
8589 err = perf_uprobe_init(event, is_retprobe); in perf_uprobe_event_init()
8593 event->destroy = perf_uprobe_destroy; in perf_uprobe_event_init()
8610 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
8612 ftrace_profile_free_filter(event); in perf_event_free_filter()
8616 static void bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
8622 .event = event, in bpf_overflow_handler()
8631 ret = BPF_PROG_RUN(event->prog, &ctx); in bpf_overflow_handler()
8639 event->orig_overflow_handler(event, data, regs); in bpf_overflow_handler()
8642 static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_handler() argument
8646 if (event->overflow_handler_context) in perf_event_set_bpf_handler()
8650 if (event->prog) in perf_event_set_bpf_handler()
8657 event->prog = prog; in perf_event_set_bpf_handler()
8658 event->orig_overflow_handler = READ_ONCE(event->overflow_handler); in perf_event_set_bpf_handler()
8659 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); in perf_event_set_bpf_handler()
8663 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
8665 struct bpf_prog *prog = event->prog; in perf_event_free_bpf_handler()
8670 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); in perf_event_free_bpf_handler()
8671 event->prog = NULL; in perf_event_free_bpf_handler()
8675 static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_handler() argument
8679 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
8685 * returns true if the event is a tracepoint, or a kprobe/uprobe created
8688 static inline bool perf_event_is_tracing(struct perf_event *event) in perf_event_is_tracing() argument
8690 if (event->pmu == &perf_tracepoint) in perf_event_is_tracing()
8693 if (event->pmu == &perf_kprobe) in perf_event_is_tracing()
8697 if (event->pmu == &perf_uprobe) in perf_event_is_tracing()
8703 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_prog() argument
8709 if (!perf_event_is_tracing(event)) in perf_event_set_bpf_prog()
8710 return perf_event_set_bpf_handler(event, prog_fd); in perf_event_set_bpf_prog()
8712 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE; in perf_event_set_bpf_prog()
8713 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; in perf_event_set_bpf_prog()
8714 is_syscall_tp = is_syscall_trace_event(event->tp_event); in perf_event_set_bpf_prog()
8733 !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) { in perf_event_set_bpf_prog()
8739 int off = trace_event_get_offsets(event->tp_event); in perf_event_set_bpf_prog()
8747 ret = perf_event_attach_bpf_prog(event, prog); in perf_event_set_bpf_prog()
8753 static void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
8755 if (!perf_event_is_tracing(event)) { in perf_event_free_bpf_prog()
8756 perf_event_free_bpf_handler(event); in perf_event_free_bpf_prog()
8759 perf_event_detach_bpf_prog(event); in perf_event_free_bpf_prog()
8768 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
8772 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_prog() argument
8777 static void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
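
perf_event_set_bpf_prog() is driven by the PERF_EVENT_IOC_SET_BPF ioctl, whose argument is the fd of an already-loaded BPF program. A sketch, assuming prog_fd came from bpf(BPF_PROG_LOAD, ...) (helper name is illustrative):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Attach a loaded BPF program to a tracepoint/kprobe/uprobe event,
 * or install it as an overflow handler for other event types. */
static int attach_bpf(int event_fd, int prog_fd)
{
        return ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
}
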
8799 perf_addr_filter_new(struct perf_event *event, struct list_head *filters) in perf_addr_filter_new() argument
8801 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); in perf_addr_filter_new()
8828 static void perf_addr_filters_splice(struct perf_event *event, in perf_addr_filters_splice() argument
8834 if (!has_addr_filter(event)) in perf_addr_filters_splice()
8838 if (event->parent) in perf_addr_filters_splice()
8841 raw_spin_lock_irqsave(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
8843 list_splice_init(&event->addr_filters.list, &list); in perf_addr_filters_splice()
8845 list_splice(head, &event->addr_filters.list); in perf_addr_filters_splice()
8847 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
8873 * Update event's address range filters based on the
8876 static void perf_event_addr_filters_apply(struct perf_event *event) in perf_event_addr_filters_apply() argument
8878 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_apply()
8879 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply()
8886 * We may observe TASK_TOMBSTONE, which means that the event tear-down in perf_event_addr_filters_apply()
8893 mm = get_task_mm(event->ctx->task); in perf_event_addr_filters_apply()
8907 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_apply()
8908 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_apply()
8910 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); in perf_event_addr_filters_apply()
8912 event->addr_filter_ranges[count].start = filter->offset; in perf_event_addr_filters_apply()
8913 event->addr_filter_ranges[count].size = filter->size; in perf_event_addr_filters_apply()
8919 event->addr_filters_gen++; in perf_event_addr_filters_apply()
8929 perf_event_stop(event, 1); in perf_event_addr_filters_apply()
8983 perf_event_parse_addr_filter(struct perf_event *event, char *fstr, in perf_event_parse_addr_filter() argument
9010 filter = perf_addr_filter_new(event, filters); in perf_event_parse_addr_filter()
9068 * Make sure that it doesn't contradict itself or the event's in perf_event_parse_addr_filter()
9073 if (kernel && event->attr.exclude_kernel) in perf_event_parse_addr_filter()
9097 if (!event->ctx->task) in perf_event_parse_addr_filter()
9112 event->addr_filters.nr_file_filters++; in perf_event_parse_addr_filter()
9138 perf_event_set_addr_filter(struct perf_event *event, char *filter_str) in perf_event_set_addr_filter() argument
9147 lockdep_assert_held(&event->ctx->mutex); in perf_event_set_addr_filter()
9149 if (WARN_ON_ONCE(event->parent)) in perf_event_set_addr_filter()
9152 ret = perf_event_parse_addr_filter(event, filter_str, &filters); in perf_event_set_addr_filter()
9156 ret = event->pmu->addr_filters_validate(&filters); in perf_event_set_addr_filter()
9161 perf_addr_filters_splice(event, &filters); in perf_event_set_addr_filter()
9164 perf_event_for_each_child(event, perf_event_addr_filters_apply); in perf_event_set_addr_filter()
9172 event->addr_filters.nr_file_filters = 0; in perf_event_set_addr_filter()
9177 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
9187 if (perf_event_is_tracing(event)) { in perf_event_set_filter()
9188 struct perf_event_context *ctx = event->ctx; in perf_event_set_filter()
9198 * This can result in event getting moved to a different ctx, in perf_event_set_filter()
9202 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
9206 if (has_addr_filter(event)) in perf_event_set_filter()
9207 ret = perf_event_set_addr_filter(event, filter_str); in perf_event_set_filter()
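
perf_event_set_filter() accepts either a tracepoint filter expression or, for PMUs with address filters, the textual range syntax parsed by perf_event_parse_addr_filter(). A sketch of the latter; the object path and range are made up for illustration:

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Restrict an instruction-tracing event (e.g. intel_pt) to one code
 * range of a DSO: "filter <start>/<size>@<object>". */
static int limit_trace_range(int fd)
{
        return ioctl(fd, PERF_EVENT_IOC_SET_FILTER,
                     "filter 0x1000/0x2000@/usr/lib/libfoo.so");
}
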
9222 struct perf_event *event; in perf_swevent_hrtimer() local
9225 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
9227 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_swevent_hrtimer()
9230 event->pmu->read(event); in perf_swevent_hrtimer()
9232 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
9235 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
9236 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
9237 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
9241 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
9247 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
9249 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
9252 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
9268 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
9270 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
9272 if (is_sampling_event(event)) { in perf_swevent_cancel_hrtimer()
9280 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
9282 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
9284 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
9294 if (event->attr.freq) { in perf_swevent_init_hrtimer()
9295 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
9297 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
9298 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
9301 event->attr.freq = 0; in perf_swevent_init_hrtimer()
9306 * Software event: cpu wall time clock
9309 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
9315 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
9316 local64_add(now - prev, &event->count); in cpu_clock_event_update()
9319 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
9321 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
9322 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
9325 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
9327 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
9328 cpu_clock_event_update(event); in cpu_clock_event_stop()
9331 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
9334 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
9335 perf_event_update_userpage(event); in cpu_clock_event_add()
9340 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
9342 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
9345 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
9347 cpu_clock_event_update(event); in cpu_clock_event_read()
9350 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
9352 if (event->attr.type != PERF_TYPE_SOFTWARE) in cpu_clock_event_init()
9355 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
9361 if (has_branch_stack(event)) in cpu_clock_event_init()
9364 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
9383 * Software event: task time clock
9386 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
9391 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
9393 local64_add(delta, &event->count); in task_clock_event_update()
9396 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
9398 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
9399 perf_swevent_start_hrtimer(event); in task_clock_event_start()
9402 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
9404 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
9405 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
9408 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
9411 task_clock_event_start(event, flags); in task_clock_event_add()
9412 perf_event_update_userpage(event); in task_clock_event_add()
9417 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
9419 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
9422 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
9425 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
9426 u64 time = event->ctx->time + delta; in task_clock_event_read()
9428 task_clock_event_update(event, time); in task_clock_event_read()
9431 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
9433 if (event->attr.type != PERF_TYPE_SOFTWARE) in task_clock_event_init()
9436 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
9442 if (has_branch_stack(event)) in task_clock_event_init()
9445 perf_swevent_init_hrtimer(event); in task_clock_event_init()
9476 static int perf_event_nop_int(struct perf_event *event, u64 value) in perf_event_nop_int() argument
9518 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
9835 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
9846 * if this is a sibling event, acquire the ctx->mutex to protect in perf_try_init_event()
9849 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { in perf_try_init_event()
9854 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
9859 event->pmu = pmu; in perf_try_init_event()
9860 ret = pmu->event_init(event); in perf_try_init_event()
9863 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
9871 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
9880 if (event->parent && event->parent->pmu) { in perf_init_event()
9881 pmu = event->parent->pmu; in perf_init_event()
9882 ret = perf_try_init_event(pmu, event); in perf_init_event()
9888 pmu = idr_find(&pmu_idr, event->attr.type); in perf_init_event()
9891 ret = perf_try_init_event(pmu, event); in perf_init_event()
9898 ret = perf_try_init_event(pmu, event); in perf_init_event()
9914 static void attach_sb_event(struct perf_event *event) in attach_sb_event() argument
9916 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in attach_sb_event()
9919 list_add_rcu(&event->sb_list, &pel->list); in attach_sb_event()
9930 static void account_pmu_sb_event(struct perf_event *event) in account_pmu_sb_event() argument
9932 if (is_sb_event(event)) in account_pmu_sb_event()
9933 attach_sb_event(event); in account_pmu_sb_event()
9936 static void account_event_cpu(struct perf_event *event, int cpu) in account_event_cpu() argument
9938 if (event->parent) in account_event_cpu()
9941 if (is_cgroup_event(event)) in account_event_cpu()
9966 static void account_event(struct perf_event *event) in account_event() argument
9970 if (event->parent) in account_event()
9973 if (event->attach_state & PERF_ATTACH_TASK) in account_event()
9975 if (event->attr.mmap || event->attr.mmap_data) in account_event()
9977 if (event->attr.comm) in account_event()
9979 if (event->attr.namespaces) in account_event()
9981 if (event->attr.task) in account_event()
9983 if (event->attr.freq) in account_event()
9985 if (event->attr.context_switch) { in account_event()
9989 if (has_branch_stack(event)) in account_event()
9991 if (is_cgroup_event(event)) in account_event()
10022 account_event_cpu(event, event->cpu); in account_event()
10024 account_pmu_sb_event(event); in account_event()
10028 * Allocate and initialize an event structure
10039 struct perf_event *event; in perf_event_alloc() local
10048 event = kzalloc(sizeof(*event), GFP_KERNEL); in perf_event_alloc()
10049 if (!event) in perf_event_alloc()
10057 group_leader = event; in perf_event_alloc()
10059 mutex_init(&event->child_mutex); in perf_event_alloc()
10060 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
10062 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
10063 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
10064 INIT_LIST_HEAD(&event->active_list); in perf_event_alloc()
10065 init_event_group(event); in perf_event_alloc()
10066 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
10067 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
10068 INIT_LIST_HEAD(&event->addr_filters.list); in perf_event_alloc()
10069 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
10072 init_waitqueue_head(&event->waitq); in perf_event_alloc()
10073 event->pending_disable = -1; in perf_event_alloc()
10074 init_irq_work(&event->pending, perf_pending_event); in perf_event_alloc()
10076 mutex_init(&event->mmap_mutex); in perf_event_alloc()
10077 raw_spin_lock_init(&event->addr_filters.lock); in perf_event_alloc()
10079 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
10080 event->cpu = cpu; in perf_event_alloc()
10081 event->attr = *attr; in perf_event_alloc()
10082 event->group_leader = group_leader; in perf_event_alloc()
10083 event->pmu = NULL; in perf_event_alloc()
10084 event->oncpu = -1; in perf_event_alloc()
10086 event->parent = parent_event; in perf_event_alloc()
10088 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
10089 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
10091 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
10094 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
10101 event->hw.target = task; in perf_event_alloc()
10104 event->clock = &local_clock; in perf_event_alloc()
10106 event->clock = parent_event->clock; in perf_event_alloc()
10119 event->prog = prog; in perf_event_alloc()
10120 event->orig_overflow_handler = in perf_event_alloc()
10127 event->overflow_handler = overflow_handler; in perf_event_alloc()
10128 event->overflow_handler_context = context; in perf_event_alloc()
10129 } else if (is_write_backward(event)) { in perf_event_alloc()
10130 event->overflow_handler = perf_event_output_backward; in perf_event_alloc()
10131 event->overflow_handler_context = NULL; in perf_event_alloc()
10133 event->overflow_handler = perf_event_output_forward; in perf_event_alloc()
10134 event->overflow_handler_context = NULL; in perf_event_alloc()
10137 perf_event__state_init(event); in perf_event_alloc()
10141 hwc = &event->hw; in perf_event_alloc()
10156 if (!has_branch_stack(event)) in perf_event_alloc()
10157 event->attr.branch_sample_type = 0; in perf_event_alloc()
10160 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
10165 pmu = perf_init_event(event); in perf_event_alloc()
10171 err = exclusive_event_init(event); in perf_event_alloc()
10175 if (has_addr_filter(event)) { in perf_event_alloc()
10176 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, in perf_event_alloc()
10179 if (!event->addr_filter_ranges) { in perf_event_alloc()
10188 if (event->parent) { in perf_event_alloc()
10189 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_alloc()
10192 memcpy(event->addr_filter_ranges, in perf_event_alloc()
10193 event->parent->addr_filter_ranges, in perf_event_alloc()
10199 event->addr_filters_gen = 1; in perf_event_alloc()
10202 if (!event->parent) { in perf_event_alloc()
10203 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
10211 account_event(event); in perf_event_alloc()
10213 return event; in perf_event_alloc()
10216 kfree(event->addr_filter_ranges); in perf_event_alloc()
10219 exclusive_event_destroy(event); in perf_event_alloc()
10222 if (event->destroy) in perf_event_alloc()
10223 event->destroy(event); in perf_event_alloc()
10226 if (is_cgroup_event(event)) in perf_event_alloc()
10227 perf_detach_cgroup(event); in perf_event_alloc()
10228 if (event->ns) in perf_event_alloc()
10229 put_pid_ns(event->ns); in perf_event_alloc()
10230 if (event->hw.target) in perf_event_alloc()
10231 put_task_struct(event->hw.target); in perf_event_alloc()
10232 kfree(event); in perf_event_alloc()
10373 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
10382 if (event == output_event) in perf_event_set_output()
10388 if (output_event->cpu != event->cpu) in perf_event_set_output()
10394 if (output_event->cpu == -1 && output_event->ctx != event->ctx) in perf_event_set_output()
10400 if (output_event->clock != event->clock) in perf_event_set_output()
10407 if (is_write_backward(output_event) != is_write_backward(event)) in perf_event_set_output()
10413 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
10414 event->pmu != output_event->pmu) in perf_event_set_output()
10418 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
10420 if (atomic_read(&event->mmap_count)) in perf_event_set_output()
10430 ring_buffer_attach(event, rb); in perf_event_set_output()
10434 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
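
perf_event_set_output() is reached via PERF_EVENT_IOC_SET_OUTPUT and lets several events on the same CPU share one ring buffer; passing -1 detaches again. A sketch (helper name is illustrative):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Redirect fd's records into target_fd's ring buffer; both fds are
 * assumed to be perf events on the same CPU. */
static int share_output(int fd, int target_fd)
{
        return ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, target_fd);
}
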
10449 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
10455 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
10460 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
10465 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
10469 event->clock = &ktime_get_boot_ns; in perf_event_set_clock()
10473 event->clock = &ktime_get_tai_ns; in perf_event_set_clock()
10480 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
10518 * sys_perf_event_open - open a performance event, associate it to a task/cpu
10523 * @group_fd: group leader event fd
10530 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
10624 * perf_install_in_context() call for this new event to in SYSCALL_DEFINE5()
10636 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
10638 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
10639 err = PTR_ERR(event); in SYSCALL_DEFINE5()
10643 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
10644 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
10654 pmu = event->pmu; in SYSCALL_DEFINE5()
10657 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
10663 event->event_caps |= PERF_EV_CAP_SOFTWARE; in SYSCALL_DEFINE5()
10666 if (is_software_event(event) && in SYSCALL_DEFINE5()
10669 * If the event is a sw event, but the group_leader in SYSCALL_DEFINE5()
10677 } else if (!is_software_event(event) && in SYSCALL_DEFINE5()
10682 * try to add a hardware event, move the whole group to in SYSCALL_DEFINE5()
10692 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
10699 * Look up the group leader (we will attach this event to it): in SYSCALL_DEFINE5()
10712 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
10720 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
10746 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
10751 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, in SYSCALL_DEFINE5()
10774 * if this new event wound up on the same ctx, if so in SYSCALL_DEFINE5()
10806 if (!perf_event_validate_size(event)) { in SYSCALL_DEFINE5()
10813 * Check if the @cpu we're creating an event for is online. in SYSCALL_DEFINE5()
10830 * because we need to serialize with concurrent event creation. in SYSCALL_DEFINE5()
10832 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
10881 * event. What we want here is event in the initial in SYSCALL_DEFINE5()
10892 * perf_install_in_context() which is the point the event is active and in SYSCALL_DEFINE5()
10895 perf_event__header_size(event); in SYSCALL_DEFINE5()
10896 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
10898 event->owner = current; in SYSCALL_DEFINE5()
10900 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
10913 list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
10918 * new event on the sibling_list. This ensures destruction in SYSCALL_DEFINE5()
10938 * and that will take care of freeing the event. in SYSCALL_DEFINE5()
10941 free_event(event); in SYSCALL_DEFINE5()
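
The lines above come from the body of sys_perf_event_open(): allocate the event, resolve its PMU, clock and context, validate the group leader, create the anonymous inode file, install the event into its context and return the fd. A self-contained user-space counterpart, closely following the perf_event_open(2) man page, that counts user-space instructions of the calling thread:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.type = PERF_TYPE_HARDWARE;
            attr.size = sizeof(attr);
            attr.config = PERF_COUNT_HW_INSTRUCTIONS;
            attr.disabled = 1;
            attr.exclude_kernel = 1;
            attr.exclude_hv = 1;

            /* pid 0, cpu -1: measure the calling thread on any CPU; no group. */
            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0)
                    return 1;

            ioctl(fd, PERF_EVENT_IOC_RESET, 0);
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... workload under test ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("instructions: %llu\n", (unsigned long long)count);
            close(fd);
            return 0;
    }
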
10969 struct perf_event *event; in perf_event_create_kernel_counter() local
10976 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
10978 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
10979 err = PTR_ERR(event); in perf_event_create_kernel_counter()
10984 event->owner = TASK_TOMBSTONE; in perf_event_create_kernel_counter()
10986 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
11001 * Check if the @cpu we're creating an event for is online. in perf_event_create_kernel_counter()
11014 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
11019 perf_install_in_context(ctx, event, event->cpu); in perf_event_create_kernel_counter()
11023 return event; in perf_event_create_kernel_counter()
11030 free_event(event); in perf_event_create_kernel_counter()
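
perf_event_create_kernel_counter() is the in-kernel counterpart of the syscall: it allocates the event with owner TASK_TOMBSTONE, finds a context, installs the event, and returns the struct perf_event directly instead of an fd. A minimal sketch of a kernel-side caller, assuming the usual (attr, cpu, task, overflow handler, context) signature; my_overflow and start_cycle_counter are illustrative names, and teardown would go through perf_event_release_kernel():

    #include <linux/perf_event.h>

    /* Runs from interrupt/NMI context whenever the counter overflows. */
    static void my_overflow(struct perf_event *event,
                            struct perf_sample_data *data,
                            struct pt_regs *regs)
    {
            /* e.g. kick a watchdog or record a sample */
    }

    static struct perf_event *start_cycle_counter(int cpu)
    {
            struct perf_event_attr attr = {
                    .type          = PERF_TYPE_HARDWARE,
                    .config        = PERF_COUNT_HW_CPU_CYCLES,
                    .size          = sizeof(attr),
                    .sample_period = 1000000,
                    .pinned        = 1,
            };

            /* task == NULL: a CPU-bound counter owned by the kernel. */
            return perf_event_create_kernel_counter(&attr, cpu, NULL,
                                                    my_overflow, NULL);
    }
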
11040 struct perf_event *event, *tmp; in perf_pmu_migrate_context() local
11051 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, in perf_pmu_migrate_context()
11053 perf_remove_from_context(event, 0); in perf_pmu_migrate_context()
11054 unaccount_event_cpu(event, src_cpu); in perf_pmu_migrate_context()
11056 list_add(&event->migrate_entry, &events); in perf_pmu_migrate_context()
11072 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
11073 if (event->group_leader == event) in perf_pmu_migrate_context()
11076 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
11077 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
11078 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
11079 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
11080 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
11088 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
11089 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
11090 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
11091 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
11092 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
11093 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
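
perf_pmu_migrate_context() above is what PMU drivers with a single shared context (uncore-style PMUs) call when the CPU carrying their events goes offline: each event is removed from the source CPU's context, re-accounted, and re-installed on the destination CPU. A hotplug-callback sketch under assumed names (struct my_pmu, its ->cpu and ->node members, and my_pmu_offline_cpu are hypothetical):

    #include <linux/cpuhotplug.h>
    #include <linux/cpumask.h>
    #include <linux/perf_event.h>

    struct my_pmu {
            struct pmu        pmu;
            int               cpu;   /* CPU currently hosting the events */
            struct hlist_node node;
    };

    static int my_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
    {
            struct my_pmu *box = hlist_entry_safe(node, struct my_pmu, node);
            int target;

            if (!box || cpu != box->cpu)
                    return 0;

            /* Pick any other online CPU and move the events over. */
            target = cpumask_any_but(cpu_online_mask, cpu);
            if (target >= nr_cpu_ids)
                    return 0;

            perf_pmu_migrate_context(&box->pmu, cpu, target);
            box->cpu = target;
            return 0;
    }
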
11164 * Remove this event from the parent's list in perf_event_exit_event()
11241 * When a child task exits, feed back event values to parent events.
11248 struct perf_event *event, *tmp; in perf_event_exit_task() local
11252 list_for_each_entry_safe(event, tmp, &child->perf_event_list, in perf_event_exit_task()
11254 list_del_init(&event->owner_entry); in perf_event_exit_task()
11261 smp_store_release(&event->owner, NULL); in perf_event_exit_task()
11277 static void perf_free_event(struct perf_event *event, in perf_free_event() argument
11280 struct perf_event *parent = event->parent; in perf_free_event()
11286 list_del_init(&event->child_list); in perf_free_event()
11292 perf_group_detach(event); in perf_free_event()
11293 list_del_event(event, ctx); in perf_free_event()
11295 free_event(event); in perf_free_event()
11308 struct perf_event *event, *tmp; in perf_event_free_task() local
11329 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) in perf_event_free_task()
11330 perf_free_event(event, ctx); in perf_event_free_task()
11343 * _free_event()'s put_task_struct(event->hw.target) will be a in perf_event_free_task()
11385 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) in perf_event_attrs() argument
11387 if (!event) in perf_event_attrs()
11390 return &event->attr; in perf_event_attrs()
11394 * Inherit an event from parent task to child task.
11461 * Make the child state follow the state of the parent event, in inherit_event()
11499 * Link this into the parent event's child list in inherit_event()
11508 * Inherits an event group.
11546 * Creates the child task context and tries to inherit the event-group.
11549 * inherited_all set when we 'fail' to inherit an orphaned event; this is
11557 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
11565 if (!event->attr.inherit) { in inherit_task_group()
11585 ret = inherit_group(event, parent, parent_ctx, in inherit_task_group()
11601 struct perf_event *event; in perf_event_init_context() local
11635 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { in perf_event_init_context()
11636 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
11651 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { in perf_event_init_context()
11652 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
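
inherit_task_group() and perf_event_init_context() above walk the parent's pinned and flexible groups at fork and clone only those events whose attribute asks for it; the !event->attr.inherit check skips everything else. From user space, counting children forked after the counter is opened is requested with a single attribute bit:

    struct perf_event_attr attr = {
            .type    = PERF_TYPE_HARDWARE,
            .size    = sizeof(attr),
            .config  = PERF_COUNT_HW_CPU_CYCLES,
            .inherit = 1,   /* child tasks created later are counted too */
    };
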
11756 struct perf_event *event; in __perf_event_exit_context() local
11760 list_for_each_entry(event, &ctx->event_list, event_entry) in __perf_event_exit_context()
11761 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()