Lines Matching +full:protect +full:- +full:exec
4 * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
12 * For licensing details see kernel-base/COPYING
21 * Kernel-internal data types and definitions:
98 * -1ULL means invalid/unknown.
108 * The hw_idx index is between -1 (unknown) and max depth,
128 int idx; /* index in shared_regs->regs[] */
132 * struct hw_perf_event - performance event hardware details:
154 /* for tp_event->class */
199 #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
276 * struct pmu - generic performance monitoring unit
289 * various common per-pmu feature flags
303 * Fully disable/enable this PMU, can be used to protect from the PMI
313 * -ENOENT -- @event is not for this PMU
315 * -ENODEV -- @event is for this PMU but PMU not present
316 * -EBUSY -- @event is for this PMU but PMU temporarily unavailable
317 * -EINVAL -- @event is for this PMU but @event is not valid
318 * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
319 * -EACCES -- @event is for this PMU, @event is valid, but no privileges
321 * 0 -- @event is for this PMU and valid
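A minimal sketch of an ->event_init() honouring this return-code contract (my_pmu_event_init and the specific checks are hypothetical; only the return values follow the rules above):

	static int my_pmu_event_init(struct perf_event *event)
	{
		/* Not one of ours: let the core try the next PMU. */
		if (event->attr.type != event->pmu->type)
			return -ENOENT;

		/* Ours, but this hypothetical PMU cannot do sampling. */
		if (is_sampling_event(event))
			return -EOPNOTSUPP;

		return 0;
	}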
335 * Flags for ->add()/->del()/->start()/->stop(). There are
344 * transaction, see the ->*_txn() methods.
353 * ->add() called without PERF_EF_START should result in the same state
354 * as ->add() followed by ->stop().
356 * ->del() must always stop an event with PERF_EF_UPDATE semantics. If it calls
357 * ->stop(), that must deal with already being stopped without
367 * returns !0. ->start() will be used to continue.
372 * is on -- will be called from NMI context when the PMU generates
375 * ->stop() with PERF_EF_UPDATE will read the counter and update
376 * period/count values like ->read() would.
378 * ->start() with PERF_EF_RELOAD will reprogram the counter
379 * value, must be preceded by a ->stop() with PERF_EF_UPDATE.
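Put together, a driver's add/del/start/stop quartet typically follows the shape below (a sketch; my_pmu_enable_hw(), my_pmu_disable_hw() and my_pmu_update_count() are hypothetical helpers, while the flag handling mirrors the rules above):

	static void my_pmu_start(struct perf_event *event, int flags)
	{
		if (flags & PERF_EF_RELOAD)	/* reprogram the saved counter value */
			my_pmu_enable_hw(event, local64_read(&event->hw.prev_count));
		else
			my_pmu_enable_hw(event, 0);
		event->hw.state = 0;
	}

	static void my_pmu_stop(struct perf_event *event, int flags)
	{
		my_pmu_disable_hw(event);
		event->hw.state |= PERF_HES_STOPPED;

		if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
			my_pmu_update_count(event);	/* fold hw count into event->count */
			event->hw.state |= PERF_HES_UPTODATE;
		}
	}

	static int my_pmu_add(struct perf_event *event, int flags)
	{
		/* ->add() without PERF_EF_START == ->add() followed by ->stop() */
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
		if (flags & PERF_EF_START)
			my_pmu_start(event, PERF_EF_RELOAD);
		return 0;
	}

	static void my_pmu_del(struct perf_event *event, int flags)
	{
		/* ->del() must always stop with PERF_EF_UPDATE semantics */
		my_pmu_stop(event, PERF_EF_UPDATE);
	}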
397 * Start the transaction, after this ->add() doesn't need to
404 * If ->start_txn() disabled the ->add() schedulability test
405 * then ->commit_txn() is required to perform one. On success
407 * open until ->cancel_txn() is called.
413 * Will cancel the transaction, assumes ->del() is called
414 * for each successful ->add() during the transaction.
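Loosely modeled on how the core schedules an event group (kernel/events/core.c:group_sched_in(); error handling trimmed), the transaction callbacks are used roughly as in this sketch:

	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);

	if (leader->pmu->add(leader, PERF_EF_START))
		goto cancel;
	for_each_sibling_event(sibling, leader) {
		if (sibling->pmu->add(sibling, PERF_EF_START))
			goto cancel;
	}

	if (!pmu->commit_txn(pmu))
		return 0;		/* the whole group is now scheduled */
cancel:
	/* ->del() each event that was successfully ->add()ed, then: */
	pmu->cancel_txn(pmu);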
422 * if no implementation is provided it will default to: event->hw.idx + 1.
427 * context-switches callback
438 * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
448 * Set up pmu-private data structures for an AUX area
455 * Free pmu-private AUX data structures
461 * state, so that preempting ->start()/->stop() callbacks does
475 * supplied filters are valid, -errno otherwise.
485 * translate hw-agnostic filters into hardware configuration in
488 * Runs as part of the filter sync sequence that is done in ->start()
502 * or non-zero for "match".
508 * Filter events for PMU-specific reasons.
525 * struct perf_addr_filter - address range filter definition
527 * @path: object file's path for file-based filters
532 * This is a hardware-agnostic filter configuration as specified by the user.
543 * struct perf_addr_filters_head - container for address range filters
547 * @nr_file_filters: number of file-based filters
564 * enum perf_event_state - the states of an event:
567 PERF_EVENT_STATE_DEAD = -4,
568 PERF_EVENT_STATE_EXIT = -3,
569 PERF_EVENT_STATE_ERROR = -2,
570 PERF_EVENT_STATE_OFF = -1,
620 if ((event)->group_leader == (event)) \
621 list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
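For example, a driver can use for_each_sibling_event() at ->event_init() time to check that a whole group fits on the hardware (a sketch; MY_PMU_NUM_COUNTERS and the function name are hypothetical):

	static int my_pmu_validate_group(struct perf_event *event)
	{
		struct perf_event *leader = event->group_leader;
		struct perf_event *sibling;
		int needed = 1;				/* the group leader */

		for_each_sibling_event(sibling, leader)
			needed++;
		if (event != leader)			/* @event is not on the list yet */
			needed++;

		return needed > MY_PMU_NUM_COUNTERS ? -EINVAL : 0;
	}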
624 * struct perf_event - performance event kernel representation:
630 * modifications require ctx->lock
636 * Locked for modification by both ctx->mutex and ctx->lock; holding
674 * been scheduled in, if this is a per-task event)
698 * Protect attach/detach and child_list:
733 /* vma address array for file-based filters */
780 * struct perf_event_context - event context structure
787 * Protect the states of the events in the list,
792 * Protect the list of events. Locking either mutex or lock
849 * struct perf_event_cpu_context - per cpu event context structure
872 * Per-CPU storage for iterators used in visit_groups_merge. The default
903 * This is a per-cpu dynamically allocated data structure.
926 ctx ? lockdep_is_held(&ctx->lock) in perf_cgroup_from_task()
1039 data->addr = addr; in perf_sample_data_init()
1040 data->raw = NULL; in perf_sample_data_init()
1041 data->br_stack = NULL; in perf_sample_data_init()
1042 data->period = period; in perf_sample_data_init()
1043 data->weight = 0; in perf_sample_data_init()
1044 data->data_src.val = PERF_MEM_NA; in perf_sample_data_init()
1045 data->txn = 0; in perf_sample_data_init()
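A typical caller is a driver's overflow/PMI handler, roughly as in this sketch (regs comes from the interrupt; my_pmu_stop() is the hypothetical ->stop() helper from the sketch above):

	struct perf_sample_data data;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	if (perf_event_overflow(event, &data, regs))
		my_pmu_stop(event, 0);		/* the core asked us to throttle */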
1074 if (likely(event->overflow_handler == perf_event_output_forward)) in is_default_overflow_handler()
1076 if (unlikely(event->overflow_handler == perf_event_output_backward)) in is_default_overflow_handler()
1095 struct perf_event_attr *attr = &event->attr; in event_has_any_exclude_flag()
1097 return attr->exclude_idle || attr->exclude_user || in event_has_any_exclude_flag()
1098 attr->exclude_kernel || attr->exclude_hv || in event_has_any_exclude_flag()
1099 attr->exclude_guest || attr->exclude_host; in event_has_any_exclude_flag()
1104 return event->attr.sample_period != 0; in is_sampling_event()
1112 return event->event_caps & PERF_EV_CAP_SOFTWARE; in is_software_event()
1120 return event->ctx->pmu->task_ctx_nr == perf_sw_context; in in_software_context()
1125 return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE; in is_exclusive_pmu()
1138 * When generating a perf sample in-line, instead of from an interrupt /
1140 * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
1143 * - ip for PERF_SAMPLE_IP
1144 * - cs for user_mode() tests
1145 * - sp for PERF_SAMPLE_CALLCHAIN
1146 * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
1194 task->sched_migrated = 1; in perf_event_task_migrate()
1203 if (perf_sw_migrate_enabled() && task->sched_migrated) { in perf_event_task_sched_in()
1208 task->sched_migrated = 0; in perf_event_task_sched_in()
1233 * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading in perf_get_guest_cbs()
1236 * non-NULL perf_guest_cbs is visible to readers, and to prevent a in perf_get_guest_cbs()
1245 extern void perf_event_comm(struct task_struct *tsk, bool exec);
1271 if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) { in perf_callchain_store_context()
1272 struct perf_callchain_entry *entry = ctx->entry; in perf_callchain_store_context()
1273 entry->ip[entry->nr++] = ip; in perf_callchain_store_context()
1274 ++ctx->contexts; in perf_callchain_store_context()
1277 ctx->contexts_maxed = true; in perf_callchain_store_context()
1278 return -1; /* no more room, stop walking the stack */ in perf_callchain_store_context()
1284 if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) { in perf_callchain_store()
1285 struct perf_callchain_entry *entry = ctx->entry; in perf_callchain_store()
1286 entry->ip[entry->nr++] = ip; in perf_callchain_store()
1287 ++ctx->nr; in perf_callchain_store()
1290 return -1; /* no more room, stop walking the stack */ in perf_callchain_store()
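Architectures feed these helpers from their unwinders; a kernel-side hook has roughly this shape (a sketch, with the frame walk elided since it is arch-specific):

	void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				   struct pt_regs *regs)
	{
		if (perf_callchain_store(entry, instruction_pointer(regs)))
			return;				/* entry is full */

		/*
		 * The arch-specific stack walk would continue here, calling
		 * perf_callchain_store() once per caller it can unwind.
		 */
	}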
1318 return sysctl_perf_event_paranoid > -1; in perf_is_paranoid()
1324 return -EACCES; in perf_allow_kernel()
1332 return -EACCES; in perf_allow_cpu()
1339 if (sysctl_perf_event_paranoid > -1 && !perfmon_capable()) in perf_allow_tracepoint()
1340 return -EPERM; in perf_allow_tracepoint()
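Drivers typically consult these helpers from ->event_init(); a common pattern is the sketch below (assuming the event counts kernel-mode activity):

	if (!event->attr.exclude_kernel) {
		int ret = perf_allow_kernel(&event->attr);

		if (ret)
			return ret;	/* -EACCES without CAP_PERFMON / relaxed perf_event_paranoid */
	}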
1363 return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; in has_branch_stack()
1368 return event->attr.branch_sample_type != 0; in needs_branch_stack()
1373 return event->pmu->setup_aux; in has_aux()
1378 return !!event->attr.write_backward; in is_write_backward()
1383 return event->pmu->nr_addr_filters; in has_addr_filter()
1392 struct perf_addr_filters_head *ifh = &event->addr_filters; in perf_event_addr_filters()
1394 if (event->parent) in perf_event_addr_filters()
1395 ifh = &event->parent->addr_filters; in perf_event_addr_filters()
1442 unsigned long size) { return -EINVAL; } in perf_aux_output_skip()
1457 static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); } in perf_event_get()
1460 return ERR_PTR(-EINVAL); in perf_get_event()
1464 return ERR_PTR(-EINVAL); in perf_event_attrs()
1469 return -EINVAL; in perf_event_read_local()
1472 static inline int perf_event_task_disable(void) { return -EINVAL; } in perf_event_task_disable()
1473 static inline int perf_event_task_enable(void) { return -EINVAL; } in perf_event_task_enable()
1476 return -EINVAL; in perf_event_refresh()
1500 static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } in perf_event_comm() argument
1509 static inline int perf_swevent_get_recursion_context(void) { return -1; } in perf_swevent_get_recursion_context()
1514 static inline int __perf_event_disable(void *info) { return -1; } in __perf_event_disable()
1519 return -EINVAL; in perf_event_period()
1535 return frag->pad < sizeof(u64); in perf_raw_frag_last()