/kernel/ |
D | auditsc.c |
  914 static inline void audit_proctitle_free(struct audit_context *context) in audit_proctitle_free() argument
  916 kfree(context->proctitle.value); in audit_proctitle_free()
  917 context->proctitle.value = NULL; in audit_proctitle_free()
  918 context->proctitle.len = 0; in audit_proctitle_free()
  921 static inline void audit_free_module(struct audit_context *context) in audit_free_module() argument
  923 if (context->type == AUDIT_KERN_MODULE) { in audit_free_module()
  924 kfree(context->module.name); in audit_free_module()
  925 context->module.name = NULL; in audit_free_module()
  928 static inline void audit_free_names(struct audit_context *context) in audit_free_names() argument
  932 list_for_each_entry_safe(n, next, &context->names_list, list) { in audit_free_names()
  [all …]
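The auditsc.c matches above all follow the same teardown idiom: kfree() the buffer, then reset the pointer and length so the context cannot be double-freed or read stale. A minimal sketch of that idiom (the demo_ names are hypothetical, not from auditsc.c):

	#include <linux/slab.h>

	struct demo_buf {
		char	*value;
		size_t	len;
	};

	static inline void demo_buf_free(struct demo_buf *buf)
	{
		kfree(buf->value);	/* kfree(NULL) is a safe no-op */
		buf->value = NULL;	/* clear so a repeat free cannot double-free */
		buf->len = 0;
	}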
|
D | audit_tree.c |
  525 static void audit_tree_log_remove_rule(struct audit_context *context, in audit_tree_log_remove_rule() argument
  532 ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE); in audit_tree_log_remove_rule()
  542 static void kill_rules(struct audit_context *context, struct audit_tree *tree) in kill_rules() argument
  553 audit_tree_log_remove_rule(context, rule); in kill_rules()
  972 void audit_kill_trees(struct audit_context *context) in audit_kill_trees() argument
  974 struct list_head *list = &context->killed_trees; in audit_kill_trees()
  983 kill_rules(context, victim); in audit_kill_trees()
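audit_tree_log_remove_rule() is built on the standard audit_log_start()/audit_log_format()/audit_log_end() sequence, with the audit_context threaded through audit_log_start(). A hedged sketch of that pairing, with an illustrative message rather than the kernel's real one:

	#include <linux/audit.h>

	static void demo_log_config_change(struct audit_context *ctx)
	{
		struct audit_buffer *ab;

		ab = audit_log_start(ctx, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
		if (!ab)
			return;		/* auditing disabled or allocation failed */
		audit_log_format(ab, "op=remove_rule dir=example");
		audit_log_end(ab);
	}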
|
D | audit.h |
  108 } context; member
  299 extern void audit_kill_trees(struct audit_context *context);
  330 #define audit_kill_trees(context) BUG() argument
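The audit.h matches show the usual conditional-compilation pattern: a real prototype when syscall auditing is built in, and a BUG() stub macro otherwise, so a call that should be unreachable on a non-audit build fails loudly. In outline:

	#ifdef CONFIG_AUDITSYSCALL
	extern void audit_kill_trees(struct audit_context *context);
	#else
	#define audit_kill_trees(context)	BUG()
	#endif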
|
D | ptrace.c |
  1194 tmp = mm->context.exec_fdpic_loadmap; in ptrace_request()
  1197 tmp = mm->context.interp_fdpic_loadmap; in ptrace_request()
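These two lines sit in the PTRACE_GETFDPIC request handler, which copies one of the FDPIC load-map addresses out of the architecture's mm->context. Roughly (a paraphrase of the surrounding switch, not verbatim):

	switch (addr) {
	case PTRACE_GETFDPIC_EXEC:
		tmp = mm->context.exec_fdpic_loadmap;	/* main executable's map */
		break;
	case PTRACE_GETFDPIC_INTERP:
		tmp = mm->context.interp_fdpic_loadmap;	/* ELF interpreter's map */
		break;
	}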
|
D | audit.c |
  1086 static void audit_log_common_recv_msg(struct audit_context *context, in audit_log_common_recv_msg() argument
  1097 *ab = audit_log_start(context, GFP_KERNEL, msg_type); in audit_log_common_recv_msg()
|
/kernel/time/ |
D | Kconfig |
  52 # and not from the timer interrupt context
  155 tickless cputime accounting. The former case relies on context
  159 bool "Force user context tracking"
  164 support the user context tracking subsystem. But there are also
  169 user context tracking backend but doesn't yet fulfill all the
  172 for user context tracking and the subsystems that rely on it: RCU
  175 dynticks subsystem by forcing the user context tracking on all
  179 architecture backend for the user context tracking.
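The help text above concerns user context tracking: the architecture's entry code tells the subsystem when a CPU crosses the user/kernel boundary so RCU and tickless cputime accounting can treat user mode as quiescent. A minimal sketch, assuming the classic user_enter()/user_exit() wrappers from <linux/context_tracking.h>; exactly where an arch places these calls varies:

	#include <linux/context_tracking.h>

	static void demo_return_to_user(void)
	{
		user_enter();	/* CPU will run user code: RCU may treat it as quiescent */
	}

	static void demo_kernel_entry(void)
	{
		user_exit();	/* back in the kernel: resume normal tracking */
	}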
|
/kernel/events/ |
D | hw_breakpoint.c |
  743 void *context, in register_user_hw_breakpoint() argument
  747 context); in register_user_hw_breakpoint()
  844 void *context) in register_wide_hw_breakpoint() argument
  857 triggered, context); in register_wide_hw_breakpoint()
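In both registration paths the void *context argument is an opaque cookie stored with the event for the overflow handler's user. A hedged usage sketch modeled on samples/hw_breakpoint in the kernel tree; the watched address is hypothetical:

	#include <linux/hw_breakpoint.h>
	#include <linux/perf_event.h>

	static void demo_hbp_handler(struct perf_event *bp,
				     struct perf_sample_data *data,
				     struct pt_regs *regs)
	{
		pr_info("watched word touched\n");
	}

	static int demo_watch_word(unsigned long addr)
	{
		struct perf_event_attr attr;
		struct perf_event * __percpu *bp;

		hw_breakpoint_init(&attr);
		attr.bp_addr = addr;
		attr.bp_len  = HW_BREAKPOINT_LEN_4;
		attr.bp_type = HW_BREAKPOINT_W;

		/* the trailing NULL is the void *context cookie seen above */
		bp = register_wide_hw_breakpoint(&attr, demo_hbp_handler, NULL);
		if (IS_ERR((void __force *)bp))
			return PTR_ERR((void __force *)bp);

		unregister_wide_hw_breakpoint(bp);
		return 0;
	}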
|
D | core.c |
  11872 void *context, int cgroup_fd) in perf_event_alloc() argument
  11955 context = parent_event->overflow_handler_context; in perf_event_alloc()
  11970 event->overflow_handler_context = context; in perf_event_alloc()
  12806 void *context) in perf_event_create_kernel_counter() argument
  12822 overflow_handler, context, -1); in perf_event_create_kernel_counter()
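perf_event_alloc() stores the cookie as event->overflow_handler_context (inheriting the parent's when cloning), and perf_event_create_kernel_counter() is the in-kernel entry point that supplies it. A hedged sketch of a kernel-side counter; the demo_ names are hypothetical:

	#include <linux/perf_event.h>

	static void demo_overflow(struct perf_event *event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
	{
		/* event->overflow_handler_context is the cookie passed below */
	}

	static struct perf_event *demo_cycles_counter(int cpu, void *cookie)
	{
		struct perf_event_attr attr = {
			.type		= PERF_TYPE_HARDWARE,
			.config		= PERF_COUNT_HW_CPU_CYCLES,
			.size		= sizeof(attr),
			.sample_period	= 1000000,
		};

		return perf_event_create_kernel_counter(&attr, cpu, NULL,
							demo_overflow, cookie);
	}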
|
/kernel/trace/ |
D | trace_entries.h |
  421 __field( int, context )
  427 __entry->context,
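trace_entries.h defines ftrace's built-in entries with its own FTRACE_ENTRY() macros; the __field()/__entry-> pair matched above plays the same role as in the public TRACE_EVENT() form, sketched here with a hypothetical event:

	#include <linux/tracepoint.h>

	TRACE_EVENT(demo_sample,
		TP_PROTO(int context),
		TP_ARGS(context),
		TP_STRUCT__entry(
			__field(int, context)		/* reserve a slot in the record */
		),
		TP_fast_assign(
			__entry->context = context;	/* copy the value at trace time */
		),
		TP_printk("context=%d", __entry->context)
	);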
|
D | trace_osnoise.c |
  330 int context; /* timer context */ member
  585 entry->context = sample->context; in __trace_timerlat_sample()
  1791 s.context = IRQ_CONTEXT; in timerlat_irq()
  1911 s.context = THREAD_CONTEXT; in timerlat_main()
  2518 s.context = THREAD_URET; in timerlat_fd_read()
  2553 s.context = THREAD_CONTEXT; in timerlat_fd_read()
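timerlat tags every sample with the context it was measured in, and trace_output.c (below) maps the value to a name ("irq", "thread", "user-ret"). A paraphrase of the three values, not copied verbatim from trace_osnoise.c:

	enum {
		IRQ_CONTEXT,	/* latency sampled in the timer IRQ handler */
		THREAD_CONTEXT,	/* latency sampled in the timerlat kthread */
		THREAD_URET,	/* latency sampled on return from the user workload */
	};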
|
D | Kconfig |
  151 # enabled by all tracers (context switch and event tracer) they select TRACING.
  445 In the context of high-performance computing (HPC), the Operating
  448 context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
  491 timer latency observed at the hardirq context before the
  500 stacktrace at the IRQ context, which helps to identify the code
  517 bool "Trace process context switches and events"
  812 generally used outside of that context, and is normally
|
D | trace_events_user.c |
  1607 int context; in user_event_perf() local
  1610 &regs, &context); in user_event_perf()
  1624 perf_trace_buf_submit(perf_entry, size, context, in user_event_perf()
  1631 perf_swevent_put_recursion_context(context); in user_event_perf()
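user_event_perf() follows the standard perf trace-buffer sequence: perf_trace_buf_alloc() hands back both a buffer and a recursion context; perf_trace_buf_submit() consumes the context on the submit path, while perf_swevent_put_recursion_context() releases it when the event is discarded instead. A hedged sketch (the size, type, and hlist head are hypothetical):

	#include <linux/trace_events.h>

	static void demo_perf_emit(u16 type, u32 size, struct hlist_head *head)
	{
		struct pt_regs *regs;
		void *entry;
		int context;	/* perf recursion context, as in the matches above */

		entry = perf_trace_buf_alloc(size, &regs, &context);
		if (!entry)
			return;

		perf_fetch_caller_regs(regs);
		/* ... fill in the entry payload here ... */
		perf_trace_buf_submit(entry, size, context, type, 1, regs, head, NULL);
	}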
|
D | trace_events_hist.c |
  3394 struct snapshot_context *context = cond_data; in cond_snapshot_update() local
  3403 track_val = get_track_val(track_data->hist_data, context->elt, in cond_snapshot_update()
  3410 memcpy(track_data->key, context->key, track_data->key_len); in cond_snapshot_update()
  3412 elt_data = context->elt->private_data; in cond_snapshot_update()
  3430 struct snapshot_context context; in save_track_data_snapshot() local
  3432 context.elt = elt; in save_track_data_snapshot()
  3433 context.key = key; in save_track_data_snapshot()
  3435 tracing_snapshot_cond(file->tr, &context); in save_track_data_snapshot()
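save_track_data_snapshot() fills a small snapshot_context on the stack and threads it through tracing_snapshot_cond() as the opaque cond_data that cond_snapshot_update() later unpacks. A sketch of that cookie pattern, with the struct layout paraphrased from the matches:

	struct demo_snapshot_context {
		struct tracing_map_elt	*elt;
		void			*key;
	};

	static void demo_save_snapshot(struct trace_array *tr,
				       struct tracing_map_elt *elt, void *key)
	{
		struct demo_snapshot_context context = {
			.elt = elt,
			.key = key,
		};

		tracing_snapshot_cond(tr, &context);	/* callback receives &context as cond_data */
	}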
|
D | trace_output.c |
  1463 timerlat_lat_context[field->context], in trace_timerlat_print()
  1480 field->context, in trace_timerlat_raw()
|
/kernel/debug/kdb/ |
D | kdb_cmds | 2 # These commands are executed in kdb_init() context, no SMP, no
|
/kernel/rcu/ |
D | Kconfig |
  84 that uses only voluntary context switch (not preemption!),
  100 that uses only context switch (including preemption) and
  102 context switches on all online CPUs, including idle ones,
  245 workloads will incur significant increases in context-switch
|
/kernel/irq/ |
D | Kconfig | 30 # Support for delayed migration from interrupt context
|