/kernel/trace/ |
D | trace_events_filter_test.h |
    11  TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),
    13  TP_ARGS(a, b, c, d, e, f, g, h),
    22  __field(int, g)
    33  __entry->g = g;
    39  __entry->e, __entry->f, __entry->g, __entry->h)
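The fragments above are pieces of a single TRACE_EVENT() definition used by the event-filter self-test. A sketch of the shape they come from follows; the event name ftrace_test_filter, the printk format, and everything not visible in the fragments are assumptions, and the usual TRACE_SYSTEM / TRACE_INCLUDE_FILE header boilerplate is omitted.

/* Sketch only: reconstructed from the fragments above, not copied from
 * the header; event name and format string are assumptions. */
TRACE_EVENT(ftrace_test_filter,

	TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),

	TP_ARGS(a, b, c, d, e, f, g, h),

	TP_STRUCT__entry(
		__field(int, a)
		__field(int, b)
		__field(int, c)
		__field(int, d)
		__field(int, e)
		__field(int, f)
		__field(int, g)
		__field(int, h)
	),

	TP_fast_assign(
		__entry->a = a;
		__entry->b = b;
		__entry->c = c;
		__entry->d = d;
		__entry->e = e;
		__entry->f = f;
		__entry->g = g;
		__entry->h = h;
	),

	TP_printk("a %d b %d c %d d %d e %d f %d g %d h %d",
		  __entry->a, __entry->b, __entry->c, __entry->d,
		  __entry->e, __entry->f, __entry->g, __entry->h)
);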
|
D | ftrace.c |
    3447  static int ftrace_match(char *str, struct ftrace_glob *g)   in ftrace_match() argument
    3452  switch (g->type) {   in ftrace_match()
    3454  if (strcmp(str, g->search) == 0)   in ftrace_match()
    3458  if (strncmp(str, g->search, g->len) == 0)   in ftrace_match()
    3462  if (strstr(str, g->search))   in ftrace_match()
    3467  if (slen >= g->len &&   in ftrace_match()
    3468  memcmp(str + slen - g->len, g->search, g->len) == 0)   in ftrace_match()
    5698  struct task_struct *g, *t;   in alloc_retstack_tasklist() local
    5713  do_each_thread(g, t) {   in alloc_retstack_tasklist()
    5726  } while_each_thread(g, t);   in alloc_retstack_tasklist()
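ftrace_match() dispatches on the glob type that the filter parser computed from a pattern such as "func", "func*", "*func*" or "*func". A standalone sketch of that dispatch is shown below; the struct and MATCH_* names mirror the kernel's struct ftrace_glob, but the code is a reconstruction for illustration, not the file's code.

/* Userspace sketch of the glob dispatch visible above; reconstructed,
 * not copied from kernel/trace/ftrace.c. */
#include <string.h>
#include <stdbool.h>

enum match_type { MATCH_FULL, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY, MATCH_END_ONLY };

struct glob {
	const char *search;	/* pattern with the '*' wildcards stripped */
	size_t len;		/* strlen(search), cached by the parser */
	enum match_type type;
};

static bool glob_match_str(const char *str, const struct glob *g)
{
	size_t slen;

	switch (g->type) {
	case MATCH_FULL:		/* "func"   */
		return strcmp(str, g->search) == 0;
	case MATCH_FRONT_ONLY:		/* "func*"  */
		return strncmp(str, g->search, g->len) == 0;
	case MATCH_MIDDLE_ONLY:		/* "*func*" */
		return strstr(str, g->search) != NULL;
	case MATCH_END_ONLY:		/* "*func"  */
		slen = strlen(str);
		return slen >= g->len &&
		       memcmp(str + slen - g->len, g->search, g->len) == 0;
	}
	return false;
}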
|
D | Kconfig |
    260  ftrace interface, e.g.:
    406  Tracing also is possible using the ftrace interface, e.g.:
    551  However, it should be safe to use on e.g. unused portion of VRAM.
|
D | trace_events_filter.c |
    2334  .e = ve, .f = vf, .g = vg, .h = vh }, \
|
/kernel/ |
D | hung_task.c |
    137  static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)   in rcu_lock_break() argument
    141  get_task_struct(g);   in rcu_lock_break()
    146  can_cont = pid_alive(g) && pid_alive(t);   in rcu_lock_break()
    148  put_task_struct(g);   in rcu_lock_break()
    162  struct task_struct *g, *t;   in check_hung_uninterruptible_tasks() local
    172  for_each_process_thread(g, t) {   in check_hung_uninterruptible_tasks()
    176  if (!rcu_lock_break(g, t))   in check_hung_uninterruptible_tasks()
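check_hung_uninterruptible_tasks() walks every thread under rcu_read_lock() and periodically calls rcu_lock_break() so a long walk does not hold the RCU read lock indefinitely: both cursor tasks are pinned, RCU is dropped and re-taken, and the walk is abandoned if either task has since exited. A condensed sketch of that pattern is below; it is a reconstruction, not the code from kernel/hung_task.c, and the batch size is arbitrary.

/* Sketch of the walk/break pattern; illustrative only. */
#include <linux/sched.h>
#include <linux/rcupdate.h>

static bool my_lock_break(struct task_struct *g, struct task_struct *t)
{
	bool can_cont;

	/* Pin both cursor tasks so they cannot be freed while RCU is dropped. */
	get_task_struct(g);
	get_task_struct(t);
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
	/* If either task exited meanwhile, the walk cannot safely continue. */
	can_cont = pid_alive(g) && pid_alive(t);
	put_task_struct(t);
	put_task_struct(g);

	return can_cont;
}

static void my_scan_tasks(void)
{
	struct task_struct *g, *t;
	int batch = 1024;	/* arbitrary batch size for this sketch */

	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (!--batch) {
			batch = 1024;
			if (!my_lock_break(g, t))
				goto unlock;
		}
		/* per-task check would go here */
	}
unlock:
	rcu_read_unlock();
}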
|
D | sys.c |
    181  struct task_struct *g, *p;   in SYSCALL_DEFINE3() local
    228  do_each_thread(g, p) {   in SYSCALL_DEFINE3()
    231  } while_each_thread(g, p);   in SYSCALL_DEFINE3()
    251  struct task_struct *g, *p;   in SYSCALL_DEFINE2() local
    296  do_each_thread(g, p) {   in SYSCALL_DEFINE2()
    302  } while_each_thread(g, p);   in SYSCALL_DEFINE2()
    971  struct task_struct *g;   in SYSCALL_DEFINE2() local
    974  g = pid_task(pgrp, PIDTYPE_PGID);   in SYSCALL_DEFINE2()
    975  if (!g || task_session(g) != task_session(group_leader))   in SYSCALL_DEFINE2()
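Most hits in this listing are the classic whole-system thread walk, where g tracks each thread-group leader and p the individual thread. A minimal sketch of the legacy do_each_thread()/while_each_thread() pair is shown here; it is illustrative only, not copied from kernel/sys.c. The newer for_each_process_thread() seen elsewhere in this listing (and in the hung_task sketch above) is the single-macro replacement.

/* Illustrative sketch of the legacy thread-walk pair; requires
 * tasklist_lock (or RCU) held across the walk. */
#include <linux/sched.h>
#include <linux/printk.h>

static void dump_all_threads(void)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		/* p visits every thread in the system; g is its group leader */
		pr_info("pid %d tgid %d comm %s\n",
			task_pid_nr(p), task_tgid_nr(p), p->comm);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}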
|
D | cpu.c |
    308  struct task_struct *g, *p;   in check_for_tasks() local
    311  for_each_process_thread(g, p) {   in check_for_tasks()
|
D | exit.c |
    304  struct task_struct *c, *g, *p = current;   in mm_update_next_owner() local
    343  for_each_process(g) {   in mm_update_next_owner()
    344  if (g->flags & PF_KTHREAD)   in mm_update_next_owner()
    346  for_each_thread(g, c) {   in mm_update_next_owner()
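mm_update_next_owner() uses the other common shape: an outer for_each_process() walk that skips kernel threads, with an inner for_each_thread() over each group. The sketch below reconstructs only that shape; the search condition, names, and return convention are illustrative, not the real owner-selection logic.

/* Sketch of a nested process/thread walk; illustrative only. */
#include <linux/sched.h>

static struct task_struct *find_mm_user(struct mm_struct *mm)
{
	struct task_struct *g, *c;

	read_lock(&tasklist_lock);
	for_each_process(g) {
		if (g->flags & PF_KTHREAD)
			continue;	/* kernel threads never own a user mm */
		for_each_thread(g, c) {
			if (c->mm == mm) {
				/* pin the task before dropping tasklist_lock */
				get_task_struct(c);
				read_unlock(&tasklist_lock);
				return c;	/* caller must put_task_struct() */
			}
		}
	}
	read_unlock(&tasklist_lock);
	return NULL;
}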
|
D | cgroup.c |
    1835  struct task_struct *p, *g;   in cgroup_enable_task_cg_lists() local
    1852  do_each_thread(g, p) {   in cgroup_enable_task_cg_lists()
    1877  } while_each_thread(g, p);   in cgroup_enable_task_cg_lists()
|
/kernel/power/ |
D | process.c |
    31  struct task_struct *g, *p;   in try_to_freeze_tasks() local
    54  for_each_process_thread(g, p) {   in try_to_freeze_tasks()
    108  for_each_process_thread(g, p) {   in try_to_freeze_tasks()
    196  struct task_struct *g, *p;   in thaw_processes() local
    215  for_each_process_thread(g, p) {   in thaw_processes()
    234  struct task_struct *g, *p;   in thaw_kernel_threads() local
    242  for_each_process_thread(g, p) {   in thaw_kernel_threads()
|
D | main.c |
    609  static struct attribute * g[] = {   variable
    640  .attrs = g,
|
D | hibernate.c |
    1065  static struct attribute * g[] = {   variable
    1075  .attrs = g,
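In main.c and hibernate.c the identifier g is not a task-list cursor at all but a NULL-terminated sysfs attribute array wired into an attribute_group. A minimal sketch of that pattern follows; the attribute and group names are invented for illustration and are not the ones used by the power code.

/* Sketch of the sysfs attribute-group pattern; names are invented. */
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "hello\n");
}

static struct kobj_attribute demo_attr = __ATTR_RO(demo);

static struct attribute *g[] = {
	&demo_attr.attr,
	NULL,			/* the array must be NULL-terminated */
};

static const struct attribute_group attr_group = {
	.attrs = g,		/* the ".attrs = g" seen in both hits above */
};

/* registered with something like sysfs_create_group(kobj, &attr_group) */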
|
D | Kconfig |
    9  suspend-to-RAM state (e.g. the ACPI S3 state).
    256  notification of APM "events" (e.g. battery status change).
|
/kernel/debug/kdb/ |
D | kdb_private.h |
    234  #define kdb_do_each_thread(g, p) do_each_thread(g, p)   argument
    235  #define kdb_while_each_thread(g, p) while_each_thread(g, p)   argument
|
D | kdb_bt.c |
    118  struct task_struct *g, *p;   in kdb_bt() local
    131  kdb_do_each_thread(g, p) {   in kdb_bt()
    138  } kdb_while_each_thread(g, p);   in kdb_bt()
|
D | kdb_main.c |
    2294  const struct task_struct *p, *g;   in kdb_ps_suppressed() local
    2300  kdb_do_each_thread(g, p) {   in kdb_ps_suppressed()
    2303  } kdb_while_each_thread(g, p);   in kdb_ps_suppressed()
    2351  struct task_struct *g, *p;   in kdb_ps() local
    2370  kdb_do_each_thread(g, p) {   in kdb_ps()
    2375  } kdb_while_each_thread(g, p);   in kdb_ps()
|
/kernel/gcov/ |
D | Kconfig |
    9  This option enables gcov-based code profiling (e.g. for code coverage
    19  For a single file (e.g. main.o):
|
/kernel/debug/ |
D | gdbstub.c |
    699  struct task_struct *g;   in gdb_cmd_query() local
    727  do_each_thread(g, p) {   in gdb_cmd_query()
    737  } while_each_thread(g, p);   in gdb_cmd_query()
|
/kernel/rcu/ |
D | update.c |
    648  struct task_struct *g, *t;   in rcu_tasks_kthread() local
    707  for_each_process_thread(g, t) {   in rcu_tasks_kthread()
|
/kernel/sched/ |
D | debug.c |
    147  struct task_struct *g, *p;   in print_rq() local
    157  for_each_process_thread(g, p) {   in print_rq()
|
D | core.c |
    5094  struct task_struct *g, *p;   in show_state_filter() local
    5104  for_each_process_thread(g, p) {   in show_state_filter()
    8024  struct task_struct *g, *p;   in normalize_rt_tasks() local
    8030  for_each_process_thread(g, p) {   in normalize_rt_tasks()
    8250  struct task_struct *g, *p;   in tg_has_rt_tasks() local
    8258  for_each_process_thread(g, p) {   in tg_has_rt_tasks()
|
/kernel/locking/ |
D | lockdep.c |
    4176  struct task_struct *g, *p;   in debug_show_all_locks() local
    4209  do_each_thread(g, p) {   in debug_show_all_locks()
    4222  } while_each_thread(g, p);   in debug_show_all_locks()
|