/kernel/
  cpu.c
    57  enum cpuhp_state target;  member
    171  trace_cpuhp_enter(cpu, st->target, state, cb);  in cpuhp_invoke_callback()
    183  trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);  in cpuhp_invoke_callback()
    195  trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);  in cpuhp_invoke_callback()
    220  trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);  in cpuhp_invoke_callback()
    453  cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)  in cpuhp_set_state() argument
    460  st->target = target;  in cpuhp_set_state()
    462  st->bringup = st->state < target;  in cpuhp_set_state()
    483  st->target = prev_state;  in cpuhp_reset_state()
    490  if (!st->single && st->state == st->target)  in __cpuhp_kick_ap()
    [all …]
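The cpu.c matches above outline one small pattern: cpuhp_set_state() records the requested hotplug target, derives the direction of travel by comparing it with the current state, and cpuhp_reset_state() restores the previous target when a transition has to be rolled back. A minimal userspace sketch of that bookkeeping, with invented state names and none of the real hotplug machinery:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for the per-CPU hotplug bookkeeping referenced
     * above; field and function names echo the matches, but this is not
     * the kernel's cpuhp code. */
    enum hp_state { HP_OFFLINE, HP_BRINGUP, HP_ONLINE };

    struct hp_cpu_state {
        enum hp_state state;   /* where the CPU is now           */
        enum hp_state target;  /* where we want it to end up     */
        bool bringup;          /* direction derived from the two */
    };

    /* Record a new target, note the direction, and hand back the previous
     * state so a failed transition can be rolled back. */
    static enum hp_state hp_set_state(struct hp_cpu_state *st, enum hp_state target)
    {
        enum hp_state prev_state = st->state;

        st->target  = target;
        st->bringup = st->state < target;   /* moving "up" or "down"? */
        return prev_state;
    }

    static void hp_reset_state(struct hp_cpu_state *st, enum hp_state prev_state)
    {
        st->target  = prev_state;
        st->bringup = st->state < prev_state;
    }

    int main(void)
    {
        struct hp_cpu_state st = { .state = HP_OFFLINE, .target = HP_OFFLINE };
        enum hp_state prev = hp_set_state(&st, HP_ONLINE);

        printf("bringup=%d\n", st.bringup);   /* 1: heading from offline to online */
        hp_reset_state(&st, prev);            /* pretend a step failed */
        printf("target restored to %d\n", st.target);
        return 0;
    }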
  test_kprobes.c
    18  static u32 (*target)(u32 value);  variable
    66  ret = target(rand1);  in test_kprobe()
    125  ret = target(rand1);  in test_kprobes()
    206  ret = target(rand1);  in test_kretprobe()
    254  ret = target(rand1);  in test_kretprobes()
    275  target = kprobe_target;  in init_test_probes()
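In test_kprobes.c the probed function is reached through a file-scope function pointer, bound once in init_test_probes() and called from each test case; routing the call through a pointer keeps a real, out-of-line call site for the probe to land on. A small userspace sketch of the same indirection, with made-up function names:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the probed function. */
    static uint32_t probe_me(uint32_t value)
    {
        return value * 2 + 1;
    }

    /* File-scope pointer, as in test_kprobes.c, so every test calls the
     * same out-of-line target. */
    static uint32_t (*target)(uint32_t value);

    static int run_one_test(uint32_t input, uint32_t expected)
    {
        uint32_t ret = target(input);

        return ret == expected ? 0 : -1;
    }

    int main(void)
    {
        target = probe_me;   /* bound once, like init_test_probes() does */
        printf("test %s\n", run_one_test(20, 41) == 0 ? "passed" : "failed");
        return 0;
    }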
  capability.c
    122  struct task_struct *target;  in cap_get_target_pid() local
    126  target = find_task_by_vpid(pid);  in cap_get_target_pid()
    127  if (!target)  in cap_get_target_pid()
    130  ret = security_capget(target, pEp, pIp, pPp);  in cap_get_target_pid()
  jump_label.c
    71  jea->target = jeb->target - delta;  in jump_label_swap()
    75  jeb->target = tmp.target + delta;  in jump_label_swap()
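The two jump_label.c lines are the interesting half of jump_label_swap(): the entries store their target as an offset relative to the entry itself, so swapping two entries during a sort has to rebase each stored offset by the distance (delta) between the two slots. A userspace sketch of that rebasing, using a hypothetical rel_entry type rather than the kernel's jump entries:

    #include <stdio.h>

    /* An entry whose target is stored as an offset relative to the entry's
     * own address. Hypothetical simplified type. */
    struct rel_entry {
        long target;   /* target address minus the address of this entry */
    };

    static unsigned long rel_entry_target(const struct rel_entry *e)
    {
        return (unsigned long)e + e->target;
    }

    /* Swapping two self-relative entries must rebase the stored offsets by
     * the distance between the two slots, as jump_label_swap() does. */
    static void rel_entry_swap(struct rel_entry *a, struct rel_entry *b)
    {
        long delta = (long)((unsigned long)a - (unsigned long)b);
        struct rel_entry tmp = *a;

        a->target = b->target - delta;
        b->target = tmp.target + delta;
    }

    int main(void)
    {
        int x, y;
        struct rel_entry e[2];

        e[0].target = (long)((unsigned long)&x - (unsigned long)&e[0]);
        e[1].target = (long)((unsigned long)&y - (unsigned long)&e[1]);

        rel_entry_swap(&e[0], &e[1]);

        /* After the swap, e[0] resolves to y and e[1] resolves to x. */
        printf("%d %d\n", rel_entry_target(&e[0]) == (unsigned long)&y,
                          rel_entry_target(&e[1]) == (unsigned long)&x);
        return 0;
    }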
  workqueue.c
    2653  struct work_struct *target, struct worker *worker)  in insert_wq_barrier() argument
    2667  init_completion_map(&barr->done, &target->lockdep_map);  in insert_wq_barrier()
    2678  unsigned long *bits = work_data_bits(target);  in insert_wq_barrier()
    2680  head = target->entry.next;  in insert_wq_barrier()
  module.c
    859  use->target = b;  in add_module_usage()
    894  struct module *i = use->target;  in module_unload_free()
    1726  sysfs_remove_link(use->target->holders_dir, mod->name);  in del_usage_links()
    1739  ret = sysfs_create_link(use->target->holders_dir,  in add_usage_links()
  signal.c
    1465  struct task_struct *target)  in kill_as_cred_perm() argument
    1467  const struct cred *pcred = __task_cred(target);  in kill_as_cred_perm()
/kernel/gcov/
  fs.c
    305  char *target;  in link_target() local
    316  target = kasprintf(GFP_KERNEL, "%s/%s.%s", dir, copy, ext);  in link_target()
    318  target = kasprintf(GFP_KERNEL, "%s.%s", copy, ext);  in link_target()
    321  return target;  in link_target()
    369  char *target;  in add_links() local
    379  target = get_link_target(  in add_links()
    382  if (!target)  in add_links()
    384  basename = kbasename(target);  in add_links()
    385  if (basename == target)  in add_links()
    388  parent, target);  in add_links()
    [all …]
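link_target() in the gcov code builds the symlink destination with kasprintf(), choosing between a "dir/name.ext" and a bare "name.ext" format, and add_links() later takes the basename of the result. A userspace sketch of the same string handling, using asprintf() in place of kasprintf() and a hypothetical path purely for illustration:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* kasprintf() is the kernel's allocating sprintf; asprintf() plays
     * that role here. Caller frees the returned string. */
    static char *link_target(const char *dir, const char *name, const char *ext)
    {
        char *target = NULL;
        int ret;

        if (dir)
            ret = asprintf(&target, "%s/%s.%s", dir, name, ext);
        else
            ret = asprintf(&target, "%s.%s", name, ext);

        return ret < 0 ? NULL : target;
    }

    /* Userspace counterpart of kbasename(): last path component. */
    static const char *base_name(const char *path)
    {
        const char *tail = strrchr(path, '/');

        return tail ? tail + 1 : path;
    }

    int main(void)
    {
        char *target = link_target("/sys/kernel/debug/gcov", "foo", "gcda");

        if (!target)
            return 1;
        /* add_links() skips targets with no directory component,
         * i.e. where basename == target. */
        printf("%s -> basename %s\n", target, base_name(target));
        free(target);
        return 0;
    }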
/kernel/trace/
  trace_events_filter.c
    100  int target;  member
    121  t = prog[N].target;  in update_preds()
    122  s = prog[t].target;  in update_preds()
    124  prog[t].target = N;  in update_preds()
    125  prog[N].target = s;  in update_preds()
    474  prog[N].target = N-1;  in predicate_parse()
    554  prog[N].target = 1; /* TRUE */  in predicate_parse()
    556  prog[N+1].target = 0; /* FALSE */  in predicate_parse()
    557  prog[N-1].target = N;  in predicate_parse()
    562  int target = prog[i].target;  in predicate_parse() local
    [all …]
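Here target is the jump index of a short-circuiting predicate program: predicate_parse() wires each entry's target so that evaluation can skip ahead, and the final two entries act as TRUE/FALSE terminators. The toy below evaluates a program of that shape; the entry layout and helpers are simplified inventions, not the kernel's prog_entry handling:

    #include <stdbool.h>
    #include <stdio.h>

    struct prog_entry {
        int operand;         /* index into the input values, -1 = terminator */
        bool when_to_branch; /* jump to 'target' when the operand equals this */
        int target;          /* jump index, or the result for a terminator */
    };

    static int run_prog(const struct prog_entry *prog, const bool *vals)
    {
        int i;

        for (i = 0; prog[i].operand >= 0; i++) {
            bool match = vals[prog[i].operand];

            if (match == prog[i].when_to_branch)
                i = prog[i].target - 1;   /* -1: the loop's i++ lands on target */
        }
        return prog[i].target;   /* terminator carries TRUE (1) or FALSE (0) */
    }

    int main(void)
    {
        /* Program for "vals[0] && vals[1]": entries 0 and 1 jump straight to
         * the FALSE terminator as soon as an operand is false; falling off
         * the end reaches the TRUE terminator. */
        const struct prog_entry and_prog[] = {
            { .operand = 0, .when_to_branch = false, .target = 3 },
            { .operand = 1, .when_to_branch = false, .target = 3 },
            { .operand = -1, .target = 1 },   /* TRUE  */
            { .operand = -1, .target = 0 },   /* FALSE */
        };
        bool in1[2] = { true, true };
        bool in2[2] = { true, false };

        printf("%d %d\n", run_prog(and_prog, in1), run_prog(and_prog, in2));
        return 0;
    }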
  trace_uprobe.c
    1209  if (event->hw.target->mm == mm)  in __uprobe_perf_filter()
    1220  return __uprobe_perf_filter(filter, event->hw.target->mm);  in trace_uprobe_filter_event()
    1229  if (event->hw.target) {  in trace_uprobe_filter_remove()
    1232  (event->hw.target->flags & PF_EXITING) ||  in trace_uprobe_filter_remove()
    1250  if (event->hw.target) {  in trace_uprobe_filter_add()
  trace_kprobe.c
    1748  int (*target)(int, int, int, int, int, int);  in kprobe_trace_self_tests_init() local
    1760  target = kprobe_trace_selftest_target;  in kprobe_trace_self_tests_init()
    1811  ret = target(1, 2, 3, 4, 5, 6);  in kprobe_trace_self_tests_init()
/kernel/events/
  hw_breakpoint.c
    107  struct task_struct *tsk = bp->hw.target;  in task_bp_pinned()
    112  if (iter->hw.target == tsk &&  in task_bp_pinned()
    144  if (!bp->hw.target)  in fetch_bp_busy_slots()
    201  if (!bp->hw.target) {  in toggle_bp_slot()
  core.c
    4043  event->hw.target != current) {  in perf_event_read_local()
    4576  if (event->hw.target)  in _free_event()
    4577  put_task_struct(event->hw.target);  in _free_event()
    10482  event->hw.target = get_task_struct(task);  in perf_event_alloc()
    10635  if (event->hw.target)  in perf_event_alloc()
    10636  put_task_struct(event->hw.target);  in perf_event_alloc()
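The core.c matches show the lifetime rule for event->hw.target: perf_event_alloc() takes a reference on the target task with get_task_struct(), and both _free_event() and the allocation error path drop it with put_task_struct(). A userspace sketch of that pairing with a toy reference count; the types and helpers are stand-ins, not the perf implementation:

    #include <stdio.h>
    #include <stdlib.h>

    struct task {
        int refcount;
        const char *name;
    };

    static struct task *get_task(struct task *t)
    {
        t->refcount++;
        return t;
    }

    static void put_task(struct task *t)
    {
        if (--t->refcount == 0)
            printf("task %s freed\n", t->name);
    }

    struct event {
        struct task *target;   /* pinned for as long as the event lives */
    };

    static struct event *event_alloc(struct task *task)
    {
        struct event *ev = malloc(sizeof(*ev));

        if (!ev)
            return NULL;
        ev->target = task ? get_task(task) : NULL;  /* as in perf_event_alloc() */
        return ev;
    }

    static void event_free(struct event *ev)
    {
        if (ev->target)
            put_task(ev->target);   /* as in _free_event() and the error path */
        free(ev);
    }

    int main(void)
    {
        struct task t = { .refcount = 1, .name = "worker" };
        struct event *ev = event_alloc(&t);

        event_free(ev);
        put_task(&t);   /* drop the original reference */
        return 0;
    }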
/kernel/locking/
  lockdep.c
    1551  print_circular_bug_entry(struct lock_list *target, int depth)  in print_circular_bug_entry() argument
    1556  print_lock_name(target->class);  in print_circular_bug_entry()
    1558  print_lock_trace(target->trace, 6);  in print_circular_bug_entry()
    1567  struct lock_class *target = hlock_class(tgt);  in print_circular_lock_scenario() local
    1589  __print_lock_name(target);  in print_circular_lock_scenario()
    1597  __print_lock_name(target);  in print_circular_lock_scenario()
    1603  __print_lock_name(target);  in print_circular_lock_scenario()
    1649  struct lock_list *target,  in print_circular_bug() argument
    1665  depth = get_lock_depth(target);  in print_circular_bug()
    1667  print_circular_bug_header(target, depth, check_src, check_tgt);  in print_circular_bug()
    [all …]
/kernel/sched/
  fair.c
    5539  int target = nr_cpumask_bits;  in wake_affine() local
    5542  target = wake_affine_idle(this_cpu, prev_cpu, sync);  in wake_affine()
    5544  if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)  in wake_affine()
    5545  target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);  in wake_affine()
    5548  if (target == nr_cpumask_bits)  in wake_affine()
    5553  return target;  in wake_affine()
    5873  static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)  in select_idle_core() argument
    5881  if (!test_idle_cores(target, false))  in select_idle_core()
    5886  for_each_cpu_wrap(core, cpus, target) {  in select_idle_core()
    5902  set_idle_cores(target, 0);  in select_idle_core()
    [all …]
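In wake_affine() the target variable starts at nr_cpumask_bits, which serves as a "no decision yet" sentinel: the idle heuristic runs first, the weight heuristic only runs if the sentinel is still in place, and the caller falls back to the previous CPU if neither decided. A simplified sketch of that shape, with toy heuristics standing in for wake_affine_idle()/wake_affine_weight():

    #include <stdio.h>

    #define NO_CPU  (-1)   /* plays the role of the nr_cpumask_bits sentinel */

    /* Toy heuristics; the real ones look at scheduler statistics. */
    static int pick_if_idle(int this_cpu, int prev_cpu, const int *idle)
    {
        if (idle[this_cpu])
            return this_cpu;
        if (idle[prev_cpu])
            return prev_cpu;
        return NO_CPU;
    }

    static int pick_by_load(int this_cpu, int prev_cpu, const int *load)
    {
        return load[this_cpu] < load[prev_cpu] ? this_cpu : NO_CPU;
    }

    static int wake_affine_pick(int this_cpu, int prev_cpu,
                                const int *idle, const int *load)
    {
        int target = NO_CPU;

        target = pick_if_idle(this_cpu, prev_cpu, idle);
        if (target == NO_CPU)
            target = pick_by_load(this_cpu, prev_cpu, load);
        if (target == NO_CPU)
            return prev_cpu;       /* nothing decided: stay where we were */
        return target;
    }

    int main(void)
    {
        int idle[2] = { 0, 0 };
        int load[2] = { 3, 7 };

        /* CPU 0 is quieter than CPU 1, so the load step picks it. */
        printf("picked CPU %d\n", wake_affine_pick(0, 1, idle, load));
        return 0;
    }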
  deadline.c
    1628  int target = find_later_rq(p);  in select_task_rq_dl() local
    1630  if (target != -1 &&  in select_task_rq_dl()
    1632  cpu_rq(target)->dl.earliest_dl.curr) ||  in select_task_rq_dl()
    1633  (cpu_rq(target)->dl.dl_nr_running == 0)))  in select_task_rq_dl()
    1634  cpu = target;  in select_task_rq_dl()
  rt.c
    1475  int target = find_lowest_rq(p);  in select_task_rq_rt() local
    1481  if (target != -1 &&  in select_task_rq_rt()
    1482  p->prio < cpu_rq(target)->rt.highest_prio.curr)  in select_task_rq_rt()
    1483  cpu = target;  in select_task_rq_rt()
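deadline.c and rt.c use target the same way: find_later_rq()/find_lowest_rq() propose a candidate CPU, and the task only moves there if the candidate is valid and strictly preferable to where the task already sits. A sketch of that accept-only-if-better check; the runqueue type and helpers are simplified stand-ins, and as with kernel RT priorities a lower number means more urgent:

    #include <stdio.h>

    struct runqueue {
        int highest_prio;   /* most urgent priority queued (lower = more urgent) */
    };

    /* Toy find_lowest_rq(): pick the CPU whose most urgent queued task is
     * the least urgent, i.e. the easiest place to preempt. */
    static int find_lowest_rq(const struct runqueue *rq, int nr_cpus)
    {
        int cpu, best = -1;

        for (cpu = 0; cpu < nr_cpus; cpu++)
            if (best < 0 || rq[cpu].highest_prio > rq[best].highest_prio)
                best = cpu;
        return best;
    }

    static int select_cpu(int task_prio, int cur_cpu,
                          const struct runqueue *rq, int nr_cpus)
    {
        int cpu = cur_cpu;
        int target = find_lowest_rq(rq, nr_cpus);

        /* Move only if the candidate exists and our priority beats whatever
         * is already queued there; otherwise migrating gains nothing. */
        if (target != -1 && task_prio < rq[target].highest_prio)
            cpu = target;
        return cpu;
    }

    int main(void)
    {
        struct runqueue rq[2] = { { .highest_prio = 10 }, { .highest_prio = 50 } };

        printf("cpu %d\n", select_cpu(30, 0, rq, 2));   /* moves to CPU 1 */
        return 0;
    }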
/kernel/dma/
  swiotlb.c
    629  enum dma_sync_target target)  in swiotlb_tbl_sync_single() argument
    638  switch (target) {  in swiotlb_tbl_sync_single()
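swiotlb_tbl_sync_single() takes the sync direction as an enum dma_sync_target and switches on it to decide which way data moves between the original buffer and the bounce buffer. A userspace sketch of that dispatch; the enum values echo the kernel's names, but the copy helpers are stand-ins:

    #include <stdio.h>
    #include <string.h>

    enum sync_target { SYNC_FOR_CPU, SYNC_FOR_DEVICE };

    static void sync_buffer(char *orig, char *bounce, size_t len,
                            enum sync_target target)
    {
        switch (target) {
        case SYNC_FOR_CPU:
            memcpy(orig, bounce, len);    /* device wrote: copy back to the CPU */
            break;
        case SYNC_FOR_DEVICE:
            memcpy(bounce, orig, len);    /* CPU wrote: push to the bounce buffer */
            break;
        }
    }

    int main(void)
    {
        char orig[8] = "cpu";
        char bounce[8] = "dev";

        sync_buffer(orig, bounce, sizeof(orig), SYNC_FOR_CPU);
        printf("%s\n", orig);   /* now "dev" */
        return 0;
    }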
/kernel/bpf/
  inode.c
    352  const char *target)  in bpf_symlink() argument
    354  char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);  in bpf_symlink()
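bpf_symlink() duplicates the target string with kstrdup() before anything else, so the symlink owns its own copy of the path and the call can fail cleanly if the allocation does. A userspace sketch of that pattern using strdup(); the inode type and the example path are made up for illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct fake_inode {
        char *link;   /* owned copy of the symlink target */
    };

    static int make_symlink(struct fake_inode *inode, const char *target)
    {
        char *link = strdup(target);   /* kstrdup() in the kernel */

        if (!link)
            return -1;                 /* -ENOMEM in the kernel */
        inode->link = link;
        return 0;
    }

    int main(void)
    {
        struct fake_inode inode = { 0 };

        if (make_symlink(&inode, "/sys/fs/bpf/my_prog") == 0)
            printf("link -> %s\n", inode.link);
        free(inode.link);
        return 0;
    }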
/kernel/debug/kdb/
  kdb_main.c
    2055  kdb_printf("%s ", use->target->name);  in kdb_lsmod()