/kernel/

cpu.c
    67    enum cpuhp_state target;    (member)
    191   trace_cpuhp_enter(cpu, st->target, state, cb);    in cpuhp_invoke_callback()
    201   trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);    in cpuhp_invoke_callback()
    213   trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);    in cpuhp_invoke_callback()
    238   trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);    in cpuhp_invoke_callback()
    500   cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)    in cpuhp_set_state() (argument)
    503   bool bringup = st->state < target;    in cpuhp_set_state()
    508   st->target = target;    in cpuhp_set_state()
    523   st->target = prev_state;    in cpuhp_reset_state()
    553   if (!st->single && st->state == st->target)    in __cpuhp_kick_ap()
    [all …]

regset.c
    6     static int __regset_get(struct task_struct *target,    in __regset_get() (argument)
    23    res = regset->regset_get(target, regset,    in __regset_get()
    33    int regset_get(struct task_struct *target,    in regset_get() (argument)
    38    return __regset_get(target, regset, size, &data);    in regset_get()
    42    int regset_get_alloc(struct task_struct *target,    in regset_get_alloc() (argument)
    48    return __regset_get(target, regset, size, data);    in regset_get_alloc()
    61    int copy_regset_to_user(struct task_struct *target,    in copy_regset_to_user() (argument)
    71    ret = regset_get_alloc(target, regset, size, &buf);    in copy_regset_to_user()

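The calls above suggest a single helper, __regset_get(), that either fills a caller-supplied buffer or allocates one when none was given (regset_get() passes the address of its own buffer pointer, regset_get_alloc() forwards the caller's pointer). The following user-space sketch illustrates that buffer-or-allocate pattern only; the names, data, and sizes are invented and this is not the kernel's implementation.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy "register set" contents standing in for a task's registers. */
    static const char sample_regs[] = "r0=1 r1=2 r2=3";

    /*
     * In the style of __regset_get(): *data either points at a caller-supplied
     * buffer, or is NULL, in which case a buffer is allocated and handed back
     * through the same pointer.  Returns the number of bytes copied, or -1.
     */
    static int toy_regset_get(unsigned int size, void **data)
    {
        void *p = *data;

        if (size > sizeof(sample_regs))
            size = sizeof(sample_regs);
        if (!p) {
            p = malloc(size);
            if (!p)
                return -1;
            *data = p;
        }
        memcpy(p, sample_regs, size);
        return (int)size;
    }

    int main(void)
    {
        char buf[8];
        void *p = buf;      /* regset_get() style: use the caller's buffer */
        void *alloc = NULL; /* regset_get_alloc() style: let the helper allocate */
        int n;

        n = toy_regset_get(sizeof(buf), &p);
        printf("caller buffer:    %d bytes copied\n", n);

        n = toy_regset_get(64, &alloc);
        printf("allocated buffer: %d bytes copied: %s\n", n, (char *)alloc);
        free(alloc);
        return 0;
    }
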
cfi.c
    11    unsigned long *target, u32 type)    in report_cfi_failure() (argument)
    13    if (target)    in report_cfi_failure()
    15    (void *)addr, (void *)*target, type);    in report_cfi_failure()

capability.c
    122   struct task_struct *target;    in cap_get_target_pid() (local)
    126   target = find_task_by_vpid(pid);    in cap_get_target_pid()
    127   if (!target)    in cap_get_target_pid()
    130   ret = security_capget(target, pEp, pIp, pPp);    in cap_get_target_pid()

exit.c
    1547  struct task_struct *target)    in is_effectively_child() (argument)
    1550  !ptrace ? target->real_parent : target->parent;    in is_effectively_child()
    1563  struct task_struct *target;    in do_wait_pid() (local)
    1567  target = pid_task(wo->wo_pid, PIDTYPE_TGID);    in do_wait_pid()
    1568  if (target && is_effectively_child(wo, ptrace, target)) {    in do_wait_pid()
    1569  retval = wait_consider_task(wo, ptrace, target);    in do_wait_pid()
    1575  target = pid_task(wo->wo_pid, PIDTYPE_PID);    in do_wait_pid()
    1576  if (target && target->ptrace &&    in do_wait_pid()
    1577  is_effectively_child(wo, ptrace, target)) {    in do_wait_pid()
    1578  retval = wait_consider_task(wo, ptrace, target);    in do_wait_pid()

jump_label.c
    71    jea->target = jeb->target - delta;    in jump_label_swap()
    75    jeb->target = tmp.target + delta;    in jump_label_swap()

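The two hits above adjust the swapped values by delta because, with the relative jump-entry layout, target is stored as an offset from the entry's own address, so moving a value into a different slot changes what the offset must be. The stand-alone sketch below (toy struct and helper names, not kernel code) shows why that re-basing is needed.

    #include <stdio.h>

    /*
     * Toy entry that stores its target as an offset from its own address,
     * similar in spirit to the relative jump-entry layout (which uses 32-bit
     * offsets; a long keeps this toy simple).
     */
    struct rel_entry {
        long target;
    };

    static void *entry_target(struct rel_entry *e)
    {
        return (char *)e + e->target;
    }

    /*
     * Swapping two entries must re-base each stored offset by the distance
     * between the slots (delta), or the swapped values would resolve to the
     * wrong addresses -- the same adjustment jump_label_swap() performs.
     */
    static void swap_rel_entries(struct rel_entry *a, struct rel_entry *b)
    {
        long delta = (char *)b - (char *)a;
        struct rel_entry tmp = *a;

        a->target = b->target + delta;   /* b's target, as seen from a's slot */
        b->target = tmp.target - delta;  /* a's target, as seen from b's slot */
    }

    int main(void)
    {
        static char obj_a, obj_b;        /* two arbitrary target objects */
        static struct rel_entry e[2];

        e[0].target = (char *)&obj_a - (char *)&e[0];
        e[1].target = (char *)&obj_b - (char *)&e[1];

        swap_rel_entries(&e[0], &e[1]);

        /* After the swap, slot 0 resolves to obj_b and slot 1 to obj_a. */
        printf("slot0 -> obj_b: %d\n", entry_target(&e[0]) == (void *)&obj_b);
        printf("slot1 -> obj_a: %d\n", entry_target(&e[1]) == (void *)&obj_a);
        return 0;
    }
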
/kernel/gcov/

fs.c
    415   char *target;    in link_target() (local)
    426   target = kasprintf(GFP_KERNEL, "%s/%s.%s", dir, copy, ext);    in link_target()
    428   target = kasprintf(GFP_KERNEL, "%s.%s", copy, ext);    in link_target()
    431   return target;    in link_target()
    479   char *target;    in add_links() (local)
    489   target = get_link_target(    in add_links()
    492   if (!target)    in add_links()
    494   basename = kbasename(target);    in add_links()
    495   if (basename == target)    in add_links()
    498   parent, target);    in add_links()
    [all …]

/kernel/trace/

trace_events_filter.c
    137   int target;    (member)
    158   t = prog[N].target;    in update_preds()
    159   s = prog[t].target;    in update_preds()
    161   prog[t].target = N;    in update_preds()
    162   prog[N].target = s;    in update_preds()
    511   prog[N].target = N-1;    in predicate_parse()
    591   prog[N].target = 1; /* TRUE */    in predicate_parse()
    593   prog[N+1].target = 0; /* FALSE */    in predicate_parse()
    594   prog[N-1].target = N;    in predicate_parse()
    599   int target = prog[i].target;    in predicate_parse() (local)
    [all …]

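The prog[].target writes above belong to the event filter's compiled predicate program, in which each entry can branch to another index and the two trailing entries encode the final result (the TRUE and FALSE hits). The toy evaluator below illustrates that branch-target idea only; its jump convention is simplified and the predicate functions are invented, so it is a sketch rather than the kernel's evaluator.

    #include <stdio.h>
    #include <stdbool.h>

    /*
     * Toy short-circuiting predicate program: each entry either runs a test
     * or, for the two trailing terminator entries, encodes the final result
     * in its target field (1 = TRUE, 0 = FALSE).
     */
    struct prog_entry {
        int target;                 /* index to jump to when the branch is taken */
        bool when_to_branch;        /* predicate result that takes the branch */
        bool (*pred)(int value);    /* NULL marks a TRUE/FALSE terminator */
    };

    static bool is_positive(int v) { return v > 0; }
    static bool is_even(int v)     { return (v % 2) == 0; }

    /*
     * "positive && even" compiled into a 4-entry program:
     *   0: if (!positive) goto 3 (FALSE)
     *   1: if (!even)     goto 3 (FALSE), otherwise fall through to TRUE
     *   2: TRUE terminator  (target = 1)
     *   3: FALSE terminator (target = 0)
     */
    static const struct prog_entry prog[] = {
        { .target = 3, .when_to_branch = false, .pred = is_positive },
        { .target = 3, .when_to_branch = false, .pred = is_even },
        { .target = 1 },
        { .target = 0 },
    };

    static int run_prog(const struct prog_entry *entry, int value)
    {
        int i = 0;

        while (entry[i].pred) {
            bool match = entry[i].pred(value);

            if (match == entry[i].when_to_branch)
                i = entry[i].target;    /* take the branch */
            else
                i++;                    /* fall through */
        }
        return entry[i].target;         /* terminator: 1 or 0 */
    }

    int main(void)
    {
        int values[] = { 4, 3, -2 };

        for (unsigned int i = 0; i < sizeof(values) / sizeof(values[0]); i++)
            printf("%3d -> %d\n", values[i], run_prog(prog, values[i]));
        return 0;
    }
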
trace_uprobe.c
    1210  if (event->hw.target->mm == mm)    in __uprobe_perf_filter()
    1221  return __uprobe_perf_filter(filter, event->hw.target->mm);    in trace_uprobe_filter_event()
    1230  if (event->hw.target) {    in trace_uprobe_filter_remove()
    1233  (event->hw.target->flags & PF_EXITING) ||    in trace_uprobe_filter_remove()
    1251  if (event->hw.target) {    in trace_uprobe_filter_add()

trace_kprobe.c
    1993  int (*target)(int, int, int, int, int, int);    in kprobe_trace_self_tests_init() (local)
    2003  target = kprobe_trace_selftest_target;    in kprobe_trace_self_tests_init()
    2052  ret = target(1, 2, 3, 4, 5, 6);    in kprobe_trace_self_tests_init()

/kernel/events/

hw_breakpoint.c
    76    .key_offset = offsetof(struct hw_perf_event, target),
    77    .key_len = sizeof_field(struct hw_perf_event, target),
    110   struct task_struct *tsk = bp->hw.target;    in get_task_bps_mutex()
    334   head = rhltable_lookup(&task_bps_ht, &bp->hw.target, task_bps_ht_params);    in task_bp_pinned()
    376   if (bp->hw.target && bp->cpu < 0) {    in max_bp_pinned_slots()
    394   if (!bp->hw.target)    in max_bp_pinned_slots()
    416   if (!bp->hw.target) {    in toggle_bp_slot()

/kernel/bpf/

map_iter.c
    92    .target = "bpf_map",
    169   .target = "bpf_map_elem",

link_iter.c
    92    .target = "bpf_link",

prog_iter.c
    92    .target = "bpf_prog",

task_iter.c
    721   .target = "task",
    742   .target = "task_file",
    765   .target = "task_vma",

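These .target strings name the iterator targets that BPF programs attach to; by the usual libbpf convention the program's section name is "iter/" followed by the registered target (for example "iter/task" for .target = "task"). The sketch below is only an illustrative BPF-side counterpart: it assumes a typical vmlinux.h/libbpf skeleton build and merely counts tasks rather than printing through the iterator's seq_file.

    /* count_tasks.bpf.c -- illustrative sketch, assuming a vmlinux.h/libbpf
     * build environment; not taken from the kernel tree. */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    char LICENSE[] SEC("license") = "GPL";

    long task_count = 0;    /* read from user space via the skeleton/map */

    /* "iter/task" matches the .target = "task" registration listed above. */
    SEC("iter/task")
    int count_tasks(struct bpf_iter__task *ctx)
    {
        /* ctx->task is NULL on the final call that ends the iteration. */
        if (ctx->task)
            __sync_fetch_and_add(&task_count, 1);
        return 0;
    }
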
bpf_iter.c
    356   if (!strcmp(attach_fname + prefix_len, iter->reg_info->target)) {    in bpf_iter_prog_supported()
    447   iter_link->tinfo->reg_info->target);    in bpf_iter_link_show_fdinfo()
    468   target_name = iter_link->tinfo->reg_info->target;    in bpf_iter_link_fill_link_info()

inode.c
    386   struct dentry *dentry, const char *target)    in bpf_symlink() (argument)
    388   char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);    in bpf_symlink()

cgroup_iter.c
    276   .target = "cgroup",

/kernel/locking/

lockdep.c
    1858  print_circular_bug_entry(struct lock_list *target, int depth)    in print_circular_bug_entry() (argument)
    1863  print_lock_name(target->class);    in print_circular_bug_entry()
    1865  print_lock_trace(target->trace, 6);    in print_circular_bug_entry()
    1874  struct lock_class *target = hlock_class(tgt);    in print_circular_lock_scenario() (local)
    1896  __print_lock_name(target);    in print_circular_lock_scenario()
    1904  __print_lock_name(target);    in print_circular_lock_scenario()
    1910  __print_lock_name(target);    in print_circular_lock_scenario()
    2013  struct lock_list *target,    in print_circular_bug() (argument)
    2029  depth = get_lock_depth(target);    in print_circular_bug()
    2031  print_circular_bug_header(target, depth, check_src, check_tgt);    in print_circular_bug()
    [all …]

/kernel/sched/

fair.c
    6513  int target = nr_cpumask_bits;    in wake_affine() (local)
    6516  target = wake_affine_idle(this_cpu, prev_cpu, sync);    in wake_affine()
    6518  if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)    in wake_affine()
    6519  target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);    in wake_affine()
    6522  if (target != this_cpu)    in wake_affine()
    6527  return target;    in wake_affine()
    6744  static int select_idle_smt(struct task_struct *p, int target)    in select_idle_smt() (argument)
    6748  for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {    in select_idle_smt()
    6749  if (cpu == target)    in select_idle_smt()
    6774  static inline int select_idle_smt(struct task_struct *p, int target)    in select_idle_smt() (argument)
    [all …]

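The select_idle_smt() hits above scan the SMT siblings of target for an idle CPU, skipping target itself (presumably handled by the caller) and returning -1 when no sibling is idle. The stand-alone toy below mirrors only the shape of that scan, with plain arrays in place of the kernel's cpumask helpers; the sibling layout and idle data are invented.

    #include <stdio.h>
    #include <stdbool.h>

    #define NR_CPUS 8

    /* Toy per-CPU idle state standing in for the kernel's idle checks. */
    static const bool cpu_idle[NR_CPUS] = {
        false, false, true, false, false, true, false, false,
    };

    /* Toy SMT topology: CPUs 2n and 2n+1 are siblings on the same core. */
    static bool smt_sibling(int cpu, int target)
    {
        return (cpu / 2) == (target / 2);
    }

    /*
     * Shape of select_idle_smt(): scan the SMT siblings of target for an
     * idle CPU, skip target itself, and return -1 when no sibling is idle.
     * A plain loop replaces the kernel's cpumask iteration and affinity mask.
     */
    static int select_idle_smt_toy(int target)
    {
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            if (!smt_sibling(cpu, target) || cpu == target)
                continue;
            if (cpu_idle[cpu])
                return cpu;
        }
        return -1;
    }

    int main(void)
    {
        printf("idle sibling of CPU 3: %d\n", select_idle_smt_toy(3)); /* 2 */
        printf("idle sibling of CPU 0: %d\n", select_idle_smt_toy(0)); /* -1 */
        return 0;
    }
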
rt.c
    1732  int target = find_lowest_rq(p);    in select_task_rq_rt() (local)
    1738  if (!test && target != -1 && !rt_task_fits_cpu(p, target))    in select_task_rq_rt()
    1745  if (target != -1 &&    in select_task_rq_rt()
    1746  p->prio < cpu_rq(target)->rt.highest_prio.curr)    in select_task_rq_rt()
    1747  cpu = target;    in select_task_rq_rt()

deadline.c
    1865  int target = find_later_rq(p);    in select_task_rq_dl() (local)
    1867  if (target != -1 &&    in select_task_rq_dl()
    1868  dl_task_is_earliest_deadline(p, cpu_rq(target)))    in select_task_rq_dl()
    1869  cpu = target;    in select_task_rq_dl()

/kernel/module/

kdb.c
    55    kdb_printf("%s ", use->target->name);    in kdb_lsmod()

sysfs.c
    250   sysfs_remove_link(use->target->holders_dir, mod->name);    in del_usage_links()
    263   ret = sysfs_create_link(use->target->holders_dir,    in add_usage_links()

/kernel/bpf/preload/iterators/

Makefile
    51    $(Q)$(CLANG) -g -O2 -target bpf $(INCLUDES) \