/kernel/
D | ptrace.c |
     67  void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,   in __ptrace_link() argument
     70  BUG_ON(!list_empty(&child->ptrace_entry));   in __ptrace_link()
     71  list_add(&child->ptrace_entry, &new_parent->ptraced);   in __ptrace_link()
     72  child->parent = new_parent;   in __ptrace_link()
     73  child->ptracer_cred = get_cred(ptracer_cred);   in __ptrace_link()
     82  static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)   in ptrace_link() argument
     84  __ptrace_link(child, new_parent, current_cred());   in ptrace_link()
    115  void __ptrace_unlink(struct task_struct *child)   in __ptrace_unlink() argument
    118  BUG_ON(!child->ptrace);   in __ptrace_unlink()
    120  clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);   in __ptrace_unlink()
    [all …]
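The linkage above is a plain intrusive-list operation: __ptrace_link() asserts the tracee is not already linked, adds it to the tracer's ptraced list, and repoints child->parent at the tracer. A minimal userspace sketch of the same invariant, with a simplified singly linked list standing in for the kernel's list_head (all names here are illustrative):

    #include <assert.h>
    #include <stddef.h>

    struct task {
        struct task *parent;      /* tracer while traced */
        struct task *traced_head; /* first tracee of this task */
        struct task *traced_next; /* next tracee on the tracer's list */
        int traced;               /* stands in for list_empty(&ptrace_entry) */
    };

    /* Same shape as __ptrace_link(): assert not already linked, splice
     * onto the tracer's list, repoint the parent pointer. */
    static void ptrace_link(struct task *child, struct task *new_parent)
    {
        assert(!child->traced);               /* mirrors the BUG_ON() */
        child->traced = 1;
        child->traced_next = new_parent->traced_head;
        new_parent->traced_head = child;
        child->parent = new_parent;
    }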
D | resource.c |
     70  if (p->child)   in next_resource()
     71  return p->child;   in next_resource()
     94  for (p = p->child; p && l < *pos; p = r_next(m, p, &l))   in r_start()
    197  p = &root->child;   in __request_resource()
    217  p = &old->parent->child;   in __release_resource()
    223  if (release_child || !(tmp->child)) {   in __release_resource()
    226  for (chd = tmp->child;; chd = chd->sibling) {   in __release_resource()
    231  *p = tmp->child;   in __release_resource()
    247  p = r->child;   in __release_child_resources()
    248  r->child = NULL;   in __release_child_resources()
    [all …]
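resource.c keeps the resource tree as first-child/next-sibling pointers, and next_resource() is the classic pre-order successor over that shape: descend to a child if there is one, otherwise take a sibling, otherwise climb until some ancestor has a sibling. A sketch of that traversal, with the struct trimmed to just the three tree pointers:

    struct resource {
        struct resource *parent, *sibling, *child;
    };

    /* Pre-order successor: child first, then sibling, then climb until
     * an ancestor's sibling exists (NULL once the tree is exhausted). */
    static struct resource *next_resource(struct resource *p)
    {
        if (p->child)
            return p->child;
        while (!p->sibling && p->parent)
            p = p->parent;
        return p->sibling;
    }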
D | sysctl.c |
     279  .child = kern_table,
     284  .child = vm_table,
     289  .child = fs_table,
     294  .child = debug_table,
     299  .child = dev_table,
     776  .child = random_table,
     781  .child = usermodehelper_table,
     787  .child = firmware_config_table,
    1173  .child = key_sysctls,
    1869  .child = inotify_table,
    [all …]
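In this era of the kernel, a ctl_table entry with a non-NULL .child is a directory: /proc/sys/kernel, /proc/sys/vm and so on are just entries whose .child points at the sub-table. A simplified model of that linkage (the real struct also carries mode, proc_handler, etc., and later kernels dropped .child entirely; the leaf name below is only an example):

    struct ctl_entry {
        const char *procname;     /* NULL-named entry terminates a table */
        struct ctl_entry *child;  /* non-NULL marks a directory */
    };

    static struct ctl_entry vm_entries[] = {
        { .procname = "overcommit_memory" },   /* illustrative leaf */
        { }                                    /* sentinel terminator */
    };

    static struct ctl_entry root_entries[] = {
        { .procname = "vm", .child = vm_entries },
        { }
    };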
D | fork.c |
    1263  static int wait_for_vfork_done(struct task_struct *child,   in wait_for_vfork_done() argument
    1275  task_lock(child);   in wait_for_vfork_done()
    1276  child->vfork_done = NULL;   in wait_for_vfork_done()
    1277  task_unlock(child);   in wait_for_vfork_done()
    1280  put_task_struct(child);   in wait_for_vfork_done()
    2663  struct task_struct *leader, *parent, *child;   in walk_process_tree() local
    2670  list_for_each_entry(child, &parent->children, sibling) {   in walk_process_tree()
    2671  res = visitor(child, data);   in walk_process_tree()
    2675  leader = child;   in walk_process_tree()
    2684  child = leader;   in walk_process_tree()
    [all …]
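walk_process_tree() is an iterative depth-first walk: for each child the visitor decides whether to descend (positive return), abort (negative), or keep scanning siblings (zero), and the walk climbs back through parent pointers when a sibling run ends. A single-threaded userspace sketch of that control flow (the kernel version also iterates each task's threads and holds tasklist_lock; names are illustrative):

    #include <stddef.h>

    struct task {
        struct task *parent;
        struct task *first_child;   /* stands in for the children list */
        struct task *next_sibling;  /* stands in for the sibling link */
    };

    typedef int (*visitor_fn)(struct task *t, void *data);

    static int walk_process_tree(struct task *top, visitor_fn visit, void *data)
    {
        struct task *t = top->first_child;

        while (t) {
            int res = visit(t, data);

            if (res < 0)
                return res;             /* visitor aborted the walk */
            if (res > 0 && t->first_child) {
                t = t->first_child;     /* visitor asked to descend */
                continue;
            }
            while (!t->next_sibling) {  /* climb until a sibling exists */
                t = t->parent;
                if (t == top)
                    return 0;
            }
            t = t->next_sibling;
        }
        return 0;
    }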
D | seccomp.c |
    317  struct seccomp_filter *child)   in is_ancestor() argument
    322  for (; child; child = child->prev)   in is_ancestor()
    323  if (child == parent)   in is_ancestor()
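Seccomp filters form a chain through ->prev (each installed filter points at the one it was stacked on), so ancestry is a linear scan up the chain. The whole check, reconstructed; the kernel version also treats a NULL parent as the ancestor of everything:

    struct seccomp_filter {
        struct seccomp_filter *prev;  /* filter this one was stacked on */
    };

    static int is_ancestor(struct seccomp_filter *parent,
                           struct seccomp_filter *child)
    {
        if (!parent)                  /* empty filter: ancestor of all */
            return 1;
        for (; child; child = child->prev)
            if (child == parent)
                return 1;
        return 0;
    }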
D | utsname_sysctl.c |
    120  .child = uts_kern_table,
D | sysctl_binary.c |
      47  const struct bin_table *child;   member
    1248  if (table->child) {   in get_sysctl()
    1250  table = table->child;   in get_sysctl()
D | user_namespace.c |
    1221  const struct user_namespace *child)   in in_userns() argument
    1224  for (ns = child; ns->level > ancestor->level; ns = ns->parent)   in in_userns()
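Namespace ancestry exploits the ->level field: a child namespace is always exactly one level deeper than its parent, so in_userns() climbs from the deeper namespace until the levels match and then needs only a single pointer comparison. A sketch with the struct trimmed to those two fields:

    struct user_namespace {
        struct user_namespace *parent;
        int level;                 /* depth: root is 0, each child is +1 */
    };

    static int in_userns(const struct user_namespace *ancestor,
                         const struct user_namespace *child)
    {
        const struct user_namespace *ns;

        for (ns = child; ns->level > ancestor->level; ns = ns->parent)
            ;
        return ns == ancestor;
    }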
/kernel/bpf/
D | lpm_trie.c |
     25  struct lpm_trie_node __rcu *child[2];   member
    267  node = rcu_dereference(node->child[next_bit]);   in trie_lookup_elem()
    336  RCU_INIT_POINTER(new_node->child[0], NULL);   in trie_update_elem()
    337  RCU_INIT_POINTER(new_node->child[1], NULL);   in trie_update_elem()
    357  slot = &node->child[next_bit];   in trie_update_elem()
    372  new_node->child[0] = node->child[0];   in trie_update_elem()
    373  new_node->child[1] = node->child[1];   in trie_update_elem()
    389  rcu_assign_pointer(new_node->child[next_bit], node);   in trie_update_elem()
    406  rcu_assign_pointer(im_node->child[0], node);   in trie_update_elem()
    407  rcu_assign_pointer(im_node->child[1], new_node);   in trie_update_elem()
    [all …]
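The LPM trie is a binary trie: child[0] or child[1] is selected by the key bit at each node's prefix length, which is what the child[next_bit] dereferences above do (under RCU in the kernel). A sketch of the bit extraction and descent, minus RCU and the longest-prefix bookkeeping that the real lookup keeps as it goes:

    #include <stdint.h>
    #include <stddef.h>

    struct trie_node {
        struct trie_node *child[2];   /* indexed by the next key bit */
        uint32_t prefixlen;
    };

    /* Bit `index` of the key, most-significant bit first, matching the
     * trie's extract_bit() semantics. */
    static int extract_bit(const uint8_t *key, size_t index)
    {
        return (key[index / 8] >> (7 - (index % 8))) & 1;
    }

    /* Follow child pointers until the path dead-ends; the real lookup
     * also records the last node whose prefix matched the key. */
    static struct trie_node *descend(struct trie_node *node, const uint8_t *key)
    {
        struct trie_node *last = NULL;

        while (node) {
            last = node;
            node = node->child[extract_bit(key, node->prefixlen)];
        }
        return last;
    }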
/kernel/sched/
D | topology.c |
      90  if (group == sd->groups && sd->child &&   in sched_domain_debug_one()
      91  !cpumask_equal(sched_domain_span(sd->child),   in sched_domain_debug_one()
     678  parent->parent->child = tmp;   in cpu_attach_domain()
     696  sd->child = NULL;   in cpu_attach_domain()
     860  if (!sibling->child)   in build_balance_mask()
     864  if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))   in build_balance_mask()
     892  if (sd->child)   in build_group_from_child_sched_domain()
     893  cpumask_copy(sg_span, sched_domain_span(sd->child));   in build_group_from_child_sched_domain()
    1063  struct sched_domain *child = sd->child;   in get_group() local
    1067  if (child)   in get_group()
    [all …]
D | debug.c |
    202  .child = sd_ctl_dir,
    226  if (entry->child)   in sd_free_ctl_entry()
    227  sd_free_ctl_entry(&entry->child);   in sd_free_ctl_entry()
    287  entry->child = sd_alloc_ctl_domain_table(sd);   in sd_alloc_ctl_cpu_table()
    310  WARN_ON(sd_ctl_dir[0].child);   in register_sched_domain_sysctl()
    311  sd_ctl_dir[0].child = cpu_entries;   in register_sched_domain_sysctl()
    342  if (e->child)   in register_sched_domain_sysctl()
    343  sd_free_ctl_entry(&e->child);   in register_sched_domain_sysctl()
    350  e->child = sd_alloc_ctl_cpu_table(i);   in register_sched_domain_sysctl()
D | rt.c |
    2483  struct task_group *child;   in tg_rt_schedulable() local
    2518  list_for_each_entry_rcu(child, &tg->children, siblings) {   in tg_rt_schedulable()
    2519  period = ktime_to_ns(child->rt_bandwidth.rt_period);   in tg_rt_schedulable()
    2520  runtime = child->rt_bandwidth.rt_runtime;   in tg_rt_schedulable()
    2522  if (child == d->tg) {   in tg_rt_schedulable()
D | fair.c |
    5783  sd = sd->child;   in find_idlest_cpu()
    5789  sd = sd->child;   in find_idlest_cpu()
    5796  sd = sd->child;   in find_idlest_cpu()
    7887  struct sched_domain *child = sd->child;   in update_group_capacity() local
    7896  if (!child) {   in update_group_capacity()
    7905  if (child->flags & SD_OVERLAP) {   in update_group_capacity()
    7942  group = child->groups;   in update_group_capacity()
    7950  } while (group != child->groups);   in update_group_capacity()
    8307  struct sched_domain *child = env->sd->child;   in update_sd_lb_stats() local
    8311  bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING;   in update_sd_lb_stats()
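The do/while over child->groups in update_group_capacity() works because the sched_groups at each domain level form a circular singly linked list, so iteration stops when it wraps back to the first group. The shape of that loop, modeled in isolation with a trimmed-down struct:

    struct sched_group {
        struct sched_group *next;   /* circular list of groups */
        unsigned long capacity;
    };

    /* Sum capacities around the ring, stopping on wrap-around, the way
     * update_group_capacity() accumulates over the child's groups. */
    static unsigned long sum_group_capacity(struct sched_group *first)
    {
        struct sched_group *group = first;
        unsigned long capacity = 0;

        do {
            capacity += group->capacity;
            group = group->next;
        } while (group != first);
        return capacity;
    }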
D | core.c |
    714  struct task_group *parent, *child;   in walk_tg_tree_from() local
    723  list_for_each_entry_rcu(child, &parent->children, siblings) {   in walk_tg_tree_from()
    724  parent = child;   in walk_tg_tree_from()
    734  child = parent;   in walk_tg_tree_from()
D | sched.h |
    1350  #define for_each_lower_domain(sd) for (; sd; sd = sd->child)
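Since each sched_domain's ->child points one level down the topology, the macro is just a for loop toward the bottom. A hypothetical use, counting the levels at and below a starting domain (struct and macro re-declared here for the sketch):

    struct sched_domain {
        struct sched_domain *child;   /* next lower topology level */
    };

    #define for_each_lower_domain(sd) for (; sd; sd = sd->child)

    static int domain_depth_below(struct sched_domain *sd)
    {
        int depth = 0;

        for_each_lower_domain(sd)
            depth++;
        return depth;
    }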
/kernel/events/
D | core.c |
    4675  struct perf_event *child, *tmp;   in perf_event_release_kernel() local
    4714  list_for_each_entry(child, &event->child_list, child_list) {   in perf_event_release_kernel()
    4720  ctx = READ_ONCE(child->ctx);   in perf_event_release_kernel()
    4747  if (tmp == child) {   in perf_event_release_kernel()
    4748  perf_remove_from_context(child, DETACH_GROUP);   in perf_event_release_kernel()
    4749  list_move(&child->child_list, &free_list);   in perf_event_release_kernel()
    4764  list_for_each_entry_safe(child, tmp, &free_list, child_list) {   in perf_event_release_kernel()
    4765  void *var = &child->ctx->refcount;   in perf_event_release_kernel()
    4767  list_del(&child->child_list);   in perf_event_release_kernel()
    4768  free_event(child);   in perf_event_release_kernel()
    [all …]
/kernel/cgroup/
D | cgroup.c |
     705  #define cgroup_for_each_live_child(child, cgrp) \   argument
     706  list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
     708  cgroup_is_dead(child); })) \
     800  struct cgroup *child = NULL;   in cgroup_update_populated() local
     808  if (!child) {   in cgroup_update_populated()
     811  if (cgroup_is_threaded(child))   in cgroup_update_populated()
     825  child = cgrp;   in cgroup_update_populated()
    3279  struct cgroup *cgrp, *child;   in cgroup_subtree_control_write() local
    3334  cgroup_for_each_live_child(child, cgrp) {   in cgroup_subtree_control_write()
    3335  if (child->subtree_control & (1 << ssid)) {   in cgroup_subtree_control_write()
    [all …]
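cgroup_for_each_live_child() shows a useful macro idiom: a for loop followed by an empty `if ... else`, so the caller-supplied loop body runs only for live children while the macro still behaves like a single statement. The skeleton of that idiom, with a simplified struct and predicate standing in for the kernel's list iteration and cgroup_is_dead():

    struct cgroup {
        int dead;
        struct cgroup *first_child, *next_sibling;
    };

    /* The `if (...) { } else` filter skips dead children and keeps
     * dangling-else behavior sane when the caller writes
     * `for_each_live_child(c, cg) use(c);`. */
    #define for_each_live_child(child, parent)              \
        for ((child) = (parent)->first_child; (child);      \
             (child) = (child)->next_sibling)               \
            if ((child)->dead) { } else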
D | legacy_freezer.c |
    260  struct freezer *child = css_freezer(pos);   in update_if_frozen() local
    262  if ((child->state & CGROUP_FREEZER_ONLINE) &&   in update_if_frozen()
    263  !(child->state & CGROUP_FROZEN)) {   in update_if_frozen()
/kernel/livepatch/
D | transition.c |
    612  void klp_copy_process(struct task_struct *child)   in klp_copy_process() argument
    614  child->patch_state = current->patch_state;   in klp_copy_process()
/kernel/irq/
D | irqdomain.c |
    1108  struct irq_data *child)   in irq_domain_insert_irq_data() argument
    1113  irq_data_get_node(child));   in irq_domain_insert_irq_data()
    1115  child->parent_data = irq_data;   in irq_domain_insert_irq_data()
    1116  irq_data->irq = child->irq;   in irq_domain_insert_irq_data()
    1117  irq_data->common = child->common;   in irq_domain_insert_irq_data()
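In hierarchical irqdomains each level of the hierarchy gets its own irq_data, chained through ->parent_data; irq_domain_insert_irq_data() allocates the next level up and copies the identifiers the child already carries. A trimmed-down model of that splice (allocation and fields simplified; the kernel version also records the domain and common data):

    #include <stdlib.h>

    struct irq_data {
        unsigned int irq;
        struct irq_data *parent_data;  /* next level up the hierarchy */
    };

    static struct irq_data *insert_irq_data(struct irq_data *child)
    {
        struct irq_data *d = calloc(1, sizeof(*d));

        if (!d)
            return NULL;
        child->parent_data = d;   /* new node becomes the child's parent */
        d->irq = child->irq;      /* same Linux irq number at every level */
        return d;
    }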
/kernel/trace/
D | trace_events.c |
    709  struct dentry *child;   in remove_event_file_dir() local
    713  list_for_each_entry(child, &dir->d_subdirs, d_child) {   in remove_event_file_dir()
    714  if (d_really_is_positive(child)) /* probably unneeded */   in remove_event_file_dir()
    715  d_inode(child)->i_private = NULL;   in remove_event_file_dir()
/kernel/locking/
D | lockdep.c |
    1425  static inline struct lock_list *get_lock_parent(struct lock_list *child)   in get_lock_parent() argument
    1427  return child->parent;   in get_lock_parent()
    1430  static inline int get_lock_depth(struct lock_list *child)   in get_lock_depth() argument
    1435  while ((parent = get_lock_parent(child))) {   in get_lock_depth()
    1436  child = parent;   in get_lock_depth()
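During a dependency-graph search, lockdep threads a ->parent pointer through the lock_list entries on the current path, so a node's depth is just the number of parent hops back to the search root. The pair of helpers, reconstructed with a trimmed struct:

    struct lock_list {
        struct lock_list *parent;   /* set while a search path is recorded */
    };

    static struct lock_list *get_lock_parent(struct lock_list *child)
    {
        return child->parent;
    }

    /* Count parent hops back to the root of the current search. */
    static int get_lock_depth(struct lock_list *child)
    {
        int depth = 0;
        struct lock_list *parent;

        while ((parent = get_lock_parent(child))) {
            child = parent;
            depth++;
        }
        return depth;
    }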