/kernel/
D | ptrace.c
    37  void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)   in __ptrace_link() (argument)
    39  BUG_ON(!list_empty(&child->ptrace_entry));   in __ptrace_link()
    40  list_add(&child->ptrace_entry, &new_parent->ptraced);   in __ptrace_link()
    41  child->parent = new_parent;   in __ptrace_link()
    72  void __ptrace_unlink(struct task_struct *child)   in __ptrace_unlink() (argument)
    74  BUG_ON(!child->ptrace);   in __ptrace_unlink()
    76  child->ptrace = 0;   in __ptrace_unlink()
    77  child->parent = child->real_parent;   in __ptrace_unlink()
    78  list_del_init(&child->ptrace_entry);   in __ptrace_unlink()
    80  spin_lock(&child->sighand->siglock);   in __ptrace_unlink()
    [all …]
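The ptrace.c hits are the attach/detach bookkeeping: __ptrace_link() puts the child on the tracer's ptraced list and re-points child->parent at the tracer, and __ptrace_unlink() reverses both steps and restores real_parent. Below is a minimal user-space sketch of that pattern, assuming nothing beyond the fragments above; the list helpers and the task layout are simplified stand-ins, not the kernel definitions.

#include <assert.h>
#include <stdio.h>

/* Tiny circular doubly linked list, just enough to mirror the calls seen
 * above (list_add / list_del_init / list_empty). */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }
static void list_add(struct list_head *n, struct list_head *head)
{
    n->next = head->next; n->prev = head;
    head->next->prev = n; head->next = n;
}
static void list_del_init(struct list_head *n)
{
    n->prev->next = n->next; n->next->prev = n->prev;
    INIT_LIST_HEAD(n);
}

/* Simplified task: only the fields the fragments above touch. */
struct task {
    struct task *real_parent, *parent;
    struct list_head ptrace_entry;   /* our slot on a tracer's ->ptraced list */
    struct list_head ptraced;        /* tasks we are tracing                  */
    int ptrace;
};

/* Attach: hang the child off the tracer's list and reparent it to the
 * tracer, following the two steps visible in __ptrace_link(). */
static void link_tracee(struct task *child, struct task *tracer)
{
    assert(list_empty(&child->ptrace_entry));        /* ~ BUG_ON(...) */
    list_add(&child->ptrace_entry, &tracer->ptraced);
    child->parent = tracer;
    child->ptrace = 1;
}

/* Detach: undo both steps and hand the child back to its real parent,
 * as __ptrace_unlink() does. */
static void unlink_tracee(struct task *child)
{
    assert(child->ptrace);
    child->ptrace = 0;
    child->parent = child->real_parent;
    list_del_init(&child->ptrace_entry);
}

int main(void)
{
    struct task parent = { 0 }, tracer = { 0 }, child = { 0 };

    INIT_LIST_HEAD(&parent.ptraced);  INIT_LIST_HEAD(&parent.ptrace_entry);
    INIT_LIST_HEAD(&tracer.ptraced);  INIT_LIST_HEAD(&tracer.ptrace_entry);
    INIT_LIST_HEAD(&child.ptraced);   INIT_LIST_HEAD(&child.ptrace_entry);
    child.real_parent = child.parent = &parent;

    link_tracee(&child, &tracer);
    printf("attached: parent is tracer? %d\n", child.parent == &tracer);
    unlink_tracee(&child);
    printf("detached: parent restored?  %d\n", child.parent == &parent);
    return 0;
}

The real __ptrace_unlink() additionally does signal-side cleanup under siglock (the spin_lock() hit at line 80), which the sketch leaves out.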
D | resource.c
    68   if (p->child)   in next_resource()
    69   return p->child;   in next_resource()
    92   for (p = p->child; p && l < *pos; p = r_next(m, p, &l))   in r_start()
    219  p = &root->child;   in __request_resource()
    239  p = &old->parent->child;   in __release_resource()
    259  p = r->child;   in __release_child_resources()
    260  r->child = NULL;   in __release_child_resources()
    359  for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {   in find_next_iomem_res()
    511  for (p = iomem_resource.child; p ; p = p->sibling) {   in region_is_ram()
    560  struct resource *this = root->child;   in __find_resource()
    [all …]
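resource.c walks a child/sibling tree rooted at iomem_resource: next_resource() descends into a child when there is one and otherwise moves to a sibling, climbing back toward the root when a subtree is exhausted. A self-contained sketch of that traversal order, on invented node types:

#include <stdio.h>

/* Child/sibling tree in the style of struct resource (parent, sibling and
 * child pointers only); next_res() mirrors the next_resource() pattern. */
struct res {
    const char *name;
    struct res *parent, *sibling, *child;
};

static struct res *next_res(struct res *p)
{
    if (p->child)                   /* go down first                 */
        return p->child;
    while (p && !p->sibling)        /* subtree done: climb until a   */
        p = p->parent;              /* sibling exists or we pass the */
    return p ? p->sibling : NULL;   /* root                          */
}

int main(void)
{
    struct res root = { "root" }, a = { "a" }, a1 = { "a1" }, b = { "b" };

    root.child = &a;
    a.parent = &root;  a.sibling = &b;  a.child = &a1;
    a1.parent = &a;
    b.parent = &root;

    /* Same outer loop shape as "for (p = iomem_resource.child; p; ...)". */
    for (struct res *p = root.child; p; p = next_res(p))
        puts(p->name);              /* a, a1, b */
    return 0;
}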
D | cgroup.c
    380   #define cgroup_for_each_live_child(child, cgrp) \   (argument)
    381   list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
    383   cgroup_is_dead(child); })) \
    2646  struct cgroup *cgrp, *child;   in cgroup_subtree_control_write() (local)
    2711  cgroup_for_each_live_child(child, cgrp) {   in cgroup_subtree_control_write()
    2714  if (!cgroup_css(child, ss))   in cgroup_subtree_control_write()
    2717  cgroup_get(child);   in cgroup_subtree_control_write()
    2718  prepare_to_wait(&child->offline_waitq, &wait,   in cgroup_subtree_control_write()
    2722  finish_wait(&child->offline_waitq, &wait);   in cgroup_subtree_control_write()
    2723  cgroup_put(child);   in cgroup_subtree_control_write()
    [all …]
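cgroup_for_each_live_child() is a filtered list walk: it wraps list_for_each_entry() and ends in an if/else so the statement the caller writes after the macro runs only for children that are not dead. A sketch of that macro trick with invented, simplified types (the kernel version also asserts that cgroup_mutex is held):

#include <stdio.h>

struct cg {
    const char *name;
    int dead;                   /* stand-in for cgroup_is_dead()       */
    struct cg *next_sibling;    /* stand-in for the self.sibling links */
    struct cg *children;        /* stand-in for self.children          */
};

/* The trailing "if (...) { } else" lets the macro bolt a filter onto the
 * loop: the caller's statement becomes the else-branch, so it executes
 * only when the child is alive. */
#define for_each_live_child(child, parent)                                       \
    for ((child) = (parent)->children; (child); (child) = (child)->next_sibling) \
        if ((child)->dead)                                                       \
            { }                                                                  \
        else

int main(void)
{
    struct cg c = { "c", 0, NULL, NULL };
    struct cg b = { "b", 1, &c,   NULL };   /* dead: must be skipped */
    struct cg a = { "a", 0, &b,   NULL };
    struct cg root = { "root", 0, NULL, &a };
    struct cg *child;

    for_each_live_child(child, &root)
        puts(child->name);                  /* prints a and c, not b */
    return 0;
}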
D | seccomp.c
    235  struct seccomp_filter *child)   in is_ancestor() (argument)
    240  for (; child; child = child->prev)   in is_ancestor()
    241  if (child == parent)   in is_ancestor()
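is_ancestor() walks the singly linked seccomp filter chain through ->prev and reports whether the candidate parent appears anywhere on it. A self-contained sketch; treating a NULL parent as everyone's ancestor is an assumption here, since that branch is not visible in the fragment:

#include <stdio.h>

struct filter {
    struct filter *prev;     /* older filter in the chain */
};

static int is_ancestor(struct filter *parent, struct filter *child)
{
    if (!parent)                          /* assumed convention, see above */
        return 1;
    for (; child; child = child->prev)    /* same walk as the fragment     */
        if (child == parent)
            return 1;
    return 0;
}

int main(void)
{
    struct filter a = { NULL }, b = { &a }, c = { &b }, other = { NULL };

    printf("%d %d %d\n",
           is_ancestor(&a, &c),        /* 1: a is on c's chain       */
           is_ancestor(&other, &c),    /* 0: other is not            */
           is_ancestor(NULL, &c));     /* 1: assumed NULL-root rule  */
    return 0;
}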
D | cgroup_freezer.c
    263  struct freezer *child = css_freezer(pos);   in update_if_frozen() (local)
    265  if ((child->state & CGROUP_FREEZER_ONLINE) &&   in update_if_frozen()
    266  !(child->state & CGROUP_FROZEN)) {   in update_if_frozen()
D | sysctl.c
    240   .child = kern_table,
    245   .child = vm_table,
    250   .child = fs_table,
    255   .child = debug_table,
    260   .child = dev_table,
    788   .child = random_table,
    793   .child = usermodehelper_table,
    1131  .child = key_sysctls,
    1718  .child = inotify_table,
    1725  .child = epoll_table,
    [all …]
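Every sysctl.c hit is a ".child = some_table" initializer: a ctl_table entry whose child pointer is set acts as a directory whose contents are another table (the utsname_sysctl.c and sysctl_binary.c hits below are the same idea). A minimal sketch of that directory-table shape and a lookup that descends one child link; the entry type and the tables here are invented for illustration:

#include <stdio.h>
#include <string.h>

struct ctl_entry {
    const char *name;
    const char *value;              /* leaf payload (stand-in for .data)  */
    const struct ctl_entry *child;  /* non-NULL: entry is a subdirectory  */
};

static const struct ctl_entry vm_table[] = {
    { .name = "overcommit_memory", .value = "0" },
    { 0 }                           /* NULL-name terminator, kernel style */
};

static const struct ctl_entry root_table[] = {
    { .name = "vm", .child = vm_table },
    { 0 }
};

/* Resolve "dir/name" by following the directory entry's .child pointer. */
static const char *ctl_lookup(const struct ctl_entry *t,
                              const char *dir, const char *name)
{
    for (; t->name; t++) {
        if (!t->child || strcmp(t->name, dir) != 0)
            continue;
        for (const struct ctl_entry *c = t->child; c->name; c++)
            if (strcmp(c->name, name) == 0)
                return c->value;
    }
    return NULL;
}

int main(void)
{
    printf("vm/overcommit_memory = %s\n",
           ctl_lookup(root_table, "vm", "overcommit_memory"));   /* "0" */
    return 0;
}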
D | utsname_sysctl.c
    114  .child = uts_kern_table,
D | fork.c
    799  static int wait_for_vfork_done(struct task_struct *child,   in wait_for_vfork_done() (argument)
    809  task_lock(child);   in wait_for_vfork_done()
    810  child->vfork_done = NULL;   in wait_for_vfork_done()
    811  task_unlock(child);   in wait_for_vfork_done()
    814  put_task_struct(child);   in wait_for_vfork_done()
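wait_for_vfork_done() blocks until the child completes its vfork handshake, then clears child->vfork_done under the child's task lock and drops the reference it held (the put_task_struct() hit). A rough user-space analog of that handshake using pthreads; every type and helper is an invented stand-in, and the real code also supports killable waits, which is the main reason the pointer must be cleared under the lock there:

#include <pthread.h>
#include <stdio.h>

struct completion {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int done;
};

struct task {
    pthread_mutex_t alloc_lock;       /* ~ task_lock()/task_unlock()  */
    struct completion *vfork_done;    /* non-NULL while someone waits */
};

static void complete(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = 1;
    pthread_cond_signal(&c->cond);
    pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    while (!c->done)
        pthread_cond_wait(&c->cond, &c->lock);
    pthread_mutex_unlock(&c->lock);
}

static void wait_for_vfork_done(struct task *child, struct completion *vfork)
{
    wait_for_completion(vfork);

    pthread_mutex_lock(&child->alloc_lock);   /* ~ task_lock(child)       */
    child->vfork_done = NULL;                 /* child must not touch the */
    pthread_mutex_unlock(&child->alloc_lock); /* completion after this    */
}

static void *child_fn(void *arg)
{
    struct task *self = arg;

    pthread_mutex_lock(&self->alloc_lock);
    if (self->vfork_done)                     /* only if still wanted */
        complete(self->vfork_done);
    pthread_mutex_unlock(&self->alloc_lock);
    return NULL;
}

int main(void)
{
    struct completion done = { PTHREAD_MUTEX_INITIALIZER,
                               PTHREAD_COND_INITIALIZER, 0 };
    struct task child = { PTHREAD_MUTEX_INITIALIZER, &done };
    pthread_t t;

    pthread_create(&t, NULL, child_fn, &child);
    wait_for_vfork_done(&child, &done);
    pthread_join(&t, NULL);
    puts("vfork_done handshake complete");
    return 0;
}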
D | sysctl_binary.c
    45    const struct bin_table *child;   (member)
    1265  if (table->child) {   in get_sysctl()
    1267  table = table->child;   in get_sysctl()
/kernel/sched/
D | core.c
    803   struct task_group *parent, *child;   in walk_tg_tree_from() (local)
    812   list_for_each_entry_rcu(child, &parent->children, siblings) {   in walk_tg_tree_from()
    813   parent = child;   in walk_tg_tree_from()
    823   child = parent;   in walk_tg_tree_from()
    5253  .child = sd_ctl_dir,
    5277  if (entry->child)   in sd_free_ctl_entry()
    5278  sd_free_ctl_entry(&entry->child);   in sd_free_ctl_entry()
    5340  table->child = sd_alloc_ctl_energy_table((struct sched_group_energy *)sg->sge);   in sd_alloc_ctl_group_table()
    5404  entry->child = sd_alloc_ctl_group_table(sg);   in sd_alloc_ctl_domain_table()
    5430  entry->child = sd_alloc_ctl_domain_table(sd);   in sd_alloc_ctl_cpu_table()
    [all …]
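The walk_tg_tree_from() hits belong to an iterative pre-order walk of the task_group tree: a "down" visitor runs when a group is first reached and an "up" visitor when its subtree is finished, with the children list and parent pointers driving a goto-based loop. The sketch below shows the same visitor idea recursively on invented, simplified types (the kernel version is iterative and RCU-aware):

#include <stdio.h>

struct group {
    const char *name;
    struct group *children;    /* first child  */
    struct group *sibling;     /* next sibling */
};

typedef int (*tg_visitor)(struct group *g, void *data);

/* Visit "from", then each child subtree, then call up() on the way out.
 * A non-zero return from either callback aborts the walk. */
static int walk_tree_from(struct group *from, tg_visitor down,
                          tg_visitor up, void *data)
{
    int ret = down(from, data);

    if (ret)
        return ret;
    for (struct group *child = from->children; child; child = child->sibling) {
        ret = walk_tree_from(child, down, up, data);
        if (ret)
            return ret;
    }
    return up(from, data);
}

static int print_down(struct group *g, void *d) { (void)d; printf("down %s\n", g->name); return 0; }
static int print_up(struct group *g, void *d)   { (void)d; printf("up   %s\n", g->name); return 0; }

int main(void)
{
    struct group leaf1 = { "leaf1", NULL, NULL };
    struct group leaf2 = { "leaf2", NULL, NULL };
    struct group root  = { "root",  &leaf1, NULL };

    leaf1.sibling = &leaf2;
    return walk_tree_from(&root, print_down, print_up, NULL);
}

The remaining hits in this file (.child = sd_ctl_dir and the sd_alloc_ctl_*() calls) build nested sysctl tables, the same .child directory pattern sketched after the sysctl.c entry above.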
D | fair.c
    4792  if (sd->child && group_first_cpu(sg) != cpu)   in sched_group_energy()
    4832  if (!sd->child)   in sched_group_energy()
    5736  sd = sd->child;   in select_task_rq_fair()
    5742  sd = sd->child;   in select_task_rq_fair()
    5749  sd = sd->child;   in select_task_rq_fair()
    6973  struct sched_domain *child = sd->child;   in update_group_capacity() (local)
    6982  if (!child) {   in update_group_capacity()
    6990  if (child->flags & SD_OVERLAP) {   in update_group_capacity()
    7026  group = child->groups;   in update_group_capacity()
    7033  } while (group != child->groups);   in update_group_capacity()
    [all …]
D | sched.h
    778  #define for_each_lower_domain(sd) for (; sd; sd = sd->child)
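for_each_lower_domain() just keeps following sd->child until the bottom of the sched_domain hierarchy, the same descent seen in the fair.c "sd = sd->child" hits above. A self-contained illustration with an invented three-level hierarchy:

#include <stdio.h>

struct domain {
    const char *name;
    struct domain *child;     /* next lower level, NULL at the bottom */
};

/* Same shape as the macro above: no init clause, the caller supplies the
 * starting domain and the loop descends until ->child runs out. */
#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

int main(void)
{
    struct domain core = { "core",    NULL  };
    struct domain pkg  = { "package", &core };
    struct domain numa = { "numa",    &pkg  };

    struct domain *sd = &numa;
    for_each_lower_domain(sd)
        puts(sd->name);       /* numa, package, core */
    return 0;
}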
/kernel/events/
D | core.c
    3647  struct perf_event *child;   in perf_event_read_value() (local)
    3660  list_for_each_entry(child, &event->child_list, child_list) {   in perf_event_read_value()
    3661  total += perf_event_read(child);   in perf_event_read_value()
    3662  *enabled += child->total_time_enabled;   in perf_event_read_value()
    3663  *running += child->total_time_running;   in perf_event_read_value()
    3835  struct perf_event *child;   in perf_event_for_each_child() (local)
    3841  list_for_each_entry(child, &event->child_list, child_list)   in perf_event_for_each_child()
    3842  func(child);   in perf_event_for_each_child()
    7877  struct task_struct *child)   in sync_child_event() (argument)
    7883  perf_event_read_event(child_event, child);   in sync_child_event()
    [all …]
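perf_event_read_value() sums the parent event's own count and times with those of every child event on its child_list, and perf_event_for_each_child() applies a callback over the same list. A sketch of that aggregation over a simplified, singly linked child list (the real code also takes a mutex and re-reads live children first, which is assumed away here):

#include <stdio.h>

struct event {
    unsigned long long count, time_enabled, time_running;
    struct event *next_child;        /* stand-in for the child_list links */
};

static unsigned long long read_value(struct event *ev,
                                     unsigned long long *enabled,
                                     unsigned long long *running)
{
    unsigned long long total = ev->count;

    *enabled = ev->time_enabled;
    *running = ev->time_running;

    /* Fold every child's contribution into the parent's totals. */
    for (struct event *child = ev->next_child; child; child = child->next_child) {
        total    += child->count;
        *enabled += child->time_enabled;
        *running += child->time_running;
    }
    return total;
}

int main(void)
{
    struct event c2     = {   5, 10, 10, NULL };
    struct event c1     = {   7, 10,  8, &c2  };
    struct event parent = { 100, 20, 20, &c1  };
    unsigned long long en, ru;

    unsigned long long total = read_value(&parent, &en, &ru);
    printf("total=%llu enabled=%llu running=%llu\n", total, en, ru);
    /* total=112 enabled=40 running=38 */
    return 0;
}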
/kernel/trace/
D | trace_events.c
    460  struct dentry *child;   in remove_event_file_dir() (local)
    464  list_for_each_entry(child, &dir->d_subdirs, d_child) {   in remove_event_file_dir()
    465  if (child->d_inode) /* probably unneeded */   in remove_event_file_dir()
    466  child->d_inode->i_private = NULL;   in remove_event_file_dir()
/kernel/locking/
D | lockdep.c
    968  static inline struct lock_list *get_lock_parent(struct lock_list *child)   in get_lock_parent() (argument)
    970  return child->parent;   in get_lock_parent()
    973  static inline int get_lock_depth(struct lock_list *child)   in get_lock_depth() (argument)
    978  while ((parent = get_lock_parent(child))) {   in get_lock_depth()
    979  child = parent;   in get_lock_depth()
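get_lock_depth() counts how many ->parent links separate a lock_list entry from the root entry, using get_lock_parent() for each step. The same idea on a minimal stand-in node type:

#include <stdio.h>

struct node {
    struct node *parent;      /* NULL at the root */
};

static struct node *get_parent(struct node *child)
{
    return child->parent;
}

/* Depth = number of parent hops to the root, as in get_lock_depth(). */
static int get_depth(struct node *child)
{
    int depth = 0;
    struct node *parent;

    while ((parent = get_parent(child)) != NULL) {
        child = parent;
        depth++;
    }
    return depth;
}

int main(void)
{
    struct node root = { NULL }, mid = { &root }, leaf = { &mid };

    printf("%d %d %d\n",
           get_depth(&root), get_depth(&mid), get_depth(&leaf));   /* 0 1 2 */
    return 0;
}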