Search hits for "parent" under kernel/, grouped by file (the numbers are line
numbers inside each listed file; entries ending in [all …] are truncated):

/kernel/

audit_watch.c
      53  struct audit_parent *parent;  /* associated parent */  [member]
      70  static void audit_free_parent(struct audit_parent *parent)  [in audit_free_parent(), argument]
      72  WARN_ON(!list_empty(&parent->watches));  [in audit_free_parent()]
      73  kfree(parent);  [in audit_free_parent()]
      78  struct audit_parent *parent;  [in audit_watch_free_mark(), local]
      80  parent = container_of(entry, struct audit_parent, mark);  [in audit_watch_free_mark()]
      81  audit_free_parent(parent);  [in audit_watch_free_mark()]
      84  static void audit_get_parent(struct audit_parent *parent)  [in audit_get_parent(), argument]
      86  if (likely(parent))  [in audit_get_parent()]
      87  fsnotify_get_mark(&parent->mark);  [in audit_get_parent()]
      [all …]
resource.c
      71  while (!p->sibling && p->parent)  [in next_resource()]
      72  p = p->parent;  [in next_resource()]
     112  for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)  [in r_show()]
     113  if (p->parent == root)  [in r_show()]
     235  new->parent = root;  [in __request_resource()]
     249  p = &old->parent->child;  [in __release_resource()]
     259  chd->parent = tmp->parent;  [in __release_resource()]
     266  old->parent = NULL;  [in __release_resource()]
     285  tmp->parent = NULL;  [in __release_child_resources()]
     746  if ( new->parent ) {  [in allocate_resource()]
      [all …]
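The next_resource() hits above show the usual walk over the resource tree's
child/sibling/parent links: descend into a child if there is one, otherwise
climb toward the root until a sibling turns up. A self-contained sketch of
that traversal, using a hypothetical struct node rather than struct resource:

    #include <stdio.h>

    /* Hypothetical node carrying the same child/sibling/parent links that
     * struct resource uses; next_node() mirrors next_resource(). */
    struct node {
        struct node *parent, *sibling, *child;
        const char *name;
    };

    static struct node *next_node(struct node *p)
    {
        if (p->child)
            return p->child;              /* descend first */
        while (!p->sibling && p->parent)
            p = p->parent;                /* climb until a sibling exists */
        return p->sibling;                /* NULL when the walk is done */
    }

    int main(void)
    {
        struct node root = { .name = "root" };
        struct node a    = { .parent = &root, .name = "a" };
        struct node b    = { .parent = &root, .name = "b" };
        struct node a1   = { .parent = &a,    .name = "a1" };

        root.child = &a;
        a.sibling  = &b;
        a.child    = &a1;

        for (struct node *p = root.child; p; p = next_node(p))
            printf("%s\n", p->name);      /* prints a, a1, b */
        return 0;
    }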
user_namespace.c
     118  ns->parent = parent_ns;  [in create_user_ns()]
     177  struct user_namespace *parent, *ns =  [in free_user_ns(), local]
     182  parent = ns->parent;  [in free_user_ns()]
     190  ns = parent;  [in free_user_ns()]
     191  } while (atomic_dec_and_test(&parent->count));  [in free_user_ns()]
     484  if ((lower_ns == ns) && lower_ns->parent)  [in uid_m_show()]
     485  lower_ns = lower_ns->parent;  [in uid_m_show()]
     505  if ((lower_ns == ns) && lower_ns->parent)  [in gid_m_show()]
     506  lower_ns = lower_ns->parent;  [in gid_m_show()]
     526  if ((lower_ns == ns) && lower_ns->parent)  [in projid_m_show()]
      [all …]
pid_namespace.c
     128  ns->parent = get_pid_ns(parent_pid_ns);  [in create_pid_namespace()]
     192  struct pid_namespace *parent;  [in put_pid_ns(), local]
     195  parent = ns->parent;  [in put_pid_ns()]
     198  ns = parent;  [in put_pid_ns()]
     401  ancestor = ancestor->parent;  [in pidns_install()]
     416  pid_ns = p = to_pid_ns(ns)->parent;  [in pidns_get_parent()]
     422  p = p->parent;  [in pidns_get_parent()]
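The create_pid_namespace()/put_pid_ns() lines, like free_user_ns() above,
share one pattern: a child namespace pins its parent with a reference at
creation, so dropping the last reference on a child can cascade up the
chain. A simplified userspace analogue of that walk-up-and-release loop,
with a hypothetical struct ns standing in for the kernel namespace types:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical refcounted namespace with a parent link. */
    struct ns {
        int refcount;
        struct ns *parent;
        const char *name;
    };

    /* Drop one reference; when it hits zero, free the namespace and
     * cascade the drop to the parent it was holding a reference on. */
    static void ns_put(struct ns *ns)
    {
        while (ns && --ns->refcount == 0) {
            struct ns *parent = ns->parent;

            printf("freeing %s\n", ns->name);
            free(ns);
            ns = parent;   /* the child held a reference on its parent */
        }
    }

    int main(void)
    {
        struct ns *root  = malloc(sizeof(*root));
        struct ns *child = malloc(sizeof(*child));

        *root  = (struct ns){ .refcount = 2, .parent = NULL, .name = "root" };
        *child = (struct ns){ .refcount = 1, .parent = root, .name = "child" };

        ns_put(child);   /* frees child, then drops the ref it held on root */
        ns_put(root);    /* the final reference on root goes away here */
        return 0;
    }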
cpuset.c
     150  return css_cs(cs->css.parent);  [in parent_cs()]
     890  struct cpuset *parent = parent_cs(cp);  [in update_cpumasks_hier(), local]
     892  cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus);  [in update_cpumasks_hier()]
     900  cpumask_copy(new_cpus, parent->effective_cpus);  [in update_cpumasks_hier()]
    1161  struct cpuset *parent = parent_cs(cp);  [in update_nodemasks_hier(), local]
    1163  nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);  [in update_nodemasks_hier()]
    1171  *new_mems = parent->effective_mems;  [in update_nodemasks_hier()]
    1985  struct cpuset *parent = parent_cs(cs);  [in cpuset_css_online(), local]
    1989  if (!parent)  [in cpuset_css_online()]
    1995  if (is_spread_page(parent))  [in cpuset_css_online()]
      [all …]
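update_cpumasks_hier() and update_nodemasks_hier() both restrict a child's
configured mask by the parent's effective mask, and fall back to the
parent's effective mask when the intersection comes out empty (the
cpumask_copy()/assignment lines). A rough standalone sketch of that rule,
with a uint64_t standing in for a cpumask; the real code also distinguishes
the legacy hierarchy and runs under the cpuset locks, none of which is
shown here:

    #include <stdio.h>
    #include <stdint.h>

    /* Child's effective CPUs = configured CPUs restricted to the parent's
     * effective set; an empty result inherits the parent's effective set. */
    static uint64_t effective_cpus(uint64_t cpus_allowed, uint64_t parent_effective)
    {
        uint64_t eff = cpus_allowed & parent_effective;

        if (eff == 0)                 /* nothing usable configured here */
            eff = parent_effective;   /* inherit the parent's effective mask */
        return eff;
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)effective_cpus(0x0f, 0x3c)); /* 0xc  */
        printf("%#llx\n", (unsigned long long)effective_cpus(0xc0, 0x3c)); /* 0x3c */
        return 0;
    }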
cgroup_freezer.c
      62  return css_freezer(freezer->css.parent);  [in parent_freezer()]
     108  struct freezer *parent = parent_freezer(freezer);  [in freezer_css_online(), local]
     114  if (parent && (parent->state & CGROUP_FREEZING)) {  [in freezer_css_online()]
     398  struct freezer *parent = parent_freezer(pos_f);  [in freezer_change_state(), local]
     409  parent->state & CGROUP_FREEZING,  [in freezer_change_state()]
cgroup.c
     366  struct cgroup_subsys_state *parent_css = cgrp->self.parent;  [in cgroup_parent()]
     376  struct cgroup *parent = cgroup_parent(cgrp);  [in cgroup_control(), local]
     379  if (parent)  [in cgroup_control()]
     380  return parent->subtree_control;  [in cgroup_control()]
     391  struct cgroup *parent = cgroup_parent(cgrp);  [in cgroup_ss_mask(), local]
     393  if (parent)  [in cgroup_ss_mask()]
     394  return parent->subtree_ss_mask;  [in cgroup_ss_mask()]
     503  struct cgroup *cgrp = of->kn->parent->priv;  [in of_css()]
    1425  cgrp = kn->parent->priv;  [in cgroup_kn_unlock()]
    1458  cgrp = kn->parent->priv;  [in cgroup_kn_lock_live()]
      [all …]
exit.c
     351  kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)  [in kill_orphaned_pgrp(), argument]
     356  if (!parent)  [in kill_orphaned_pgrp()]
     360  parent = tsk->real_parent;  [in kill_orphaned_pgrp()]
     367  if (task_pgrp(parent) != pgrp &&  [in kill_orphaned_pgrp()]
     368  task_session(parent) == task_session(tsk) &&  [in kill_orphaned_pgrp()]
     649  BUG_ON((!t->ptrace) != (t->parent == father));  [in forget_original_parent()]
     651  t->parent = t->real_parent;  [in forget_original_parent()]
    1527  if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)  [in child_wait_callback()]
    1533  void __wake_up_parent(struct task_struct *p, struct task_struct *parent)  [in __wake_up_parent(), argument]
    1535  __wake_up_sync_key(&parent->signal->wait_chldexit,  [in __wake_up_parent()]
relay.c
     304  struct dentry *parent,  [in create_buf_file_default_callback(), argument]
     427  dentry = chan->cb->create_buf_file(tmpname, chan->parent,  [in relay_create_buf_file()]
     561  struct dentry *parent,  [in relay_open(), argument]
     585  chan->parent = parent;  [in relay_open()]
     648  struct dentry *parent)  [in relay_late_setup_files(), argument]
     669  chan->parent = parent;  [in relay_late_setup_files()]
signal.c
    1603  if (tsk->parent_exec_id != tsk->parent->self_exec_id)  [in do_notify_parent()]
    1621  info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));  [in do_notify_parent()]
    1622  info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),  [in do_notify_parent()]
    1640  psig = tsk->parent->sighand;  [in do_notify_parent()]
    1665  __group_send_sig_info(sig, &info, tsk->parent);  [in do_notify_parent()]
    1666  __wake_up_parent(tsk, tsk->parent);  [in do_notify_parent()]
    1690  struct task_struct *parent;  [in do_notify_parent_cldstop(), local]
    1695  parent = tsk->parent;  [in do_notify_parent_cldstop()]
    1698  parent = tsk->real_parent;  [in do_notify_parent_cldstop()]
    1707  info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));  [in do_notify_parent_cldstop()]
      [all …]
ptrace.c
      46  (current != tsk->parent) ||  [in ptrace_access_vm()]
      65  child->parent = new_parent;  [in __ptrace_link()]
     117  child->parent = child->real_parent;  [in __ptrace_unlink()]
     188  WARN_ON(!task->ptrace || task->parent != current);  [in ptrace_unfreeze_traced()]
     233  if (child->ptrace && child->parent == current) {  [in ptrace_check_attach()]
     457  ret = security_ptrace_traceme(current->parent);  [in ptrace_traceme()]
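Together with the exit.c and signal.c hits above, __ptrace_link() and
__ptrace_unlink() show why task_struct carries two parent pointers:
->parent is retargeted at the tracer while a task is ptraced, and
->real_parent keeps the fork-time parent so it can be restored on detach.
A toy userspace model of that re-parenting (hypothetical struct task, not
the kernel's):

    #include <stdio.h>

    /* Hypothetical mini-model of the re-parenting done by __ptrace_link()
     * and __ptrace_unlink(). */
    struct task {
        struct task *parent;        /* tracer while ptraced, else == real_parent */
        struct task *real_parent;   /* the fork-time parent */
        const char *comm;
    };

    static void ptrace_link(struct task *child, struct task *tracer)
    {
        child->parent = tracer;              /* notifications now go to the tracer */
    }

    static void ptrace_unlink(struct task *child)
    {
        child->parent = child->real_parent;  /* restore the original parent */
    }

    int main(void)
    {
        struct task init = { .comm = "init" };
        struct task dbg  = { .comm = "debugger" };
        struct task app  = { .parent = &init, .real_parent = &init, .comm = "app" };

        ptrace_link(&app, &dbg);
        printf("traced: reports to %s\n", app.parent->comm);
        ptrace_unlink(&app);
        printf("detached: reports to %s\n", app.parent->comm);
        return 0;
    }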
/kernel/gcov/

fs.c
      54  struct gcov_node *parent;  [member]
     366  static void add_links(struct gcov_node *node, struct dentry *parent)  [in add_links(), argument]
     388  parent, target);  [in add_links()]
     413  const char *name, struct gcov_node *parent)  [in init_node(), argument]
     422  node->parent = parent;  [in init_node()]
     431  static struct gcov_node *new_node(struct gcov_node *parent,  [in new_node(), argument]
     445  init_node(node, info, name, parent);  [in new_node()]
     449  parent->dentry, node, &gcov_data_fops);  [in new_node()]
     451  node->dentry = debugfs_create_dir(node->name, parent->dentry);  [in new_node()]
     458  add_links(node, parent->dentry);  [in new_node()]
      [all …]
/kernel/bpf/

cgroup.c
      45  void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)  [in cgroup_bpf_inherit(), argument]
      52  e = rcu_dereference_protected(parent->bpf.effective[type],  [in cgroup_bpf_inherit()]
      55  cgrp->bpf.disallow_override[type] = parent->bpf.disallow_override[type];  [in cgroup_bpf_inherit()]
      86  int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,  [in __cgroup_bpf_update(), argument]
      94  if (parent) {  [in __cgroup_bpf_update()]
      95  overridable = !parent->bpf.disallow_override[type];  [in __cgroup_bpf_update()]
      96  effective = rcu_dereference_protected(parent->bpf.effective[type],  [in __cgroup_bpf_update()]
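cgroup_bpf_inherit() copies the parent's effective program pointers (and
its disallow_override flags) into a newly created cgroup, so a cgroup with
nothing attached runs whatever its parent runs. A rough standalone model of
that effective-program rule; the real code is per attach type, protected by
RCU, and enforces the override flag, none of which is modelled here:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical cgroup with plain strings standing in for BPF programs. */
    struct cgrp {
        const char *own_prog;        /* program attached here, or NULL */
        const char *effective_prog;  /* what actually runs for this cgroup */
    };

    /* A child with no program of its own inherits the parent's effective
     * program; attaching one overrides it (when overriding is allowed). */
    static void inherit(struct cgrp *child, const struct cgrp *parent)
    {
        child->effective_prog = child->own_prog ? child->own_prog
                                                : parent->effective_prog;
    }

    int main(void)
    {
        struct cgrp root  = { .own_prog = "root-filter" };
        struct cgrp child = { .own_prog = NULL };

        root.effective_prog = root.own_prog;
        inherit(&child, &root);
        printf("child runs: %s\n", child.effective_prog);  /* root-filter */
        return 0;
    }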
/kernel/irq/

irqdomain.c
     921  struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,  [in irq_domain_create_hierarchy(), argument]
     935  domain->parent = parent;  [in irq_domain_create_hierarchy()]
    1030  struct irq_domain *parent;  [in irq_domain_alloc_irq_data(), local]
    1038  for (parent = domain->parent; parent; parent = parent->parent) {  [in irq_domain_alloc_irq_data()]
    1039  irq_data = irq_domain_insert_irq_data(parent, irq_data);  [in irq_domain_alloc_irq_data()]
    1178  BUG_ON(!domain->parent);  [in irq_domain_free_irqs_recursive()]
    1179  irq_domain_free_irqs_recursive(domain->parent, irq_base,  [in irq_domain_free_irqs_recursive()]
    1189  struct irq_domain *parent = domain->parent;  [in irq_domain_alloc_irqs_recursive(), local]
    1192  BUG_ON(recursive && !parent);  [in irq_domain_alloc_irqs_recursive()]
    1194  ret = irq_domain_alloc_irqs_recursive(parent, irq_base,  [in irq_domain_alloc_irqs_recursive()]
      [all …]
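irq_domain_alloc_irq_data() walks from the leaf domain to the root along
->parent, so every level of a hierarchical interrupt-controller stack gets
its own per-IRQ data. A toy model of that walk; the domain names below are
made up for illustration:

    #include <stdio.h>

    /* Hypothetical chain of hierarchical domains, leaf to root. */
    struct domain {
        struct domain *parent;
        const char *name;
    };

    int main(void)
    {
        struct domain root = { .parent = NULL,  .name = "root controller" };
        struct domain mid  = { .parent = &root, .name = "MSI layer" };
        struct domain leaf = { .parent = &mid,  .name = "device domain" };

        /* Same shape as: for (parent = domain->parent; parent; ...) */
        for (struct domain *d = &leaf; d; d = d->parent)
            printf("allocate irq_data for %s\n", d->name);
        return 0;
    }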
msi.c
      92  struct irq_data *parent = irq_data->parent_data;  [in msi_domain_set_affinity(), local]
      96  ret = parent->chip->irq_set_affinity(parent, mask, force);  [in msi_domain_set_affinity()]
     134  if (domain->parent) {  [in msi_domain_alloc()]
     268  struct irq_domain *parent)  [in msi_create_irq_domain(), argument]
     275  return irq_domain_create_hierarchy(parent, 0, 0, fwnode,  [in msi_create_irq_domain()]
/kernel/locking/

rtmutex.c
     248  struct rb_node *parent = NULL;  [in rt_mutex_enqueue(), local]
     253  parent = *link;  [in rt_mutex_enqueue()]
     254  entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);  [in rt_mutex_enqueue()]
     256  link = &parent->rb_left;  [in rt_mutex_enqueue()]
     258  link = &parent->rb_right;  [in rt_mutex_enqueue()]
     266  rb_link_node(&waiter->tree_entry, parent, link);  [in rt_mutex_enqueue()]
     287  struct rb_node *parent = NULL;  [in rt_mutex_enqueue_pi(), local]
     292  parent = *link;  [in rt_mutex_enqueue_pi()]
     293  entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);  [in rt_mutex_enqueue_pi()]
     295  link = &parent->rb_left;  [in rt_mutex_enqueue_pi()]
      [all …]
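rt_mutex_enqueue() (and the deadline.c, wakelock.c and trace_stat.c hits
further down) all use the kernel's standard rbtree insertion idiom: descend
from the root, remembering the last node visited as "parent" and the child
slot as "link", then splice the new node in with rb_link_node() and
rebalance. A self-contained sketch of the descent, using a plain unbalanced
binary search tree so it builds outside the kernel; there is no equivalent
of rb_insert_color() here:

    #include <stdio.h>

    /* Hypothetical waiter keyed by deadline, as in the rt_mutex waiter tree. */
    struct waiter {
        long deadline;
        struct waiter *left, *right;
    };

    static void enqueue(struct waiter **root, struct waiter *w)
    {
        struct waiter **link = root, *parent = NULL;

        while (*link) {
            parent = *link;                   /* remember where we came from */
            if (w->deadline < parent->deadline)
                link = &parent->left;         /* earlier deadline goes left */
            else
                link = &parent->right;
        }
        *link = w;  /* splice in; the kernel then does rb_link_node()/rb_insert_color() */
    }

    int main(void)
    {
        struct waiter a = { .deadline = 30 }, b = { .deadline = 10 }, c = { .deadline = 20 };
        struct waiter *root = NULL;

        enqueue(&root, &a);
        enqueue(&root, &b);
        enqueue(&root, &c);
        printf("root=%ld left=%ld left.right=%ld\n",
               root->deadline, root->left->deadline, root->left->right->deadline);
        return 0;
    }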
lockdep.c
     934  struct lock_list *parent)  [in mark_lock_accessed(), argument]
     940  lock->parent = parent;  [in mark_lock_accessed()]
     955  return child->parent;  [in get_lock_parent()]
     961  struct lock_list *parent;  [in get_lock_depth(), local]
     963  while ((parent = get_lock_parent(child))) {  [in get_lock_depth()]
     964  child = parent;  [in get_lock_depth()]
    1087  struct lock_class *parent = prt->class;  [in print_circular_lock_scenario(), local]
    1102  if (parent != source) {  [in print_circular_lock_scenario()]
    1106  __print_lock_name(parent);  [in print_circular_lock_scenario()]
    1119  __print_lock_name(parent);  [in print_circular_lock_scenario()]
      [all …]
/kernel/sched/

core.c
     717  struct task_group *parent, *child;  [in walk_tg_tree_from(), local]
     720  parent = from;  [in walk_tg_tree_from()]
     723  ret = (*down)(parent, data);  [in walk_tg_tree_from()]
     726  list_for_each_entry_rcu(child, &parent->children, siblings) {  [in walk_tg_tree_from()]
     727  parent = child;  [in walk_tg_tree_from()]
     733  ret = (*up)(parent, data);  [in walk_tg_tree_from()]
     734  if (ret || parent == from)  [in walk_tg_tree_from()]
     737  child = parent;  [in walk_tg_tree_from()]
     738  parent = parent->parent;  [in walk_tg_tree_from()]
     739  if (parent)  [in walk_tg_tree_from()]
      [all …]
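walk_tg_tree_from() visits the task-group tree with a "down" callback
before a group's children and an "up" callback after them, climbing back
via ->parent once a subtree is done. The kernel does this iteratively under
RCU; a recursive sketch conveys the same visit order (hypothetical struct
group with a fixed-size, NULL-terminated child array):

    #include <stdio.h>

    struct group {
        const char *name;
        struct group *children[4];   /* NULL-terminated for simplicity */
    };

    typedef int (*visit_fn)(struct group *g, void *data);

    /* Call down() before the children and up() after them; stop on error. */
    static int walk(struct group *g, visit_fn down, visit_fn up, void *data)
    {
        int ret = down(g, data);
        if (ret)
            return ret;
        for (int i = 0; g->children[i]; i++) {
            ret = walk(g->children[i], down, up, data);
            if (ret)
                return ret;
        }
        return up(g, data);
    }

    static int print_down(struct group *g, void *d) { (void)d; printf("down %s\n", g->name); return 0; }
    static int print_up(struct group *g, void *d)   { (void)d; printf("up   %s\n", g->name); return 0; }

    int main(void)
    {
        struct group leaf = { .name = "leaf" };
        struct group root = { .name = "root", .children = { &leaf } };

        return walk(&root, print_down, print_up, NULL);
    }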
deadline.c
     163  struct rb_node *parent = NULL;  [in enqueue_pushable_dl_task(), local]
     170  parent = *link;  [in enqueue_pushable_dl_task()]
     171  entry = rb_entry(parent, struct task_struct,  [in enqueue_pushable_dl_task()]
     174  link = &parent->rb_left;  [in enqueue_pushable_dl_task()]
     176  link = &parent->rb_right;  [in enqueue_pushable_dl_task()]
     186  rb_link_node(&p->pushable_dl_tasks, parent, link);  [in enqueue_pushable_dl_task()]
     977  struct rb_node *parent = NULL;  [in __enqueue_dl_entity(), local]
     984  parent = *link;  [in __enqueue_dl_entity()]
     985  entry = rb_entry(parent, struct sched_dl_entity, rb_node);  [in __enqueue_dl_entity()]
     987  link = &parent->rb_left;  [in __enqueue_dl_entity()]
      [all …]
sched.h
     293  struct task_group *parent;  [member]
     338  extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
     343  struct sched_entity *parent);
     351  extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
     354  struct sched_rt_entity *parent);
     356  extern struct task_group *sched_create_group(struct task_group *parent);
     358  struct task_group *parent);
     894  __sd; __sd = __sd->parent)
    1050  p->se.parent = tg->se[cpu];  [in set_task_rq()]
    1055  p->rt.parent = tg->rt_se[cpu];  [in set_task_rq()]
cpudeadline.c
      19  static inline int parent(int i)  [in parent(), function]
      87  p = parent(idx);  [in cpudl_heapify_up()]
     104  if (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl,  [in cpudl_heapify()]
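cpudeadline.c keeps its per-CPU deadline heap in a flat array, so parent()
is pure index arithmetic rather than pointer chasing. The function body is
not part of the hits above; the sketch below assumes the conventional
0-based binary-heap formula:

    #include <stdio.h>

    /* Assumed 0-based heap layout: children of i live at 2*i+1 and 2*i+2,
     * so the parent of i is (i - 1) / 2. */
    static inline int parent(int i)
    {
        return (i - 1) >> 1;
    }

    int main(void)
    {
        for (int i = 1; i <= 6; i++)
            printf("parent(%d) = %d\n", i, parent(i));
        return 0;
    }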
/kernel/power/

wakelock.c
     153  struct rb_node *parent = *node;  [in wakelock_lookup_add(), local]
     159  parent = *node;  [in wakelock_lookup_add()]
     191  rb_link_node(&wl->node, parent, node);  [in wakelock_lookup_add()]
/kernel/rcu/

tree_exp.h
      91  rnp_up = rnp->parent;  [in sync_exp_reset_tree_hotplug()]
     102  rnp_up = rnp_up->parent;  [in sync_exp_reset_tree_hotplug()]
     165  if (rnp->parent == NULL) {  [in __rcu_report_exp_rnp()]
     175  rnp = rnp->parent;  [in __rcu_report_exp_rnp()]
     266  for (; rnp != NULL; rnp = rnp->parent) {  [in exp_funnel_lock()]
/kernel/events/

core.c
     255  if (!event->parent) {  [in event_function_call()]
    1270  if (event->parent)  [in perf_event_pid()]
    1271  event = event->parent;  [in perf_event_pid()]
    1281  if (event->parent)  [in perf_event_tid()]
    1282  event = event->parent;  [in perf_event_tid()]
    1295  if (event->parent)  [in primary_event_id()]
    1296  id = event->parent->id;  [in primary_event_id()]
    2841  struct perf_event_context *parent, *next_parent;  [in perf_event_context_sched_out(), local]
    2857  parent = rcu_dereference(ctx->parent_ctx);  [in perf_event_context_sched_out()]
    2861  if (!parent && !next_parent)  [in perf_event_context_sched_out()]
      [all …]
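perf_event_pid(), perf_event_tid() and primary_event_id() all walk back to
event->parent: inherited child events defer to the original event they were
cloned from when reporting identifiers. A miniature of the
primary_event_id() lookup (hypothetical struct event):

    #include <stdio.h>
    #include <stdint.h>

    struct event {
        uint64_t id;
        struct event *parent;   /* NULL for an original, non-inherited event */
    };

    /* Report the original event's id so every inherited copy of one
     * logical event shares a single identifier. */
    static uint64_t primary_id(const struct event *e)
    {
        return e->parent ? e->parent->id : e->id;
    }

    int main(void)
    {
        struct event orig  = { .id = 7,  .parent = NULL };
        struct event child = { .id = 42, .parent = &orig };

        printf("%llu %llu\n",
               (unsigned long long)primary_id(&orig),
               (unsigned long long)primary_id(&child));   /* 7 7 */
        return 0;
    }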
/kernel/trace/

trace_stat.c
      78  struct rb_node **new = &(root->rb_node), *parent = NULL;  [in insert_stat(), local]
      97  parent = *new;  [in insert_stat()]
     104  rb_link_node(&data->node, parent, new);  [in insert_stat()]