/kernel/ |
D | audit_watch.c |
    41   struct audit_parent *parent;  /* associated parent */    (member)
    58   static void audit_free_parent(struct audit_parent *parent)    in audit_free_parent()  (argument)
    60   WARN_ON(!list_empty(&parent->watches));    in audit_free_parent()
    61   kfree(parent);    in audit_free_parent()
    66   struct audit_parent *parent;    in audit_watch_free_mark()  (local)
    68   parent = container_of(entry, struct audit_parent, mark);    in audit_watch_free_mark()
    69   audit_free_parent(parent);    in audit_watch_free_mark()
    72   static void audit_get_parent(struct audit_parent *parent)    in audit_get_parent()  (argument)
    74   if (likely(parent))    in audit_get_parent()
    75   fsnotify_get_mark(&parent->mark);    in audit_get_parent()
    [all …]
|
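The audit_watch.c entry above recovers the owning audit_parent from its embedded fsnotify mark with container_of(). A minimal, self-contained sketch of that embedded-member idiom follows; struct mark, struct parent_obj and mark_to_parent() are illustrative names, and the container_of() here is the simplified form without the kernel macro's type checking.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins: a small object embedded inside its owner. */
struct mark {
        int refcount;
};

struct parent_obj {
        int nwatches;
        struct mark mark;       /* embedded member */
};

/* Simplified container_of(): subtract the member offset to get back to the
 * enclosing structure. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct parent_obj *mark_to_parent(struct mark *m)
{
        return container_of(m, struct parent_obj, mark);
}

int main(void)
{
        struct parent_obj p = { .nwatches = 3 };

        /* Round-trip: from &p.mark back to &p. */
        printf("%d\n", mark_to_parent(&p.mark)->nwatches);
        return 0;
}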
D | resource.c |
    72    while (!p->sibling && p->parent)    in next_resource()
    73    p = p->parent;    in next_resource()
    113   for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)    in r_show()
    114   if (p->parent == root)    in r_show()
    203   new->parent = root;    in __request_resource()
    217   p = &old->parent->child;    in __release_resource()
    227   chd->parent = tmp->parent;    in __release_resource()
    234   old->parent = NULL;    in __release_resource()
    253   tmp->parent = NULL;    in __release_child_resources()
    740   if ( new->parent ) {    in allocate_resource()
    [all …]
|
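The next_resource() lines in the resource.c entry step through the resource tree without recursion: follow the first child, otherwise the next sibling, otherwise climb ->parent until a pending sibling appears. A rough standalone sketch of that successor step, using an illustrative struct node rather than the kernel's struct resource:

/* Illustrative node type; struct resource carries the same parent/sibling/
 * child links plus start/end/flags/name fields. */
struct node {
        struct node *parent, *sibling, *child;
};

/* Pre-order successor without recursion (compare next_resource()). */
static struct node *next_node(struct node *p)
{
        if (p->child)
                return p->child;
        while (!p->sibling && p->parent)
                p = p->parent;
        return p->sibling;
}

int main(void)
{
        struct node root = {0}, a = {0}, b = {0}, c = {0};
        struct node *p;
        int n = 0;

        /* root -> a -> c, with b as a's sibling. */
        root.child = &a;
        a.parent = &root;
        a.sibling = &b;
        b.parent = &root;
        a.child = &c;
        c.parent = &a;

        for (p = root.child; p; p = next_node(p))
                n++;                    /* visits a, c, b */
        return n == 3 ? 0 : 1;
}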
D | user_namespace.c |
    116   ns->parent = parent_ns;    in create_user_ns()
    176   struct user_namespace *parent, *ns =    in free_user_ns()  (local)
    181   parent = ns->parent;    in free_user_ns()
    199   ns = parent;    in free_user_ns()
    200   } while (atomic_dec_and_test(&parent->count));    in free_user_ns()
    585   if ((lower_ns == ns) && lower_ns->parent)    in uid_m_show()
    586   lower_ns = lower_ns->parent;    in uid_m_show()
    606   if ((lower_ns == ns) && lower_ns->parent)    in gid_m_show()
    607   lower_ns = lower_ns->parent;    in gid_m_show()
    627   if ((lower_ns == ns) && lower_ns->parent)    in projid_m_show()
    [all …]
|
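The free_user_ns() loop above (and put_pid_ns() in the next entry) tears a chain of namespaces down iteratively: each namespace holds one counted reference on its parent, and when the last reference drops, the object is freed and the walk continues up the chain instead of recursing. A hedged sketch with hypothetical struct ns and a plain int counter standing in for the kernel's atomic count:

#include <stdlib.h>

/* Hypothetical namespace-like object; each holds one counted reference on
 * its parent (NULL for the root). */
struct ns {
        struct ns *parent;
        int count;
};

/* Drop one reference.  When the count hits zero, free the object and drop
 * the reference it held on its parent in the same loop, so long chains are
 * released without deep recursion. */
static void ns_put(struct ns *ns)
{
        struct ns *parent;

        if (--ns->count > 0)
                return;
        do {
                parent = ns->parent;
                free(ns);
                ns = parent;
        } while (ns && --ns->count == 0);
}

int main(void)
{
        struct ns *root = calloc(1, sizeof(*root));
        struct ns *child = calloc(1, sizeof(*child));

        root->count = 1;        /* held by child */
        child->count = 1;       /* held by the caller */
        child->parent = root;

        ns_put(child);          /* frees child, then root */
        return 0;
}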
D | pid_namespace.c |
    115   ns->parent = get_pid_ns(parent_pid_ns);    in create_pid_namespace()
    170   struct pid_namespace *parent;    in put_pid_ns()  (local)
    173   parent = ns->parent;    in put_pid_ns()
    176   ns = parent;    in put_pid_ns()
    405   ancestor = ancestor->parent;    in pidns_install()
    420   pid_ns = p = to_pid_ns(ns)->parent;    in pidns_get_parent()
    426   p = p->parent;    in pidns_get_parent()
|
D | exit.c |
    318    kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)    in kill_orphaned_pgrp()  (argument)
    323    if (!parent)    in kill_orphaned_pgrp()
    327    parent = tsk->real_parent;    in kill_orphaned_pgrp()
    334    if (task_pgrp(parent) != pgrp &&    in kill_orphaned_pgrp()
    335    task_session(parent) == task_session(tsk) &&    in kill_orphaned_pgrp()
    623    BUG_ON((!t->ptrace) != (t->parent == father));    in forget_original_parent()
    625    t->parent = t->real_parent;    in forget_original_parent()
    1407   if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)    in child_wait_callback()
    1413   void __wake_up_parent(struct task_struct *p, struct task_struct *parent)    in __wake_up_parent()  (argument)
    1415   __wake_up_sync_key(&parent->signal->wait_chldexit,    in __wake_up_parent()
|
D | relay.c |
    305   struct dentry *parent,    in create_buf_file_default_callback()  (argument)
    428   dentry = chan->cb->create_buf_file(tmpname, chan->parent,    in relay_create_buf_file()
    564   struct dentry *parent,    in relay_open()  (argument)
    588   chan->parent = parent;    in relay_open()
    651   struct dentry *parent)    in relay_late_setup_files()  (argument)
    672   chan->parent = parent;    in relay_late_setup_files()
|
/kernel/gcov/ |
D | fs.c |
    55    struct gcov_node *parent;    (member)
    366   static void add_links(struct gcov_node *node, struct dentry *parent)    in add_links()  (argument)
    388   parent, target);    in add_links()
    411   const char *name, struct gcov_node *parent)    in init_node()  (argument)
    420   node->parent = parent;    in init_node()
    429   static struct gcov_node *new_node(struct gcov_node *parent,    in new_node()  (argument)
    443   init_node(node, info, name, parent);    in new_node()
    447   parent->dentry, node, &gcov_data_fops);    in new_node()
    449   node->dentry = debugfs_create_dir(node->name, parent->dentry);    in new_node()
    451   add_links(node, parent->dentry);    in new_node()
    [all …]
|
/kernel/cgroup/ |
D | cpuset.c |
    205    return css_cs(cs->css.parent);    in parent_cs()
    1056   struct cpuset *cs, struct cpuset *parent)    in compute_effective_cpumask()  (argument)
    1058   if (parent->nr_subparts_cpus) {    in compute_effective_cpumask()
    1059   cpumask_or(new_cpus, parent->effective_cpus,    in compute_effective_cpumask()
    1060   parent->subparts_cpus);    in compute_effective_cpumask()
    1064   cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);    in compute_effective_cpumask()
    1123   struct cpuset *parent = parent_cs(cpuset);    in update_parent_subparts_cpumask()  (local)
    1135   if (!is_partition_root(parent) ||    in update_parent_subparts_cpumask()
    1153   (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) ||    in update_parent_subparts_cpumask()
    1154   cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus)))    in update_parent_subparts_cpumask()
    [all …]
|
D | rstat.c |
    28    struct cgroup *parent;    in cgroup_rstat_updated()  (local)
    52    for (parent = cgroup_parent(cgrp); parent;    in cgroup_rstat_updated()
    53    cgrp = parent, parent = cgroup_parent(cgrp)) {    in cgroup_rstat_updated()
    55    struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);    in cgroup_rstat_updated()
    119   struct cgroup *parent = cgroup_parent(pos);    in cgroup_rstat_cpu_pop_updated()  (local)
    120   struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);    in cgroup_rstat_cpu_pop_updated()
    130   WARN_ON_ONCE(*nextp == parent);    in cgroup_rstat_cpu_pop_updated()
    317   struct cgroup *parent = cgroup_parent(cgrp);    in cgroup_base_stat_flush()  (local)
    343   if (parent)    in cgroup_base_stat_flush()
    344   cgroup_base_stat_accumulate(&parent->pending_bstat, &delta);    in cgroup_base_stat_flush()
|
D | legacy_freezer.c |
    62    return css_freezer(freezer->css.parent);    in parent_freezer()
    108   struct freezer *parent = parent_freezer(freezer);    in freezer_css_online()  (local)
    114   if (parent && (parent->state & CGROUP_FREEZING)) {    in freezer_css_online()
    398   struct freezer *parent = parent_freezer(pos_f);    in freezer_change_state()  (local)
    409   parent->state & CGROUP_FREEZING,    in freezer_change_state()
|
D | cgroup.c |
    420    struct cgroup *parent = cgroup_parent(cgrp);    in cgroup_control()  (local)
    423    if (parent) {    in cgroup_control()
    424    u16 ss_mask = parent->subtree_control;    in cgroup_control()
    441    struct cgroup *parent = cgroup_parent(cgrp);    in cgroup_ss_mask()  (local)
    443    if (parent) {    in cgroup_ss_mask()
    444    u16 ss_mask = parent->subtree_ss_mask;    in cgroup_ss_mask()
    632    struct cgroup *cgrp = of->kn->parent->priv;    in of_css()
    1570   cgrp = kn->parent->priv;    in cgroup_kn_unlock()
    1602   cgrp = kn->parent->priv;    in cgroup_kn_lock_live()
    3165   if (css->parent &&    in cgroup_apply_control_disable()
    [all …]
|
/kernel/bpf/ |
D | lpm_trie.c |
    436   struct lpm_trie_node *node, *parent;    in trie_delete_elem()  (local)
    455   parent = NULL;    in trie_delete_elem()
    464   parent = node;    in trie_delete_elem()
    495   if (parent && (parent->flags & LPM_TREE_NODE_FLAG_IM) &&    in trie_delete_elem()
    497   if (node == rcu_access_pointer(parent->child[0]))    in trie_delete_elem()
    499   *trim2, rcu_access_pointer(parent->child[1]));    in trie_delete_elem()
    502   *trim2, rcu_access_pointer(parent->child[0]));    in trie_delete_elem()
    503   kfree_rcu(parent, rcu);    in trie_delete_elem()
    632   struct lpm_trie_node *node, *next_node = NULL, *parent, *search_root;    in trie_get_next_key()  (local)
    686   parent = node_stack[stack_ptr - 1];    in trie_get_next_key()
    [all …]
|
/kernel/sched/ |
D | topology.c |
    39    if (sd->parent)    in sched_domain_debug_one()
    109   if (sd->parent &&    in sched_domain_debug_one()
    110   !cpumask_subset(groupmask, sched_domain_span(sd->parent)))    in sched_domain_debug_one()
    133   sd = sd->parent;    in sched_domain_debug()
    174   sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)    in sd_parent_degenerate()  (argument)
    176   unsigned long cflags = sd->flags, pflags = parent->flags;    in sd_parent_degenerate()
    178   if (sd_degenerate(parent))    in sd_parent_degenerate()
    181   if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))    in sd_parent_degenerate()
    185   if (parent->groups == parent->groups->next) {    in sd_parent_degenerate()
    600   struct sched_domain *parent = sd->parent;    in destroy_sched_domains_rcu()  (local)
    [all …]
|
D | cpudeadline.c |
    11   static inline int parent(int i)    in parent()  (function)
    80   p = parent(idx);    in cpudl_heapify_up()
    97   if (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl,    in cpudl_heapify()
|
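In the cpudeadline.c entry, parent() is not a pointer at all but the index computation for an array-backed binary heap: the parent of slot i lives at (i - 1) / 2. A small illustrative max-heap sift-up over plain ints (cpudl orders CPUs by deadline instead; heap_parent() and sift_up() are made-up names):

/* Array-backed binary heap: parent of slot i is (i - 1) / 2. */
static int heap_parent(int i)
{
        return (i - 1) / 2;
}

/* Swap an element with its parent while it compares greater, mirroring the
 * shape of cpudl_heapify_up(). */
static void sift_up(int heap[], int idx)
{
        while (idx > 0 && heap[idx] > heap[heap_parent(idx)]) {
                int p = heap_parent(idx);
                int tmp = heap[idx];

                heap[idx] = heap[p];
                heap[p] = tmp;
                idx = p;
        }
}

int main(void)
{
        int heap[4] = { 30, 20, 10, 40 };       /* 40 just appended at slot 3 */

        sift_up(heap, 3);                       /* 40 bubbles up to slot 0 */
        return heap[0] == 40 ? 0 : 1;
}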
D | core.c |
    714   struct task_group *parent, *child;    in walk_tg_tree_from()  (local)
    717   parent = from;    in walk_tg_tree_from()
    720   ret = (*down)(parent, data);    in walk_tg_tree_from()
    723   list_for_each_entry_rcu(child, &parent->children, siblings) {    in walk_tg_tree_from()
    724   parent = child;    in walk_tg_tree_from()
    730   ret = (*up)(parent, data);    in walk_tg_tree_from()
    731   if (ret || parent == from)    in walk_tg_tree_from()
    734   child = parent;    in walk_tg_tree_from()
    735   parent = parent->parent;    in walk_tg_tree_from()
    736   if (parent)    in walk_tg_tree_from()
    [all …]
|
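walk_tg_tree_from() in the core.c entry visits a task-group tree iteratively, calling a down() callback when a node is entered and up() when its whole subtree is finished, backtracking through ->parent instead of recursing. Below is a loose sketch of that traversal shape only: struct tg, walk_tree_from() and the singly linked children/next fields are simplifications, and the kernel's goto-based loop over an RCU-protected sibling list is restructured for clarity.

/* Simplified task-group-like tree. */
struct tg {
        struct tg *parent;
        struct tg *children;    /* first child */
        struct tg *next;        /* next sibling */
};

typedef int (*visit_fn)(struct tg *tg, void *data);

/* Call down() on entry, up() once a subtree is complete; stop early if
 * either callback returns nonzero. */
static int walk_tree_from(struct tg *from, visit_fn down, visit_fn up,
                          void *data)
{
        struct tg *pos = from;
        int ret;

        for (;;) {
                ret = down(pos, data);
                if (ret)
                        return ret;
                if (pos->children) {            /* descend first */
                        pos = pos->children;
                        continue;
                }
                for (;;) {                      /* leaf reached: close subtrees */
                        ret = up(pos, data);
                        if (ret || pos == from)
                                return ret;
                        if (pos->next) {        /* a sibling is still pending */
                                pos = pos->next;
                                break;
                        }
                        pos = pos->parent;      /* subtree done: climb */
                }
        }
}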
D | sched.h |
    385    struct task_group *parent;    (member)
    442    extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
    447    struct sched_entity *parent);
    455    extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
    458    struct sched_rt_entity *parent);
    465    extern struct task_group *sched_create_group(struct task_group *parent);
    467    struct task_group *parent);
    1348   __sd; __sd = __sd->parent)
    1515   p->se.parent = tg->se[cpu];    in set_task_rq()
    1520   p->rt.parent = tg->rt_se[cpu];    in set_task_rq()
|
D | deadline.c |
    449    struct rb_node *parent = NULL;    in enqueue_pushable_dl_task()  (local)
    456    parent = *link;    in enqueue_pushable_dl_task()
    457    entry = rb_entry(parent, struct task_struct,    in enqueue_pushable_dl_task()
    460    link = &parent->rb_left;    in enqueue_pushable_dl_task()
    462    link = &parent->rb_right;    in enqueue_pushable_dl_task()
    470    rb_link_node(&p->pushable_dl_tasks, parent, link);    in enqueue_pushable_dl_task()
    1406   struct rb_node *parent = NULL;    in __enqueue_dl_entity()  (local)
    1413   parent = *link;    in __enqueue_dl_entity()
    1414   entry = rb_entry(parent, struct sched_dl_entity, rb_node);    in __enqueue_dl_entity()
    1416   link = &parent->rb_left;    in __enqueue_dl_entity()
    [all …]
|
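The deadline.c fragments (and the near-identical ones in rtmutex.c, wakelock.c and trace_stat.c further down) all follow the same rbtree insertion idiom: walk down from the root keeping both the link to rewrite and the node that will become the parent, then splice the new node in with rb_link_node(). A self-contained sketch of that descend-and-link pattern on a plain binary tree, leaving out the red-black rebalancing done by rb_insert_color(); struct tnode and tree_insert() are illustrative names.

#include <stddef.h>

/* Plain binary-tree node; the kernel's struct rb_node additionally packs the
 * node colour into its parent pointer. */
struct tnode {
        struct tnode *left, *right, *parent;
        unsigned long key;
};

/* Track the link to rewrite (*link) and the prospective parent while
 * descending, then link the new node in. */
static void tree_insert(struct tnode **root, struct tnode *new)
{
        struct tnode **link = root, *parent = NULL;

        while (*link) {
                parent = *link;
                if (new->key < parent->key)
                        link = &parent->left;
                else
                        link = &parent->right;
        }
        new->left = new->right = NULL;
        new->parent = parent;           /* rb_link_node() records this too */
        *link = new;
}

int main(void)
{
        struct tnode a = { .key = 2 }, b = { .key = 1 }, *root = NULL;

        tree_insert(&root, &a);
        tree_insert(&root, &b);
        return (root == &a && a.left == &b && b.parent == &a) ? 0 : 1;
}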
/kernel/irq/ |
D | irqdomain.c |
    1049   struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,    in irq_domain_create_hierarchy()  (argument)
    1063   domain->parent = parent;    in irq_domain_create_hierarchy()
    1147   struct irq_domain *parent;    in irq_domain_alloc_irq_data()  (local)
    1155   for (parent = domain->parent; parent; parent = parent->parent) {    in irq_domain_alloc_irq_data()
    1156   irq_data = irq_domain_insert_irq_data(parent, irq_data);    in irq_domain_alloc_irq_data()
    1433   if (domain->parent != root_irq_data->domain)    in irq_domain_push_irq()
    1581   if (!domain->parent)    in irq_domain_alloc_irqs_parent()
    1584   return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,    in irq_domain_alloc_irqs_parent()
    1600   if (!domain->parent)    in irq_domain_free_irqs_parent()
    1603   irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);    in irq_domain_free_irqs_parent()
    [all …]
|
D | msi.c |
    104   struct irq_data *parent = irq_data->parent_data;    in msi_domain_set_affinity()  (local)
    108   ret = parent->chip->irq_set_affinity(parent, mask, force);    in msi_domain_set_affinity()
    149   if (domain->parent) {    in msi_domain_alloc()
    283   struct irq_domain *parent)    in msi_create_irq_domain()  (argument)
    292   domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,    in msi_create_irq_domain()
|
/kernel/locking/ |
D | rtmutex.c |
    276   struct rb_node *parent = NULL;    in rt_mutex_enqueue()  (local)
    281   parent = *link;    in rt_mutex_enqueue()
    282   entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);    in rt_mutex_enqueue()
    284   link = &parent->rb_left;    in rt_mutex_enqueue()
    286   link = &parent->rb_right;    in rt_mutex_enqueue()
    291   rb_link_node(&waiter->tree_entry, parent, link);    in rt_mutex_enqueue()
    309   struct rb_node *parent = NULL;    in rt_mutex_enqueue_pi()  (local)
    314   parent = *link;    in rt_mutex_enqueue_pi()
    315   entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);    in rt_mutex_enqueue_pi()
    317   link = &parent->rb_left;    in rt_mutex_enqueue_pi()
    [all …]
|
D | lockdep.c |
    1406   struct lock_list *parent)    in mark_lock_accessed()  (argument)
    1412   lock->parent = parent;    in mark_lock_accessed()
    1427   return child->parent;    in get_lock_parent()
    1433   struct lock_list *parent;    in get_lock_depth()  (local)
    1435   while ((parent = get_lock_parent(child))) {    in get_lock_depth()
    1436   child = parent;    in get_lock_depth()
    1568   struct lock_class *parent = prt->class;    in print_circular_lock_scenario()  (local)
    1583   if (parent != source) {    in print_circular_lock_scenario()
    1587   __print_lock_name(parent);    in print_circular_lock_scenario()
    1600   __print_lock_name(parent);    in print_circular_lock_scenario()
    [all …]
|
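In the lockdep.c entry, mark_lock_accessed() records, for every node reached during the dependency search, which entry reached it, and get_lock_parent()/get_lock_depth() later recover the node's depth (and hence the dependency path) by chasing those ->parent links back to the root. A minimal sketch of that depth walk over a hypothetical struct entry:

/* Hypothetical search entry: each visited entry remembers which entry
 * enqueued it during the search. */
struct entry {
        struct entry *parent;
};

/* Depth of an entry in the search tree, found by following ->parent back to
 * the root (the root has no parent). */
static int entry_depth(struct entry *child)
{
        int depth = 0;

        while (child->parent) {
                child = child->parent;
                depth++;
        }
        return depth;
}

int main(void)
{
        struct entry root = { 0 }, mid = { &root }, leaf = { &mid };

        return entry_depth(&leaf) == 2 ? 0 : 1;
}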
/kernel/events/ |
D | core.c |
    258    if (!event->parent) {    in event_function_call()
    732    for (css = &cgrp->css; css; css = css->parent) {    in update_cgrp_time_from_cpuctx()
    776    for (css = &cgrp->css; css; css = css->parent) {    in perf_cgroup_set_timestamp()
    1327   if (event->parent)    in perf_event_pid_type()
    1328   event = event->parent;    in perf_event_pid_type()
    1355   if (event->parent)    in primary_event_id()
    1356   id = event->parent->id;    in primary_event_id()
    1551   struct rb_node *parent;    in perf_event_groups_insert()  (local)
    1557   parent = *node;    in perf_event_groups_insert()
    1560   parent = *node;    in perf_event_groups_insert()
    [all …]
|
/kernel/power/ |
D | wakelock.c |
    154   struct rb_node *parent = *node;    in wakelock_lookup_add()  (local)
    160   parent = *node;    in wakelock_lookup_add()
    199   rb_link_node(&wl->node, parent, node);    in wakelock_lookup_add()
|
/kernel/rcu/ |
D | tree_exp.h |
    108   rnp_up = rnp->parent;    in sync_exp_reset_tree_hotplug()
    119   rnp_up = rnp_up->parent;    in sync_exp_reset_tree_hotplug()
    201   if (rnp->parent == NULL) {    in __rcu_report_exp_rnp()
    211   rnp = rnp->parent;    in __rcu_report_exp_rnp()
    295   for (; rnp != NULL; rnp = rnp->parent) {    in exp_funnel_lock()
|
/kernel/trace/ |
D | trace_stat.c |
    79    struct rb_node **new = &(root->rb_node), *parent = NULL;    in insert_stat()  (local)
    98    parent = *new;    in insert_stat()
    105   rb_link_node(&data->node, parent, new);    in insert_stat()
|