
Searched refs:node (Results 1 – 19 of 19) sorted by relevance

/kernel/power/
swsusp.c
73 struct rb_node node; member
88 ext = container_of(*new, struct swsusp_extent, node); in swsusp_extents_insert()
116 rb_link_node(&ext->node, parent, new); in swsusp_extents_insert()
117 rb_insert_color(&ext->node, &swsusp_extents); in swsusp_extents_insert()
148 struct rb_node *node; in free_all_swap_pages() local
150 while ((node = swsusp_extents.rb_node)) { in free_all_swap_pages()
154 ext = container_of(node, struct swsusp_extent, node); in free_all_swap_pages()
155 rb_erase(node, &swsusp_extents); in free_all_swap_pages()
277 struct list_head node; member
302 list_add_tail(&entry->node, &nvs_list); in hibernate_nvs_register()
[all …]
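
The swsusp matches above show the kernel's intrusive rb-tree idiom end to end: a struct rb_node embedded in the payload (line 73), container_of() to recover the payload while walking (line 88), and rb_link_node()/rb_insert_color() to splice and rebalance (lines 116-117). A minimal sketch of that insert, with illustrative type names rather than the real swsusp code:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>

struct extent {				/* illustrative stand-in for swsusp_extent */
	struct rb_node node;		/* embedded linkage, as at line 73 */
	unsigned long start;
};

static struct rb_root extents = RB_ROOT;

static int extent_insert(struct extent *ins)
{
	struct rb_node **new = &extents.rb_node, *parent = NULL;

	while (*new) {
		struct extent *ext = container_of(*new, struct extent, node);

		parent = *new;
		if (ins->start < ext->start)
			new = &(*new)->rb_left;
		else if (ins->start > ext->start)
			new = &(*new)->rb_right;
		else
			return -EEXIST;		/* key already present */
	}
	rb_link_node(&ins->node, parent, new);	/* splice in... */
	rb_insert_color(&ins->node, &extents);	/* ...then rebalance */
	return 0;
}
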
userwakelock.c
35 struct rb_node node; member
83 l = rb_entry(parent, struct user_wake_lock, node); in lookup_wake_lock_name()
117 rb_link_node(&l->node, parent, p); in lookup_wake_lock_name()
118 rb_insert_color(&l->node, &user_wake_locks); in lookup_wake_lock_name()
139 l = rb_entry(n, struct user_wake_lock, node); in wake_lock_show()
187 l = rb_entry(n, struct user_wake_lock, node); in wake_unlock_show()
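
userwakelock.c walks the same kind of tree by a string key; rb_entry() (line 83) is container_of() spelled for rb-trees. A lookup in that style might look like this (hypothetical struct, not the wakelock code):

#include <linux/rbtree.h>
#include <linux/string.h>

struct lock {				/* hypothetical, mirrors user_wake_lock */
	struct rb_node node;
	char name[32];
};

static struct lock *lock_lookup(struct rb_root *root, const char *name)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct lock *l = rb_entry(n, struct lock, node);
		int diff = strcmp(name, l->name);

		if (diff < 0)
			n = n->rb_left;
		else if (diff > 0)
			n = n->rb_right;
		else
			return l;	/* exact name match */
	}
	return NULL;
}
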
/kernel/
pm_qos_params.c
142 struct requirement_list *node; in update_target() local
148 list_for_each_entry(node, in update_target()
151 extreme_value, node->value); in update_target()
254 struct requirement_list *node; in pm_qos_update_requirement() local
258 list_for_each_entry(node, in pm_qos_update_requirement()
260 if (strcmp(node->name, name) == 0) { in pm_qos_update_requirement()
262 node->value = in pm_qos_update_requirement()
265 node->value = new_value; in pm_qos_update_requirement()
289 struct requirement_list *node; in pm_qos_remove_requirement() local
293 list_for_each_entry(node, in pm_qos_remove_requirement()
[all …]
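
pm_qos_params.c uses the other common linkage, a struct list_head member named node, scanned with list_for_each_entry(). A simplified sketch of the update-by-name loop above (illustrative types; the real function also recomputes the aggregate target):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/types.h>

struct req {				/* stand-in for requirement_list */
	struct list_head node;		/* list linkage named "node" */
	const char *name;
	s32 value;
};

static LIST_HEAD(requests);

static int req_update(const char *name, s32 new_value)
{
	struct req *node;

	list_for_each_entry(node, &requests, node) {
		if (strcmp(node->name, name) == 0) {
			node->value = new_value;
			return 0;
		}
	}
	return -ENOENT;
}
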
audit_tree.c
29 struct node { struct
118 size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node); in alloc_chunk()
201 static struct audit_chunk *find_chunk(struct node *p) in find_chunk()
208 static void untag_chunk(struct node *p) in untag_chunk()
362 struct node *p; in tag_chunk()
466 struct node *p; in prune_one()
468 p = list_entry(victim->chunks.next, struct node, list); in prune_one()
488 struct node *node = list_entry(p, struct node, list); in trim_marked() local
490 if (node->index & (1U<<31)) { in trim_marked()
497 struct node *node; in trim_marked() local
[all …]
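
audit_tree.c defines its own struct node and sizes a trailing array of them with offsetof() plus count * sizeof() (line 118), so one allocation covers the header and all slots. The arithmetic, as a sketch with stand-in types:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/stddef.h>

struct node {				/* per-owner slot, as at line 29 */
	struct list_head list;
	unsigned index;
};

struct chunk {				/* stand-in for audit_chunk */
	int count;
	struct node owners[];		/* trailing array, sized below */
};

static struct chunk *chunk_alloc(int count)
{
	size_t size = offsetof(struct chunk, owners) +
		      count * sizeof(struct node);

	return kzalloc(size, GFP_KERNEL);	/* header + count slots */
}
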
hrtimer.c
502 timer = rb_entry(base->first, struct hrtimer, node); in hrtimer_force_reprogram()
800 entry = rb_entry(parent, struct hrtimer, node); in enqueue_hrtimer()
819 base->first = &timer->node; in enqueue_hrtimer()
821 rb_link_node(&timer->node, parent, link); in enqueue_hrtimer()
822 rb_insert_color(&timer->node, &base->active); in enqueue_hrtimer()
851 if (base->first == &timer->node) { in __remove_hrtimer()
852 base->first = rb_next(&timer->node); in __remove_hrtimer()
857 rb_erase(&timer->node, &base->active); in __remove_hrtimer()
1059 timer = rb_entry(base->first, struct hrtimer, node); in hrtimer_get_next_event()
1220 struct rb_node *node; in hrtimer_interrupt() local
[all …]
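
hrtimer.c adds a twist to the rb-tree pattern: base->first caches the leftmost (soonest-expiring) node, so removal must advance the cache with rb_next() before rb_erase() (lines 851-857). In sketch form:

#include <linux/rbtree.h>

struct timer_base {			/* stand-in for hrtimer_clock_base */
	struct rb_root active;
	struct rb_node *first;		/* cached leftmost node */
};

static void timer_remove(struct timer_base *base, struct rb_node *node)
{
	if (base->first == node)
		base->first = rb_next(node);	/* may become NULL */
	rb_erase(node, &base->active);
}
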
marker.c
359 struct hlist_node *node; in get_marker() local
364 hlist_for_each_entry(e, node, head, hlist) { in get_marker()
378 struct hlist_node *node; in add_marker() local
387 hlist_for_each_entry(e, node, head, hlist) { in add_marker()
434 struct hlist_node *node; in remove_marker() local
441 hlist_for_each_entry(e, node, head, hlist) { in remove_marker()
778 struct hlist_node *node; in get_marker_from_private_data() local
782 hlist_for_each_entry(entry, node, head, hlist) { in get_marker_from_private_data()
869 struct hlist_node *node; in marker_get_private_data() local
876 hlist_for_each_entry(e, node, head, hlist) { in marker_get_private_data()
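
marker.c (like tracepoint.c, kprobes.c, and cgroup.c below) hashes entries into hlist buckets. Note the separate struct hlist_node *node cursor: kernels of this vintage used the four-argument form of hlist_for_each_entry(). A bucket scan in that style, with a hypothetical entry type:

#include <linux/list.h>
#include <linux/string.h>

struct entry {				/* hypothetical, mirrors marker_entry */
	struct hlist_node hlist;	/* linkage within one bucket */
	char name[32];
};

static struct entry *bucket_find(struct hlist_head *head, const char *name)
{
	struct entry *e;
	struct hlist_node *node;	/* iterator, as in the matches above */

	hlist_for_each_entry(e, node, head, hlist) {
		if (strcmp(name, e->name) == 0)
			return e;
	}
	return NULL;
}
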
profile.c
365 int node, cpu = (unsigned long)__cpu; in profile_cpu_callback() local
371 node = cpu_to_node(cpu); in profile_cpu_callback()
374 page = alloc_pages_node(node, in profile_cpu_callback()
382 page = alloc_pages_node(node, in profile_cpu_callback()
570 int node = cpu_to_node(cpu); in create_hash_tables() local
573 page = alloc_pages_node(node, in create_hash_tables()
580 page = alloc_pages_node(node, in create_hash_tables()
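
profile.c resolves a CPU to its memory node and allocates there, keeping each CPU's hash pages NUMA-local. The core of both call sites, as a sketch:

#include <linux/gfp.h>
#include <linux/topology.h>

static struct page *alloc_local_page(int cpu)
{
	int node = cpu_to_node(cpu);	/* node owning this CPU */

	/* order-0, zeroed page from that node's memory */
	return alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
}
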
kprobes.c
310 struct hlist_node *node; in get_kprobe() local
314 hlist_for_each_entry_rcu(p, node, head, hlist) { in get_kprobe()
457 struct hlist_node *node, *tmp; in kprobe_flush_task() local
467 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { in kprobe_flush_task()
473 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { in kprobe_flush_task()
1083 struct hlist_node *node; in kprobes_module_callback() local
1100 hlist_for_each_entry_rcu(p, node, head, hlist) in kprobes_module_callback()
1229 struct hlist_node *node; in show_kprobe_addr() local
1238 hlist_for_each_entry_rcu(p, node, head, hlist) { in show_kprobe_addr()
1273 struct hlist_node *node; in enable_all_kprobes() local
[all …]
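
kprobe_flush_task() above unlinks entries while walking, which requires the _safe iterator: it caches the next pointer in tmp before the loop body can free the current entry. A drain loop in that style (hypothetical payload type):

#include <linux/list.h>
#include <linux/slab.h>

struct instance {			/* hypothetical, mirrors kretprobe_instance */
	struct hlist_node hlist;
};

static void bucket_drain(struct hlist_head *head)
{
	struct instance *ri;
	struct hlist_node *node, *tmp;	/* tmp keeps the walk safe */

	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
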
cpuset.c
2281 int node; /* node that zone z is on */ in __cpuset_zone_allowed_softwall() local
2287 node = zone_to_nid(z); in __cpuset_zone_allowed_softwall()
2289 if (node_isset(node, current->mems_allowed)) in __cpuset_zone_allowed_softwall()
2310 allowed = node_isset(node, cs->mems_allowed); in __cpuset_zone_allowed_softwall()
2341 int node; /* node that zone z is on */ in __cpuset_zone_allowed_hardwall() local
2345 node = zone_to_nid(z); in __cpuset_zone_allowed_hardwall()
2346 if (node_isset(node, current->mems_allowed)) in __cpuset_zone_allowed_hardwall()
2412 int node; in cpuset_mem_spread_node() local
2414 node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed); in cpuset_mem_spread_node()
2415 if (node == MAX_NUMNODES) in cpuset_mem_spread_node()
[all …]
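
cpuset_mem_spread_node() above round-robins a rotor through current->mems_allowed, wrapping when next_node() runs off the end (MAX_NUMNODES). The wrap logic, as a sketch:

#include <linux/nodemask.h>

static int spread_next(int rotor, nodemask_t *allowed)
{
	int node = next_node(rotor, *allowed);

	if (node == MAX_NUMNODES)		/* ran past the last set bit */
		node = first_node(*allowed);	/* wrap to the lowest node */
	return node;
}
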
tracepoint.c
181 struct hlist_node *node; in get_tracepoint() local
186 hlist_for_each_entry(e, node, head, hlist) { in get_tracepoint()
200 struct hlist_node *node; in add_tracepoint() local
206 hlist_for_each_entry(e, node, head, hlist) { in add_tracepoint()
pid.c
322 hlist_add_head_rcu(&link->node, &pid->tasks[type]); in attach_pid()
335 hlist_del_rcu(&link->node); in __change_pid()
362 hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node); in transfer_pid()
372 result = hlist_entry(first, struct task_struct, pids[(type)].node); in pid_task()
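
pid.c pairs the hlist linkage with RCU: writers publish with hlist_add_head_rcu() and retire with hlist_del_rcu() (or swap in place with hlist_replace_rcu()), so lockless readers always see a consistent list. In sketch form, with a stand-in type:

#include <linux/rculist.h>

struct link {				/* stand-in for struct pid_link */
	struct hlist_node node;
};

static void link_attach(struct link *l, struct hlist_head *head)
{
	hlist_add_head_rcu(&l->node, head);	/* visible to readers at once */
}

static void link_detach(struct link *l)
{
	/* readers may still traverse the entry until a grace period ends */
	hlist_del_rcu(&l->node);
}
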
rcutree.c
59 .level = { &name.node[0] }, \
137 return &rsp->node[0]; in rcu_get_root()
427 struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES]; in print_other_cpu_stall()
603 for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) in rcu_start_gp()
620 rnp_end = &rsp->node[NUM_RCU_NODES]; in rcu_start_gp()
1000 struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES]; in rcu_process_dyntick()
rcutree_trace.c
162 for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) { in print_one_rcu_state()
cgroup.c
326 struct hlist_node *node; in find_existing_css_set() local
345 hlist_for_each_entry(cg, node, hhead, hlist) { in find_existing_css_set()
652 struct list_head *node; in cgroup_clear_directory() local
656 node = dentry->d_subdirs.next; in cgroup_clear_directory()
657 while (node != &dentry->d_subdirs) { in cgroup_clear_directory()
658 struct dentry *d = list_entry(node, struct dentry, d_u.d_child); in cgroup_clear_directory()
659 list_del_init(node); in cgroup_clear_directory()
671 node = dentry->d_subdirs.next; in cgroup_clear_directory()
1056 struct hlist_node *node; in cgroup_get_sb() local
1059 hlist_for_each_entry(cg, node, hhead, hlist) in cgroup_get_sb()
sched.c
2518 struct hlist_node *node; in fire_sched_in_preempt_notifiers() local
2520 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) in fire_sched_in_preempt_notifiers()
2529 struct hlist_node *node; in fire_sched_out_preempt_notifiers() local
2531 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) in fire_sched_out_preempt_notifiers()
7208 static int find_next_best_node(int node, nodemask_t *used_nodes) in find_next_best_node() argument
7216 n = (node + i) % nr_node_ids; in find_next_best_node()
7226 val = node_distance(node, n); in find_next_best_node()
7247 static void sched_domain_node_span(int node, struct cpumask *span) in sched_domain_node_span() argument
7255 cpumask_or(span, span, cpumask_of_node(node)); in sched_domain_node_span()
7256 node_set(node, used_nodes); in sched_domain_node_span()
[all …]
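
find_next_best_node() above grows a NUMA span by repeatedly picking the nearest not-yet-used node, judged by node_distance(). A simplified version of that selection (sketch; the real code also biases the search by the starting node):

#include <linux/kernel.h>
#include <linux/nodemask.h>
#include <linux/topology.h>

static int nearest_unused_node(int node, nodemask_t *used)
{
	int n, best = -1, best_dist = INT_MAX;

	for_each_online_node(n) {
		if (node_isset(n, *used))
			continue;		/* already in the span */
		if (node_distance(node, n) < best_dist) {
			best_dist = node_distance(node, n);
			best = n;
		}
	}
	if (best >= 0)
		node_set(best, *used);		/* claim the winner */
	return best;
}
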
/kernel/irq/
numa_migrate.c
64 int node; in __real_move_irq_desc() local
76 node = cpu_to_node(cpu); in __real_move_irq_desc()
77 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); in __real_move_irq_desc()
106 int node, old_node; in move_irq_desc() local
114 node = cpu_to_node(cpu); in move_irq_desc()
116 if (old_node != node) in move_irq_desc()
handle.c
88 int node; in init_kstat_irqs() local
93 node = cpu_to_node(cpu); in init_kstat_irqs()
94 ptr = kzalloc_node(bytes, GFP_ATOMIC, node); in init_kstat_irqs()
95 printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node); in init_kstat_irqs()
177 int node; in irq_to_desc_alloc_cpu() local
197 node = cpu_to_node(cpu); in irq_to_desc_alloc_cpu()
198 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); in irq_to_desc_alloc_cpu()
200 irq, cpu, node); in irq_to_desc_alloc_cpu()
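
Both irq files follow the same NUMA placement rule as profile.c, but on the slab side: the descriptor is allocated with kzalloc_node() on the node of the CPU that will service the interrupt. Reduced to its core:

#include <linux/slab.h>
#include <linux/topology.h>

static void *desc_alloc_on(int cpu, size_t size)
{
	/* size stands in for sizeof(struct irq_desc) and the like */
	return kzalloc_node(size, GFP_ATOMIC, cpu_to_node(cpu));
}
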
/kernel/time/
clockevents.c
232 struct list_head *node, *tmp; in clockevents_notify() local
243 list_for_each_safe(node, tmp, &clockevents_released) in clockevents_notify()
244 list_del(node); in clockevents_notify()
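
clockevents_notify() empties a list while walking it, so it uses list_for_each_safe(), the raw-pointer sibling of the _safe entry iterator seen in kprobes.c. In sketch form:

#include <linux/list.h>

static LIST_HEAD(released);

static void release_all(void)
{
	struct list_head *node, *tmp;	/* tmp stays one step ahead */

	list_for_each_safe(node, tmp, &released)
		list_del(node);		/* safe: iteration uses tmp */
}
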
timer_list.c
101 timer = rb_entry(curr, struct hrtimer, node); in print_active_timers()