Searched refs:node (Results 1 – 25 of 47) sorted by relevance

/kernel/gcov/
fs.c 145 static struct gcov_info *get_node_info(struct gcov_node *node) in get_node_info() argument
147 if (node->num_loaded > 0) in get_node_info()
148 return node->loaded_info[0]; in get_node_info()
150 return node->unloaded_info; in get_node_info()
157 static struct gcov_info *get_accumulated_info(struct gcov_node *node) in get_accumulated_info() argument
162 if (node->unloaded_info) in get_accumulated_info()
163 info = gcov_info_dup(node->unloaded_info); in get_accumulated_info()
165 info = gcov_info_dup(node->loaded_info[i++]); in get_accumulated_info()
168 for (; i < node->num_loaded; i++) in get_accumulated_info()
169 gcov_info_add(info, node->loaded_info[i]); in get_accumulated_info()
[all …]
/kernel/locking/
osq_lock.c 37 struct optimistic_spin_node *node, in osq_wait_next() argument
72 if (node->next) { in osq_wait_next()
73 next = xchg(&node->next, NULL); in osq_wait_next()
86 struct optimistic_spin_node *node = this_cpu_ptr(&osq_node); in osq_lock() local
91 node->locked = 0; in osq_lock()
92 node->next = NULL; in osq_lock()
93 node->cpu = curr; in osq_lock()
106 node->prev = prev; in osq_lock()
107 WRITE_ONCE(prev->next, node); in osq_lock()
118 while (!READ_ONCE(node->locked)) { in osq_lock()
[all …]
qspinlock_paravirt.h 168 struct pv_node *node; member
206 static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node) in pv_hash() argument
215 WRITE_ONCE(he->node, node); in pv_hash()
237 struct pv_node *node; in pv_unhash() local
241 node = READ_ONCE(he->node); in pv_unhash()
243 return node; in pv_unhash()
272 static void pv_init_node(struct mcs_spinlock *node) in pv_init_node() argument
274 struct pv_node *pn = (struct pv_node *)node; in pv_init_node()
287 static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev) in pv_wait_node() argument
289 struct pv_node *pn = (struct pv_node *)node; in pv_wait_node()
[all …]
mcs_spinlock.h 62 void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node) in mcs_spin_lock() argument
67 node->locked = 0; in mcs_spin_lock()
68 node->next = NULL; in mcs_spin_lock()
76 prev = xchg(lock, node); in mcs_spin_lock()
88 WRITE_ONCE(prev->next, node); in mcs_spin_lock()
91 arch_mcs_spin_lock_contended(&node->locked); in mcs_spin_lock()
99 void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node) in mcs_spin_unlock() argument
101 struct mcs_spinlock *next = READ_ONCE(node->next); in mcs_spin_unlock()
107 if (likely(cmpxchg_release(lock, node, NULL) == node)) in mcs_spin_unlock()
110 while (!(next = READ_ONCE(node->next))) in mcs_spin_unlock()
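
The mcs_spinlock.h hits above are the heart of the MCS lock: every contender supplies its own queue node and spins on a flag inside that node rather than on the shared lock word, so each waiter spins on its own cache line. Below is a minimal usage sketch, assuming access to the private kernel/locking/mcs_spinlock.h header; the lock variable and function are hypothetical.

#include "mcs_spinlock.h"			/* private header, only visible inside kernel/locking/ */

static struct mcs_spinlock *example_lock;	/* NULL when unlocked */

static void example_critical_section(void)
{
	struct mcs_spinlock node;		/* caller-provided queue node, fine on the stack */

	/* append ourselves to the queue; if contended, spin on node.locked */
	mcs_spin_lock(&example_lock, &node);

	/* ... critical section ... */

	/* pass the lock to node.next, or reset the tail if we are last */
	mcs_spin_unlock(&example_lock, &node);
}
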
qspinlock.c 250 static __always_inline void __pv_init_node(struct mcs_spinlock *node) { } in __pv_init_node() argument
251 static __always_inline void __pv_wait_node(struct mcs_spinlock *node, in __pv_wait_node() argument
254 struct mcs_spinlock *node) { } in __pv_kick_node() argument
256 struct mcs_spinlock *node) in __pv_wait_head_or_lock() argument
412 struct mcs_spinlock *prev, *next, *node; in queued_spin_lock_slowpath() local
493 node = this_cpu_ptr(&mcs_nodes[0]); in queued_spin_lock_slowpath()
494 idx = node->count++; in queued_spin_lock_slowpath()
497 node += idx; in queued_spin_lock_slowpath()
498 node->locked = 0; in queued_spin_lock_slowpath()
499 node->next = NULL; in queued_spin_lock_slowpath()
[all …]
/kernel/power/
wakelock.c 28 struct rb_node node; member
39 struct rb_node *node; in pm_show_wakelocks() local
46 for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) { in pm_show_wakelocks()
47 wl = rb_entry(node, struct wakelock, node); in pm_show_wakelocks()
124 rb_erase(&wl->node, &wakelocks_tree); in __wakelocks_gc()
152 struct rb_node **node = &wakelocks_tree.rb_node; in wakelock_lookup_add() local
153 struct rb_node *parent = *node; in wakelock_lookup_add()
156 while (*node) { in wakelock_lookup_add()
159 parent = *node; in wakelock_lookup_add()
160 wl = rb_entry(*node, struct wakelock, node); in wakelock_lookup_add()
[all …]
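
The wakelock.c hits show the standard kernel rbtree idiom: an rb_node embedded in the object, a manual descent to find the slot, then rb_link_node() plus rb_insert_color(). A condensed lookup-or-insert sketch of that walk, keyed by name; the struct and function names are invented and only the usual <linux/rbtree.h> API is assumed.

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_lock {
	char name[32];
	struct rb_node node;		/* embedded tree linkage */
};

static struct rb_root example_tree = RB_ROOT;

static struct example_lock *example_lookup_add(const char *name)
{
	struct rb_node **new = &example_tree.rb_node, *parent = NULL;
	struct example_lock *el;
	int diff;

	/* descend until we find the key or fall off the tree */
	while (*new) {
		el = rb_entry(*new, struct example_lock, node);
		diff = strcmp(name, el->name);
		parent = *new;
		if (diff < 0)
			new = &((*new)->rb_left);
		else if (diff > 0)
			new = &((*new)->rb_right);
		else
			return el;	/* already present */
	}

	el = kzalloc(sizeof(*el), GFP_KERNEL);
	if (!el)
		return NULL;
	strscpy(el->name, name, sizeof(el->name));

	/* link into the slot found above, then recolor/rebalance */
	rb_link_node(&el->node, parent, new);
	rb_insert_color(&el->node, &example_tree);
	return el;
}
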
qos.c 151 struct plist_node *node; in pm_qos_get_value() local
165 plist_for_each(node, &c->list) in pm_qos_get_value()
166 total_value += node->prio; in pm_qos_get_value()
229 plist_for_each_entry(req, &c->list, node) { in pm_qos_dbg_show_requests()
232 if ((req->node).prio != c->default_value) { in pm_qos_dbg_show_requests()
238 (req->node).prio, state); in pm_qos_dbg_show_requests()
273 int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, in pm_qos_update_target() argument
289 plist_del(node, &c->list); in pm_qos_update_target()
297 plist_del(node, &c->list); in pm_qos_update_target()
299 plist_node_init(node, new_value); in pm_qos_update_target()
[all …]
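
qos.c keeps its constraints on a priority-sorted list (plist): every request embeds a plist_node whose prio field carries the value, and aggregation just walks the list. A small sketch of that pattern using the generic <linux/plist.h> API; the request struct and functions are made up, and locking is left out.

#include <linux/plist.h>

struct example_req {
	struct plist_node node;		/* node.prio holds this request's value */
};

static PLIST_HEAD(example_constraints);

static void example_add_req(struct example_req *req, int value)
{
	plist_node_init(&req->node, value);
	plist_add(&req->node, &example_constraints);	/* list stays sorted by prio */
}

static int example_sum(void)
{
	struct example_req *req;
	int total = 0;

	plist_for_each_entry(req, &example_constraints, node)
		total += req->node.prio;
	return total;
}
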
snapshot.c 370 struct rtree_node *node; member
404 struct rtree_node *node; in alloc_rtree_node() local
406 node = chain_alloc(ca, sizeof(struct rtree_node)); in alloc_rtree_node()
407 if (!node) in alloc_rtree_node()
410 node->data = get_image_page(gfp_mask, safe_needed); in alloc_rtree_node()
411 if (!node->data) in alloc_rtree_node()
414 list_add_tail(&node->list, list); in alloc_rtree_node()
416 return node; in alloc_rtree_node()
429 struct rtree_node *node, *block, **dst; in add_rtree_block() local
444 node = alloc_rtree_node(gfp_mask, safe_needed, ca, in add_rtree_block()
[all …]
swap.c 123 struct rb_node node; member
138 ext = rb_entry(*new, struct swsusp_extent, node); in swsusp_extents_insert()
166 rb_link_node(&ext->node, parent, new); in swsusp_extents_insert()
167 rb_insert_color(&ext->node, &swsusp_extents); in swsusp_extents_insert()
198 struct rb_node *node; in free_all_swap_pages() local
200 while ((node = swsusp_extents.rb_node)) { in free_all_swap_pages()
204 ext = container_of(node, struct swsusp_extent, node); in free_all_swap_pages()
205 rb_erase(node, &swsusp_extents); in free_all_swap_pages()
/kernel/trace/
trace_stat.c 26 struct rb_node node; member
50 rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) { in __reset_stat_session()
94 this = container_of(*new, struct stat_node, node); in insert_stat()
104 rb_link_node(&data->node, parent, new); in insert_stat()
105 rb_insert_color(&data->node, root); in insert_stat()
175 struct rb_node *node; in stat_seq_start() local
189 node = rb_first(&session->stat_root); in stat_seq_start()
190 for (i = 0; node && i < n; i++) in stat_seq_start()
191 node = rb_next(node); in stat_seq_start()
193 return node; in stat_seq_start()
[all …]
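
trace_stat.c tears its tree down with rbtree_postorder_for_each_entry_safe(), which visits children before parents so every node can be freed during the walk without touching freed memory. A minimal sketch of that teardown; the struct and function are illustrative.

#include <linux/rbtree.h>
#include <linux/slab.h>

struct example_stat {
	struct rb_node node;
	void *payload;
};

static void example_reset(struct rb_root *root)
{
	struct example_stat *snode, *n;

	/* postorder walk: safe to free each entry as we go, no rebalancing */
	rbtree_postorder_for_each_entry_safe(snode, n, root, node)
		kfree(snode);

	*root = RB_ROOT;
}
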
/kernel/bpf/
percpu_freelist.c 32 struct pcpu_freelist_node *node) in __pcpu_freelist_push() argument
35 node->next = head->first; in __pcpu_freelist_push()
36 head->first = node; in __pcpu_freelist_push()
41 struct pcpu_freelist_node *node) in pcpu_freelist_push() argument
45 __pcpu_freelist_push(head, node); in pcpu_freelist_push()
80 struct pcpu_freelist_node *node; in pcpu_freelist_pop() local
89 node = head->first; in pcpu_freelist_pop()
90 if (node) { in pcpu_freelist_pop()
91 head->first = node->next; in pcpu_freelist_pop()
93 return node; in pcpu_freelist_pop()
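
percpu_freelist.c is a LIFO of intrusive nodes: push and pop both operate on the head of a singly linked list under a raw spinlock, and the real code simply keeps one such list per CPU to spread contention. A stripped-down single-list sketch of the push/pop logic; all names are illustrative.

#include <linux/spinlock.h>

struct example_flnode {
	struct example_flnode *next;
};

struct example_fl {
	struct example_flnode *first;
	raw_spinlock_t lock;
};

static void example_fl_push(struct example_fl *fl, struct example_flnode *node)
{
	raw_spin_lock(&fl->lock);
	node->next = fl->first;		/* LIFO: the new node becomes the head */
	fl->first = node;
	raw_spin_unlock(&fl->lock);
}

static struct example_flnode *example_fl_pop(struct example_fl *fl)
{
	struct example_flnode *node;

	raw_spin_lock(&fl->lock);
	node = fl->first;
	if (node)
		fl->first = node->next;
	raw_spin_unlock(&fl->lock);
	return node;			/* NULL when empty */
}
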
/kernel/irq/
irqdesc.c 57 static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) in alloc_masks() argument
60 gfp, node)) in alloc_masks()
64 if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { in alloc_masks()
72 static void desc_smp_init(struct irq_desc *desc, int node, in desc_smp_init() argument
83 desc->irq_common_data.node = node; in desc_smp_init()
89 alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } in alloc_masks() argument
91 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } in desc_smp_init() argument
94 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, in desc_set_defaults() argument
116 desc_smp_init(desc, node, affinity); in desc_set_defaults()
342 static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags, in alloc_desc() argument
[all …]
/kernel/
audit_tree.c 31 struct node { struct
144 size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node); in alloc_chunk()
211 static struct audit_chunk *find_chunk(struct node *p) in find_chunk()
218 static void untag_chunk(struct node *p) in untag_chunk()
361 struct node *p; in tag_chunk()
499 struct node *p; in prune_one()
501 p = list_entry(victim->chunks.next, struct node, list); in prune_one()
521 struct node *node = list_entry(p, struct node, list); in trim_marked() local
523 if (node->index & (1U<<31)) { in trim_marked()
530 struct node *node; in trim_marked() local
[all …]
kthread.c 33 int node; member
232 current->pref_node_fork = create->node; in create_kthread()
250 void *data, int node, in __kthread_create_on_node() argument
263 create->node = node; in __kthread_create_on_node()
330 void *data, int node, in kthread_create_on_node() argument
338 task = __kthread_create_on_node(threadfn, data, node, namefmt, args); in kthread_create_on_node()
619 struct kthread_work, node); in kthread_worker_fn()
620 list_del_init(&work->node); in kthread_worker_fn()
743 return !list_empty(&work->node) || work->canceling; in queuing_blocked()
750 WARN_ON_ONCE(!list_empty(&work->node)); in kthread_insert_work_sanity_check()
[all …]
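
kthread.c stores the requested NUMA node in the create request so the new thread's task_struct and stack are allocated on that node (see pref_node_fork above). From the caller's side the usual pattern is kthread_create_on_node() followed by wake_up_process(); a sketch under that assumption, with an invented thread function and name format.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/topology.h>

static int example_thread_fn(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();			/* sleep until woken or stopped */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static struct task_struct *example_start_thread(int cpu)
{
	struct task_struct *t;

	/* allocate the thread's task_struct and stack on @cpu's memory node */
	t = kthread_create_on_node(example_thread_fn, NULL,
				   cpu_to_node(cpu), "example/%d", cpu);
	if (!IS_ERR(t))
		wake_up_process(t);	/* kthread_create_on_node() leaves it stopped */
	return t;
}
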
cpu.c 57 struct hlist_node *node; member
86 struct hlist_node *node);
91 struct hlist_node *node);
129 bool bringup, struct hlist_node *node) in cpuhp_invoke_callback() argument
133 int (*cbm)(unsigned int cpu, struct hlist_node *node); in cpuhp_invoke_callback()
151 if (node) { in cpuhp_invoke_callback()
152 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
153 ret = cbm(cpu, node); in cpuhp_invoke_callback()
160 hlist_for_each(node, &step->list) { in cpuhp_invoke_callback()
161 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
[all …]
workqueue.c 149 int node; /* I: the associated node ID */ member
408 list_for_each_entry((worker), &(pool)->workers, node) \
561 int node) in unbound_pwq_by_node() argument
571 if (unlikely(node == NUMA_NO_NODE)) in unbound_pwq_by_node()
574 return rcu_dereference_raw(wq->numa_pwq_tbl[node]); in unbound_pwq_by_node()
1669 static struct worker *alloc_worker(int node) in alloc_worker() argument
1673 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); in alloc_worker()
1677 INIT_LIST_HEAD(&worker->node); in alloc_worker()
1712 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
1732 list_del(&worker->node); in worker_detach_from_pool()
[all …]
padata.c 774 static int padata_cpu_online(unsigned int cpu, struct hlist_node *node) in padata_cpu_online() argument
779 pinst = hlist_entry_safe(node, struct padata_instance, node); in padata_cpu_online()
789 static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node) in padata_cpu_prep_down() argument
794 pinst = hlist_entry_safe(node, struct padata_instance, node); in padata_cpu_prep_down()
810 cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node); in __padata_free()
1008 cpuhp_state_add_instance_nocalls(hp_online, &pinst->node); in padata_alloc()
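
cpu.c and padata.c together show the multi-instance hotplug pattern: each instance embeds an hlist_node, registers it against a single hotplug state, and the per-CPU callback receives that node and converts it back with hlist_entry_safe(). A registration sketch under those assumptions; the state name, struct and callbacks are invented.

#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/init.h>

struct example_inst {
	struct hlist_node cpu_node;	/* links this instance into the hotplug state */
	/* ... per-instance data ... */
};

static enum cpuhp_state example_online;

static int example_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct example_inst *inst = hlist_entry_safe(node, struct example_inst, cpu_node);

	/* react to @cpu coming up for this particular instance */
	(void)inst;
	return 0;
}

static int __init example_init(void)
{
	int ret;

	/* one dynamic state; every registered instance shares these callbacks */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "example:online",
				      example_cpu_online, NULL);
	if (ret < 0)
		return ret;
	example_online = ret;
	return 0;
}

static int example_register(struct example_inst *inst)
{
	/* hook this instance in without invoking the callback for current CPUs */
	return cpuhp_state_add_instance_nocalls(example_online, &inst->cpu_node);
}
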
fork.c 141 static inline struct task_struct *alloc_task_struct_node(int node) in alloc_task_struct_node() argument
143 return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); in alloc_task_struct_node()
173 static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) in alloc_thread_stack_node() argument
197 0, node, __builtin_return_address(0)); in alloc_thread_stack_node()
208 struct page *page = alloc_pages_node(node, THREADINFO_GFP, in alloc_thread_stack_node()
245 int node) in alloc_thread_stack_node() argument
247 return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); in alloc_thread_stack_node()
473 static struct task_struct *dup_task_struct(struct task_struct *orig, int node) in dup_task_struct() argument
480 if (node == NUMA_NO_NODE) in dup_task_struct()
481 node = tsk_fork_get_node(orig); in dup_task_struct()
[all …]
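
fork.c resolves NUMA_NO_NODE to a preferred node and then uses the *_node allocator variants so a task's task_struct and stack live on the memory node of the CPU it will run on. The same idea for an arbitrary per-CPU buffer, as a sketch; the helper is made up.

#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/numa.h>

/* allocate @size bytes on the memory node backing @cpu, or anywhere if cpu < 0 */
static void *example_alloc_for_cpu(int cpu, size_t size)
{
	int node = (cpu < 0) ? NUMA_NO_NODE : cpu_to_node(cpu);

	return kzalloc_node(size, GFP_KERNEL, node);
}
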
ucount.c 119 hlist_for_each_entry(ucounts, hashent, node) { in find_ucounts()
149 hlist_add_head(&new->node, hashent); in get_ucounts()
168 hlist_del_init(&ucounts->node); in put_ucounts()
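
ucount.c is the plain hashed-bucket idiom: the object embeds an hlist_node, lookup walks one bucket with hlist_for_each_entry(), insertion is hlist_add_head() and removal hlist_del_init(). A small sketch with an integer key; all names are illustrative and locking is omitted.

#include <linux/list.h>
#include <linux/hash.h>

#define EXAMPLE_HASH_BITS	6

struct example_count {
	struct hlist_node node;		/* bucket linkage */
	unsigned int key;
};

static struct hlist_head example_hash[1 << EXAMPLE_HASH_BITS];

static struct hlist_head *example_bucket(unsigned int key)
{
	return &example_hash[hash_32(key, EXAMPLE_HASH_BITS)];
}

static struct example_count *example_find(unsigned int key)
{
	struct example_count *ec;

	hlist_for_each_entry(ec, example_bucket(key), node)
		if (ec->key == key)
			return ec;
	return NULL;
}

static void example_insert(struct example_count *ec)
{
	hlist_add_head(&ec->node, example_bucket(ec->key));
}
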
pid.c 394 hlist_add_head_rcu(&link->node, &link->pid->tasks[type]); in attach_pid()
407 hlist_del_rcu(&link->node); in __change_pid()
434 hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node); in transfer_pid()
445 result = hlist_entry(first, struct task_struct, pids[(type)].node); in pid_task()
/kernel/time/
alarmtimer.c 140 timerqueue_del(&base->timerqueue, &alarm->node); in alarmtimer_enqueue()
142 timerqueue_add(&base->timerqueue, &alarm->node); in alarmtimer_enqueue()
160 timerqueue_del(&base->timerqueue, &alarm->node); in alarmtimer_dequeue()
191 hrtimer_set_expires(&alarm->timer, alarm->node.expires); in alarmtimer_fired()
204 return ktime_sub(alarm->node.expires, base->gettime()); in alarm_expires_remaining()
320 timerqueue_init(&alarm->node); in alarm_init()
341 alarm->node.expires = start; in alarm_start()
343 hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS); in alarm_start()
368 hrtimer_set_expires(&alarm->timer, alarm->node.expires); in alarm_restart()
421 delta = ktime_sub(now, alarm->node.expires); in alarm_forward()
[all …]
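
alarmtimer.c (and hrtimer.c right below) store pending timers in a timerqueue: an rbtree-backed container ordered by node.expires that caches the earliest entry so timerqueue_getnext() is cheap. A sketch with a hypothetical timer type, assuming only the <linux/timerqueue.h> API.

#include <linux/timerqueue.h>
#include <linux/ktime.h>
#include <linux/kernel.h>

struct example_timer {
	struct timerqueue_node node;	/* node.expires orders the queue */
	void (*fn)(struct example_timer *t);
};

static struct timerqueue_head example_queue;	/* set up with timerqueue_init_head() before use */

static void example_timer_init(struct example_timer *t)
{
	timerqueue_init(&t->node);
}

static void example_timer_enqueue(struct example_timer *t, ktime_t expires)
{
	t->node.expires = expires;
	timerqueue_add(&example_queue, &t->node);	/* sorted insert */
}

/* earliest pending timer, or NULL if the queue is empty */
static struct example_timer *example_timer_next(void)
{
	struct timerqueue_node *next = timerqueue_getnext(&example_queue);

	return next ? container_of(next, struct example_timer, node) : NULL;
}
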
hrtimer.c 478 timer = container_of(next, struct hrtimer, node); in __hrtimer_get_next_event()
875 return timerqueue_add(&base->active, &timer->node); in enqueue_hrtimer()
899 if (!timerqueue_del(&base->active, &timer->node)) in __remove_hrtimer()
1147 timerqueue_init(&timer->node); in __hrtimer_init()
1291 struct timerqueue_node *node; in __hrtimer_run_queues() local
1299 while ((node = timerqueue_getnext(&base->active))) { in __hrtimer_run_queues()
1302 timer = container_of(node, struct hrtimer, node); in __hrtimer_run_queues()
1632 struct timerqueue_node *node; in migrate_hrtimer_list() local
1634 while ((node = timerqueue_getnext(&old_base->active))) { in migrate_hrtimer_list()
1635 timer = container_of(node, struct hrtimer, node); in migrate_hrtimer_list()
/kernel/events/
ring_buffer.c 501 static struct page *rb_alloc_aux_page(int node, int order) in rb_alloc_aux_page() argument
509 page = alloc_pages_node(node, PERF_AUX_GFP, order); in rb_alloc_aux_page()
567 int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu); in rb_alloc_aux() local
593 rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node); in rb_alloc_aux()
603 page = rb_alloc_aux_page(node, order); in rb_alloc_aux()
683 int node; in perf_mmap_alloc_page() local
685 node = (cpu == -1) ? cpu : cpu_to_node(cpu); in perf_mmap_alloc_page()
686 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); in perf_mmap_alloc_page()
/kernel/rcu/
tree.h 268 for ((rnp) = &(rsp)->node[0]; \
269 (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
277 for ((rnp) = &(rsp)->node[0]; \
288 (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
479 struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */ member
/kernel/livepatch/
core.c 49 struct list_head node; member
72 list_for_each_entry(ops, &klp_ops, node) { in klp_find_ops()
372 list_del(&ops->node); in klp_disable_func()
412 list_add(&ops->node, &klp_ops); in klp_enable_func()
443 list_del(&ops->node); in klp_enable_func()
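
livepatch's core.c keeps its ftrace ops on a plain doubly linked list: klp_ops embeds a list_head named node, lookup is a list_for_each_entry() walk, and enable/disable do list_add()/list_del(). A condensed sketch of that lookup with a simplified key; names are illustrative.

#include <linux/list.h>

struct example_ops {
	struct list_head node;		/* linkage on the global list */
	unsigned long old_addr;		/* key searched for */
};

static LIST_HEAD(example_ops_list);

static struct example_ops *example_find_ops(unsigned long old_addr)
{
	struct example_ops *ops;

	list_for_each_entry(ops, &example_ops_list, node)
		if (ops->old_addr == old_addr)
			return ops;
	return NULL;
}

static void example_register_ops(struct example_ops *ops)
{
	list_add(&ops->node, &example_ops_list);
}
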
