Searched refs:node (Results 1 – 25 of 42) sorted by relevance

/kernel/gcov/
fs.c 145 static struct gcov_info *get_node_info(struct gcov_node *node) in get_node_info() argument
147 if (node->num_loaded > 0) in get_node_info()
148 return node->loaded_info[0]; in get_node_info()
150 return node->unloaded_info; in get_node_info()
157 static struct gcov_info *get_accumulated_info(struct gcov_node *node) in get_accumulated_info() argument
162 if (node->unloaded_info) in get_accumulated_info()
163 info = gcov_info_dup(node->unloaded_info); in get_accumulated_info()
165 info = gcov_info_dup(node->loaded_info[i++]); in get_accumulated_info()
168 for (; i < node->num_loaded; i++) in get_accumulated_info()
169 gcov_info_add(info, node->loaded_info[i]); in get_accumulated_info()
[all …]
/kernel/locking/
osq_lock.c 37 struct optimistic_spin_node *node, in osq_wait_next() argument
72 if (node->next) { in osq_wait_next()
73 next = xchg(&node->next, NULL); in osq_wait_next()
86 struct optimistic_spin_node *node = this_cpu_ptr(&osq_node); in osq_lock() local
91 node->locked = 0; in osq_lock()
92 node->next = NULL; in osq_lock()
93 node->cpu = curr; in osq_lock()
106 node->prev = prev; in osq_lock()
120 WRITE_ONCE(prev->next, node); in osq_lock()
131 while (!READ_ONCE(node->locked)) { in osq_lock()
[all …]
qspinlock_paravirt.h 61 struct pv_node *node; member
99 static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node) in pv_hash() argument
106 WRITE_ONCE(he->node, node); in pv_hash()
127 struct pv_node *node; in pv_unhash() local
131 node = READ_ONCE(he->node); in pv_unhash()
133 return node; in pv_unhash()
149 static void pv_init_node(struct mcs_spinlock *node) in pv_init_node() argument
151 struct pv_node *pn = (struct pv_node *)node; in pv_init_node()
164 static void pv_wait_node(struct mcs_spinlock *node) in pv_wait_node() argument
166 struct pv_node *pn = (struct pv_node *)node; in pv_wait_node()
[all …]
mcs_spinlock.h 62 void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node) in mcs_spin_lock() argument
67 node->locked = 0; in mcs_spin_lock()
68 node->next = NULL; in mcs_spin_lock()
76 prev = xchg(lock, node); in mcs_spin_lock()
88 WRITE_ONCE(prev->next, node); in mcs_spin_lock()
91 arch_mcs_spin_lock_contended(&node->locked); in mcs_spin_lock()
99 void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node) in mcs_spin_unlock() argument
101 struct mcs_spinlock *next = READ_ONCE(node->next); in mcs_spin_unlock()
107 if (likely(cmpxchg_release(lock, node, NULL) == node)) in mcs_spin_unlock()
110 while (!(next = READ_ONCE(node->next))) in mcs_spin_unlock()
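The mcs_spinlock.h matches above show the full MCS handoff: xchg() swings the lock's tail to the caller's node, the caller links itself behind the previous tail and spins on its own node->locked, and unlock either clears the tail with cmpxchg_release() or passes the lock to node->next. A minimal user-space sketch of the same handoff using C11 atomics in place of the kernel primitives (all names below are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_bool locked;		/* set to true when the lock is handed to us */
};

static void mcs_lock(_Atomic(struct mcs_node *) *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, false, memory_order_relaxed);

	/* Swing the tail to our node; a previous tail must hand the lock over. */
	prev = atomic_exchange_explicit(lock, node, memory_order_acq_rel);
	if (!prev)
		return;			/* queue was empty: lock acquired */

	atomic_store_explicit(&prev->next, node, memory_order_release);
	while (!atomic_load_explicit(&node->locked, memory_order_acquire))
		;			/* spin on our own node, not on the lock word */
}

static void mcs_unlock(_Atomic(struct mcs_node *) *lock, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load_explicit(&node->next, memory_order_acquire);

	if (!next) {
		struct mcs_node *expected = node;

		/* No visible successor: try to clear the tail and be done. */
		if (atomic_compare_exchange_strong_explicit(lock, &expected, NULL,
				memory_order_release, memory_order_relaxed))
			return;
		/* A successor is mid-enqueue; wait for it to link itself. */
		while (!(next = atomic_load_explicit(&node->next, memory_order_acquire)))
			;
	}
	atomic_store_explicit(&next->locked, true, memory_order_release);
}

Each waiter spins only on its own node->locked, which is the point of the queued design: handoff touches one other CPU's cache line instead of having every waiter hammer the shared lock word.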
qspinlock.c 240 static __always_inline void __pv_init_node(struct mcs_spinlock *node) { } in __pv_init_node() argument
241 static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { } in __pv_wait_node() argument
243 struct mcs_spinlock *node) { } in __pv_kick_node() argument
245 struct mcs_spinlock *node) { } in __pv_wait_head() argument
343 struct mcs_spinlock *prev, *next, *node; in queued_spin_lock_slowpath() local
421 node = this_cpu_ptr(&mcs_nodes[0]); in queued_spin_lock_slowpath()
422 idx = node->count++; in queued_spin_lock_slowpath()
425 node += idx; in queued_spin_lock_slowpath()
434 node->locked = 0; in queued_spin_lock_slowpath()
435 node->next = NULL; in queued_spin_lock_slowpath()
[all …]
/kernel/power/
wakelock.c 28 struct rb_node node; member
39 struct rb_node *node; in pm_show_wakelocks() local
45 for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) { in pm_show_wakelocks()
46 wl = rb_entry(node, struct wakelock, node); in pm_show_wakelocks()
120 rb_erase(&wl->node, &wakelocks_tree); in __wakelocks_gc()
148 struct rb_node **node = &wakelocks_tree.rb_node; in wakelock_lookup_add() local
149 struct rb_node *parent = *node; in wakelock_lookup_add()
152 while (*node) { in wakelock_lookup_add()
155 parent = *node; in wakelock_lookup_add()
156 wl = rb_entry(*node, struct wakelock, node); in wakelock_lookup_add()
[all …]
qos.c 151 struct plist_node *node; in pm_qos_get_value() local
165 plist_for_each(node, &c->list) in pm_qos_get_value()
166 total_value += node->prio; in pm_qos_get_value()
229 plist_for_each_entry(req, &c->list, node) { in pm_qos_dbg_show_requests()
232 if ((req->node).prio != c->default_value) { in pm_qos_dbg_show_requests()
238 (req->node).prio, state); in pm_qos_dbg_show_requests()
273 int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, in pm_qos_update_target() argument
289 plist_del(node, &c->list); in pm_qos_update_target()
297 plist_del(node, &c->list); in pm_qos_update_target()
299 plist_node_init(node, new_value); in pm_qos_update_target()
[all …]
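The qos.c matches keep requests on a plist (priority-sorted list), with node->prio carrying the request value, and pm_qos_update_target() re-queues a node when that value changes. A minimal kernel-style sketch of the re-queue step against <linux/plist.h>; the request struct and function name are illustrative:

#include <linux/plist.h>

struct qos_req {				/* illustrative request type */
	struct plist_node node;			/* node.prio carries the request's value */
};

/*
 * Change @req's value to @new_prio. A plist only stays sorted if the
 * node is removed, re-initialised with the new priority, and re-added.
 */
static void qos_req_update(struct plist_head *list, struct qos_req *req,
			   int new_prio)
{
	plist_del(&req->node, list);
	plist_node_init(&req->node, new_prio);
	plist_add(&req->node, list);
}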
snapshot.c 300 struct rtree_node *node; member
335 struct rtree_node *node; in alloc_rtree_node() local
337 node = chain_alloc(ca, sizeof(struct rtree_node)); in alloc_rtree_node()
338 if (!node) in alloc_rtree_node()
341 node->data = get_image_page(gfp_mask, safe_needed); in alloc_rtree_node()
342 if (!node->data) in alloc_rtree_node()
345 list_add_tail(&node->list, list); in alloc_rtree_node()
347 return node; in alloc_rtree_node()
360 struct rtree_node *node, *block, **dst; in add_rtree_block() local
375 node = alloc_rtree_node(gfp_mask, safe_needed, ca, in add_rtree_block()
[all …]
swap.c 123 struct rb_node node; member
138 ext = rb_entry(*new, struct swsusp_extent, node); in swsusp_extents_insert()
166 rb_link_node(&ext->node, parent, new); in swsusp_extents_insert()
167 rb_insert_color(&ext->node, &swsusp_extents); in swsusp_extents_insert()
198 struct rb_node *node; in free_all_swap_pages() local
200 while ((node = swsusp_extents.rb_node)) { in free_all_swap_pages()
204 ext = container_of(node, struct swsusp_extent, node); in free_all_swap_pages()
205 rb_erase(node, &swsusp_extents); in free_all_swap_pages()
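The swap.c matches show the usual intrusive rbtree idiom: an rb_node embedded in the object, a descent from the root that remembers the parent and the child link to patch, then rb_link_node() plus rb_insert_color() to splice and rebalance. A minimal kernel-style sketch of that insert walk against <linux/rbtree.h> (the struct and its key are illustrative, not swsusp_extent's fields):

#include <linux/rbtree.h>

struct extent {					/* illustrative: ordered by 'start' */
	struct rb_node node;
	unsigned long start;
};

static void extent_insert(struct rb_root *root, struct extent *new_ext)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	/* Descend to a leaf slot, remembering the parent and the link to patch. */
	while (*new) {
		struct extent *cur = rb_entry(*new, struct extent, node);

		parent = *new;
		if (new_ext->start < cur->start)
			new = &(*new)->rb_left;
		else
			new = &(*new)->rb_right;
	}
	rb_link_node(&new_ext->node, parent, new);	/* splice into the tree */
	rb_insert_color(&new_ext->node, root);		/* rebalance/recolour */
}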
/kernel/trace/
trace_stat.c 26 struct rb_node node; member
50 rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) { in __reset_stat_session()
94 this = container_of(*new, struct stat_node, node); in insert_stat()
104 rb_link_node(&data->node, parent, new); in insert_stat()
105 rb_insert_color(&data->node, root); in insert_stat()
175 struct rb_node *node; in stat_seq_start() local
189 node = rb_first(&session->stat_root); in stat_seq_start()
190 for (i = 0; node && i < n; i++) in stat_seq_start()
191 node = rb_next(node); in stat_seq_start()
193 return node; in stat_seq_start()
[all …]
/kernel/irq/
irqdesc.c 39 static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) in alloc_masks() argument
42 gfp, node)) in alloc_masks()
46 if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { in alloc_masks()
54 static void desc_smp_init(struct irq_desc *desc, int node) in desc_smp_init() argument
61 desc->irq_common_data.node = node; in desc_smp_init()
67 alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } in alloc_masks() argument
68 static inline void desc_smp_init(struct irq_desc *desc, int node) { } in desc_smp_init() argument
71 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, in desc_set_defaults() argument
93 desc_smp_init(desc, node); in desc_set_defaults()
144 static struct irq_desc *alloc_desc(int irq, int node, struct module *owner) in alloc_desc() argument
[all …]
irqdomain.c 27 irq_hw_number_t hwirq, int node);
837 irq_hw_number_t hwirq, int node) in irq_domain_alloc_descs() argument
842 virq = irq_alloc_descs(virq, virq, cnt, node); in irq_domain_alloc_descs()
847 virq = irq_alloc_descs_from(hint, cnt, node); in irq_domain_alloc_descs()
849 virq = irq_alloc_descs_from(1, cnt, node); in irq_domain_alloc_descs()
1170 unsigned int nr_irqs, int node, void *arg, in __irq_domain_alloc_irqs() argument
1189 virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node); in __irq_domain_alloc_irqs()
/kernel/
audit_tree.c 31 struct node { struct
144 size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node); in alloc_chunk()
211 static struct audit_chunk *find_chunk(struct node *p) in find_chunk()
218 static void untag_chunk(struct node *p) in untag_chunk()
361 struct node *p; in tag_chunk()
499 struct node *p; in prune_one()
501 p = list_entry(victim->chunks.next, struct node, list); in prune_one()
521 struct node *node = list_entry(p, struct node, list); in trim_marked() local
523 if (node->index & (1U<<31)) { in trim_marked()
530 struct node *node; in trim_marked() local
[all …]
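audit_tree.c sizes its chunk as offsetof(struct audit_chunk, owners) + count * sizeof(struct node), the standard way to allocate a struct whose trailing array is sized at run time in a single block. A small user-space sketch of the same sizing trick (both structs here are illustrative):

#include <stddef.h>
#include <stdlib.h>

struct owner { int index; };			/* illustrative element type */

struct chunk {
	size_t count;
	struct owner owners[];			/* flexible array member */
};

/* One allocation holds the header plus @count trailing owners. */
static struct chunk *chunk_alloc(size_t count)
{
	struct chunk *c = calloc(1, offsetof(struct chunk, owners) +
				    count * sizeof(struct owner));

	if (c)
		c->count = count;
	return c;
}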
kthread.c 33 int node; member
232 current->pref_node_fork = create->node; in create_kthread()
273 void *data, int node, in kthread_create_on_node() argument
286 create->node = node; in kthread_create_on_node()
600 struct kthread_work, node); in kthread_worker_fn()
601 list_del_init(&work->node); in kthread_worker_fn()
627 return !list_empty(&work->node) || work->canceling; in queuing_blocked()
637 list_add_tail(&work->node, pos); in insert_kthread_work()
706 if (!list_empty(&work->node)) in flush_kthread_work()
707 insert_kthread_work(worker, &fwork.work, work->node.next); in flush_kthread_work()
[all …]
workqueue.c 148 int node; /* I: the associated node ID */ member
390 list_for_each_entry((worker), &(pool)->workers, node) \
569 int node) in unbound_pwq_by_node() argument
579 if (unlikely(node == NUMA_NO_NODE)) in unbound_pwq_by_node()
582 return rcu_dereference_raw(wq->numa_pwq_tbl[node]); in unbound_pwq_by_node()
1641 static struct worker *alloc_worker(int node) in alloc_worker() argument
1645 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); in alloc_worker()
1649 INIT_LIST_HEAD(&worker->node); in alloc_worker()
1684 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
1704 list_del(&worker->node); in worker_detach_from_pool()
[all …]
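In workqueue.c, 'node' is an intrusive list_head that threads each worker onto its pool's workers list: alloc_worker() initialises it, worker_attach_to_pool() adds it with list_add_tail(), and worker_detach_from_pool() removes it. A minimal kernel-style sketch of that attach/iterate/detach shape against <linux/list.h> (types and function names are illustrative):

#include <linux/list.h>

struct pool {
	struct list_head workers;	/* must be set up with INIT_LIST_HEAD() */
};

struct worker {
	struct list_head node;		/* anchored at pool->workers */
};

static void worker_attach(struct pool *pool, struct worker *w)
{
	list_add_tail(&w->node, &pool->workers);
}

static void worker_detach(struct worker *w)
{
	list_del(&w->node);
}

static int pool_count_workers(struct pool *pool)
{
	struct worker *w;
	int n = 0;

	list_for_each_entry(w, &pool->workers, node)
		n++;
	return n;
}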
profile.c 334 int node, cpu = (unsigned long)__cpu; in profile_cpu_callback() local
340 node = cpu_to_mem(cpu); in profile_cpu_callback()
343 page = __alloc_pages_node(node, in profile_cpu_callback()
351 page = __alloc_pages_node(node, in profile_cpu_callback()
544 int node = cpu_to_mem(cpu); in create_hash_tables() local
547 page = __alloc_pages_node(node, in create_hash_tables()
554 page = __alloc_pages_node(node, in create_hash_tables()
fork.c 142 static inline struct task_struct *alloc_task_struct_node(int node) in alloc_task_struct_node() argument
144 return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); in alloc_task_struct_node()
165 int node) in alloc_thread_stack_node() argument
167 struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP, in alloc_thread_stack_node()
184 int node) in alloc_thread_stack_node() argument
186 return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); in alloc_thread_stack_node()
341 static struct task_struct *dup_task_struct(struct task_struct *orig, int node) in dup_task_struct() argument
347 if (node == NUMA_NO_NODE) in dup_task_struct()
348 node = tsk_fork_get_node(orig); in dup_task_struct()
349 tsk = alloc_task_struct_node(node); in dup_task_struct()
[all …]
pid.c 392 hlist_add_head_rcu(&link->node, &link->pid->tasks[type]); in attach_pid()
405 hlist_del_rcu(&link->node); in __change_pid()
432 hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node); in transfer_pid()
443 result = hlist_entry(first, struct task_struct, pids[(type)].node); in pid_task()
cpuset.c 2582 int __cpuset_node_allowed(int node, gfp_t gfp_mask) in __cpuset_node_allowed() argument
2590 if (node_isset(node, current->mems_allowed)) in __cpuset_node_allowed()
2609 allowed = node_isset(node, cs->mems_allowed); in __cpuset_node_allowed()
2645 int node; in cpuset_spread_node() local
2647 node = next_node(*rotor, current->mems_allowed); in cpuset_spread_node()
2648 if (node == MAX_NUMNODES) in cpuset_spread_node()
2649 node = first_node(current->mems_allowed); in cpuset_spread_node()
2650 *rotor = node; in cpuset_spread_node()
2651 return node; in cpuset_spread_node()
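cpuset_spread_node() is a round-robin rotor: pick the next allowed node after the one used last time, wrap back to the first allowed node when the mask runs out, and remember the choice. A small user-space sketch of the same walk over a 64-bit allowed mask; next_allowed() is an illustrative stand-in for the kernel's next_node()/first_node() helpers:

#include <stdint.h>

/* Index of the lowest set bit in @mask at or above @from, or 64 if none. */
static int next_allowed(uint64_t mask, int from)
{
	for (int i = from; i < 64; i++)
		if (mask & (1ULL << i))
			return i;
	return 64;
}

/* Round-robin choice of the next allowed node, like cpuset_spread_node(). */
static int spread_node(uint64_t allowed, int *rotor)
{
	int node = next_allowed(allowed, *rotor + 1);

	if (node == 64)				/* ran off the end: wrap around */
		node = next_allowed(allowed, 0);
	*rotor = node;
	return node;
}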
workqueue_internal.h 41 struct list_head node; /* A: anchored at pool->workers */ member
/kernel/time/
alarmtimer.c 141 timerqueue_del(&base->timerqueue, &alarm->node); in alarmtimer_enqueue()
143 timerqueue_add(&base->timerqueue, &alarm->node); in alarmtimer_enqueue()
161 timerqueue_del(&base->timerqueue, &alarm->node); in alarmtimer_dequeue()
192 hrtimer_set_expires(&alarm->timer, alarm->node.expires); in alarmtimer_fired()
205 return ktime_sub(alarm->node.expires, base->gettime()); in alarm_expires_remaining()
305 timerqueue_init(&alarm->node); in alarm_init()
326 alarm->node.expires = start; in alarm_start()
328 hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS); in alarm_start()
353 hrtimer_set_expires(&alarm->timer, alarm->node.expires); in alarm_restart()
406 delta = ktime_sub(now, alarm->node.expires); in alarm_forward()
[all …]
hrtimer.c 490 timer = container_of(next, struct hrtimer, node); in __hrtimer_get_next_event()
894 return timerqueue_add(&base->active, &timer->node); in enqueue_hrtimer()
919 if (!timerqueue_del(&base->active, &timer->node)) in __remove_hrtimer()
1156 timerqueue_init(&timer->node); in __hrtimer_init()
1300 struct timerqueue_node *node; in __hrtimer_run_queues() local
1308 while ((node = timerqueue_getnext(&base->active))) { in __hrtimer_run_queues()
1311 timer = container_of(node, struct hrtimer, node); in __hrtimer_run_queues()
1640 struct timerqueue_node *node; in migrate_hrtimer_list() local
1642 while ((node = timerqueue_getnext(&old_base->active))) { in migrate_hrtimer_list()
1643 timer = container_of(node, struct hrtimer, node); in migrate_hrtimer_list()
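The hrtimer.c matches embed a timerqueue_node in each timer and repeatedly take the earliest entry with timerqueue_getnext(), mapping it back to the containing timer with container_of(). A minimal kernel-style sketch of that drain loop against <linux/timerqueue.h>; the timer struct and its callback are illustrative:

#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/timerqueue.h>

struct mytimer {				/* illustrative timer type */
	struct timerqueue_node node;		/* node.expires orders the queue */
	void (*fn)(struct mytimer *);
};

/* Run every timer whose expiry is at or before @now, earliest first. */
static void run_expired(struct timerqueue_head *head, ktime_t now)
{
	struct timerqueue_node *node;

	while ((node = timerqueue_getnext(head))) {
		struct mytimer *t = container_of(node, struct mytimer, node);

		if (ktime_after(t->node.expires, now))
			break;			/* earliest entry not yet due */

		timerqueue_del(head, &t->node);
		t->fn(t);
	}
}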
/kernel/events/
ring_buffer.c 468 static struct page *rb_alloc_aux_page(int node, int order) in rb_alloc_aux_page() argument
476 page = alloc_pages_node(node, PERF_AUX_GFP, order); in rb_alloc_aux_page()
526 int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu); in rb_alloc_aux() local
552 rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node); in rb_alloc_aux()
562 page = rb_alloc_aux_page(node, order); in rb_alloc_aux()
653 int node; in perf_mmap_alloc_page() local
655 node = (cpu == -1) ? cpu : cpu_to_node(cpu); in perf_mmap_alloc_page()
656 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); in perf_mmap_alloc_page()
/kernel/rcu/
tree.h 259 for ((rnp) = &(rsp)->node[0]; \
260 (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
268 for ((rnp) = &(rsp)->node[0]; \
279 (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
459 struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */ member
/kernel/livepatch/
core.c 46 struct list_head node; member
69 list_for_each_entry(ops, &klp_ops, node) { in klp_find_ops()
371 list_del(&ops->node); in klp_disable_func()
402 list_add(&ops->node, &klp_ops); in klp_enable_func()
433 list_del(&ops->node); in klp_enable_func()
