
Searched refs:node (Results 1 – 25 of 88) sorted by relevance


/kernel/gcov/
fs.c 255 static struct gcov_info *get_node_info(struct gcov_node *node) in get_node_info() argument
257 if (node->num_loaded > 0) in get_node_info()
258 return node->loaded_info[0]; in get_node_info()
260 return node->unloaded_info; in get_node_info()
267 static struct gcov_info *get_accumulated_info(struct gcov_node *node) in get_accumulated_info() argument
272 if (node->unloaded_info) in get_accumulated_info()
273 info = gcov_info_dup(node->unloaded_info); in get_accumulated_info()
275 info = gcov_info_dup(node->loaded_info[i++]); in get_accumulated_info()
278 for (; i < node->num_loaded; i++) in get_accumulated_info()
279 gcov_info_add(info, node->loaded_info[i]); in get_accumulated_info()
[all …]
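
The fs.c hits above show gcov folding every loaded copy of a file's counters into one snapshot: duplicate the first info, then add each remaining one into the copy. A self-contained userspace sketch of that accumulate step, assuming a made-up struct info with a flat counter array (not gcov's real record layout):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NCOUNTERS 4

struct info {                           /* stand-in for struct gcov_info */
        unsigned long counters[NCOUNTERS];
};

static struct info *info_dup(const struct info *src)
{
        struct info *dup = malloc(sizeof(*dup));

        if (dup)
                memcpy(dup, src, sizeof(*dup));
        return dup;
}

static void info_add(struct info *dst, const struct info *src)
{
        for (int i = 0; i < NCOUNTERS; i++)
                dst->counters[i] += src->counters[i];
}

/* Fold all per-load snapshots into one freshly allocated sum. */
static struct info *get_accumulated_info(struct info **loaded, int num_loaded)
{
        struct info *sum = info_dup(loaded[0]);

        for (int i = 1; sum && i < num_loaded; i++)
                info_add(sum, loaded[i]);
        return sum;
}

int main(void)
{
        struct info a = { .counters = { 1, 2, 3, 4 } };
        struct info b = { .counters = { 10, 20, 30, 40 } };
        struct info *list[] = { &a, &b };
        struct info *sum = get_accumulated_info(list, 2);

        if (sum)
                printf("%lu\n", sum->counters[3]);      /* prints 44 */
        free(sum);
        return 0;
}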
/kernel/bpf/
bpf_lru_list.c 42 static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node) in bpf_lru_node_is_ref() argument
44 return READ_ONCE(node->ref); in bpf_lru_node_is_ref()
47 static void bpf_lru_node_clear_ref(struct bpf_lru_node *node) in bpf_lru_node_clear_ref() argument
49 WRITE_ONCE(node->ref, 0); in bpf_lru_node_clear_ref()
67 struct bpf_lru_node *node, in __bpf_lru_node_move_to_free() argument
71 if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type))) in __bpf_lru_node_move_to_free()
77 if (&node->list == l->next_inactive_rotation) in __bpf_lru_node_move_to_free()
80 bpf_lru_list_count_dec(l, node->type); in __bpf_lru_node_move_to_free()
82 node->type = tgt_free_type; in __bpf_lru_node_move_to_free()
83 list_move(&node->list, free_list); in __bpf_lru_node_move_to_free()
[all …]
lpm_trie.c 167 const struct lpm_trie_node *node, in __longest_prefix_match() argument
170 u32 limit = min(node->prefixlen, key->prefixlen); in __longest_prefix_match()
182 u64 diff = be64_to_cpu(*(__be64 *)node->data ^ in __longest_prefix_match()
195 u32 diff = be32_to_cpu(*(__be32 *)&node->data[i] ^ in __longest_prefix_match()
207 u16 diff = be16_to_cpu(*(__be16 *)&node->data[i] ^ in __longest_prefix_match()
219 prefixlen += 8 - fls(node->data[i] ^ key->data[i]); in __longest_prefix_match()
229 const struct lpm_trie_node *node, in longest_prefix_match() argument
232 return __longest_prefix_match(trie, node, key); in longest_prefix_match()
239 struct lpm_trie_node *node, *found = NULL; in trie_lookup_elem() local
247 for (node = rcu_dereference_check(trie->root, rcu_read_lock_bh_held()); in trie_lookup_elem()
[all …]
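
The lpm_trie.c matches compare a node's stored prefix with the lookup key and stop at the first differing bit. A simplified byte-at-a-time sketch of that longest-prefix-match computation, with no trie and illustrative names only:

#include <stdint.h>
#include <stdio.h>

/* Count how many leading bits of a and b agree, up to limit bits. */
static uint32_t longest_prefix_match(const uint8_t *a, const uint8_t *b,
                                     uint32_t limit)
{
        uint32_t prefixlen = 0;

        for (uint32_t i = 0; prefixlen < limit; i++) {
                uint8_t diff = a[i] ^ b[i];

                if (diff) {
                        /* Count the matching high-order bits of this byte. */
                        while (!(diff & 0x80)) {
                                prefixlen++;
                                diff <<= 1;
                        }
                        break;
                }
                prefixlen += 8;
        }
        return prefixlen < limit ? prefixlen : limit;
}

int main(void)
{
        uint8_t key[]  = { 192, 168, 1, 42 };
        uint8_t node[] = { 192, 168, 0, 0 };

        /* 192.168.1.42 vs 192.168.0.0/24 share 23 leading bits. */
        printf("%u\n", longest_prefix_match(key, node, 24));
        return 0;
}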
percpu_freelist.c 31 struct pcpu_freelist_node *node) in pcpu_freelist_push_node() argument
33 node->next = head->first; in pcpu_freelist_push_node()
34 WRITE_ONCE(head->first, node); in pcpu_freelist_push_node()
38 struct pcpu_freelist_node *node) in ___pcpu_freelist_push() argument
41 pcpu_freelist_push_node(head, node); in ___pcpu_freelist_push()
46 struct pcpu_freelist_node *node) in pcpu_freelist_try_push_extra() argument
51 pcpu_freelist_push_node(&s->extralist, node); in pcpu_freelist_try_push_extra()
57 struct pcpu_freelist_node *node) in ___pcpu_freelist_push_nmi() argument
68 pcpu_freelist_push_node(head, node); in ___pcpu_freelist_push_nmi()
75 if (pcpu_freelist_try_push_extra(s, node)) in ___pcpu_freelist_push_nmi()
[all …]
crypto.c 62 struct bpf_crypto_type_list *node; in bpf_crypto_register_type() local
66 list_for_each_entry(node, &bpf_crypto_types, list) { in bpf_crypto_register_type()
67 if (!strcmp(node->type->name, type->name)) in bpf_crypto_register_type()
71 node = kmalloc(sizeof(*node), GFP_KERNEL); in bpf_crypto_register_type()
73 if (!node) in bpf_crypto_register_type()
76 node->type = type; in bpf_crypto_register_type()
77 list_add(&node->list, &bpf_crypto_types); in bpf_crypto_register_type()
89 struct bpf_crypto_type_list *node; in bpf_crypto_unregister_type() local
93 list_for_each_entry(node, &bpf_crypto_types, list) { in bpf_crypto_unregister_type()
94 if (strcmp(node->type->name, type->name)) in bpf_crypto_unregister_type()
[all …]
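
The crypto.c results use the usual kernel idiom of embedding a list node in a payload struct, walking the list, and recovering the payload with container_of. A minimal userspace sketch of the same idiom, with a hand-rolled container_of and invented type names:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node { struct list_node *next; };

struct type_entry {                     /* payload with an embedded node */
        const char *name;
        struct list_node node;
};

static struct list_node *types;         /* singly linked registry head */

static void register_type(struct type_entry *e)
{
        e->node.next = types;
        types = &e->node;
}

static struct type_entry *find_type(const char *name)
{
        for (struct list_node *n = types; n; n = n->next) {
                struct type_entry *e = container_of(n, struct type_entry, node);

                if (!strcmp(e->name, name))
                        return e;
        }
        return NULL;
}

int main(void)
{
        struct type_entry aes = { .name = "aes" }, gcm = { .name = "gcm" };

        register_type(&aes);
        register_type(&gcm);
        printf("found: %s\n", find_type("aes") ? "aes" : "none");
        return 0;
}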
bpf_lru_list.h 51 typedef bool (*del_from_htab_func)(void *arg, struct bpf_lru_node *node);
66 static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node) in bpf_lru_node_set_ref() argument
68 if (!READ_ONCE(node->ref)) in bpf_lru_node_set_ref()
69 WRITE_ONCE(node->ref, 1); in bpf_lru_node_set_ref()
78 void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node);
local_storage.c 71 struct rb_node *node; in cgroup_storage_lookup() local
76 node = root->rb_node; in cgroup_storage_lookup()
77 while (node) { in cgroup_storage_lookup()
80 storage = container_of(node, struct bpf_cgroup_storage, node); in cgroup_storage_lookup()
84 node = node->rb_left; in cgroup_storage_lookup()
87 node = node->rb_right; in cgroup_storage_lookup()
111 this = container_of(*new, struct bpf_cgroup_storage, node); in cgroup_storage_insert()
126 rb_link_node(&storage->node, parent, new); in cgroup_storage_insert()
127 rb_insert_color(&storage->node, root); in cgroup_storage_insert()
607 rb_erase(&storage->node, root); in bpf_cgroup_storage_unlink()
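
local_storage.c walks an rbtree by hand: start at the root, recover the enclosing struct with container_of, and go left or right on the key comparison until it matches. The same walk over a plain binary search tree, with an int key standing in for the kernel's (cgroup, map) pair; all names here are placeholders:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct tree_node {
        struct tree_node *left, *right;
};

struct storage {
        int key;                        /* stands in for the (cgroup, map) key */
        struct tree_node node;          /* embedded tree linkage */
};

static struct storage *storage_lookup(struct tree_node *root, int key)
{
        struct tree_node *n = root;

        while (n) {
                struct storage *s = container_of(n, struct storage, node);

                if (key < s->key)
                        n = n->left;
                else if (key > s->key)
                        n = n->right;
                else
                        return s;       /* exact match */
        }
        return NULL;
}

int main(void)
{
        struct storage a = { .key = 1 }, b = { .key = 2 }, c = { .key = 3 };

        b.node.left = &a.node;
        b.node.right = &c.node;
        printf("%d\n", storage_lookup(&b.node, 3) ? 3 : -1);
        return 0;
}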
/kernel/trace/
rethook.c 23 struct llist_node *node; in rethook_flush_task() local
25 node = __llist_del_all(&tk->rethooks); in rethook_flush_task()
26 while (node) { in rethook_flush_task()
27 rhn = container_of(node, struct rethook_node, llist); in rethook_flush_task()
28 node = node->next; in rethook_flush_task()
73 struct rethook_node *node = nod; in rethook_init_node() local
75 node->rethook = context; in rethook_init_node()
129 struct rethook_node *node = container_of(head, struct rethook_node, rcu); in free_rethook_node_rcu() local
130 struct rethook *rh = node->rethook; in free_rethook_node_rcu()
132 objpool_drop(node, &rh->pool); in free_rethook_node_rcu()
[all …]
trace_boot.c 25 trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node) in trace_boot_set_instance_options() argument
33 xbc_node_for_each_array_value(node, "options", anode, p) { in trace_boot_set_instance_options()
43 p = xbc_node_find_value(node, "tracing_on", NULL); in trace_boot_set_instance_options()
53 p = xbc_node_find_value(node, "trace_clock", NULL); in trace_boot_set_instance_options()
59 p = xbc_node_find_value(node, "buffer_size", NULL); in trace_boot_set_instance_options()
68 p = xbc_node_find_value(node, "cpumask", NULL); in trace_boot_set_instance_options()
83 trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node) in trace_boot_enable_events() argument
89 xbc_node_for_each_array_value(node, "events", anode, p) { in trace_boot_enable_events()
102 trace_boot_add_kprobe_event(struct xbc_node *node, const char *event) in trace_boot_add_kprobe_event() argument
110 xbc_node_for_each_array_value(node, "probes", anode, val) { in trace_boot_add_kprobe_event()
[all …]
trace_stat.c 27 struct rb_node node; member
51 rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) { in __reset_stat_session()
93 this = container_of(*new, struct stat_node, node); in insert_stat()
103 rb_link_node(&data->node, parent, new); in insert_stat()
104 rb_insert_color(&data->node, root); in insert_stat()
174 struct rb_node *node; in stat_seq_start() local
188 node = rb_first(&session->stat_root); in stat_seq_start()
189 for (i = 0; node && i < n; i++) in stat_seq_start()
190 node = rb_next(node); in stat_seq_start()
192 return node; in stat_seq_start()
[all …]
/kernel/locking/
osq_lock.c 32 static inline int node_cpu(struct optimistic_spin_node *node) in node_cpu() argument
34 return node->cpu - 1; in node_cpu()
55 struct optimistic_spin_node *node, in osq_wait_next() argument
81 if (node->next) { in osq_wait_next()
84 next = xchg(&node->next, NULL); in osq_wait_next()
95 struct optimistic_spin_node *node = this_cpu_ptr(&osq_node); in osq_lock() local
100 node->locked = 0; in osq_lock()
101 node->next = NULL; in osq_lock()
102 node->cpu = curr; in osq_lock()
115 node->prev = prev; in osq_lock()
[all …]
qspinlock_paravirt.h 169 struct pv_node *node; member
208 static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node) in pv_hash() argument
218 WRITE_ONCE(he->node, node); in pv_hash()
240 struct pv_node *node; in pv_unhash() local
244 node = READ_ONCE(he->node); in pv_unhash()
246 return node; in pv_unhash()
275 static void pv_init_node(struct mcs_spinlock *node) in pv_init_node() argument
277 struct pv_node *pn = (struct pv_node *)node; in pv_init_node()
290 static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev) in pv_wait_node() argument
292 struct pv_node *pn = (struct pv_node *)node; in pv_wait_node()
[all …]
mcs_spinlock.h 65 void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node) in mcs_spin_lock() argument
70 node->locked = 0; in mcs_spin_lock()
71 node->next = NULL; in mcs_spin_lock()
79 prev = xchg(lock, node); in mcs_spin_lock()
91 WRITE_ONCE(prev->next, node); in mcs_spin_lock()
94 arch_mcs_spin_lock_contended(&node->locked); in mcs_spin_lock()
102 void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node) in mcs_spin_unlock() argument
104 struct mcs_spinlock *next = READ_ONCE(node->next); in mcs_spin_unlock()
110 if (likely(cmpxchg_release(lock, node, NULL) == node)) in mcs_spin_unlock()
113 while (!(next = READ_ONCE(node->next))) in mcs_spin_unlock()
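
mcs_spinlock.h is the queued-lock pattern itself: a waiter enqueues its own node, spins on that node's locked flag, and unlock passes ownership to node->next. A compact userspace rendering with C11 atomics; the kernel relies on xchg/cmpxchg_release and arch-specific contended-spin helpers, so this only sketches the queue handoff:

#include <stdatomic.h>
#include <stdbool.h>

struct mcs_node {
        struct mcs_node *_Atomic next;
        atomic_bool locked;             /* set true when the lock is handed to us */
};

/* The lock is a pointer to the tail of the waiter queue (NULL = unlocked). */
typedef struct mcs_node *_Atomic mcs_lock_t;

static void mcs_lock(mcs_lock_t *lock, struct mcs_node *node)
{
        atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
        atomic_store_explicit(&node->locked, false, memory_order_relaxed);

        /* Swap ourselves in as the new tail; prev is the old tail. */
        struct mcs_node *prev = atomic_exchange(lock, node);

        if (!prev)
                return;                 /* queue was empty: lock acquired */

        /* Link behind prev and spin on our own flag until prev hands over. */
        atomic_store_explicit(&prev->next, node, memory_order_release);
        while (!atomic_load_explicit(&node->locked, memory_order_acquire))
                ;                       /* spin */
}

static void mcs_unlock(mcs_lock_t *lock, struct mcs_node *node)
{
        struct mcs_node *next = atomic_load_explicit(&node->next, memory_order_acquire);

        if (!next) {
                /* No known successor: try to swing the tail back to NULL. */
                struct mcs_node *expected = node;

                if (atomic_compare_exchange_strong(lock, &expected, NULL))
                        return;
                /* A successor is mid-enqueue; wait for it to appear. */
                while (!(next = atomic_load_explicit(&node->next, memory_order_acquire)))
                        ;               /* spin */
        }
        atomic_store_explicit(&next->locked, true, memory_order_release);
}

int main(void)
{
        mcs_lock_t lock = NULL;
        struct mcs_node self;

        mcs_lock(&lock, &self);         /* uncontended acquire */
        mcs_unlock(&lock, &self);
        return 0;
}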
qspinlock.c 270 static __always_inline void __pv_init_node(struct mcs_spinlock *node) { } in __pv_init_node() argument
271 static __always_inline void __pv_wait_node(struct mcs_spinlock *node, in __pv_wait_node() argument
274 struct mcs_spinlock *node) { } in __pv_kick_node() argument
276 struct mcs_spinlock *node) in __pv_wait_head_or_lock() argument
315 struct mcs_spinlock *prev, *next, *node; in queued_spin_lock_slowpath() local
398 node = this_cpu_ptr(&qnodes[0].mcs); in queued_spin_lock_slowpath()
399 idx = node->count++; in queued_spin_lock_slowpath()
420 node = grab_mcs_node(node, idx); in queued_spin_lock_slowpath()
434 node->locked = 0; in queued_spin_lock_slowpath()
435 node->next = NULL; in queued_spin_lock_slowpath()
[all …]
/kernel/power/
wakelock.c 29 struct rb_node node; member
40 struct rb_node *node; in pm_show_wakelocks() local
46 for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) { in pm_show_wakelocks()
47 wl = rb_entry(node, struct wakelock, node); in pm_show_wakelocks()
125 rb_erase(&wl->node, &wakelocks_tree); in __wakelocks_gc()
153 struct rb_node **node = &wakelocks_tree.rb_node; in wakelock_lookup_add() local
154 struct rb_node *parent = *node; in wakelock_lookup_add()
157 while (*node) { in wakelock_lookup_add()
160 parent = *node; in wakelock_lookup_add()
161 wl = rb_entry(*node, struct wakelock, node); in wakelock_lookup_add()
[all …]
snapshot.c 407 struct rtree_node *node; member
446 struct rtree_node *node; in alloc_rtree_node() local
448 node = chain_alloc(ca, sizeof(struct rtree_node)); in alloc_rtree_node()
449 if (!node) in alloc_rtree_node()
452 node->data = get_image_page(gfp_mask, safe_needed); in alloc_rtree_node()
453 if (!node->data) in alloc_rtree_node()
456 list_add_tail(&node->list, list); in alloc_rtree_node()
458 return node; in alloc_rtree_node()
471 struct rtree_node *node, *block, **dst; in add_rtree_block() local
486 node = alloc_rtree_node(gfp_mask, safe_needed, ca, in add_rtree_block()
[all …]
qos.c 99 int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, in pm_qos_update_target() argument
115 plist_del(node, &c->list); in pm_qos_update_target()
122 plist_del(node, &c->list); in pm_qos_update_target()
125 plist_node_init(node, new_value); in pm_qos_update_target()
126 plist_add(node, &c->list); in pm_qos_update_target()
159 list_del(&req->node); in pm_qos_flags_remove_req()
160 list_for_each_entry(req, &pqf->list, node) in pm_qos_flags_remove_req()
195 INIT_LIST_HEAD(&req->node); in pm_qos_update_flags()
196 list_add_tail(&req->node, &pqf->list); in pm_qos_update_flags()
253 int ret = pm_qos_update_target(req->qos, &req->node, action, value); in cpu_latency_qos_apply()
[all …]
/kernel/
smp.c 38 #define CSD_TYPE(_csd) ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
207 return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */ in csd_lock_wait_getcpu()
236 unsigned int flags = READ_ONCE(csd->node.u_flags); in csd_lock_wait_toolong()
340 smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); in csd_lock_wait()
349 smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); in csd_lock_wait()
356 csd->node.u_flags |= CSD_FLAG_LOCK; in csd_lock()
368 WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK)); in csd_unlock()
373 smp_store_release(&csd->node.u_flags, 0); in csd_unlock()
378 void __smp_call_single_queue(int cpu, struct llist_node *node) in __smp_call_single_queue() argument
391 csd = container_of(node, call_single_data_t, node.llist); in __smp_call_single_queue()
[all …]
irq_work.c 61 oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags); in irq_work_claim()
95 work_flags = atomic_read(&work->node.a_flags); in __irq_work_queue_local()
107 if (!llist_add(&work->node.llist, list)) in __irq_work_queue_local()
163 !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) { in irq_work_queue_on()
165 if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu))) in irq_work_queue_on()
173 __smp_call_single_queue(cpu, &work->node.llist); in irq_work_queue_on()
212 flags = atomic_read(&work->node.a_flags); in irq_work_single()
214 atomic_set(&work->node.a_flags, flags); in irq_work_single()
229 (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY); in irq_work_single()
252 llist_for_each_entry_safe(work, tmp, llnode, node.llist) in irq_work_run_list()
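
irq_work.c claims a work item by atomically OR-ing a CLAIMED bit into node.a_flags, so the item is queued at most once until it runs. A minimal sketch of that claim test with C11 atomic_fetch_or; the flag values and names here are invented for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define WORK_CLAIMED 0x1

struct work {
        atomic_uint flags;
};

/* Returns true only for the caller that claimed the work first. */
static bool work_claim(struct work *w)
{
        unsigned int old = atomic_fetch_or(&w->flags, WORK_CLAIMED);

        return !(old & WORK_CLAIMED);
}

int main(void)
{
        struct work w = { .flags = 0 };

        printf("%d\n", work_claim(&w));         /* 1: first claim succeeds */
        printf("%d\n", work_claim(&w));         /* 0: already claimed      */
        return 0;
}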
kthread.c 44 int node; member
409 current->pref_node_fork = create->node; in create_kthread()
430 void *data, int node, in __kthread_create_on_node() argument
443 create->node = node; in __kthread_create_on_node()
505 void *data, int node, in kthread_create_on_node() argument
513 task = __kthread_create_on_node(threadfn, data, node, namefmt, args); in kthread_create_on_node()
836 struct kthread_work, node); in kthread_worker_fn()
837 list_del_init(&work->node); in kthread_worker_fn()
875 int node = NUMA_NO_NODE; in __kthread_create_worker() local
884 node = cpu_to_node(cpu); in __kthread_create_worker()
[all …]
scs.c 31 static void *__scs_alloc(int node) in __scs_alloc() argument
47 GFP_SCS, PAGE_KERNEL, 0, node, in __scs_alloc()
54 void *scs_alloc(int node) in scs_alloc() argument
58 s = __scs_alloc(node); in scs_alloc()
114 int scs_prepare(struct task_struct *tsk, int node) in scs_prepare() argument
121 s = scs_alloc(node); in scs_prepare()
cpu.c 76 struct hlist_node *node; member
130 struct hlist_node *node);
135 struct hlist_node *node);
170 bool bringup, struct hlist_node *node, in cpuhp_invoke_callback() argument
175 int (*cbm)(unsigned int cpu, struct hlist_node *node); in cpuhp_invoke_callback()
201 if (node) { in cpuhp_invoke_callback()
203 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
204 ret = cbm(cpu, node); in cpuhp_invoke_callback()
211 hlist_for_each(node, &step->list) { in cpuhp_invoke_callback()
212 if (lastp && node == *lastp) in cpuhp_invoke_callback()
[all …]
/kernel/module/
tree_lookup.c 24 struct module_memory *mod_mem = container_of(n, struct module_memory, mtn.node); in __mod_tree_val()
31 struct module_memory *mod_mem = container_of(n, struct module_memory, mtn.node); in __mod_tree_size()
64 static noinline void __mod_tree_insert(struct mod_tree_node *node, struct mod_tree_root *tree) in __mod_tree_insert() argument
66 latch_tree_insert(&node->node, &tree->root, &mod_tree_ops); in __mod_tree_insert()
69 static void __mod_tree_remove(struct mod_tree_node *node, struct mod_tree_root *tree) in __mod_tree_remove() argument
71 latch_tree_erase(&node->node, &tree->root, &mod_tree_ops); in __mod_tree_remove()
111 return container_of(ltn, struct mod_tree_node, node)->mod; in mod_find()
/kernel/irq/
irqdesc.c 54 static int alloc_masks(struct irq_desc *desc, int node) in alloc_masks() argument
57 GFP_KERNEL, node)) in alloc_masks()
62 GFP_KERNEL, node)) { in alloc_masks()
69 if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) { in alloc_masks()
80 static void desc_smp_init(struct irq_desc *desc, int node, in desc_smp_init() argument
91 desc->irq_common_data.node = node; in desc_smp_init()
108 alloc_masks(struct irq_desc *desc, int node) { return 0; } in alloc_masks() argument
110 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } in desc_smp_init() argument
114 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, in desc_set_defaults() argument
138 desc_smp_init(desc, node, affinity); in desc_set_defaults()
[all …]
/kernel/time/
alarmtimer.c 162 timerqueue_del(&base->timerqueue, &alarm->node); in alarmtimer_enqueue()
164 timerqueue_add(&base->timerqueue, &alarm->node); in alarmtimer_enqueue()
182 timerqueue_del(&base->timerqueue, &alarm->node); in alarmtimer_dequeue()
213 hrtimer_set_expires(&alarm->timer, alarm->node.expires); in alarmtimer_fired()
227 return ktime_sub(alarm->node.expires, base->get_ktime()); in alarm_expires_remaining()
339 timerqueue_init(&alarm->node); in __alarm_init()
372 alarm->node.expires = start; in alarm_start()
374 hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS); in alarm_start()
401 hrtimer_set_expires(&alarm->timer, alarm->node.expires); in alarm_restart()
456 delta = ktime_sub(now, alarm->node.expires); in alarm_forward()
[all …]
