| /kernel/gcov/ |
| D | fs.c |
  145  static struct gcov_info *get_node_info(struct gcov_node *node)  in get_node_info() argument
  147  if (node->num_loaded > 0)  in get_node_info()
  148  return node->loaded_info[0];  in get_node_info()
  150  return node->unloaded_info;  in get_node_info()
  157  static struct gcov_info *get_accumulated_info(struct gcov_node *node)  in get_accumulated_info() argument
  162  if (node->unloaded_info)  in get_accumulated_info()
  163  info = gcov_info_dup(node->unloaded_info);  in get_accumulated_info()
  165  info = gcov_info_dup(node->loaded_info[i++]);  in get_accumulated_info()
  168  for (; i < node->num_loaded; i++)  in get_accumulated_info()
  169  gcov_info_add(info, node->loaded_info[i]);  in get_accumulated_info()
  [all …]
|
| /kernel/bpf/ |
| D | bpf_lru_list.c |
  42  static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)  in bpf_lru_node_is_ref() argument
  44  return node->ref;  in bpf_lru_node_is_ref()
  62  struct bpf_lru_node *node,  in __bpf_lru_node_move_to_free() argument
  66  if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))  in __bpf_lru_node_move_to_free()
  72  if (&node->list == l->next_inactive_rotation)  in __bpf_lru_node_move_to_free()
  75  bpf_lru_list_count_dec(l, node->type);  in __bpf_lru_node_move_to_free()
  77  node->type = tgt_free_type;  in __bpf_lru_node_move_to_free()
  78  list_move(&node->list, free_list);  in __bpf_lru_node_move_to_free()
  83  struct bpf_lru_node *node,  in __bpf_lru_node_move_in() argument
  86  if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||  in __bpf_lru_node_move_in()
  [all …]
|
| D | lpm_trie.c |
  165  const struct lpm_trie_node *node,  in longest_prefix_match() argument
  168  u32 limit = min(node->prefixlen, key->prefixlen);  in longest_prefix_match()
  180  u64 diff = be64_to_cpu(*(__be64 *)node->data ^  in longest_prefix_match()
  193  u32 diff = be32_to_cpu(*(__be32 *)&node->data[i] ^  in longest_prefix_match()
  205  u16 diff = be16_to_cpu(*(__be16 *)&node->data[i] ^  in longest_prefix_match()
  217  prefixlen += 8 - fls(node->data[i] ^ key->data[i]);  in longest_prefix_match()
  230  struct lpm_trie_node *node, *found = NULL;  in trie_lookup_elem() local
  235  for (node = rcu_dereference(trie->root); node;) {  in trie_lookup_elem()
  243  matchlen = longest_prefix_match(trie, node, key);  in trie_lookup_elem()
  245  found = node;  in trie_lookup_elem()
  [all …]
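The longest_prefix_match() hits above show the core of the LPM trie lookup: XOR the node's key against the search key and count leading equal bits, walking 8-, 4-, 2- and 1-byte chunks. Below is a minimal byte-at-a-time sketch of that bit-counting step in user-space C; prefix_match_bits() is a hypothetical stand-in, not the kernel helper.

```c
#include <stdint.h>
#include <stdio.h>

/* Count how many leading bits of a[] and b[] match, capped at 'limit' bits.
 * Same idea as longest_prefix_match(): XOR the keys and stop at the first
 * differing bit, but one byte at a time for simplicity. */
static uint32_t prefix_match_bits(const uint8_t *a, const uint8_t *b, uint32_t limit)
{
	uint32_t prefixlen = 0;

	for (uint32_t i = 0; prefixlen < limit; i++) {
		uint8_t diff = a[i] ^ b[i];

		if (!diff) {
			prefixlen += 8;
			continue;
		}
		/* Leading zero bits of the differing byte still match;
		 * this is the "8 - fls(diff)" step in the kernel code. */
		while (!(diff & 0x80)) {
			diff <<= 1;
			prefixlen++;
		}
		break;
	}
	return prefixlen < limit ? prefixlen : limit;
}

int main(void)
{
	uint8_t key_a[4] = { 192, 168, 1, 0 };   /* 192.168.1.0 */
	uint8_t key_b[4] = { 192, 168, 129, 5 }; /* 192.168.129.5 */

	printf("%u matching bits\n", prefix_match_bits(key_a, key_b, 32)); /* 16 */
	return 0;
}
```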
|
| D | xskmap.c |
  35  struct xsk_map_node *node;  in xsk_map_node_alloc() local
  38  node = kzalloc(sizeof(*node), GFP_ATOMIC | __GFP_NOWARN);  in xsk_map_node_alloc()
  39  if (!node)  in xsk_map_node_alloc()
  44  kfree(node);  in xsk_map_node_alloc()
  48  node->map = map;  in xsk_map_node_alloc()
  49  node->map_entry = map_entry;  in xsk_map_node_alloc()
  50  return node;  in xsk_map_node_alloc()
  53  static void xsk_map_node_free(struct xsk_map_node *node)  in xsk_map_node_free() argument
  55  xsk_map_put(node->map);  in xsk_map_node_free()
  56  kfree(node);  in xsk_map_node_free()
  [all …]
|
| D | percpu_freelist.c |
  29  struct pcpu_freelist_node *node)  in ___pcpu_freelist_push() argument
  32  node->next = head->first;  in ___pcpu_freelist_push()
  33  head->first = node;  in ___pcpu_freelist_push()
  38  struct pcpu_freelist_node *node)  in __pcpu_freelist_push() argument
  42  ___pcpu_freelist_push(head, node);  in __pcpu_freelist_push()
  46  struct pcpu_freelist_node *node)  in pcpu_freelist_push() argument
  51  __pcpu_freelist_push(s, node);  in pcpu_freelist_push()
  87  struct pcpu_freelist_node *node;  in __pcpu_freelist_pop() local
  94  node = head->first;  in __pcpu_freelist_pop()
  95  if (node) {  in __pcpu_freelist_pop()
  [all …]
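___pcpu_freelist_push() and __pcpu_freelist_pop() above are a plain LIFO list: push links the node in front of head->first, pop takes head->first, with a per-CPU raw spinlock around each operation. Here is a stripped-down single-list sketch of that push/pop shape, without the locking or the per-CPU arrays; all names are hypothetical.

```c
#include <stddef.h>
#include <stdio.h>

struct freelist_node {
	struct freelist_node *next;
};

struct freelist_head {
	struct freelist_node *first;
};

/* Push: the new node becomes the head, as in ___pcpu_freelist_push(). */
static void freelist_push(struct freelist_head *head, struct freelist_node *node)
{
	node->next = head->first;
	head->first = node;
}

/* Pop: take the current head, as in __pcpu_freelist_pop(). */
static struct freelist_node *freelist_pop(struct freelist_head *head)
{
	struct freelist_node *node = head->first;

	if (node)
		head->first = node->next;
	return node;
}

int main(void)
{
	struct freelist_head head = { NULL };
	struct freelist_node a, b;

	freelist_push(&head, &a);
	freelist_push(&head, &b);
	printf("pop is b: %d\n", freelist_pop(&head) == &b); /* LIFO order */
	printf("pop is a: %d\n", freelist_pop(&head) == &a);
	return 0;
}
```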
|
| D | bpf_lru_list.h |
  50  typedef bool (*del_from_htab_func)(void *arg, struct bpf_lru_node *node);
  64  static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node)  in bpf_lru_node_set_ref() argument
  69  if (!node->ref)  in bpf_lru_node_set_ref()
  70  node->ref = 1;  in bpf_lru_node_set_ref()
  79  void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node);
  80  void bpf_lru_promote(struct bpf_lru *lru, struct bpf_lru_node *node);
|
| D | local_storage.c |
  53  struct rb_node *node;  in cgroup_storage_lookup() local
  58  node = root->rb_node;  in cgroup_storage_lookup()
  59  while (node) {  in cgroup_storage_lookup()
  62  storage = container_of(node, struct bpf_cgroup_storage, node);  in cgroup_storage_lookup()
  66  node = node->rb_left;  in cgroup_storage_lookup()
  69  node = node->rb_right;  in cgroup_storage_lookup()
  93  this = container_of(*new, struct bpf_cgroup_storage, node);  in cgroup_storage_insert()
  108  rb_link_node(&storage->node, parent, new);  in cgroup_storage_insert()
  109  rb_insert_color(&storage->node, root);  in cgroup_storage_insert()
  594  rb_erase(&storage->node, root);  in bpf_cgroup_storage_unlink()
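cgroup_storage_lookup()/cgroup_storage_insert() follow the usual kernel rbtree pattern: walk rb_node pointers comparing keys, recover the containing entry with container_of(), and on insert link the new node at the NULL slot where the search ended. The sketch below shows that descend-and-link shape in user-space C with a plain unbalanced BST, since rb_insert_color()'s rebalancing is beyond a short example; all names are hypothetical.

```c
#include <stddef.h>
#include <stdio.h>

struct bst_node {
	struct bst_node *left, *right;
};

/* The entry embeds the node, like struct bpf_cgroup_storage embeds rb_node. */
struct storage {
	struct bst_node node;
	int key;
};

/* User-space stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct storage *storage_lookup(struct bst_node *root, int key)
{
	while (root) {
		struct storage *s = container_of(root, struct storage, node);

		if (key < s->key)
			root = root->left;
		else if (key > s->key)
			root = root->right;
		else
			return s;
	}
	return NULL;
}

/* Descend to the NULL child slot the key belongs in, then link there.
 * The kernel version additionally calls rb_insert_color() to rebalance. */
static void storage_insert(struct bst_node **root, struct storage *new)
{
	struct bst_node **link = root;

	while (*link) {
		struct storage *this = container_of(*link, struct storage, node);

		link = new->key < this->key ? &(*link)->left : &(*link)->right;
	}
	new->node.left = new->node.right = NULL;
	*link = &new->node;
}

int main(void)
{
	struct bst_node *root = NULL;
	struct storage a = { .key = 10 }, b = { .key = 3 };

	storage_insert(&root, &a);
	storage_insert(&root, &b);
	printf("found key 3: %d\n", storage_lookup(root, 3) == &b);
	return 0;
}
```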
|
| /kernel/locking/ |
| D | osq_lock.c |
  25  static inline int node_cpu(struct optimistic_spin_node *node)  in node_cpu() argument
  27  return node->cpu - 1;  in node_cpu()
  43  struct optimistic_spin_node *node,  in osq_wait_next() argument
  78  if (node->next) {  in osq_wait_next()
  79  next = xchg(&node->next, NULL);  in osq_wait_next()
  92  struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);  in osq_lock() local
  97  node->locked = 0;  in osq_lock()
  98  node->next = NULL;  in osq_lock()
  99  node->cpu = curr;  in osq_lock()
  112  node->prev = prev;  in osq_lock()
  [all …]
|
| D | qspinlock_paravirt.h |
  173  struct pv_node *node;  member
  212  static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)  in pv_hash() argument
  221  WRITE_ONCE(he->node, node);  in pv_hash()
  243  struct pv_node *node;  in pv_unhash() local
  247  node = READ_ONCE(he->node);  in pv_unhash()
  249  return node;  in pv_unhash()
  278  static void pv_init_node(struct mcs_spinlock *node)  in pv_init_node() argument
  280  struct pv_node *pn = (struct pv_node *)node;  in pv_init_node()
  293  static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)  in pv_wait_node() argument
  295  struct pv_node *pn = (struct pv_node *)node;  in pv_wait_node()
  [all …]
|
| D | mcs_spinlock.h |
  65  void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)  in mcs_spin_lock() argument
  70  node->locked = 0;  in mcs_spin_lock()
  71  node->next = NULL;  in mcs_spin_lock()
  79  prev = xchg(lock, node);  in mcs_spin_lock()
  91  WRITE_ONCE(prev->next, node);  in mcs_spin_lock()
  94  arch_mcs_spin_lock_contended(&node->locked);  in mcs_spin_lock()
  102  void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)  in mcs_spin_unlock() argument
  104  struct mcs_spinlock *next = READ_ONCE(node->next);  in mcs_spin_unlock()
  110  if (likely(cmpxchg_release(lock, node, NULL) == node))  in mcs_spin_unlock()
  113  while (!(next = READ_ONCE(node->next)))  in mcs_spin_unlock()
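mcs_spin_lock()/mcs_spin_unlock() implement the classic MCS queue lock: the lock word is a tail pointer, each waiter enqueues its own node with xchg() and spins only on its own node->locked flag, and unlock hands the lock to the successor (or clears the tail with cmpxchg_release() when there is none). Below is a self-contained C11-atomics sketch of the same algorithm; it is purely illustrative, not the kernel code, which uses xchg()/cmpxchg_release() and arch_mcs_* helpers instead.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_bool locked;             /* set true when it is this waiter's turn */
};

typedef _Atomic(struct mcs_node *) mcs_lock_t;

static void mcs_lock(mcs_lock_t *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, false, memory_order_relaxed);

	/* Swap ourselves in as the new tail; the old tail (if any) is our predecessor. */
	prev = atomic_exchange_explicit(lock, node, memory_order_acq_rel);
	if (!prev)
		return;                         /* lock was free, we own it */

	atomic_store_explicit(&prev->next, node, memory_order_release);
	while (!atomic_load_explicit(&node->locked, memory_order_acquire))
		;                               /* spin on our own flag only */
}

static void mcs_unlock(mcs_lock_t *lock, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load_explicit(&node->next, memory_order_acquire);

	if (!next) {
		struct mcs_node *expected = node;

		/* No known successor: try to clear the tail. */
		if (atomic_compare_exchange_strong_explicit(lock, &expected, NULL,
							    memory_order_release,
							    memory_order_relaxed))
			return;
		/* A waiter is enqueueing; wait until it links itself behind us. */
		while (!(next = atomic_load_explicit(&node->next, memory_order_acquire)))
			;
	}
	atomic_store_explicit(&next->locked, true, memory_order_release);
}

int main(void)
{
	mcs_lock_t lock = NULL;
	struct mcs_node self;

	mcs_lock(&lock, &self);     /* uncontended acquire */
	mcs_unlock(&lock, &self);
	puts("locked and unlocked");
	return 0;
}
```

Spinning on a per-waiter flag keeps each CPU's busy-wait in its own cache line, which is the point of MCS over a simple test-and-set spinlock.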
|
| D | qspinlock.c |
  271  static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }  in __pv_init_node() argument
  272  static __always_inline void __pv_wait_node(struct mcs_spinlock *node,  in __pv_wait_node() argument
  275  struct mcs_spinlock *node) { }  in __pv_kick_node() argument
  277  struct mcs_spinlock *node)  in __pv_wait_head_or_lock() argument
  316  struct mcs_spinlock *prev, *next, *node;  in queued_spin_lock_slowpath() local
  399  node = this_cpu_ptr(&qnodes[0].mcs);  in queued_spin_lock_slowpath()
  400  idx = node->count++;  in queued_spin_lock_slowpath()
  419  node = grab_mcs_node(node, idx);  in queued_spin_lock_slowpath()
  433  node->locked = 0;  in queued_spin_lock_slowpath()
  434  node->next = NULL;  in queued_spin_lock_slowpath()
  [all …]
|
| /kernel/power/ |
| D | wakelock.c |
  29  struct rb_node node;  member
  40  struct rb_node *node;  in pm_show_wakelocks() local
  47  for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {  in pm_show_wakelocks()
  48  wl = rb_entry(node, struct wakelock, node);  in pm_show_wakelocks()
  125  rb_erase(&wl->node, &wakelocks_tree);  in __wakelocks_gc()
  153  struct rb_node **node = &wakelocks_tree.rb_node;  in wakelock_lookup_add() local
  154  struct rb_node *parent = *node;  in wakelock_lookup_add()
  157  while (*node) {  in wakelock_lookup_add()
  160  parent = *node;  in wakelock_lookup_add()
  161  wl = rb_entry(*node, struct wakelock, node);  in wakelock_lookup_add()
  [all …]
|
| D | qos.c |
  104  struct plist_node *node;  in pm_qos_get_value() local
  118  plist_for_each(node, &c->list)  in pm_qos_get_value()
  119  total_value += node->prio;  in pm_qos_get_value()
  181  plist_for_each_entry(req, &c->list, node) {  in pm_qos_debug_show()
  184  if ((req->node).prio != c->default_value) {  in pm_qos_debug_show()
  190  (req->node).prio, state);  in pm_qos_debug_show()
  214  int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,  in pm_qos_update_target() argument
  230  plist_del(node, &c->list);  in pm_qos_update_target()
  238  plist_del(node, &c->list);  in pm_qos_update_target()
  241  plist_node_init(node, new_value);  in pm_qos_update_target()
  [all …]
|
| D | snapshot.c |
  372  struct rtree_node *node;  member
  406  struct rtree_node *node;  in alloc_rtree_node() local
  408  node = chain_alloc(ca, sizeof(struct rtree_node));  in alloc_rtree_node()
  409  if (!node)  in alloc_rtree_node()
  412  node->data = get_image_page(gfp_mask, safe_needed);  in alloc_rtree_node()
  413  if (!node->data)  in alloc_rtree_node()
  416  list_add_tail(&node->list, list);  in alloc_rtree_node()
  418  return node;  in alloc_rtree_node()
  431  struct rtree_node *node, *block, **dst;  in add_rtree_block() local
  446  node = alloc_rtree_node(gfp_mask, safe_needed, ca,  in add_rtree_block()
  [all …]
|
| /kernel/trace/ |
| D | trace_stat.c |
  27  struct rb_node node;  member
  51  rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) {  in __reset_stat_session()
  95  this = container_of(*new, struct stat_node, node);  in insert_stat()
  105  rb_link_node(&data->node, parent, new);  in insert_stat()
  106  rb_insert_color(&data->node, root);  in insert_stat()
  176  struct rb_node *node;  in stat_seq_start() local
  190  node = rb_first(&session->stat_root);  in stat_seq_start()
  191  for (i = 0; node && i < n; i++)  in stat_seq_start()
  192  node = rb_next(node);  in stat_seq_start()
  194  return node;  in stat_seq_start()
  [all …]
|
| /kernel/irq/ |
| D | irqdesc.c |
  55  static int alloc_masks(struct irq_desc *desc, int node)  in alloc_masks() argument
  58  GFP_KERNEL, node))  in alloc_masks()
  63  GFP_KERNEL, node)) {  in alloc_masks()
  70  if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {  in alloc_masks()
  81  static void desc_smp_init(struct irq_desc *desc, int node,  in desc_smp_init() argument
  92  desc->irq_common_data.node = node;  in desc_smp_init()
  98  alloc_masks(struct irq_desc *desc, int node) { return 0; }  in alloc_masks() argument
  100  desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }  in desc_smp_init() argument
  103  static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,  in desc_set_defaults() argument
  127  desc_smp_init(desc, node, affinity);  in desc_set_defaults()
  [all …]
|
| D | affinity.c |
  46  int node;  in alloc_node_to_cpumask() local
  52  for (node = 0; node < nr_node_ids; node++) {  in alloc_node_to_cpumask()
  53  if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))  in alloc_node_to_cpumask()
  60  while (--node >= 0)  in alloc_node_to_cpumask()
  61  free_cpumask_var(masks[node]);  in alloc_node_to_cpumask()
  68  int node;  in free_node_to_cpumask() local
  70  for (node = 0; node < nr_node_ids; node++)  in free_node_to_cpumask()
  71  free_cpumask_var(masks[node]);  in free_node_to_cpumask()
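alloc_node_to_cpumask()/free_node_to_cpumask() show the allocate-then-unwind idiom: allocate one cpumask per NUMA node and, if any allocation fails, walk backwards freeing the ones already obtained. The same shape in plain user-space C, with calloc() standing in for zalloc_cpumask_var(); the names are hypothetical.

```c
#include <stdlib.h>

/* Allocate one buffer per node; on failure, free the ones already done. */
static void **alloc_per_node(int nr_nodes, size_t size)
{
	void **bufs = calloc(nr_nodes, sizeof(*bufs));
	int node;

	if (!bufs)
		return NULL;

	for (node = 0; node < nr_nodes; node++) {
		bufs[node] = calloc(1, size);
		if (!bufs[node])
			goto err;
	}
	return bufs;

err:
	/* Unwind exactly like the kernel loop: free indices [0, node). */
	while (--node >= 0)
		free(bufs[node]);
	free(bufs);
	return NULL;
}

static void free_per_node(void **bufs, int nr_nodes)
{
	for (int node = 0; node < nr_nodes; node++)
		free(bufs[node]);
	free(bufs);
}

int main(void)
{
	void **bufs = alloc_per_node(4, 128);

	if (bufs)
		free_per_node(bufs, 4);
	return 0;
}
```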
|
| /kernel/ |
| D | kthread.c |
  37  int node;  member
  275  current->pref_node_fork = create->node;  in create_kthread()
  294  void *data, int node,  in __kthread_create_on_node() argument
  307  create->node = node;  in __kthread_create_on_node()
  380  void *data, int node,  in kthread_create_on_node() argument
  388  task = __kthread_create_on_node(threadfn, data, node, namefmt, args);  in kthread_create_on_node()
  663  struct kthread_work, node);  in kthread_worker_fn()
  664  list_del_init(&work->node);  in kthread_worker_fn()
  687  int node = NUMA_NO_NODE;  in __kthread_create_worker() local
  696  node = cpu_to_node(cpu);  in __kthread_create_worker()
  [all …]
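The kthread.c hits cover the creation path: kthread_create_on_node() forwards the caller's NUMA node through the create request so the new thread's task_struct and stack can be allocated on that node, and the thread stays stopped until it is explicitly woken. Below is a hedged usage sketch from a module, written from memory of the kthread API; the demo names are made up.

```c
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/numa.h>

static struct task_struct *worker;

static int worker_fn(void *data)
{
	/* Loop until kthread_stop() is called on us. */
	while (!kthread_should_stop())
		msleep(100);            /* periodic work would go here */
	return 0;
}

static int __init demo_init(void)
{
	/* NUMA_NO_NODE: no placement preference; pass a node id instead to
	 * keep the thread's stack/task_struct on that node. */
	worker = kthread_create_on_node(worker_fn, NULL, NUMA_NO_NODE, "demo-worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	wake_up_process(worker);        /* kthread_create_*() leaves it stopped */
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(worker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```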
|
| D | audit_tree.c |
  33  struct node {  struct
  194  size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);  in alloc_chunk()
  274  static struct audit_chunk *find_chunk(struct node *p)  in find_chunk()
  327  static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)  in remove_chunk_node()
  464  struct node *p;  in tag_chunk()
  575  struct node *p;  in prune_tree_chunks()
  579  p = list_first_entry(&victim->chunks, struct node, list);  in prune_tree_chunks()
  621  struct node *node = list_entry(p, struct node, list);  in trim_marked() local
  623  if (node->index & (1U<<31)) {  in trim_marked()
  689  struct node *node;  in audit_trim_trees() local
  [all …]
|
| D | cpu.c |
  65  struct hlist_node *node;  member
  117  struct hlist_node *node);
  122  struct hlist_node *node);
  148  bool bringup, struct hlist_node *node,  in cpuhp_invoke_callback() argument
  153  int (*cbm)(unsigned int cpu, struct hlist_node *node);  in cpuhp_invoke_callback()
  181  if (node) {  in cpuhp_invoke_callback()
  183  trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);  in cpuhp_invoke_callback()
  184  ret = cbm(cpu, node);  in cpuhp_invoke_callback()
  191  hlist_for_each(node, &step->list) {  in cpuhp_invoke_callback()
  192  if (lastp && node == *lastp)  in cpuhp_invoke_callback()
  [all …]
|
| D | workqueue.c |
  150  int node; /* I: the associated node ID */  member
  411  list_for_each_entry((worker), &(pool)->workers, node) \
  565  int node)  in unbound_pwq_by_node() argument
  575  if (unlikely(node == NUMA_NO_NODE))  in unbound_pwq_by_node()
  578  return rcu_dereference_raw(wq->numa_pwq_tbl[node]);  in unbound_pwq_by_node()
  1537  static int workqueue_select_cpu_near(int node)  in workqueue_select_cpu_near() argument
  1546  if (node < 0 || node >= MAX_NUMNODES || !node_online(node))  in workqueue_select_cpu_near()
  1551  if (node == cpu_to_node(cpu))  in workqueue_select_cpu_near()
  1555  cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);  in workqueue_select_cpu_near()
  1581  bool queue_work_node(int node, struct workqueue_struct *wq,  in queue_work_node() argument
  [all …]
|
| D | scs.c |
  50  static void *scs_alloc(int node)  in scs_alloc() argument
  72  node, __builtin_return_address(0));  in scs_alloc()
  121  static inline void *scs_alloc(int node)  in scs_alloc() argument
  125  s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);  in scs_alloc()
  173  int scs_prepare(struct task_struct *tsk, int node)  in scs_prepare() argument
  177  s = scs_alloc(node);  in scs_prepare()
|
| /kernel/time/ |
| D | alarmtimer.c |
  163  timerqueue_del(&base->timerqueue, &alarm->node);  in alarmtimer_enqueue()
  165  timerqueue_add(&base->timerqueue, &alarm->node);  in alarmtimer_enqueue()
  183  timerqueue_del(&base->timerqueue, &alarm->node);  in alarmtimer_dequeue()
  214  hrtimer_set_expires(&alarm->timer, alarm->node.expires);  in alarmtimer_fired()
  228  return ktime_sub(alarm->node.expires, base->gettime());  in alarm_expires_remaining()
  329  timerqueue_init(&alarm->node);  in __alarm_init()
  362  alarm->node.expires = start;  in alarm_start()
  364  hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS);  in alarm_start()
  391  hrtimer_set_expires(&alarm->timer, alarm->node.expires);  in alarm_restart()
  446  delta = ktime_sub(now, alarm->node.expires);  in alarm_forward()
  [all …]
|
| /kernel/livepatch/ |
| D | shadow.c |
  55  struct hlist_node node;  member
  89  hash_for_each_possible_rcu(klp_shadow_hash, shadow, node,  in klp_shadow_get()
  157  hash_add_rcu(klp_shadow_hash, &new_shadow->node,  in __klp_shadow_get_or_alloc()
  237  hash_del_rcu(&shadow->node);  in klp_shadow_free_struct()
  261  hash_for_each_possible(klp_shadow_hash, shadow, node,  in klp_shadow_free()
  292  hash_for_each(klp_shadow_hash, i, shadow, node) {  in klp_shadow_free_all()
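klp_shadow_get() and friends use a fixed-size hash table with an hlist_node embedded in each shadow variable: lookup walks one bucket's chain, and add/free link or unlink the node under RCU. A tiny non-RCU sketch of that chained-bucket shape in user-space C; all names are hypothetical.

```c
#include <stdio.h>

#define TABLE_BITS 4
#define TABLE_SIZE (1 << TABLE_BITS)

/* Each entry carries its own chain pointer, like the embedded hlist_node. */
struct shadow {
	struct shadow *next;
	unsigned long key;
	int data;
};

static struct shadow *table[TABLE_SIZE];

static unsigned int hash_key(unsigned long key)
{
	return key % TABLE_SIZE;
}

/* Link at the front of the bucket, like hash_add_rcu(). */
static void shadow_add(struct shadow *s)
{
	unsigned int b = hash_key(s->key);

	s->next = table[b];
	table[b] = s;
}

/* Walk only the bucket the key hashes to, like hash_for_each_possible_rcu(). */
static struct shadow *shadow_get(unsigned long key)
{
	for (struct shadow *s = table[hash_key(key)]; s; s = s->next)
		if (s->key == key)
			return s;
	return NULL;
}

int main(void)
{
	struct shadow a = { .key = 42, .data = 7 };

	shadow_add(&a);
	printf("data for key 42: %d\n", shadow_get(42)->data);
	return 0;
}
```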
|
| D | patch.c |
  30  list_for_each_entry(ops, &klp_ops, node) {  in klp_find_ops()
  160  list_del(&ops->node);  in klp_unpatch_func()
  201  list_add(&ops->node, &klp_ops);  in klp_patch_func()
  232  list_del(&ops->node);  in klp_patch_func()
|