/kernel/gcov/

D | fs.c
   255  static struct gcov_info *get_node_info(struct gcov_node *node)
   257          if (node->num_loaded > 0)
   258                  return node->loaded_info[0];
   260          return node->unloaded_info;
   267  static struct gcov_info *get_accumulated_info(struct gcov_node *node)
   272          if (node->unloaded_info)
   273                  info = gcov_info_dup(node->unloaded_info);
   275                  info = gcov_info_dup(node->loaded_info[i++]);
   278          for (; i < node->num_loaded; i++)
   279                  gcov_info_add(info, node->loaded_info[i]);
   [all …]
/kernel/bpf/

D | bpf_lru_list.c
    42  static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
    44          return READ_ONCE(node->ref);
    47  static void bpf_lru_node_clear_ref(struct bpf_lru_node *node)
    49          WRITE_ONCE(node->ref, 0);
    67          struct bpf_lru_node *node,  (in __bpf_lru_node_move_to_free())
    71          if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
    77          if (&node->list == l->next_inactive_rotation)
    80          bpf_lru_list_count_dec(l, node->type);
    82          node->type = tgt_free_type;
    83          list_move(&node->list, free_list);
   [all …]
D | lpm_trie.c
   165          const struct lpm_trie_node *node,  (in longest_prefix_match())
   168          u32 limit = min(node->prefixlen, key->prefixlen);
   180          u64 diff = be64_to_cpu(*(__be64 *)node->data ^
   193          u32 diff = be32_to_cpu(*(__be32 *)&node->data[i] ^
   205          u16 diff = be16_to_cpu(*(__be16 *)&node->data[i] ^
   217          prefixlen += 8 - fls(node->data[i] ^ key->data[i]);
   230          struct lpm_trie_node *node, *found = NULL;  (in trie_lookup_elem())
   238          for (node = rcu_dereference_check(trie->root, rcu_read_lock_bh_held());
   239               node;) {
   247                  matchlen = longest_prefix_match(trie, node, key);
   [all …]
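The 8-, 4-, 2- and 1-byte comparisons in longest_prefix_match() all reduce to the same idea: XOR the two buffers and count the leading equal bits of the first differing chunk, as line 217 does with fls(). A minimal user-space sketch of the byte-at-a-time version (a sketch with hypothetical helper names; GCC's __builtin_clz stands in for the kernel's fls()):

    #include <stdint.h>
    #include <stdio.h>

    /* Highest set bit, 1-based, like the kernel's fls(); 0 for x == 0. */
    static int fls8(uint8_t x)
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    /* Length in bits of the common prefix of a and b, capped at limit bits. */
    static uint32_t prefix_match_bits(const uint8_t *a, const uint8_t *b,
                                      uint32_t limit)
    {
        uint32_t prefixlen = 0;
        uint32_t i;

        for (i = 0; prefixlen < limit; i++) {
            if (a[i] != b[i]) {
                /* First differing byte: count its leading equal bits. */
                prefixlen += 8 - fls8(a[i] ^ b[i]);
                break;
            }
            prefixlen += 8;
        }
        return prefixlen < limit ? prefixlen : limit;
    }

    int main(void)
    {
        uint8_t a[4] = { 192, 168, 1, 0 };
        uint8_t b[4] = { 192, 168, 0, 0 };

        printf("%u\n", prefix_match_bits(a, b, 32));  /* prints 23 */
        return 0;
    }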
D | percpu_freelist.c
    31          struct pcpu_freelist_node *node)  (in pcpu_freelist_push_node())
    33          node->next = head->first;
    34          head->first = node;
    38          struct pcpu_freelist_node *node)  (in ___pcpu_freelist_push())
    41          pcpu_freelist_push_node(head, node);
    46          struct pcpu_freelist_node *node)  (in pcpu_freelist_try_push_extra())
    51          pcpu_freelist_push_node(&s->extralist, node);
    57          struct pcpu_freelist_node *node)  (in ___pcpu_freelist_push_nmi())
    67          pcpu_freelist_push_node(head, node);
    77              pcpu_freelist_try_push_extra(s, node))
   [all …]
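pcpu_freelist_push_node() (lines 33-34) is a plain LIFO linked push; the surrounding helpers differ only in which list head they target and how they serialize against NMIs. A minimal sketch of the core operation with hypothetical type names, leaving out the per-CPU heads and all locking:

    #include <stddef.h>
    #include <stdio.h>

    struct freelist_node {
        struct freelist_node *next;
    };

    struct freelist_head {
        struct freelist_node *first;
    };

    /* LIFO push, mirroring pcpu_freelist_push_node() (lines 33-34). */
    static void freelist_push(struct freelist_head *head,
                              struct freelist_node *node)
    {
        node->next = head->first;
        head->first = node;
    }

    /* Matching pop: unlink and return the most recently pushed node. */
    static struct freelist_node *freelist_pop(struct freelist_head *head)
    {
        struct freelist_node *node = head->first;

        if (node)
            head->first = node->next;
        return node;
    }

    int main(void)
    {
        struct freelist_head head = { NULL };
        struct freelist_node a, b;

        freelist_push(&head, &a);
        freelist_push(&head, &b);
        printf("%d\n", freelist_pop(&head) == &b);  /* prints 1: LIFO order */
        return 0;
    }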
D | bpf_lru_list.h
    50  typedef bool (*del_from_htab_func)(void *arg, struct bpf_lru_node *node);
    64  static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node)
    66          if (!READ_ONCE(node->ref))
    67                  WRITE_ONCE(node->ref, 1);
    76  void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node);
    77  void bpf_lru_promote(struct bpf_lru *lru, struct bpf_lru_node *node);
/kernel/trace/

D | trace_boot.c
    25  trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node)
    33          xbc_node_for_each_array_value(node, "options", anode, p) {
    43          p = xbc_node_find_value(node, "tracing_on", NULL);
    53          p = xbc_node_find_value(node, "trace_clock", NULL);
    59          p = xbc_node_find_value(node, "buffer_size", NULL);
    68          p = xbc_node_find_value(node, "cpumask", NULL);
    83  trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node)
    89          xbc_node_for_each_array_value(node, "events", anode, p) {
   102  trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
   110          xbc_node_for_each_array_value(node, "probes", anode, val) {
   [all …]
D | trace_stat.c
    27          struct rb_node node;  (member)
    51          rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) {  (in __reset_stat_session())
    93          this = container_of(*new, struct stat_node, node);  (in insert_stat())
   103          rb_link_node(&data->node, parent, new);
   104          rb_insert_color(&data->node, root);
   174          struct rb_node *node;  (in stat_seq_start())
   188          node = rb_first(&session->stat_root);
   189          for (i = 0; node && i < n; i++)
   190                  node = rb_next(node);
   192          return node;
   [all …]
/kernel/locking/

D | osq_lock.c
    25  static inline int node_cpu(struct optimistic_spin_node *node)
    27          return node->cpu - 1;
    43          struct optimistic_spin_node *node,  (in osq_wait_next())
    78          if (node->next) {
    79                  next = xchg(&node->next, NULL);
    92          struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);  (in osq_lock())
    97          node->locked = 0;
    98          node->next = NULL;
    99          node->cpu = curr;
   112          node->prev = prev;
   [all …]
D | qspinlock_paravirt.h
   173          struct pv_node *node;  (member)
   212  static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
   221          WRITE_ONCE(he->node, node);
   243          struct pv_node *node;  (in pv_unhash())
   247          node = READ_ONCE(he->node);
   249          return node;
   278  static void pv_init_node(struct mcs_spinlock *node)
   280          struct pv_node *pn = (struct pv_node *)node;
   293  static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
   295          struct pv_node *pn = (struct pv_node *)node;
   [all …]
D | mcs_spinlock.h
    65  void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
    70          node->locked = 0;
    71          node->next = NULL;
    79          prev = xchg(lock, node);
    91          WRITE_ONCE(prev->next, node);
    94          arch_mcs_spin_lock_contended(&node->locked);
   102  void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
   104          struct mcs_spinlock *next = READ_ONCE(node->next);
   110          if (likely(cmpxchg_release(lock, node, NULL) == node))
   113          while (!(next = READ_ONCE(node->next)))
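mcs_spin_lock()/mcs_spin_unlock() are the classic MCS queue lock: each waiter spins on its own node's ->locked flag, and the shared lock word only holds the queue tail. A user-space transliteration using C11 atomics (a sketch, not the kernel code; portable spin loops replace arch_mcs_spin_lock_contended(), and compare-exchange replaces cmpxchg_release()):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct mcs_node {
        _Atomic(struct mcs_node *) next;
        atomic_bool locked;      /* becomes true when our turn arrives */
    };

    typedef _Atomic(struct mcs_node *) mcs_lock_t;

    static void mcs_lock(mcs_lock_t *lock, struct mcs_node *node)
    {
        struct mcs_node *prev;

        atomic_store_explicit(&node->locked, false, memory_order_relaxed);
        atomic_store_explicit(&node->next, NULL, memory_order_relaxed);

        /* Atomically enqueue ourselves at the tail (line 79's xchg()). */
        prev = atomic_exchange_explicit(lock, node, memory_order_acq_rel);
        if (!prev)
            return;              /* queue was empty: lock acquired */

        /* Publish ourselves to the predecessor, then spin on our own
         * flag until it hands the lock over (lines 91 and 94). */
        atomic_store_explicit(&prev->next, node, memory_order_release);
        while (!atomic_load_explicit(&node->locked, memory_order_acquire))
            ;
    }

    static void mcs_unlock(mcs_lock_t *lock, struct mcs_node *node)
    {
        struct mcs_node *next =
            atomic_load_explicit(&node->next, memory_order_acquire);

        if (!next) {
            /* No known successor: try to reset the tail (line 110). */
            struct mcs_node *expected = node;

            if (atomic_compare_exchange_strong_explicit(
                        lock, &expected, NULL,
                        memory_order_release, memory_order_relaxed))
                return;
            /* A successor is mid-enqueue; wait for its ->next store
             * (line 113). */
            while (!(next = atomic_load_explicit(&node->next,
                                                 memory_order_acquire)))
                ;
        }
        atomic_store_explicit(&next->locked, true, memory_order_release);
    }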
D | qspinlock.c
   272  static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
   273  static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
   276                                             struct mcs_spinlock *node) { }  (in __pv_kick_node())
   278                                             struct mcs_spinlock *node)  (in __pv_wait_head_or_lock())
   317          struct mcs_spinlock *prev, *next, *node;  (in queued_spin_lock_slowpath())
   400          node = this_cpu_ptr(&qnodes[0].mcs);
   401          idx = node->count++;
   420          node = grab_mcs_node(node, idx);
   434          node->locked = 0;
   435          node->next = NULL;
   [all …]
/kernel/power/

D | wakelock.c
    29          struct rb_node node;  (member)
    40          struct rb_node *node;  (in pm_show_wakelocks())
    46          for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
    47                  wl = rb_entry(node, struct wakelock, node);
   122          rb_erase(&wl->node, &wakelocks_tree);  (in __wakelocks_gc())
   150          struct rb_node **node = &wakelocks_tree.rb_node;  (in wakelock_lookup_add())
   151          struct rb_node *parent = *node;
   154          while (*node) {
   157                  parent = *node;
   158                  wl = rb_entry(*node, struct wakelock, node);
   [all …]
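wakelock_lookup_add() (lines 150-158) uses the standard rbtree descent: walk down through a struct rb_node ** cursor while remembering the parent, so a new node can be linked exactly where the search fell off the tree. The same shape, sketched with a plain unbalanced BST keyed by int (names here are hypothetical; the kernel pairs this walk with rb_link_node()/rb_insert_color() for rebalancing):

    #include <stdlib.h>
    #include <stdio.h>

    struct tnode {
        int key;
        struct tnode *left, *right;
    };

    static struct tnode *lookup_add(struct tnode **root, int key)
    {
        struct tnode **node = root, *parent = NULL;

        while (*node) {
            parent = *node;
            if (key < parent->key)
                node = &parent->left;
            else if (key > parent->key)
                node = &parent->right;
            else
                return parent;               /* already present */
        }

        *node = calloc(1, sizeof(**node));   /* link at the fall-off point */
        if (*node)
            (*node)->key = key;
        return *node;
    }

    int main(void)
    {
        struct tnode *root = NULL;

        lookup_add(&root, 2);
        lookup_add(&root, 1);
        printf("%d\n", lookup_add(&root, 1)->key);  /* prints 1 */
        return 0;
    }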
D | snapshot.c
   405          struct rtree_node *node;  (member)
   439          struct rtree_node *node;  (in alloc_rtree_node())
   441          node = chain_alloc(ca, sizeof(struct rtree_node));
   442          if (!node)
   445          node->data = get_image_page(gfp_mask, safe_needed);
   446          if (!node->data)
   449          list_add_tail(&node->list, list);
   451          return node;
   464          struct rtree_node *node, *block, **dst;  (in add_rtree_block())
   479          node = alloc_rtree_node(gfp_mask, safe_needed, ca,
   [all …]
D | qos.c
    99  int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
   115          plist_del(node, &c->list);
   122          plist_del(node, &c->list);
   125          plist_node_init(node, new_value);
   126          plist_add(node, &c->list);
   159          list_del(&req->node);  (in pm_qos_flags_remove_req())
   160          list_for_each_entry(req, &pqf->list, node)
   195          INIT_LIST_HEAD(&req->node);  (in pm_qos_update_flags())
   196          list_add_tail(&req->node, &pqf->list);
   248          int ret = pm_qos_update_target(req->qos, &req->node, action, value);  (in cpu_latency_qos_apply())
   [all …]
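pm_qos_update_target() changes a request's value in a priority-sorted plist by deleting the node, re-initializing it with the new value, and re-adding it (lines 115-126). A minimal sketch of that delete/re-init/re-insert pattern on a hand-rolled sorted list (hypothetical types, not the kernel's plist):

    #include <stddef.h>
    #include <stdio.h>

    struct pnode {
        int prio;
        struct pnode *next;
    };

    /* Insert in ascending priority order. */
    static void plist_add(struct pnode **list, struct pnode *node)
    {
        while (*list && (*list)->prio < node->prio)
            list = &(*list)->next;
        node->next = *list;
        *list = node;
    }

    static void plist_del(struct pnode **list, struct pnode *node)
    {
        while (*list && *list != node)
            list = &(*list)->next;
        if (*list)
            *list = node->next;
    }

    /* Update pattern from lines 115-126: del, set new value, re-add. */
    static void plist_update(struct pnode **list, struct pnode *node,
                             int new_prio)
    {
        plist_del(list, node);
        node->prio = new_prio;
        plist_add(list, node);
    }

    int main(void)
    {
        struct pnode a = { 10, NULL }, b = { 20, NULL };
        struct pnode *list = NULL;

        plist_add(&list, &a);
        plist_add(&list, &b);
        plist_update(&list, &a, 30);
        printf("%d\n", list->prio);  /* prints 20: b now sorts first */
        return 0;
    }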
/kernel/

D | smp.c
    32  #define CSD_TYPE(_csd)  ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
   241          return csd->node.dst;  /* Other CSD_TYPE_ values might not have ->dst. */  (in csd_lock_wait_getcpu())
   288          unsigned int srccpu = csd->node.src;  (in csd_lock_print_extended())
   331          unsigned int flags = READ_ONCE(csd->node.u_flags);  (in csd_lock_wait_toolong())
   411          smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));  (in csd_lock_wait())
   414  static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
   422          if (llist_add(node, &per_cpu(call_single_queue, cpu))) {
   440          smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));  (in csd_lock_wait())
   447          csd->node.u_flags |= CSD_FLAG_LOCK;  (in csd_lock())
   459          WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));  (in csd_unlock())
   [all …]
D | irq_work.c
    34          oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);  (in irq_work_claim())
    56          if (atomic_read(&work->node.a_flags) & IRQ_WORK_LAZY) {  (in __irq_work_queue_local())
    57                  if (llist_add(&work->node.llist, this_cpu_ptr(&lazy_list)) &&
    61          if (llist_add(&work->node.llist, this_cpu_ptr(&raised_list)))
   107          __smp_call_single_queue(cpu, &work->node.llist);  (in irq_work_queue_on())
   145          flags = atomic_read(&work->node.a_flags);  (in irq_work_single())
   147          atomic_set(&work->node.a_flags, flags);
   162          (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
   176          llist_for_each_entry_safe(work, tmp, llnode, node.llist)  (in irq_work_run_list())
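irq_work_claim() (line 34) relies on an atomic fetch-or: every racing enqueuer sets the claimed bit, but only the caller that sees the bit clear in the returned old flags owns the work. A minimal sketch of that claim idiom in C11 (hypothetical flag names and return convention, not the kernel's IRQ_WORK_* layout):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define WORK_CLAIMED 0x01

    struct work {
        atomic_int flags;
    };

    /* Claim the work for queueing; exactly one concurrent caller wins
     * until the flag is cleared again, mirroring the fetch_or on line 34. */
    static bool work_claim(struct work *w)
    {
        int oflags = atomic_fetch_or(&w->flags, WORK_CLAIMED);

        /* If CLAIMED was already set, someone else owns the pending work. */
        return !(oflags & WORK_CLAIMED);
    }

    int main(void)
    {
        struct work w = { .flags = 0 };

        printf("%d\n", work_claim(&w));  /* prints 1: first claim succeeds */
        printf("%d\n", work_claim(&w));  /* prints 0: already claimed */
        return 0;
    }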
D | audit_tree.c
    33  struct node {
   272  static struct audit_chunk *find_chunk(struct node *p)
   325  static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
   462          struct node *p;  (in tag_chunk())
   573          struct node *p;  (in prune_tree_chunks())
   577          p = list_first_entry(&victim->chunks, struct node, list);
   619          struct node *node = list_entry(p, struct node, list);  (in trim_marked())
   621          if (node->index & (1U<<31)) {
   687          struct node *node;  (in audit_trim_trees())
   705          list_for_each_entry(node, &tree->chunks, list) {
   [all …]
D | kthread.c
    43          int node;  (member)
   339          current->pref_node_fork = create->node;  (in create_kthread())
   358                                  void *data, int node,  (in __kthread_create_on_node())
   371          create->node = node;
   445                                  void *data, int node,  (in kthread_create_on_node())
   453          task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
   755                                   struct kthread_work, node);  (in kthread_worker_fn())
   756                  list_del_init(&work->node);
   786          int node = NUMA_NO_NODE;  (in __kthread_create_worker())
   795                  node = cpu_to_node(cpu);
   [all …]
D | cpu.c
    75          struct hlist_node *node;  (member)
   128                          struct hlist_node *node);
   133                          struct hlist_node *node);
   168                                  bool bringup, struct hlist_node *node,  (in cpuhp_invoke_callback())
   173          int (*cbm)(unsigned int cpu, struct hlist_node *node);
   199          if (node) {
   201                  trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
   202                  ret = cbm(cpu, node);
   209          hlist_for_each(node, &step->list) {
   210                  if (lastp && node == *lastp)
   [all …]
D | scs.c
    31  static void *__scs_alloc(int node)
    47                                  GFP_SCS, PAGE_KERNEL, 0, node,
    54  void *scs_alloc(int node)
    58          s = __scs_alloc(node);
   114  int scs_prepare(struct task_struct *tsk, int node)
   121          s = scs_alloc(node);
D | workqueue.c
   155          int node;  /* I: the associated node ID */  (member)
   434          list_for_each_entry((worker), &(pool)->workers, node)  \
   586                                            int node)  (in unbound_pwq_by_node())
   596          if (unlikely(node == NUMA_NO_NODE))
   599          return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
  1584  static int workqueue_select_cpu_near(int node)
  1593          if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
  1598                  if (node == cpu_to_node(cpu))
  1602          cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
  1628  bool queue_work_node(int node, struct workqueue_struct *wq,
   [all …]
/kernel/irq/

D | irqdesc.c
    55  static int alloc_masks(struct irq_desc *desc, int node)
    58                                       GFP_KERNEL, node))
    63                                       GFP_KERNEL, node)) {
    70          if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
    81  static void desc_smp_init(struct irq_desc *desc, int node,
    92          desc->irq_common_data.node = node;
    98  alloc_masks(struct irq_desc *desc, int node) { return 0; }
   100  desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
   103  static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
   127          desc_smp_init(desc, node, affinity);
   [all …]
D | affinity.c
    46          int node;  (in alloc_node_to_cpumask())
    52          for (node = 0; node < nr_node_ids; node++) {
    53                  if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
    60          while (--node >= 0)
    61                  free_cpumask_var(masks[node]);
    68          int node;  (in free_node_to_cpumask())
    70          for (node = 0; node < nr_node_ids; node++)
    71                  free_cpumask_var(masks[node]);
/kernel/time/

D | alarmtimer.c
   163          timerqueue_del(&base->timerqueue, &alarm->node);  (in alarmtimer_enqueue())
   165          timerqueue_add(&base->timerqueue, &alarm->node);
   183          timerqueue_del(&base->timerqueue, &alarm->node);  (in alarmtimer_dequeue())
   214          hrtimer_set_expires(&alarm->timer, alarm->node.expires);  (in alarmtimer_fired())
   228          return ktime_sub(alarm->node.expires, base->get_ktime());  (in alarm_expires_remaining())
   329          timerqueue_init(&alarm->node);  (in __alarm_init())
   362          alarm->node.expires = start;  (in alarm_start())
   364          hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS);
   391          hrtimer_set_expires(&alarm->timer, alarm->node.expires);  (in alarm_restart())
   446          delta = ktime_sub(now, alarm->node.expires);  (in alarm_forward())
   [all …]
/kernel/livepatch/

D | shadow.c
    55          struct hlist_node node;  (member)
    89          hash_for_each_possible_rcu(klp_shadow_hash, shadow, node,  (in klp_shadow_get())
   157          hash_add_rcu(klp_shadow_hash, &new_shadow->node,  (in __klp_shadow_get_or_alloc())
   237          hash_del_rcu(&shadow->node);  (in klp_shadow_free_struct())
   261          hash_for_each_possible(klp_shadow_hash, shadow, node,  (in klp_shadow_free())
   292          hash_for_each(klp_shadow_hash, i, shadow, node) {  (in klp_shadow_free_all())