/kernel/bpf/

bpf_lru_list.c
     52  static void bpf_lru_list_count_inc(struct bpf_lru_list *l,    in bpf_lru_list_count_inc() (argument)
     56  l->counts[type]++;    in bpf_lru_list_count_inc()
     59  static void bpf_lru_list_count_dec(struct bpf_lru_list *l,    in bpf_lru_list_count_dec() (argument)
     63  l->counts[type]--;    in bpf_lru_list_count_dec()
     66  static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l,    in __bpf_lru_node_move_to_free() (argument)
     77  if (&node->list == l->next_inactive_rotation)    in __bpf_lru_node_move_to_free()
     78  l->next_inactive_rotation = l->next_inactive_rotation->prev;    in __bpf_lru_node_move_to_free()
     80  bpf_lru_list_count_dec(l, node->type);    in __bpf_lru_node_move_to_free()
     87  static void __bpf_lru_node_move_in(struct bpf_lru_list *l,    in __bpf_lru_node_move_in() (argument)
     95  bpf_lru_list_count_inc(l, tgt_type);    in __bpf_lru_node_move_in()
     [all …]
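
The detail worth noticing here is cursor maintenance during removal: __bpf_lru_node_move_to_free() backs next_inactive_rotation up one node whenever the node being freed is the one the rotation cursor points at, and keeps the per-type counts in step. A minimal userspace sketch of the same idea; struct lru, rotate_pos, and lru_remove() are invented names, not the kernel's:

```c
struct list_node { struct list_node *prev, *next; };

struct lru {
    struct list_node head;        /* sentinel of a circular doubly linked list */
    struct list_node *rotate_pos; /* rotation cursor; may point at any node */
    unsigned int count;           /* stands in for l->counts[type] */
};

static void lru_remove(struct lru *l, struct list_node *node)
{
    /* If the rotation cursor points at the node being removed, back it
     * up first, as __bpf_lru_node_move_to_free() does with
     * next_inactive_rotation, so later rotation passes stay valid. */
    if (node == l->rotate_pos)
        l->rotate_pos = node->prev;
    node->prev->next = node->next;
    node->next->prev = node->prev;
    l->count--;                   /* mirrors bpf_lru_list_count_dec() */
}
```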

hashtab.c
    197  static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,    in htab_elem_set_ptr() (argument)
    200  *(void __percpu **)(l->key + key_size) = pptr;    in htab_elem_set_ptr()
    203  static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)    in htab_elem_get_ptr() (argument)
    205  return *(void __percpu **)(l->key + key_size);    in htab_elem_get_ptr()
    208  static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)    in fd_htab_map_get_ptr() (argument)
    210  return *(void **)(l->key + roundup(map->key_size, 8));    in fd_htab_map_get_ptr()
    297  struct htab_elem *l;    in prealloc_lru_pop() (local)
    300  l = container_of(node, struct htab_elem, lru_node);    in prealloc_lru_pop()
    301  memcpy(l->key, key, htab->map.key_size);    in prealloc_lru_pop()
    302  return l;    in prealloc_lru_pop()
    [all …]
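
htab_elem stashes a pointer directly behind the key bytes in its flexible array, with the fd-map variant rounding key_size up to 8 so the slot is aligned. A hedged userspace sketch of that layout; struct elem and the helpers are hypothetical, and memcpy stands in for the kernel's direct store, whose alignment the allocator guarantees:

```c
#include <stdint.h>
#include <string.h>

struct elem {
    uint32_t hash;  /* stand-in for the element's bookkeeping fields */
    char key[];     /* key bytes, then a pointer slot right behind them */
};

#define ROUND_UP_8(x) (((x) + 7u) & ~7u)

/* Store a pointer in the slot after the (8-byte-rounded) key. */
static void elem_set_ptr(struct elem *l, uint32_t key_size, void *pptr)
{
    memcpy(l->key + ROUND_UP_8(key_size), &pptr, sizeof(pptr));
}

/* Read it back from the same offset. */
static void *elem_get_ptr(struct elem *l, uint32_t key_size)
{
    void *pptr;

    memcpy(&pptr, l->key + ROUND_UP_8(key_size), sizeof(pptr));
    return pptr;
}
```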

helpers.c
    280  arch_spinlock_t *l = (void *)lock;    in __bpf_spin_lock() (local)
    287  BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));    in __bpf_spin_lock()
    289  arch_spin_lock(l);    in __bpf_spin_lock()
    294  arch_spinlock_t *l = (void *)lock;    in __bpf_spin_unlock() (local)
    296  arch_spin_unlock(l);    in __bpf_spin_unlock()
    303  atomic_t *l = (void *)lock;    in __bpf_spin_lock() (local)
    305  BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));    in __bpf_spin_lock()
    307  atomic_cond_read_relaxed(l, !VAL);    in __bpf_spin_lock()
    308  } while (atomic_xchg(l, 1));    in __bpf_spin_lock()
    313  atomic_t *l = (void *)lock;    in __bpf_spin_unlock() (local)
    [all …]
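
When the architecture offers no native spinlock here, __bpf_spin_lock() falls back to a test-and-test-and-set loop: spin read-only with atomic_cond_read_relaxed() until the word looks free, then claim it with atomic_xchg(), whose return value reveals whether another CPU got there first. A C11 userspace analogue of that fallback; the type and function names are mine:

```c
#include <stdatomic.h>

typedef struct { atomic_int v; } test_and_set_lock;

static void tas_lock(test_and_set_lock *l)
{
    do {
        /* Read-only spin, the atomic_cond_read_relaxed(l, !VAL) part:
         * cheap while contended, no cacheline ping-pong. */
        while (atomic_load_explicit(&l->v, memory_order_relaxed))
            ;
        /* Exchange provides the acquire barrier; retry if we lost. */
    } while (atomic_exchange_explicit(&l->v, 1, memory_order_acquire));
}

static void tas_unlock(test_and_set_lock *l)
{
    atomic_store_explicit(&l->v, 0, memory_order_release);
}
```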

offload.c
     42  struct rhash_head l;    (member)
     54  .head_offset = offsetof(struct bpf_offload_netdev, l),
    608  err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);    in bpf_offload_dev_netdev_register()
    639  WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));    in bpf_offload_dev_netdev_unregister()
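
offload.c shows the standard intrusive-rhashtable shape: the object embeds a struct rhash_head, and the params describe where the linkage and key live via offsetof(), so the table never allocates per-entry wrappers. A kernel-style sketch under that assumption; struct my_obj and its ifindex key are made up:

```c
#include <linux/rhashtable.h>

struct my_obj {
    int ifindex;            /* lookup key */
    struct rhash_head node; /* intrusive linkage, like bpf_offload_netdev.l */
};

static const struct rhashtable_params my_params = {
    .key_len     = sizeof(int),
    .key_offset  = offsetof(struct my_obj, ifindex),
    .head_offset = offsetof(struct my_obj, node),
    .automatic_shrinking = true,
};

/* Insert and remove pass the embedded head, never the object itself:
 *   rhashtable_insert_fast(&ht, &obj->node, my_params);
 *   rhashtable_remove_fast(&ht, &obj->node, my_params);
 */
```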

/kernel/cgroup/

cgroup-v1.c
    199  struct cgroup_pidlist *l, *tmp_l;    in cgroup1_pidlist_destroy_all() (local)
    202  list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)    in cgroup1_pidlist_destroy_all()
    203  mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);    in cgroup1_pidlist_destroy_all()
    213  struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,    in cgroup_pidlist_destroy_work_fn() (local)
    217  mutex_lock(&l->owner->pidlist_mutex);    in cgroup_pidlist_destroy_work_fn()
    224  list_del(&l->links);    in cgroup_pidlist_destroy_work_fn()
    225  kvfree(l->list);    in cgroup_pidlist_destroy_work_fn()
    226  put_pid_ns(l->key.ns);    in cgroup_pidlist_destroy_work_fn()
    227  tofree = l;    in cgroup_pidlist_destroy_work_fn()
    230  mutex_unlock(&l->owner->pidlist_mutex);    in cgroup_pidlist_destroy_work_fn()
    [all …]
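
This teardown is a common two-step: the walker uses the _safe list iterator and merely kicks each entry's delayed work with zero delay, and the work function recovers its owning object with container_of() before unlinking and freeing it. A trimmed kernel-style sketch, with the real code's locking and refcounting elided and struct pidlist_like invented:

```c
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/slab.h>

struct pidlist_like {
    struct list_head links;
    struct delayed_work destroy_dwork;
};

/* _safe because entries may be freed while we walk; mod_delayed_work()
 * with delay 0 schedules immediate teardown. */
static void destroy_all(struct list_head *lists, struct workqueue_struct *wq)
{
    struct pidlist_like *l, *tmp;

    list_for_each_entry_safe(l, tmp, lists, links)
        mod_delayed_work(wq, &l->destroy_dwork, 0);
}

/* The work function climbs from the work item back to its container. */
static void destroy_work_fn(struct work_struct *work)
{
    struct delayed_work *dwork = to_delayed_work(work);
    struct pidlist_like *l = container_of(dwork, struct pidlist_like,
                                          destroy_dwork);

    list_del(&l->links);
    kfree(l);
}
```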

cgroup.c
   4764  struct list_head *l;    in css_task_iter_next_css_set() (local)
   4772  l = it->tcset_pos->next;    in css_task_iter_next_css_set()
   4774  if (l != it->tcset_head) {    in css_task_iter_next_css_set()
   4775  it->tcset_pos = l;    in css_task_iter_next_css_set()
   4776  return container_of(l, struct css_set,    in css_task_iter_next_css_set()
   4784  l = it->cset_pos;    in css_task_iter_next_css_set()
   4785  l = l->next;    in css_task_iter_next_css_set()
   4786  if (l == it->cset_head) {    in css_task_iter_next_css_set()
   4792  cset = container_of(l, struct css_set, e_cset_node[it->ss->id]);    in css_task_iter_next_css_set()
   4794  link = list_entry(l, struct cgrp_cset_link, cset_link);    in css_task_iter_next_css_set()
   [all …]

/kernel/module/

tracking.c
     57  size_t l;    in print_unloaded_tainted_modules() (local)
     59  l = module_flags_taint(mod_taint->taints, buf);    in print_unloaded_tainted_modules()
     60  buf[l++] = '\0';    in print_unloaded_tainted_modules()
     90  size_t l;    in unloaded_tainted_modules_seq_show() (local)
     93  l = module_flags_taint(mod_taint->taints, buf);    in unloaded_tainted_modules_seq_show()
     94  buf[l++] = '\0';    in unloaded_tainted_modules_seq_show()

main.c
    884  size_t l = 0;    in module_flags_taint() (local)
    889  buf[l++] = taint_flags[i].c_true;    in module_flags_taint()
    892  return l;    in module_flags_taint()
    964  size_t l;    in show_taint() (local)
    966  l = module_flags_taint(mk->mod->taints, buffer);    in show_taint()
    967  buffer[l++] = '\n';    in show_taint()
    968  return l;    in show_taint()
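
module_flags_taint() follows a small but strict contract: it writes one character per set taint flag and returns the count without terminating the buffer, so each caller picks its own terminator, '\0' in tracking.c and '\n' in show_taint(). A userspace sketch of the same contract; the flag table here is invented, not the kernel's taint_flags[]:

```c
#include <stddef.h>

#define FLAG_COUNT 4

static const struct { unsigned long bit; char c; } flag_tab[FLAG_COUNT] = {
    { 1UL << 0, 'P' }, { 1UL << 1, 'O' }, { 1UL << 2, 'E' }, { 1UL << 3, 'C' },
};

/* One character per set flag; returns how many were written. No
 * terminator is added, so the caller appends '\0' or '\n' itself. */
static size_t flags_to_chars(unsigned long flags, char *buf)
{
    size_t l = 0;

    for (size_t i = 0; i < FLAG_COUNT; i++)
        if (flags & flag_tab[i].bit)
            buf[l++] = flag_tab[i].c;
    return l;
}

/* usage: char buf[FLAG_COUNT + 2];
 *        size_t l = flags_to_chars(f, buf);
 *        buf[l++] = '\n';                   */
```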

/kernel/locking/

mcs_spinlock.h
     32  #define arch_mcs_spin_lock_contended(l) \    (argument)
     34  smp_cond_load_acquire(l, VAL); \
     44  #define arch_mcs_spin_unlock_contended(l) \    (argument)
     45  smp_store_release((l), 1)
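
These two macros are the whole MCS handoff: a contended waiter spins on its own queue node until the previous holder publishes 1 into it, and the acquire/release pairing makes the old holder's critical-section writes visible to the new one. A C11 analogue of the pair; struct mcs_node is a stand-in for the kernel's struct mcs_spinlock:

```c
#include <stdatomic.h>

struct mcs_node { atomic_int locked; };

/* Waiter side: spin until our own node->locked becomes nonzero; the
 * acquire load plays the role of smp_cond_load_acquire(l, VAL). */
static void mcs_lock_contended(struct mcs_node *node)
{
    while (!atomic_load_explicit(&node->locked, memory_order_acquire))
        ;
}

/* Holder side: hand the lock to the next waiter by storing 1 into its
 * node with release semantics, the smp_store_release((l), 1) analogue. */
static void mcs_unlock_contended(struct mcs_node *next)
{
    atomic_store_explicit(&next->locked, 1, memory_order_release);
}
```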

spinlock.c
     49  # define arch_read_relax(l) cpu_relax()    (argument)
     52  # define arch_write_relax(l) cpu_relax()    (argument)
     55  # define arch_spin_relax(l) cpu_relax()    (argument)

rtmutex_common.h
     79  extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
     80  extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);

qspinlock_paravirt.h
     80  #define queued_spin_trylock(l) pv_hybrid_queued_unfair_trylock(l)    (argument)

lockdep_proc.c
    409  static int lock_stat_cmp(const void *l, const void *r)    in lock_stat_cmp() (argument)
    411  const struct lock_stat_data *dl = l, *dr = r;    in lock_stat_cmp()
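
lock_stat_cmp() is the usual three-way comparator that the kernel's sort() (and libc's qsort()) expect: negative, zero, or positive. A standalone sketch ordering by descending contention count; the struct and field are assumptions, not the real lock_stat_data layout:

```c
#include <stdlib.h>

struct lock_stat_like { unsigned long contentions; };

/* Explicit comparisons instead of subtraction, so large unsigned
 * values cannot overflow the int return. */
static int lock_stat_cmp_like(const void *l, const void *r)
{
    const struct lock_stat_like *dl = l, *dr = r;

    if (dl->contentions < dr->contentions)
        return 1;   /* r sorts first: descending order */
    if (dl->contentions > dr->contentions)
        return -1;
    return 0;
}

/* usage: qsort(stats, n, sizeof(*stats), lock_stat_cmp_like); */
```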

/kernel/sched/

cpudeadline.c
     27  int l, r, largest;    in cpudl_heapify_down() (local)
     39  l = left_child(idx);    in cpudl_heapify_down()
     44  if ((l < cp->size) && dl_time_before(orig_dl,    in cpudl_heapify_down()
     45  cp->elements[l].dl)) {    in cpudl_heapify_down()
     46  largest = l;    in cpudl_heapify_down()
     47  largest_dl = cp->elements[l].dl;    in cpudl_heapify_down()
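
cpudl_heapify_down() is textbook sift-down on an array-backed max-heap: compute the child indices, pick the largest of parent and children, swap, and continue from the swapped slot. The same skeleton over a plain array of keys, stripped of the scheduler's deadline comparisons:

```c
/* Sift-down for a max-heap stored in an array: children of idx live at
 * 2*idx+1 and 2*idx+2, just like left_child()/right_child(). */
static void heapify_down(long *a, int size, int idx)
{
    for (;;) {
        int l = 2 * idx + 1, r = 2 * idx + 2, largest = idx;

        if (l < size && a[l] > a[largest])
            largest = l;
        if (r < size && a[r] > a[largest])
            largest = r;
        if (largest == idx)
            break;              /* heap property restored */

        long tmp = a[idx];
        a[idx] = a[largest];
        a[largest] = tmp;
        idx = largest;          /* keep sifting from the swapped slot */
    }
}
```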

/kernel/trace/

trace_branch.c
    301  int l;    in annotate_branch_stat_show() (local)
    308  l = snprintf(NULL, 0, "/%lu", p->constant);    in annotate_branch_stat_show()
    309  l = l > 8 ? 0 : 8 - l;    in annotate_branch_stat_show()
    312  p->data.correct, p->constant, l, p->data.incorrect);    in annotate_branch_stat_show()
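
The snprintf(NULL, 0, ...) call is the standard trick for measuring how many characters a format will produce without writing anything; here the measured width of "/constant" becomes leftover padding for a %*lu field so the columns line up. A userspace rendering of the same three lines, with the seq_file plumbing replaced by printf:

```c
#include <stdio.h>

static void print_row(unsigned long correct, unsigned long constant,
                      unsigned long incorrect)
{
    /* How many characters will "/<constant>" occupy? */
    int l = snprintf(NULL, 0, "/%lu", constant);

    /* Convert that into the remaining budget of an 8-char column. */
    l = l > 8 ? 0 : 8 - l;

    /* %*lu takes the computed width l as a runtime argument. */
    printf("%8lu/%lu%*lu\n", correct, constant, l, incorrect);
}
```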

trace_stat.c
    217  struct stat_node *l = container_of(v, struct stat_node, node);    in stat_seq_show() (local)
    222  return session->ts->stat_show(s, l->stat);    in stat_seq_show()

trace_events.c
   1235  loff_t l;    in t_start() (local)
   1240  for (l = 0; l <= *pos; ) {    in t_start()
   1241  file = t_next(m, file, &l);    in t_start()
   1268  loff_t l;    in s_start() (local)
   1273  for (l = 0; l <= *pos; ) {    in s_start()
   1274  file = s_next(m, file, &l);    in s_start()
   1618  loff_t l = 0;    in f_start() (local)
   1625  while (l < *pos && p)    in f_start()
   1626  p = f_next(m, p, &l);    in f_start()
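
All of these *_start() loops, and the near-identical ones in ftrace.c, trace.c, rv.c, and resource.c below, are the same seq_file idiom: ->start() does not cache a cursor between reads, it rewinds and replays ->next() until the iterator is back at *pos. A self-contained miniature of the pattern; the example_* names and the static data array are mine:

```c
#include <linux/seq_file.h>
#include <linux/kernel.h>

static int data[] = { 10, 20, 30, 40 };

static void *example_next(struct seq_file *m, void *v, loff_t *pos)
{
    loff_t i = ++*pos;          /* advance the position cookie */

    return i < (loff_t)ARRAY_SIZE(data) ? &data[i] : NULL;
}

static void *example_start(struct seq_file *m, loff_t *pos)
{
    void *v = &data[0];
    loff_t l = 0;

    /* Replay from the top until we reach *pos, exactly the shape of
     * t_start()/s_start(); returning NULL ends the sequence. */
    while (v && l < *pos)
        v = example_next(m, v, &l);
    return v;
}
```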

ftrace.c
   3377  loff_t l;    in t_probe_start() (local)
   3388  for (l = 0; l <= (*pos - iter->mod_pos); ) {    in t_probe_start()
   3389  p = t_probe_next(m, &l);    in t_probe_start()
   3452  loff_t l;    in t_mod_start() (local)
   3463  for (l = 0; l <= (*pos - iter->func_pos); ) {    in t_mod_start()
   3464  p = t_mod_next(m, &l);    in t_mod_start()
   3543  loff_t l = *pos; /* t_probe_start() must use original pos */    in t_next() (local)
   3558  return t_mod_start(m, &l);    in t_next()
   3564  return t_mod_start(m, &l);    in t_next()
   3580  loff_t l;    in t_start() (local)
   [all …]

trace.c
    649  loff_t l = 0;    in trace_pid_start() (local)
    657  for (pid++; pid && l < *pos;    in trace_pid_start()
    658  pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))    in trace_pid_start()
   4107  loff_t l = 0;    in s_start() (local)
   4145  for (p = iter; p && l < *pos; p = s_next(m, p, &l))    in s_start()
   4156  l = *pos - 1;    in s_start()
   4157  p = s_next(m, p, &l);    in s_start()
   5070  loff_t l = 0;    in t_start() (local)
   5075  for (; t && l < *pos; t = t_next(m, t, &l))    in t_start()
   5903  loff_t l = 0;    in saved_cmdlines_start() (local)
   [all …]

ring_buffer.c
    661  rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)    in rb_time_read_cmpxchg() (argument)
    665  ret = local_cmpxchg(l, expect, set);    in rb_time_read_cmpxchg()
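
rb_time_read_cmpxchg() is a thin success test around local_cmpxchg(): store set only if the slot still holds expect, and report whether we won the race. In portable C11 the same check is a single compare-exchange; the helper name here is mine:

```c
#include <stdatomic.h>
#include <stdbool.h>

/* Publish set only if *l still holds expect; true means we won.
 * On failure C11 writes the current value back into expect, which
 * mirrors the "ret == expect" test the kernel helper performs. */
static bool cmpxchg_once(atomic_ulong *l, unsigned long expect,
                         unsigned long set)
{
    return atomic_compare_exchange_strong(l, &expect, set);
}
```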

/kernel/trace/rv/

rv.c
    429  loff_t l;    in enabled_monitors_start() (local)
    438  for (l = 0; l <= *pos; ) {    in enabled_monitors_start()
    439  m_def = enabled_monitors_next(m, m_def, &l);    in enabled_monitors_start()

/kernel/

audit.h
    309  #define audit_to_watch(k, p, l, o) (-EINVAL)    (argument)
    310  #define audit_add_watch(k, l) (-EINVAL)    (argument)
    315  #define audit_alloc_mark(k, p, l) (ERR_PTR(-EINVAL))    (argument)
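
These audit.h macros show the compiled-out stub convention: when the config option is off, each entry point collapses to a constant expression (-EINVAL, or ERR_PTR(-EINVAL) for pointer-returning ones) so call sites build unchanged and just see a clean failure. The same shape for a hypothetical feature, with invented CONFIG option and function names:

```c
#include <linux/errno.h>
#include <linux/err.h>

#ifdef CONFIG_MY_FEATURE
int my_feature_attach(void *ctx, const char *path, int len);
void *my_feature_alloc_mark(void *ctx, const char *path, int len);
#else
/* Stubs: same signatures from the caller's point of view, constant
 * results, zero code generated for the disabled feature. */
#define my_feature_attach(c, p, l)     (-EINVAL)
#define my_feature_alloc_mark(c, p, l) (ERR_PTR(-EINVAL))
#endif
```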

resource.c
     95  loff_t l = 0;    in r_start() (local)
     97  for (p = p->child; p && l < *pos; p = r_next(m, p, &l))    in r_start()
   1648  loff_t l;    in iomem_map_sanity_check() (local)
   1651  for (p = p->child; p ; p = r_next(NULL, p, &l)) {    in iomem_map_sanity_check()

/kernel/printk/

printk.c
   3737  size_t l = 0;    in kmsg_dump_get_line() (local)
   3749  l = record_print_text(&r, syslog, printk_time);    in kmsg_dump_get_line()
   3755  l = get_record_print_text_size(&info, line_count, syslog,    in kmsg_dump_get_line()
   3764  *len = l;    in kmsg_dump_get_line()

/kernel/time/

ntp.c
    740  time_constant = max(time_constant, 0l);    in process_adjtimex_modes()
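
The 0l literal is load-bearing: time_constant is a long, and the kernel's max() warns at build time when its two arguments have different types, so a bare int 0 would not do. A minimal type-checked max in the same spirit, using the GCC/clang statement-expression idiom; the macro name is mine:

```c
/* Comparing &_x and &_y makes the compiler emit "comparison of
 * distinct pointer types" whenever x and y differ in type, the same
 * trick the kernel's min()/max() rely on. */
#define max_checked(x, y) ({                \
    typeof(x) _x = (x);                     \
    typeof(y) _y = (y);                     \
    (void)(&_x == &_y);  /* type check */   \
    _x > _y ? _x : _y;                      \
})

static long clamp_time_constant(long tc)
{
    /* max_checked(tc, 0) would mix long and int; 0l matches. */
    return max_checked(tc, 0l);
}
```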