/kernel/bpf/ |
D | bpf_lru_list.c |
      52  static void bpf_lru_list_count_inc(struct bpf_lru_list *l,            in bpf_lru_list_count_inc()
      59  static void bpf_lru_list_count_dec(struct bpf_lru_list *l,            in bpf_lru_list_count_dec()
      66  static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l,       in __bpf_lru_node_move_to_free()
      87  static void __bpf_lru_node_move_in(struct bpf_lru_list *l,            in __bpf_lru_node_move_in()
     105  static void __bpf_lru_node_move(struct bpf_lru_list *l,               in __bpf_lru_node_move()
     129  static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l)   in bpf_lru_list_inactive_low()
     145  struct bpf_lru_list *l)                                               in __bpf_lru_list_rotate_active()
     172  struct bpf_lru_list *l)                                               in __bpf_lru_list_rotate_inactive()
     212  struct bpf_lru_list *l,                                               in __bpf_lru_list_shrink_inactive()
     242  static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l)   in __bpf_lru_list_rotate()
     [all …]
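
The matches above cover the BPF LRU machinery: nodes migrate between an active and an inactive list, with rotate and shrink passes deciding what gets evicted. Below is a minimal sketch of that rotation idea using the kernel list API; the real __bpf_lru_list_rotate_active() in kernel/bpf/bpf_lru_list.c also batches its work and tracks per-type counts, so treat this as shape only.

#include <linux/list.h>
#include <linux/types.h>

/* Hypothetical, simplified active/inactive lists; not the kernel's
 * struct bpf_lru_list, which keeps an array of lists plus counters.
 */
struct toy_lru_node {
	struct list_head list;
	bool ref;		/* set on access, cleared on rotation */
};

struct toy_lru_list {
	struct list_head active;
	struct list_head inactive;
};

/* Demote unreferenced nodes from the active to the inactive list;
 * referenced nodes get a "second chance" and stay active.
 */
static void toy_lru_rotate_active(struct toy_lru_list *l)
{
	struct toy_lru_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &l->active, list) {
		if (node->ref)
			node->ref = false;
		else
			list_move_tail(&node->list, &l->inactive);
	}
}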
|
D | hashtab.c |
     184  static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,             in htab_elem_set_ptr()
     190  static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)   in htab_elem_get_ptr()
     195  static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)    in fd_htab_map_get_ptr()
     239  struct htab_elem *l;   in prealloc_lru_pop()   local
     321  struct pcpu_freelist_node *l;   in alloc_extra_elems()   local
     538  struct htab_elem *l;   in lookup_elem_raw()   local
     556  struct htab_elem *l;   in lookup_nulls_elem_raw()   local
     578  struct htab_elem *l;   in __htab_map_lookup_elem()   local
     596  struct htab_elem *l = __htab_map_lookup_elem(map, key);   in htab_map_lookup_elem()   local
     633  struct htab_elem *l = __htab_map_lookup_elem(map, key);   in __htab_lru_map_lookup_elem()   local
     [all …]
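
htab_elem_set_ptr() and htab_elem_get_ptr() at lines 184/190 rely on a layout trick: for per-CPU maps, the value pointer sits directly after the key bytes inside the element. A trimmed sketch of that layout; the struct name and fields here are illustrative, and the real struct htab_elem in kernel/bpf/hashtab.c carries hash-table linkage and more.

#include <linux/types.h>
#include <linux/percpu-defs.h>

/* Trimmed element: key_size bytes of key, then a void __percpu *.
 * Assumes key_size has been rounded up so the pointer is aligned,
 * as the kernel's callers arrange.
 */
struct htab_elem_sketch {
	u32 hash;
	char key[];
};

static inline void
elem_set_ptr(struct htab_elem_sketch *l, u32 key_size, void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *
elem_get_ptr(struct htab_elem_sketch *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}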
|
D | helpers.c |
     239  arch_spinlock_t *l = (void *)lock;   in __bpf_spin_lock()   local
     253  arch_spinlock_t *l = (void *)lock;   in __bpf_spin_unlock()   local
     262  atomic_t *l = (void *)lock;   in __bpf_spin_lock()   local
     272  atomic_t *l = (void *)lock;   in __bpf_spin_unlock()   local
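
The paired __bpf_spin_lock()/__bpf_spin_unlock() definitions reflect a config split: with a suitable architecture spinlock, the 4-byte BPF lock word is cast to arch_spinlock_t; otherwise it is treated as an atomic_t and spun on directly. A user-space analogue of the atomic fallback, written with C11 atomics (names are mine, not the kernel's):

#include <stdatomic.h>

/* Test-and-test-and-set: spin until the word reads 0, then try to
 * claim it with an acquire-ordered exchange.
 */
static void toy_spin_lock(atomic_int *l)
{
	do {
		while (atomic_load_explicit(l, memory_order_relaxed))
			;	/* wait for the lock word to clear */
	} while (atomic_exchange_explicit(l, 1, memory_order_acquire));
}

static void toy_spin_unlock(atomic_int *l)
{
	atomic_store_explicit(l, 0, memory_order_release);
}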
|
D | offload.c |
      42  struct rhash_head l;   member
|
/kernel/locking/ |
D | rtmutex.h |
      14  #define rt_mutex_deadlock_check(l)        (0)                argument
      17  #define debug_rt_mutex_lock(l)            do { } while (0)   argument
      18  #define debug_rt_mutex_proxy_lock(l,p)    do { } while (0)   argument
      19  #define debug_rt_mutex_proxy_unlock(l)    do { } while (0)   argument
      20  #define debug_rt_mutex_unlock(l)          do { } while (0)   argument
      22  #define debug_rt_mutex_deadlock(d, a ,l)  do { } while (0)   argument
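
All of these debug hooks compile to nothing when rt_mutex debugging is disabled; the do { } while (0) body keeps each stub a single statement so it still parses inside an unbraced if/else. A tiny illustration of why that matters, with a hypothetical macro name:

/* Hypothetical stub in the style of the debug_rt_mutex_*() macros. */
#define debug_hook(l)	do { } while (0)

void example(int cond, int *lock)
{
	if (cond)
		debug_hook(lock);	/* expands to exactly one statement... */
	else
		(void)lock;		/* ...so this else still binds correctly */
}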
|
D | mcs_spinlock.h |
      32  #define arch_mcs_spin_lock_contended(l) \     argument
      44  #define arch_mcs_spin_unlock_contended(l) \   argument
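
arch_mcs_spin_lock_contended() and arch_mcs_spin_unlock_contended() implement the handoff at the core of an MCS queue lock: each waiter spins on a flag in its own node, and the current holder flips that flag to pass the lock on. A self-contained user-space sketch of the algorithm with C11 atomics; this is not the kernel's implementation.

#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_int locked;		/* 1 = lock handed to this waiter */
};

/* Queue ourselves; if someone was ahead, spin on our private flag. */
static void mcs_lock(_Atomic(struct mcs_node *) *tail, struct mcs_node *me)
{
	struct mcs_node *prev;

	atomic_store(&me->next, NULL);
	atomic_store(&me->locked, 0);
	prev = atomic_exchange(tail, me);
	if (prev) {
		atomic_store(&prev->next, me);
		while (!atomic_load(&me->locked))
			;	/* local spinning: no cache-line bouncing */
	}
}

static void mcs_unlock(_Atomic(struct mcs_node *) *tail, struct mcs_node *me)
{
	struct mcs_node *succ = atomic_load(&me->next);

	if (!succ) {
		struct mcs_node *expected = me;

		/* No successor visible: try to swing tail back to empty. */
		if (atomic_compare_exchange_strong(tail, &expected, NULL))
			return;
		while (!(succ = atomic_load(&me->next)))
			;	/* successor is mid-enqueue; wait for the link */
	}
	atomic_store(&succ->locked, 1);	/* hand over the lock */
}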
|
D | spinlock.c |
      49  # define arch_read_relax(l)   cpu_relax()   argument
      52  # define arch_write_relax(l)  cpu_relax()   argument
      55  # define arch_spin_relax(l)   cpu_relax()   argument
|
D | rtmutex.c |
     145  # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)   argument
     146  # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)   argument
     205  # define rt_mutex_cmpxchg_acquire(l,c,n) (0)   argument
     206  # define rt_mutex_cmpxchg_release(l,c,n) (0)   argument
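
The two definitions of each macro show the rt_mutex fast path: where the architecture provides a real cmpxchg, an uncontended acquire is a single compare-and-swap on the owner field; otherwise the macro is (0) and the slow path always runs. A hedged sketch of how such a fast path is typically shaped; the struct and helper are illustrative, and the real slow path (waiter enqueueing, priority inheritance) is omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_rt_mutex {
	_Atomic(void *) owner;	/* NULL = unlocked */
};

/* One CAS from NULL to the new owner, acquire ordering; mirrors the
 * shape of rt_mutex_cmpxchg_acquire(l, c, n).
 */
static bool toy_rt_mutex_trylock(struct toy_rt_mutex *l, void *task)
{
	void *expected = NULL;

	return atomic_compare_exchange_strong_explicit(
		&l->owner, &expected, task,
		memory_order_acquire, memory_order_relaxed);
}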
|
D | qspinlock_paravirt.h |
      80  #define queued_spin_trylock(l) pv_hybrid_queued_unfair_trylock(l)   argument
|
D | lockdep_proc.c |
     407  static int lock_stat_cmp(const void *l, const void *r)   in lock_stat_cmp()
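
lock_stat_cmp() is a comparator handed to the kernel's sort(); like ncpus_cmp_func() further down, it takes two const void * and returns a strcmp-style result. A small sketch of the pattern; the stat struct and field are placeholders, not the real lock_stat layout.

#include <linux/sort.h>

struct toy_stat {
	unsigned long total_waittime;
};

/* Sort descending by wait time; sort() expects negative/zero/positive. */
static int toy_stat_cmp(const void *l, const void *r)
{
	const struct toy_stat *sl = l, *sr = r;

	if (sl->total_waittime != sr->total_waittime)
		return sl->total_waittime < sr->total_waittime ? 1 : -1;
	return 0;
}

/* Usage: sort(stats, nr_stats, sizeof(*stats), toy_stat_cmp, NULL); */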
|
/kernel/cgroup/ |
D | cgroup-v1.c |
     200  struct cgroup_pidlist *l, *tmp_l;   in cgroup1_pidlist_destroy_all()   local
     214  struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,   in cgroup_pidlist_destroy_work_fn()   local
     282  struct cgroup_pidlist *l;   in cgroup_pidlist_find()   local
     303  struct cgroup_pidlist *l;   in cgroup_pidlist_find_create()   local
     336  struct cgroup_pidlist *l;   in pidlist_array_load()   local
     400  struct cgroup_pidlist *l;   in cgroup_pidlist_start()   local
     454  struct cgroup_pidlist *l = ctx->procs1.pidlist;   in cgroup_pidlist_stop()   local
     466  struct cgroup_pidlist *l = ctx->procs1.pidlist;   in cgroup_pidlist_next()   local
|
D | cgroup.c |
    4514  struct list_head *l;   in css_task_iter_next_css_set()   local
|
/kernel/sched/ |
D | cpudeadline.c |
      28  int l, r, largest;   in cpudl_heapify_down()   local
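
The locals l, r, and largest in cpudl_heapify_down() are the textbook binary max-heap sift-down indices (l = 2*i + 1, r = 2*i + 2). A generic version over a plain key array; the real code sifts a CPU/deadline pair and maintains a reverse index alongside.

/* Sift element i down a max-heap of n keys until both children are
 * smaller. Hypothetical helper, not the kernel's cpudl_heapify_down().
 */
static void heapify_down(unsigned long *heap, int n, int i)
{
	for (;;) {
		int l = 2 * i + 1;
		int r = 2 * i + 2;
		int largest = i;
		unsigned long tmp;

		if (l < n && heap[l] > heap[largest])
			largest = l;
		if (r < n && heap[r] > heap[largest])
			largest = r;
		if (largest == i)
			break;		/* heap property restored */

		tmp = heap[i];
		heap[i] = heap[largest];
		heap[largest] = tmp;
		i = largest;
	}
}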
|
/kernel/ |
D | audit.h |
     301  #define audit_to_watch(k, p, l, o)  (-EINVAL)            argument
     302  #define audit_add_watch(k, l)       (-EINVAL)            argument
     307  #define audit_alloc_mark(k, p, l)   (ERR_PTR(-EINVAL))   argument
|
D | resource.c |
      92  loff_t l = 0;   in r_start()   local
    1596  loff_t l;   in iomem_map_sanity_check()   local
    1646  loff_t l;   in iomem_is_exclusive()   local
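
The loff_t l = 0 in r_start() is the standard seq_file idiom, which recurs in the trace_events.c, trace.c, and ftrace.c *_start() entries below: re-walk the underlying structure from the top, counting records against *pos, so iteration can resume at an arbitrary record. A sketch of that idiom over a linked list; note the kernel also provides seq_list_start() for exactly this case.

#include <linux/seq_file.h>
#include <linux/list.h>

/* Illustrative only: skip *pos entries of 'head', mirroring the
 * loff_t-counter pattern in r_start()/t_start()/s_start().
 */
static void *toy_seq_start(struct seq_file *m, loff_t *pos,
			   struct list_head *head)
{
	struct list_head *p;
	loff_t l = 0;

	list_for_each(p, head)
		if (l++ == *pos)
			return p;	/* the record at *pos */
	return NULL;			/* past the end */
}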
|
D | module.c |
    1196  size_t l = 0;   in module_flags_taint()   local
    1265  size_t l;   in show_taint()   local
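
In module_flags_taint() the size_t l accumulates the length of a flag string, one character appended per set taint bit. A hypothetical reduction of that pattern; the flag table here is made up, not the kernel's taint_flags[].

#include <stddef.h>

/* Append one letter per set bit, returning the resulting length.
 * 'buf' must hold at least 5 bytes for this 4-flag example.
 */
static size_t flags_to_string(unsigned long taints, char *buf)
{
	static const char keys[] = "PFOE";	/* hypothetical letters */
	size_t l = 0;
	int i;

	for (i = 0; i < 4; i++)
		if (taints & (1UL << i))
			buf[l++] = keys[i];
	buf[l] = '\0';
	return l;
}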
|
/kernel/trace/ |
D | trace_stat.c |
     217  struct stat_node *l = container_of(v, struct stat_node, node);   in stat_seq_show()   local
|
D | trace_branch.c |
     301  int l;   in annotate_branch_stat_show()   local
|
D | trace_events.c |
     993  loff_t l;   in t_start()   local
    1026  loff_t l;   in s_start()   local
    1371  loff_t l = 0;   in f_start()   local
|
D | trace.c |
     642  loff_t l = 0;   in trace_pid_start()   local
    3705  loff_t l = 0;   in s_start()   local
    4651  loff_t l = 0;   in t_start()   local
    5467  loff_t l = 0;   in saved_cmdlines_start()   local
    5625  loff_t l = 0;   in eval_map_start()   local
|
D | ftrace.c |
    3360  loff_t l;   in t_probe_start()   local
    3435  loff_t l;   in t_mod_start()   local
    3526  loff_t l = *pos; /* t_probe_start() must use original pos */   in t_next()   local
    3563  loff_t l;   in t_start()   local
|
D | ring_buffer.c |
     692  rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)   in rb_time_read_cmpxchg()
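
rb_time_read_cmpxchg() wraps local_cmpxchg() so the update succeeds only if the local_t still holds the expected value. A sketch matching the listed signature; the body is assumed from that signature, so treat the details as illustrative.

#include <asm/local.h>
#include <linux/types.h>

/* Succeed only if *l still holds 'expect'; local_cmpxchg() returns
 * the value that was actually there.
 */
static inline bool
toy_local_try_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
{
	unsigned long ret = local_cmpxchg(l, expect, set);

	return ret == expect;
}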
|
/kernel/irq/ |
D | affinity.c |
     107  static int ncpus_cmp_func(const void *l, const void *r)   in ncpus_cmp_func()
|
/kernel/printk/ |
D | printk.c |
    3312  size_t l = 0;   in kmsg_dump_get_line_nolock()   local
    3402  size_t l = 0;   in kmsg_dump_get_buffer()   local
|
/kernel/events/ |
D | uprobes.c |
     616  static int match_uprobe(struct uprobe *l, struct uprobe *r)   in match_uprobe()
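
match_uprobe() orders uprobes by (inode, offset) for their rb-tree; returning a signed result lets one helper drive both lookup and insertion. A trimmed sketch of the two-key comparison, with the struct reduced to just the compared fields.

#include <linux/fs.h>

/* Order by inode pointer first, then by offset within the file.
 * Simplified: the real struct uprobe carries refcounts, consumers, etc.
 */
struct toy_uprobe {
	struct inode *inode;
	loff_t offset;
};

static int toy_match_uprobe(struct toy_uprobe *l, struct toy_uprobe *r)
{
	if (l->inode < r->inode)
		return -1;
	if (l->inode > r->inode)
		return 1;
	if (l->offset < r->offset)
		return -1;
	if (l->offset > r->offset)
		return 1;
	return 0;
}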
|