
Searched refs:l (Results 1 – 25 of 26) sorted by relevance

/kernel/bpf/
bpf_lru_list.c
47 static void bpf_lru_list_count_inc(struct bpf_lru_list *l, in bpf_lru_list_count_inc() argument
51 l->counts[type]++; in bpf_lru_list_count_inc()
54 static void bpf_lru_list_count_dec(struct bpf_lru_list *l, in bpf_lru_list_count_dec() argument
58 l->counts[type]--; in bpf_lru_list_count_dec()
61 static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l, in __bpf_lru_node_move_to_free() argument
72 if (&node->list == l->next_inactive_rotation) in __bpf_lru_node_move_to_free()
73 l->next_inactive_rotation = l->next_inactive_rotation->prev; in __bpf_lru_node_move_to_free()
75 bpf_lru_list_count_dec(l, node->type); in __bpf_lru_node_move_to_free()
82 static void __bpf_lru_node_move_in(struct bpf_lru_list *l, in __bpf_lru_node_move_in() argument
90 bpf_lru_list_count_inc(l, tgt_type); in __bpf_lru_node_move_in()
[all …]
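
Note: the __bpf_lru_node_move_to_free() hit above shows a small cursor-maintenance trick: the list keeps a saved rotation position (next_inactive_rotation), and when the node being removed is exactly that position, the cursor is stepped back so it stays valid. A minimal sketch of the same idea over a circular doubly linked list (names here are illustrative, not from bpf_lru_list.c):

struct dlist { struct dlist *prev, *next; };

/* Unlink node while keeping an external cursor valid: if the cursor
 * points at the node being removed, step it back first. */
static void unlink_keep_cursor(struct dlist *node, struct dlist **cursor)
{
        if (*cursor == node)
                *cursor = node->prev;
        node->prev->next = node->next;
        node->next->prev = node->prev;
}
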
hashtab.c
79 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size, in htab_elem_set_ptr() argument
82 *(void __percpu **)(l->key + key_size) = pptr; in htab_elem_set_ptr()
85 static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size) in htab_elem_get_ptr() argument
87 return *(void __percpu **)(l->key + key_size); in htab_elem_get_ptr()
90 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l) in fd_htab_map_get_ptr() argument
92 return *(void **)(l->key + roundup(map->key_size, 8)); in fd_htab_map_get_ptr()
123 struct htab_elem *l; in prealloc_lru_pop() local
126 l = container_of(node, struct htab_elem, lru_node); in prealloc_lru_pop()
127 memcpy(l->key, key, htab->map.key_size); in prealloc_lru_pop()
128 return l; in prealloc_lru_pop()
[all …]
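
Note: prealloc_lru_pop() above uses the kernel's container_of() idiom: an embedded lru_node is popped from a list and the enclosing htab_elem is recovered by subtracting the member offset. A self-contained userspace sketch of that pattern (struct names invented for illustration):

#include <stddef.h>
#include <stdio.h>

struct list_node { struct list_node *next; };

struct elem {
        int key;
        struct list_node lru_node;      /* embedded node, like htab_elem */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
        struct elem e = { .key = 42 };
        struct list_node *n = &e.lru_node;              /* what a pop returns */
        struct elem *back = container_of(n, struct elem, lru_node);
        printf("%d\n", back->key);                      /* prints 42 */
        return 0;
}
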
helpers.c
223 arch_spinlock_t *l = (void *)lock; in __bpf_spin_lock() local
230 BUILD_BUG_ON(sizeof(*l) != sizeof(__u32)); in __bpf_spin_lock()
232 arch_spin_lock(l); in __bpf_spin_lock()
237 arch_spinlock_t *l = (void *)lock; in __bpf_spin_unlock() local
239 arch_spin_unlock(l); in __bpf_spin_unlock()
246 atomic_t *l = (void *)lock; in __bpf_spin_lock() local
248 BUILD_BUG_ON(sizeof(*l) != sizeof(*lock)); in __bpf_spin_lock()
250 atomic_cond_read_relaxed(l, !VAL); in __bpf_spin_lock()
251 } while (atomic_xchg(l, 1)); in __bpf_spin_lock()
256 atomic_t *l = (void *)lock; in __bpf_spin_unlock() local
[all …]
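
Note: the second __bpf_spin_lock() variant above falls back to a simple test-and-test-and-set lock: spin with plain reads until the word looks free, then try to atomically exchange in 1. A rough userspace equivalent with C11 atomics rather than the kernel's atomic_t API:

#include <stdatomic.h>

static void spin_lock_xchg(atomic_int *l)
{
        do {
                /* wait until the lock looks free before trying to take it */
                while (atomic_load_explicit(l, memory_order_relaxed))
                        ;
        } while (atomic_exchange(l, 1));        /* old value 1: lost the race */
}

static void spin_unlock_xchg(atomic_int *l)
{
        atomic_store_explicit(l, 0, memory_order_release);
}
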
offload.c
42 struct rhash_head l; member
54 .head_offset = offsetof(struct bpf_offload_netdev, l),
611 err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params); in bpf_offload_dev_netdev_register()
642 WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params)); in bpf_offload_dev_netdev_unregister()
/kernel/cgroup/
cgroup-v1.c
202 struct cgroup_pidlist *l, *tmp_l; in cgroup1_pidlist_destroy_all() local
205 list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links) in cgroup1_pidlist_destroy_all()
206 mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0); in cgroup1_pidlist_destroy_all()
216 struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist, in cgroup_pidlist_destroy_work_fn() local
220 mutex_lock(&l->owner->pidlist_mutex); in cgroup_pidlist_destroy_work_fn()
227 list_del(&l->links); in cgroup_pidlist_destroy_work_fn()
228 kvfree(l->list); in cgroup_pidlist_destroy_work_fn()
229 put_pid_ns(l->key.ns); in cgroup_pidlist_destroy_work_fn()
230 tofree = l; in cgroup_pidlist_destroy_work_fn()
233 mutex_unlock(&l->owner->pidlist_mutex); in cgroup_pidlist_destroy_work_fn()
[all …]
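
Note: cgroup1_pidlist_destroy_all() above relies on list_for_each_entry_safe(), which caches the next entry so the current one may be removed (or queued for destruction) while the walk continues. The same idea over a plain singly linked list, as a sketch:

#include <stdlib.h>

struct node { int v; struct node *next; };

/* Free matching nodes during the walk: next is saved before the current
 * node is unlinked and freed, so the iteration stays safe. */
static void delete_matching(struct node **head, int v)
{
        struct node **prev = head, *cur, *next;

        for (cur = *head; cur; cur = next) {
                next = cur->next;
                if (cur->v == v) {
                        *prev = next;
                        free(cur);
                } else {
                        prev = &cur->next;
                }
        }
}
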
cgroup.c
4392 struct list_head *l; in css_task_iter_next_css_set() local
4400 l = it->tcset_pos->next; in css_task_iter_next_css_set()
4402 if (l != it->tcset_head) { in css_task_iter_next_css_set()
4403 it->tcset_pos = l; in css_task_iter_next_css_set()
4404 return container_of(l, struct css_set, in css_task_iter_next_css_set()
4412 l = it->cset_pos; in css_task_iter_next_css_set()
4413 l = l->next; in css_task_iter_next_css_set()
4414 if (l == it->cset_head) { in css_task_iter_next_css_set()
4420 cset = container_of(l, struct css_set, e_cset_node[it->ss->id]); in css_task_iter_next_css_set()
4422 link = list_entry(l, struct cgrp_cset_link, cset_link); in css_task_iter_next_css_set()
[all …]
/kernel/locking/
rtmutex.h
14 #define rt_mutex_deadlock_check(l) (0) argument
17 #define debug_rt_mutex_lock(l) do { } while (0) argument
18 #define debug_rt_mutex_proxy_lock(l,p) do { } while (0) argument
19 #define debug_rt_mutex_proxy_unlock(l) do { } while (0) argument
20 #define debug_rt_mutex_unlock(l) do { } while (0) argument
22 #define debug_rt_mutex_deadlock(d, a ,l) do { } while (0) argument
mcs_spinlock.h
32 #define arch_mcs_spin_lock_contended(l) \ argument
34 smp_cond_load_acquire(l, VAL); \
44 #define arch_mcs_spin_unlock_contended(l) \ argument
45 smp_store_release((l), 1)
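
Note: the two MCS macros above are an acquire-spin on a per-waiter flag paired with a release store by the previous lock holder. A C11 sketch of that handoff (struct and field names invented):

#include <stdatomic.h>

struct mcs_node {
        _Atomic(struct mcs_node *) next;        /* next waiter in the queue */
        atomic_int locked;                      /* set by our predecessor */
};

static void mcs_wait_for_handoff(struct mcs_node *me)
{
        /* acquire pairs with the release store in mcs_pass_lock() */
        while (!atomic_load_explicit(&me->locked, memory_order_acquire))
                ;
}

static void mcs_pass_lock(struct mcs_node *next)
{
        atomic_store_explicit(&next->locked, 1, memory_order_release);
}
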
rtmutex.c
144 # define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c) argument
145 # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c) argument
146 # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c) argument
205 # define rt_mutex_cmpxchg_relaxed(l,c,n) (0) argument
206 # define rt_mutex_cmpxchg_acquire(l,c,n) (0) argument
207 # define rt_mutex_cmpxchg_release(l,c,n) (0) argument
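
Note: the rt_mutex_cmpxchg_*() helpers above implement the lock fast path: compare-and-exchange the owner field, succeeding only when it still holds the expected value; where the architecture cannot provide this, the macros evaluate to 0 and the slow path is always taken. A userspace sketch of such a fast path with C11 atomics (not the rtmutex API itself):

#include <stdatomic.h>
#include <stdbool.h>

struct fast_mutex { _Atomic(void *) owner; };   /* NULL means unlocked */

static bool fast_trylock(struct fast_mutex *m, void *self)
{
        void *expected = NULL;
        /* acquire on success so the critical section cannot move above it */
        return atomic_compare_exchange_strong_explicit(&m->owner, &expected,
                        self, memory_order_acquire, memory_order_relaxed);
}
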
rtmutex_common.h
151 extern int rt_mutex_futex_trylock(struct rt_mutex *l);
152 extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
spinlock.c
49 # define arch_read_relax(l) cpu_relax() argument
52 # define arch_write_relax(l) cpu_relax() argument
55 # define arch_spin_relax(l) cpu_relax() argument
qspinlock_paravirt.h
80 #define queued_spin_trylock(l) pv_hybrid_queued_unfair_trylock(l) argument
lockdep_proc.c
357 static int lock_stat_cmp(const void *l, const void *r) in lock_stat_cmp() argument
359 const struct lock_stat_data *dl = l, *dr = r; in lock_stat_cmp()
/kernel/sched/
cpudeadline.c
28 int l, r, largest; in cpudl_heapify_down() local
40 l = left_child(idx); in cpudl_heapify_down()
45 if ((l < cp->size) && dl_time_before(orig_dl, in cpudl_heapify_down()
46 cp->elements[l].dl)) { in cpudl_heapify_down()
47 largest = l; in cpudl_heapify_down()
48 largest_dl = cp->elements[l].dl; in cpudl_heapify_down()
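
Note: cpudl_heapify_down() above is a standard sift-down on an array-backed max-heap ordered by deadline. A simplified sketch over plain integers (the kernel version also tracks which cpu sits at which heap index):

static inline int left_child(int i)  { return 2 * i + 1; }
static inline int right_child(int i) { return 2 * i + 2; }

/* Restore the max-heap property below idx. */
static void heapify_down(int *a, int size, int idx)
{
        for (;;) {
                int l = left_child(idx), r = right_child(idx), largest = idx;

                if (l < size && a[l] > a[largest])
                        largest = l;
                if (r < size && a[r] > a[largest])
                        largest = r;
                if (largest == idx)
                        break;

                int tmp = a[idx];
                a[idx] = a[largest];
                a[largest] = tmp;
                idx = largest;
        }
}
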
/kernel/trace/
trace_branch.c
301 int l; in annotate_branch_stat_show() local
308 l = snprintf(NULL, 0, "/%lu", p->constant); in annotate_branch_stat_show()
309 l = l > 8 ? 0 : 8 - l; in annotate_branch_stat_show()
312 p->data.correct, p->constant, l, p->data.incorrect); in annotate_branch_stat_show()
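
Note: annotate_branch_stat_show() above uses snprintf(NULL, 0, ...) purely to measure how wide "/<constant>" will print, then derives a pad width so the following column stays aligned. The same trick in userspace:

#include <stdio.h>

int main(void)
{
        unsigned long constant = 123, incorrect = 456;

        /* snprintf with a NULL buffer returns the length it would need */
        int l = snprintf(NULL, 0, "/%lu", constant);
        int pad = l > 8 ? 0 : 8 - l;

        printf("/%lu%*lu\n", constant, pad, incorrect);
        return 0;
}
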
trace_stat.c
219 struct stat_node *l = container_of(v, struct stat_node, node); in stat_seq_show() local
224 return session->ts->stat_show(s, l->stat); in stat_seq_show()
trace_events.c
923 loff_t l; in t_start() local
928 for (l = 0; l <= *pos; ) { in t_start()
929 file = t_next(m, file, &l); in t_start()
956 loff_t l; in s_start() local
961 for (l = 0; l <= *pos; ) { in s_start()
962 file = s_next(m, file, &l); in s_start()
1268 loff_t l = 0; in f_start() local
1275 while (l < *pos && p) in f_start()
1276 p = f_next(m, p, &l); in f_start()
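
Note: the t_start()/s_start()/f_start() hits above all follow the seq_file convention: ->start() may be entered with a non-zero *pos (for example after a partial read), so the iterator is rewound to the head and ->next() is replayed until the saved position is reached. A tiny userspace sketch of that replay loop (seq_file itself is kernel-only):

struct item { int v; struct item *next; };

/* ->next(): step to the following element, accounting for it in *pos */
static struct item *it_next(struct item *it, long *pos)
{
        (*pos)++;
        return it ? it->next : NULL;
}

/* ->start(): replay ->next() from the head until *ppos is reached */
static struct item *it_start(struct item *head, long *ppos)
{
        struct item *it = head;
        long l;

        for (l = 0; it && l < *ppos; )
                it = it_next(it, &l);
        return it;
}
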
ftrace.c
3150 loff_t l; in t_probe_start() local
3161 for (l = 0; l <= (*pos - iter->mod_pos); ) { in t_probe_start()
3162 p = t_probe_next(m, &l); in t_probe_start()
3225 loff_t l; in t_mod_start() local
3236 for (l = 0; l <= (*pos - iter->func_pos); ) { in t_mod_start()
3237 p = t_mod_next(m, &l); in t_mod_start()
3316 loff_t l = *pos; /* t_probe_start() must use original pos */ in t_next() local
3331 return t_mod_start(m, &l); in t_next()
3337 return t_mod_start(m, &l); in t_next()
3353 loff_t l; in t_start() local
[all …]
trace.c
464 loff_t l = 0; in trace_pid_start() local
471 for (pid++; pid && l < *pos; in trace_pid_start()
472 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l)) in trace_pid_start()
3446 loff_t l = 0; in s_start() local
3480 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) in s_start()
3491 l = *pos - 1; in s_start()
3492 p = s_next(m, p, &l); in s_start()
4337 loff_t l = 0; in t_start() local
4342 for (; t && l < *pos; t = t_next(m, t, &l)) in t_start()
5047 loff_t l = 0; in saved_tgids_start() local
[all …]
/kernel/
audit.h
297 #define audit_to_watch(k, p, l, o) (-EINVAL) argument
298 #define audit_add_watch(k, l) (-EINVAL) argument
303 #define audit_alloc_mark(k, p, l) (ERR_PTR(-EINVAL)) argument
resource.c
92 loff_t l = 0; in r_start() local
94 for (p = p->child; p && l < *pos; p = r_next(m, p, &l)) in r_start()
1536 loff_t l; in iomem_map_sanity_check() local
1539 for (p = p->child; p ; p = r_next(NULL, p, &l)) { in iomem_map_sanity_check()
1586 loff_t l; in iomem_is_exclusive() local
1595 for (p = p->child; p ; p = r_next(NULL, p, &l)) { in iomem_is_exclusive()
module.c
1180 size_t l = 0; in module_flags_taint() local
1185 buf[l++] = taint_flags[i].c_true; in module_flags_taint()
1188 return l; in module_flags_taint()
1249 size_t l; in show_taint() local
1251 l = module_flags_taint(mk->mod, buffer); in show_taint()
1252 buffer[l++] = '\n'; in show_taint()
1253 return l; in show_taint()
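
Note: module_flags_taint() above builds a short string with one character per set taint flag, and show_taint() then appends a newline. A generic sketch of that flag-to-string pattern (table and names invented):

struct flag_desc { unsigned long bit; char c; };

/* buf must hold at least n + 1 bytes */
static size_t flags_to_string(unsigned long flags,
                              const struct flag_desc *desc, size_t n, char *buf)
{
        size_t i, l = 0;

        for (i = 0; i < n; i++)
                if (flags & desc[i].bit)
                        buf[l++] = desc[i].c;   /* one character per set flag */
        buf[l] = '\0';
        return l;
}
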
/kernel/printk/
printk.c
3167 size_t l = 0; in kmsg_dump_get_line_nolock() local
3184 l = msg_print_text(msg, syslog, printk_time, line, size); in kmsg_dump_get_line_nolock()
3191 *len = l; in kmsg_dump_get_line_nolock()
3253 size_t l = 0; in kmsg_dump_get_buffer() local
3279 l += msg_print_text(msg, true, time, NULL, 0); in kmsg_dump_get_buffer()
3287 while (l >= size && seq < dumper->next_seq) { in kmsg_dump_get_buffer()
3290 l -= msg_print_text(msg, true, time, NULL, 0); in kmsg_dump_get_buffer()
3299 l = 0; in kmsg_dump_get_buffer()
3303 l += msg_print_text(msg, syslog, time, buf + l, size - l); in kmsg_dump_get_buffer()
3314 *len = l; in kmsg_dump_get_buffer()
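
Note: kmsg_dump_get_buffer() above sizes its output in two passes: it first sums msg_print_text(..., NULL, 0) lengths for the whole record range, then moves the start sequence forward, subtracting lengths, until the remainder fits the caller's buffer, and only then formats for real. A sketch of the same approach over an array of strings:

#include <stdio.h>
#include <string.h>

/* Copy as many of the newest messages as fit into buf, oldest-first. */
static size_t dump_tail(const char **msgs, size_t n, char *buf, size_t size)
{
        size_t first = 0, total = 0, l = 0, i;

        for (i = 0; i < n; i++)                 /* pass 1: measure everything */
                total += strlen(msgs[i]);

        while (total >= size && first < n)      /* drop oldest until it fits */
                total -= strlen(msgs[first++]);

        for (i = first; i < n; i++)             /* pass 2: actually format */
                l += snprintf(buf + l, size - l, "%s", msgs[i]);
        return l;
}
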
/kernel/irq/
affinity.c
107 static int ncpus_cmp_func(const void *l, const void *r) in ncpus_cmp_func() argument
109 const struct node_vectors *ln = l; in ncpus_cmp_func()
/kernel/events/
uprobes.c
622 static int match_uprobe(struct uprobe *l, struct uprobe *r) in match_uprobe() argument
624 if (l->inode < r->inode) in match_uprobe()
627 if (l->inode > r->inode) in match_uprobe()
630 if (l->offset < r->offset) in match_uprobe()
633 if (l->offset > r->offset) in match_uprobe()
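
Note: match_uprobe() above is a two-key comparison, ordering uprobes by inode first and offset second and returning a negative/zero/positive result so it can drive an ordered lookup. A generic sketch of the same comparator shape, usable with qsort()/bsearch():

#include <stdlib.h>

struct key { unsigned long inode, offset; };

static int key_cmp(const void *a, const void *b)
{
        const struct key *l = a, *r = b;

        if (l->inode != r->inode)
                return l->inode < r->inode ? -1 : 1;
        if (l->offset != r->offset)
                return l->offset < r->offset ? -1 : 1;
        return 0;
}
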
