/kernel/bpf/ |
D | hashtab.c |
    131   struct htab_elem *l;   in lookup_elem_raw()   local
    133   hlist_for_each_entry_rcu(l, head, hash_node)   in lookup_elem_raw()
    134   if (l->hash == hash && !memcmp(&l->key, key, key_size))   in lookup_elem_raw()
    135   return l;   in lookup_elem_raw()
    145   struct htab_elem *l;   in htab_map_lookup_elem()   local
    157   l = lookup_elem_raw(head, hash, key, key_size);   in htab_map_lookup_elem()
    159   if (l)   in htab_map_lookup_elem()
    160   return l->key + round_up(map->key_size, 8);   in htab_map_lookup_elem()
    170   struct htab_elem *l, *next_l;   in htab_map_get_next_key()   local
    186   l = lookup_elem_raw(head, hash, key, key_size);   in htab_map_get_next_key()
    [all …]
|
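The hashtab.c hits above are the core of the BPF hash-map lookup: hash the key, pick a bucket, and walk the bucket's collision chain, comparing the cached hash first and doing the full memcmp() only on a hash match. A minimal userspace sketch of that pattern, assuming invented names (my_elem, bucket_find) and FNV-1a standing in for the kernel's jhash():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct my_elem {
        struct my_elem *next;   /* collision chain, analogous to hash_node */
        uint32_t hash;          /* cached hash of the key */
        char key[];             /* key bytes stored inline */
    };

    /* FNV-1a, a stand-in for the kernel's jhash() */
    static uint32_t hash_key(const void *key, size_t len)
    {
        const unsigned char *p = key;
        uint32_t h = 2166136261u;
        while (len--)
            h = (h ^ *p++) * 16777619u;
        return h;
    }

    /* Walk one bucket: cheap hash compare first, memcmp only on a match. */
    static struct my_elem *bucket_find(struct my_elem *head, uint32_t hash,
                                       const void *key, size_t key_size)
    {
        for (struct my_elem *l = head; l; l = l->next)
            if (l->hash == hash && !memcmp(l->key, key, key_size))
                return l;
        return NULL;
    }

    int main(void)
    {
        const char key[4] = "abc";
        uint32_t h = hash_key(key, sizeof(key));

        struct my_elem *e = malloc(sizeof(*e) + sizeof(key));
        e->next = NULL;
        e->hash = h;
        memcpy(e->key, key, sizeof(key));

        printf("%s\n", bucket_find(e, h, key, sizeof(key)) ? "found" : "missing");
        free(e);
        return 0;
    }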
/kernel/locking/ |
D | rtmutex.h |
    13   #define rt_mutex_deadlock_check(l) (0)   argument
    16   #define debug_rt_mutex_lock(l) do { } while (0)   argument
    17   #define debug_rt_mutex_proxy_lock(l,p) do { } while (0)   argument
    18   #define debug_rt_mutex_proxy_unlock(l) do { } while (0)   argument
    19   #define debug_rt_mutex_unlock(l) do { } while (0)   argument
    21   #define debug_rt_mutex_deadlock(d, a ,l) do { } while (0)   argument
|
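These rtmutex.h entries are the non-debug stubs: each debug hook collapses to a statement-shaped no-op. The do { } while (0) form matters because the empty macro must still behave like a single statement wherever a real call could appear. A stand-alone illustration of the idiom (the trace_hit() hook is made up for the example):

    #include <stdio.h>

    #ifdef DEBUG_HOOKS
    # define trace_hit(tag)  printf("hit: %s\n", tag)
    #else
    /* Statement-shaped no-op: safe in an unbraced if/else, swallows the ';'. */
    # define trace_hit(tag)  do { } while (0)
    #endif

    int main(void)
    {
        int fast_path = 1;

        if (fast_path)
            trace_hit("fast");   /* expands cleanly whether or not DEBUG_HOOKS is set */
        else
            trace_hit("slow");

        return 0;
    }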
D | qspinlock_paravirt.h |
    221   struct __qspinlock *l = (void *)lock;   in pv_kick_node()   local
    240   WRITE_ONCE(l->locked, _Q_SLOW_VAL);   in pv_kick_node()
    251   struct __qspinlock *l = (void *)lock;   in pv_wait_head()   local
    264   if (!READ_ONCE(l->locked))   in pv_wait_head()
    283   if (!cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL)) {   in pv_wait_head()
    293   pv_wait(&l->locked, _Q_SLOW_VAL);   in pv_wait_head()
    316   struct __qspinlock *l = (void *)lock;   in __pv_queued_spin_unlock()   local
    325   locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);   in __pv_queued_spin_unlock()
    355   smp_store_release(&l->locked, 0);   in __pv_queued_spin_unlock()
|
D | mcs_spinlock.h |
    28   #define arch_mcs_spin_lock_contended(l) \   argument
    30   while (!(smp_load_acquire(l))) \
    41   #define arch_mcs_spin_unlock_contended(l) \   argument
    42   smp_store_release((l), 1)
|
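The mcs_spinlock.h contended-path macros are a pure acquire/release handshake: the waiter spins with a load-acquire on its own node's flag, and the previous owner hands the lock over with a store-release of 1. A C11-atomics sketch of the same handoff, simplified (the kernel versions take an arbitrary lvalue, add cpu_relax(), and allow per-arch overrides):

    #include <stdatomic.h>

    struct mcs_node {
        _Atomic(struct mcs_node *) next;
        atomic_uint locked;          /* per-waiter flag: 0 = wait, 1 = handed the lock */
    };

    /* Waiter side: spin until our predecessor publishes the handoff. */
    static void mcs_spin_lock_contended(atomic_uint *l)
    {
        while (!atomic_load_explicit(l, memory_order_acquire))
            ;                        /* the kernel relaxes the CPU in this loop */
    }

    /* Owner side: the store-release pairs with the waiter's load-acquire above,
     * so everything done under the lock is visible to the next owner. */
    static void mcs_spin_unlock_contended(atomic_uint *l)
    {
        atomic_store_explicit(l, 1, memory_order_release);
    }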
D | qspinlock.c |
    160   struct __qspinlock *l = (void *)lock;   in clear_pending_set_locked()   local
    162   WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);   in clear_pending_set_locked()
    177   struct __qspinlock *l = (void *)lock;   in xchg_tail()   local
    179   return (u32)xchg(&l->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;   in xchg_tail()
    229   struct __qspinlock *l = (void *)lock;   in set_locked()   local
    231   WRITE_ONCE(l->locked, _Q_LOCKED_VAL);   in set_locked()
|
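The repeated struct __qspinlock *l = (void *)lock casts exist so qspinlock.c can address sub-fields of the single 32-bit lock word (locked byte, pending byte, tail halfword) directly instead of recomputing shifts and masks. A little-endian userspace sketch of that overlay; the field layout here only mirrors the idea, since the real kernel layout also depends on endianness and NR_CPUS:

    #include <stdio.h>
    #include <stdint.h>

    /* C11 anonymous struct inside a union: one word, byte-addressable views. */
    union qspinlock_word {
        uint32_t val;                /* whole lock word, used for cmpxchg/xchg */
        struct {
            uint8_t  locked;         /* byte 0 on little-endian */
            uint8_t  pending;        /* byte 1 */
            uint16_t tail;           /* bytes 2-3: CPU + queue-node index */
        };
    };

    int main(void)
    {
        union qspinlock_word lock = { .val = 0 };

        lock.locked = 1;             /* like set_locked(): write only the locked byte */
        lock.tail   = 0x0042;        /* like xchg_tail() publishing the queue tail */

        printf("lock word: 0x%08x\n", (unsigned)lock.val);   /* 0x00420001 on little-endian */
        return 0;
    }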
D | spinlock.c |
    35   #define raw_read_can_lock(l) read_can_lock(l)   argument
    36   #define raw_write_can_lock(l) write_can_lock(l)   argument
    42   # define arch_read_relax(l) cpu_relax()   argument
    45   # define arch_write_relax(l) cpu_relax()   argument
    48   # define arch_spin_relax(l) cpu_relax()   argument
|
D | mutex-debug.h |
    42   struct mutex *l = container_of(lock, struct mutex, wait_lock); \
    47   DEBUG_LOCKS_WARN_ON(l->magic != l); \
|
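The mutex-debug.h lines implement a cheap corruption check: on init the mutex's magic field is pointed at the mutex itself, so a later l->magic != l means the structure was overwritten or the wrong lock address was passed in. The same self-pointer trick in plain C (the type and function names here are illustrative, not the kernel's):

    #include <assert.h>

    struct my_lock {
        int state;
        struct my_lock *magic;    /* debug: must always point back at the lock itself */
    };

    static void my_lock_init(struct my_lock *l)
    {
        l->state = 0;
        l->magic = l;             /* seed the self-pointer */
    }

    static void my_lock_check(struct my_lock *l)
    {
        /* Fires if the memory was scribbled on or a stale/wrong pointer was used. */
        assert(l->magic == l);
    }

    int main(void)
    {
        struct my_lock lock;

        my_lock_init(&lock);
        my_lock_check(&lock);     /* passes on an intact lock */
        return 0;
    }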
D | qrwlock.c |
    127   struct __qrwlock *l = (struct __qrwlock *)lock;   in queued_write_lock_slowpath()   local
    129   if (!READ_ONCE(l->wmode) &&   in queued_write_lock_slowpath()
    130   (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))   in queued_write_lock_slowpath()
|
D | rtmutex.c |
    141   # define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)   argument
    142   # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)   argument
    143   # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)   argument
    202   # define rt_mutex_cmpxchg_relaxed(l,c,n) (0)   argument
    203   # define rt_mutex_cmpxchg_acquire(l,c,n) (0)   argument
    204   # define rt_mutex_cmpxchg_release(l,c,n) (0)   argument
|
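rtmutex.c builds its fast path out of a cmpxchg on the owner field; the (0) stubs force every acquisition through the slow path whenever that fast path is compiled out. A hedged C11 sketch of the acquire flavour, which takes the lock only if the owner is currently NULL (the real rt_mutex also packs flag bits into the owner pointer, which is ignored here):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct task;                            /* stand-in for struct task_struct */

    struct my_rt_mutex {
        _Atomic(struct task *) owner;       /* NULL means unlocked */
    };

    /* Like rt_mutex_cmpxchg_acquire(l, NULL, current): succeed only if unowned. */
    static bool my_rt_mutex_trylock(struct my_rt_mutex *l, struct task *me)
    {
        struct task *expected = NULL;

        return atomic_compare_exchange_strong_explicit(&l->owner, &expected, me,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
    }

    /* Release: publish NULL so the next trylock's acquire pairs with this release. */
    static void my_rt_mutex_unlock(struct my_rt_mutex *l)
    {
        atomic_store_explicit(&l->owner, NULL, memory_order_release);
    }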
D | rtmutex_common.h |
    114   extern int rt_mutex_futex_trylock(struct rt_mutex *l);
    115   extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
|
D | lockdep_proc.c |
    380   static int lock_stat_cmp(const void *l, const void *r)   in lock_stat_cmp()   argument
    382   const struct lock_stat_data *dl = l, *dr = r;   in lock_stat_cmp()
|
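lock_stat_cmp() is a standard sort comparator: the void * arguments are cast to the element type and a signed ordering result is returned. A userspace equivalent usable with qsort(); the stats struct and the descending-by-total-wait ordering are illustrative, the kernel's actual key combines several contention counters:

    #include <stdio.h>
    #include <stdlib.h>

    struct lock_stat {
        const char *name;
        unsigned long wait_time_total;
    };

    /* Sort heaviest-contended first, the way /proc/lock_stat is usually read. */
    static int lock_stat_cmp(const void *l, const void *r)
    {
        const struct lock_stat *dl = l, *dr = r;

        if (dl->wait_time_total < dr->wait_time_total)
            return 1;
        if (dl->wait_time_total > dr->wait_time_total)
            return -1;
        return 0;
    }

    int main(void)
    {
        struct lock_stat stats[] = {
            { "rq->lock",       120 },
            { "tasklist_lock",  900 },
            { "dentry->d_lock",  40 },
        };

        qsort(stats, 3, sizeof(stats[0]), lock_stat_cmp);
        for (int i = 0; i < 3; i++)
            printf("%-16s %lu\n", stats[i].name, stats[i].wait_time_total);
        return 0;
    }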
/kernel/ |
D | cgroup.c |
    3897   struct list_head *l = it->cset_pos;   in css_task_iter_advance_css_set()   local
    3905   l = l->next;   in css_task_iter_advance_css_set()
    3906   if (l == it->cset_head) {   in css_task_iter_advance_css_set()
    3913   cset = container_of(l, struct css_set,   in css_task_iter_advance_css_set()
    3916   link = list_entry(l, struct cgrp_cset_link, cset_link);   in css_task_iter_advance_css_set()
    3921   it->cset_pos = l;   in css_task_iter_advance_css_set()
    3957   struct list_head *l = it->task_pos;   in css_task_iter_advance()   local
    3960   WARN_ON_ONCE(!l);   in css_task_iter_advance()
    3967   l = l->next;   in css_task_iter_advance()
    3969   if (l == it->tasks_head)   in css_task_iter_advance()
    [all …]
|
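The cgroup.c iterator lines show the usual intrusive-list walk: advance a bare struct list_head * cursor, detect wrap-around by comparing against the list head, and recover the containing object with container_of()/list_entry(). A self-contained userspace sketch of that pattern, with invented item names and a minimal list_head:

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct item {
        const char *name;
        struct list_head node;      /* embedded link, like cgrp_cset_link.cset_link */
    };

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
    }

    int main(void)
    {
        struct list_head head = { &head, &head };
        struct item a = { .name = "a" }, b = { .name = "b" }, c = { .name = "c" };

        list_add_tail(&a.node, &head);
        list_add_tail(&b.node, &head);
        list_add_tail(&c.node, &head);

        /* Cursor walk, stopping when we come back around to the head. */
        for (struct list_head *l = head.next; l != &head; l = l->next) {
            struct item *it = container_of(l, struct item, node);
            printf("%s\n", it->name);
        }
        return 0;
    }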
D | audit.h |
    286   #define audit_to_watch(k, p, l, o) (-EINVAL)   argument
    287   #define audit_add_watch(k, l) (-EINVAL)   argument
    292   #define audit_alloc_mark(k, p, l) (ERR_PTR(-EINVAL))   argument
|
D | resource.c |
    91   loff_t l = 0;   in r_start()   local
    93   for (p = p->child; p && l < *pos; p = r_next(m, p, &l))   in r_start()
    1444   loff_t l;   in iomem_map_sanity_check()   local
    1447   for (p = p->child; p ; p = r_next(NULL, p, &l)) {   in iomem_map_sanity_check()
    1494   loff_t l;   in iomem_is_exclusive()   local
    1503   for (p = p->child; p ; p = r_next(NULL, p, &l)) {   in iomem_is_exclusive()
|
D | module.c |
    1161   size_t l = 0;   in module_flags_taint()   local
    1164   buf[l++] = 'P';   in module_flags_taint()
    1166   buf[l++] = 'O';   in module_flags_taint()
    1168   buf[l++] = 'F';   in module_flags_taint()
    1170   buf[l++] = 'C';   in module_flags_taint()
    1172   buf[l++] = 'E';   in module_flags_taint()
    1178   return l;   in module_flags_taint()
    1240   size_t l;   in show_taint()   local
    1242   l = module_flags_taint(mk->mod, buffer);   in show_taint()
    1243   buffer[l++] = '\n';   in show_taint()
    [all …]
|
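module_flags_taint() builds the short taint-flag string shown in /proc/modules and lsmod: one character is appended per set taint bit, and the running index l doubles as the returned length so show_taint() can tack on the trailing newline. A simplified userspace version; the bit macros and letters below follow the snippet above but are named for the example, and the kernel's full taint table is longer:

    #include <stdio.h>
    #include <stddef.h>

    #define TAINT_PROPRIETARY  (1u << 0)   /* 'P' */
    #define TAINT_OOT_MODULE   (1u << 1)   /* 'O' */
    #define TAINT_FORCED_LOAD  (1u << 2)   /* 'F' */
    #define TAINT_CRAP         (1u << 3)   /* 'C' */
    #define TAINT_UNSIGNED     (1u << 4)   /* 'E' */

    static size_t flags_taint(unsigned int taints, char *buf)
    {
        size_t l = 0;

        if (taints & TAINT_PROPRIETARY) buf[l++] = 'P';
        if (taints & TAINT_OOT_MODULE)  buf[l++] = 'O';
        if (taints & TAINT_FORCED_LOAD) buf[l++] = 'F';
        if (taints & TAINT_CRAP)        buf[l++] = 'C';
        if (taints & TAINT_UNSIGNED)    buf[l++] = 'E';

        return l;                    /* characters written, no terminator */
    }

    int main(void)
    {
        char buf[8];
        size_t l = flags_taint(TAINT_OOT_MODULE | TAINT_UNSIGNED, buf);

        buf[l++] = '\n';             /* as show_taint() does before copying out */
        fwrite(buf, 1, l, stdout);   /* prints "OE" */
        return 0;
    }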
/kernel/sched/ |
D | cpudeadline.c |
    46   int l, r, largest;   in cpudl_heapify()   local
    50   l = left_child(idx);   in cpudl_heapify()
    54   if ((l < cp->size) && dl_time_before(cp->elements[idx].dl,   in cpudl_heapify()
    55   cp->elements[l].dl))   in cpudl_heapify()
    56   largest = l;   in cpudl_heapify()
|
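cpudl_heapify() is a textbook sift-down on the scheduler's deadline max-heap: compare the node at idx with its left and right children, remember the largest, swap, and continue from the swap target until the heap property holds. The same operation on a plain array of integers, with the deadline comparison replaced by a simple greater-than:

    #include <stdio.h>

    static int left_child(int i)  { return 2 * i + 1; }
    static int right_child(int i) { return 2 * i + 2; }

    static void swap_int(int *a, int *b) { int t = *a; *a = *b; *b = t; }

    /* Push the element at idx down until both children are smaller or equal. */
    static void heapify(int *heap, int size, int idx)
    {
        for (;;) {
            int l = left_child(idx), r = right_child(idx), largest = idx;

            if (l < size && heap[l] > heap[largest])
                largest = l;
            if (r < size && heap[r] > heap[largest])
                largest = r;
            if (largest == idx)
                break;

            swap_int(&heap[idx], &heap[largest]);
            idx = largest;           /* continue sifting down from the swap target */
        }
    }

    int main(void)
    {
        int heap[] = { 3, 90, 40, 10, 20 };

        heapify(heap, 5, 0);         /* restore the max-heap property at the root */
        for (int i = 0; i < 5; i++)
            printf("%d ", heap[i]);
        printf("\n");                /* prints: 90 20 40 10 3 */
        return 0;
    }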
/kernel/printk/ |
D | printk.c |
    2952   size_t l = 0;   in kmsg_dump_get_line_nolock()   local
    2969   l = msg_print_text(msg, 0, syslog, line, size);   in kmsg_dump_get_line_nolock()
    2976   *len = l;   in kmsg_dump_get_line_nolock()
    3039   size_t l = 0;   in kmsg_dump_get_buffer()   local
    3065   l += msg_print_text(msg, prev, true, NULL, 0);   in kmsg_dump_get_buffer()
    3075   while (l >= size && seq < dumper->next_seq) {   in kmsg_dump_get_buffer()
    3078   l -= msg_print_text(msg, prev, true, NULL, 0);   in kmsg_dump_get_buffer()
    3088   l = 0;   in kmsg_dump_get_buffer()
    3092   l += msg_print_text(msg, prev, syslog, buf + l, size - l);   in kmsg_dump_get_buffer()
    3104   *len = l;   in kmsg_dump_get_buffer()
|
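The kmsg_dump_get_buffer() lines use msg_print_text() as a pure length calculator when handed a NULL buffer: the code first sums record lengths to decide which records fit in the caller's buffer, then formats them for real at the running offset l. snprintf() offers the same measure-then-format idiom in userspace; the log records below are invented:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *records[] = { "usb 1-1: new device", "eth0: link up", "oom: killed pid 4242" };
        size_t n = sizeof(records) / sizeof(records[0]);
        size_t l = 0;

        /* Pass 1: measure only -- snprintf(NULL, 0, ...) returns the length needed. */
        for (size_t i = 0; i < n; i++)
            l += (size_t)snprintf(NULL, 0, "[%zu] %s\n", i, records[i]);

        char *buf = malloc(l + 1);
        if (!buf)
            return 1;

        /* Pass 2: format for real, appending at the running offset like the kernel loop. */
        size_t off = 0;
        for (size_t i = 0; i < n; i++)
            off += (size_t)sprintf(buf + off, "[%zu] %s\n", i, records[i]);

        fwrite(buf, 1, off, stdout);
        free(buf);
        return 0;
    }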
/kernel/trace/ |
D | trace_stat.c |
    218   struct stat_node *l = container_of(v, struct stat_node, node);   in stat_seq_show()   local
    223   return session->ts->stat_show(s, l->stat);   in stat_seq_show()
|
D | trace_events.c |
    888   loff_t l;   in t_start()   local
    893   for (l = 0; l <= *pos; ) {   in t_start()
    894   file = t_next(m, file, &l);   in t_start()
    921   loff_t l;   in s_start()   local
    926   for (l = 0; l <= *pos; ) {   in s_start()
    927   file = s_next(m, file, &l);   in s_start()
    1247   loff_t l = 0;   in f_start()   local
    1254   while (l < *pos && p)   in f_start()
    1255   p = f_next(m, p, &l);   in f_start()
|
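All of these t_start()/s_start()/f_start() hits (and the trace.c and ftrace.c ones below) follow the seq_file contract: ->start() must reposition the iterator at *pos, and the common way is to rewind and call ->next() until the running counter l catches up. A minimal userspace model of just that repositioning loop, with an invented event table and the seq_file machinery itself left out:

    #include <stdio.h>

    static const char *events[] = { "sched_switch", "sched_wakeup", "irq_handler_entry", NULL };

    /* ->next() analogue: advance the cursor and bump the position counter. */
    static int ev_next(int idx, long long *pos)
    {
        (*pos)++;
        return events[idx + 1] ? idx + 1 : -1;   /* -1 means end of listing */
    }

    /* ->start() analogue: rewind to "before the first record", then step to *pos. */
    static int ev_start(long long *pos)
    {
        int idx = -1;                 /* like starting at the list-head sentinel */
        long long l;

        for (l = 0; l <= *pos; ) {
            idx = ev_next(idx, &l);
            if (idx < 0)
                break;
        }
        return idx;
    }

    int main(void)
    {
        long long pos = 1;            /* resume the listing at record index 1 */

        for (int v = ev_start(&pos); v >= 0; v = ev_next(v, &pos))
            printf("%s\n", events[v]);   /* prints sched_wakeup, irq_handler_entry */
        return 0;
    }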
D | trace.c |
    2499   loff_t l = 0;   in s_start()   local
    2530   for (p = iter; p && l < *pos; p = s_next(m, p, &l))   in s_start()
    2541   l = *pos - 1;   in s_start()
    2542   p = s_next(m, p, &l);   in s_start()
    3359   loff_t l = 0;   in t_start()   local
    3364   for (; t && l < *pos; t = t_next(m, t, &l))   in t_start()
    3933   loff_t l = 0;   in saved_cmdlines_start()   local
    3939   while (l <= *pos) {   in saved_cmdlines_start()
    3940   v = saved_cmdlines_next(m, v, &l);   in saved_cmdlines_start()
    4098   loff_t l = 0;   in enum_map_start()   local
    [all …]
|
D | ftrace.c |
    3071   loff_t l;   in t_hash_start()   local
    3080   for (l = 0; l <= (*pos - iter->func_pos); ) {   in t_hash_start()
    3081   p = t_hash_next(m, &l);   in t_hash_start()
    3177   loff_t l;   in t_start()   local
    3217   for (l = 0; l <= *pos; ) {   in t_start()
    3218   p = t_next(m, p, &l);   in t_start()
|
/kernel/events/ |
D | uprobes.c |
    374   static int match_uprobe(struct uprobe *l, struct uprobe *r)   in match_uprobe()   argument
    376   if (l->inode < r->inode)   in match_uprobe()
    379   if (l->inode > r->inode)   in match_uprobe()
    382   if (l->offset < r->offset)   in match_uprobe()
    385   if (l->offset > r->offset)   in match_uprobe()
|
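match_uprobe() orders uprobes by (inode, offset): the inode is the primary key and the offset breaks ties, which is what lets the rbtree lookup find a probe from those two values alone. The same compare-by-two-keys shape in plain C, with a simplified struct standing in for struct uprobe:

    #include <stdio.h>

    struct probe {
        unsigned long inode;    /* stand-in for the inode pointer */
        unsigned long offset;   /* offset of the breakpoint within the file */
    };

    /* Negative / zero / positive; primary key first, tie broken by offset. */
    static int match_probe(const struct probe *l, const struct probe *r)
    {
        if (l->inode < r->inode)
            return -1;
        if (l->inode > r->inode)
            return 1;
        if (l->offset < r->offset)
            return -1;
        if (l->offset > r->offset)
            return 1;
        return 0;
    }

    int main(void)
    {
        struct probe a = { 100, 0x4a0 }, b = { 100, 0x7f0 };

        printf("%d\n", match_probe(&a, &b));   /* -1: same file, lower offset sorts first */
        return 0;
    }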
/kernel/time/ |
D | ntp.c |
    634   time_constant = max(time_constant, 0l);   in process_adjtimex_modes()
|