/kernel/ |
D | rtmutex.h |
    13  #define rt_mutex_deadlock_check(l) (0)  argument
    15  #define rt_mutex_deadlock_account_unlock(l) do { } while (0)  argument
    18  #define debug_rt_mutex_lock(l) do { } while (0)  argument
    19  #define debug_rt_mutex_proxy_lock(l,p) do { } while (0)  argument
    20  #define debug_rt_mutex_proxy_unlock(l) do { } while (0)  argument
    21  #define debug_rt_mutex_unlock(l) do { } while (0)  argument
    23  #define debug_rt_mutex_deadlock(d, a ,l) do { } while (0)  argument
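The rtmutex.h hits are the debug hooks as they look with the rt-mutex debugging config disabled: every hook collapses to do { } while (0), an empty statement that still demands a trailing semicolon, so call sites compile the same way whether the debug code is present or stubbed out. A minimal userspace sketch of the pattern (DEBUG_LOCKS and debug_lock_hook() are illustrative names, not kernel symbols):

    #include <stdio.h>

    /* Hook that disappears in non-debug builds. */
    #ifdef DEBUG_LOCKS
    # define debug_lock_hook(l) fprintf(stderr, "locking %p\n", (void *)(l))
    #else
    /* Stubbed out: an empty statement that still requires a semicolon,
     * so callers parse identically in both configurations. */
    # define debug_lock_hook(l) do { } while (0)
    #endif

    int main(void)
    {
        int lock = 0;

        if (lock == 0)
            debug_lock_hook(&lock);   /* compiles whether the hook is real or a stub */
        else
            puts("already held");

        return 0;
    }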
|
D | cgroup.c |
    2939  struct list_head *l = it->cg_link;  in cgroup_advance_iter()  local
    2945  l = l->next;  in cgroup_advance_iter()
    2946  if (l == &cgrp->css_sets) {  in cgroup_advance_iter()
    2950  link = list_entry(l, struct cg_cgroup_link, cgrp_link_list);  in cgroup_advance_iter()
    2953  it->cg_link = l;  in cgroup_advance_iter()
    3120  struct list_head *l = it->task;  in cgroup_iter_next()  local
    3126  res = list_entry(l, struct task_struct, cg_list);  in cgroup_iter_next()
    3128  l = l->next;  in cgroup_iter_next()
    3130  if (l == &link->cg->tasks) {  in cgroup_iter_next()
    3135  it->task = l;  in cgroup_iter_next()
    [all …]
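cgroup_advance_iter() and cgroup_iter_next() walk a circular struct list_head with a raw pointer l, stop when it wraps back to the list head, and use list_entry() (a wrapper around container_of()) to get from the node back to its containing object. A self-contained sketch of that iteration with minimal stand-ins for the kernel's list types (struct item and its fields are invented for illustration):

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal stand-ins for the kernel's struct list_head and list_entry(). */
    struct list_head {
        struct list_head *next, *prev;
    };

    #define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct item {
        int value;
        struct list_head link;    /* embedded node, like cgrp_link_list */
    };

    int main(void)
    {
        struct list_head head = { &head, &head };
        struct item a = { 1, { NULL, NULL } };
        struct item b = { 2, { NULL, NULL } };
        struct list_head *l;

        /* Hand-build the circular list: head -> a -> b -> head. */
        head.next = &a.link;
        a.link.prev = &head;
        a.link.next = &b.link;
        b.link.prev = &a.link;
        b.link.next = &head;
        head.prev = &b.link;

        /* The cgroup_advance_iter() shape: walk the raw list_head pointer,
         * stop when it wraps around to the head, and use list_entry() to
         * recover the containing structure. */
        for (l = head.next; l != &head; l = l->next) {
            struct item *it = list_entry(l, struct item, link);

            printf("%d\n", it->value);
        }
        return 0;
    }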
|
D | mutex-debug.h |
    42  struct mutex *l = container_of(lock, struct mutex, wait_lock); \
    47  DEBUG_LOCKS_WARN_ON(l->magic != l); \
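The mutex-debug.h check recovers the enclosing struct mutex from its embedded wait_lock with container_of() and then warns if l->magic != l: on initialization the magic field is set to point at the mutex itself, so a mismatch indicates an uninitialized or corrupted lock. A userspace sketch of the same self-pointer check (struct fake_mutex and debug_check() are illustrative):

    #include <assert.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Illustrative stand-in for struct mutex with its embedded wait_lock. */
    struct fake_mutex {
        int wait_lock;              /* stands in for the embedded spinlock */
        struct fake_mutex *magic;   /* set to the mutex itself at init time */
    };

    static void fake_mutex_init(struct fake_mutex *m)
    {
        m->wait_lock = 0;
        m->magic = m;    /* the self-pointer acts as an "initialized" stamp */
    }

    /* Given only the embedded member, recover the mutex and sanity-check it,
     * mirroring DEBUG_LOCKS_WARN_ON(l->magic != l) in mutex-debug.h. */
    static void debug_check(int *lock)
    {
        struct fake_mutex *l = container_of(lock, struct fake_mutex, wait_lock);

        assert(l->magic == l);    /* trips on an uninitialized or corrupted mutex */
    }

    int main(void)
    {
        struct fake_mutex m;

        fake_mutex_init(&m);
        debug_check(&m.wait_lock);
        return 0;
    }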
|
D | spinlock.c |
    35  #define raw_read_can_lock(l)   read_can_lock(l)  argument
    36  #define raw_write_can_lock(l)  write_can_lock(l)  argument
|
D | printk.c |
    2678  size_t l = 0;  in kmsg_dump_get_line_nolock()  local
    2695  l = msg_print_text(msg, 0, syslog, line, size);  in kmsg_dump_get_line_nolock()
    2702  *len = l;  in kmsg_dump_get_line_nolock()
    2765  size_t l = 0;  in kmsg_dump_get_buffer()  local
    2791  l += msg_print_text(msg, prev, true, NULL, 0);  in kmsg_dump_get_buffer()
    2801  while (l > size && seq < dumper->next_seq) {  in kmsg_dump_get_buffer()
    2804  l -= msg_print_text(msg, prev, true, NULL, 0);  in kmsg_dump_get_buffer()
    2814  l = 0;  in kmsg_dump_get_buffer()
    2819  l += msg_print_text(msg, prev, syslog, buf + l, size - l);  in kmsg_dump_get_buffer()
    2831  *len = l;  in kmsg_dump_get_buffer()
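In kmsg_dump_get_buffer() the counter l is first used only to size the candidate records (msg_print_text() with a NULL buffer reports length), records are then dropped from the old end while l exceeds the buffer, and a second pass appends each survivor at buf + l with size - l bytes of room left. A rough sketch of that two-pass, newest-records-that-fit pattern over plain strings (record_len() and record_emit() are stand-ins, not kernel APIs):

    #include <stdio.h>
    #include <string.h>

    /* Stand-ins for msg_print_text(): record_len() only reports the formatted
     * length, record_emit() copies into whatever space is left. */
    static size_t record_len(const char *rec)
    {
        return strlen(rec) + 1;    /* +1 for the trailing newline */
    }

    static size_t record_emit(const char *rec, char *buf, size_t space)
    {
        size_t n = record_len(rec);

        if (n > space)
            return n;
        memcpy(buf, rec, n - 1);
        buf[n - 1] = '\n';
        return n;
    }

    int main(void)
    {
        const char *records[] = { "oldest", "middle one", "newest record" };
        size_t count = sizeof(records) / sizeof(records[0]);
        char buf[16];
        size_t first = 0, i, l = 0;

        /* Pass 1: total length of all candidate records (the sizing loop
         * in kmsg_dump_get_buffer()). */
        for (i = 0; i < count; i++)
            l += record_len(records[i]);

        /* Drop the oldest records until what remains fits, mirroring
         * "while (l > size && seq < dumper->next_seq) l -= ...". */
        while (l > sizeof(buf) && first < count)
            l -= record_len(records[first++]);

        /* Pass 2: append the survivors at buf + l, size - l, as in the
         * "l += msg_print_text(..., buf + l, size - l)" loop. */
        l = 0;
        for (i = first; i < count; i++)
            l += record_emit(records[i], buf + l, sizeof(buf) - l);

        fwrite(buf, 1, l, stdout);
        return 0;
    }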
|
D | resource.c |
    81    loff_t l = 0;  in r_start()  local
    83    for (p = p->child; p && l < *pos; p = r_next(m, p, &l))  in r_start()
    1271  loff_t l;  in iomem_map_sanity_check()  local
    1274  for (p = p->child; p ; p = r_next(NULL, p, &l)) {  in iomem_map_sanity_check()
    1324  loff_t l;  in iomem_is_exclusive()  local
    1333  for (p = p->child; p ; p = r_next(NULL, p, &l)) {  in iomem_is_exclusive()
|
D | params.c |
    233  tmptype l; \
    236  ret = strtolfn(val, 0, &l); \
    237  if (ret < 0 || ((type)l != l)) \
    239  *((type *)kp->arg) = l; \
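params.c parses into a wide temporary (tmptype l) and rejects the value when casting it to the destination type and back changes it, i.e. when it does not fit; both a parse failure and an out-of-range value are refused. A sketch of the same round-trip range check when narrowing a long to unsigned char (parse_u8() is an invented helper):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse into a wide temporary first, then require that the value survive
     * a round trip through the narrower destination type, the "(type)l != l"
     * test from params.c. */
    static int parse_u8(const char *val, unsigned char *out)
    {
        char *end;
        long l;

        errno = 0;
        l = strtol(val, &end, 0);
        if (errno || end == val || *end != '\0')
            return -EINVAL;
        if ((unsigned char)l != l)    /* does not fit the target type */
            return -EINVAL;

        *out = (unsigned char)l;
        return 0;
    }

    int main(void)
    {
        unsigned char v;

        printf("255 -> %d\n", parse_u8("255", &v));  /* fits: 0 */
        printf("300 -> %d\n", parse_u8("300", &v));  /* too big: -EINVAL */
        printf("-1  -> %d\n", parse_u8("-1", &v));   /* negative: -EINVAL */
        return 0;
    }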
|
D | audit.h |
    271  #define audit_to_watch(k, p, l, o)  (-EINVAL)  argument
    272  #define audit_add_watch(k, l)       (-EINVAL)  argument
|
D | rtmutex.c |
    76  # define rt_mutex_cmpxchg(l,c,n)  (cmpxchg(&l->owner, c, n) == c)  argument
    86  # define rt_mutex_cmpxchg(l,c,n)  (0)  argument
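The rtmutex.c fast path takes the lock only if cmpxchg() finds the expected owner still in place; on configurations without a suitable cmpxchg the macro is defined to (0), forcing the slow path. A userspace analogue using C11 atomics rather than the kernel primitive (struct fake_rt_mutex and fast_trylock() are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustrative stand-in for struct rt_mutex; NULL owner means unlocked. */
    struct fake_rt_mutex {
        _Atomic(void *) owner;
    };

    /* Compare-and-swap fast path: succeed only if owner still equals the
     * expected value, mirroring "cmpxchg(&l->owner, c, n) == c". */
    static int fast_trylock(struct fake_rt_mutex *l, void *expected, void *new_owner)
    {
        return atomic_compare_exchange_strong(&l->owner, &expected, new_owner);
    }

    int main(void)
    {
        struct fake_rt_mutex m = { .owner = NULL };
        int me = 0, other = 0;

        /* The first acquisition wins; the second sees a different owner and
         * would have to fall back to the slow path. */
        printf("first try:  %d\n", fast_trylock(&m, NULL, &me));
        printf("second try: %d\n", fast_trylock(&m, NULL, &other));
        return 0;
    }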
|
D | module.c |
    1033  size_t l = 0;  in module_flags_taint()  local
    1036  buf[l++] = 'P';  in module_flags_taint()
    1038  buf[l++] = 'O';  in module_flags_taint()
    1040  buf[l++] = 'F';  in module_flags_taint()
    1042  buf[l++] = 'C';  in module_flags_taint()
    1048  return l;  in module_flags_taint()
    1110  size_t l;  in show_taint()  local
    1112  l = module_flags_taint(mk->mod, buffer);  in show_taint()
    1113  buffer[l++] = '\n';  in show_taint()
    1114  return l;  in show_taint()
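module_flags_taint() appends one character per set taint flag to a caller-supplied buffer and returns the number of characters written; show_taint() then adds the trailing newline itself. A small sketch of that build-a-flags-string idiom (the FLAG_* bits and flags_to_string() are invented stand-ins for the kernel's taint flags):

    #include <stdio.h>

    /* Invented flag bits standing in for the kernel's taint flags. */
    #define FLAG_PROPRIETARY  (1u << 0)   /* 'P' */
    #define FLAG_OUT_OF_TREE  (1u << 1)   /* 'O' */
    #define FLAG_FORCED       (1u << 2)   /* 'F' */

    /* Append one character per set bit and return the length written, the
     * shape of module_flags_taint(); the caller owns the buffer and newline. */
    static size_t flags_to_string(unsigned int flags, char *buf)
    {
        size_t l = 0;

        if (flags & FLAG_PROPRIETARY)
            buf[l++] = 'P';
        if (flags & FLAG_OUT_OF_TREE)
            buf[l++] = 'O';
        if (flags & FLAG_FORCED)
            buf[l++] = 'F';
        return l;
    }

    int main(void)
    {
        char buf[8];
        size_t l = flags_to_string(FLAG_PROPRIETARY | FLAG_FORCED, buf);

        buf[l++] = '\n';              /* as show_taint() does */
        fwrite(buf, 1, l, stdout);    /* prints "PF" plus the newline */
        return 0;
    }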
|
D | lockdep_proc.c |
    378  static int lock_stat_cmp(const void *l, const void *r)  in lock_stat_cmp()  argument
    380  const struct lock_stat_data *dl = l, *dr = r;  in lock_stat_cmp()
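lock_stat_cmp() is the comparison callback handed to the kernel's sort(): it receives const void * pointers and converts them back to the element type before comparing. The same shape works with libc qsort(); in this sketch the struct fields and sample numbers are made up, and the sort key (contention count, descending) is chosen purely for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    /* Invented element type; the fields and sample numbers are made up. */
    struct lock_stat {
        const char *name;
        long contentions;
    };

    /* Same shape as lock_stat_cmp(): cast the opaque pointers back to the
     * element type and return <0, 0 or >0; this one sorts by contention
     * count, highest first. */
    static int stat_cmp(const void *l, const void *r)
    {
        const struct lock_stat *dl = l, *dr = r;

        if (dl->contentions != dr->contentions)
            return dl->contentions < dr->contentions ? 1 : -1;
        return 0;
    }

    int main(void)
    {
        struct lock_stat stats[] = {
            { "rq->lock", 120 },
            { "dcache_lock", 980 },
            { "tasklist_lock", 40 },
        };
        size_t i, n = sizeof(stats) / sizeof(stats[0]);

        qsort(stats, n, sizeof(stats[0]), stat_cmp);
        for (i = 0; i < n; i++)
            printf("%s: %ld\n", stats[i].name, stats[i].contentions);
        return 0;
    }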
|
/kernel/trace/ |
D | trace_events.c |
    559  loff_t l;  in t_start()  local
    564  for (l = 0; l <= *pos; ) {  in t_start()
    565  file = t_next(m, file, &l);  in t_start()
    592  loff_t l;  in s_start()  local
    597  for (l = 0; l <= *pos; ) {  in s_start()
    598  file = s_next(m, file, &l);  in s_start()
    813  loff_t l = 0;  in f_start()  local
    822  p = f_next(m, p, &l);  in f_start()
    823  } while (p && l < *pos);  in f_start()
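All of the *_start() hits in the tracing code follow the seq_file convention: start() rewinds to the first element and then replays the matching *_next() until the running position l passes *pos, so a read that resumes partway through the file lands on the right record. A self-contained sketch of that replay loop over a plain array, using long in place of loff_t (ev_next(), ev_start() and the sample event names are illustrative):

    #include <stdio.h>

    /* Sample data; the event names are only illustrative. */
    static const char *events[] = {
        "sched_switch", "sched_wakeup", "irq_handler_entry",
    };
    #define NR_EVENTS ((long)(sizeof(events) / sizeof(events[0])))

    /* Stand-in for a seq_file ->next(): return the element at the current
     * position and advance it, or return NULL past the end. */
    static const char *ev_next(const char *cur, long *pos)
    {
        (void)cur;    /* a real ->next() would continue from cur */
        return *pos < NR_EVENTS ? events[(*pos)++] : NULL;
    }

    /* Stand-in for a seq_file ->start(): replay ->next() until the running
     * position l passes *pos, the loop shape shared by t_start(), s_start()
     * and f_start() in the trace code. */
    static const char *ev_start(const long *pos)
    {
        const char *e = NULL;
        long l;

        for (l = 0; l <= *pos; ) {
            e = ev_next(e, &l);
            if (!e)
                break;
        }
        return e;
    }

    int main(void)
    {
        long pos;

        for (pos = 0; pos < 5; pos++) {
            const char *e = ev_start(&pos);

            printf("pos %ld -> %s\n", pos, e ? e : "(end)");
        }
        return 0;
    }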
|
D | trace_stat.c |
    249  struct stat_node *l = container_of(v, struct stat_node, node);  in stat_seq_show()  local
    254  return session->ts->stat_show(s, l->stat);  in stat_seq_show()
|
D | ftrace.c |
    2383  loff_t l;  in t_hash_start()  local
    2392  for (l = 0; l <= (*pos - iter->func_pos); ) {  in t_hash_start()
    2393  p = t_hash_next(m, &l);  in t_hash_start()
    2489  loff_t l;  in t_start()  local
    2527  for (l = 0; l <= *pos; ) {  in t_start()
    2528  p = t_next(m, p, &l);  in t_start()
|
D | trace.c |
    2241  loff_t l = 0;  in s_start()  local
    2275  for (p = iter; p && l < *pos; p = s_next(m, p, &l))  in s_start()
    2286  l = *pos - 1;  in s_start()
    2287  p = s_next(m, p, &l);  in s_start()
    3006  loff_t l = 0;  in t_start()  local
    3009  for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))  in t_start()
|
/kernel/events/ |
D | uprobes.c |
    335  static int match_uprobe(struct uprobe *l, struct uprobe *r)  in match_uprobe()  argument
    337  if (l->inode < r->inode)  in match_uprobe()
    340  if (l->inode > r->inode)  in match_uprobe()
    343  if (l->offset < r->offset)  in match_uprobe()
    346  if (l->offset > r->offset)  in match_uprobe()
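match_uprobe() defines a total order over uprobes keyed by (inode, offset): the primary key decides unless it ties, then the secondary key is compared, and the signed result can drive an rbtree lookup or insertion. A sketch of the same two-field, three-way compare (struct key and match_key() are illustrative):

    #include <stdio.h>

    /* Illustrative stand-in for struct uprobe, ordered by (inode, offset). */
    struct key {
        unsigned long inode;
        unsigned long offset;
    };

    /* Same shape as match_uprobe(): the primary key decides unless it ties,
     * then the secondary key is compared; the signed result gives a total
     * order suitable for an rbtree lookup or insertion. */
    static int match_key(const struct key *l, const struct key *r)
    {
        if (l->inode < r->inode)
            return -1;
        if (l->inode > r->inode)
            return 1;

        if (l->offset < r->offset)
            return -1;
        if (l->offset > r->offset)
            return 1;

        return 0;
    }

    int main(void)
    {
        struct key a = { 10, 0x400 };
        struct key b = { 10, 0x800 };
        struct key c = { 10, 0x400 };

        printf("a vs b: %d\n", match_key(&a, &b));  /* -1: same inode, lower offset */
        printf("b vs a: %d\n", match_key(&b, &a));  /*  1 */
        printf("a vs c: %d\n", match_key(&a, &c));  /*  0: identical keys */
        return 0;
    }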
|
/kernel/time/ |
D | ntp.c | 586 time_constant = max(time_constant, 0l); in process_adjtimex_modes()
|