/kernel/power/

userwakelock.c:
  in lookup_wake_lock_name():
     46  struct user_wake_lock *l;  [local]
     83  l = rb_entry(parent, struct user_wake_lock, node);
     84  diff = strncmp(buf, l->name, name_len);
     85  if (!diff && l->name[name_len])
     89  name_len, buf, l->name, diff);
     96  return l;
    106  l = kzalloc(sizeof(*l) + name_len + 1, GFP_KERNEL);
    107  if (l == NULL) {
    113  memcpy(l->name, buf, name_len);
    115  pr_info("lookup_wake_lock_name: new wake lock %s\n", l->name);
    [all …]
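A detail worth noting in the lookup_wake_lock_name() lines above: strncmp() alone would also accept any stored name that merely starts with the queried bytes, so the extra l->name[name_len] test is what makes the match exact, and kzalloc(sizeof(*l) + name_len + 1, ...) sizes the node so the name is stored inline after the fixed fields. A minimal user-space sketch of both ideas; the struct and function names below are illustrative, not taken from the kernel:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Node with its name stored inline after the fixed fields, mirroring
 * the sizeof(*l) + name_len + 1 allocation above. */
struct named_node {
    int value;
    char name[];                 /* flexible array member, NUL-terminated */
};

/* True only if n->name is exactly buf[0..name_len): strncmp() matching
 * plus a check that nothing follows the compared prefix. */
static int name_matches(const struct named_node *n, const char *buf, size_t name_len)
{
    return strncmp(buf, n->name, name_len) == 0 && n->name[name_len] == '\0';
}

static struct named_node *named_node_new(const char *buf, size_t name_len)
{
    struct named_node *n = calloc(1, sizeof(*n) + name_len + 1);

    if (n == NULL)
        return NULL;
    memcpy(n->name, buf, name_len);   /* calloc() already wrote the trailing NUL */
    return n;
}

int main(void)
{
    struct named_node *n = named_node_new("wlan_rx", 7);

    printf("%d %d\n", name_matches(n, "wlan_rx", 7),   /* 1: exact match     */
                      name_matches(n, "wlan", 4));     /* 0: prefix rejected */
    free(n);
    return 0;
}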
/kernel/

kfifo.c:
  in __kfifo_put():
    122  unsigned int l;  [local]
    134  l = min(len, fifo->size - (fifo->in & (fifo->size - 1)));
    135  memcpy(fifo->buffer + (fifo->in & (fifo->size - 1)), buffer, l);
    138  memcpy(fifo->buffer, buffer + l, len - l);
  in __kfifo_get():
    168  unsigned int l;  [local]
    180  l = min(len, fifo->size - (fifo->out & (fifo->size - 1)));
    181  memcpy(buffer, fifo->buffer + (fifo->out & (fifo->size - 1)), l);
    184  memcpy(buffer + l, fifo->buffer, len - l);
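The paired memcpy() calls in __kfifo_put() and __kfifo_get() are the classic wrap-around copy for a power-of-two ring buffer: the first copy runs from the masked offset to the end of the storage, the second restarts at offset zero with whatever is left. A stand-alone sketch of the put side under simplifying assumptions (single producer, no barriers or locking, which the real kfifo does handle):

#include <string.h>

struct ring {
    unsigned char *buffer;    /* storage; size must be a power of two */
    unsigned int size;
    unsigned int in;          /* free-running write counter */
    unsigned int out;         /* free-running read counter */
};

static unsigned int umin(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

/* Copy up to len bytes in, returning how many actually fit.  The
 * counters are never wrapped themselves; (in & (size - 1)) is the real
 * offset, and in - out is the fill level even across counter overflow. */
static unsigned int ring_put(struct ring *fifo, const unsigned char *src,
                             unsigned int len)
{
    unsigned int l;

    len = umin(len, fifo->size - fifo->in + fifo->out);   /* free space */

    /* first chunk: from the write offset up to the end of the buffer */
    l = umin(len, fifo->size - (fifo->in & (fifo->size - 1)));
    memcpy(fifo->buffer + (fifo->in & (fifo->size - 1)), src, l);

    /* second chunk: the remainder wraps around to the start */
    memcpy(fifo->buffer, src + l, len - l);

    fifo->in += len;
    return len;
}

The get side is symmetric, indexing with fifo->out instead of fifo->in and advancing out after the copies.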
rtmutex.h:
     13  #define rt_mutex_deadlock_check(l) (0)  [argument]
     15  #define rt_mutex_deadlock_account_unlock(l) do { } while (0)  [argument]
     18  #define debug_rt_mutex_lock(l) do { } while (0)  [argument]
     19  #define debug_rt_mutex_proxy_lock(l,p) do { } while (0)  [argument]
     20  #define debug_rt_mutex_proxy_unlock(l) do { } while (0)  [argument]
     21  #define debug_rt_mutex_unlock(l) do { } while (0)  [argument]
     23  #define debug_rt_mutex_deadlock(d, a ,l) do { } while (0)  [argument]
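These stubs expand to do { } while (0) rather than to nothing so that, with rt-mutex debugging disabled, a call site still parses as exactly one statement that requires its trailing semicolon: it can sit as the unbraced body of an if/else just like the real debug function, and it leaves no bare ';' behind for options such as -Wempty-body to warn about. A tiny illustration with a made-up hook name:

#include <stdio.h>

/* Build with -DDEBUG_LOCKS to get a real hook; otherwise the stub
 * compiles away to a single empty statement. */
#ifdef DEBUG_LOCKS
# define debug_lock_hook(l)  printf("taking lock %p\n", (void *)(l))
#else
# define debug_lock_hook(l)  do { } while (0)
#endif

int main(void)
{
    int lock = 0;

    /* The call site reads identically whether the hook is real or a
     * stub, even without braces around the if body. */
    if (!lock)
        debug_lock_hook(&lock);
    else
        puts("already locked");

    return 0;
}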
timeconst.pl:
    321  my @l = ();
    325  push(@l, 'undef');
    327  push(@l, "\'".$v."\'");
    329  push(@l, $v.'');
    332  return join(',', @l);
mutex-debug.h:
     40  struct mutex *l = container_of(lock, struct mutex, wait_lock); \
     45  DEBUG_LOCKS_WARN_ON(l->magic != l); \
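The DEBUG_LOCKS_WARN_ON(l->magic != l) check works because ->magic is initialised to point at the mutex itself; any later mismatch means the lock was never initialised, was copied by value, or has been overwritten. A hedged user-space sketch of the same self-referential magic idea (type and function names here are made up):

#include <stdio.h>
#include <string.h>

struct checked_lock {
    int locked;
    struct checked_lock *magic;   /* set to point at the lock itself */
};

static void checked_lock_init(struct checked_lock *l)
{
    l->locked = 0;
    l->magic = l;
}

/* Returns 0 on success, -1 if the lock looks corrupted, copied by
 * value, or never initialised (its magic no longer points at itself). */
static int checked_lock_acquire(struct checked_lock *l)
{
    if (l->magic != l)
        return -1;
    l->locked = 1;
    return 0;
}

int main(void)
{
    struct checked_lock a, b;

    checked_lock_init(&a);
    printf("%d\n", checked_lock_acquire(&a));  /* 0: healthy lock */

    memcpy(&b, &a, sizeof(b));                 /* struct copied by value */
    printf("%d\n", checked_lock_acquire(&b));  /* -1: b.magic still points at a */
    return 0;
}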
resource.c:
  in r_start():
     61  loff_t l = 0;  [local]
     63  for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
  in iomem_map_sanity_check():
    843  loff_t l;  [local]
    846  for (p = p->child; p ; p = r_next(NULL, p, &l)) {
  in iomem_is_exclusive():
    896  loff_t l;  [local]
    905  for (p = p->child; p ; p = r_next(NULL, p, &l)) {
params.c:
    183  tmptype l; \
    187  ret = strtolfn(val, 0, &l); \
    188  if (ret == -EINVAL || ((type)l != l)) \
    190  *((type *)kp->arg) = l; \
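The ((type)l != l) test above is a compact range check: the string is parsed into a wide temporary, narrowed to the target type, and the comparison promotes the narrowed value back, so any value that does not survive the round trip is rejected. A small stand-alone version for parsing into a short; the function name and the -ERANGE return are choices made for this sketch, not the kernel's:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a long, then verify it survives conversion to short unchanged. */
static int parse_short(const char *val, short *out)
{
    char *end;
    long l;

    errno = 0;
    l = strtol(val, &end, 0);
    if (errno || end == val || *end != '\0')
        return -EINVAL;
    if ((short)l != l)            /* round-trip check: value out of range */
        return -ERANGE;
    *out = (short)l;
    return 0;
}

int main(void)
{
    short s;

    printf("%d\n", parse_short("1234", &s));    /* 0: fits in a short */
    printf("%d\n", parse_short("70000", &s));   /* -ERANGE: does not fit */
    return 0;
}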
cgroup.c:
  in cgroup_advance_iter():
   1759  struct list_head *l = it->cg_link;  [local]
   1765  l = l->next;
   1766  if (l == &cgrp->css_sets) {
   1770  link = list_entry(l, struct cg_cgroup_link, cgrp_link_list);
   1773  it->cg_link = l;
  in cgroup_iter_next():
   1824  struct list_head *l = it->task;  [local]
   1830  res = list_entry(l, struct task_struct, cg_list);
   1832  l = l->next;
   1834  if (l == &link->cg->tasks) {
   1839  it->task = l;
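Both iterators above keep a bare struct list_head * cursor in the iterator and detect the end of the walk by comparing the cursor against the list head itself (l == &cgrp->css_sets, l == &link->cg->tasks), which is how a circular list with a sentinel head terminates. A compact user-space sketch of that resumable-cursor pattern, with a minimal list implementation standing in for the kernel's:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}

#define list_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct item { int value; struct list_head node; };

/* Resumable iterator: the cursor lives outside the loop, and the walk
 * ends when the cursor wraps back around to the sentinel head. */
struct iter { struct list_head *cursor; };

static struct item *iter_next(struct iter *it, struct list_head *head)
{
    struct list_head *l = it->cursor->next;

    if (l == head)
        return NULL;              /* back at the head: iteration done */
    it->cursor = l;
    return list_entry(l, struct item, node);
}

int main(void)
{
    struct list_head head;
    struct item a = { 1 }, b = { 2 }, c = { 3 };
    struct iter it = { &head };
    struct item *p;

    list_init(&head);
    list_add_tail(&a.node, &head);
    list_add_tail(&b.node, &head);
    list_add_tail(&c.node, &head);

    while ((p = iter_next(&it, &head)))
        printf("%d\n", p->value);   /* prints 1, 2, 3 */
    return 0;
}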
rtmutex.c:
     82  # define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c)  [argument]
     92  # define rt_mutex_cmpxchg(l,c,n) (0)  [argument]
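rt_mutex_cmpxchg(l, c, n) is the lock's fast path: a single compare-and-swap on ->owner, with the == c test telling the caller whether it won, and the (0) variant forcing the slow path when a usable cmpxchg is not available. A sketch of the same fast-path shape using C11 atomics; the names and the NULL-means-unlocked convention are assumptions of this sketch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fast_lock {
    _Atomic(void *) owner;    /* NULL while unlocked */
};

/* True iff owner still held `expected` and was swung to `next` in one
 * atomic step, i.e. the cmpxchg(...) == c idiom. */
static bool lock_cmpxchg(struct fast_lock *l, void *expected, void *next)
{
    return atomic_compare_exchange_strong(&l->owner, &expected, next);
}

static bool try_fast_lock(struct fast_lock *l, void *self)
{
    return lock_cmpxchg(l, NULL, self);   /* free -> owned by self */
}

static bool try_fast_unlock(struct fast_lock *l, void *self)
{
    return lock_cmpxchg(l, self, NULL);   /* owned by self -> free */
}

int main(void)
{
    struct fast_lock lk = { NULL };
    int me;

    printf("%d\n", try_fast_lock(&lk, &me));    /* 1: fast path wins      */
    printf("%d\n", try_fast_lock(&lk, &me));    /* 0: would go slow path  */
    printf("%d\n", try_fast_unlock(&lk, &me));  /* 1: released            */
    return 0;
}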
lockdep_proc.c:
  in lock_stat_cmp():
    449  static int lock_stat_cmp(const void *l, const void *r)  [argument]
    451  const struct lock_stat_data *dl = l, *dr = r;
/kernel/trace/

trace_branch.c:
  in t_start():
    214  loff_t l = 0;  [local]
    216  for (; t && l < *pos; t = t_next(m, t, &l))
trace_stack.c:
  in t_start():
    214  loff_t l = 0;  [local]
    222  for (; t && l < *pos; t = t_next(m, t, &l))
trace.c:
   1433  loff_t l = 0;  [local]
   1458  for (p = iter; p && l < *pos; p = s_next(m, p, &l))
   1462  l = *pos - 1;
   1463  p = s_next(m, p, &l);
   2573  loff_t l = 0;  [local]
   2576  for (; t && l < *pos; t = t_next(m, t, &l))
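The loops in trace_branch.c, trace_stack.c and trace.c above all follow the seq_file convention: the ->start() callback must return the element at *pos, so each one re-walks its data from the beginning, bumping a local loff_t until it catches up with the requested position. A stand-alone sketch of that skip-ahead loop over a simple array-backed sequence (names are illustrative, not the tracing code itself):

#include <stdio.h>

static const char *items[] = { "alpha", "beta", "gamma", NULL };

/* ->next(): advance one element and bump the position counter. */
static const char *const *seq_next(const char *const *v, long long *pos)
{
    ++*pos;
    return *(v + 1) ? v + 1 : NULL;
}

/* ->start(): may be called with any *pos, so the element is re-derived
 * by walking from the beginning until the local counter catches up,
 * the same shape as the for (; t && l < *pos; ...) loops above. */
static const char *const *seq_start(long long *pos)
{
    const char *const *v = &items[0];
    long long l = 0;

    for (; v && l < *pos; v = seq_next(v, &l))
        ;
    return v;
}

int main(void)
{
    long long pos = 1;
    const char *const *v = seq_start(&pos);

    printf("%s\n", v && *v ? *v : "(past end)");   /* prints "beta" */
    return 0;
}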
/kernel/time/

ntp.c:
  in do_adjtimex():
    383  time_constant = max(time_constant, 0l);
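The 0l in max(time_constant, 0l) is there because time_constant is a long and the kernel's max() macro rejects arguments of different types at compile time; a plain 0 would be an int and draw a warning. A sketch of a max macro with the same type check, written with GNU C extensions as the kernel's own version is (the macro name here is made up):

#include <stdio.h>

/* Evaluates each argument once; the (void) pointer comparison makes the
 * compiler emit "comparison of distinct pointer types" when x and y do
 * not have the same type. */
#define max_checked(x, y) ({                  \
        __typeof__(x) _x = (x);               \
        __typeof__(y) _y = (y);               \
        (void)(&_x == &_y);                   \
        _x > _y ? _x : _y; })

int main(void)
{
    long time_constant = -3;

    time_constant = max_checked(time_constant, 0l);   /* clamps to 0 */
    /* max_checked(time_constant, 0) would warn: long * vs int *. */
    printf("%ld\n", time_constant);
    return 0;
}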