/kernel/: uses of the identifier q, by file
wait.c:
    13  void init_waitqueue_head(wait_queue_head_t *q)                    in init_waitqueue_head() argument
    15          spin_lock_init(&q->lock);                                 in init_waitqueue_head()
    16          INIT_LIST_HEAD(&q->task_list);                            in init_waitqueue_head()
    21  void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)     in add_wait_queue() argument
    26          spin_lock_irqsave(&q->lock, flags);                       in add_wait_queue()
    27          __add_wait_queue(q, wait);                                in add_wait_queue()
    28          spin_unlock_irqrestore(&q->lock, flags);                  in add_wait_queue()
    32  void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)   in add_wait_queue_exclusive() argument
    37          spin_lock_irqsave(&q->lock, flags);                       in add_wait_queue_exclusive()
    38          __add_wait_queue_tail(q, wait);                           in add_wait_queue_exclusive()
    [all …]
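These hits are the core of the waitqueue API: initialise the head, then link waiters onto q->task_list under q->lock (add_wait_queue_exclusive() appends at the tail, so exclusive waiters are woken last and one at a time). A minimal sketch of typical use, assuming a hypothetical driver flag data_ready that is not part of the listing:

    #include <linux/wait.h>
    #include <linux/errno.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_wq);  /* static form of init_waitqueue_head() */
    static int data_ready;                  /* hypothetical condition */

    /* consumer: sleep until data_ready becomes nonzero */
    static int consumer(void)
    {
            /* queues us on my_wq.task_list, much as add_wait_queue() above */
            if (wait_event_interruptible(my_wq, data_ready))
                    return -ERESTARTSYS;    /* interrupted by a signal */
            return 0;
    }

    /* producer: satisfy the condition, then wake sleepers */
    static void producer(void)
    {
            data_ready = 1;
            wake_up_interruptible(&my_wq);  /* walks task_list under q->lock */
    }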
latencytop.c:
    67          int q, same = 1;                                          in account_global_scheduler_latency() local
    75          for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {               in account_global_scheduler_latency()
    76                  unsigned long record = lat->backtrace[q];         in account_global_scheduler_latency()
    78                  if (latency_record[i].backtrace[q] != record) {   in account_global_scheduler_latency()
   119          int i, q;                                                 in account_scheduler_latency() local
   151          for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {               in account_scheduler_latency()
   152                  unsigned long record = lat.backtrace[q];          in account_scheduler_latency()
   154                  if (mylat->backtrace[q] != record) {              in account_scheduler_latency()
   188          int q;                                                    in lstats_show() local
   193          for (q = 0; q < LT_BACKTRACEDEPTH; q++) {                 in lstats_show()
    [all …]
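Both account_*_latency() loops do the same job: decide whether a freshly captured backtrace matches an already recorded one, slot by slot up to LT_BACKTRACEDEPTH. A hedged stand-alone restatement of that comparison (the name same_backtrace is hypothetical; a zero slot is treated as end-of-trace, as latencytop does):

    #include <linux/types.h>

    #define LT_BACKTRACEDEPTH 12    /* kernel value of this era */

    static bool same_backtrace(const unsigned long *a, const unsigned long *b)
    {
            int q;

            for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                    unsigned long record = a[q];

                    if (!record)            /* trace exhausted: all prior slots matched */
                            return true;
                    if (b[q] != record)
                            return false;
            }
            return true;
    }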
futex.c:
   612  static void wake_futex(struct futex_q *q)                         in wake_futex() argument
   614          plist_del(&q->list, &q->list.plist);                      in wake_futex()
   619          wake_up(&q->waiter);                                      in wake_futex()
   630          q->lock_ptr = NULL;                                       in wake_futex()
   975  static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)   in queue_lock() argument
   979          init_waitqueue_head(&q->waiter);                          in queue_lock()
   981          get_futex_key_refs(&q->key);                              in queue_lock()
   982          hb = hash_futex(&q->key);                                 in queue_lock()
   983          q->lock_ptr = &hb->lock;                                  in queue_lock()
   989  static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)   in queue_me() argument
    [all …]
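Each struct futex_q represents one waiter: queue_lock() hashes the futex key to a bucket, queue_me() parks the waiter there, and wake_futex() unlinks it and wakes its waitqueue. Userspace reaches this machinery through the futex() syscall; a hedged sketch of the two basic operations (the wrapper names are hypothetical):

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stddef.h>

    /* sleep while *uaddr still holds val; the kernel hashes uaddr
     * (hash_futex() above) and queues a futex_q on that bucket */
    static long futex_wait(int *uaddr, int val)
    {
            return syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL, NULL, 0);
    }

    /* wake at most nr waiters queued on uaddr's bucket (the wake_futex() path) */
    static long futex_wake(int *uaddr, int nr)
    {
            return syscall(SYS_futex, uaddr, FUTEX_WAKE, nr, NULL, NULL, 0);
    }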
signal.c:
   190          struct sigqueue *q = NULL;                                in __sigqueue_alloc() local
   204          q = kmem_cache_alloc(sigqueue_cachep, flags);             in __sigqueue_alloc()
   205          if (unlikely(q == NULL)) {                                in __sigqueue_alloc()
   209          INIT_LIST_HEAD(&q->list);                                 in __sigqueue_alloc()
   210          q->flags = 0;                                             in __sigqueue_alloc()
   211          q->user = user;                                           in __sigqueue_alloc()
   214          return q;                                                 in __sigqueue_alloc()
   217  static void __sigqueue_free(struct sigqueue *q)                   in __sigqueue_free() argument
   219          if (q->flags & SIGQUEUE_PREALLOC)                         in __sigqueue_free()
   221          atomic_dec(&q->user->sigpending);                         in __sigqueue_free()
    [all …]
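Every queued signal costs one struct sigqueue, charged against a user's sigpending count (bounded by RLIMIT_SIGPENDING) and released again in __sigqueue_free(). From userspace, the POSIX sigqueue() call is what lands in this allocator; a hedged sketch (post_rt_signal is a hypothetical wrapper):

    #include <sys/types.h>
    #include <signal.h>
    #include <stdio.h>

    /* send SIGRTMIN with an integer payload; the kernel allocates a
     * struct sigqueue for it, as in __sigqueue_alloc() above */
    static int post_rt_signal(pid_t pid, int payload)
    {
            union sigval value;

            value.sival_int = payload;
            if (sigqueue(pid, SIGRTMIN, value) == -1) {
                    perror("sigqueue");     /* EAGAIN once the queue limit is hit */
                    return -1;
            }
            return 0;
    }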
smp.c:
    42          struct call_single_queue *q = &per_cpu(call_single_queue, i);   in init_call_single_data() local
    44          spin_lock_init(&q->lock);                                 in init_call_single_data()
    45          INIT_LIST_HEAD(&q->list);                                 in init_call_single_data()
   155          struct call_single_queue *q = &__get_cpu_var(call_single_queue);   in generic_smp_call_function_single_interrupt() local
   163          while (!list_empty(&q->list)) {                           in generic_smp_call_function_single_interrupt()
   166                  spin_lock(&q->lock);                              in generic_smp_call_function_single_interrupt()
   167                  list_replace_init(&q->list, &list);               in generic_smp_call_function_single_interrupt()
   168                  spin_unlock(&q->lock);                            in generic_smp_call_function_single_interrupt()
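Each CPU owns a call_single_queue; senders append work to the target CPU's queue and raise an IPI, and the interrupt handler above splices the whole list out under the lock (list_replace_init()) so the callbacks can run with the lock dropped. A hedged sketch of the sender side, assuming the 4-argument smp_call_function_single() of this kernel generation (remote_func and run_on_cpu are hypothetical):

    #include <linux/smp.h>

    /* runs on the chosen CPU, in IPI context */
    static void remote_func(void *info)
    {
            int *out = info;

            *out = smp_processor_id();
    }

    static int run_on_cpu(int cpu)
    {
            int out = -1;

            /* queues an entry on cpu's call_single_queue (drained by
             * generic_smp_call_function_single_interrupt() above);
             * wait=1 blocks until remote_func has finished */
            smp_call_function_single(cpu, remote_func, &out, 1);
            return out;
    }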
cpuset.c:
   417  static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)   in is_cpuset_subset() argument
   419          return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&   in is_cpuset_subset()
   420                 nodes_subset(p->mems_allowed, q->mems_allowed) &&     in is_cpuset_subset()
   421                 is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&         in is_cpuset_subset()
   422                 is_mem_exclusive(p) <= is_mem_exclusive(q);           in is_cpuset_subset()
   544          LIST_HEAD(q);                                             in update_domain_attr_tree()
   546          list_add(&c->stack_list, &q);                             in update_domain_attr_tree()
   547          while (!list_empty(&q)) {                                 in update_domain_attr_tree()
   552                  cp = list_first_entry(&q, struct cpuset, stack_list);   in update_domain_attr_tree()
   553                  list_del(q.next);                                 in update_domain_attr_tree()
    [all …]
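update_domain_attr_tree() avoids recursion by using a LIST_HEAD as an explicit stack: push the root, then pop and visit nodes until the list empties, pushing children as they are found. The same pattern in isolation (struct node, walk_tree and visit are hypothetical):

    #include <linux/list.h>

    struct node {
            struct list_head children;      /* heads the list of our children */
            struct list_head sibling;       /* our link in parent->children */
            struct list_head stack_list;    /* scratch link for iterative walks */
    };

    static void walk_tree(struct node *root, void (*visit)(struct node *))
    {
            LIST_HEAD(q);                   /* the explicit stack */
            struct node *n, *child;

            list_add(&root->stack_list, &q);
            while (!list_empty(&q)) {
                    n = list_first_entry(&q, struct node, stack_list);
                    list_del(q.next);       /* pop, as at line 553 above */
                    visit(n);
                    list_for_each_entry(child, &n->children, sibling)
                            list_add(&child->stack_list, &q);
            }
    }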
auditfilter.c:
  1451  static void audit_list(int pid, int seq, struct sk_buff_head *q)   in audit_list() argument
  1469          skb_queue_tail(q, skb);                                   in audit_list()
  1475          skb_queue_tail(q, skb);                                   in audit_list()
  1479  static void audit_list_rules(int pid, int seq, struct sk_buff_head *q)   in audit_list_rules() argument
  1497          skb_queue_tail(q, skb);                                   in audit_list_rules()
  1503          skb_queue_tail(q, skb);                                   in audit_list_rules()
  1572          skb_queue_head_init(&dest->q);                            in audit_receive_filter()
  1576          audit_list(pid, seq, &dest->q);                           in audit_receive_filter()
  1578          audit_list_rules(pid, seq, &dest->q);                     in audit_receive_filter()
  1583          skb_queue_purge(&dest->q);                                in audit_receive_filter()
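The audit code builds its netlink replies as an sk_buff_head FIFO: initialise with skb_queue_head_init(), append with skb_queue_tail(), and either send the queue on or discard it wholesale with skb_queue_purge(). A hedged sketch of the produce/drain pattern (queue_replies and drain_replies are hypothetical, as is the 128-byte payload size):

    #include <linux/skbuff.h>

    static void queue_replies(struct sk_buff_head *q)
    {
            struct sk_buff *skb;

            skb_queue_head_init(q);

            skb = alloc_skb(128, GFP_KERNEL);
            if (skb)
                    skb_queue_tail(q, skb);         /* FIFO append under q->lock */
    }

    static void drain_replies(struct sk_buff_head *q)
    {
            struct sk_buff *skb;

            /* skb_dequeue() is the locked form of the __skb_dequeue()
             * loop seen in audit.c below */
            while ((skb = skb_dequeue(q)) != NULL)
                    kfree_skb(skb);
    }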
auditsc.c:
   373          struct audit_tree_refs *q;                                in unroll_tree_refs() local
   384          for (q = p; q != ctx->trees; q = q->next, n = 31) {       in unroll_tree_refs()
   386                  audit_put_chunk(q->c[n]);                         in unroll_tree_refs()
   387                  q->c[n] = NULL;                                   in unroll_tree_refs()
   391                  audit_put_chunk(q->c[n]);                         in unroll_tree_refs()
   392                  q->c[n] = NULL;                                   in unroll_tree_refs()
   401          struct audit_tree_refs *p, *q;                            in free_tree_refs() local
   402          for (p = ctx->first_trees; p; p = q) {                    in free_tree_refs()
   403                  q = p->next;                                      in free_tree_refs()
sched.c:
  4753  void __wake_up_common(wait_queue_head_t *q, unsigned int mode,    in __wake_up_common() argument
  4758          list_for_each_entry_safe(curr, next, &q->task_list, task_list) {   in __wake_up_common()
  4774  void __wake_up(wait_queue_head_t *q, unsigned int mode,           in __wake_up() argument
  4779          spin_lock_irqsave(&q->lock, flags);                       in __wake_up()
  4780          __wake_up_common(q, mode, nr_exclusive, 0, key);          in __wake_up()
  4781          spin_unlock_irqrestore(&q->lock, flags);                  in __wake_up()
  4788  void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)    in __wake_up_locked() argument
  4790          __wake_up_common(q, mode, 1, 0, NULL);                    in __wake_up_locked()
  4807  __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)   in __wake_up_sync() argument
  4812          if (unlikely(!q))                                         in __wake_up_sync()
    [all …]
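__wake_up() is the engine behind the wake_up*() macros: it takes q->lock, and __wake_up_common() then walks task_list, stopping once nr_exclusive exclusive waiters have been woken (0 means no limit, so everyone wakes). Roughly, the macro layer of this era expands as follows (paraphrased from include/linux/wait.h):

    #define wake_up(x)                __wake_up(x, TASK_NORMAL, 1, NULL)
    #define wake_up_nr(x, nr)         __wake_up(x, TASK_NORMAL, nr, NULL)
    #define wake_up_all(x)            __wake_up(x, TASK_NORMAL, 0, NULL)  /* nr_exclusive=0 */
    #define wake_up_interruptible(x)  __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)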
audit.h:
   106          struct sk_buff_head q;                                    member
audit_tree.c:
   480          struct list_head *p, *q;                                  in trim_marked() local
   487          for (p = tree->chunks.next; p != &tree->chunks; p = q) {  in trim_marked()
   489                  q = p->next;                                      in trim_marked()
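Here q caches p->next before the loop body may unlink or free the entry p points into; it is the open-coded form of list_for_each_safe(). The same loop with the helper macro (trim_all is hypothetical):

    #include <linux/list.h>

    static void trim_all(struct list_head *chunks)
    {
            struct list_head *p, *n;

            list_for_each_safe(p, n, chunks) {  /* n caches p->next */
                    list_del_init(p);
                    /* ... release the entry containing p ... */
            }
    }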
sysctl.c:
  1810  static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q)   in try_attach() argument
  1812          struct ctl_table *to = p->ctl_table, *by = q->ctl_table;  in try_attach()
  1818          if (by == q->attached_by)                                 in try_attach()
  1827          q->attached_by = by;                                      in try_attach()
  1828          q->attached_to = to;                                      in try_attach()
  1829          q->parent = p;                                            in try_attach()
kprobes.c:
   129          struct task_struct *p, *q;                                in check_safety() local
   130          do_each_thread(p, q) {                                    in check_safety()
   137          } while_each_thread(p, q);                                in check_safety()
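do_each_thread()/while_each_thread() visit every thread of every process; here p walks the thread-group leaders and q the threads within each group. A hedged sketch of the idiom (count_threads is hypothetical; the traversal must be protected, e.g. by tasklist_lock):

    #include <linux/sched.h>
    #include <linux/spinlock.h>

    static int count_threads(void)
    {
            struct task_struct *g, *t;
            int n = 0;

            read_lock(&tasklist_lock);
            do_each_thread(g, t) {
                    n++;
            } while_each_thread(g, t);
            read_unlock(&tasklist_lock);

            return n;
    }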
cgroup.c:
  1986          struct task_struct *q = heap->ptrs[i];                    in cgroup_scan_tasks() local
  1988          latest_time = q->start_time;                              in cgroup_scan_tasks()
  1989          latest_task = q;                                          in cgroup_scan_tasks()
  1992          scan->process_task(q, scan);                              in cgroup_scan_tasks()
  1993          put_task_struct(q);                                       in cgroup_scan_tasks()
audit.c:
   490          while ((skb = __skb_dequeue(&dest->q)) != NULL)           in audit_send_list()