/kernel/sched/ |
D | wait.c |
    15    void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)    in __init_waitqueue_head() argument
    17    spin_lock_init(&q->lock);    in __init_waitqueue_head()
    18    lockdep_set_class_and_name(&q->lock, key, name);    in __init_waitqueue_head()
    19    INIT_LIST_HEAD(&q->task_list);    in __init_waitqueue_head()
    24    void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)    in add_wait_queue() argument
    29    spin_lock_irqsave(&q->lock, flags);    in add_wait_queue()
    30    __add_wait_queue(q, wait);    in add_wait_queue()
    31    spin_unlock_irqrestore(&q->lock, flags);    in add_wait_queue()
    35    void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)    in add_wait_queue_exclusive() argument
    40    spin_lock_irqsave(&q->lock, flags);    in add_wait_queue_exclusive()
    [all …]
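The wait.c hits are the low-level waitqueue helpers: __init_waitqueue_head() sets up the spinlock, lockdep class and task_list, and the add_wait_queue*() variants link an entry while holding q->lock with interrupts disabled. Most callers reach these through the higher-level wait_event/wake_up macros; a minimal, hypothetical module-style sketch of that usual pattern (names here are illustrative, not taken from this tree):

    #include <linux/wait.h>

    /* Hypothetical producer/consumer state; names are illustrative only. */
    static wait_queue_head_t demo_wq;
    static int demo_ready;

    static void demo_init(void)
    {
        init_waitqueue_head(&demo_wq);  /* wrapper around __init_waitqueue_head() */
    }

    static int demo_wait(void)
    {
        /* Queues a wait entry on demo_wq (the same list manipulation done
         * under q->lock by add_wait_queue()) and sleeps until demo_ready. */
        return wait_event_interruptible(demo_wq, demo_ready != 0);
    }

    static void demo_signal(void)
    {
        demo_ready = 1;
        wake_up(&demo_wq);  /* wakes tasks linked on q->task_list */
    }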
|
D | core.c |
    2880    struct rq *q;    in nr_running_integral() local
    2885    q = cpu_rq(cpu);    in nr_running_integral()
    2894    seqcnt = read_seqcount_begin(&q->ave_seqcnt);    in nr_running_integral()
    2895    integral = do_nr_running_integral(q);    in nr_running_integral()
    2896    if (read_seqcount_retry(&q->ave_seqcnt, seqcnt)) {    in nr_running_integral()
    2897    read_seqcount_begin(&q->ave_seqcnt);    in nr_running_integral()
    2898    integral = q->nr_running_integral;    in nr_running_integral()
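The core.c hits are the read side of a sequence counter: nr_running_integral() samples q->ave_seqcnt before and after reading the integral and falls back to the stored value if a writer raced with it. As a generic sketch of the canonical seqcount reader/writer pairing (illustrative variables, not the scheduler's code, and assuming writers are already serialized elsewhere):

    #include <linux/seqlock.h>
    #include <linux/types.h>

    /* Hypothetical statistic guarded by a plain seqcount; writers are assumed
     * to be serialized against each other elsewhere (e.g. by a lock). */
    static seqcount_t stat_seq = SEQCNT_ZERO(stat_seq);
    static u64 stat_value;

    static u64 stat_read(void)
    {
        unsigned int seq;
        u64 val;

        do {
            seq = read_seqcount_begin(&stat_seq);      /* snapshot the sequence */
            val = stat_value;                          /* lockless read */
        } while (read_seqcount_retry(&stat_seq, seq)); /* retry if a writer ran */

        return val;
    }

    static void stat_update(u64 val)
    {
        write_seqcount_begin(&stat_seq);
        stat_value = val;
        write_seqcount_end(&stat_seq);
    }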
|
/kernel/trace/ |
D | blktrace.c |
    330    static int __blk_trace_remove(struct request_queue *q)    in __blk_trace_remove() argument
    334    bt = xchg(&q->blk_trace, NULL);    in __blk_trace_remove()
    344    int blk_trace_remove(struct request_queue *q)    in blk_trace_remove() argument
    348    mutex_lock(&q->blk_trace_mutex);    in blk_trace_remove()
    349    ret = __blk_trace_remove(q);    in blk_trace_remove()
    350    mutex_unlock(&q->blk_trace_mutex);    in blk_trace_remove()
    467    int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,    in do_blk_trace_setup() argument
    491    if (q->blk_trace) {    in do_blk_trace_setup()
    563    if (cmpxchg(&q->blk_trace, NULL, bt))    in do_blk_trace_setup()
    574    static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,    in __blk_trace_setup() argument
    [all …]
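do_blk_trace_setup() and __blk_trace_remove() manage the q->blk_trace pointer with atomic swaps rather than a lock: cmpxchg(&q->blk_trace, NULL, bt) installs a trace only if none is attached, and xchg(&q->blk_trace, NULL) detaches whatever is there exactly once. A userspace C11 sketch of that install/detach pattern (hypothetical names, not the kernel helpers):

    #include <stdatomic.h>
    #include <stddef.h>

    struct demo_trace { int placeholder; };

    /* Slot that at most one trace may occupy, mirroring q->blk_trace. */
    static _Atomic(struct demo_trace *) trace_slot;

    /* Attach: succeeds only if the slot is empty (the cmpxchg in setup). */
    static int trace_attach(struct demo_trace *bt)
    {
        struct demo_trace *expected = NULL;
        return atomic_compare_exchange_strong(&trace_slot, &expected, bt) ? 0 : -1;
    }

    /* Detach: atomically claim whatever is attached (the xchg in remove),
     * so two racing removers cannot both end up freeing it. */
    static struct demo_trace *trace_detach(void)
    {
        return atomic_exchange(&trace_slot, NULL);
    }

Because the swap both reads and clears the slot in one step, only one caller ever sees the non-NULL pointer and becomes responsible for tearing it down.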
|
/kernel/ |
D | futex.c |
    1505    static void __unqueue_futex(struct futex_q *q)    in __unqueue_futex() argument
    1509    if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))    in __unqueue_futex()
    1510    || WARN_ON(plist_node_empty(&q->list)))    in __unqueue_futex()
    1513    hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);    in __unqueue_futex()
    1514    plist_del(&q->list, &hb->chain);    in __unqueue_futex()
    1524    static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)    in mark_wake_futex() argument
    1526    struct task_struct *p = q->task;    in mark_wake_futex()
    1528    if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))    in mark_wake_futex()
    1536    __unqueue_futex(q);    in mark_wake_futex()
    1544    q->lock_ptr = NULL;    in mark_wake_futex()
    [all …]
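__unqueue_futex() only keeps a pointer to the hash bucket's lock in q->lock_ptr, so it recovers the enclosing futex_hash_bucket with container_of() before deleting q from the bucket's plist; mark_wake_futex() later clears q->lock_ptr to mark the waiter as woken. The pointer-to-member recovery in isolation (a generic sketch with made-up types):

    #include <stddef.h>

    /* Generic container_of: map a pointer to a member back to its struct. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct demo_bucket {
        int lock;     /* stands in for the spinlock member */
        int chain;    /* stands in for the plist head */
    };

    /* Recover the bucket from a stored pointer to its lock member, the way
     * __unqueue_futex() turns q->lock_ptr into a futex_hash_bucket. */
    static struct demo_bucket *bucket_of(int *lock_ptr)
    {
        return container_of(lock_ptr, struct demo_bucket, lock);
    }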
|
D | latencytop.c |
    105    int q, same = 1;    in account_global_scheduler_latency() local
    113    for (q = 0; q < LT_BACKTRACEDEPTH; q++) {    in account_global_scheduler_latency()
    114    unsigned long record = lat->backtrace[q];    in account_global_scheduler_latency()
    116    if (latency_record[i].backtrace[q] != record) {    in account_global_scheduler_latency()
    176    int i, q;    in __account_scheduler_latency() local
    203    for (q = 0; q < LT_BACKTRACEDEPTH; q++) {    in __account_scheduler_latency()
    204    unsigned long record = lat.backtrace[q];    in __account_scheduler_latency()
    206    if (mylat->backtrace[q] != record) {    in __account_scheduler_latency()
    248    int q;    in lstats_show() local
    251    for (q = 0; q < LT_BACKTRACEDEPTH; q++) {    in lstats_show()
    [all …]
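Every latencytop hit is the same inner loop: walk LT_BACKTRACEDEPTH slots and treat two latency records as the same call site only if every backtrace entry matches. A simplified standalone version of that comparison (the depth constant here is illustrative and the real code's sentinel handling is omitted):

    #include <stdbool.h>

    #define DEMO_BACKTRACE_DEPTH 12   /* illustrative; the kernel uses LT_BACKTRACEDEPTH */

    struct demo_latency_record {
        unsigned long backtrace[DEMO_BACKTRACE_DEPTH];
    };

    /* Two records describe the same latency site only if every backtrace
     * slot agrees, mirroring the loops in account_global_scheduler_latency(). */
    static bool same_backtrace(const struct demo_latency_record *a,
                               const struct demo_latency_record *b)
    {
        for (int q = 0; q < DEMO_BACKTRACE_DEPTH; q++) {
            if (a->backtrace[q] != b->backtrace[q])
                return false;
        }
        return true;
    }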
|
D | signal.c |
    374    struct sigqueue *q = NULL;    in __sigqueue_alloc() local
    394    q = kmem_cache_alloc(sigqueue_cachep, flags);    in __sigqueue_alloc()
    399    if (unlikely(q == NULL)) {    in __sigqueue_alloc()
    403    INIT_LIST_HEAD(&q->list);    in __sigqueue_alloc()
    404    q->flags = 0;    in __sigqueue_alloc()
    405    q->user = user;    in __sigqueue_alloc()
    408    return q;    in __sigqueue_alloc()
    411    static void __sigqueue_free(struct sigqueue *q)    in __sigqueue_free() argument
    413    if (q->flags & SIGQUEUE_PREALLOC)    in __sigqueue_free()
    415    if (atomic_dec_and_test(&q->user->sigpending))    in __sigqueue_free()
    [all …]
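__sigqueue_alloc() charges the owning user's sigpending count before the allocation can fail and undoes the charge if kmem_cache_alloc() returns NULL; __sigqueue_free() drops the count again unless the entry was preallocated. A simplified userspace sketch of that charge/uncharge pairing (hypothetical names; C11 atomics and malloc stand in for the kernel's accounting and slab calls, and the rlimit check is omitted):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct demo_user { atomic_int sigpending; };
    struct demo_sigqueue { struct demo_user *user; int flags; };

    #define DEMO_PREALLOC 0x1   /* stands in for SIGQUEUE_PREALLOC */

    static struct demo_sigqueue *demo_sigqueue_alloc(struct demo_user *user)
    {
        struct demo_sigqueue *q;

        atomic_fetch_add(&user->sigpending, 1);      /* charge before allocating */
        q = malloc(sizeof(*q));
        if (!q) {
            atomic_fetch_sub(&user->sigpending, 1);  /* undo the charge on failure */
            return NULL;
        }
        q->user = user;
        q->flags = 0;
        return q;
    }

    static void demo_sigqueue_free(struct demo_sigqueue *q)
    {
        if (q->flags & DEMO_PREALLOC)   /* preallocated entries are not freed here */
            return;
        atomic_fetch_sub(&q->user->sigpending, 1);
        free(q);
    }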
|
D | cgroup_pids.c |
    141    struct pids_cgroup *p, *q;    in pids_try_charge() local
    159    for (q = pids; q != p; q = parent_pids(q))    in pids_try_charge()
    160    pids_cancel(q, num);    in pids_try_charge()
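pids_try_charge() charges every ancestor cgroup in turn and, if some ancestor p rejects the charge, walks the hierarchy again from the leaf up to (but not including) p, cancelling what was already charged. A generic sketch of that charge-with-rollback walk (hypothetical types and helpers, non-atomic for brevity):

    #include <stdbool.h>

    struct demo_group {
        struct demo_group *parent;
        long counter;
        long limit;
    };

    static bool demo_charge_one(struct demo_group *g, long num)
    {
        if (g->counter + num > g->limit)
            return false;
        g->counter += num;
        return true;
    }

    /* Charge the whole ancestor chain, or leave every counter untouched. */
    static int demo_try_charge(struct demo_group *leaf, long num)
    {
        struct demo_group *p, *q;

        for (p = leaf; p; p = p->parent)
            if (!demo_charge_one(p, num))
                goto revert;
        return 0;

    revert:
        /* Undo everything charged so far: from the leaf up to, but not
         * including, the group p that rejected the charge. */
        for (q = leaf; q != p; q = q->parent)
            q->counter -= num;
        return -1;
    }

The result is all-or-nothing: either every level of the hierarchy accepts the charge, or no counter is left modified.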
|
D | auditsc.c |
    254    struct audit_tree_refs *q;    in unroll_tree_refs() local
    265    for (q = p; q != ctx->trees; q = q->next, n = 31) {    in unroll_tree_refs()
    267    audit_put_chunk(q->c[n]);    in unroll_tree_refs()
    268    q->c[n] = NULL;    in unroll_tree_refs()
    272    audit_put_chunk(q->c[n]);    in unroll_tree_refs()
    273    q->c[n] = NULL;    in unroll_tree_refs()
    282    struct audit_tree_refs *p, *q;    in free_tree_refs() local
    283    for (p = ctx->first_trees; p; p = q) {    in free_tree_refs()
    284    q = p->next;    in free_tree_refs()
|
D | auditfilter.c |
    1041    static void audit_list_rules(__u32 portid, int seq, struct sk_buff_head *q)    in audit_list_rules() argument
    1060    skb_queue_tail(q, skb);    in audit_list_rules()
    1066    skb_queue_tail(q, skb);    in audit_list_rules()
    1158    skb_queue_head_init(&dest->q);    in audit_list_rules_send()
    1161    audit_list_rules(portid, seq, &dest->q);    in audit_list_rules_send()
    1166    skb_queue_purge(&dest->q);    in audit_list_rules_send()
|
D | kexec_core.c |
    1211    char *q;    in get_last_crashkernel() local
    1221    q = end_p - strlen(suffix_tbl[i]);    in get_last_crashkernel()
    1222    if (!strncmp(q, suffix_tbl[i],    in get_last_crashkernel()
    1228    q = end_p - strlen(suffix);    in get_last_crashkernel()
    1229    if (!strncmp(q, suffix, strlen(suffix)))    in get_last_crashkernel()
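get_last_crashkernel() checks whether a parameter ends with a given suffix by pointing q at end_p minus the suffix length and comparing from there. The same ends-with test as a standalone helper (illustrative, with an explicit length check added):

    #include <stdbool.h>
    #include <string.h>

    /* True if s ends with suffix: compare from (end - strlen(suffix)), the
     * same arithmetic the q/end_p pair performs in get_last_crashkernel(). */
    static bool ends_with(const char *s, const char *suffix)
    {
        size_t slen = strlen(s);
        size_t suflen = strlen(suffix);

        if (suflen > slen)
            return false;
        return strncmp(s + slen - suflen, suffix, suflen) == 0;
    }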
|
D | ptrace.c |
    686    struct sigqueue *q;    in ptrace_peek_siginfo() local
    715    list_for_each_entry(q, &pending->list, list) {    in ptrace_peek_siginfo()
    718    copy_siginfo(&info, &q->info);    in ptrace_peek_siginfo()
|
D | audit_tree.c |
    513    struct list_head *p, *q;    in trim_marked() local
    520    for (p = tree->chunks.next; p != &tree->chunks; p = q) {    in trim_marked()
    522    q = p->next;    in trim_marked()
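trim_marked() may splice entries off tree->chunks while it walks the list, so it saves p->next into q before touching p and advances with p = q. A minimal sketch of that deletion-safe walk over a plain singly linked list (illustrative types, not the audit code):

    #include <stdlib.h>

    struct demo_node {
        struct demo_node *next;
        int keep;
    };

    /* Drop unwanted nodes; the successor is saved into q before p can be
     * freed, the same precaution trim_marked() takes with q = p->next. */
    static void demo_trim(struct demo_node **head)
    {
        struct demo_node **link = head;
        struct demo_node *p, *q;

        for (p = *head; p; p = q) {
            q = p->next;        /* remember the successor before touching p */
            if (!p->keep) {
                *link = q;      /* unlink p */
                free(p);
            } else {
                link = &p->next;
            }
        }
    }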
|
D | cpuset.c |
    400    static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)    in is_cpuset_subset() argument
    402    return cpumask_subset(p->cpus_requested, q->cpus_requested) &&    in is_cpuset_subset()
    403    nodes_subset(p->mems_allowed, q->mems_allowed) &&    in is_cpuset_subset()
    404    is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&    in is_cpuset_subset()
    405    is_mem_exclusive(p) <= is_mem_exclusive(q);    in is_cpuset_subset()
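is_cpuset_subset() folds the exclusivity flags into the subset test with is_cpu_exclusive(p) <= is_cpu_exclusive(q): on 0/1 values, a <= b is logical implication, so p may only have an exclusive bit set if q has it too. The same trick in isolation (illustrative):

    #include <stdbool.h>

    /* On 0/1 values, "a <= b" is logical implication: it only fails when a is
     * set and b is not, which is exactly the constraint is_cpuset_subset()
     * places on the exclusive bits. */
    static bool flag_implies(bool a, bool b)
    {
        return a <= b;
    }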
|
D | audit.h | 244 struct sk_buff_head q; member
|
D | audit.c | 551 while ((skb = __skb_dequeue(&dest->q)) != NULL) in audit_send_list()
|
/kernel/time/ |
D | timeconst.bc | 53 print "#error \qinclude/generated/timeconst.h has the wrong HZ value!\q\n"
|