/kernel/sched/ |
D | swait.c |
       7  void __init_swait_queue_head(struct swait_queue_head *q, const char *name,   in __init_swait_queue_head() argument
      10  raw_spin_lock_init(&q->lock);   in __init_swait_queue_head()
      11  lockdep_set_class_and_name(&q->lock, key, name);   in __init_swait_queue_head()
      12  INIT_LIST_HEAD(&q->task_list);   in __init_swait_queue_head()
      22  void swake_up_locked(struct swait_queue_head *q)   in swake_up_locked() argument
      26  if (list_empty(&q->task_list))   in swake_up_locked()
      29  curr = list_first_entry(&q->task_list, typeof(*curr), task_list);   in swake_up_locked()
      42  void swake_up_all_locked(struct swait_queue_head *q)   in swake_up_all_locked() argument
      44  while (!list_empty(&q->task_list))   in swake_up_all_locked()
      45  swake_up_locked(q);   in swake_up_all_locked()
  [all …]
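Note: these are the internals behind the simple-wait-queue API. A minimal kernel-context sketch of how a caller would use that API (not compilable standalone; `my_wq` and `my_done` are invented names):

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
static bool my_done;

/* Waiter: sleeps until the condition holds; rechecked on every wakeup. */
static void my_wait(void)
{
	swait_event_exclusive(my_wq, READ_ONCE(my_done));
}

/* Waker: publish the condition, then wake at most one waiter. */
static void my_signal(void)
{
	WRITE_ONCE(my_done, true);
	swake_up_one(&my_wq);
}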
|
D | sched.h |
    3228  extern void swake_up_all_locked(struct swait_queue_head *q);
    3229  extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
|
/kernel/trace/ |
D | blktrace.c |
     313  static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)   in blk_trace_free() argument
     322  debugfs_lookup_and_remove("dropped", q->debugfs_dir);   in blk_trace_free()
     323  debugfs_lookup_and_remove("msg", q->debugfs_dir);   in blk_trace_free()
     348  static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)   in blk_trace_cleanup() argument
     351  blk_trace_free(q, bt);   in blk_trace_cleanup()
     355  static int __blk_trace_remove(struct request_queue *q)   in __blk_trace_remove() argument
     359  bt = rcu_replace_pointer(q->blk_trace, NULL,   in __blk_trace_remove()
     360  lockdep_is_held(&q->debugfs_mutex));   in __blk_trace_remove()
     365  blk_trace_cleanup(q, bt);   in __blk_trace_remove()
     370  int blk_trace_remove(struct request_queue *q)   in blk_trace_remove() argument
  [all …]
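Note: lines 359-360 show the RCU detach-then-free pattern: the pointer is swapped out under the mutex that writers hold, and the object is freed only after readers drain. A hedged kernel-context sketch of the same shape (illustrative names; not the blktrace code itself):

struct my_obj __rcu *my_ptr;
static DEFINE_MUTEX(my_mutex);

static void my_remove(void)
{
	struct my_obj *old;

	mutex_lock(&my_mutex);
	old = rcu_replace_pointer(my_ptr, NULL,
				  lockdep_is_held(&my_mutex));
	mutex_unlock(&my_mutex);

	if (old) {
		synchronize_rcu();	/* wait out rcu_read_lock() readers */
		kfree(old);
	}
}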
|
D | trace_boot.c |
     564  char *q;   in trace_boot_set_ftrace_filter() local
     567  q = kstrdup(p, GFP_KERNEL);   in trace_boot_set_ftrace_filter()
     568  if (!q)   in trace_boot_set_ftrace_filter()
     570  if (ftrace_set_filter(tr->ops, q, strlen(q), 0) < 0)   in trace_boot_set_ftrace_filter()
     574  kfree(q);   in trace_boot_set_ftrace_filter()
     577  q = kstrdup(p, GFP_KERNEL);   in trace_boot_set_ftrace_filter()
     578  if (!q)   in trace_boot_set_ftrace_filter()
     580  if (ftrace_set_notrace(tr->ops, q, strlen(q), 0) < 0)   in trace_boot_set_ftrace_filter()
     584  kfree(q);   in trace_boot_set_ftrace_filter()
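Note: the kstrdup()/kfree() pair is a defensive copy: the filter API receives a writable buffer while `p` points at boot-config data that must stay intact (my reading of the snippet, not a claim about ftrace internals). A runnable userspace sketch of the pattern, with strdup()/free() standing in for kstrdup()/kfree() and strtok() as the buffer-modifying consumer:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int consume(char *buf)	/* modifies buf, as strtok() does */
{
	for (char *tok = strtok(buf, ","); tok; tok = strtok(NULL, ","))
		printf("token: %s\n", tok);
	return 0;
}

int main(void)
{
	const char *p = "do_fork,vfs_read";
	char *q = strdup(p);	/* kstrdup() analogue */

	if (!q)
		return 1;
	consume(q);
	free(q);		/* kfree() analogue */
	printf("original still intact: %s\n", p);
	return 0;
}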
|
D | trace_events_filter.c |
    1233  char q;   in parse_pred() local
    1322  q = str[i];   in parse_pred()
    1324  q = 0;   in parse_pred()
    1327  if (q && str[i] == q)   in parse_pred()
    1329  if (!q && (str[i] == ')' || str[i] == '&' ||   in parse_pred()
    1334  if (q)   in parse_pred()
    1348  char q = str[i];   in parse_pred() local
    1370  if (str[i] == q)   in parse_pred()
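Note: `char q` here is parse_pred()'s quote tracker: it records the opening quote character (or 0 when the value is unquoted), so the scan knows whether to stop at the matching quote or at an operator. A runnable sketch of the idiom (simplified; `value_end` is an invented helper, not the kernel function):

#include <stdio.h>

static int value_end(const char *str, int i)
{
	char q;

	if (str[i] == '\'' || str[i] == '"')
		q = str[i++];		/* quoted: remember which quote */
	else
		q = 0;			/* unquoted */

	for (; str[i]; i++) {
		if (q && str[i] == q)
			return i + 1;	/* include the closing quote */
		if (!q && (str[i] == ')' || str[i] == '&' || str[i] == '|'))
			return i;	/* unquoted: stop at an operator */
	}
	return i;
}

int main(void)
{
	const char *s = "\"a && b\" && c";

	printf("value ends at index %d\n", value_end(s, 0));	/* 8 */
	return 0;
}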
|
D | trace_events_inject.c |
     105  char q = str[i];   in parse_field() local
     116  if (str[i] == q)   in parse_field()
|
D | trace.c |
    3918  char *q;   in trace_event_format() local
    3927  new_fmt = q = iter->fmt;   in trace_event_format()
    3929  if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {   in trace_event_format()
    3933  q += iter->fmt - new_fmt;   in trace_event_format()
    3937  *q++ = *p++;   in trace_event_format()
    3942  *q++ = *p++;   in trace_event_format()
    3944  *q++ = *p++;   in trace_event_format()
    3945  *q++ = 'x';   in trace_event_format()
    3949  *q = '\0';   in trace_event_format()
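Note: trace_event_format() copies the event's format string into a scratch buffer (`new_fmt`/`q`), growing the buffer when fewer than 3 bytes remain, and emits an extra 'x' so a "%p" becomes "%px". A runnable, deliberately naive sketch of just that copy loop (the buffer-growing logic is omitted, and it does not special-case %p extensions such as %ps or an existing %px):

#include <stdio.h>

static void expand_p_to_px(const char *p, char *q, unsigned long qsize)
{
	char *end = q + qsize - 1;	/* last byte, reserved for '\0' */

	while (*p && q < end - 1) {
		if (p[0] == '%' && p[1] == 'p') {
			*q++ = *p++;	/* '%' */
			*q++ = *p++;	/* 'p' */
			if (q < end)
				*q++ = 'x';
			continue;
		}
		*q++ = *p++;
	}
	*q = '\0';
}

int main(void)
{
	char buf[64];

	expand_p_to_px("ptr=%p flags=%d", buf, sizeof(buf));
	printf("%s\n", buf);	/* ptr=%px flags=%d */
	return 0;
}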
|
/kernel/futex/ |
D | core.c |
    1517  static void __unqueue_futex(struct futex_q *q)   in __unqueue_futex() argument
    1521  if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))   in __unqueue_futex()
    1523  lockdep_assert_held(q->lock_ptr);   in __unqueue_futex()
    1525  hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);   in __unqueue_futex()
    1526  plist_del(&q->list, &hb->chain);   in __unqueue_futex()
    1536  static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)   in mark_wake_futex() argument
    1538  struct task_struct *p = q->task;   in mark_wake_futex()
    1540  if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))   in mark_wake_futex()
    1544  __unqueue_futex(q);   in mark_wake_futex()
    1552  smp_store_release(&q->lock_ptr, NULL);   in mark_wake_futex()
  [all …]
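Note: line 1525 recovers the futex hash bucket from a pointer to its embedded lock via container_of(). A runnable demonstration of that arithmetic, with a stand-in struct (`struct bucket` is invented; the macro is the classic offsetof-based definition):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bucket {
	int chain_len;
	int lock;	/* stand-in for the embedded spinlock member */
};

int main(void)
{
	struct bucket b = { .chain_len = 3 };
	int *lock_ptr = &b.lock;	/* all we keep is the member pointer */
	struct bucket *hb = container_of(lock_ptr, struct bucket, lock);

	printf("chain_len=%d same=%d\n", hb->chain_len, hb == &b);	/* 3 1 */
	return 0;
}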
|
/kernel/ |
D | latencytop.c |
      97  int q, same = 1;   in account_global_scheduler_latency() local
     105  for (q = 0; q < LT_BACKTRACEDEPTH; q++) {   in account_global_scheduler_latency()
     106  unsigned long record = lat->backtrace[q];   in account_global_scheduler_latency()
     108  if (latency_record[i].backtrace[q] != record) {   in account_global_scheduler_latency()
     154  int i, q;   in __account_scheduler_latency() local
     182  for (q = 0; q < LT_BACKTRACEDEPTH; q++) {   in __account_scheduler_latency()
     183  unsigned long record = lat.backtrace[q];   in __account_scheduler_latency()
     185  if (mylat->backtrace[q] != record) {   in __account_scheduler_latency()
     227  int q;   in lstats_show() local
     230  for (q = 0; q < LT_BACKTRACEDEPTH; q++) {   in lstats_show()
  [all …]
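Note: all three hits iterate a fixed-depth saved stack trace; two latency records are "the same" when their backtraces match slot for slot, and (in the kernel source) a zero entry terminates the trace early. A runnable sketch of the comparison (`same_backtrace` is an invented name; the zero-terminator rule is taken from the surrounding kernel code, not visible in the snippet):

#include <stdio.h>

#define LT_BACKTRACEDEPTH 12

static int same_backtrace(const unsigned long *a, const unsigned long *b)
{
	for (int q = 0; q < LT_BACKTRACEDEPTH; q++) {
		unsigned long record = a[q];

		if (b[q] != record)
			return 0;
		if (!record)
			break;		/* a zero entry ends both traces */
	}
	return 1;
}

int main(void)
{
	unsigned long x[LT_BACKTRACEDEPTH] = { 0x10, 0x20, 0 };
	unsigned long y[LT_BACKTRACEDEPTH] = { 0x10, 0x20, 0 };

	printf("same=%d\n", same_backtrace(x, y));	/* 1 */
	return 0;
}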
|
D | signal.c |
     417  struct sigqueue *q = NULL;   in __sigqueue_alloc() local
     437  q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);   in __sigqueue_alloc()
     442  if (unlikely(q == NULL)) {   in __sigqueue_alloc()
     445  INIT_LIST_HEAD(&q->list);   in __sigqueue_alloc()
     446  q->flags = sigqueue_flags;   in __sigqueue_alloc()
     447  q->ucounts = ucounts;   in __sigqueue_alloc()
     449  return q;   in __sigqueue_alloc()
     452  static void __sigqueue_free(struct sigqueue *q)   in __sigqueue_free() argument
     454  if (q->flags & SIGQUEUE_PREALLOC)   in __sigqueue_free()
     456  if (q->ucounts) {   in __sigqueue_free()
  [all …]
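Note: the allocation side is the standard slab pattern: take an object from a type-specific kmem_cache, initialize its fields, hand it out; the free side returns it to the same cache. A kernel-context sketch (not compilable standalone; `my_cachep` and `struct my_item` are illustrative, and the cache would be created once at init with kmem_cache_create()):

static struct kmem_cache *my_cachep;	/* kmem_cache_create(...) at init */

struct my_item {
	struct list_head list;
	int flags;
};

static struct my_item *my_alloc(gfp_t gfp_flags)
{
	struct my_item *q = kmem_cache_alloc(my_cachep, gfp_flags);

	if (unlikely(q == NULL))
		return NULL;
	INIT_LIST_HEAD(&q->list);
	q->flags = 0;
	return q;
}

static void my_free(struct my_item *q)
{
	kmem_cache_free(my_cachep, q);
}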
|
D | watch_queue.c |
     319  struct watch_type_filter *q;   in watch_queue_set_filter() local
     365  q = wfilter->filters;   in watch_queue_set_filter()
     370  q->type = tf[i].type;   in watch_queue_set_filter()
     371  q->info_filter = tf[i].info_filter;   in watch_queue_set_filter()
     372  q->info_mask = tf[i].info_mask;   in watch_queue_set_filter()
     373  q->subtype_filter[0] = tf[i].subtype_filter[0];   in watch_queue_set_filter()
     374  __set_bit(q->type, wfilter->type_filter);   in watch_queue_set_filter()
     375  q++;   in watch_queue_set_filter()
|
D | crash_core.c |
     200  char *q;   in get_last_crashkernel() local
     210  q = end_p - strlen(suffix_tbl[i]);   in get_last_crashkernel()
     211  if (!strncmp(q, suffix_tbl[i],   in get_last_crashkernel()
     217  q = end_p - strlen(suffix);   in get_last_crashkernel()
     218  if (!strncmp(q, suffix, strlen(suffix)))   in get_last_crashkernel()
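Note: `q = end_p - strlen(suffix)` positions q so that strncmp() compares the string's tail against the suffix. A runnable sketch of the idiom as a helper (`ends_with` is an invented name; it adds the length guard that the bare pointer arithmetic silently assumes):

#include <stdio.h>
#include <string.h>

static int ends_with(const char *s, const char *suffix)
{
	size_t slen = strlen(s), xlen = strlen(suffix);

	if (xlen > slen)
		return 0;	/* suffix longer than the string itself */
	return strncmp(s + slen - xlen, suffix, xlen) == 0;
}

int main(void)
{
	printf("%d %d\n", ends_with("256M,high", ",high"),
	       ends_with("256M", ",high"));	/* 1 0 */
	return 0;
}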
|
D | auditsc.c |
     281  struct audit_tree_refs *q;   in unroll_tree_refs() local
     293  for (q = p; q != ctx->trees; q = q->next, n = 31) {   in unroll_tree_refs()
     295  audit_put_chunk(q->c[n]);   in unroll_tree_refs()
     296  q->c[n] = NULL;   in unroll_tree_refs()
     300  audit_put_chunk(q->c[n]);   in unroll_tree_refs()
     301  q->c[n] = NULL;   in unroll_tree_refs()
     309  struct audit_tree_refs *p, *q;   in free_tree_refs() local
     311  for (p = ctx->first_trees; p; p = q) {   in free_tree_refs()
     312  q = p->next;   in free_tree_refs()
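Note: free_tree_refs() (lines 309-312) is the classic save-next-before-free walk: `q` caches p->next so the loop can free p and keep going. A runnable sketch:

#include <stdlib.h>

struct node {
	struct node *next;
};

static void free_all(struct node *first)
{
	struct node *p, *q;

	for (p = first; p; p = q) {
		q = p->next;	/* grab the successor before p is freed */
		free(p);
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->next = head;
		head = n;
	}
	free_all(head);
	return 0;
}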
|
D | auditfilter.c |
    1069  static void audit_list_rules(int seq, struct sk_buff_head *q)   in audit_list_rules() argument
    1088  skb_queue_tail(q, skb);   in audit_list_rules()
    1094  skb_queue_tail(q, skb);   in audit_list_rules()
    1178  skb_queue_head_init(&dest->q);   in audit_list_rules_send()
    1181  audit_list_rules(seq, &dest->q);   in audit_list_rules_send()
    1186  skb_queue_purge(&dest->q);   in audit_list_rules_send()
|
D | audit_tree.c |
     611  struct list_head *p, *q;   in trim_marked() local
     618  for (p = tree->chunks.next; p != &tree->chunks; p = q) {   in trim_marked()
     620  q = p->next;   in trim_marked()
|
D | ptrace.c |
     740  struct sigqueue *q;   in ptrace_peek_siginfo() local
     769  list_for_each_entry(q, &pending->list, list) {   in ptrace_peek_siginfo()
     772  copy_siginfo(&info, &q->info);   in ptrace_peek_siginfo()
|
D | audit.h |
     236  struct sk_buff_head q;   member
|
D | audit.c |
     931  while ((skb = __skb_dequeue(&dest->q)) != NULL)   in audit_send_list_thread()
|
/kernel/cgroup/ |
D | pids.c |
     146  struct pids_cgroup *p, *q;   in pids_try_charge() local
     164  for (q = pids; q != p; q = parent_pids(q))   in pids_try_charge()
     165  pids_cancel(q, num);   in pids_try_charge()
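Note: lines 164-165 are the unwind half of a charge-up-the-hierarchy transaction: on failure at level p, every level already charged (from the leaf up to, but not including, p) is cancelled. A runnable sketch with an array standing in for the cgroup parent chain (`try_charge`, `limit`, `counter` are invented names):

#include <stdio.h>

#define LEVELS 4

static int limit[LEVELS] = { 10, 10, 2, 10 };	/* level 2 is the tight one */
static int counter[LEVELS];

static int try_charge(int num)
{
	int p, q;

	for (p = 0; p < LEVELS; p++) {
		if (counter[p] + num > limit[p])
			goto revert;
		counter[p] += num;
	}
	return 0;

revert:
	for (q = 0; q != p; q++)	/* mirrors: q != p, not q <= p */
		counter[q] -= num;
	return -1;
}

int main(void)
{
	printf("first: %d\n", try_charge(2));	/* 0: fits at every level */
	printf("second: %d\n", try_charge(2));	/* -1: level 2 is full */
	printf("counter[0]=%d (rolled back)\n", counter[0]);	/* 2 */
	return 0;
}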
|
D | cpuset.c |
     504  static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)   in is_cpuset_subset() argument
     506  return cpumask_subset(p->cpus_requested, q->cpus_requested) &&   in is_cpuset_subset()
     507  nodes_subset(p->mems_allowed, q->mems_allowed) &&   in is_cpuset_subset()
     508  is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&   in is_cpuset_subset()
     509  is_mem_exclusive(p) <= is_mem_exclusive(q);   in is_cpuset_subset()
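Note: two idioms are packed into this predicate: cpumask_subset()/nodes_subset() test set inclusion, and `a <= b` on 0/1 flags encodes the implication "if p is exclusive then q must be too". A runnable sketch using a plain bitmask for the CPU set (`is_subset` is an invented stand-in, not the kernel function):

#include <stdio.h>

static int is_subset(unsigned long p_cpus, unsigned long q_cpus,
		     int p_exclusive, int q_exclusive)
{
	return (p_cpus & ~q_cpus) == 0 &&	/* every bit of p is in q */
	       p_exclusive <= q_exclusive;	/* p exclusive implies q exclusive */
}

int main(void)
{
	printf("%d\n", is_subset(0x3, 0xf, 0, 1));	/* 1 */
	printf("%d\n", is_subset(0x3, 0xf, 1, 0));	/* 0: implication fails */
	printf("%d\n", is_subset(0x13, 0xf, 0, 0));	/* 0: bit 4 not in q */
	return 0;
}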
|
/kernel/bpf/ |
D | cpumap.c |
      47  void *q[CPU_MAP_BULK_SIZE];   member
     709  struct ptr_ring *q;   in bq_flush_to_queue() local
     715  q = rcpu->queue;   in bq_flush_to_queue()
     716  spin_lock(&q->producer_lock);   in bq_flush_to_queue()
     719  struct xdp_frame *xdpf = bq->q[i];   in bq_flush_to_queue()
     722  err = __ptr_ring_produce(q, xdpf);   in bq_flush_to_queue()
     730  spin_unlock(&q->producer_lock);   in bq_flush_to_queue()
     758  bq->q[bq->count++] = xdpf;   in bq_enqueue()
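Note: `bq->q[]` is a small per-producer staging array: bq_enqueue() fills it without taking any lock, and bq_flush_to_queue() drains the whole batch into the shared ring under a single producer-lock acquisition. A runnable sketch of that batching shape, with a pthread mutex and a flat array standing in for the ptr_ring (all names invented; overflow handling on the shared queue is omitted):

#include <stdio.h>
#include <pthread.h>

#define BULK_SIZE 8

static pthread_mutex_t producer_lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_queue[64];
static int shared_count;

struct bulk_queue {
	void *q[BULK_SIZE];
	int count;
};

static void bq_flush(struct bulk_queue *bq)
{
	pthread_mutex_lock(&producer_lock);	/* one lock per batch */
	for (int i = 0; i < bq->count; i++)
		shared_queue[shared_count++] = bq->q[i];
	pthread_mutex_unlock(&producer_lock);
	bq->count = 0;
}

static void bq_enqueue(struct bulk_queue *bq, void *item)
{
	if (bq->count == BULK_SIZE)
		bq_flush(bq);		/* staging array full: drain it */
	bq->q[bq->count++] = item;
}

int main(void)
{
	struct bulk_queue bq = { .count = 0 };
	int items[20];

	for (int i = 0; i < 20; i++)
		bq_enqueue(&bq, &items[i]);
	bq_flush(&bq);			/* push out the final partial batch */
	printf("flushed %d items\n", shared_count);	/* 20 */
	return 0;
}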
|
D | devmap.c |
      56  struct xdp_frame *q[DEV_MAP_BULK_SIZE];   member
     376  struct xdp_frame *xdpf = bq->q[i];   in bq_xmit_all()
     382  to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);   in bq_xmit_all()
     387  sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);   in bq_xmit_all()
     400  xdp_return_frame_rx_napi(bq->q[i]);   in bq_xmit_all()
     467  bq->q[bq->count++] = xdpf;   in bq_enqueue()
|
/kernel/time/ |
D | timeconst.bc |
      55  print "#error \qinclude/generated/timeconst.h has the wrong HZ value!\q\n"
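Note: the hit here is not a variable at all: \q is GNU bc's print-statement escape for a double-quote character, so the match is the escape sequence inside the string literal, not a use of q.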
|
/kernel/events/ |
D | uprobes.c |
     324  struct list_head *pos, *q;   in delayed_uprobe_remove() local
     330  list_for_each_safe(pos, q, &delayed_uprobe_list) {   in delayed_uprobe_remove()
    1329  struct list_head *pos, *q;   in delayed_ref_ctr_inc() local
    1335  list_for_each_safe(pos, q, &delayed_uprobe_list) {   in delayed_ref_ctr_inc()
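Note: both functions use list_for_each_safe(), where `q` holds the next node so the current one can be unlinked and freed mid-walk. A runnable userspace sketch; the iteration macro matches the kernel's list implementation, and the rest (list_add, list_del, struct item) is minimal scaffolding for a standalone build:

#include <stdio.h>
#include <stdlib.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
	     pos = n, n = pos->next)

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

struct item {
	struct list_head list;	/* first member, so a cast recovers the item */
	int id;
};

static struct list_head items = LIST_HEAD_INIT(items);

int main(void)
{
	struct list_head *pos, *q;

	for (int i = 0; i < 3; i++) {
		struct item *it = malloc(sizeof(*it));

		if (!it)
			break;
		it->id = i;
		list_add(&it->list, &items);
	}

	list_for_each_safe(pos, q, &items) {
		struct item *it = (struct item *)pos;	/* list is first member */

		printf("freeing %d\n", it->id);
		list_del(pos);	/* safe: q already holds the successor */
		free(it);
	}
	return 0;
}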
|