Home
last modified time | relevance | path

Searched refs:queue (Results 1 – 6 of 6) sorted by relevance

/kernel/sched/
Drt.c84 INIT_LIST_HEAD(array->queue + i); in init_rt_rq()
910 list_for_each_entry(rt_se, array->queue + idx, run_list) { in dump_throttled_rt_tasks()
1255 if (list_empty(array->queue + rt_se_prio(rt_se))) in __delist_rt_entity()
1266 struct list_head *queue = array->queue + rt_se_prio(rt_se); in __enqueue_rt_entity() local
1283 list_add(&rt_se->run_list, queue); in __enqueue_rt_entity()
1285 list_add_tail(&rt_se->run_list, queue); in __enqueue_rt_entity()
1393 struct list_head *queue = array->queue + rt_se_prio(rt_se); in requeue_rt_entity() local
1396 list_move(&rt_se->run_list, queue); in requeue_rt_entity()
1398 list_move_tail(&rt_se->run_list, queue); in requeue_rt_entity()
1539 struct list_head *queue; in pick_next_rt_entity() local
[all …]
Dsched.h158 struct list_head queue[MAX_RT_PRIO]; member
/kernel/locking/
Dqspinlock.c419 goto queue; in queued_spin_lock_slowpath()
445 goto queue; in queued_spin_lock_slowpath()
492 queue: in queued_spin_lock_slowpath()
/kernel/
Dpadata.c111 struct padata_parallel_queue *queue; in padata_do_parallel() local
138 queue = per_cpu_ptr(pd->pqueue, target_cpu); in padata_do_parallel()
140 spin_lock(&queue->parallel.lock); in padata_do_parallel()
141 list_add_tail(&padata->list, &queue->parallel.list); in padata_do_parallel()
142 spin_unlock(&queue->parallel.lock); in padata_do_parallel()
144 queue_work_on(target_cpu, pinst->wq, &queue->work); in padata_do_parallel()
Dsignal.c406 void flush_sigqueue(struct sigpending *queue) in flush_sigqueue() argument
410 sigemptyset(&queue->signal); in flush_sigqueue()
411 while (!list_empty(&queue->list)) { in flush_sigqueue()
412 q = list_entry(queue->list.next, struct sigqueue, list); in flush_sigqueue()
/kernel/trace/
DKconfig430 on a given queue. Tracing allows you to see any traffic happening
431 on a block device queue. For more information (and the userspace