
Searched refs:queue (Results 1 – 9 of 9) sorted by relevance

/kernel/bpf/
cpumap.c
62 struct ptr_ring *queue; member
221 __cpu_map_ring_cleanup(rcpu->queue); in put_cpu_map_entry()
222 ptr_ring_cleanup(rcpu->queue, NULL); in put_cpu_map_entry()
223 kfree(rcpu->queue); in put_cpu_map_entry()
310 while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
319 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
322 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
337 n = __ptr_ring_consume_batched(rcpu->queue, frames, in cpu_map_kthread_run()
441 rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa); in __cpu_map_entry_alloc()
442 if (!rcpu->queue) in __cpu_map_entry_alloc()
[all …]
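The cpumap.c hits trace one pattern: a per-entry ptr_ring that a dedicated kthread drains in batches, torn down when the entry's last reference goes away. A minimal sketch of that pattern, using hypothetical names (struct my_entry, my_entry_alloc, my_kthread_run, MY_BATCH) rather than the real bpf_cpu_map_entry machinery:

#include <linux/kthread.h>
#include <linux/ptr_ring.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define MY_BATCH 8      /* hypothetical batch size */

struct my_entry {
        struct ptr_ring *queue;         /* same role as rcpu->queue above */
};

/* Allocate and size the ring, roughly as __cpu_map_entry_alloc() does. */
static struct my_entry *my_entry_alloc(int qsize, int numa)
{
        struct my_entry *e = kzalloc_node(sizeof(*e), GFP_KERNEL, numa);

        if (!e)
                return NULL;
        e->queue = kzalloc_node(sizeof(*e->queue), GFP_KERNEL, numa);
        if (!e->queue || ptr_ring_init(e->queue, qsize, GFP_KERNEL)) {
                kfree(e->queue);
                kfree(e);
                return NULL;
        }
        return e;
}

/* Consumer loop in the style of cpu_map_kthread_run(). */
static int my_kthread_run(void *data)
{
        struct my_entry *e = data;
        void *frames[MY_BATCH];
        int i, n;

        while (!kthread_should_stop() || !__ptr_ring_empty(e->queue)) {
                if (__ptr_ring_empty(e->queue)) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        /* Recheck after marking ourselves sleepy. */
                        if (__ptr_ring_empty(e->queue))
                                schedule();
                        else
                                __set_current_state(TASK_RUNNING);
                        continue;
                }
                n = __ptr_ring_consume_batched(e->queue, frames, MY_BATCH);
                for (i = 0; i < n; i++)
                        ;       /* process frames[i] */
        }
        return 0;
}

/* Teardown mirrors put_cpu_map_entry(): cleanup the ring, then free it. */
static void my_entry_free(struct my_entry *e)
{
        /* NULL destructor: the sketch assumes the ring was drained already. */
        ptr_ring_cleanup(e->queue, NULL);
        kfree(e->queue);
        kfree(e);
}

The double __ptr_ring_empty() check before sleeping matches lines 319/322 above: the kthread marks itself sleepy first so a producer's wakeup issued in between cannot be lost.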
/kernel/
watch_queue.c
220 wqueue = rcu_dereference(watch->queue); in __post_watch_notification()
425 put_watch_queue(rcu_access_pointer(watch->queue)); in free_watch()
458 rcu_assign_pointer(watch->queue, wqueue); in init_watch()
467 struct watch_queue *wq = rcu_access_pointer(w->queue); in add_one_watch()
507 wqueue = rcu_access_pointer(watch->queue); in add_watch_to_object()
544 (watch->id == id && rcu_access_pointer(watch->queue) == wq)) in remove_watch_from_object()
565 wqueue = rcu_dereference(watch->queue); in remove_watch_from_object()
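The watch_queue.c matches all touch a single RCU-managed pointer, watch->queue: rcu_assign_pointer() publishes it in init_watch(), rcu_dereference() reads it under rcu_read_lock() where the queue will actually be used, and rcu_access_pointer() is used where only the pointer value matters. A hedged sketch of that access pattern, with made-up my_watch/my_queue types:

#include <linux/rcupdate.h>

struct my_queue {
        int id;
};

struct my_watch {
        struct my_queue __rcu *queue;   /* plays the role of watch->queue */
};

/* Publish the queue pointer, as init_watch() does. */
static void my_init_watch(struct my_watch *w, struct my_queue *q)
{
        rcu_assign_pointer(w->queue, q);
}

/* Dereference under rcu_read_lock(), as __post_watch_notification() does. */
static int my_queue_id(struct my_watch *w)
{
        struct my_queue *q;
        int id = -1;

        rcu_read_lock();
        q = rcu_dereference(w->queue);
        if (q)
                id = q->id;
        rcu_read_unlock();
        return id;
}

/*
 * Where only the pointer value is compared or handed on, the lighter
 * rcu_access_pointer() suffices, as in remove_watch_from_object().
 */
static bool my_watch_uses(struct my_watch *w, struct my_queue *q)
{
        return rcu_access_pointer(w->queue) == q;
}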
audit.c
747 struct sk_buff_head *queue, in kauditd_send_queue() argument
760 skb_tail = skb_peek_tail(queue); in kauditd_send_queue()
761 while ((skb != skb_tail) && (skb = skb_dequeue(queue))) { in kauditd_send_queue()
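kauditd_send_queue() snapshots the tail with skb_peek_tail() and only dequeues up to that skb, so buffers queued while the loop runs wait for the next pass. A rough sketch of that bounded-drain idiom, with a hypothetical my_send_queue() and a transmit callback assumed to consume the skb:

#include <linux/skbuff.h>

/*
 * Drain @queue only up to the skb that was at the tail when we started,
 * in the style of kauditd_send_queue(); entries queued concurrently are
 * left for a later pass.  @xmit is assumed to consume the skb.
 */
static void my_send_queue(struct sk_buff_head *queue,
                          void (*xmit)(struct sk_buff *skb))
{
        struct sk_buff *skb = NULL;
        struct sk_buff *skb_tail;

        skb_tail = skb_peek_tail(queue);
        while ((skb != skb_tail) && (skb = skb_dequeue(queue)))
                xmit(skb);
}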
signal.c
464 void flush_sigqueue(struct sigpending *queue) in flush_sigqueue() argument
468 sigemptyset(&queue->signal); in flush_sigqueue()
469 while (!list_empty(&queue->list)) { in flush_sigqueue()
470 q = list_entry(queue->list.next, struct sigqueue, list); in flush_sigqueue()
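flush_sigqueue() clears the pending signal set and then pops every struct sigqueue off the list head until it is empty. The same drain loop, reduced to a generic list (my_pending/my_node stand in for sigpending/sigqueue):

#include <linux/list.h>
#include <linux/slab.h>

struct my_node {
        struct list_head list;
        int payload;
};

struct my_pending {
        struct list_head list;  /* like sigpending.list */
};

/* Pop and free every queued node, in the style of flush_sigqueue(). */
static void my_flush_pending(struct my_pending *pending)
{
        struct my_node *n;

        while (!list_empty(&pending->list)) {
                n = list_entry(pending->list.next, struct my_node, list);
                list_del_init(&n->list);
                kfree(n);
        }
}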
/kernel/locking/
qspinlock.c
345 goto queue; in queued_spin_lock_slowpath()
367 goto queue; in queued_spin_lock_slowpath()
397 queue: in queued_spin_lock_slowpath()
rwsem.c
1020 goto queue; in rwsem_down_read_slowpath()
1049 queue: in rwsem_down_read_slowpath()
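In qspinlock.c and rwsem.c the matches are control flow rather than a data structure: each slowpath tries to take the lock opportunistically and jumps to a queue: label once the caller has to line up behind existing waiters. A heavily simplified sketch of that shape, with a made-up my_lock type and none of the real MCS/waiter-list machinery:

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/types.h>

struct my_lock {
        atomic_t val;           /* 0 = unlocked, 1 = locked */
};

/* Skeleton of a queued slowpath: spin briefly, else "goto queue". */
static void my_lock_slowpath(struct my_lock *lock, bool can_spin)
{
        int spins = 64;         /* arbitrary spin budget for the sketch */

        if (!can_spin)
                goto queue;     /* contended enough that spinning is pointless */

        while (spins-- > 0) {
                if (atomic_cmpxchg(&lock->val, 0, 1) == 0)
                        return; /* acquired without queueing */
                cpu_relax();
        }

queue:
        /*
         * Real code links a wait node into the waiter queue here and spins
         * or sleeps on it; the sketch just keeps retrying.
         */
        while (atomic_cmpxchg(&lock->val, 0, 1) != 0)
                cpu_relax();
}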
/kernel/sched/
rt.c
92 INIT_LIST_HEAD(array->queue + i); in init_rt_rq()
1286 if (list_empty(array->queue + rt_se_prio(rt_se))) in __delist_rt_entity()
1297 struct list_head *queue = array->queue + rt_se_prio(rt_se); in __enqueue_rt_entity() local
1314 list_add(&rt_se->run_list, queue); in __enqueue_rt_entity()
1316 list_add_tail(&rt_se->run_list, queue); in __enqueue_rt_entity()
1448 struct list_head *queue = array->queue + rt_se_prio(rt_se); in requeue_rt_entity() local
1451 list_move(&rt_se->run_list, queue); in requeue_rt_entity()
1453 list_move_tail(&rt_se->run_list, queue); in requeue_rt_entity()
1701 struct list_head *queue; in pick_next_rt_entity() local
1707 queue = array->queue + idx; in pick_next_rt_entity()
[all …]
sched.h
257 struct list_head queue[MAX_RT_PRIO]; member
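Together, the rt.c and sched.h hits describe the RT runqueue's priority array: one list_head per priority in array->queue (sched.h line 257), each list initialised in init_rt_rq(), entities added at the head or tail of their priority's list, and the picker indexing straight into the array. A cut-down sketch with illustrative names (MY_NR_PRIO, my_prio_array, my_entity) that omits the bitmap rt.c uses to find the highest populated priority:

#include <linux/list.h>
#include <linux/types.h>

#define MY_NR_PRIO 100  /* stands in for MAX_RT_PRIO */

struct my_entity {
        struct list_head run_list;
        int prio;
};

struct my_prio_array {
        struct list_head queue[MY_NR_PRIO];     /* like rt_prio_array.queue */
};

/* Mirror of the INIT_LIST_HEAD(array->queue + i) loop in init_rt_rq(). */
static void my_array_init(struct my_prio_array *array)
{
        int i;

        for (i = 0; i < MY_NR_PRIO; i++)
                INIT_LIST_HEAD(array->queue + i);
}

/* Enqueue at head or tail of the priority's list, as __enqueue_rt_entity(). */
static void my_enqueue(struct my_prio_array *array, struct my_entity *se,
                       bool head)
{
        struct list_head *queue = array->queue + se->prio;

        if (head)
                list_add(&se->run_list, queue);
        else
                list_add_tail(&se->run_list, queue);
}

/* Requeue within the same priority list, as requeue_rt_entity(). */
static void my_requeue(struct my_prio_array *array, struct my_entity *se,
                       bool head)
{
        struct list_head *queue = array->queue + se->prio;

        if (head)
                list_move(&se->run_list, queue);
        else
                list_move_tail(&se->run_list, queue);
}

/*
 * Pick the first entity at a given priority, as pick_next_rt_entity() does
 * after finding the highest set bit; NULL if that level is empty.
 */
static struct my_entity *my_pick(struct my_prio_array *array, int prio)
{
        struct list_head *queue = array->queue + prio;

        if (list_empty(queue))
                return NULL;
        return list_first_entry(queue, struct my_entity, run_list);
}

Keeping one list per priority makes enqueue, dequeue and requeue O(1); only locating the highest non-empty level needs the bitmap that this sketch leaves out.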
/kernel/trace/
Kconfig
516 on a given queue. Tracing allows you to see any traffic happening
517 on a block device queue. For more information (and the userspace