Searched refs:queue (Results 1 – 9 of 9) sorted by relevance
/kernel/bpf/ |
D | cpumap.c | 62 struct ptr_ring *queue; member 221 __cpu_map_ring_cleanup(rcpu->queue); in put_cpu_map_entry() 222 ptr_ring_cleanup(rcpu->queue, NULL); in put_cpu_map_entry() 223 kfree(rcpu->queue); in put_cpu_map_entry() 310 while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run() 319 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run() 322 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run() 337 n = __ptr_ring_consume_batched(rcpu->queue, frames, in cpu_map_kthread_run() 441 rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa); in __cpu_map_entry_alloc() 442 if (!rcpu->queue) in __cpu_map_entry_alloc() [all …]
|
/kernel/ |
D | watch_queue.c | 220 wqueue = rcu_dereference(watch->queue); in __post_watch_notification() 425 put_watch_queue(rcu_access_pointer(watch->queue)); in free_watch() 458 rcu_assign_pointer(watch->queue, wqueue); in init_watch() 467 struct watch_queue *wq = rcu_access_pointer(w->queue); in add_one_watch() 507 wqueue = rcu_access_pointer(watch->queue); in add_watch_to_object() 544 (watch->id == id && rcu_access_pointer(watch->queue) == wq)) in remove_watch_from_object() 565 wqueue = rcu_dereference(watch->queue); in remove_watch_from_object()
|
D | audit.c | 747 struct sk_buff_head *queue, in kauditd_send_queue() argument 760 skb_tail = skb_peek_tail(queue); in kauditd_send_queue() 761 while ((skb != skb_tail) && (skb = skb_dequeue(queue))) { in kauditd_send_queue()
|
D | signal.c | 464 void flush_sigqueue(struct sigpending *queue) in flush_sigqueue() argument 468 sigemptyset(&queue->signal); in flush_sigqueue() 469 while (!list_empty(&queue->list)) { in flush_sigqueue() 470 q = list_entry(queue->list.next, struct sigqueue, list); in flush_sigqueue()
|
/kernel/locking/ |
D | qspinlock.c | 345 goto queue; in queued_spin_lock_slowpath() 367 goto queue; in queued_spin_lock_slowpath() 397 queue: in queued_spin_lock_slowpath()
|
D | rwsem.c | 1020 goto queue; in rwsem_down_read_slowpath() 1049 queue: in rwsem_down_read_slowpath()
|
/kernel/sched/ |
D | rt.c | 92 INIT_LIST_HEAD(array->queue + i); in init_rt_rq() 1286 if (list_empty(array->queue + rt_se_prio(rt_se))) in __delist_rt_entity() 1297 struct list_head *queue = array->queue + rt_se_prio(rt_se); in __enqueue_rt_entity() local 1314 list_add(&rt_se->run_list, queue); in __enqueue_rt_entity() 1316 list_add_tail(&rt_se->run_list, queue); in __enqueue_rt_entity() 1448 struct list_head *queue = array->queue + rt_se_prio(rt_se); in requeue_rt_entity() local 1451 list_move(&rt_se->run_list, queue); in requeue_rt_entity() 1453 list_move_tail(&rt_se->run_list, queue); in requeue_rt_entity() 1701 struct list_head *queue; in pick_next_rt_entity() local 1707 queue = array->queue + idx; in pick_next_rt_entity() [all …]
|
D | sched.h | 257 struct list_head queue[MAX_RT_PRIO]; member
|
/kernel/trace/ |
D | Kconfig | 516 on a given queue. Tracing allows you to see any traffic happening 517 on a block device queue. For more information (and the userspace
|