Searched refs:queue (Results 1 – 10 of 10) sorted by relevance
/kernel/bpf/
cpumap.c
     65:  struct ptr_ring *queue;  [member]
    275:  while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {  [in cpu_map_kthread_run()]
    285:  if (__ptr_ring_empty(rcpu->queue)) {  [in cpu_map_kthread_run()]
    288:  if (__ptr_ring_empty(rcpu->queue)) {  [in cpu_map_kthread_run()]
    305:  n = __ptr_ring_consume_batched(rcpu->queue, frames,  [in cpu_map_kthread_run()]
    416:  rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,  [in __cpu_map_entry_alloc()]
    418:  if (!rcpu->queue)  [in __cpu_map_entry_alloc()]
    421:  err = ptr_ring_init(rcpu->queue, value->qsize, gfp);  [in __cpu_map_entry_alloc()]
    456:  ptr_ring_cleanup(rcpu->queue, NULL);  [in __cpu_map_entry_alloc()]
    458:  kfree(rcpu->queue);  [in __cpu_map_entry_alloc()]
    [all …]
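These hits are the per-CPU ring that cpumap uses to hand frames to a dedicated kthread. Below is a minimal sketch of the same batched-consume loop, assuming a single consumer (which is what makes the unlocked __ptr_ring_* variants safe here); BATCH, drain_one() and consumer_thread() are illustrative names, not kernel symbols:

#include <linux/kthread.h>
#include <linux/ptr_ring.h>
#include <linux/sched.h>

#define BATCH 8	/* cpumap itself uses CPUMAP_BATCH */

static void drain_one(void *frame)
{
	/* hypothetical per-entry handler */
}

static int consumer_thread(void *arg)
{
	struct ptr_ring *ring = arg;	/* set up with ptr_ring_init() */
	void *frames[BATCH];
	int i, n;

	/*
	 * Drain until we are told to stop *and* the ring is empty, the
	 * same exit condition as cpu_map_kthread_run(), so nothing
	 * queued during shutdown is lost.  The unlocked __ptr_ring_*
	 * calls are safe only because this is the sole consumer.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(ring)) {
		n = __ptr_ring_consume_batched(ring, frames, BATCH);
		if (!n) {
			schedule_timeout_interruptible(1);
			continue;
		}
		for (i = 0; i < n; i++)
			drain_one(frames[i]);
	}
	return 0;
}
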
/kernel/
watch_queue.c
    216:  wqueue = rcu_dereference(watch->queue);  [in __post_watch_notification()]
    418:  put_watch_queue(rcu_access_pointer(watch->queue));  [in free_watch()]
    451:  rcu_assign_pointer(watch->queue, wqueue);  [in init_watch()]
    460:  struct watch_queue *wq = rcu_access_pointer(w->queue);  [in add_one_watch()]
    500:  wqueue = rcu_access_pointer(watch->queue);  [in add_watch_to_object()]
    537:  (watch->id == id && rcu_access_pointer(watch->queue) == wq))  [in remove_watch_from_object()]
    558:  wqueue = rcu_dereference(watch->queue);  [in remove_watch_from_object()]
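watch->queue is an RCU-managed pointer, and these hits cover all three accessors: rcu_assign_pointer() to publish, rcu_dereference() when the pointee is read under rcu_read_lock(), and rcu_access_pointer() when only the pointer value itself is needed (a comparison, or handing it to a function that takes its own reference). A minimal sketch of the distinction, with hypothetical struct and function names:

#include <linux/rcupdate.h>
#include <linux/types.h>

struct target {
	int data;
};

struct holder {
	struct target __rcu *queue;	/* hypothetical RCU-managed field */
};

static void publish(struct holder *h, struct target *t)
{
	/* Publisher: orders initialisation of *t before readers can
	 * observe the pointer. */
	rcu_assign_pointer(h->queue, t);
}

static int read_data(struct holder *h)
{
	struct target *t;
	int val = -1;

	rcu_read_lock();
	/* rcu_dereference(): the pointee is accessed inside the
	 * read-side critical section. */
	t = rcu_dereference(h->queue);
	if (t)
		val = t->data;
	rcu_read_unlock();
	return val;
}

static bool same_queue(struct holder *h, struct target *t)
{
	/* rcu_access_pointer(): only the pointer value is used, so no
	 * rcu_read_lock() is required. */
	return rcu_access_pointer(h->queue) == t;
}
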
audit.c
    744:  struct sk_buff_head *queue,  [argument of kauditd_send_queue()]
    757:  skb_tail = skb_peek_tail(queue);  [in kauditd_send_queue()]
    758:  while ((skb != skb_tail) && (skb = skb_dequeue(queue))) {  [in kauditd_send_queue()]
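The loop at lines 757 and 758 drains only what was queued when it started: it snapshots the tail once, then dequeues until that snapshot is reached, so records appended concurrently wait for the next pass. The idiom in isolation; handle_skb() is a stand-in for the netlink send:

#include <linux/skbuff.h>

static void handle_skb(struct sk_buff *skb)
{
	consume_skb(skb);	/* stand-in for unicasting it to auditd */
}

static void drain_once(struct sk_buff_head *queue)
{
	struct sk_buff *skb = NULL;
	struct sk_buff *skb_tail;

	/* Snapshot the current tail; entries added after this point
	 * are deliberately left for the next invocation. */
	skb_tail = skb_peek_tail(queue);
	while ((skb != skb_tail) && (skb = skb_dequeue(queue)))
		handle_skb(skb);
}
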
signal.c
    467:  void flush_sigqueue(struct sigpending *queue)  [argument of flush_sigqueue()]
    471:  sigemptyset(&queue->signal);  [in flush_sigqueue()]
    472:  while (!list_empty(&queue->list)) {  [in flush_sigqueue()]
    473:  q = list_entry(queue->list.next, struct sigqueue, list);  [in flush_sigqueue()]
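flush_sigqueue() empties the pending mask and then detaches queued entries from the head one at a time. The same drain shape, sketched with a hypothetical struct item in place of struct sigqueue:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head link;
	int payload;
};

static void drain_list(struct list_head *head)
{
	struct item *it;

	while (!list_empty(head)) {
		/* list_entry() is container_of(): map the embedded
		 * list_head back to the structure holding it. */
		it = list_entry(head->next, struct item, link);
		list_del_init(&it->link);
		kfree(it);	/* signal.c frees via __sigqueue_free() */
	}
}
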
/kernel/locking/
qspinlock.c
    346:  goto queue;  [in queued_spin_lock_slowpath()]
    368:  goto queue;  [in queued_spin_lock_slowpath()]
    398:  queue:  [in queued_spin_lock_slowpath()]
rwsem.c
   1047:  goto queue;  [in rwsem_down_read_slowpath()]
   1080:  queue:  [in rwsem_down_read_slowpath()]
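In both locking slowpaths, queue is a label rather than a data structure: the code makes one or more optimistic attempts at the lock and jumps to queue: once it has to join the wait queue. Purely schematic; trylock_once() and enqueue_and_wait() are invented stand-ins:

#include <linux/types.h>

static bool trylock_once(void)
{
	return false;	/* placeholder for a real fast-path attempt */
}

static void enqueue_and_wait(void)
{
	/* placeholder: add ourselves to the wait queue and block */
}

static void lock_slowpath(bool can_spin)
{
	if (!can_spin)
		goto queue;	/* spinning won't help: queue immediately */

	if (trylock_once())
		return;		/* optimistic attempt succeeded */

	/* fall through: all optimistic paths exhausted */
queue:
	enqueue_and_wait();
}
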
/kernel/sched/
rt.c
    143:  INIT_LIST_HEAD(array->queue + i);  [in init_rt_rq()]
   1333:  if (list_empty(array->queue + rt_se_prio(rt_se)))  [in __delist_rt_entity()]
   1450:  struct list_head *queue = array->queue + rt_se_prio(rt_se);  [local in __enqueue_rt_entity()]
   1467:  list_add(&rt_se->run_list, queue);  [in __enqueue_rt_entity()]
   1469:  list_add_tail(&rt_se->run_list, queue);  [in __enqueue_rt_entity()]
   1608:  struct list_head *queue = array->queue + rt_se_prio(rt_se);  [local in requeue_rt_entity()]
   1611:  list_move(&rt_se->run_list, queue);  [in requeue_rt_entity()]
   1613:  list_move_tail(&rt_se->run_list, queue);  [in requeue_rt_entity()]
   1862:  struct list_head *queue;  [local in pick_next_rt_entity()]
   1868:  queue = array->queue + idx;  [in pick_next_rt_entity()]
   [all …]
sched.h
    279:  struct list_head queue[MAX_RT_PRIO];  [member]
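Taken together, the sched.h member and the rt.c hits describe the RT scheduler's priority array: one list head per priority level plus a bitmap of non-empty levels, so enqueue is a list insert plus a set bit, and picking the next entity is find-first-bit plus a list head lookup. A self-contained sketch under those assumptions (struct prio_array and struct entity are illustrative; rt.c itself uses sched_find_first_bit() rather than plain find_first_bit()):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/types.h>

#define NR_PRIO 100	/* stand-in for MAX_RT_PRIO */

struct prio_array {
	DECLARE_BITMAP(bitmap, NR_PRIO);
	struct list_head queue[NR_PRIO];
};

struct entity {
	struct list_head run_list;
	unsigned int prio;
};

static void array_init(struct prio_array *array)
{
	unsigned int i;

	bitmap_zero(array->bitmap, NR_PRIO);
	for (i = 0; i < NR_PRIO; i++)
		INIT_LIST_HEAD(array->queue + i);
}

static void array_enqueue(struct prio_array *array, struct entity *se,
			  bool head)
{
	struct list_head *queue = array->queue + se->prio;

	if (head)
		list_add(&se->run_list, queue);	/* front of its level */
	else
		list_add_tail(&se->run_list, queue);
	__set_bit(se->prio, array->bitmap);
}

static void array_dequeue(struct prio_array *array, struct entity *se)
{
	list_del_init(&se->run_list);
	/* Clear the level's bit only once its list drains empty,
	 * as __delist_rt_entity() does. */
	if (list_empty(array->queue + se->prio))
		__clear_bit(se->prio, array->bitmap);
}

static struct entity *array_pick(struct prio_array *array)
{
	unsigned int idx = find_first_bit(array->bitmap, NR_PRIO);

	if (idx >= NR_PRIO)
		return NULL;	/* nothing queued at any priority */
	return list_first_entry(array->queue + idx, struct entity, run_list);
}
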
/kernel/trace/
blktrace.c
    912:  blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0);  [in blk_add_trace_bio_bounce()]
    924:  blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,  [in blk_add_trace_bio_backmerge()]
    930:  blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,  [in blk_add_trace_bio_frontmerge()]
    936:  blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);  [in blk_add_trace_bio_queue()]
    941:  blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);  [in blk_add_trace_getrq()]
    978:  struct request_queue *q = bio->bi_bdev->bd_disk->queue;  [in blk_add_trace_split()]
   1007:  struct request_queue *q = bio->bi_bdev->bd_disk->queue;  [in blk_add_trace_bio_remap()]
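Every hit here walks the same chain: from a bio, through its block device and gendisk, to the request_queue the trace event is attributed to. Isolated as a helper; trace_bio_queue() is a made-up name:

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct request_queue *trace_bio_queue(struct bio *bio)
{
	/*
	 * The open-coded chain repeated in blktrace.c; the
	 * bdev_get_queue(bio->bi_bdev) helper expresses the same walk.
	 */
	return bio->bi_bdev->bd_disk->queue;
}
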
Kconfig
    654:  on a given queue. Tracing allows you to see any traffic happening
    655:  on a block device queue. For more information (and the userspace