Searched refs:queue (Results 1 – 10 of 10) sorted by relevance
/kernel/bpf/

cpumap.c
    65  struct ptr_ring *queue;                                    member
   158  __cpu_map_ring_cleanup(rcpu->queue);                       in put_cpu_map_entry()
   159  ptr_ring_cleanup(rcpu->queue, NULL);                       in put_cpu_map_entry()
   160  kfree(rcpu->queue);                                        in put_cpu_map_entry()
   318  while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {  in cpu_map_kthread_run()
   328  if (__ptr_ring_empty(rcpu->queue)) {                       in cpu_map_kthread_run()
   331  if (__ptr_ring_empty(rcpu->queue)) {                       in cpu_map_kthread_run()
   346  n = __ptr_ring_consume_batched(rcpu->queue, frames,        in cpu_map_kthread_run()
   458  rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,  in __cpu_map_entry_alloc()
   460  if (!rcpu->queue)                                          in __cpu_map_entry_alloc()
  [all …]
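These cpumap.c hits cover the full ptr_ring lifecycle: the ring is allocated in __cpu_map_entry_alloc(), drained by a kthread in cpu_map_kthread_run(), and torn down in put_cpu_map_entry(). A minimal sketch of the drain loop's shape (simplified; the real kthread sleeps on a proper wait mechanism rather than calling cond_resched(), and frame processing is elided):

    #include <linux/kthread.h>
    #include <linux/ptr_ring.h>
    #include <linux/sched.h>

    #define BATCH 8

    static int drain_thread(void *data)
    {
            struct ptr_ring *ring = data;
            void *frames[BATCH];
            int i, n;

            /* Run until told to stop AND the ring is empty, so entries
             * queued during shutdown are not lost. The __ptr_ring_*
             * helpers assume a single (or locked) consumer. */
            while (!kthread_should_stop() || !__ptr_ring_empty(ring)) {
                    if (__ptr_ring_empty(ring)) {
                            cond_resched();          /* real code sleeps here */
                            continue;
                    }
                    n = __ptr_ring_consume_batched(ring, frames, BATCH);
                    for (i = 0; i < n; i++)
                            ;                        /* process frames[i] */
            }
            return 0;
    }
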
/kernel/ |
watch_queue.c
   217  wqueue = rcu_dereference(watch->queue);                    in __post_watch_notification()
   419  put_watch_queue(rcu_access_pointer(watch->queue));         in free_watch()
   452  rcu_assign_pointer(watch->queue, wqueue);                  in init_watch()
   461  struct watch_queue *wq = rcu_access_pointer(w->queue);     in add_one_watch()
   501  wqueue = rcu_access_pointer(watch->queue);                 in add_watch_to_object()
   538  (watch->id == id && rcu_access_pointer(watch->queue) == wq))  in remove_watch_from_object()
   559  wqueue = rcu_dereference(watch->queue);                    in remove_watch_from_object()
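watch_queue.c is a compact tour of the three RCU pointer idioms: rcu_assign_pointer() publishes, rcu_dereference() reads under rcu_read_lock() when the pointer will be dereferenced, and rcu_access_pointer() fetches the value when it is only compared or passed along. A minimal sketch, assuming an illustrative container struct (struct holder is not the kernel's struct watch):

    #include <linux/rcupdate.h>

    struct watch_queue;                         /* opaque here */

    struct holder {                             /* illustrative, not a kernel type */
            struct watch_queue __rcu *queue;
    };

    static void publish(struct holder *h, struct watch_queue *wq)
    {
            rcu_assign_pointer(h->queue, wq);   /* write side: publish with barrier */
    }

    static struct watch_queue *peek(struct holder *h)
    {
            /* No rcu_read_lock() needed: value is returned, not dereferenced. */
            return rcu_access_pointer(h->queue);
    }

    static void use(struct holder *h)
    {
            struct watch_queue *wq;

            rcu_read_lock();
            wq = rcu_dereference(h->queue);     /* safe to dereference until unlock */
            (void)wq;                           /* ... use wq here ... */
            rcu_read_unlock();
    }
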
audit.c
   745  struct sk_buff_head *queue,                                in kauditd_send_queue() argument
   758  skb_tail = skb_peek_tail(queue);                           in kauditd_send_queue()
   759  while ((skb != skb_tail) && (skb = skb_dequeue(queue))) {  in kauditd_send_queue()
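kauditd_send_queue() snapshots the tail with skb_peek_tail() before draining, so it processes only the records present when the pass started; anything queued concurrently waits for the next pass. The loop, reduced to its skeleton (netlink send and error/retry handling omitted):

    #include <linux/skbuff.h>

    static void drain_one_pass(struct sk_buff_head *queue)
    {
            struct sk_buff *skb = NULL;
            struct sk_buff *skb_tail;

            skb_tail = skb_peek_tail(queue);        /* snapshot current end */
            while ((skb != skb_tail) && (skb = skb_dequeue(queue))) {
                    /* ... transmit the record here ... */
                    consume_skb(skb);   /* skb's value is only compared
                                         * afterwards, never dereferenced */
            }
    }
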
signal.c
   465  void flush_sigqueue(struct sigpending *queue)              in flush_sigqueue() argument
   469  sigemptyset(&queue->signal);                               in flush_sigqueue()
   470  while (!list_empty(&queue->list)) {                        in flush_sigqueue()
   471  q = list_entry(queue->list.next, struct sigqueue, list);   in flush_sigqueue()
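flush_sigqueue() is the canonical intrusive-list drain: take the first node, recover its container with list_entry(), unlink it, free it, repeat until the list is empty. A generic sketch (struct pending_item stands in for struct sigqueue and is not a kernel type):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct pending_item {
            struct list_head list;              /* linked into the queue */
    };

    static void drain_pending(struct list_head *head)
    {
            struct pending_item *q;

            while (!list_empty(head)) {
                    q = list_entry(head->next, struct pending_item, list);
                    list_del_init(&q->list);    /* unlink before freeing */
                    kfree(q);                   /* assumes kmalloc()'d items */
            }
    }
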
/kernel/locking/ |
qspinlock.c
   346  goto queue;                                                in queued_spin_lock_slowpath()
   368  goto queue;                                                in queued_spin_lock_slowpath()
   398  queue:                                                     in queued_spin_lock_slowpath()
rwsem.c
  1036  goto queue;                                                in rwsem_down_read_slowpath()
  1061  queue:                                                     in rwsem_down_read_slowpath()
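In both qspinlock.c and rwsem.c, "queue" is a label rather than a data structure: the slowpaths attempt optimistic shortcuts first and jump forward to a single queue: label once the task genuinely has to wait in line. The control-flow shape, reduced to a compilable stub (contended(), try_shortcut(), and wait_in_queue() are hypothetical placeholders, not kernel functions):

    #include <stdbool.h>

    static bool contended(void)     { return true;  }   /* stub */
    static bool try_shortcut(void)  { return false; }   /* stub */
    static void wait_in_queue(void) { }                 /* stub */

    void lock_slowpath(void)
    {
            if (contended())
                    goto queue;         /* early bail-out to the slow tail */

            if (try_shortcut())
                    return;             /* acquired without queueing */

    queue:
            wait_in_queue();            /* single shared enqueue-and-wait path */
    }
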
/kernel/sched/ |
rt.c
   143  INIT_LIST_HEAD(array->queue + i);                          in init_rt_rq()
  1333  if (list_empty(array->queue + rt_se_prio(rt_se)))          in __delist_rt_entity()
  1450  struct list_head *queue = array->queue + rt_se_prio(rt_se);  in __enqueue_rt_entity() local
  1467  list_add(&rt_se->run_list, queue);                         in __enqueue_rt_entity()
  1469  list_add_tail(&rt_se->run_list, queue);                    in __enqueue_rt_entity()
  1608  struct list_head *queue = array->queue + rt_se_prio(rt_se);  in requeue_rt_entity() local
  1611  list_move(&rt_se->run_list, queue);                        in requeue_rt_entity()
  1613  list_move_tail(&rt_se->run_list, queue);                   in requeue_rt_entity()
  1865  struct list_head *queue;                                   in pick_next_rt_entity() local
  1871  queue = array->queue + idx;                                in pick_next_rt_entity()
  [all …]
sched.h
   278  struct list_head queue[MAX_RT_PRIO];                       member
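Together, the rt.c hits and this sched.h member implement the classic O(1) priority array: one FIFO list per priority level plus a bitmap, so picking the next entity is "find the first set bit, take the head of that list". A condensed sketch of the idea (the helpers are illustrative; rt.c itself uses sched_find_first_bit() and per-entity run_list nodes):

    #include <linux/bitmap.h>
    #include <linux/list.h>

    #define NPRIO 100                      /* MAX_RT_PRIO is 100 in the kernel */

    struct prio_array {
            DECLARE_BITMAP(bitmap, NPRIO);
            struct list_head queue[NPRIO];
    };

    static void enqueue(struct prio_array *a, struct list_head *node, int prio)
    {
            list_add_tail(node, a->queue + prio);   /* tail keeps FIFO order */
            __set_bit(prio, a->bitmap);
    }

    static struct list_head *pick_first(struct prio_array *a)
    {
            int idx = find_first_bit(a->bitmap, NPRIO);

            if (idx >= NPRIO)
                    return NULL;            /* nothing queued anywhere */
            /* A dequeue must clear the bit once queue[idx] empties. */
            return a->queue[idx].next;      /* head of the best list */
    }
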
/kernel/trace/ |
blktrace.c
   915  blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0);    in blk_add_trace_bio_bounce()
   927  blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,     in blk_add_trace_bio_backmerge()
   933  blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,    in blk_add_trace_bio_frontmerge()
   939  blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);     in blk_add_trace_bio_queue()
   944  blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);     in blk_add_trace_getrq()
   981  struct request_queue *q = bio->bi_bdev->bd_disk->queue;                    in blk_add_trace_split()
  1010  struct request_queue *q = bio->bi_bdev->bd_disk->queue;                    in blk_add_trace_bio_remap()
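Every blktrace hit walks the same chain to attribute an event to a queue: the bio's bi_bdev (the block_device), its bd_disk (the gendisk), and finally that disk's request_queue. Sketched as a helper (recent kernels also offer bdev_get_queue() as a shorthand):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static struct request_queue *bio_request_queue(struct bio *bio)
    {
            /* bio -> struct block_device -> struct gendisk -> request_queue */
            return bio->bi_bdev->bd_disk->queue;
    }
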
Kconfig
   625  on a given queue. Tracing allows you to see any traffic happening
   626  on a block device queue. For more information (and the userspace