
Searched refs:list (Results 1 – 17 of 17) sorted by relevance

/block/
holder.c
6 struct list_head list; member
16 list_for_each_entry(holder, &disk->slave_bdevs, list) in bd_find_holder_disk()
98 INIT_LIST_HEAD(&holder->list); in bd_link_disk_holder()
109 list_add(&holder->list, &disk->slave_bdevs); in bd_link_disk_holder()
149 list_del_init(&holder->list); in bd_unlink_disk_holder()
162 list_for_each_entry(holder, &disk->slave_bdevs, list) { in bd_register_pending_holders()
171 list_for_each_entry_continue_reverse(holder, &disk->slave_bdevs, list) in bd_register_pending_holders()
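
The holder.c hits above cover the complete lifecycle of an intrusive list node: an embedded list_head member, INIT_LIST_HEAD(), list_add(), list_for_each_entry(), and list_del_init(). A minimal kernel-context sketch of the same pattern follows; my_holder and my_disk_list are hypothetical names standing in for the real bd_holder_disk machinery:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    /* Hypothetical node type; the embedded list_head links it into a
     * list, mirroring the "struct list_head list; member" hit above. */
    struct my_holder {
            int refcnt;
            struct list_head list;
    };

    static LIST_HEAD(my_disk_list);        /* declares and initializes the head */

    static int my_link_holder(gfp_t gfp)
    {
            struct my_holder *h = kzalloc(sizeof(*h), gfp);

            if (!h)
                    return -ENOMEM;
            INIT_LIST_HEAD(&h->list);               /* as in bd_link_disk_holder() */
            list_add(&h->list, &my_disk_list);      /* push to the front */
            return 0;
    }

    static struct my_holder *my_find_holder(void)
    {
            struct my_holder *h;

            /* walk every node, as in bd_find_holder_disk() */
            list_for_each_entry(h, &my_disk_list, list)
                    if (h->refcnt)
                            return h;
            return NULL;
    }

    static void my_unlink_holder(struct my_holder *h)
    {
            /* list_del_init() re-initializes the node, so a later
             * list_empty(&h->list) check stays well-defined */
            list_del_init(&h->list);
            kfree(h);
    }
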
blk-mq.h
49 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
76 struct list_head *list);
81 struct list_head *list);
310 static inline void blk_mq_free_requests(struct list_head *list) in blk_mq_free_requests() argument
312 while (!list_empty(list)) { in blk_mq_free_requests()
313 struct request *rq = list_entry_rq(list->next); in blk_mq_free_requests()
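
blk_mq_free_requests() at blk-mq.h line 310 is the standard drain idiom: pop the first entry until the list is empty. The same loop in a generic, hedged sketch; my_req is hypothetical, and list_entry_rq() in blk-mq is just list_entry()/container_of() specialized for struct request:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct my_req {
            struct list_head queuelist;
    };

    /* Drain-and-free idiom, mirroring blk_mq_free_requests(): keep
     * taking the first entry until the list is empty. */
    static void my_free_all(struct list_head *list)
    {
            while (!list_empty(list)) {
                    struct my_req *rq = list_first_entry(list, struct my_req,
                                                         queuelist);

                    list_del(&rq->queuelist);       /* unlink before freeing */
                    kfree(rq);
            }
    }
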
blk-mq.c
582 static void blk_complete_reqs(struct llist_head *list) in blk_complete_reqs() argument
584 struct llist_node *entry = llist_reverse_order(llist_del_all(list)); in blk_complete_reqs()
635 struct llist_head *list; in blk_mq_complete_send_ipi() local
639 list = &per_cpu(blk_cpu_done, cpu); in blk_mq_complete_send_ipi()
640 if (llist_add(&rq->ipi_list, list)) { in blk_mq_complete_send_ipi()
648 struct llist_head *list; in blk_mq_raise_softirq() local
651 list = this_cpu_ptr(&blk_cpu_done); in blk_mq_raise_softirq()
652 if (llist_add(&rq->ipi_list, list)) in blk_mq_raise_softirq()
998 struct list_head *list; member
1009 list_splice_tail_init(&ctx->rq_lists[type], flush_data->list); in flush_busy_ctx()
[all …]
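
The blk-mq.c hits show the lock-free llist variant: producers push completed requests with llist_add(), which reports whether the list was previously empty (so blk_mq_complete_send_ipi() knows whether an IPI or softirq must be raised at all), and the consumer detaches the whole chain with llist_del_all(), then flips it with llist_reverse_order() to recover FIFO order. A minimal sketch with hypothetical my_done/my_done_list names:

    #include <linux/llist.h>
    #include <linux/slab.h>

    struct my_done {
            struct llist_node node;
    };

    static LLIST_HEAD(my_done_list);

    /* Producer side: llist_add() is lock-free and returns true when
     * the list was previously empty. */
    static bool my_complete(struct my_done *d)
    {
            return llist_add(&d->node, &my_done_list);
    }

    /* Consumer side, as in blk_complete_reqs(): detach everything at
     * once, then reverse the LIFO chain so entries are handled in
     * arrival order. */
    static void my_flush(void)
    {
            struct llist_node *entry;
            struct my_done *d, *tmp;

            entry = llist_reverse_order(llist_del_all(&my_done_list));
            llist_for_each_entry_safe(d, tmp, entry, node)
                    kfree(d);       /* process, then free */
    }
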
blk-flush.c
186 list_move_tail(&rq->flush.list, pending); in blk_flush_complete_seq()
190 list_move_tail(&rq->flush.list, &fq->flush_data_in_flight); in blk_flush_complete_seq()
202 list_del_init(&rq->flush.list); in blk_flush_complete_seq()
257 list_for_each_entry_safe(rq, n, running, flush.list) { in flush_end_io()
290 list_first_entry(pending, struct request, flush.list); in blk_kick_flush()
435 INIT_LIST_HEAD(&rq->flush.list); in blk_insert_flush()
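
blk-flush.c moves a request's flush.list node between per-state queues with list_move_tail(), and flush_end_io() walks with the _safe iterator because entries may be unlinked mid-walk. Both idioms in a compressed sketch over hypothetical types:

    #include <linux/list.h>

    struct my_flush_rq {
            bool done;
            struct list_head list;
    };

    /* Re-queue without a separate del/add pair: list_move_tail()
     * unlinks the node and appends it to the destination in one call,
     * as blk_flush_complete_seq() does when advancing a sequence. */
    static void my_advance(struct my_flush_rq *rq, struct list_head *next_q)
    {
            list_move_tail(&rq->list, next_q);
    }

    /* The _safe iterator caches the next node up front, so the current
     * entry may be unlinked inside the loop body; this is why
     * flush_end_io() uses it while completing requests. */
    static void my_end_io(struct list_head *running, struct list_head *done_q)
    {
            struct my_flush_rq *rq, *n;

            list_for_each_entry_safe(rq, n, running, list)
                    if (rq->done)
                            list_move_tail(&rq->list, done_q);
    }
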
blk-mq-sched.c
469 LIST_HEAD(list); in blk_mq_sched_insert_request()
471 list_add(&rq->queuelist, &list); in blk_mq_sched_insert_request()
472 e->type->ops.insert_requests(hctx, &list, at_head); in blk_mq_sched_insert_request()
486 struct list_head *list, bool run_queue_async) in blk_mq_sched_insert_requests() argument
500 e->type->ops.insert_requests(hctx, list, false); in blk_mq_sched_insert_requests()
508 blk_mq_try_issue_list_directly(hctx, list); in blk_mq_sched_insert_requests()
509 if (list_empty(list)) in blk_mq_sched_insert_requests()
512 blk_mq_insert_requests(hctx, ctx, list); in blk_mq_sched_insert_requests()
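
blk_mq_sched_insert_request() at line 469 builds a one-element batch on the stack so a single request can pass through the scheduler's list-based ->insert_requests() hook. The idiom in isolation, with a hypothetical my_insert_requests() standing in for e->type->ops.insert_requests():

    #include <linux/list.h>

    struct my_rq {
            struct list_head queuelist;
    };

    /* Hypothetical per-scheduler hook that takes a whole batch. */
    extern void my_insert_requests(struct list_head *list, bool at_head);

    static void my_insert_one(struct my_rq *rq, bool at_head)
    {
            LIST_HEAD(list);        /* on-stack, already-initialized head */

            list_add(&rq->queuelist, &list);        /* batch of exactly one */
            my_insert_requests(&list, at_head);
    }
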
blk-stat.c
65 list_for_each_entry_rcu(cb, &q->stats->callbacks, list) { in blk_stat_add()
152 list_add_tail_rcu(&cb->list, &q->stats->callbacks); in blk_stat_add_callback()
163 list_del_rcu(&cb->list); in blk_stat_remove_callback()
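
The blk-stat.c hits are the RCU-protected list pattern: writers serialize on a lock and publish with the _rcu add/del variants, while readers walk the list with no lock at all, only an RCU read-side section, which is what lets blk_stat_add() run in the I/O hot path. A sketch with hypothetical names (the lock name is an assumption):

    #include <linux/rculist.h>
    #include <linux/spinlock.h>

    struct my_cb {
            void (*fn)(struct my_cb *cb);
            struct list_head list;
    };

    static LIST_HEAD(my_callbacks);
    static DEFINE_SPINLOCK(my_cb_lock);

    /* Writers serialize on the lock; list_add_tail_rcu() publishes the
     * node safely for concurrent lockless readers. */
    static void my_add_callback(struct my_cb *cb)
    {
            spin_lock(&my_cb_lock);
            list_add_tail_rcu(&cb->list, &my_callbacks);
            spin_unlock(&my_cb_lock);
    }

    /* Readers take only an RCU read-side critical section. */
    static void my_run_callbacks(void)
    {
            struct my_cb *cb;

            rcu_read_lock();
            list_for_each_entry_rcu(cb, &my_callbacks, list)
                    cb->fn(cb);
            rcu_read_unlock();
    }
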
blk-core.c
1369 void blk_steal_bios(struct bio_list *list, struct request *rq) in blk_steal_bios() argument
1372 if (list->tail) in blk_steal_bios()
1373 list->tail->bi_next = rq->bio; in blk_steal_bios()
1375 list->head = rq->bio; in blk_steal_bios()
1376 list->tail = rq->biotail; in blk_steal_bios()
1703 list); in flush_plug_callbacks()
1704 list_del(&cb->list); in flush_plug_callbacks()
1719 list_for_each_entry(cb, &plug->cb_list, list) in blk_check_plugged()
1729 list_add(&cb->list, &plug->cb_list); in blk_check_plugged()
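
blk_steal_bios() splices a request's whole bio chain (singly linked through bi_next) onto a head/tail-tracked bio_list in O(1): patch the old tail's next pointer, or the head if the list was empty, then adopt the chain's tail. The same splice in generic form, with hypothetical my_node/my_slist types standing in for struct bio/struct bio_list:

    #include <linux/stddef.h>

    struct my_node {
            struct my_node *next;
    };

    struct my_slist {
            struct my_node *head;
            struct my_node *tail;
    };

    static void my_slist_init(struct my_slist *list)
    {
            list->head = list->tail = NULL;
    }

    /* O(1) splice of a whole chain (first..last) onto a head/tail
     * list, the shape of blk_steal_bios(). */
    static void my_steal(struct my_slist *list, struct my_node *first,
                         struct my_node *last)
    {
            if (list->tail)
                    list->tail->next = first;
            else
                    list->head = first;
            list->tail = last;
    }
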
elevator.c
127 list_for_each_entry(e, &elv_list, list) { in elevator_find()
558 list_add_tail(&e->list, &elv_list); in elv_register()
571 list_del_init(&e->list); in elv_unregister()
654 list_for_each_entry(e, &elv_list, list) { in elevator_get_by_features()
805 list_for_each_entry(__e, &elv_list, list) { in elv_iosched_show()
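
elevator.c uses a list as a global registry: elv_register() appends to elv_list with list_add_tail(), elv_unregister() removes with list_del_init(), and every lookup walks the list. The registry shape in a sketch; all names here, including the lock, are hypothetical:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>

    struct my_elevator {
            const char *name;
            struct list_head list;
    };

    static LIST_HEAD(my_elv_list);
    static DEFINE_SPINLOCK(my_elv_lock);

    static void my_register(struct my_elevator *e)
    {
            spin_lock(&my_elv_lock);
            list_add_tail(&e->list, &my_elv_list);  /* as in elv_register() */
            spin_unlock(&my_elv_lock);
    }

    /* Caller holds my_elv_lock; walk as in elevator_find(). */
    static struct my_elevator *my_find(const char *name)
    {
            struct my_elevator *e;

            list_for_each_entry(e, &my_elv_list, list)
                    if (!strcmp(e->name, name))
                            return e;
            return NULL;
    }
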
kyber-iosched.c
662 struct list_head *list; member
672 flush_data->list); in flush_busy_kcq()
681 struct list_head *list) in kyber_flush_busy_kcqs() argument
686 .list = list, in kyber_flush_busy_kcqs()
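
Both flush_busy_ctx() in blk-mq.c (line 1009) and flush_busy_kcq() here hand a destination list pointer to a callback through a small data struct, then transplant everything with list_splice_tail_init(), which appends the entire source list in O(1) and re-initializes it to empty so the per-context queue stays usable. The move in isolation, over hypothetical types:

    #include <linux/list.h>

    struct my_flush_data {
            struct list_head *list;     /* destination, as in the "member" hit */
    };

    /* Splice the whole source list onto the destination in O(1) and
     * leave the source empty, as flush_busy_kcq() does per context. */
    static void my_flush_one(struct list_head *src, struct my_flush_data *fd)
    {
            list_splice_tail_init(src, fd->list);
    }
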
blk-stat.h
23 struct list_head list; member
blk-mq-sched.h
25 struct list_head *list, bool run_queue_async);
mq-deadline.c
875 struct list_head *list, bool at_head) in dd_insert_requests() argument
882 while (!list_empty(list)) { in dd_insert_requests()
885 rq = list_first_entry(list, struct request, queuelist); in dd_insert_requests()
blk-merge.c
1109 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, in blk_bio_list_merge() argument
1115 list_for_each_entry_reverse(rq, list, queuelist) { in blk_bio_list_merge()
blk.h
220 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
bfq-iosched.c
6057 struct list_head *list, bool at_head) in bfq_insert_requests() argument
6059 while (!list_empty(list)) { in bfq_insert_requests()
6062 rq = list_first_entry(list, struct request, queuelist); in bfq_insert_requests()
/block/partitions/
ldm.c
545 struct vblk *v = list_entry (item, struct vblk, list); in ldm_get_disk_objid()
591 vb = list_entry (item, struct vblk, list); in ldm_create_data_partitions()
1187 list_add (&vb->list, &ldb->v_dgrp); in ldm_ldmdb_add()
1191 list_add (&vb->list, &ldb->v_disk); in ldm_ldmdb_add()
1194 list_add (&vb->list, &ldb->v_volu); in ldm_ldmdb_add()
1197 list_add (&vb->list, &ldb->v_comp); in ldm_ldmdb_add()
1202 struct vblk *v = list_entry (item, struct vblk, list); in ldm_ldmdb_add()
1205 list_add_tail (&vb->list, &v->list); in ldm_ldmdb_add()
1209 list_add_tail (&vb->list, &ldb->v_part); in ldm_ldmdb_add()
1253 f = list_entry (item, struct frag, list); in ldm_frag_add()
[all …]
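
The ldm.c hits use the older open-coded walk: iterate raw list nodes with list_for_each() and map each back to its container by hand with list_entry(), which is container_of() by another name. A sketch with a hypothetical my_vblk type:

    #include <linux/list.h>
    #include <linux/types.h>

    struct my_vblk {
            u8 obj_id;
            struct list_head list;
    };

    /* Open-coded style seen in ldm_get_disk_objid(): walk raw nodes
     * and recover the container with list_entry(ptr, type, member). */
    static struct my_vblk *my_find_objid(struct list_head *head, u8 id)
    {
            struct list_head *item;

            list_for_each(item, head) {
                    struct my_vblk *v = list_entry(item, struct my_vblk, list);

                    if (v->obj_id == id)
                            return v;
            }
            return NULL;
    }
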
ldm.h
88 struct list_head list; member
179 struct list_head list; member