
Searched refs:queue (Results 1 – 25 of 34) sorted by relevance


/block/
blk-mq-rdma.c
28 unsigned int queue, cpu; in blk_mq_rdma_map_queues() local
30 for (queue = 0; queue < map->nr_queues; queue++) { in blk_mq_rdma_map_queues()
31 mask = ib_get_vector_affinity(dev, first_vec + queue); in blk_mq_rdma_map_queues()
36 map->mq_map[cpu] = map->queue_offset + queue; in blk_mq_rdma_map_queues()
blk-mq-pci.c
30 unsigned int queue, cpu; in blk_mq_pci_map_queues() local
32 for (queue = 0; queue < qmap->nr_queues; queue++) { in blk_mq_pci_map_queues()
33 mask = pci_irq_get_affinity(pdev, queue + offset); in blk_mq_pci_map_queues()
38 qmap->mq_map[cpu] = qmap->queue_offset + queue; in blk_mq_pci_map_queues()
blk-mq-virtio.c
28 unsigned int queue, cpu; in blk_mq_virtio_map_queues() local
33 for (queue = 0; queue < qmap->nr_queues; queue++) { in blk_mq_virtio_map_queues()
34 mask = vdev->config->get_vq_affinity(vdev, first_vec + queue); in blk_mq_virtio_map_queues()
39 qmap->mq_map[cpu] = qmap->queue_offset + queue; in blk_mq_virtio_map_queues()
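The three entries above (blk-mq-rdma.c, blk-mq-pci.c, blk-mq-virtio.c) are instances of the same queue-mapping pattern: walk the hardware queues, look up the CPU affinity mask of the matching interrupt vector, and point every CPU in that mask at the queue through mq_map[]. A minimal sketch of that shape follows; get_vector_affinity() is a hypothetical stand-in for ib_get_vector_affinity(), pci_irq_get_affinity() or vdev->config->get_vq_affinity(), and fallback handling is simplified.

    #include <linux/blk-mq.h>

    /* Hypothetical per-transport hook; stands in for the affinity lookups
     * used by the three helpers above. */
    static const struct cpumask *get_vector_affinity(int vec);

    static int example_map_queues(struct blk_mq_queue_map *qmap, int first_vec)
    {
            const struct cpumask *mask;
            unsigned int queue, cpu;

            for (queue = 0; queue < qmap->nr_queues; queue++) {
                    /* CPUs whose interrupts are steered to this vector */
                    mask = get_vector_affinity(first_vec + queue);
                    if (!mask)
                            goto fallback;
                    /* direct each CPU in the mask at this hardware queue */
                    for_each_cpu(cpu, mask)
                            qmap->mq_map[cpu] = qmap->queue_offset + queue;
            }
            return 0;

    fallback:
            /* no affinity information: fall back to the generic spread */
            return blk_mq_map_queues(qmap);
    }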
bsg.c
31 struct request_queue *queue; member
215 struct request_queue *q = bd->queue; in bsg_put_device()
255 bd->queue = rq; in bsg_add_device()
274 if (bd->queue == q) { in __bsg_get_device()
300 bd = __bsg_get_device(iminor(inode), bcd->queue); in bsg_get_device()
302 bd = bsg_add_device(inode, bcd->queue, file); in bsg_get_device()
319 bcd = &bd->queue->bsg_dev; in bsg_open()
332 bcd = &bd->queue->bsg_dev; in bsg_release()
344 int queue; in bsg_set_command_q() local
346 if (get_user(queue, uarg)) in bsg_set_command_q()
[all …]
blk-integrity.c
123 struct blk_integrity *b1 = &gd1->queue->integrity; in blk_integrity_compare()
124 struct blk_integrity *b2 = &gd2->queue->integrity; in blk_integrity_compare()
225 struct blk_integrity *bi = &disk->queue->integrity; in integrity_attr_show()
237 struct blk_integrity *bi = &disk->queue->integrity; in integrity_attr_store()
399 struct blk_integrity *bi = &disk->queue->integrity; in blk_integrity_register()
404 ilog2(queue_logical_block_size(disk->queue)); in blk_integrity_register()
409 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue); in blk_integrity_register()
412 if (disk->queue->ksm) { in blk_integrity_register()
414 blk_ksm_unregister(disk->queue); in blk_integrity_register()
429 struct blk_integrity *bi = &disk->queue->integrity; in blk_integrity_unregister()
[all …]
blk-mq.h
35 struct request_queue *queue; member
208 atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap); in __blk_mq_inc_active_requests()
216 atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap); in __blk_mq_dec_active_requests()
224 return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap); in __blk_mq_active_requests()
306 struct request_queue *q = hctx->queue; in hctx_may_queue()
bfq-iosched.h
447 struct request_queue *queue; member
1075 if (likely(!blk_trace_note_message_enabled((bfqd)->queue))) \
1078 blk_add_cgroup_trace_msg((bfqd)->queue, \
1085 blk_add_cgroup_trace_msg((bfqd)->queue, \
1093 if (likely(!blk_trace_note_message_enabled((bfqd)->queue))) \
1096 blk_add_trace_msg((bfqd)->queue, "bfq%s%c " fmt, pid_str, \
1105 blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
blk-mq-sched.c
118 struct request_queue *q = hctx->queue; in __blk_mq_do_dispatch_sched()
129 max_dispatch = hctx->queue->nr_requests; in __blk_mq_do_dispatch_sched()
232 struct request_queue *q = hctx->queue; in blk_mq_do_dispatch_ctx()
282 struct request_queue *q = hctx->queue; in __blk_mq_sched_dispatch_requests()
335 struct request_queue *q = hctx->queue; in blk_mq_sched_dispatch_requests()
487 struct request_queue *q = hctx->queue; in blk_mq_sched_insert_requests()
496 e = hctx->queue->elevator; in blk_mq_sched_insert_requests()
blk-mq-sysfs.c
70 q = ctx->queue; in blk_mq_sysfs_show()
91 q = ctx->queue; in blk_mq_sysfs_store()
112 q = hctx->queue; in blk_mq_hw_sysfs_show()
134 q = hctx->queue; in blk_mq_hw_sysfs_store()
243 struct request_queue *q = hctx->queue; in blk_mq_register_hctx()
genhd.c
750 if (disk->queue->backing_dev_info->dev) { in register_disk()
752 &disk->queue->backing_dev_info->dev->kobj, in register_disk()
784 elevator_init_mq(disk->queue); in __device_add_disk()
814 struct backing_dev_info *bdi = disk->queue->backing_dev_info; in __device_add_disk()
834 WARN_ON_ONCE(!blk_get_queue(disk->queue)); in __device_add_disk()
923 if (disk->queue) { in del_gendisk()
929 bdi_unregister(disk->queue->backing_dev_info); in del_gendisk()
1292 struct request_queue *q = part_to_disk(p)->queue; in part_stat_show()
1336 struct request_queue *q = part_to_disk(p)->queue; in part_inflight_show()
1361 return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue)); in disk_alignment_offset_show()
[all …]
blk-timeout.c
43 int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags); in part_timeout_show()
55 struct request_queue *q = disk->queue; in part_timeout_store()
mq-deadline-main.c
81 struct request_queue *queue; member
495 struct deadline_data *dd = hctx->queue->elevator->elevator_data; in dd_dispatch_request()
549 struct request_queue *q = hctx->queue; in dd_depth_updated()
571 dd_deactivate_policy(dd->queue); in dd_exit_sched()
616 dd->queue = q; in dd_init_sched()
715 struct request_queue *q = hctx->queue; in dd_insert_request()
777 struct request_queue *q = hctx->queue; in dd_insert_requests()
799 struct deadline_data *dd = hctx->queue->elevator->elevator_data; in dd_has_write_work()
857 struct deadline_data *dd = hctx->queue->elevator->elevator_data; in dd_has_work()
blk-zoned.c
128 sector_t zone_sectors = blk_queue_zone_sectors(disk->queue); in blkdev_nr_zones()
130 if (!blk_queue_is_zoned(disk->queue)) in blkdev_nr_zones()
433 struct request_queue *q = disk->queue; in blk_revalidate_zone_cb()
517 struct request_queue *q = disk->queue; in blk_revalidate_disk_zones()
bfq-iosched.c
234 (!blk_queue_nonrot(bfqd->queue) || \
434 blk_mq_run_hw_queues(bfqd->queue, true); in bfq_schedule_dispatch()
3775 if (blk_queue_nonrot(bfqd->queue)) in bfq_bfqq_is_slow()
4116 !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, in idling_boosts_thr_without_issues()
4150 ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && in idling_boosts_thr_without_issues()
4348 if (blk_queue_nonrot(bfqd->queue) && in bfq_choose_bfqq_for_injection()
4641 bfq_dispatch_remove(bfqd->queue, rq); in bfq_dispatch_rq_from_bfqq()
4675 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_has_work()
4687 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in __bfq_dispatch_request()
4827 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_dispatch_request()
[all …]
blk-mq-tag.c
29 struct request_queue *q = hctx->queue; in __blk_mq_tag_busy()
61 struct request_queue *q = hctx->queue; in __blk_mq_tag_idle()
237 if (rq->q == hctx->queue && rq->mq_hctx == hctx) in bt_iter()
599 struct blk_mq_tag_set *set = hctx->queue->tag_set; in blk_mq_tag_update_depth()
blk-core.c
485 struct request_queue *q = bio->bi_disk->queue; in bio_queue_enter()
771 trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p), in blk_partition_remap()
818 struct request_queue *q = bio->bi_disk->queue; in submit_bio_checks()
943 blk_queue_exit(disk->queue); in __submit_bio()
977 struct request_queue *q = bio->bi_disk->queue; in __submit_bio_noacct()
998 if (q == bio->bi_disk->queue) in __submit_bio_noacct()
1029 blk_queue_exit(disk->queue); in __submit_bio_noacct_mq()
1098 count = queue_logical_block_size(bio->bi_disk->queue) >> 9; in submit_bio()
blk-throttle.c
206 struct request_queue *queue; member
378 if (likely(!blk_trace_note_message_enabled(__td->queue))) \
381 blk_add_cgroup_trace_msg(__td->queue, \
384 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \
600 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in blk_throtl_update_limit_valid()
1275 struct request_queue *q = td->queue; in throtl_pending_timer_fn()
1343 struct request_queue *q = td->queue; in blk_throtl_dispatch_work_fn()
1421 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { in tg_conf_updated()
1919 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in throtl_can_upgrade()
1965 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in throtl_upgrade_state()
[all …]
blk-cgroup.c
611 __acquires(rcu) __acquires(&disk->queue->queue_lock) in blkg_conf_prep()
622 q = disk->queue; in blkg_conf_prep()
727 __releases(&ctx->disk->queue->queue_lock) __releases(rcu) in blkg_conf_finish()
729 spin_unlock_irq(&ctx->disk->queue->queue_lock); in blkg_conf_finish()
826 struct blkcg_gq *blkg = blk_queue_root_blkg(disk->queue); in blkcg_fill_root_iostats()
1831 blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_disk->queue); in blkg_tryget_closest()
1867 blkg_get(bio->bi_disk->queue->root_blkg); in bio_associate_blkg_from_css()
1868 bio->bi_blkg = bio->bi_disk->queue->root_blkg; in bio_associate_blkg_from_css()
blk-mq-debugfs.c
420 blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq, in hctx_busy_show()
469 struct request_queue *q = hctx->queue; in hctx_tags_show()
486 struct request_queue *q = hctx->queue; in hctx_tags_bitmap_show()
503 struct request_queue *q = hctx->queue; in hctx_sched_tags_show()
520 struct request_queue *q = hctx->queue; in hctx_sched_tags_bitmap_show()
blk-mq.c
900 if (blk_mq_request_started(rq) && rq->q == hctx->queue) { in blk_mq_rq_inflight()
1365 struct request_queue *q = hctx->queue; in blk_mq_dispatch_rq_list()
1583 if (hctx->queue->nr_hw_queues == 1) in blk_mq_hctx_next_cpu()
1690 need_run = !blk_queue_quiesced(hctx->queue) && in blk_mq_run_hw_queue()
1909 trace_block_rq_insert(hctx->queue, rq); in __blk_mq_insert_req_list()
1971 trace_block_rq_insert(hctx->queue, rq); in blk_mq_insert_requests()
2205 hctx->queue->mq_ops->commit_rqs && queued) in blk_mq_try_issue_list_directly()
2206 hctx->queue->mq_ops->commit_rqs(hctx); in blk_mq_try_issue_list_directly()
2252 struct request_queue *q = bio->bi_disk->queue; in blk_mq_submit_bio()
2654 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { in blk_mq_hctx_notify_offline()
[all …]
blk-settings.c
651 struct request_queue *t = disk->queue; in disk_stack_limits()
664 blk_queue_update_readahead(disk->queue); in disk_stack_limits()
877 disk->queue->limits.zoned = model; in blk_queue_set_zoned()
blk-mq-sched.h
72 struct elevator_queue *e = hctx->queue->elevator; in blk_mq_sched_has_work()
bio-integrity.c
143 bvec_gap_to_prev(bio->bi_disk->queue, in bio_integrity_add_page()
212 struct request_queue *q = bio->bi_disk->queue; in bio_integrity_prep()
Kconfig.iosched
24 synchronous writes, it will self-tune queue depths to achieve that
/block/partitions/
core.c
204 queue_limit_alignment_offset(&part_to_disk(p)->queue->limits, in part_alignment_offset_show()
214 queue_limit_discard_alignment(&part_to_disk(p)->queue->limits, in part_discard_alignment_show()
386 switch (disk->queue->limits.zoned) { in add_partition()
394 disk->queue->limits.zoned = BLK_ZONED_NONE; in add_partition()
735 if (disk->queue->limits.zoned == BLK_ZONED_HM) { in blk_add_partitions()
