/block/
D | blk-mq-pci.c
    29  unsigned int queue, cpu;   in blk_mq_pci_map_queues() (local)
    31  for (queue = 0; queue < qmap->nr_queues; queue++) {   in blk_mq_pci_map_queues()
    32  mask = pci_irq_get_affinity(pdev, queue + offset);   in blk_mq_pci_map_queues()
    37  qmap->mq_map[cpu] = qmap->queue_offset + queue;   in blk_mq_pci_map_queues()
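The blk-mq-pci.c hits above show most of the mapping loop in blk_mq_pci_map_queues(): for each hardware queue, the PCI core is asked which CPUs the queue's interrupt vector is affine to, and those CPUs' slots in mq_map are pointed at that queue. Below is a minimal sketch of that loop under a hypothetical name; the no-affinity fallback at the end is an assumption based on the mainline implementation and is not part of the hits shown.

#include <linux/blk-mq.h>
#include <linux/pci.h>

/* Sketch reconstructed from the blk-mq-pci.c hits above. */
static void sketch_pci_map_queues(struct blk_mq_queue_map *qmap,
                                  struct pci_dev *pdev, int offset)
{
        const struct cpumask *mask;
        unsigned int queue, cpu;

        for (queue = 0; queue < qmap->nr_queues; queue++) {
                /* CPU affinity of the interrupt vector backing this hw queue. */
                mask = pci_irq_get_affinity(pdev, queue + offset);
                if (!mask)
                        goto fallback;

                /* Every CPU in the vector's mask dispatches to this queue. */
                for_each_cpu(cpu, mask)
                        qmap->mq_map[cpu] = qmap->queue_offset + queue;
        }
        return;

fallback:
        /* Assumed fallback: no affinity info, use the generic mapping. */
        WARN_ON_ONCE(qmap->nr_queues > 1);
        blk_mq_map_queues(qmap);
}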
D | blk-mq-virtio.c
    27  unsigned int queue, cpu;   in blk_mq_virtio_map_queues() (local)
    32  for (queue = 0; queue < qmap->nr_queues; queue++) {   in blk_mq_virtio_map_queues()
    33  mask = vdev->config->get_vq_affinity(vdev, first_vec + queue);   in blk_mq_virtio_map_queues()
    38  qmap->mq_map[cpu] = qmap->queue_offset + queue;   in blk_mq_virtio_map_queues()
D | blk-mq-cpumap.c
    21  unsigned int queue, cpu;   in blk_mq_map_queues() (local)
    30  for (queue = 0; queue < qmap->nr_queues; queue++) {   in blk_mq_map_queues()
    31  for_each_cpu(cpu, &masks[queue])   in blk_mq_map_queues()
    32  qmap->mq_map[cpu] = qmap->queue_offset + queue;   in blk_mq_map_queues()
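The blk-mq-cpumap.c hits are the generic fallback used when a driver cannot supply interrupt affinity: the possible CPUs are split into one mask per queue and each mask is written into mq_map. A sketch of that loop follows; the use of group_cpus_evenly() and the no-masks fallback are assumptions based on recent mainline kernels, not shown in the hits.

#include <linux/blk-mq.h>
#include <linux/group_cpus.h>
#include <linux/slab.h>

/* Sketch of the generic CPU-to-queue spread seen in blk-mq-cpumap.c. */
static void sketch_generic_map_queues(struct blk_mq_queue_map *qmap)
{
        struct cpumask *masks;
        unsigned int queue, cpu;

        /* One cpumask per hardware queue, spread evenly over possible CPUs. */
        masks = group_cpus_evenly(qmap->nr_queues);
        if (!masks) {
                /* Assumed fallback: point every CPU at the first queue. */
                for_each_possible_cpu(cpu)
                        qmap->mq_map[cpu] = qmap->queue_offset;
                return;
        }

        for (queue = 0; queue < qmap->nr_queues; queue++) {
                for_each_cpu(cpu, &masks[queue])
                        qmap->mq_map[cpu] = qmap->queue_offset + queue;
        }
        kfree(masks);
}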
D | blk-mq-sched.c
    89  struct request_queue *q = hctx->queue;   in __blk_mq_do_dispatch_sched()
    100  max_dispatch = hctx->queue->nr_requests;   in __blk_mq_do_dispatch_sched()
    217  struct request_queue *q = hctx->queue;   in blk_mq_do_dispatch_ctx()
    308  if (hctx->queue->elevator)   in __blk_mq_sched_dispatch_requests()
    321  struct request_queue *q = hctx->queue;   in blk_mq_sched_dispatch_requests()
    399  static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)   in blk_mq_exit_sched_shared_tags() (argument)
    401  blk_mq_free_rq_map(queue->sched_shared_tags);   in blk_mq_exit_sched_shared_tags()
    402  queue->sched_shared_tags = NULL;   in blk_mq_exit_sched_shared_tags()
    423  static int blk_mq_init_sched_shared_tags(struct request_queue *queue)   in blk_mq_init_sched_shared_tags() (argument)
    425  struct blk_mq_tag_set *set = queue->tag_set;   in blk_mq_init_sched_shared_tags()
    [all …]
D | blk-integrity.c
    123  struct blk_integrity *b1 = &gd1->queue->integrity;   in blk_integrity_compare()
    124  struct blk_integrity *b2 = &gd2->queue->integrity;   in blk_integrity_compare()
    217  return &dev_to_disk(dev)->queue->integrity;   in dev_to_bi()
    364  struct blk_integrity *bi = &disk->queue->integrity;   in blk_integrity_register()
    369  ilog2(queue_logical_block_size(disk->queue));   in blk_integrity_register()
    374  blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);   in blk_integrity_register()
    377  if (disk->queue->crypto_profile) {   in blk_integrity_register()
    379  disk->queue->crypto_profile = NULL;   in blk_integrity_register()
    394  struct blk_integrity *bi = &disk->queue->integrity;   in blk_integrity_unregister()
    401  blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);   in blk_integrity_unregister()
D | bsg.c
    24  struct request_queue *queue;   (member)
    67  ret = bd->sg_io_fn(bd->queue, &hdr, open_for_write,   in bsg_sg_io()
    76  if (!blk_get_queue(to_bsg_device(inode)->queue))   in bsg_open()
    83  blk_put_queue(to_bsg_device(inode)->queue);   in bsg_release()
    107  struct request_queue *q = bd->queue;   in bsg_ioctl()
    180  struct gendisk *disk = bd->queue->disk;   in bsg_unregister_queue()
    200  bd->queue = q;   in bsg_register_queue()
D | blk-mq-debugfs.c
    369  blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,   in hctx_busy_show()
    418  struct request_queue *q = hctx->queue;   in hctx_tags_show()
    435  struct request_queue *q = hctx->queue;   in hctx_tags_bitmap_show()
    452  struct request_queue *q = hctx->queue;   in hctx_sched_tags_show()
    469  struct request_queue *q = hctx->queue;   in hctx_sched_tags_bitmap_show()
    721  if (!hctx->queue->debugfs_dir)   in blk_mq_debugfs_unregister_hctx()
    790  lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);   in blk_mq_debugfs_unregister_rqos()
    792  if (!rqos->disk->queue->debugfs_dir)   in blk_mq_debugfs_unregister_rqos()
    800  struct request_queue *q = rqos->disk->queue;   in blk_mq_debugfs_register_rqos()
    842  lockdep_assert_held(&hctx->queue->debugfs_mutex);   in blk_mq_debugfs_unregister_sched_hctx()
    [all …]
D | genhd.c
    402  if (queue_is_mq(disk->queue) && disk->fops->poll_bio)   in device_add_disk()
    411  elevator_init_mq(disk->queue);   in device_add_disk()
    536  rq_qos_exit(disk->queue);   in device_add_disk()
    551  if (disk->queue->elevator)   in device_add_disk()
    552  elevator_exit(disk->queue);   in device_add_disk()
    585  blk_queue_flag_set(QUEUE_FLAG_DYING, disk->queue);   in __blk_mark_disk_dead()
    595  blk_queue_start_drain(disk->queue);   in __blk_mark_disk_dead()
    633  struct request_queue *q = disk->queue;   in del_gendisk()
    1163  blk_trace_remove(disk->queue);   in disk_release()
    1172  if (queue_is_mq(disk->queue) &&   in disk_release()
    [all …]
D | bfq-iosched.h
    514  struct request_queue *queue;   (member)
    1181  if (likely(!blk_trace_note_message_enabled((bfqd)->queue))) \
    1184  blk_add_cgroup_trace_msg((bfqd)->queue, \
    1190  blk_add_cgroup_trace_msg((bfqd)->queue, \
    1198  if (likely(!blk_trace_note_message_enabled((bfqd)->queue))) \
    1201  blk_add_trace_msg((bfqd)->queue, "%s " fmt, pid_str, ##args); \
    1208  blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
D | blk-ia-ranges.c
    111  struct request_queue *q = disk->queue;   in disk_register_independent_access_ranges()
    154  struct request_queue *q = disk->queue;   in disk_unregister_independent_access_ranges()
    269  GFP_KERNEL, disk->queue->node);   in disk_alloc_independent_access_ranges()
    290  struct request_queue *q = disk->queue;   in disk_set_independent_access_ranges()
D | blk-cgroup.c
    312  blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);   in blkg_alloc()
    320  if (!blk_get_queue(disk->queue))   in blkg_alloc()
    323  blkg->q = disk->queue;   in blkg_alloc()
    343  if (!blkcg_policy_enabled(disk->queue, pol))   in blkg_alloc()
    362  blk_put_queue(disk->queue);   in blkg_alloc()
    382  lockdep_assert_held(&disk->queue->queue_lock);   in blkg_create()
    385  if (blk_queue_dying(disk->queue)) {   in blkg_create()
    408  blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);   in blkg_create()
    426  ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);   in blkg_create()
    429  list_add(&blkg->q_node, &disk->queue->blkg_list);   in blkg_create()
    [all …]
D | blk-mq-sysfs.c
    59  q = hctx->queue;   in blk_mq_hw_sysfs_show()
    160  struct request_queue *q = hctx->queue;   in blk_mq_register_hctx()
    221  struct request_queue *q = disk->queue;   in blk_mq_sysfs_register()
    258  struct request_queue *q = disk->queue;   in blk_mq_sysfs_unregister()
D | blk-mq.h
    28  struct request_queue *queue;   (member)
    277  atomic_inc(&hctx->queue->nr_active_requests_shared_tags);   in __blk_mq_inc_active_requests()
    286  atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);   in __blk_mq_sub_active_requests()
    299  return atomic_read(&hctx->queue->nr_active_requests_shared_tags);   in __blk_mq_active_requests()
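The blk-mq.h hits show only the shared-tags side of the active-request accounting: when a tag set is shared across hardware queues, the counter lives on the request_queue rather than on each hctx. A hedged sketch of that pattern is below; the non-shared branch and the blk_mq_is_shared_tags() check are assumptions based on the block layer's private block/blk-mq.h header, so this only compiles inside block/.

/* Sketch of the accounting pattern behind the blk-mq.h hits above. */
static inline void sketch_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                /* Tags shared across hctxs: account on the whole queue. */
                atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
        else
                /* Per-hctx tags: account on the hctx itself. */
                atomic_inc(&hctx->nr_active);
}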
D | blk-timeout.c
    43  int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);   in part_timeout_show()
    55  struct request_queue *q = disk->queue;   in part_timeout_store()
D | blk-sysfs.c
    677  struct request_queue *q = disk->queue;   in queue_attr_visible()
    691  struct request_queue *q = disk->queue;   in blk_mq_queue_attr_visible()
    719  struct request_queue *q = disk->queue;   in queue_attr_show()
    736  struct request_queue *q = disk->queue;   in queue_attr_store()
    772  struct request_queue *q = disk->queue;   in blk_debugfs_remove()
    789  struct request_queue *q = disk->queue;   in blk_register_queue()
    874  struct request_queue *q = disk->queue;   in blk_unregister_queue()
D | blk-throttle.c
    64  struct request_queue *queue;   (member)
    226  if (likely(!blk_trace_note_message_enabled(__td->queue))) \
    229  blk_add_cgroup_trace_msg(__td->queue, \
    232  blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \
    454  blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {   in blk_throtl_update_limit_valid()
    1185  q = td->queue;   in throtl_pending_timer_fn()
    1254  struct request_queue *q = td->queue;   in blk_throtl_dispatch_work_fn()
    1332  global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {   in tg_conf_updated()
    1721  struct request_queue *q = disk->queue;   in blk_throtl_cancel_bios()
    1886  blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {   in throtl_can_upgrade()
    [all …]
D | blk-crypto-sysfs.c
    131  struct request_queue *q = disk->queue;   in blk_crypto_sysfs_register()
    155  kobject_put(disk->queue->crypto_kobject);   in blk_crypto_sysfs_unregister()
D | blk-zoned.c
    196  need_reset = blk_alloc_zone_bitmap(disk->queue->node, disk->nr_zones);   in blkdev_zone_reset_all_emulated()
    462  struct request_queue *q = disk->queue;   in blk_revalidate_zone_cb()
    565  struct request_queue *q = disk->queue;   in blk_revalidate_disk_zones()
    646  struct request_queue *q = disk->queue;   in disk_clear_zone_settings()
D | blk-wbt.c
    714  struct request_queue *q = disk->queue;   in wbt_enable_default()
    766  RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->disk->queue);   in wbt_queue_depth_changed()
    774  blk_stat_remove_callback(rqos->disk->queue, rwb->cb);   in wbt_exit()
    784  struct rq_qos *rqos = wbt_rq_qos(disk->queue);   in wbt_disable_default()
    900  struct request_queue *q = disk->queue;   in wbt_init()
D | bfq-iosched.c
    234  (!blk_queue_nonrot(bfqd->queue) || \
    480  blk_mq_run_hw_queues(bfqd->queue, true);   in bfq_schedule_dispatch()
    4171  if (blk_queue_nonrot(bfqd->queue))   in bfq_bfqq_is_slow()
    4493  !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,   in idling_boosts_thr_without_issues()
    4527  ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&   in idling_boosts_thr_without_issues()
    4728  if (blk_queue_nonrot(bfqd->queue) &&   in bfq_choose_bfqq_for_injection()
    5118  bfq_dispatch_remove(bfqd->queue, rq);   in bfq_dispatch_rq_from_bfqq()
    5149  struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;   in bfq_has_work()
    5161  struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;   in __bfq_dispatch_request()
    5302  struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;   in bfq_dispatch_request()
    [all …]
D | mq-deadline.c
    598  struct deadline_data *dd = hctx->queue->elevator->elevator_data;   in dd_dispatch_request()
    633  const unsigned int nrr = hctx->queue->nr_requests;   in dd_to_word_depth()
    660  struct request_queue *q = hctx->queue;   in dd_depth_updated()
    811  struct request_queue *q = hctx->queue;   in dd_insert_request()
    881  struct request_queue *q = hctx->queue;   in dd_insert_requests()
    906  struct deadline_data *dd = hctx->queue->elevator->elevator_data;   in dd_has_write_work()
    971  struct deadline_data *dd = hctx->queue->elevator->elevator_data;   in dd_has_work()
D | blk-mq.c
    1065  struct request_queue *q = hctx->queue;   in blk_mq_flush_tag_batch()
    2030  if (hctx->queue->mq_ops->commit_rqs && queued) {   in blk_mq_commit_rqs()
    2031  trace_block_unplug(hctx->queue, queued, !from_schedule);   in blk_mq_commit_rqs()
    2032  hctx->queue->mq_ops->commit_rqs(hctx);   in blk_mq_commit_rqs()
    2043  struct request_queue *q = hctx->queue;   in blk_mq_dispatch_rq_list()
    2202  if (hctx->queue->nr_hw_queues == 1)   in blk_mq_hctx_next_cpu()
    2281  __blk_mq_run_dispatch_ops(hctx->queue, false,   in blk_mq_run_hw_queue()
    2282  need_run = !blk_queue_quiesced(hctx->queue) &&   in blk_mq_run_hw_queue()
    2293  blk_mq_run_dispatch_ops(hctx->queue,   in blk_mq_run_hw_queue()
    2463  blk_mq_run_dispatch_ops(hctx->queue,   in blk_mq_run_work_fn()
    [all …]
D | blk-iolatency.c
    333  unsigned long qd = blkiolat->rqos.disk->queue->nr_requests;   in scale_cookie_change()
    375  unsigned long qd = iolat->blkiolat->rqos.disk->queue->nr_requests;   in scale_change()
    668  blkiolat->rqos.disk->queue->root_blkg) {   in blkiolatency_timer_fn()
    752  blk_mq_freeze_queue(blkiolat->rqos.disk->queue);   in blkiolatency_enable_work_fn()
    754  blk_mq_unfreeze_queue(blkiolat->rqos.disk->queue);   in blkiolatency_enable_work_fn()
D | blk-iocost.c
    815  if (!blk_queue_nonrot(disk->queue))   in ioc_autop_idx()
    819  if (blk_queue_depth(disk->queue) == 1)   in ioc_autop_idx()
    3228  if (!queue_is_mq(disk->queue)) {   in ioc_qos_write()
    3233  ioc = q_to_ioc(disk->queue);   in ioc_qos_write()
    3238  ioc = q_to_ioc(disk->queue);   in ioc_qos_write()
    3241  blk_mq_freeze_queue(disk->queue);   in ioc_qos_write()
    3242  blk_mq_quiesce_queue(disk->queue);   in ioc_qos_write()
    3316  blk_stat_enable_accounting(disk->queue);   in ioc_qos_write()
    3317  blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);   in ioc_qos_write()
    3320  blk_stat_disable_accounting(disk->queue);   in ioc_qos_write()
    [all …]
/block/partitions/
D | efi.c
    137  queue_logical_block_size(disk->queue)) - 1ULL;   in last_lba()
    240  (queue_logical_block_size(state->disk->queue) / 512);   in read_lba()
    310  unsigned ssz = queue_logical_block_size(state->disk->queue);   in alloc_read_gpt_header()
    357  queue_logical_block_size(state->disk->queue)) {   in is_gpt_valid()
    360  queue_logical_block_size(state->disk->queue));   in is_gpt_valid()
    718  unsigned ssz = queue_logical_block_size(state->disk->queue) / 512;   in efi_partition()
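The efi.c hits all revolve around converting between the device's logical block size and the 512-byte sectors the block layer addresses in. A small sketch of the two conversions visible above; the helper names, the use of bdev_nr_bytes(), and the full function bodies are assumptions reconstructed around the quoted expressions, not code taken verbatim from the hits.

#include <linux/blkdev.h>
#include <linux/math64.h>

/* Last addressable LBA: device size in bytes divided by the logical
 * block size, minus one (compare the last_lba() hit above). */
static u64 sketch_last_lba(struct gendisk *disk)
{
        return div_u64(bdev_nr_bytes(disk->part0),
                       queue_logical_block_size(disk->queue)) - 1ULL;
}

/* Converting a GPT LBA into a 512-byte kernel sector (compare read_lba()
 * and efi_partition()): one logical block spans lbs/512 sectors. */
static sector_t sketch_lba_to_sector(struct gendisk *disk, u64 lba)
{
        return lba * (queue_logical_block_size(disk->queue) / 512);
}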