/kernel/linux/linux-5.10/drivers/block/rnbd/ |
D | rnbd-srv-dev.h |
    51   return queue_max_segments(bdev_get_queue(dev->bdev));  in rnbd_dev_get_max_segs()
    56   return queue_max_hw_sectors(bdev_get_queue(dev->bdev));  in rnbd_dev_get_max_hw_sects()
    61   return blk_queue_secure_erase(bdev_get_queue(dev->bdev));  in rnbd_dev_get_secure_discard()
    66   if (!blk_queue_discard(bdev_get_queue(dev->bdev)))  in rnbd_dev_get_max_discard_sects()
    69   return blk_queue_get_max_sectors(bdev_get_queue(dev->bdev),  in rnbd_dev_get_max_discard_sects()
    75   return bdev_get_queue(dev->bdev)->limits.discard_granularity;  in rnbd_dev_get_discard_granularity()
    80   return bdev_get_queue(dev->bdev)->limits.discard_alignment;  in rnbd_dev_get_discard_alignment()
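All seven rnbd-srv helpers above share one pattern: resolve the backing device's request_queue with bdev_get_queue() and read a single limit or flag from it. A minimal sketch of that pattern, using a hypothetical struct my_dev in place of the real struct rnbd_dev:

    #include <linux/blkdev.h>

    /* Hypothetical stand-in for struct rnbd_dev. */
    struct my_dev {
            struct block_device *bdev;      /* opened backing device */
    };

    /* One queue lookup per query, exactly as in the hits above. */
    static inline unsigned int my_dev_get_discard_granularity(struct my_dev *dev)
    {
            return bdev_get_queue(dev->bdev)->limits.discard_granularity;
    }

    static inline unsigned short my_dev_get_max_segs(struct my_dev *dev)
    {
            return queue_max_segments(bdev_get_queue(dev->bdev));
    }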
|
/kernel/linux/linux-5.10/block/ |
D | blk-zoned.c |
    161  if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||  in blkdev_report_zones()
    176  if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))  in blkdev_allow_reset_all_zones()
    206  struct request_queue *q = bdev_get_queue(bdev);  in blkdev_zone_mgmt()
    292  q = bdev_get_queue(bdev);  in blkdev_report_zones_ioctl()
    350  q = bdev_get_queue(bdev);  in blkdev_zone_mgmt_ioctl()
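Each zone helper above fetches the queue first because zone operations are only valid on zoned queues. A sketch of that gate (simplified; the real functions also validate sector ranges and, for reset-all, the zone-resetall capability):

    #include <linux/blkdev.h>
    #include <linux/errno.h>

    /* Sketch, not the verbatim 5.10 body: refuse zone ops early. */
    static int my_check_zoned(struct block_device *bdev)
    {
            struct request_queue *q = bdev_get_queue(bdev);

            if (!q)
                    return -ENXIO;
            if (!blk_queue_is_zoned(q))
                    return -EOPNOTSUPP;     /* regular, non-zoned device */
            return 0;
    }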
|
D | blk-lib.c |
    29   struct request_queue *q = bdev_get_queue(bdev);  in __blkdev_issue_discard()
    169  struct request_queue *q = bdev_get_queue(bdev);  in __blkdev_issue_write_same()
    252  struct request_queue *q = bdev_get_queue(bdev);  in __blkdev_issue_write_zeroes()
    306  struct request_queue *q = bdev_get_queue(bdev);  in __blkdev_issue_zero_pages()
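The four __blkdev_issue_* helpers each resolve the queue up front and validate the matching capability and limit before building any bios. A hedged sketch of the discard case, with my_discard_chunk() as a hypothetical helper that caps one bio's size:

    #include <linux/blkdev.h>
    #include <linux/kernel.h>

    /*
     * Sketch in the spirit of __blkdev_issue_discard(): each bio may
     * cover at most max_discard_sectors, so long ranges get split.
     * Returns 0 when the device cannot discard at all.
     */
    static sector_t my_discard_chunk(struct block_device *bdev, sector_t nr_sects)
    {
            struct request_queue *q = bdev_get_queue(bdev);
            unsigned int max = q->limits.max_discard_sectors;

            if (!blk_queue_discard(q) || !max)
                    return 0;
            return min_t(sector_t, nr_sects, max);
    }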
|
D | ioctl.c |
    123  struct request_queue *q = bdev_get_queue(bdev);  in blk_ioctl_discard()
    550  queue_max_sectors(bdev_get_queue(bdev)));  in blkdev_common_ioctl()
    553  return put_ushort(argp, !blk_queue_nonrot(bdev_get_queue(bdev)));  in blkdev_common_ioctl()
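blkdev_common_ioctl() answers queue-property ioctls by reading straight through bdev_get_queue(): the hit at 550 feeds BLKSECTGET from queue_max_sectors(), and the one at 553 answers BLKROTATIONAL with the inverse of the nonrot flag. A sketch of both queries (the ushort clamp is an assumption of this sketch, since the ioctl reports through put_ushort()):

    #include <linux/blkdev.h>
    #include <linux/kernel.h>

    /* Sketch: BLKSECTGET-style query, clamped to what a ushort can carry. */
    static unsigned short my_bdev_max_sectors(struct block_device *bdev)
    {
            return min_t(unsigned int, USHRT_MAX,
                         queue_max_sectors(bdev_get_queue(bdev)));
    }

    /* Sketch: BLKROTATIONAL-style query; "not non-rotational" == rotational. */
    static unsigned short my_bdev_rotational(struct block_device *bdev)
    {
            return !blk_queue_nonrot(bdev_get_queue(bdev));
    }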
|
D | blk-settings.c |
    653  if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,  in disk_stack_limits()
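disk_stack_limits() merges the bottom device's limits into the stacking queue's, offset by where the component starts so alignment works out. A sketch of the call shape, with my_stack_onto() as a hypothetical wrapper:

    #include <linux/blkdev.h>
    #include <linux/printk.h>

    /* Sketch of the stacking call: nonzero return flags misalignment. */
    static void my_stack_onto(struct request_queue *top,
                              struct block_device *bottom, sector_t start)
    {
            if (blk_stack_limits(&top->limits,
                                 &bdev_get_queue(bottom)->limits, start))
                    pr_notice("stacked device is misaligned\n");
    }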
|
/kernel/linux/linux-5.10/include/linux/ |
D | blkdev.h |
    975   static inline struct request_queue *bdev_get_queue(struct block_device *bdev)  in bdev_get_queue() function
    1453  return queue_logical_block_size(bdev_get_queue(bdev));  in bdev_logical_block_size()
    1463  return queue_physical_block_size(bdev_get_queue(bdev));  in bdev_physical_block_size()
    1473  return queue_io_min(bdev_get_queue(bdev));  in bdev_io_min()
    1483  return queue_io_opt(bdev_get_queue(bdev));  in bdev_io_opt()
    1505  struct request_queue *q = bdev_get_queue(bdev);  in bdev_alignment_offset()
    1564  struct request_queue *q = bdev_get_queue(bdev);  in bdev_discard_alignment()
    1574  struct request_queue *q = bdev_get_queue(bdev);  in bdev_write_same()
    1584  struct request_queue *q = bdev_get_queue(bdev);  in bdev_write_zeroes_sectors()
    1594  struct request_queue *q = bdev_get_queue(bdev);  in bdev_zoned_model()
    [all …]
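Every other hit in this listing funnels through the inline accessor defined at blkdev.h:975, and the bdev_* helpers below it are thin wrappers over the matching queue_* accessors. A sketch of that shape (treating the exact 5.10 body, a dereference of the gendisk's queue, as an assumption of this sketch):

    #include <linux/blkdev.h>

    /* Shape of the central accessor (sketch of the 5.10 definition). */
    static inline struct request_queue *my_bdev_get_queue(struct block_device *bdev)
    {
            return bdev->bd_disk->queue;    /* never NULL for a live bdev */
    }

    /* ... and of one thin wrapper, mirroring bdev_logical_block_size(). */
    static inline unsigned int my_bdev_logical_block_size(struct block_device *bdev)
    {
            return queue_logical_block_size(my_bdev_get_queue(bdev));
    }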
|
/kernel/linux/linux-5.10/drivers/md/ |
D | dm-table.c |
    423   struct request_queue *q = bdev_get_queue(bdev);  in dm_set_device_limits()
    875   struct request_queue *q = bdev_get_queue(bdev);  in device_is_rq_stackable()
    1384  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_zoned_model()
    1423  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_matches_zone_sectors()
    1581  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_flush_capable()
    1631  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_is_rotational()
    1639  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_is_not_random()
    1647  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_write_same_capable()
    1674  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_write_zeroes_capable()
    1701  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_nowait_capable()
    [all …]
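Nearly all of the dm-table hits are iterate_devices callouts with one signature: given a single underlying dm_dev, fetch its queue and answer a yes/no capability question, so the table code can combine the answers across all table members. A sketch with a hypothetical predicate name:

    #include <linux/device-mapper.h>
    #include <linux/blkdev.h>

    /*
     * Hypothetical callout in the shape of device_is_rotational() above:
     * nonzero means this one underlying device is rotational.
     */
    static int my_device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
                                       sector_t start, sector_t len, void *data)
    {
            struct request_queue *q = bdev_get_queue(dev->bdev);

            return q && !blk_queue_nonrot(q);
    }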
|
D | dm-zoned-target.c |
    590  if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {  in dmz_bdev_is_dying()
    790  q = bdev_get_queue(zoned_dev->bdev);  in dmz_fixup_devices()
    808  q = bdev_get_queue(zoned_dev->bdev);  in dmz_fixup_devices()
|
D | md-linear.c |
    100  if (blk_queue_discard(bdev_get_queue(rdev->bdev)))  in linear_conf()
|
D | raid0.c |
    398  if (blk_queue_discard(bdev_get_queue(rdev->bdev)))  in raid0_run()
    512  trace_block_bio_remap(bdev_get_queue(rdev->bdev),  in raid0_handle_discard()
|
D | dm-mpath.c |
    532   q = bdev_get_queue(bdev);  in multipath_clone_and_map()
    873   struct request_queue *q = bdev_get_queue(bdev);  in setup_scsi_dh()
    952   q = bdev_get_queue(p->path.dev->bdev);  in parse_path()
    1618  struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);  in activate_or_offline_path()
    2053  struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);  in pgpath_busy()
|
D | dm-clone-target.c |
    2030  struct request_queue *q = bdev_get_queue(bdev);  in bdev_supports_discards()
    2042  struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;  in disable_passdown_if_not_supported()
    2064  struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;  in set_discard_limits()
|
D | dm-io.c |
    306  struct request_queue *q = bdev_get_queue(where->bdev);  in do_region()
|
/kernel/linux/linux-5.10/drivers/target/ |
D | target_core_iblock.c |
    106  q = bdev_get_queue(bd);  in iblock_configure_device()
    696  struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);  in iblock_execute_rw()
    795  struct request_queue *q = bdev_get_queue(bd);  in iblock_get_blocks()
    856  struct request_queue *q = bdev_get_queue(bd);  in iblock_get_write_cache()
|
/kernel/linux/linux-5.10/fs/jfs/ |
D | ioctl.c |
    125  struct request_queue *q = bdev_get_queue(sb->s_bdev);  in jfs_ioctl()
|
D | super.c |
    377  struct request_queue *q = bdev_get_queue(sb->s_bdev);  in parse_options()
    396  struct request_queue *q = bdev_get_queue(sb->s_bdev);  in parse_options()
|
/kernel/linux/linux-5.10/fs/xfs/ |
D | xfs_discard.c |
    155  struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);  in xfs_ioc_trim()
|
/kernel/linux/linux-5.10/fs/iomap/ |
D | direct-io.c |
    70   dio->submit.last_queue = bdev_get_queue(iomap->bdev);  in iomap_dio_submit_bio()
    241  blk_queue_fua(bdev_get_queue(iomap->bdev)))  in iomap_dio_bio_actor()
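The hit at 241 decides whether an O_DSYNC direct write may carry REQ_FUA instead of needing a separate flush, which is only possible when the queue advertises FUA. A minimal sketch of the check, with my_can_use_fua() as a hypothetical name:

    #include <linux/blkdev.h>

    /* Sketch: REQ_FUA is usable only if the queue advertises FUA. */
    static bool my_can_use_fua(struct block_device *bdev)
    {
            return blk_queue_fua(bdev_get_queue(bdev));
    }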
|
/kernel/linux/linux-5.10/fs/crypto/ |
D | inline_crypt.c |
    40  devs[0] = bdev_get_queue(sb->s_bdev);  in fscrypt_get_devices()
|
/kernel/linux/linux-5.10/drivers/nvme/target/ |
D | io-cmd-bdev.c |
    13  const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;  in nvmet_bdev_set_limits()
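nvmet_bdev_set_limits() takes one const pointer into the backing queue's limits and copies selected fields out for export to the NVMe host. A sketch of that read-once pattern, with struct my_ns_limits as a hypothetical destination:

    #include <linux/blkdev.h>

    /* Hypothetical destination for the exported values. */
    struct my_ns_limits {
            unsigned int max_hw_sectors;
            unsigned int io_opt;
    };

    /* Sketch: snapshot selected queue limits, as in the hit above. */
    static void my_set_limits(struct block_device *bdev, struct my_ns_limits *out)
    {
            const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;

            out->max_hw_sectors = ql->max_hw_sectors;
            out->io_opt = ql->io_opt;
    }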
|
/kernel/linux/linux-5.10/drivers/block/xen-blkback/ |
D | xenbus.c |
    518  q = bdev_get_queue(bdev);  in xen_vbd_create()
    578  struct request_queue *q = bdev_get_queue(bdev);  in xen_blkbk_discard()
|
/kernel/linux/linux-5.10/fs/ |
D | block_dev.c |
    294  !blk_poll(bdev_get_queue(bdev), qc, true))  in __blkdev_direct_IO_simple()
    330  struct request_queue *q = bdev_get_queue(bdev);  in blkdev_iopoll()
    490  !blk_poll(bdev_get_queue(bdev), qc, true))  in __blkdev_direct_IO()
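Both __blkdev_direct_IO paths poll the bdev's queue for their submission cookie instead of sleeping on an interrupt-driven completion. A hedged sketch of that loop shape (the real code tracks completion in the bio's private state; done()/state here are stand-ins):

    #include <linux/blkdev.h>
    #include <linux/sched.h>

    /* Sketch of the polling loop in __blkdev_direct_IO_simple(). */
    static void my_poll_for_completion(struct block_device *bdev, blk_qc_t qc,
                                       bool (*done)(void *), void *state)
    {
            for (;;) {
                    set_current_state(TASK_UNINTERRUPTIBLE);
                    if (done(state))
                            break;
                    /* spin-poll the queue; yield if no progress was made */
                    if (!blk_poll(bdev_get_queue(bdev), qc, true))
                            io_schedule();
            }
            __set_current_state(TASK_RUNNING);
    }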
|
/kernel/linux/linux-5.10/fs/fat/ |
D | file.c |
    130  struct request_queue *q = bdev_get_queue(sb->s_bdev);  in fat_ioctl_fitrim()
|
/kernel/linux/linux-5.10/drivers/dax/ |
D | super.c |
    176  q = bdev_get_queue(bdev);  in __bdev_dax_supported()
|
/kernel/linux/linux-5.10/drivers/md/bcache/ |
D | request.c |
    1013  !blk_queue_discard(bdev_get_queue(dc->bdev)))  in cached_dev_write()
    1127  !blk_queue_discard(bdev_get_queue(dc->bdev)))  in detached_dev_do_request()
|