Home
last modified time | relevance | path

Searched refs:bdev_get_queue (Results 1 – 25 of 64) sorted by relevance

123

/kernel/linux/linux-5.10/drivers/block/rnbd/
rnbd-srv-dev.h:51 return queue_max_segments(bdev_get_queue(dev->bdev)); in rnbd_dev_get_max_segs()
56 return queue_max_hw_sectors(bdev_get_queue(dev->bdev)); in rnbd_dev_get_max_hw_sects()
61 return blk_queue_secure_erase(bdev_get_queue(dev->bdev)); in rnbd_dev_get_secure_discard()
66 if (!blk_queue_discard(bdev_get_queue(dev->bdev))) in rnbd_dev_get_max_discard_sects()
69 return blk_queue_get_max_sectors(bdev_get_queue(dev->bdev), in rnbd_dev_get_max_discard_sects()
75 return bdev_get_queue(dev->bdev)->limits.discard_granularity; in rnbd_dev_get_discard_granularity()
80 return bdev_get_queue(dev->bdev)->limits.discard_alignment; in rnbd_dev_get_discard_alignment()
/kernel/linux/linux-5.10/block/
blk-zoned.c:161 if (!blk_queue_is_zoned(bdev_get_queue(bdev)) || in blkdev_report_zones()
176 if (!blk_queue_zone_resetall(bdev_get_queue(bdev))) in blkdev_allow_reset_all_zones()
206 struct request_queue *q = bdev_get_queue(bdev); in blkdev_zone_mgmt()
292 q = bdev_get_queue(bdev); in blkdev_report_zones_ioctl()
350 q = bdev_get_queue(bdev); in blkdev_zone_mgmt_ioctl()
blk-lib.c:29 struct request_queue *q = bdev_get_queue(bdev); in __blkdev_issue_discard()
169 struct request_queue *q = bdev_get_queue(bdev); in __blkdev_issue_write_same()
252 struct request_queue *q = bdev_get_queue(bdev); in __blkdev_issue_write_zeroes()
306 struct request_queue *q = bdev_get_queue(bdev); in __blkdev_issue_zero_pages()
ioctl.c:123 struct request_queue *q = bdev_get_queue(bdev); in blk_ioctl_discard()
550 queue_max_sectors(bdev_get_queue(bdev))); in blkdev_common_ioctl()
553 return put_ushort(argp, !blk_queue_nonrot(bdev_get_queue(bdev))); in blkdev_common_ioctl()
blk-settings.c:653 if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits, in disk_stack_limits()
/kernel/linux/linux-5.10/include/linux/
blkdev.h:975 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) in bdev_get_queue() function
1453 return queue_logical_block_size(bdev_get_queue(bdev)); in bdev_logical_block_size()
1463 return queue_physical_block_size(bdev_get_queue(bdev)); in bdev_physical_block_size()
1473 return queue_io_min(bdev_get_queue(bdev)); in bdev_io_min()
1483 return queue_io_opt(bdev_get_queue(bdev)); in bdev_io_opt()
1505 struct request_queue *q = bdev_get_queue(bdev); in bdev_alignment_offset()
1564 struct request_queue *q = bdev_get_queue(bdev); in bdev_discard_alignment()
1574 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_same()
1584 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_zeroes_sectors()
1594 struct request_queue *q = bdev_get_queue(bdev); in bdev_zoned_model()
[all …]
/kernel/linux/linux-5.10/drivers/md/
dm-table.c:423 struct request_queue *q = bdev_get_queue(bdev); in dm_set_device_limits()
875 struct request_queue *q = bdev_get_queue(bdev); in device_is_rq_stackable()
1384 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_zoned_model()
1423 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_matches_zone_sectors()
1581 struct request_queue *q = bdev_get_queue(dev->bdev); in device_flush_capable()
1631 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_rotational()
1639 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_not_random()
1647 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_write_same_capable()
1674 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_write_zeroes_capable()
1701 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_nowait_capable()
[all …]
dm-zoned-target.c:590 if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) { in dmz_bdev_is_dying()
790 q = bdev_get_queue(zoned_dev->bdev); in dmz_fixup_devices()
808 q = bdev_get_queue(zoned_dev->bdev); in dmz_fixup_devices()
md-linear.c:100 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in linear_conf()
raid0.c:398 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid0_run()
512 trace_block_bio_remap(bdev_get_queue(rdev->bdev), in raid0_handle_discard()
dm-mpath.c:532 q = bdev_get_queue(bdev); in multipath_clone_and_map()
873 struct request_queue *q = bdev_get_queue(bdev); in setup_scsi_dh()
952 q = bdev_get_queue(p->path.dev->bdev); in parse_path()
1618 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in activate_or_offline_path()
2053 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in pgpath_busy()
dm-clone-target.c:2030 struct request_queue *q = bdev_get_queue(bdev); in bdev_supports_discards()
2042 struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits; in disable_passdown_if_not_supported()
2064 struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits; in set_discard_limits()
dm-io.c:306 struct request_queue *q = bdev_get_queue(where->bdev); in do_region()
/kernel/linux/linux-5.10/drivers/target/
target_core_iblock.c:106 q = bdev_get_queue(bd); in iblock_configure_device()
696 struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd); in iblock_execute_rw()
795 struct request_queue *q = bdev_get_queue(bd); in iblock_get_blocks()
856 struct request_queue *q = bdev_get_queue(bd); in iblock_get_write_cache()
/kernel/linux/linux-5.10/fs/jfs/
ioctl.c:125 struct request_queue *q = bdev_get_queue(sb->s_bdev); in jfs_ioctl()
super.c:377 struct request_queue *q = bdev_get_queue(sb->s_bdev); in parse_options()
396 struct request_queue *q = bdev_get_queue(sb->s_bdev); in parse_options()
/kernel/linux/linux-5.10/fs/xfs/
xfs_discard.c:155 struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev); in xfs_ioc_trim()
/kernel/linux/linux-5.10/fs/iomap/
direct-io.c:70 dio->submit.last_queue = bdev_get_queue(iomap->bdev); in iomap_dio_submit_bio()
241 blk_queue_fua(bdev_get_queue(iomap->bdev))) in iomap_dio_bio_actor()
/kernel/linux/linux-5.10/fs/crypto/
inline_crypt.c:40 devs[0] = bdev_get_queue(sb->s_bdev); in fscrypt_get_devices()
/kernel/linux/linux-5.10/drivers/nvme/target/
io-cmd-bdev.c:13 const struct queue_limits *ql = &bdev_get_queue(bdev)->limits; in nvmet_bdev_set_limits()
/kernel/linux/linux-5.10/drivers/block/xen-blkback/
xenbus.c:518 q = bdev_get_queue(bdev); in xen_vbd_create()
578 struct request_queue *q = bdev_get_queue(bdev); in xen_blkbk_discard()
/kernel/linux/linux-5.10/fs/
block_dev.c:294 !blk_poll(bdev_get_queue(bdev), qc, true)) in __blkdev_direct_IO_simple()
330 struct request_queue *q = bdev_get_queue(bdev); in blkdev_iopoll()
490 !blk_poll(bdev_get_queue(bdev), qc, true)) in __blkdev_direct_IO()
/kernel/linux/linux-5.10/fs/fat/
file.c:130 struct request_queue *q = bdev_get_queue(sb->s_bdev); in fat_ioctl_fitrim()
/kernel/linux/linux-5.10/drivers/dax/
super.c:176 q = bdev_get_queue(bdev); in __bdev_dax_supported()
/kernel/linux/linux-5.10/drivers/md/bcache/
request.c:1013 !blk_queue_discard(bdev_get_queue(dc->bdev))) in cached_dev_write()
1127 !blk_queue_discard(bdev_get_queue(dc->bdev))) in detached_dev_do_request()

123