Searched refs: max_sectors (results 1 – 25 of 116, sorted by relevance)

/drivers/usb/storage/
scsiglue.c
  103  unsigned int max_sectors = 64;  in slave_configure() local
  106  max_sectors = PAGE_SIZE >> 9;  in slave_configure()
  107  if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)  in slave_configure()
  109  max_sectors);  in slave_configure()
  603  static DEVICE_ATTR_RW(max_sectors);
  661  .max_sectors = 240,
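
A note on the scsiglue.c hits above: slave_configure() starts from a 64-sector default, lowers it to PAGE_SIZE >> 9 (4096 / 512 = 8 sectors) for devices flagged to use the minimum transfer size, and then shrinks the queue's hardware limit only if it is currently larger. A minimal userspace model of that clamp follows; the function and parameter names are illustrative, not the kernel's block-layer API.

    /* Model of the clamp in usb-storage's slave_configure(); names are
     * hypothetical, only the arithmetic mirrors the snippet above. */
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static unsigned int clamp_hw_sectors(unsigned int current_hw_max, int use_minimum)
    {
            unsigned int max_sectors = 64;            /* usb-storage default */

            if (use_minimum)
                    max_sectors = PAGE_SIZE >> 9;     /* 4096 / 512 = 8 sectors */
            return current_hw_max > max_sectors ? max_sectors : current_hw_max;
    }

    int main(void)
    {
            printf("%u\n", clamp_hw_sectors(240, 1)); /* prints 8 */
            printf("%u\n", clamp_hw_sectors(240, 0)); /* prints 64 */
            return 0;
    }
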
/drivers/infiniband/ulp/iser/
iscsi_iser.c
  103  module_param_cb(max_sectors, &iscsi_iser_size_ops, &iser_max_sectors,
  105  MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command, should > 0 (default:…
  685  shost->max_sectors = min(iser_max_sectors, max_fr_sectors);  in iscsi_iser_session_create()
  689  shost->max_sectors);  in iscsi_iser_session_create()
  691  if (shost->max_sectors < iser_max_sectors)  in iscsi_iser_session_create()
  693  iser_max_sectors, shost->max_sectors);  in iscsi_iser_session_create()
iser_verbs.c
  509  unsigned int max_sectors)  in iser_calc_scsi_params() argument
  533  sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);  in iser_calc_scsi_params()
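
For the iser_calc_scsi_params() hit above, the scatter-gather table is sized from max_sectors: with SECTOR_SIZE = 512 and SZ_4K = 4096, sg_tablesize = DIV_ROUND_UP(max_sectors * 512, 4096), so for example a max_sectors of 1024 needs DIV_ROUND_UP(524288, 4096) = 128 entries. The 1024 here is only an illustrative value, not a claim about the module parameter's truncated default.
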
/drivers/scsi/
hosts.c
  455  if (sht->max_sectors)  in scsi_host_alloc()
  456  shost->max_sectors = sht->max_sectors;  in scsi_host_alloc()
  458  shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;  in scsi_host_alloc()
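
The hosts.c hits show the fallback rule for every SCSI host: scsi_host_alloc() copies the template's max_sectors if the driver set one and otherwise uses SCSI_DEFAULT_MAX_SECTORS. A small standalone model follows; treating SCSI_DEFAULT_MAX_SECTORS as 1024 is an assumption about the kernel headers, not something shown in this listing.

    /* Model of the max_sectors fallback in scsi_host_alloc(). */
    #include <stdio.h>

    #define SCSI_DEFAULT_MAX_SECTORS 1024u   /* assumed value, see lead-in */

    struct host_template { unsigned int max_sectors; };   /* 0 means "not set" */

    static unsigned int resolve_max_sectors(const struct host_template *sht)
    {
            return sht->max_sectors ? sht->max_sectors : SCSI_DEFAULT_MAX_SECTORS;
    }

    int main(void)
    {
            struct host_template oak  = { .max_sectors = 128 };  /* like oak.c further down */
            struct host_template bare = { 0 };

            printf("%u %u\n", resolve_max_sectors(&oak), resolve_max_sectors(&bare));
            return 0;
    }
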
/drivers/md/
raid1.c
  596  static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)  in read_balance() argument
  786  *max_sectors = sectors;  in read_balance()
  1211  int max_sectors;  in raid1_read_request() local
  1251  rdisk = read_balance(conf, r1_bio, &max_sectors);  in raid1_read_request()
  1283  if (max_sectors < bio_sectors(bio)) {  in raid1_read_request()
  1284  struct bio *split = bio_split(bio, max_sectors,  in raid1_read_request()
  1290  r1_bio->sectors = max_sectors;  in raid1_read_request()
  1331  int max_sectors;  in raid1_write_request() local
  1376  max_sectors = r1_bio->sectors;  in raid1_write_request()
  1406  is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,  in raid1_write_request()
  [all …]
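
In the raid1.c hits, read_balance() reports through *max_sectors how many sectors the chosen disk can serve from the start of the request; when that is smaller than the bio, raid1_read_request() splits the bio at max_sectors and trims r1_bio->sectors to match, leaving the remainder as a separate request. A toy model of that split (plain C, not the kernel's bio_split()):

    /* Toy model of splitting a request at max_sectors; names are illustrative. */
    #include <stdio.h>

    struct req { unsigned long start; unsigned int sectors; };

    static struct req split_req(struct req *r, unsigned int max_sectors)
    {
            struct req head = { r->start, max_sectors };

            r->start   += max_sectors;   /* remainder begins where the head ends */
            r->sectors -= max_sectors;
            return head;
    }

    int main(void)
    {
            struct req bio  = { .start = 2048, .sectors = 1024 };
            struct req head = split_req(&bio, 256);   /* as if read_balance() said 256 */

            printf("head %lu+%u, rest %lu+%u\n",
                   head.start, head.sectors, bio.start, bio.sectors);
            return 0;
    }
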
raid10.c
  706  int *max_sectors)  in read_balance() argument
  855  *max_sectors = best_good_sectors;  in read_balance()
  1164  int max_sectors;  in raid10_read_request() local
  1201  rdev = read_balance(conf, r10_bio, &max_sectors);  in raid10_read_request()
  1216  if (max_sectors < bio_sectors(bio)) {  in raid10_read_request()
  1217  struct bio *split = bio_split(bio, max_sectors,  in raid10_read_request()
  1225  r10_bio->sectors = max_sectors;  in raid10_read_request()
  1410  int max_sectors;  in raid10_write_request() local
  1475  max_sectors = r10_bio->sectors;  in raid10_write_request()
  1500  is_bad = is_badblock(rdev, dev_sector, max_sectors,  in raid10_write_request()
  [all …]
md.c
  2216  sector_t max_sectors;  in super_1_rdev_size_change() local
  2223  max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;  in super_1_rdev_size_change()
  2224  max_sectors -= rdev->data_offset;  in super_1_rdev_size_change()
  2225  if (!num_sectors || num_sectors > max_sectors)  in super_1_rdev_size_change()
  2226  num_sectors = max_sectors;  in super_1_rdev_size_change()
  2244  max_sectors = sb_start - bm_space - 4*2;  in super_1_rdev_size_change()
  2246  if (!num_sectors || num_sectors > max_sectors)  in super_1_rdev_size_change()
  2247  num_sectors = max_sectors;  in super_1_rdev_size_change()
  5099  unsigned long long max_sectors, resync;  in sync_completed_show() local
  5110  max_sectors = mddev->resync_max_sectors;  in sync_completed_show()
  [all …]
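
In super_1_rdev_size_change(), the usable size comes straight from the backing device: i_size_read() returns bytes, so shifting right by 9 converts to 512-byte sectors, the data_offset in front of the data area is subtracted, and a requested num_sectors of 0 (or anything larger) is clamped to that maximum. For example, a device of 2,147,483,648 sectors (1 TiB) with a data_offset of 2048 sectors leaves max_sectors = 2,147,481,600.
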
dm-thin.c
  4064  if (limits->max_sectors < pool->sectors_per_block) {  in pool_io_hints()
  4065  while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {  in pool_io_hints()
  4066  if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)  in pool_io_hints()
  4067  limits->max_sectors--;  in pool_io_hints()
  4068  limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);  in pool_io_hints()
  4078  if (is_factor(pool->sectors_per_block, limits->max_sectors))  in pool_io_hints()
  4079  blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);  in pool_io_hints()
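
The dm-thin hits show pool_io_hints() reconciling the stacked queue limit with the pool's block size: when max_sectors is smaller than sectors_per_block, it is shrunk until it divides the block size evenly, stepping down through powers of two (decrementing first when the value is already a power of two so the rounding makes progress). A standalone model of that loop, with is_factor() and rounddown_pow_of_two() re-implemented in plain C:

    /* Standalone model of the max_sectors adjustment in pool_io_hints(). */
    #include <stdio.h>
    #include <stdint.h>

    static int is_factor(uint64_t block_sectors, uint32_t n)
    {
            return n && block_sectors % n == 0;
    }

    static uint32_t rounddown_pow_of_two(uint32_t n)
    {
            uint64_t p = 1;

            while (p * 2 <= n)
                    p *= 2;
            return (uint32_t)p;
    }

    static uint32_t fit_max_sectors(uint64_t sectors_per_block, uint32_t max_sectors)
    {
            if (max_sectors < sectors_per_block) {
                    while (!is_factor(sectors_per_block, max_sectors)) {
                            if ((max_sectors & (max_sectors - 1)) == 0)
                                    max_sectors--;   /* power of two: force a smaller one */
                            max_sectors = rounddown_pow_of_two(max_sectors);
                    }
            }
            return max_sectors;
    }

    int main(void)
    {
            /* block of 768 sectors, queue limit 320 -> shrinks to 256, a factor of 768 */
            printf("%u\n", fit_max_sectors(768, 320));
            return 0;
    }
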
/drivers/target/
target_core_xcopy.c
  674  unsigned int max_sectors;  in target_xcopy_do_work() local
  698  max_sectors = min(src_dev->dev_attrib.hw_max_sectors,  in target_xcopy_do_work()
  700  max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);  in target_xcopy_do_work()
  702  max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));  in target_xcopy_do_work()
target_core_device.c
  522  static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)  in se_dev_align_max_sectors() argument
  531  aligned_max_sectors = rounddown(max_sectors, alignment);  in se_dev_align_max_sectors()
  533  if (max_sectors != aligned_max_sectors)  in se_dev_align_max_sectors()
  535  max_sectors, aligned_max_sectors);  in se_dev_align_max_sectors()
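
se_dev_align_max_sectors() keeps one command mapping to whole pages: the alignment unit is PAGE_SIZE / block_size sectors (at least 1), max_sectors is rounded down to a multiple of it, and a message is emitted only when rounding actually changed the value. A small model of the rounding, assuming 4 KiB pages:

    /* Model of the page-aligned rounddown in se_dev_align_max_sectors(). */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u

    static uint32_t align_max_sectors(uint32_t max_sectors, uint32_t block_size)
    {
            uint32_t alignment = PAGE_SIZE / block_size;

            if (alignment == 0)
                    alignment = 1;   /* block larger than a page */
            return max_sectors - (max_sectors % alignment);   /* rounddown() */
    }

    int main(void)
    {
            /* 512-byte blocks: 8 sectors per page, so 1023 rounds down to 1016 */
            printf("%u\n", align_max_sectors(1023, 512));
            return 0;
    }
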
/drivers/scsi/aacraid/
commctrl.c
  659  (dev->scsi_host_ptr->max_sectors << 9)) {  in aac_send_raw_srb()
  662  dev->scsi_host_ptr->max_sectors << 9);  in aac_send_raw_srb()
  726  (dev->scsi_host_ptr->max_sectors << 9) :  in aac_send_raw_srb()
  781  (dev->scsi_host_ptr->max_sectors << 9) :  in aac_send_raw_srb()
  839  (dev->scsi_host_ptr->max_sectors << 9) :  in aac_send_raw_srb()
  882  (dev->scsi_host_ptr->max_sectors << 9) :  in aac_send_raw_srb()
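
The repeated max_sectors << 9 in commctrl.c is simply the sector-to-byte conversion: one sector is 512 = 2^9 bytes, so for example a 128-sector limit becomes 128 << 9 = 65,536 bytes (64 KiB).
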
comminit.c
  123  cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);  in aac_alloc_comm()
  185  cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);  in aac_alloc_comm()
  607  host->max_sectors = (status[1] >> 16) << 1;  in aac_init_adapter()
linit.c
  1498  .max_sectors = 128,
  1720  shost->max_sectors = (shost->sg_tablesize * 8) + 112;  in aac_probe_one()
  1726  shost->max_sectors = (shost->sg_tablesize * 8) + 112;  in aac_probe_one()
  1730  shost->max_segment_size = shost->max_sectors << 9;  in aac_probe_one()
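
The linit.c formula ties the limit to the scatter-gather table: shost->max_sectors = sg_tablesize * 8 + 112, and max_segment_size is that value shifted back into bytes. With a purely illustrative sg_tablesize of 32 (not a value taken from the driver), that gives 32 * 8 + 112 = 368 sectors, i.e. 368 << 9 = 188,416 bytes.
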
/drivers/scsi/cxlflash/
main.h
  94  u64 max_sectors;  member
/drivers/block/null_blk/
null_blk.h
  90  unsigned int max_sectors;  /* Max sectors per command */  member
main.c
  156  module_param_named(max_sectors, g_max_sectors, int, 0444);
  157  MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");
  357  NULLB_DEVICE_ATTR(max_sectors, uint, NULL);
  605  dev->max_sectors = g_max_sectors;  in null_alloc_dev()
  1913  if (dev->max_sectors)  in null_add_dev()
  1914  blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);  in null_add_dev()
/drivers/scsi/megaraid/
mega_common.h
  187  uint16_t max_sectors;  member
megaraid_mbox.c
  166  module_param_named(max_sectors, megaraid_max_sectors, int, 0);
  167  MODULE_PARM_DESC(max_sectors,
  634  host->max_sectors = adapter->max_sectors;  in megaraid_io_attach()
  833  adapter->max_sectors = megaraid_max_sectors;  in megaraid_init_mbox()
/drivers/ata/
libata-core.c
  1163  static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)  in ata_read_native_max_address() argument
  1194  *max_sectors = ata_tf_to_lba48(&tf) + 1;  in ata_read_native_max_address()
  1196  *max_sectors = ata_tf_to_lba(&tf) + 1;  in ata_read_native_max_address()
  1198  (*max_sectors)--;  in ata_read_native_max_address()
  2571  dev->max_sectors = 0;  in ata_dev_configure()
  2712  dev->max_sectors = ATA_MAX_SECTORS;  in ata_dev_configure()
  2714  dev->max_sectors = ATA_MAX_SECTORS_LBA48;  in ata_dev_configure()
  2722  dev->max_sectors = ATA_MAX_SECTORS;  in ata_dev_configure()
  2727  dev->max_sectors = ATA_MAX_SECTORS_TAPE;  in ata_dev_configure()
  2732  dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,  in ata_dev_configure()
  [all …]
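
The ata_dev_configure() hits show the per-device cap being chosen from the command set: a quirked device is limited to ATA_MAX_SECTORS_128, an LBA48-capable one gets ATA_MAX_SECTORS_LBA48, and otherwise ATA_MAX_SECTORS applies (an LBA28 transfer can address at most 256 sectors per command). A simplified model follows; the numeric values of the constants are assumptions about include/linux/ata.h, not taken from this listing.

    /* Simplified model of the cap selection in ata_dev_configure(); the
     * constant values below are assumed, see the lead-in. */
    #include <stdio.h>

    enum {
            ATA_MAX_SECTORS_128   = 128,
            ATA_MAX_SECTORS       = 256,
            ATA_MAX_SECTORS_LBA48 = 65535,
    };

    static unsigned int pick_cap(int lba48, int quirk_128)
    {
            if (quirk_128)
                    return ATA_MAX_SECTORS_128;
            return lba48 ? ATA_MAX_SECTORS_LBA48 : ATA_MAX_SECTORS;
    }

    int main(void)
    {
            printf("%u %u %u\n", pick_cap(0, 0), pick_cap(1, 0), pick_cap(1, 1));
            return 0;
    }
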
pata_it821x.c
  505  if (adev->max_sectors > 255)  in it821x_dev_config()
  506  adev->max_sectors = 255;  in it821x_dev_config()
pata_pdc202xx_old.c
  262  adev->max_sectors = 256;  in pdc2026x_dev_config()
/drivers/scsi/arm/
oak.c
  117  .max_sectors = 128,
cumana_1.c
  227  .max_sectors = 128,
/drivers/scsi/snic/
snic_ctl.c
  213  snic->shost->max_sectors = snic->fwinfo.max_io_sz >> 9;  in snic_io_exch_ver_cmpl_handler()
/drivers/scsi/bfa/
bfad_im.c
  738  sht->max_sectors = max_xfer_size << 1;  in bfad_scsi_host_alloc()
  813  .max_sectors = BFAD_MAX_SECTORS,
  835  .max_sectors = BFAD_MAX_SECTORS,
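
In bfad_im.c the shift goes the other way: assuming max_xfer_size is expressed in KiB, one KiB is two 512-byte sectors, so max_xfer_size << 1 converts the transfer size into a sector count (a 64 KiB limit becomes 128 sectors). The unit of max_xfer_size is an assumption, not something visible in this listing.
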
