/drivers/target/ |
D | target_core_sbc.c |
    216  static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)  in sbc_get_size() argument
    218  return cmd->se_dev->dev_attrib.block_size * sectors;  in sbc_get_size()
    286  unsigned int sectors = sbc_get_write_same_sectors(cmd);  in sbc_setup_write_same() local
    295  if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {  in sbc_setup_write_same()
    297  sectors, cmd->se_dev->dev_attrib.max_write_same_len);  in sbc_setup_write_same()
    303  if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||  in sbc_setup_write_same()
    304  ((cmd->t_task_lba + sectors) > end_lba)) {  in sbc_setup_write_same()
    306  (unsigned long long)end_lba, cmd->t_task_lba, sectors);  in sbc_setup_write_same()
    334  ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true);  in sbc_setup_write_same()
    721  u32 sectors, bool is_write)  in sbc_check_prot() argument
    [all …]
|
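The sbc_setup_write_same() hits at lines 303-304 above are the usual guard on a sector range: reject the request if adding `sectors` to the starting LBA wraps around, or if the resulting end runs past the device. A minimal standalone sketch of that check (the helper name and the reading of `end_lba` as one-past-the-last LBA are illustrative assumptions, not the driver's exact code):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative helper: accept [lba, lba + sectors) only if the addition
     * does not wrap and the range stays on the device. end_lba is assumed to
     * be the first LBA past the end of the device. */
    static bool lba_range_ok(uint64_t lba, uint32_t sectors, uint64_t end_lba)
    {
            uint64_t end = lba + sectors;

            if (end < lba)          /* 64-bit wrap-around */
                    return false;
            return end <= end_lba;  /* must not run past the device */
    }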
/drivers/md/ |
D | raid0.c |
    68  sector_t curr_zone_end, sectors;  in create_strip_zones() local
    87  sectors = rdev1->sectors;  in create_strip_zones()
    88  sector_div(sectors, mddev->chunk_sectors);  in create_strip_zones()
    89  rdev1->sectors = sectors * mddev->chunk_sectors;  in create_strip_zones()
    99  (unsigned long long)rdev1->sectors,  in create_strip_zones()
    101  (unsigned long long)rdev2->sectors);  in create_strip_zones()
    107  if (rdev2->sectors == rdev1->sectors) {  in create_strip_zones()
    198  if (!smallest || (rdev1->sectors < smallest->sectors))  in create_strip_zones()
    208  zone->zone_end = smallest->sectors * cnt;  in create_strip_zones()
    221  zone->dev_start = smallest->sectors;  in create_strip_zones()
    [all …]
|
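Lines 87-89 of create_strip_zones() round each member device's size down to a whole number of chunks; the kernel's sector_div() divides a sector_t in place and returns the remainder. A hedged userspace equivalent of that rounding step:

    #include <stdint.h>

    /* Stand-in for the kernel's sector_div(): divide *sectors in place by
     * divisor and return the remainder. */
    static uint32_t sector_div_sketch(uint64_t *sectors, uint32_t divisor)
    {
            uint32_t rem = (uint32_t)(*sectors % divisor);

            *sectors /= divisor;
            return rem;
    }

    /* Round a device size (in 512-byte sectors) down to a multiple of the
     * chunk size, mirroring the per-rdev adjustment in create_strip_zones(). */
    static uint64_t round_to_whole_chunks(uint64_t dev_sectors, uint32_t chunk_sectors)
    {
            uint64_t chunks = dev_sectors;

            sector_div_sketch(&chunks, chunk_sectors);
            return chunks * chunk_sectors;
    }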
D | md-linear.c |
    49  static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)  in linear_size() argument
    55  WARN_ONCE(sectors || raid_disks,  in linear_size()
    79  sector_t sectors;  in linear_conf() local
    89  sectors = rdev->sectors;  in linear_conf()
    90  sector_div(sectors, mddev->chunk_sectors);  in linear_conf()
    91  rdev->sectors = sectors * mddev->chunk_sectors;  in linear_conf()
    97  conf->array_sectors += rdev->sectors;  in linear_conf()
    117  conf->disks[0].end_sector = conf->disks[0].rdev->sectors;  in linear_conf()
    122  conf->disks[i].rdev->sectors;  in linear_conf()
    228  start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;  in linear_make_request()
    [all …]
|
D | raid1.c |
    65  sector_t hi = lo + r1_bio->sectors;  in check_and_add_serial()
    339  r1_bio->sector + (r1_bio->sectors);  in update_head_pos()
    422  r1_bio->sectors,  in close_write()
    454  sector_t hi = r1_bio->sector + r1_bio->sectors;  in raid1_end_write_request()
    516  if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,  in raid1_end_write_request()
    564  sector_t sectors)  in align_to_barrier_unit_end() argument
    568  WARN_ON(sectors == 0);  in align_to_barrier_unit_end()
    576  if (len > sectors)  in align_to_barrier_unit_end()
    577  len = sectors;  in align_to_barrier_unit_end()
    599  int sectors;  in read_balance() local
    [all …]
|
D | raid10.c |
    320  r10_bio->devs[slot].addr + (r10_bio->sectors);  in update_head_pos()
    407  r10_bio->sectors,  in close_write()
    513  r10_bio->sectors,  in raid10_end_write_request()
    710  int sectors = r10_bio->sectors;  in read_balance() local
    737  && (this_sector + sectors >= conf->next_resync)) ||  in read_balance()
    740  this_sector + sectors)))  in read_balance()
    755  r10_bio->devs[slot].addr + sectors >  in read_balance()
    769  r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)  in read_balance()
    773  if (is_badblock(rdev, dev_sector, sectors,  in read_balance()
    784  if (!do_balance && sectors > bad_sectors)  in read_balance()
    [all …]
|
D | dm-stats.c |
    26  unsigned long long sectors[2];  member
    585  p->sectors[idx] += len;  in dm_stat_for_entry()
    714  shared->tmp.sectors[READ] = 0;  in __dm_stat_init_temporary_percpu_totals()
    715  shared->tmp.sectors[WRITE] = 0;  in __dm_stat_init_temporary_percpu_totals()
    732  shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);  in __dm_stat_init_temporary_percpu_totals()
    733  shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);  in __dm_stat_init_temporary_percpu_totals()
    765  p->sectors[READ] -= shared->tmp.sectors[READ];  in __dm_stat_clear()
    766  p->sectors[WRITE] -= shared->tmp.sectors[WRITE];  in __dm_stat_clear()
    882  shared->tmp.sectors[READ],  in dm_stats_print()
    886  shared->tmp.sectors[WRITE],  in dm_stats_print()
|
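The dm-stats hits split the work between the hot path, where each CPU bumps its own counter (line 585), and the reader, which zeroes a temporary total and sums every CPU's counters with READ_ONCE() (lines 714-733). A simplified, single-threaded sketch of that aggregation, with the struct layout invented for illustration:

    #include <stdint.h>

    enum { EX_READ = 0, EX_WRITE = 1 };

    /* Invented for illustration: one counter pair per CPU, bumped only by
     * that CPU on the I/O path. */
    struct example_percpu_stat {
            unsigned long long sectors[2];
    };

    /* Sum all per-CPU sector counters into a temporary total, the way
     * __dm_stat_init_temporary_percpu_totals() builds shared->tmp. */
    static void example_sum_sectors(const struct example_percpu_stat *percpu,
                                    int nr_cpus, unsigned long long tmp[2])
    {
            tmp[EX_READ] = 0;
            tmp[EX_WRITE] = 0;
            for (int cpu = 0; cpu < nr_cpus; cpu++) {
                    tmp[EX_READ]  += percpu[cpu].sectors[EX_READ];
                    tmp[EX_WRITE] += percpu[cpu].sectors[EX_WRITE];
            }
    }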
D | md.h |
    51  sector_t sectors; /* Device size (in 512bytes sectors) */  member
    218  static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,  in is_badblock() argument
    223  sectors,  in is_badblock()
    231  extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
    233  extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
    594  int (*resize) (struct mddev *mddev, sector_t sectors);
    595  sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
|
/drivers/scsi/ |
D | scsicam.c |
    181  unsigned long heads, sectors, cylinders, temp;  in setsize() local
    184  sectors = 62L; /* Maximize sectors per track */  in setsize()
    186  temp = cylinders * sectors; /* Compute divisor for heads */  in setsize()
    191  sectors = capacity / temp; /* Compute value for sectors per  in setsize()
    194  sectors++; /* Else, increment number of sectors */  in setsize()
    195  temp = heads * sectors; /* Compute divisor for cylinders */  in setsize()
    203  *secs = (unsigned int) sectors;  in setsize()
|
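setsize() fits a fake CHS geometry to a disk that only reports a raw capacity: it picks starting values, derives heads and sectors-per-track, and finally computes cylinders from capacity / (heads x sectors). A simplified, hedged version of that last step only (the real heuristic also adjusts heads and sectors along the way):

    #include <stdint.h>

    /* Derive a cylinder count from a capacity (in sectors) and a chosen
     * heads / sectors-per-track pair, so that C * H * S <= capacity.
     * Simplified sketch, not the exact setsize() heuristic. */
    static unsigned long chs_cylinders(uint64_t capacity, unsigned long heads,
                                       unsigned long spt)
    {
            return (unsigned long)(capacity / ((uint64_t)heads * spt));
    }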
D | ps3rom.c |
    161  u32 sectors)  in ps3rom_read_request() argument
    166  __func__, __LINE__, sectors, start_sector);  in ps3rom_read_request()
    170  sectors, 0, dev->bounce_lpar, &dev->tag);  in ps3rom_read_request()
    182  u32 sectors)  in ps3rom_write_request() argument
    187  __func__, __LINE__, sectors, start_sector);  in ps3rom_write_request()
    193  sectors, 0, dev->bounce_lpar, &dev->tag);  in ps3rom_write_request()
|
/drivers/mtd/ |
D | ssfdc.c |
    23  unsigned char sectors;  member
    318  ssfdc->sectors = 32;  in ssfdcr_add_mtd()
    319  get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors);  in ssfdcr_add_mtd()
    321  ((long)ssfdc->sectors * (long)ssfdc->heads));  in ssfdcr_add_mtd()
    324  ssfdc->cylinders, ssfdc->heads , ssfdc->sectors,  in ssfdcr_add_mtd()
    326  (long)ssfdc->sectors);  in ssfdcr_add_mtd()
    329  (long)ssfdc->sectors;  in ssfdcr_add_mtd()
    411  ssfdc->cylinders, ssfdc->heads, ssfdc->sectors);  in ssfdcr_getgeo()
    414  geo->sectors = ssfdc->sectors;  in ssfdcr_getgeo()
|
D | nftlcore.c |
    71  nftl->sectors = nftl->mbd.size / temp;  in nftl_add_mtd()
    73  nftl->sectors++;  in nftl_add_mtd()
    74  temp = nftl->cylinders * nftl->sectors;  in nftl_add_mtd()
    79  temp = nftl->heads * nftl->sectors;  in nftl_add_mtd()
    84  if (nftl->mbd.size != nftl->heads * nftl->cylinders * nftl->sectors) {  in nftl_add_mtd()
    93  nftl->cylinders, nftl->heads , nftl->sectors,  in nftl_add_mtd()
    95  (long)nftl->sectors );  in nftl_add_mtd()
    771  geo->sectors = nftl->sectors;  in nftl_getgeo()
|
D | inftlcore.c |
    78  inftl->sectors = inftl->mbd.size / temp;  in inftl_add_mtd()
    80  inftl->sectors++;  in inftl_add_mtd()
    81  temp = inftl->cylinders * inftl->sectors;  in inftl_add_mtd()
    86  temp = inftl->heads * inftl->sectors;  in inftl_add_mtd()
    91  if (inftl->mbd.size != inftl->heads * inftl->cylinders * inftl->sectors) {  in inftl_add_mtd()
    100  inftl->cylinders, inftl->heads , inftl->sectors,  in inftl_add_mtd()
    102  (long)inftl->sectors );  in inftl_add_mtd()
    921  geo->sectors = inftl->sectors;  in inftl_getgeo()
|
/drivers/usb/storage/ |
D | datafab.c |
    60  unsigned long sectors; /* total sector count */  member
    138  u32 sectors)  in datafab_read_data() argument
    153  if (sectors > 0x0FFFFFFF)  in datafab_read_data()
    162  totallen = sectors * info->ssize;  in datafab_read_data()
    221  u32 sectors)  in datafab_write_data() argument
    237  if (sectors > 0x0FFFFFFF)  in datafab_write_data()
    246  totallen = sectors * info->ssize;  in datafab_write_data()
    420  info->sectors = ((u32)(reply[117]) << 24) |  in datafab_id_device()
    582  info->sectors, info->ssize);  in datafab_transport()
    586  ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1);  in datafab_transport()
|
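Line 586 is the READ CAPACITY(10) reply: the first four bytes carry the last addressable LBA (total sectors minus one) in big-endian order, followed by the block length. A hedged sketch of building that 8-byte response (helper names are mine; the layout follows the standard READ CAPACITY(10) format):

    #include <stdint.h>

    /* Store a 32-bit value big-endian, matching what cpu_to_be32() produces. */
    static void put_be32(uint8_t *p, uint32_t v)
    {
            p[0] = (uint8_t)(v >> 24);
            p[1] = (uint8_t)(v >> 16);
            p[2] = (uint8_t)(v >> 8);
            p[3] = (uint8_t)v;
    }

    /* Fill an 8-byte READ CAPACITY(10) response: last LBA, then block size. */
    static void fill_read_capacity10(uint8_t resp[8], uint32_t total_sectors,
                                     uint32_t block_size)
    {
            put_be32(resp,     total_sectors - 1);  /* last addressable LBA */
            put_be32(resp + 4, block_size);         /* bytes per sector */
    }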
D | jumpshot.c |
    96  unsigned long sectors; /* total sector count */  member
    156  u32 sectors)  in jumpshot_read_data() argument
    174  totallen = sectors * info->ssize;  in jumpshot_read_data()
    233  u32 sectors)  in jumpshot_write_data() argument
    251  totallen = sectors * info->ssize;  in jumpshot_write_data()
    354  info->sectors = ((u32)(reply[117]) << 24) |  in jumpshot_id_device()
    513  info->sectors, info->ssize);  in jumpshot_transport()
    517  ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1);  in jumpshot_transport()
|
D | sddr55.c |
    190  unsigned short sectors) {  in sddr55_read_data() argument
    209  len = min((unsigned int) sectors, (unsigned int) info->blocksize >>  in sddr55_read_data()
    217  while (sectors>0) {  in sddr55_read_data()
    227  pages = min((unsigned int) sectors << info->smallpageshift,  in sddr55_read_data()
    295  sectors -= pages >> info->smallpageshift;  in sddr55_read_data()
    309  unsigned short sectors) {  in sddr55_write_data() argument
    336  len = min((unsigned int) sectors, (unsigned int) info->blocksize >>  in sddr55_write_data()
    344  while (sectors > 0) {  in sddr55_write_data()
    354  pages = min((unsigned int) sectors << info->smallpageshift,  in sddr55_write_data()
    503  sectors -= pages >> info->smallpageshift;  in sddr55_write_data()
|
D | alauda.c |
    914  unsigned int sectors)  in alauda_read_data() argument
    935  len = min(sectors, blocksize) * (pagesize + 64);  in alauda_read_data()
    949  while (sectors > 0) {  in alauda_read_data()
    965  pages = min(sectors, blocksize - page);  in alauda_read_data()
    998  sectors -= pages;  in alauda_read_data()
    1009  unsigned int sectors)  in alauda_write_data() argument
    1027  len = min(sectors, blocksize) * pagesize;  in alauda_write_data()
    1051  while (sectors > 0) {  in alauda_write_data()
    1053  unsigned int pages = min(sectors, blocksize - page);  in alauda_write_data()
    1075  sectors -= pages;  in alauda_write_data()
|
/drivers/md/bcache/ |
D | alloc.c |
    86  void bch_rescale_priorities(struct cache_set *c, int sectors)  in bch_rescale_priorities() argument
    93  atomic_sub(sectors, &c->rescale);  in bch_rescale_priorities()
    608  unsigned int sectors,  in bch_alloc_sectors() argument
    656  sectors = min(sectors, b->sectors_free);  in bch_alloc_sectors()
    658  SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);  in bch_alloc_sectors()
    659  SET_KEY_SIZE(k, sectors);  in bch_alloc_sectors()
    670  b->sectors_free -= sectors;  in bch_alloc_sectors()
    673  SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);  in bch_alloc_sectors()
    675  atomic_long_add(sectors,  in bch_alloc_sectors()
|
D | request.c |
    117  unsigned int sectors = min(bio_sectors(bio),  in bch_data_invalidate() local
    123  bio->bi_iter.bi_sector += sectors;  in bch_data_invalidate()
    124  bio->bi_iter.bi_size -= sectors << 9;  in bch_data_invalidate()
    129  sectors));  in bch_data_invalidate()
    367  unsigned int sectors, congested;  in check_should_bypass() local
    439  sectors = max(task->sequential_io,  in check_should_bypass()
    443  sectors >= dc->sequential_cutoff >> 9) {  in check_should_bypass()
    448  if (congested && sectors >= congested) {  in check_should_bypass()
    526  unsigned int sectors = KEY_INODE(k) == s->iop.inode  in cache_lookup_fn() local
    530  int ret = s->d->cache_miss(b, s, bio, sectors);  in cache_lookup_fn()
    [all …]
|
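Lines 123-124 show the two units a bio iterator keeps in sync: bi_sector counts 512-byte sectors while bi_size counts bytes, so moving forward by `sectors` adds `sectors` to one and `sectors << 9` to the other. A trivial illustration using an invented mini-iterator:

    #include <stdint.h>

    /* Invented mini-iterator: position in 512-byte sectors plus bytes left. */
    struct example_iter {
            uint64_t sector;   /* current position, in sectors */
            uint32_t size;     /* remaining payload, in bytes */
    };

    /* Advance by a number of sectors, keeping both units consistent. */
    static void example_advance(struct example_iter *it, uint32_t sectors)
    {
            it->sector += sectors;
            it->size   -= sectors << 9;   /* one sector is 1 << 9 = 512 bytes */
    }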
/drivers/block/paride/ |
D | pd.c |
    229  int sectors;  member
    350  s = (block % disk->sectors) + 1;  in pd_ide_command()
    351  h = (block /= disk->sectors) % disk->heads;  in pd_ide_command()
    638  pd_send_command(disk, disk->sectors, 0, disk->heads - 1, 0, 0,  in pd_init_dev_parms()
    720  disk->sectors = le16_to_cpu(*(__le16 *) (pd_scratch + 12));  in pd_identify()
    726  disk->capacity = disk->sectors * disk->heads * disk->cylinders;  in pd_identify()
    742  disk->cylinders, disk->heads, disk->sectors,  in pd_identify()
    812  geo->sectors = PD_LOG_SECTS;  in pd_getgeo()
    813  geo->cylinders = disk->capacity / (geo->heads * geo->sectors);  in pd_getgeo()
    816  geo->sectors = disk->sectors;  in pd_getgeo()
|
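Lines 350-351 perform the textbook LBA-to-CHS split: the sector is the remainder modulo sectors-per-track (1-based), the head is the next remainder modulo the head count, and the cylinder is whatever remains. A standalone version of that conversion (function name illustrative):

    /* Split a linear block number into cylinder/head/sector, given the drive
     * geometry. Sectors are 1-based; heads and cylinders are 0-based. */
    static void lba_to_chs(unsigned long block, int spt, int heads,
                           int *cyl, int *head, int *sect)
    {
            *sect = (int)(block % spt) + 1;
            block /= spt;
            *head = (int)(block % heads);
            *cyl  = (int)(block / heads);
    }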
/drivers/mtd/nand/raw/ |
D | mtk_ecc.c |
    63  u32 sectors;  member
    143  if (dec & ecc->sectors) {  in mtk_ecc_irq()
    149  ecc->sectors = 0;  in mtk_ecc_irq()
    206  if (config->sectors)  in mtk_ecc_config()
    207  ecc->sectors = 1 << (config->sectors - 1);  in mtk_ecc_config()
    214  int sectors)  in mtk_ecc_get_stats() argument
    222  for (i = 0; i < sectors; i++) {  in mtk_ecc_get_stats()
|
D | mtk_nand.c |
    730  u32 sectors)  in mtk_nfc_read_fdm() argument
    739  for (i = 0; i < sectors; i++) {  in mtk_nfc_read_fdm()
    916  u32 sectors)  in mtk_nfc_update_ecc_stats() argument
    927  memset(buf, 0xff, sectors * chip->ecc.size);  in mtk_nfc_update_ecc_stats()
    928  for (i = 0; i < sectors; i++)  in mtk_nfc_update_ecc_stats()
    933  mtk_ecc_get_stats(nfc->ecc, &stats, sectors);  in mtk_nfc_update_ecc_stats()
    947  u32 column, sectors, start, end, reg;  in mtk_nfc_read_subpage() local
    958  sectors = end - start;  in mtk_nfc_read_subpage()
    961  len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);  in mtk_nfc_read_subpage()
    981  nfc->ecc_cfg.sectors = sectors;  in mtk_nfc_read_subpage()
    [all …]
|
/drivers/mtd/spi-nor/ |
D | Kconfig |
    14  bool "Use small 4096 B erase sectors"
    17  Many flash memories support erasing small (4096 B) sectors. Depending
    21  small sectors. On the other hand erasing should be faster when using
    22  64 KiB block instead of 16 × 4 KiB sectors.
|
/drivers/ata/ |
D | libata-core.c |
    84  u16 heads, u16 sectors);
    653  block = (cyl * dev->heads + head) * dev->sectors + sect - 1;  in ata_tf_read_block()
    762  track = (u32)block / dev->sectors;  in ata_build_rw_tf()
    765  sect = (u32)block % dev->sectors + 1;  in ata_build_rw_tf()
    1127  u64 sectors = 0;  in ata_tf_to_lba48() local
    1129  sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;  in ata_tf_to_lba48()
    1130  sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;  in ata_tf_to_lba48()
    1131  sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;  in ata_tf_to_lba48()
    1132  sectors |= (tf->lbah & 0xff) << 16;  in ata_tf_to_lba48()
    1133  sectors |= (tf->lbam & 0xff) << 8;  in ata_tf_to_lba48()
    [all …]
|
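Line 653 is the inverse of the CHS split shown earlier, and the ata_tf_to_lba48() lines rebuild a 48-bit value from the six taskfile LBA registers: the three "hob" (high-order) bytes carry bits 47-24 and the low bytes carry bits 23-0. A hedged sketch of both operations, with the register bytes passed in explicitly rather than read from a struct ata_taskfile:

    #include <stdint.h>

    /* CHS back to a linear block number, following the formula on line 653. */
    static uint64_t chs_to_lba(uint32_t cyl, uint32_t head, uint32_t sect,
                               uint16_t heads, uint16_t spt)
    {
            return ((uint64_t)cyl * heads + head) * spt + sect - 1;
    }

    /* Rebuild a 48-bit LBA/sector count from the six ATA taskfile LBA bytes. */
    static uint64_t lba48_from_tf_bytes(uint8_t hob_lbah, uint8_t hob_lbam,
                                        uint8_t hob_lbal, uint8_t lbah,
                                        uint8_t lbam, uint8_t lbal)
    {
            uint64_t v = 0;

            v |= (uint64_t)hob_lbah << 40;   /* bits 47..40 */
            v |= (uint64_t)hob_lbam << 32;   /* bits 39..32 */
            v |= (uint64_t)hob_lbal << 24;   /* bits 31..24 */
            v |= (uint64_t)lbah << 16;       /* bits 23..16 */
            v |= (uint64_t)lbam << 8;        /* bits 15..8  */
            v |= lbal;                       /* bits  7..0  */
            return v;
    }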
/drivers/mmc/core/ |
D | mmc_test.c |
    101  unsigned int sectors;  member
    535  unsigned int count, unsigned int sectors, struct timespec64 ts,  in mmc_test_save_transfer_result() argument
    548  tr->sectors = sectors;  in mmc_test_save_transfer_result()
    562  unsigned int rate, iops, sectors = bytes >> 9;  in mmc_test_print_rate() local
    572  mmc_hostname(test->card->host), sectors, sectors >> 1,  in mmc_test_print_rate()
    573  (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,  in mmc_test_print_rate()
    577  mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);  in mmc_test_print_rate()
    587  unsigned int rate, iops, sectors = bytes >> 9;  in mmc_test_print_avg_rate() local
    599  mmc_hostname(test->card->host), count, sectors, count,  in mmc_test_print_avg_rate()
    600  sectors >> 1, (sectors & 1 ? ".5" : ""),  in mmc_test_print_avg_rate()
    [all …]
|
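In the mmc_test print helpers the transfer size arrives in bytes and becomes 512-byte sectors via `bytes >> 9`; `sectors >> 1` then gives whole KiB, and the ".5" suffix marks an odd sector count. A tiny illustration of that conversion (the printf format is mine, not the driver's):

    #include <stdio.h>

    /* Print a byte count as sectors and as KiB with half-KiB precision,
     * using the same shifts as the mmc_test hits above. */
    static void print_transfer_size(unsigned long long bytes)
    {
            unsigned int sectors = (unsigned int)(bytes >> 9);

            printf("%u sectors (%u%s KiB)\n",
                   sectors, sectors >> 1, (sectors & 1) ? ".5" : "");
    }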
/drivers/ps3/ |
D | ps3stor_lib.c |
    265  u64 start_sector, u64 sectors, int write)  in ps3stor_read_write_sectors() argument
    272  __func__, __LINE__, op, sectors, start_sector);  in ps3stor_read_write_sectors()
    276  start_sector, sectors, 0, lpar,  in ps3stor_read_write_sectors()
    279  start_sector, sectors, 0, lpar,  in ps3stor_read_write_sectors()
|