Searched refs:sectors (Results 1 – 25 of 135) sorted by relevance

/drivers/target/
target_core_sbc.c
174 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) in sbc_get_size() argument
176 return cmd->se_dev->dev_attrib.block_size * sectors; in sbc_get_size()
256 unsigned int sectors = sbc_get_write_same_sectors(cmd); in sbc_setup_write_same() local
264 if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) { in sbc_setup_write_same()
266 sectors, cmd->se_dev->dev_attrib.max_write_same_len); in sbc_setup_write_same()
272 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || in sbc_setup_write_same()
273 ((cmd->t_task_lba + sectors) > end_lba)) { in sbc_setup_write_same()
275 (unsigned long long)end_lba, cmd->t_task_lba, sectors); in sbc_setup_write_same()
641 u32 sectors, bool is_write) in sbc_check_prot() argument
671 cmd->prot_length = dev->prot_length * sectors; in sbc_check_prot()
[all …]
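
The sbc_setup_write_same() lines above validate the requested range twice: once against 64-bit wrap-around (lba + sectors < lba) and once against the end of the device. A minimal user-space sketch of those two checks, assuming end_lba means one past the last addressable block (the names lba_range_valid, nr_sectors and end_lba are mine, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Reject a request whose [lba, lba + nr_sectors) range wraps the 64-bit
 * LBA space or extends past the last addressable block (end_lba). */
static bool lba_range_valid(uint64_t lba, uint32_t nr_sectors, uint64_t end_lba)
{
        if (lba + nr_sectors < lba)             /* unsigned wrap-around */
                return false;
        if (lba + nr_sectors > end_lba)         /* runs off the device */
                return false;
        return true;
}

int main(void)
{
        /* device with 1000 blocks: valid ranges end at LBA 1000 */
        printf("%d\n", lba_range_valid(990, 10, 1000));       /* 1: exactly fits */
        printf("%d\n", lba_range_valid(995, 10, 1000));       /* 0: past the end */
        printf("%d\n", lba_range_valid(UINT64_MAX, 2, 1000)); /* 0: wraps around */
        return 0;
}
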
/drivers/md/
raid0.c
83 sector_t curr_zone_end, sectors; in create_strip_zones() local
101 sectors = rdev1->sectors; in create_strip_zones()
102 sector_div(sectors, mddev->chunk_sectors); in create_strip_zones()
103 rdev1->sectors = sectors * mddev->chunk_sectors; in create_strip_zones()
110 (unsigned long long)rdev1->sectors, in create_strip_zones()
112 (unsigned long long)rdev2->sectors); in create_strip_zones()
118 if (rdev2->sectors == rdev1->sectors) { in create_strip_zones()
201 if (!smallest || (rdev1->sectors < smallest->sectors)) in create_strip_zones()
214 zone->zone_end = smallest->sectors * cnt; in create_strip_zones()
227 zone->dev_start = smallest->sectors; in create_strip_zones()
[all …]
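
create_strip_zones() above trims each member device to a whole number of chunks before zones are laid out; sector_div() is the kernel's 64-bit-safe in-place division helper. A user-space sketch of the same rounding, assuming both sizes are expressed in 512-byte sectors:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Round a device size (in 512-byte sectors) down to a multiple of the
 * chunk size, mirroring the sector_div() + multiply pattern in raid0. */
static uint64_t round_down_to_chunk(uint64_t dev_sectors, uint32_t chunk_sectors)
{
        uint64_t chunks = dev_sectors / chunk_sectors;  /* sector_div() in-kernel */
        return chunks * chunk_sectors;
}

int main(void)
{
        /* 1000000 sectors with a 1024-sector (512 KiB) chunk -> 999424 usable */
        printf("%" PRIu64 "\n", round_down_to_chunk(1000000, 1024));
        return 0;
}
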
linear.c
80 bvm->bi_sector -= dev0->end_sector - dev0->rdev->sectors; in linear_mergeable_bvec()
121 static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) in linear_size() argument
128 WARN_ONCE(sectors || raid_disks, in linear_size()
154 sector_t sectors; in linear_conf() local
164 sectors = rdev->sectors; in linear_conf()
165 sector_div(sectors, mddev->chunk_sectors); in linear_conf()
166 rdev->sectors = sectors * mddev->chunk_sectors; in linear_conf()
172 conf->array_sectors += rdev->sectors; in linear_conf()
192 conf->disks[0].end_sector = conf->disks[0].rdev->sectors; in linear_conf()
197 conf->disks[i].rdev->sectors; in linear_conf()
[all …]
raid1.c
293 r1_bio->sector + (r1_bio->sectors); in update_head_pos()
378 r1_bio->sectors, in close_write()
452 r1_bio->sector, r1_bio->sectors, in raid1_end_write_request()
514 int sectors; in read_balance() local
532 sectors = r1_bio->sectors; in read_balance()
542 choose_first = (conf->mddev->recovery_cp < this_sector + sectors); in read_balance()
558 rdev->recovery_offset < this_sector + sectors) in read_balance()
564 if (is_badblock(rdev, this_sector, sectors, in read_balance()
571 best_good_sectors = sectors; in read_balance()
580 if (is_badblock(rdev, this_sector, sectors, in read_balance()
[all …]
raid10.c
330 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
420 r10_bio->sectors, in close_write()
505 r10_bio->sectors, in raid10_end_write_request()
788 int sectors = r10_bio->sectors; in read_balance() local
799 sectors = r10_bio->sectors; in read_balance()
812 && (this_sector + sectors >= conf->next_resync)) in read_balance()
826 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
833 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
837 if (is_badblock(rdev, dev_sector, sectors, in read_balance()
848 if (!do_balance && sectors > bad_sectors) in read_balance()
[all …]
md.h
42 sector_t sectors; /* Device size (in 512bytes sectors) */ member
184 extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
186 static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, in is_badblock() argument
191 sectors, in is_badblock()
199 extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
201 extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
472 int (*resize) (struct mddev *mddev, sector_t sectors);
473 sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
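
md.h above declares the bad-block tracking that raid1 and raid10 consult in read_balance(): is_badblock() reports whether any of [s, s + sectors) is marked bad and returns the first bad sector and its length through out parameters. A toy, self-contained sketch of that calling convention with a single-range table (struct toy_badblocks and toy_is_badblock are illustrative stand-ins, not the kernel's packed badblocks array):

#include <stdint.h>
#include <stdio.h>

/* Toy bad-block table: one bad range, where the kernel keeps a packed array. */
struct toy_badblocks {
        uint64_t bad_start;
        int      bad_len;       /* in sectors */
};

/* Returns 1 if [s, s + sectors) overlaps the bad range, 0 if it is clean,
 * and reports the overlap via *first_bad / *bad_sectors, like is_badblock(). */
static int toy_is_badblock(const struct toy_badblocks *bb, uint64_t s, int sectors,
                           uint64_t *first_bad, int *bad_sectors)
{
        uint64_t bad_end = bb->bad_start + bb->bad_len;

        if (s + sectors <= bb->bad_start || s >= bad_end)
                return 0;                       /* no overlap */

        *first_bad = (s > bb->bad_start) ? s : bb->bad_start;
        *bad_sectors = (int)(bad_end - *first_bad);
        return 1;
}

int main(void)
{
        struct toy_badblocks bb = { .bad_start = 100, .bad_len = 8 };
        uint64_t first_bad;
        int bad_sectors;

        if (toy_is_badblock(&bb, 96, 16, &first_bad, &bad_sectors))
                printf("bad run at %llu, %d sectors\n",
                       (unsigned long long)first_bad, bad_sectors);
        return 0;
}
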
md.c
251 unsigned int sectors; in md_make_request() local
284 sectors = bio_sectors(bio); in md_make_request()
291 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); in md_make_request()
699 rdev->sectors = 0; in md_rdev_clear()
1018 rdev->sectors = rdev->sb_start; in super_90_load()
1023 if (rdev->sectors >= (2ULL << 32) && sb->level >= 1) in super_90_load()
1024 rdev->sectors = (2ULL << 32) - 2; in super_90_load()
1026 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) in super_90_load()
1358 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
1365 sector_t sectors; in super_1_load() local
[all …]
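
The super_90_load() lines above show the size ceiling of the 0.90 superblock format: per-device size is kept in a 32-bit KiB field, so for redundant levels rdev->sectors is clamped to (2^33 - 2) 512-byte sectors, just under 4 TiB. A quick arithmetic sketch of that clamp:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t sectors = 5ULL << 32;          /* pretend 10 TiB device */
        int level = 1;                          /* any redundant level */

        /* 0.90 superblocks record per-device size as a 32-bit KiB count,
         * so cap the usable size at (2^33 - 2) sectors, just under 4 TiB. */
        if (sectors >= (2ULL << 32) && level >= 1)
                sectors = (2ULL << 32) - 2;

        printf("usable sectors: %" PRIu64 " (%.2f TiB)\n",
               sectors, sectors * 512.0 / (1ULL << 40));
        return 0;
}
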
dm-stats.c
25 unsigned long long sectors[2]; member
482 p->sectors[idx] += len; in dm_stat_for_entry()
583 shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]); in __dm_stat_init_temporary_percpu_totals()
584 shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]); in __dm_stat_init_temporary_percpu_totals()
611 p->sectors[READ] -= shared->tmp.sectors[READ]; in __dm_stat_clear()
612 p->sectors[WRITE] -= shared->tmp.sectors[WRITE]; in __dm_stat_clear()
714 shared->tmp.sectors[READ], in dm_stats_print()
718 shared->tmp.sectors[WRITE], in dm_stats_print()
/drivers/scsi/
scsicam.c
234 unsigned long heads, sectors, cylinders, temp; in setsize() local
237 sectors = 62L; /* Maximize sectors per track */ in setsize()
239 temp = cylinders * sectors; /* Compute divisor for heads */ in setsize()
244 sectors = capacity / temp; /* Compute value for sectors per in setsize()
247 sectors++; /* Else, increment number of sectors */ in setsize()
248 temp = heads * sectors; /* Compute divisor for cylinders */ in setsize()
256 *secs = (unsigned int) sectors; in setsize()
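
setsize() above fabricates a BIOS-style C/H/S geometry from a raw capacity by picking sectors-per-track and heads and deriving cylinders, adjusting the values when the cylinder count would overflow. A simplified user-space sketch of the basic capacity-to-C/H/S split; the fixed 255/63 geometry is my own choice for illustration, not scsicam's exact heuristic:

#include <stdint.h>
#include <stdio.h>

/* Split a capacity in 512-byte sectors into a synthetic C/H/S geometry
 * with fixed heads and sectors-per-track, rounding cylinders up. */
static void fake_chs(uint64_t capacity, unsigned *cyls, unsigned *heads, unsigned *secs)
{
        *heads = 255;
        *secs  = 63;
        *cyls  = (unsigned)((capacity + (255 * 63) - 1) / (255 * 63));
}

int main(void)
{
        unsigned c, h, s;

        fake_chs(16777216, &c, &h, &s);         /* 8 GiB worth of sectors */
        printf("C/H/S = %u/%u/%u\n", c, h, s);  /* 1045/255/63 */
        return 0;
}
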
ps3rom.c
173 u32 sectors) in ps3rom_read_request() argument
178 __func__, __LINE__, sectors, start_sector); in ps3rom_read_request()
182 sectors, 0, dev->bounce_lpar, &dev->tag); in ps3rom_read_request()
194 u32 sectors) in ps3rom_write_request() argument
199 __func__, __LINE__, sectors, start_sector); in ps3rom_write_request()
205 sectors, 0, dev->bounce_lpar, &dev->tag); in ps3rom_write_request()
/drivers/mtd/
ssfdc.c
26 unsigned char sectors; member
321 ssfdc->sectors = 32; in ssfdcr_add_mtd()
322 get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors); in ssfdcr_add_mtd()
324 ((long)ssfdc->sectors * (long)ssfdc->heads)); in ssfdcr_add_mtd()
327 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors, in ssfdcr_add_mtd()
329 (long)ssfdc->sectors); in ssfdcr_add_mtd()
332 (long)ssfdc->sectors; in ssfdcr_add_mtd()
414 ssfdc->cylinders, ssfdc->heads, ssfdc->sectors); in ssfdcr_getgeo()
417 geo->sectors = ssfdc->sectors; in ssfdcr_getgeo()
nftlcore.c
84 nftl->sectors = nftl->mbd.size / temp; in nftl_add_mtd()
86 nftl->sectors++; in nftl_add_mtd()
87 temp = nftl->cylinders * nftl->sectors; in nftl_add_mtd()
92 temp = nftl->heads * nftl->sectors; in nftl_add_mtd()
97 if (nftl->mbd.size != nftl->heads * nftl->cylinders * nftl->sectors) { in nftl_add_mtd()
106 nftl->cylinders, nftl->heads , nftl->sectors, in nftl_add_mtd()
108 (long)nftl->sectors ); in nftl_add_mtd()
785 geo->sectors = nftl->sectors; in nftl_getgeo()
inftlcore.c
91 inftl->sectors = inftl->mbd.size / temp; in inftl_add_mtd()
93 inftl->sectors++; in inftl_add_mtd()
94 temp = inftl->cylinders * inftl->sectors; in inftl_add_mtd()
99 temp = inftl->heads * inftl->sectors; in inftl_add_mtd()
104 if (inftl->mbd.size != inftl->heads * inftl->cylinders * inftl->sectors) { in inftl_add_mtd()
113 inftl->cylinders, inftl->heads , inftl->sectors, in inftl_add_mtd()
115 (long)inftl->sectors ); in inftl_add_mtd()
934 geo->sectors = inftl->sectors; in inftl_getgeo()
/drivers/mtd/spi-nor/
Kconfig
11 bool "Use small 4096 B erase sectors"
14 Many flash memories support erasing small (4096 B) sectors. Depending
18 small sectors. On the other hand erasing should be faster when using
19 64 KiB block instead of 16 × 4 KiB sectors.
/drivers/usb/storage/
datafab.c
68 unsigned long sectors; /* total sector count */ member
146 u32 sectors) in datafab_read_data() argument
161 if (sectors > 0x0FFFFFFF) in datafab_read_data()
170 totallen = sectors * info->ssize; in datafab_read_data()
229 u32 sectors) in datafab_write_data() argument
245 if (sectors > 0x0FFFFFFF) in datafab_write_data()
254 totallen = sectors * info->ssize; in datafab_write_data()
429 info->sectors = ((u32)(reply[117]) << 24) | in datafab_id_device()
591 info->sectors, info->ssize); in datafab_transport()
595 ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); in datafab_transport()
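
The datafab lines above combine two common patterns: assembling the 32-bit total-sector count byte by byte from the device's IDENTIFY reply, and answering READ CAPACITY(10) with the last LBA (total sectors minus one) in big-endian order. A hedged user-space sketch of both; the byte offsets and helper names are illustrative, not the driver's exact layout:

#include <stdint.h>
#include <stdio.h>

/* Assemble a 32-bit field from individual bytes, as the driver does with
 * reply[117..114]. */
static uint32_t sectors_from_reply(const uint8_t *reply)
{
        return ((uint32_t)reply[3] << 24) | ((uint32_t)reply[2] << 16) |
               ((uint32_t)reply[1] << 8)  |  (uint32_t)reply[0];
}

/* Store a 32-bit value big-endian, as cpu_to_be32() does for READ CAPACITY. */
static void put_be32(uint8_t *dst, uint32_t v)
{
        dst[0] = v >> 24; dst[1] = v >> 16; dst[2] = v >> 8; dst[3] = v;
}

int main(void)
{
        uint8_t reply[4] = { 0x00, 0x00, 0x20, 0x00 };  /* 0x00200000 sectors */
        uint8_t cap[8] = { 0 };
        uint32_t sectors = sectors_from_reply(reply);

        put_be32(&cap[0], sectors - 1);     /* READ CAPACITY reports last LBA */
        put_be32(&cap[4], 512);             /* ...and the block size in bytes */

        printf("sectors=%u last_lba_be=%02x%02x%02x%02x\n",
               sectors, cap[0], cap[1], cap[2], cap[3]);
        return 0;
}
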
jumpshot.c
105 unsigned long sectors; /* total sector count */ member
165 u32 sectors) in jumpshot_read_data() argument
183 totallen = sectors * info->ssize; in jumpshot_read_data()
242 u32 sectors) in jumpshot_write_data() argument
260 totallen = sectors * info->ssize; in jumpshot_write_data()
363 info->sectors = ((u32)(reply[117]) << 24) | in jumpshot_id_device()
522 info->sectors, info->ssize); in jumpshot_transport()
526 ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); in jumpshot_transport()
sddr55.c
197 unsigned short sectors) { in sddr55_read_data() argument
216 len = min((unsigned int) sectors, (unsigned int) info->blocksize >> in sddr55_read_data()
224 while (sectors>0) { in sddr55_read_data()
234 pages = min((unsigned int) sectors << info->smallpageshift, in sddr55_read_data()
302 sectors -= pages >> info->smallpageshift; in sddr55_read_data()
316 unsigned short sectors) { in sddr55_write_data() argument
343 len = min((unsigned int) sectors, (unsigned int) info->blocksize >> in sddr55_write_data()
351 while (sectors > 0) { in sddr55_write_data()
361 pages = min((unsigned int) sectors << info->smallpageshift, in sddr55_write_data()
510 sectors -= pages >> info->smallpageshift; in sddr55_write_data()
alauda.c
909 unsigned int sectors) in alauda_read_data() argument
930 len = min(sectors, blocksize) * (pagesize + 64); in alauda_read_data()
946 while (sectors > 0) { in alauda_read_data()
962 pages = min(sectors, blocksize - page); in alauda_read_data()
993 sectors -= pages; in alauda_read_data()
1004 unsigned int sectors) in alauda_write_data() argument
1022 len = min(sectors, blocksize) * pagesize; in alauda_write_data()
1049 while (sectors > 0) { in alauda_write_data()
1051 unsigned int pages = min(sectors, blocksize - page); in alauda_write_data()
1073 sectors -= pages; in alauda_write_data()
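
sddr55.c and alauda.c above walk their sector counts with the same chunking loop: each pass transfers at most the remainder of the current erase block (blocksize - page) and never more than what is still outstanding. A small sketch of that loop, assuming blocksize and page are counted in pages (the names are mine):

#include <stdio.h>

/* Walk `sectors` sectors starting at page offset `page` within an erase
 * block of `blocksize` pages, transferring one chunk per iteration. */
static void chunked_transfer(unsigned sectors, unsigned page, unsigned blocksize)
{
        while (sectors > 0) {
                /* never cross the end of the current block */
                unsigned pages = sectors < (blocksize - page) ?
                                 sectors : (blocksize - page);

                printf("transfer %u pages at page offset %u\n", pages, page);

                page = 0;               /* next chunk starts a fresh block */
                sectors -= pages;
        }
}

int main(void)
{
        /* 100 sectors, starting 10 pages into a 32-page block:
         * chunks of 22, 32, 32, 14 */
        chunked_transfer(100, 10, 32);
        return 0;
}
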
/drivers/md/bcache/
request.c
124 unsigned sectors = min(bio_sectors(bio), in bch_data_invalidate() local
130 bio->bi_iter.bi_sector += sectors; in bch_data_invalidate()
131 bio->bi_iter.bi_size -= sectors << 9; in bch_data_invalidate()
134 &KEY(op->inode, bio->bi_iter.bi_sector, sectors)); in bch_data_invalidate()
369 unsigned sectors, congested = bch_get_congested(c); in check_should_bypass() local
430 sectors = max(task->sequential_io, in check_should_bypass()
434 sectors >= dc->sequential_cutoff >> 9) { in check_should_bypass()
439 if (congested && sectors >= congested) { in check_should_bypass()
516 unsigned sectors = KEY_INODE(k) == s->iop.inode in cache_lookup_fn() local
521 int ret = s->d->cache_miss(b, s, bio, sectors); in cache_lookup_fn()
[all …]
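
check_should_bypass() above decides whether a request should skip the cache: the task's recent sequential footprint is compared with dc->sequential_cutoff (kept in bytes, hence the >> 9), and a congested cache lowers the bar further. A simplified sketch reduced to the two comparisons visible above; the thresholds and names are illustrative, not bcache's full policy:

#include <stdbool.h>
#include <stdio.h>

/* Decide whether to bypass the cache for a request of `io_sectors`,
 * given the task's running sequential total and the congestion level. */
static bool should_bypass(unsigned sequential_sectors, unsigned io_sectors,
                          unsigned sequential_cutoff_bytes, unsigned congested)
{
        /* large streaming I/O gains little from caching */
        if (sequential_sectors + io_sectors >= sequential_cutoff_bytes >> 9)
                return true;

        /* under congestion, bypass anything at or above the congestion size */
        if (congested && io_sectors >= congested)
                return true;

        return false;
}

int main(void)
{
        /* 4 MiB cutoff: a 3 MiB sequential stream plus a 2 MiB request bypasses */
        printf("%d\n", should_bypass(6144, 4096, 4 << 20, 0));  /* 1 */
        printf("%d\n", should_bypass(0, 256, 4 << 20, 0));      /* 0 */
        printf("%d\n", should_bypass(0, 256, 4 << 20, 128));    /* 1 */
        return 0;
}
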
alloc.c
84 void bch_rescale_priorities(struct cache_set *c, int sectors) in bch_rescale_priorities() argument
92 atomic_sub(sectors, &c->rescale); in bch_rescale_priorities()
572 bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors, in bch_alloc_sectors() argument
618 sectors = min(sectors, b->sectors_free); in bch_alloc_sectors()
620 SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors); in bch_alloc_sectors()
621 SET_KEY_SIZE(k, sectors); in bch_alloc_sectors()
632 b->sectors_free -= sectors; in bch_alloc_sectors()
635 SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors); in bch_alloc_sectors()
637 atomic_long_add(sectors, in bch_alloc_sectors()
/drivers/block/paride/
pd.c
229 int sectors; member
341 s = (block % disk->sectors) + 1; in pd_ide_command()
342 h = (block /= disk->sectors) % disk->heads; in pd_ide_command()
590 pd_send_command(disk, disk->sectors, 0, disk->heads - 1, 0, 0, in pd_init_dev_parms()
672 disk->sectors = le16_to_cpu(*(__le16 *) (pd_scratch + 12)); in pd_identify()
678 disk->capacity = disk->sectors * disk->heads * disk->cylinders; in pd_identify()
694 disk->cylinders, disk->heads, disk->sectors, in pd_identify()
760 geo->sectors = PD_LOG_SECTS; in pd_getgeo()
761 geo->cylinders = disk->capacity / (geo->heads * geo->sectors); in pd_getgeo()
764 geo->sectors = disk->sectors; in pd_getgeo()
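
pd_ide_command() above converts a linear block number into the cylinder/head/sector triple the IDE task file expects: the sector is 1-based within its track, the head cycles within the cylinder, and the remainder is the cylinder. A standalone sketch of the same arithmetic with my own names:

#include <stdio.h>

/* Convert a linear block number to CHS for a drive with `heads` heads
 * and `sectors` sectors per track; sector numbers are 1-based. */
static void block_to_chs(unsigned long block, unsigned heads, unsigned sectors,
                         unsigned *c, unsigned *h, unsigned *s)
{
        *s = (unsigned)(block % sectors) + 1;
        block /= sectors;
        *h = (unsigned)(block % heads);
        *c = (unsigned)(block / heads);
}

int main(void)
{
        unsigned c, h, s;

        block_to_chs(5000, 16, 63, &c, &h, &s);
        printf("C/H/S = %u/%u/%u\n", c, h, s);   /* 4/15/24 */
        return 0;
}
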
/drivers/mmc/card/
mmc_test.c
97 unsigned int sectors; member
521 unsigned int count, unsigned int sectors, struct timespec ts, in mmc_test_save_transfer_result() argument
534 tr->sectors = sectors; in mmc_test_save_transfer_result()
548 unsigned int rate, iops, sectors = bytes >> 9; in mmc_test_print_rate() local
558 mmc_hostname(test->card->host), sectors, sectors >> 1, in mmc_test_print_rate()
559 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec, in mmc_test_print_rate()
563 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops); in mmc_test_print_rate()
573 unsigned int rate, iops, sectors = bytes >> 9; in mmc_test_print_avg_rate() local
585 mmc_hostname(test->card->host), count, sectors, count, in mmc_test_print_avg_rate()
586 sectors >> 1, (sectors & 1 ? ".5" : ""), in mmc_test_print_avg_rate()
[all …]
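
mmc_test_print_rate() above converts a byte count to 512-byte sectors (bytes >> 9) and then prints it as KiB with half-KiB resolution: sectors >> 1 gives the whole KiB and the low bit selects an optional ".5" suffix. A small sketch of that formatting trick:

#include <stdio.h>

/* Print a transfer size given in 512-byte sectors as KiB with ".5" steps. */
static void print_kib(unsigned sectors)
{
        printf("%u sectors (%u%s KiB)\n",
               sectors, sectors >> 1, (sectors & 1) ? ".5" : "");
}

int main(void)
{
        print_kib(8);   /* 8 sectors (4 KiB)   */
        print_kib(9);   /* 9 sectors (4.5 KiB) */
        return 0;
}
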
/drivers/block/rsxx/
dev.c
96 geo->sectors = 16; in rsxx_getgeo()
97 do_div(blocks, (geo->heads * geo->sectors)); in rsxx_getgeo()
101 geo->sectors = 0; in rsxx_getgeo()
138 part_stat_add(cpu, part0, sectors[rw], bio_sectors(bio)); in disk_stats_complete()
/drivers/scsi/aacraid/
linit.c
336 param->sectors = 63; in aac_biosparm()
339 param->sectors = 32; in aac_biosparm()
343 param->sectors = 32; in aac_biosparm()
346 param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors); in aac_biosparm()
370 param->sectors = 32; in aac_biosparm()
374 param->sectors = 32; in aac_biosparm()
378 param->sectors = 63; in aac_biosparm()
389 param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors); in aac_biosparm()
390 if (num < 4 && end_sec == param->sectors) { in aac_biosparm()
393 param->heads, param->sectors, num)); in aac_biosparm()
[all …]
/drivers/ps3/
ps3stor_lib.c
277 u64 start_sector, u64 sectors, int write) in ps3stor_read_write_sectors() argument
284 __func__, __LINE__, op, sectors, start_sector); in ps3stor_read_write_sectors()
288 start_sector, sectors, 0, lpar, in ps3stor_read_write_sectors()
291 start_sector, sectors, 0, lpar, in ps3stor_read_write_sectors()
