Searched refs:chunk_sectors (Results 1 – 15 of 15) sorted by relevance
/drivers/md/
raid0.c
    88  sector_div(sectors, mddev->chunk_sectors);  in create_strip_zones()
    89  rdev1->sectors = sectors * mddev->chunk_sectors;  in create_strip_zones()
   135  if ((mddev->chunk_sectors << 9) % blksize) {  in create_strip_zones()
   138  mddev->chunk_sectors << 9, blksize);  in create_strip_zones()
   281  sector_div(first_sector, mddev->chunk_sectors);  in create_strip_zones()
   331  unsigned int chunk_sects = mddev->chunk_sectors;  in map_sector()
   367  ~(sector_t)(mddev->chunk_sectors-1));  in raid0_size()
   379  if (mddev->chunk_sectors == 0) {  in raid0_run()
   398  blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);  in raid0_run()
   399  blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);  in raid0_run()
  [all …]

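The raid0 hits show the same round-down idiom in two forms: sector_div() for arbitrary chunk sizes (lines 88-89) and a mask for power-of-two chunks (line 367 in raid0_size()). A minimal userspace sketch of both, where sector_div_sketch() is a hypothetical stand-in for the kernel's sector_div() (divide in place, return the remainder):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Hypothetical stand-in for the kernel's sector_div(): divides the
 * sector count in place and returns the remainder. */
static uint32_t sector_div_sketch(sector_t *s, uint32_t div)
{
	uint32_t rem = *s % div;

	*s /= div;
	return rem;
}

int main(void)
{
	sector_t dev_sectors = 1000005;	/* example component size */
	uint32_t chunk_sectors = 128;	/* 64 KiB chunks (128 * 512 B) */

	/* Round the component down to a whole number of chunks, as
	 * create_strip_zones() does at lines 88-89. */
	sector_t chunks = dev_sectors;
	sector_div_sketch(&chunks, chunk_sectors);
	sector_t usable = chunks * chunk_sectors;

	/* For a power-of-two chunk the same rounding is a mask, as in
	 * raid0_size() at line 367. */
	sector_t masked = dev_sectors & ~(sector_t)(chunk_sectors - 1);

	printf("usable=%llu masked=%llu\n",	/* both 999936 */
	       (unsigned long long)usable, (unsigned long long)masked);
	return 0;
}
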
md-linear.c
    88  if (mddev->chunk_sectors) {  in linear_conf()
    90  sector_div(sectors, mddev->chunk_sectors);  in linear_conf()
    91  rdev->sectors = sectors * mddev->chunk_sectors;  in linear_conf()
   283  seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);  in linear_status()

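linear_conf() reuses the same round-down (lines 90-91), and linear_status() prints mddev->chunk_sectors / 2 with a "k" suffix: a sector is 512 bytes, so two sectors make one KiB. A one-liner showing the conversion:

#include <stdio.h>

int main(void)
{
	int chunk_sectors = 128;	/* 512-byte sectors */

	/* Two 512-byte sectors per KiB, hence the divide by two in
	 * linear_status() (and raid10_status() further down). */
	printf(" %dk rounding\n", chunk_sectors / 2);	/* " 64k rounding" */
	return 0;
}
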
raid5.c
   826  if (!sector_div(tmp_sec, conf->chunk_sectors))  in stripe_add_to_batch_list()
  2924  : conf->chunk_sectors;  in raid5_compute_sector()
  3120  : conf->chunk_sectors;  in raid5_compute_blocknr()
  3459  if (first + conf->chunk_sectors * (count - 1) != last)  in add_stripe_bio()
  3534  previous ? conf->prev_chunk_sectors : conf->chunk_sectors;  in stripe_set_idx()
  5307  unsigned int chunk_sectors;  in in_chunk_boundary() local
  5312  chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);  in in_chunk_boundary()
  5313  return chunk_sectors >=  in in_chunk_boundary()
  5314  ((sector & (chunk_sectors - 1)) + bio_sectors);  in in_chunk_boundary()
  5484  unsigned chunk_sects = mddev->chunk_sectors;  in chunk_aligned_read()
  [all …]

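The in_chunk_boundary() hits (lines 5307-5314) are quoted nearly whole: with a power-of-two chunk size, sector & (chunk_sectors - 1) is the offset inside the chunk, so an I/O stays within one chunk iff that offset plus its length still fits. A self-contained sketch of the test:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Sketch of the boundary test quoted above: the I/O fits in one
 * chunk iff its in-chunk offset plus its length does not exceed
 * the chunk size. Assumes chunk_sectors is a power of two. */
static bool fits_in_chunk(sector_t sector, unsigned int bio_sectors,
			  unsigned int chunk_sectors)
{
	return chunk_sectors >=
	       ((sector & (chunk_sectors - 1)) + bio_sectors);
}

int main(void)
{
	/* 128-sector chunks: an 8-sector read at in-chunk offset 120
	 * fits exactly; at offset 124 it would cross into the next
	 * chunk. */
	printf("%d %d\n", fits_in_chunk(120, 8, 128),
	       fits_in_chunk(124, 8, 128));	/* 1 0 */
	return 0;
}
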
dm-zoned-target.c
   998  unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);  in dmz_io_hints() local
  1008  limits->max_discard_sectors = chunk_sectors;  in dmz_io_hints()
  1009  limits->max_hw_discard_sectors = chunk_sectors;  in dmz_io_hints()
  1010  limits->max_write_zeroes_sectors = chunk_sectors;  in dmz_io_hints()
  1013  limits->chunk_sectors = chunk_sectors;  in dmz_io_hints()
  1014  limits->max_sectors = chunk_sectors;  in dmz_io_hints()

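dmz_io_hints() pins every per-request limit to the zone size so a single bio can never span a zone boundary. A sketch with a pared-down, hypothetical stand-in for struct queue_limits that keeps only the fields assigned above:

#include <stdio.h>

/* Hypothetical subset of struct queue_limits: only the fields the
 * dmz_io_hints() hits assign. */
struct limits_sketch {
	unsigned int max_discard_sectors;
	unsigned int max_hw_discard_sectors;
	unsigned int max_write_zeroes_sectors;
	unsigned int chunk_sectors;
	unsigned int max_sectors;
};

/* Cap every limit at the zone size, mirroring lines 1008-1014. */
static void io_hints_sketch(struct limits_sketch *limits,
			    unsigned int zone_sectors)
{
	limits->max_discard_sectors = zone_sectors;
	limits->max_hw_discard_sectors = zone_sectors;
	limits->max_write_zeroes_sectors = zone_sectors;
	limits->chunk_sectors = zone_sectors;
	limits->max_sectors = zone_sectors;
}

int main(void)
{
	struct limits_sketch l = { 0 };

	io_hints_sketch(&l, 524288);	/* 256 MiB zones, example value */
	printf("max_sectors=%u\n", l.max_sectors);
	return 0;
}
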
dm-raid.c
   717  mddev->new_chunk_sectors = mddev->chunk_sectors;  in rs_set_cur()
   730  mddev->chunk_sectors = mddev->new_chunk_sectors;  in rs_set_new()
   979  if (region_size < rs->md.chunk_sectors) {  in validate_region_size()
  1160  rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;  in parse_raid_params()
  1490  if (rs->md.chunk_sectors)  in parse_raid_params()
  1491  max_io_len = rs->md.chunk_sectors;  in parse_raid_params()
  1540  uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;  in rs_set_raid456_stripe_cache()
  1879  rs->md.new_chunk_sectors != rs->md.chunk_sectors;  in rs_is_layout_change()
  2121  sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);  in super_sync()
  2233  mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);  in super_init_validation()
  [all …]

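Several of the dm-raid hits manage the chunk_sectors / new_chunk_sectors pair: parse_raid_params() sets both at once (line 1160), rs_set_cur() and rs_set_new() copy one onto the other, and rs_is_layout_change() (line 1879) treats any difference as a pending reshape. A minimal sketch of the pattern; the struct and function names here are hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* Current vs. requested chunk size, as in the md/dm-raid pair. */
struct geo_sketch {
	int chunk_sectors;	/* current layout */
	int new_chunk_sectors;	/* requested layout */
};

/* rs_is_layout_change()-style test. */
static bool layout_change(const struct geo_sketch *g)
{
	return g->new_chunk_sectors != g->chunk_sectors;
}

int main(void)
{
	/* parse_raid_params()-style: set both fields together. */
	struct geo_sketch g = { .chunk_sectors = 128,
				.new_chunk_sectors = 128 };

	g.new_chunk_sectors = 256;	/* user requested 128 KiB chunks */
	printf("reshape needed: %d\n", layout_change(&g));	/* 1 */

	/* rs_set_new()-style commit once the reshape completes. */
	g.chunk_sectors = g.new_chunk_sectors;
	printf("reshape needed: %d\n", layout_change(&g));	/* 0 */
	return 0;
}
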
raid5-ppl.c
   328  (data_sector >> ilog2(conf->chunk_sectors) ==  in ppl_log_stripe()
   329  data_sector_last >> ilog2(conf->chunk_sectors)) &&  in ppl_log_stripe()
   824  if ((pp_size >> 9) < conf->chunk_sectors) {  in ppl_recover_entry()
   833  (data_disks - 1) * conf->chunk_sectors +  in ppl_recover_entry()
   837  strip_sectors = conf->chunk_sectors;  in ppl_recover_entry()
   871  (disk * conf->chunk_sectors);  in ppl_recover_entry()

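ppl_log_stripe() (lines 328-329) compares chunk indices with a shift instead of a divide: because chunk_sectors is a power of two, sector >> ilog2(chunk_sectors) is the chunk number. A sketch, with ilog2_sketch() as a hypothetical stand-in for the kernel's ilog2():

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Hypothetical ilog2() stand-in: for a power of two this is the
 * index of its single set bit. */
static unsigned int ilog2_sketch(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Two sectors share a chunk iff their chunk indices match; the
 * shift is equivalent to dividing by chunk_sectors. */
static bool same_chunk(sector_t a, sector_t b, unsigned int chunk_sectors)
{
	unsigned int shift = ilog2_sketch(chunk_sectors);

	return (a >> shift) == (b >> shift);
}

int main(void)
{
	/* 128-sector chunks: sectors 130 and 250 are both in chunk 1,
	 * sector 260 is in chunk 2. */
	printf("%d %d\n", same_chunk(130, 250, 128),
	       same_chunk(130, 260, 128));	/* 1 0 */
	return 0;
}
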
dm-unstripe.c
   175  limits->chunk_sectors = uc->chunk_size;  in unstripe_io_hints()

md.c
  1340  mddev->chunk_sectors = sb->chunk_size >> 9;  in super_90_validate()
  1369  mddev->new_chunk_sectors = mddev->chunk_sectors;  in super_90_validate()
  1519  sb->chunk_size = mddev->chunk_sectors << 9;  in super_90_sync()
  1853  mddev->chunk_sectors = le32_to_cpu(sb->chunksize);  in super_1_validate()
  1911  mddev->new_chunk_sectors = mddev->chunk_sectors;  in super_1_validate()
  2070  sb->chunksize = cpu_to_le32(mddev->chunk_sectors);  in super_1_sync()
  2721  (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))  in does_sb_need_changing()
  4074  mddev->new_chunk_sectors = mddev->chunk_sectors;  in level_store()
  4097  mddev->chunk_sectors = mddev->new_chunk_sectors;  in level_store()
  4299  mddev->chunk_sectors != mddev->new_chunk_sectors)  in chunk_size_show()
  [all …]

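The super_90_* hits convert between the v0.90 superblock's byte-based chunk_size and the sector-based mddev->chunk_sectors with >> 9 and << 9, i.e. dividing or multiplying by the 512-byte sector size; the v1 superblock (lines 1853 and 2070) stores sectors directly. A worked example:

#include <stdio.h>

int main(void)
{
	/* A sector is 512 = 2^9 bytes, so >> 9 and << 9 convert
	 * between bytes and sectors as super_90_validate() and
	 * super_90_sync() do. */
	unsigned int chunk_bytes = 65536;		/* 64 KiB, as sb->chunk_size */
	unsigned int chunk_sectors = chunk_bytes >> 9;	/* 128 sectors */

	printf("%u sectors = %u bytes\n",
	       chunk_sectors, chunk_sectors << 9);	/* 128, 65536 */
	return 0;
}
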
raid5.h
   567  int chunk_sectors;  member

raid10.c
  1560  seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);  in raid10_status()
  2864  window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;  in raid10_set_cluster_sync_high()
  3584  chunk = mddev->chunk_sectors;  in setup_geo()
  3739  blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *  in raid10_set_io_opt()
  3784  mddev->chunk_sectors);  in raid10_run()
  3787  blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);  in raid10_run()
  4017  mddev->new_chunk_sectors = mddev->chunk_sectors;  in raid10_takeover_raid0()
  4887  mddev->chunk_sectors = 1 << conf->geo.chunk_shift;  in raid10_finish_reshape()

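raid10_finish_reshape() (line 4887) stores the power-of-two chunk as a shift count and recovers sectors with 1 << chunk_shift; raid10_set_io_opt() (line 3739) then advertises an optimal I/O size of one chunk in bytes times a disk count whose exact expression is truncated in the hit above. A sketch with example values; the factor of 4 is illustrative only:

#include <stdio.h>

int main(void)
{
	/* Recover the chunk size from its shift, as at line 4887. */
	unsigned int chunk_shift = 7;
	unsigned int chunk_sectors = 1u << chunk_shift;	/* 128 */

	/* io_min is one chunk in bytes (line 3787); io_opt scales it
	 * by a disk count (elided in the hit above, 4 assumed here). */
	unsigned int ndisks = 4;

	printf("io_min=%u io_opt=%u\n",
	       chunk_sectors << 9, (chunk_sectors << 9) * ndisks);
	return 0;
}
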
md.h
   314  int chunk_sectors;  member

dm-table.c
  1784  zone_sectors = ti_limits.chunk_sectors;  in dm_calculate_queue_limits()
  1826  zone_sectors = limits->chunk_sectors;  in dm_calculate_queue_limits()

raid5-cache.c
   198  sector_div(sect, conf->chunk_sectors);  in r5c_tree_index()
   365  conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))  in r5c_check_cached_full_stripe()

raid1.c
  3238  if (mddev->chunk_sectors != mddev->new_chunk_sectors ||  in raid1_reshape()
  3241  mddev->new_chunk_sectors = mddev->chunk_sectors;  in raid1_reshape()

/drivers/char/
ps3flash.c
    26  u64 chunk_sectors;  member
    38  start_sector, priv->chunk_sectors,  in ps3flash_read_write_sectors()
   118  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_read()
   151  sector += priv->chunk_sectors;  in ps3flash_read()
   187  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_write()
   226  sector += priv->chunk_sectors;  in ps3flash_write()
   376  priv->chunk_sectors = dev->bounce_size / dev->blk_size;  in ps3flash_probe()

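ps3flash sizes its chunk as one bounce buffer worth of device blocks (line 376) and maps a byte offset to the starting sector of the chunk containing it (lines 118 and 187). A sketch with assumed sizes; the bounce_size and blk_size values are examples only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t bounce_size = 262144;	/* 256 KiB bounce buffer, example */
	uint64_t blk_size = 512;	/* device block size, example */

	/* One chunk = one bounce buffer, measured in device blocks,
	 * as in ps3flash_probe(). */
	uint64_t chunk_sectors = bounce_size / blk_size;	/* 512 */

	/* Map a byte offset to the first sector of its chunk, as in
	 * ps3flash_read()/ps3flash_write(); integer division rounds
	 * down to the chunk start. */
	uint64_t pos = 1000000;
	uint64_t sector = pos / bounce_size * chunk_sectors;

	printf("chunk_sectors=%llu start=%llu\n",
	       (unsigned long long)chunk_sectors,
	       (unsigned long long)sector);	/* 512 1536 */
	return 0;
}
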