Lines matching refs: chunk_sectors (drivers/md/raid0.c)
103 sector_div(sectors, mddev->chunk_sectors); in create_strip_zones()
104 rdev1->sectors = sectors * mddev->chunk_sectors; in create_strip_zones()
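Lines 103-104 round each member device's size down to a whole number of chunks: sector_div() divides in place and returns the remainder, so multiplying the quotient back by chunk_sectors discards any trailing partial chunk. A minimal userspace sketch of the same arithmetic (sector_div_sketch() and the sizes below are illustrative stand-ins, not kernel code):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Stand-in for the kernel's sector_div(): divides *n in place, returns the remainder. */
static unsigned int sector_div_sketch(sector_t *n, unsigned int base)
{
    unsigned int rem = (unsigned int)(*n % base);
    *n /= base;
    return rem;
}

int main(void)
{
    sector_t dev_sectors = 1000005;      /* hypothetical member-device size in sectors */
    unsigned int chunk_sectors = 128;    /* 64 KiB chunks (128 * 512 bytes) */
    sector_t sectors = dev_sectors;

    sector_div_sketch(&sectors, chunk_sectors);   /* sectors is now a count of whole chunks */
    sector_t usable = sectors * chunk_sectors;    /* trailing partial chunk discarded */

    printf("usable %llu of %llu sectors\n",
           (unsigned long long)usable, (unsigned long long)dev_sectors);
    return 0;
}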
274 if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) { in create_strip_zones()
277 mddev->chunk_sectors << 9); in create_strip_zones()
281 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); in create_strip_zones()
283 (mddev->chunk_sectors << 9) * mddev->raid_disks); in create_strip_zones()
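Lines 274-283 export the chunk geometry to the block layer: the chunk size in bytes (chunk_sectors << 9, i.e. 512-byte sectors) must be a multiple of the queue's logical block size, the chunk becomes the minimum I/O size, and line 283's chunk-bytes-times-raid_disks value (one full stripe) is presumably the optimal I/O size handed to blk_queue_io_opt(). A hedged standalone sketch of those calculations (all values are assumptions for illustration):

#include <stdio.h>

int main(void)
{
    unsigned int chunk_sectors = 128;          /* hypothetical: 64 KiB chunk */
    unsigned int logical_block_size = 4096;    /* hypothetical: 4 KiB logical block */
    unsigned int raid_disks = 4;

    unsigned int chunk_bytes = chunk_sectors << 9;   /* sectors are 512 bytes */

    if (chunk_bytes % logical_block_size) {
        fprintf(stderr, "chunk size %u B is not a multiple of block size %u B\n",
                chunk_bytes, logical_block_size);
        return 1;
    }

    printf("io_min = %u bytes (one chunk)\n", chunk_bytes);
    printf("io_opt = %u bytes (one full %u-disk stripe)\n",
           chunk_bytes * raid_disks, raid_disks);
    return 0;
}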
332 unsigned int chunk_sects = mddev->chunk_sectors; in map_sector()
375 unsigned int chunk_sectors = mddev->chunk_sectors; in raid0_mergeable_bvec() local
381 if (is_power_of_2(chunk_sectors)) in raid0_mergeable_bvec()
382 max = (chunk_sectors - ((sector & (chunk_sectors-1)) in raid0_mergeable_bvec()
385 max = (chunk_sectors - (sector_div(sector, chunk_sectors) in raid0_mergeable_bvec()
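raid0_mergeable_bvec needs to know how many sectors remain between the request's start sector and the end of its chunk, so it never merges data that would straddle a chunk boundary; when chunk_sectors is a power of two the in-chunk offset is a cheap bitmask, otherwise sector_div() is used. A self-contained sketch of both paths (names and example values are illustrative):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static int is_pow2(unsigned int n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

/* Sectors remaining from 'sector' to the end of the chunk containing it. */
static unsigned int sectors_left_in_chunk(sector_t sector, unsigned int chunk_sectors)
{
    if (is_pow2(chunk_sectors))
        return chunk_sectors - (unsigned int)(sector & (chunk_sectors - 1));
    return chunk_sectors - (unsigned int)(sector % chunk_sectors);
}

int main(void)
{
    printf("%u\n", sectors_left_in_chunk(1000, 128));  /* 128 - (1000 & 127) = 24 */
    printf("%u\n", sectors_left_in_chunk(1000, 96));   /* 96 - (1000 % 96)  = 56 */
    return 0;
}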
421 ~(sector_t)(mddev->chunk_sectors-1)); in raid0_size()
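The mask on line 421 rounds a device's sector count down to a chunk boundary without a division, a trick that is exact when chunk_sectors is a power of two: for example, with chunk_sectors = 128 (64 KiB), 1000005 & ~(sector_t)127 = 999936, i.e. 7812 whole chunks with the trailing 69 sectors dropped.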
433 if (mddev->chunk_sectors == 0) { in raid0_run()
440 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run()
441 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run()
442 blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run()
470 (mddev->chunk_sectors << 9) / PAGE_SIZE; in raid0_run()
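raid0_run bails out if no chunk size is set (line 433), caps per-request hardware, write-same and discard sizes at one chunk (lines 440-442), and sizes read-ahead from the chunk: line 470 converts chunk bytes to pages. The listing does not show the surrounding expression, so the raid_disks factor and the two-stripe read-ahead target in the sketch below are assumptions based on the usual raid0 pattern:

#include <stdio.h>

#define PAGE_SIZE 4096u   /* assumption: 4 KiB pages */

int main(void)
{
    unsigned int chunk_sectors = 128;   /* hypothetical 64 KiB chunk */
    unsigned int raid_disks = 4;

    /* One full stripe expressed in pages: (chunk bytes * disks) / page size. */
    unsigned int stripe_pages = raid_disks * (chunk_sectors << 9) / PAGE_SIZE;

    /* Assumption: read-ahead is raised to cover at least two full stripes. */
    printf("stripe = %u pages, suggested ra_pages >= %u\n",
           stripe_pages, 2 * stripe_pages);
    return 0;
}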
525 chunk_sects = mddev->chunk_sectors; in raid0_make_request()
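Line 525 only shows raid0_make_request reading the chunk size; before mapping a bio the request path has to decide whether the I/O stays inside a single chunk (and split it if not). The predicate below is a sketch of that boundary test, mirroring the power-of-2 / sector_div arithmetic from raid0_mergeable_bvec above; the function name and values are hypothetical:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/*
 * Hypothetical predicate: does an I/O of nr_sectors starting at 'sector'
 * stay inside a single chunk?  Not the kernel's actual splitting code.
 */
static int fits_in_one_chunk(sector_t sector, unsigned int nr_sectors,
                             unsigned int chunk_sects)
{
    unsigned int offset = (chunk_sects & (chunk_sects - 1)) == 0
        ? (unsigned int)(sector & (chunk_sects - 1))
        : (unsigned int)(sector % chunk_sects);

    return offset + nr_sectors <= chunk_sects;
}

int main(void)
{
    printf("%d\n", fits_in_one_chunk(1000, 24, 128));  /* 1: ends exactly on the boundary */
    printf("%d\n", fits_in_one_chunk(1000, 25, 128));  /* 0: would cross into the next chunk */
    return 0;
}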
577 seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2); in raid0_status()
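On line 577 the chunk size is reported in kibibytes: chunk_sectors counts 512-byte sectors, so dividing by 2 converts sectors to KiB. For example, chunk_sectors = 128 means 128 * 512 B = 65536 B = 64 KiB, and 128 / 2 = 64 is what gets printed as "64k chunks".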
605 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid0_takeover_raid45()
645 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid0_takeover_raid10()
688 mddev->chunk_sectors = chunksect; in raid0_takeover_raid1()
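The takeover paths treat the chunk size differently: conversions from RAID4/5 and RAID10 simply carry the existing chunk_sectors over into new_chunk_sectors (lines 605 and 645), while a RAID1 array has no chunk size at all, so raid0_takeover_raid1 must choose one before storing it on line 688. The sketch below shows one plausible way such a chunksect value could be derived, namely the largest power-of-two chunk (starting from 64 KiB) that evenly divides the array size; this derivation is an assumption, since the listing shows only the final assignment:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/*
 * Assumption/sketch: pick the largest power-of-two chunk, starting from
 * 64 KiB (128 sectors), that evenly divides the array size.  The listing
 * only shows the final assignment to mddev->chunk_sectors on line 688.
 */
static unsigned int pick_takeover_chunk(sector_t array_sectors)
{
    unsigned int chunksect = 64 * 2;   /* 64 KiB expressed in 512-byte sectors */

    while (chunksect && (array_sectors & (chunksect - 1)))
        chunksect >>= 1;

    return chunksect;   /* a real caller would presumably also reject a chunk smaller than one page */
}

int main(void)
{
    printf("%u\n", pick_takeover_chunk(1048576));   /* 512 MiB array -> 128 (64 KiB) */
    printf("%u\n", pick_takeover_chunk(1000000));   /* -> 64: divisible by 64, not by 128 */
    return 0;
}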