Home
last modified time | relevance | path

Searched refs:dev_sectors (Results 1 – 12 of 12) sorted by relevance

/drivers/md/
D dm-raid.c 434 return rs->md.recovery_cp < rs->md.dev_sectors; in rs_is_recovering()
689 rdev->sectors = mddev->dev_sectors; in rs_set_rdev_sectors()
1608 if (ds < rs->md.dev_sectors) { in _check_data_dev_sectors()
1624 sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len; in rs_set_dev_and_array_sectors() local
1644 dev_sectors *= rs->raid10_copies; in rs_set_dev_and_array_sectors()
1645 if (sector_div(dev_sectors, data_stripes)) in rs_set_dev_and_array_sectors()
1648 array_sectors = (data_stripes + delta_disks) * dev_sectors; in rs_set_dev_and_array_sectors()
1652 } else if (sector_div(dev_sectors, data_stripes)) in rs_set_dev_and_array_sectors()
1657 array_sectors = (data_stripes + delta_disks) * dev_sectors; in rs_set_dev_and_array_sectors()
1661 rdev->sectors = dev_sectors; in rs_set_dev_and_array_sectors()
[all …]
D raid10.h 62 sector_t dev_sectors; /* temp copy of member
D md.c 1236 mddev->dev_sectors = ((sector_t)sb->size) * 2; in super_90_validate()
1377 sb->size = mddev->dev_sectors / 2; in super_90_sync()
1488 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_90_rdev_size_change()
1749 mddev->dev_sectors = le64_to_cpu(sb->size); in super_1_validate()
1926 sb->size = cpu_to_le64(mddev->dev_sectors); in super_1_sync()
2073 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_1_rdev_size_change()
2296 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { in bind_rdev_to_array()
2305 mddev->dev_sectors = rdev->sectors; in bind_rdev_to_array()
2537 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || in does_sb_need_changing()
3178 + mddev->dev_sectors > rdev->sectors) in new_offset_store()
[all …]
D raid10.c 2932 return mddev->dev_sectors - sector_nr; in raid10_sync_request()
2936 max_sector = mddev->dev_sectors; in raid10_sync_request()
3534 sectors = conf->dev_sectors; in raid10_size()
3564 conf->dev_sectors = size << conf->geo.chunk_shift; in calc_sectors()
3679 calc_sectors(conf, mddev->dev_sectors); in setup_conf()
3693 conf->prev.stride = conf->dev_sectors; in setup_conf()
3882 mddev->dev_sectors = conf->dev_sectors; in raid10_run()
3999 if (sectors > mddev->dev_sectors && in raid10_resize()
4005 mddev->dev_sectors = conf->dev_sectors; in raid10_resize()
4031 mddev->dev_sectors = size; in raid10_takeover_raid0()
D md-faulty.c 289 return mddev->dev_sectors; in faulty_size()
D raid1.c 2653 max_sector = mddev->dev_sectors; in raid1_sync_request()
2935 return mddev->dev_sectors; in raid1_size()
3215 if (sectors > mddev->dev_sectors && in raid1_resize()
3216 mddev->recovery_cp > mddev->dev_sectors) { in raid1_resize()
3217 mddev->recovery_cp = mddev->dev_sectors; in raid1_resize()
3220 mddev->dev_sectors = sectors; in raid1_resize()
D md-multipath.c 362 return mddev->dev_sectors; in multipath_size()
D raid5.c 5842 BUG_ON((mddev->dev_sectors & in reshape_request()
5962 if (last_sector >= mddev->dev_sectors) in reshape_request()
5963 last_sector = mddev->dev_sectors - 1; in reshape_request()
6027 sector_t max_sector = mddev->dev_sectors; in raid5_sync_request()
6068 sector_t rv = mddev->dev_sectors - sector_nr; in raid5_sync_request()
6737 sectors = mddev->dev_sectors; in raid5_size()
7377 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); in raid5_run()
7378 mddev->resync_max_sectors = mddev->dev_sectors; in raid5_run()
7784 if (sectors > mddev->dev_sectors && in raid5_resize()
7785 mddev->recovery_cp > mddev->dev_sectors) { in raid5_resize()
[all …]
D md.h 311 sector_t dev_sectors; /* used size of member
D raid0.c 665 rdev->sectors = mddev->dev_sectors; in raid0_takeover_raid45()
D md-bitmap.c 244 < (rdev->data_offset + mddev->dev_sectors in write_sb_page()
254 if (rdev->data_offset + mddev->dev_sectors in write_sb_page()
/drivers/ata/
D libata-scsi.c 1678 u64 dev_sectors = qc->dev->n_sectors; in ata_scsi_verify_xlat() local
1706 if (block >= dev_sectors) in ata_scsi_verify_xlat()
1708 if ((block + n_block) > dev_sectors) in ata_scsi_verify_xlat()