Lines matching refs:sectors in drivers/md/raid10.c
330 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
420 r10_bio->sectors, in close_write()
505 r10_bio->sectors, in raid10_end_write_request()
788 int sectors = r10_bio->sectors; in read_balance() local
799 sectors = r10_bio->sectors; in read_balance()
812 && (this_sector + sectors >= conf->next_resync)) in read_balance()
826 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
833 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
837 if (is_badblock(rdev, dev_sector, sectors, in read_balance()
848 if (!do_balance && sectors > bad_sectors) in read_balance()
849 sectors = bad_sectors; in read_balance()
850 if (best_good_sectors > sectors) in read_balance()
851 best_good_sectors = sectors; in read_balance()
866 best_good_sectors = sectors; in read_balance()
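
The read_balance() hits above clip the read length against known bad blocks on a
candidate device: a request that starts inside a bad range gets shortened to
bad_sectors, while best_good_sectors tracks the longest clean prefix seen across
slots. A minimal standalone sketch of that clipping arithmetic, using a
hypothetical good_prefix() helper rather than the kernel's is_badblock()
interface:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Hedged sketch: given a read of `sectors` at dev_sector and a bad range
     * [first_bad, first_bad + bad_len), return how many leading sectors are
     * known good. Hypothetical helper, not the kernel's is_badblock(). */
    static int good_prefix(sector_t dev_sector, int sectors,
                           sector_t first_bad, int bad_len)
    {
        if (first_bad >= dev_sector + sectors ||
            first_bad + bad_len <= dev_sector)
            return sectors;              /* no overlap: whole request is good */
        if (first_bad <= dev_sector)
            return 0;                    /* request starts inside the bad range */
        return (int)(first_bad - dev_sector); /* clean sectors before it */
    }

    int main(void)
    {
        int best_good_sectors = 0, sectors = 256;
        /* bad range begins 64 sectors into a 256-sector request */
        int good = good_prefix(1000, sectors, 1064, 8);
        if (good > best_good_sectors)
            best_good_sectors = good;    /* mirrors "best_good_sectors = sectors" */
        printf("usable prefix: %d of %d sectors\n", best_good_sectors, sectors);
        return 0;
    }
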
1172 int sectors; in __make_request() local
1183 sectors = bio_sectors(bio); in __make_request()
1186 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in __make_request()
1194 sectors); in __make_request()
1201 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in __make_request()
1202 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in __make_request()
1218 r10_bio->sectors = sectors; in __make_request()
1263 if (max_sectors < r10_bio->sectors) { in __make_request()
1269 r10_bio->sectors = max_sectors; in __make_request()
1286 r10_bio->sectors = bio_sectors(bio) - sectors_handled; in __make_request()
1322 max_sectors = r10_bio->sectors; in __make_request()
1436 if (max_sectors < r10_bio->sectors) { in __make_request()
1440 r10_bio->sectors = max_sectors; in __make_request()
1452 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in __make_request()
1551 r10_bio->sectors = bio_sectors(bio) - sectors_handled; in __make_request()
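
The __make_request() hits show the split pattern used when the chosen device can
only service part of a request: r10_bio->sectors is trimmed to max_sectors, the
handled prefix is submitted, and a fresh r10_bio picks up the remainder,
bio_sectors(bio) - sectors_handled. A standalone sketch of that bookkeeping,
with plain ints standing in for the bio machinery:

    #include <stdio.h>

    int main(void)
    {
        int total = 1024;          /* bio_sectors(bio) */
        int max_sectors = 384;     /* what the first device/range can take */
        int sectors_handled = 0;

        while (sectors_handled < total) {
            int this_io = total - sectors_handled;   /* remainder */
            if (this_io > max_sectors)
                this_io = max_sectors;    /* r10_bio->sectors = max_sectors */
            printf("submit %d sectors at offset %d\n", this_io, sectors_handled);
            sectors_handled += this_io;   /* next r10_bio covers what is left */
        }
        return 0;
    }
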
1978 atomic_add(r10_bio->sectors, in end_sync_read()
2001 sector_t s = r10_bio->sectors; in end_sync_request()
2052 r10_bio->sectors, in end_sync_write()
2097 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); in sync_request_write()
2113 int sectors = r10_bio->sectors; in sync_request_write() local
2116 if (sectors < (len / 512)) in sync_request_write()
2117 len = sectors * 512; in sync_request_write()
2122 sectors -= len/512; in sync_request_write()
2126 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
2139 tbio->bi_iter.bi_size = r10_bio->sectors << 9; in sync_request_write()
2188 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
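
Line 2097's vcnt expression is a ceiling division written with shifts:
PAGE_SIZE >> 9 is sectors-per-page, so vcnt = ceil(sectors / (PAGE_SIZE >> 9))
pages hold the r10_bio. Lines 2113-2122 then walk those pages, trimming the
final compare length to the sectors that remain. A worked example, assuming
4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)   /* assuming 4 KiB pages */

    int main(void)
    {
        int sectors = 19;   /* 19 * 512 B = 9.5 KiB: needs 3 pages */

        /* vcnt = ceil(sectors / (PAGE_SIZE / 512)), written with shifts
         * exactly as in sync_request_write(). */
        int vcnt = (sectors + (int)(PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
        printf("vcnt = %d\n", vcnt);         /* 3 */

        for (int j = 0; j < vcnt; j++) {
            int len = PAGE_SIZE;
            if (sectors < (len / 512))
                len = sectors * 512;         /* short tail: 3 sectors left */
            printf("page %d: compare %d bytes\n", j, len);
            sectors -= len / 512;
        }
        return 0;
    }
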
2216 int sectors = r10_bio->sectors; in fix_recovery_read_error() local
2221 while (sectors) { in fix_recovery_read_error()
2222 int s = sectors; in fix_recovery_read_error()
2281 sectors -= s; in fix_recovery_read_error()
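
fix_recovery_read_error() walks the failed region with the
while (sectors) { int s = sectors; ... sectors -= s; } skeleton, capping each
pass (in the kernel, to one page's worth of sectors) before the per-chunk I/O;
the same skeleton recurs in fix_read_error() and handle_reshape_read_error()
below. A standalone sketch:

    #include <stdio.h>

    int main(void)
    {
        int sectors = 20;               /* r10_bio->sectors */
        unsigned long long sect = 0;    /* progress within the region */

        /* Walk the region in chunks of at most one page's worth of
         * sectors (PAGE_SIZE >> 9 == 8 with 4 KiB pages). */
        while (sectors) {
            int s = sectors;
            if (s > 8)
                s = 8;                  /* cap at sectors-per-page */
            printf("recover %d sectors at offset %llu\n", s, sect);
            sectors -= s;
            sect += s;
        }
        return 0;
    }
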
2363 int sectors, struct page *page, int rw) in r10_sync_page_io() argument
2368 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) in r10_sync_page_io()
2371 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) in r10_sync_page_io()
2381 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) in r10_sync_page_io()
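
r10_sync_page_io() wraps a synchronous page I/O in bad-block bookkeeping:
refuse to touch a range already known bad, return success if the I/O works,
and otherwise record the range with rdev_set_badblocks() before failing. A
hedged control-flow sketch, with stub helpers standing in for the kernel
calls:

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Hypothetical stand-ins for is_badblock(), sync_page_io() and
     * rdev_set_badblocks(); only the control flow is the point here. */
    static bool range_is_bad(sector_t s, int n) { (void)s; (void)n; return false; }
    static bool page_io(sector_t s, int n)      { (void)s; (void)n; return false; }
    static bool record_bad(sector_t s, int n)   { (void)s; (void)n; return true;  }

    /* Sketch: -1 if the range is already known bad (leave it alone),
     * 1 on success, 0 on failure after recording the range as bad so
     * later reads avoid it. */
    static int sync_page_io_sketch(sector_t sector, int sectors)
    {
        if (range_is_bad(sector, sectors))
            return -1;
        if (page_io(sector, sectors))   /* kernel passes length as sectors << 9 */
            return 1;
        if (!record_bad(sector, sectors))
            printf("could not record: kernel would md_error() the device\n");
        return 0;
    }

    int main(void)
    {
        printf("r10_sync_page_io sketch -> %d\n", sync_page_io_sketch(4096, 8));
        return 0;
    }
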
2397 int sectors = r10_bio->sectors; in fix_read_error() local
2431 while(sectors) { in fix_read_error()
2432 int s = sectors; in fix_read_error()
2585 sectors -= s; in fix_read_error()
2609 int sectors; in narrow_write_error() local
2610 int sect_to_write = r10_bio->sectors; in narrow_write_error()
2618 sectors = ((r10_bio->sector + block_sectors) in narrow_write_error()
2624 if (sectors > sect_to_write) in narrow_write_error()
2625 sectors = sect_to_write; in narrow_write_error()
2628 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); in narrow_write_error()
2636 sectors, 0) in narrow_write_error()
2640 sect_to_write -= sectors; in narrow_write_error()
2641 sector += sectors; in narrow_write_error()
2642 sectors = block_sectors; in narrow_write_error()
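
narrow_write_error() re-issues a failed write in badblock-granularity pieces.
Line 2618's expression continues in the source as
((r10_bio->sector + block_sectors) & ~(block_sectors - 1)) - sector, which,
assuming power-of-two block_sectors, sizes the first piece to end on a block
boundary; middle pieces are a full block_sectors, and the last is trimmed to
sect_to_write. A worked example:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    int main(void)
    {
        sector_t sector = 1003;     /* r10_bio->sector: start of failed write */
        int sect_to_write = 21;     /* r10_bio->sectors */
        int block_sectors = 8;      /* badblock granularity (power of two) */

        /* First piece ends at the next block_sectors boundary. */
        int sectors = (int)(((sector + block_sectors) &
                             ~(sector_t)(block_sectors - 1)) - sector);

        while (sect_to_write) {
            if (sectors > sect_to_write)
                sectors = sect_to_write;      /* final short piece */
            printf("rewrite %d sectors at %llu\n", sectors, sector);
            sect_to_write -= sectors;
            sector += sectors;
            sectors = block_sectors;          /* middle pieces: whole blocks */
        }
        return 0;
    }
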
2710 if (max_sectors < r10_bio->sectors) { in handle_read_error()
2716 r10_bio->sectors = max_sectors; in handle_read_error()
2728 r10_bio->sectors = bio_sectors(mbio) - sectors_handled; in handle_read_error()
2764 r10_bio->sectors, 0); in handle_write_completed()
2769 r10_bio->sectors, 0)) in handle_write_completed()
2780 r10_bio->sectors, 0); in handle_write_completed()
2785 r10_bio->sectors, 0)) in handle_write_completed()
2799 r10_bio->sectors, 0); in handle_write_completed()
2816 r10_bio->sectors, 0); in handle_write_completed()
3310 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; in sync_request()
3424 r10_bio->sectors = nr_sectors; in sync_request()
3432 r10_bio->sectors = nr_sectors; in sync_request()
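
Line 3310 clips a resync request to the end of the chunk containing sector_nr:
with chunk_mask = chunk_sectors - 1, sector_nr | chunk_mask is the last sector
of that chunk, so the expression is "sectors remaining until the chunk
boundary". A worked example with 128-sector chunks:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    int main(void)
    {
        sector_t chunk_sectors = 128;            /* 64 KiB chunks */
        sector_t chunk_mask = chunk_sectors - 1;
        sector_t sector_nr = 1000;               /* current resync position */

        /* sector_nr | chunk_mask == 1023, the last sector of the chunk
         * [896, 1023], so 24 sectors remain before the boundary. */
        sector_t max_sync = (sector_nr | chunk_mask) - sector_nr + 1;

        printf("resync %llu sectors (through sector %llu)\n",
               max_sync, sector_nr + max_sync - 1);
        return 0;
    }
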
3463 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid10_size() argument
3471 if (!sectors) in raid10_size()
3472 sectors = conf->dev_sectors; in raid10_size()
3474 size = sectors >> conf->geo.chunk_shift; in raid10_size()
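
raid10_size() turns per-device capacity into array capacity: round down to
whole chunks (line 3474), divide out the far and near copy counts, scale by
the disk count, and shift back to sectors. A standalone sketch mirroring that
shift/divide structure; the parameter list is illustrative, the kernel reads
these values from conf->geo:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    static sector_t raid10_size_sketch(sector_t sectors, int raid_disks,
                                       int chunk_shift, int near_copies,
                                       int far_copies)
    {
        sector_t size = sectors >> chunk_shift;  /* whole chunks per device */
        size /= far_copies;                      /* kernel: sector_div() */
        size = size * raid_disks;
        size /= near_copies;
        return size << chunk_shift;              /* back to sectors */
    }

    int main(void)
    {
        /* 4 disks of 1 << 20 sectors (512 MiB), 128-sector chunks
         * (shift 7), 2 near copies: 2097152 sectors (1 GiB) usable. */
        printf("array size: %llu sectors\n",
               raid10_size_sketch(1ULL << 20, 4, 7, 2, 1));
        return 0;
    }
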
3882 static int raid10_resize(struct mddev *mddev, sector_t sectors) in raid10_resize() argument
3906 size = raid10_size(mddev, sectors, 0); in raid10_resize()
3918 if (sectors > mddev->dev_sectors && in raid10_resize()
3923 calc_sectors(conf, sectors); in raid10_resize()
4424 r10_bio->sectors = last - sector_nr + 1; in reshape_request()
4512 r10_bio->sectors = nr_sectors; in reshape_request()
4515 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); in reshape_request()
4551 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
4573 md_sync_acct(b->bi_bdev, r10_bio->sectors); in reshape_request_write()
4611 int sectors = r10_bio->sectors; in handle_reshape_read_error() local
4625 while (sectors) { in handle_reshape_read_error()
4626 int s = sectors; in handle_reshape_read_error()
4663 sectors -= s; in handle_reshape_read_error()
4701 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()