Lines Matching refs:sector

Cross-reference hits for the identifier "sector" in the MD RAID4/5/6 driver (drivers/md/raid5.c). Each entry gives the source line number, the matching line, and the enclosing function; a trailing "argument" or "local" marks a hit that declares a function parameter or local variable.

137 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)  in r5_next_bio()  argument
140 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) in r5_next_bio()
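
The two hits above are most of the helper already. A hedged reconstruction from these fragments (assuming the usual bio_sectors() accessor), for reference:

	static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
	{
		int sectors = bio_sectors(bio);

		/* If this bio ends inside the STRIPE_SECTORS window that
		 * starts at 'sector', the chain linked through bi_next may
		 * hold another bio for the same stripe; otherwise stop. */
		if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
			return bio->bi_next;
		else
			return NULL;
	}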
451 (unsigned long long)sh->sector); in remove_hash()
458 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
461 (unsigned long long)sh->sector); in insert_hash()
523 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) in init_stripe() argument
533 (unsigned long long)sector); in init_stripe()
538 sh->sector = sector; in init_stripe()
539 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
548 (unsigned long long)sh->sector, i, dev->toread, in init_stripe()
562 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, in __find_stripe() argument
567 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); in __find_stripe()
568 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
569 if (sh->sector == sector && sh->generation == generation) in __find_stripe()
571 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); in __find_stripe()
658 get_active_stripe(struct r5conf *conf, sector_t sector, in get_active_stripe() argument
662 int hash = stripe_hash_locks_hash(sector); in get_active_stripe()
664 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); in get_active_stripe()
672 sh = __find_stripe(conf, sector, conf->generation - previous); in get_active_stripe()
689 init_stripe(sh, sector, previous); in get_active_stripe()
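
Hits 658-689 show the lookup-or-initialize pattern built on the helpers above: hash the sector, try __find_stripe(), and only run init_stripe() on a freshly obtained stripe_head when the sector is not cached. A heavily abridged sketch (the real function also waits, retries, and handles quiescing; get_free_stripe() is assumed here):

	hash = stripe_hash_locks_hash(sector);
	spin_lock_irq(conf->hash_locks + hash);
	sh = __find_stripe(conf, sector, conf->generation - previous);
	if (!sh) {
		/* Not cached: take an inactive stripe and (re)target it. */
		sh = get_free_stripe(conf, hash);
		init_stripe(sh, sector, previous);   /* hit 689 */
	}
	atomic_inc(&sh->count);
	spin_unlock_irq(conf->hash_locks + hash);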
811 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, in ops_run_io()
856 __func__, (unsigned long long)sh->sector, in ops_run_io()
860 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
863 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
887 sh->dev[i].sector); in ops_run_io()
906 __func__, (unsigned long long)sh->sector, in ops_run_io()
910 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
913 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
931 sh->dev[i].sector); in ops_run_io()
938 bi->bi_rw, i, (unsigned long long)sh->sector); in ops_run_io()
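
Hits 811-938 are the I/O issue path, ops_run_io(). The repeated pair at 860/863 (and 910/913 for the replacement device) is the translation from the stripe's array-relative sector to the on-disk sector: during a reshape a member device can expose two data offsets, and the driver picks one per stripe. A sketch of that step, with the selector hedged as the driver's use_new_offset() helper:

	/* Map the stripe's logical sector onto the member disk; which
	 * offset applies depends on how far the reshape has progressed. */
	if (use_new_offset(conf, sh))
		bi->bi_iter.bi_sector = sh->sector + rdev->new_data_offset;
	else
		bi->bi_iter.bi_sector = sh->sector + rdev->data_offset;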
947 sector_t sector, struct dma_async_tx_descriptor *tx, in async_copy_data() argument
957 if (bio->bi_iter.bi_sector >= sector) in async_copy_data()
958 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; in async_copy_data()
960 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; in async_copy_data()
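
Hits 957-960 compute a signed byte offset between the bio's current position and the stripe-device sector, in 512-byte units, with the sign recording which of the two starts first: a bio beginning 3 sectors past 'sector' yields page_offset = 3 * 512 = 1536, one beginning 2 sectors before it yields -1024. Reassembled:

	int page_offset;

	/* Byte offset of the bio's start relative to the stripe page;
	 * negative when the bio begins before 'sector'. */
	if (bio->bi_iter.bi_sector >= sector)
		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;

The copy loop that follows in the driver clips each bio segment against the PAGE_SIZE window at that offset.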
1015 (unsigned long long)sh->sector); in ops_complete_biofill()
1033 dev->sector + STRIPE_SECTORS) { in ops_complete_biofill()
1034 rbi2 = r5_next_bio(rbi, dev->sector); in ops_complete_biofill()
1058 (unsigned long long)sh->sector); in ops_run_biofill()
1069 dev->sector + STRIPE_SECTORS) { in ops_run_biofill()
1071 dev->sector, tx, sh); in ops_run_biofill()
1072 rbi = r5_next_bio(rbi, dev->sector); in ops_run_biofill()
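
Hits 1033-1034, 1069-1072 and the drain loop at 1411-1427 all repeat one idiom: walk every bio queued against a stripe device, using r5_next_bio() to stop once the STRIPE_SECTORS window is exhausted. Reassembled from the ops_run_biofill fragments (the full argument list of async_copy_data is hedged; this snapshot evidently passes sh as the last argument):

	while (rbi && rbi->bi_iter.bi_sector <
	       dev->sector + STRIPE_SECTORS) {
		/* Copy the overlap between this bio and the stripe page,
		 * chaining async DMA descriptors through tx. */
		tx = async_copy_data(0, rbi, &dev->page,
				     dev->sector, tx, sh);
		rbi = r5_next_bio(rbi, dev->sector);
	}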
1100 (unsigned long long)sh->sector); in ops_complete_compute()
1134 __func__, (unsigned long long)sh->sector, target); in ops_run_compute5()
1208 __func__, (unsigned long long)sh->sector, target); in ops_run_compute6_1()
1258 __func__, (unsigned long long)sh->sector, target, target2); in ops_run_compute6_2()
1286 __func__, (unsigned long long)sh->sector, faila, failb); in ops_run_compute6_2()
1354 (unsigned long long)sh->sector); in ops_complete_prexor()
1370 (unsigned long long)sh->sector); in ops_run_prexor()
1393 (unsigned long long)sh->sector); in ops_run_biodrain()
1411 dev->sector + STRIPE_SECTORS) { in ops_run_biodrain()
1420 dev->sector, tx, sh); in ops_run_biodrain()
1427 wbi = r5_next_bio(wbi, dev->sector); in ops_run_biodrain()
1445 (unsigned long long)sh->sector); in ops_complete_reconstruct()
1495 (unsigned long long)sh->sector); in ops_run_reconstruct5()
1555 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); in ops_run_reconstruct6()
1585 (unsigned long long)sh->sector); in ops_complete_check()
1605 (unsigned long long)sh->sector); in ops_run_check_p()
1633 (unsigned long long)sh->sector, checkp); in ops_run_check_pq()
1990 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_read_request()
2007 s = sh->sector + rdev->new_data_offset; in raid5_end_read_request()
2009 s = sh->sector + rdev->data_offset; in raid5_end_read_request()
2088 rdev, sh->sector, STRIPE_SECTORS, 0))) in raid5_end_read_request()
2128 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_write_request()
2138 else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2150 } else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2187 dev->sector = compute_blocknr(sh, i, previous); in raid5_build_block()
2426 sector_t new_sector = sh->sector, check; in compute_blocknr()
2532 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in compute_blocknr()
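
Hits 2426 and 2532 bracket a round-trip self-check in compute_blocknr(): after mapping this (stripe, disk) pair back to a logical array sector, the result is fed through raid5_compute_sector() again and must land on the same stripe sector, disk index, and parity layout. A sketch of the check (variable names taken from the fragments; the error message is hedged):

	/* Verify the reverse mapping by running it forward again. */
	check = raid5_compute_sector(conf, r_sector, previous,
				     &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx ||
	    sh2.pd_idx != sh->pd_idx || sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "compute_blocknr: map not correct\n");
		return 0;
	}
	return r_sector;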
2625 __func__, (unsigned long long)sh->sector, in schedule_reconstruction()
2642 (unsigned long long)sh->sector); in add_stripe_bio()
2675 sector_t sector = sh->dev[dd_idx].sector; in add_stripe_bio() local
2677 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && in add_stripe_bio()
2678 bi && bi->bi_iter.bi_sector <= sector; in add_stripe_bio()
2679 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { in add_stripe_bio()
2680 if (bio_end_sector(bi) >= sector) in add_stripe_bio()
2681 sector = bio_end_sector(bi); in add_stripe_bio()
2683 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) in add_stripe_bio()
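
Hits 2675-2683 are the full-overwrite detection in add_stripe_bio(): starting at the device's first sector, advance through the queued write bios as long as each begins at or before the point already covered; if coverage reaches STRIPE_SECTORS, the whole stripe page will be overwritten and no pre-read is needed. Reassembled (the R5_OVERWRITE consequence is hedged from the driver's flag names):

	sector_t sector = sh->dev[dd_idx].sector;

	for (bi = sh->dev[dd_idx].towrite;
	     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
		     bi && bi->bi_iter.bi_sector <= sector;
	     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
		if (bio_end_sector(bi) >= sector)
			sector = bio_end_sector(bi);   /* extend coverage */
	}
	if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
		set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);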
2689 (unsigned long long)sh->sector, dd_idx); in add_stripe_bio()
2693 bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
2746 sh->sector, in handle_failed_stripe()
2764 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
2765 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
2775 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
2788 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
2789 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
2812 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
2814 r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
2824 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
2865 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
2872 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
2892 && (rdev->recovery_offset <= sh->sector in want_replace()
2893 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
2925 sh->sector >= sh->raid_conf->mddev->recovery_cp) in fetch_block()
2942 (unsigned long long)sh->sector, disk_idx); in fetch_block()
2971 (unsigned long long)sh->sector, in fetch_block()
3046 dev->sector + STRIPE_SECTORS) { in handle_stripe_clean_event()
3047 wbi2 = r5_next_bio(wbi, dev->sector); in handle_stripe_clean_event()
3055 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
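
Hits 2693, 2775, 2824 and 3055 pair up: bitmap_startwrite() runs once when a write bio is queued on the stripe (add_stripe_bio), and bitmap_endwrite() runs when the stripe's writes either complete (handle_stripe_clean_event) or are aborted (handle_failed_stripe), always over the same (sh->sector, STRIPE_SECTORS) range, so every region dirtied in the write-intent bitmap is eventually cleared. The call shape, with the trailing arguments hedged from mainline of this era:

	/* Queueing a write on the stripe (hit 2693): */
	bitmap_startwrite(conf->mddev->bitmap, sh->sector,
			  STRIPE_SECTORS, 0);

	/* Writes finished, cleanly or not (hits 2775/2824/3055): */
	bitmap_endwrite(conf->mddev->bitmap, sh->sector, STRIPE_SECTORS,
			!test_bit(STRIPE_DEGRADED, &sh->state), 0);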
3111 (recovery_cp < MaxSector && sh->sector >= recovery_cp && in handle_stripe_dirtying()
3119 (unsigned long long)sh->sector); in handle_stripe_dirtying()
3144 (unsigned long long)sh->sector, rmw, rcw); in handle_stripe_dirtying()
3151 (unsigned long long)sh->sector, rmw); in handle_stripe_dirtying()
3202 (unsigned long long)sh->sector, in handle_stripe_dirtying()
3308 (unsigned long long) sh->sector); in handle_parity_checks5()
3471 (unsigned long long) sh->sector); in handle_parity_checks6()
3609 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && in analyse_stripe()
3610 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, in analyse_stripe()
3622 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, in analyse_stripe()
3649 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) in analyse_stripe()
3718 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
3759 (unsigned long long)sh->sector, sh->state, in handle_stripe()
3950 = get_active_stripe(conf, sh->sector, 1, 1, 1); in handle_stripe()
3979 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
4014 if (!rdev_set_badblocks(rdev, sh->sector, in handle_stripe()
4021 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
4030 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
4126 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); in raid5_mergeable_bvec() local
4136 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; in raid5_mergeable_bvec()
4146 sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); in in_chunk_boundary() local
4153 ((sector & (chunk_sectors - 1)) + bio_sectors); in in_chunk_boundary()
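
Hits 4126-4153 both lean on chunk_sectors being a power of two: sector & (chunk_sectors - 1) is the offset inside the current chunk, and adding the request length tells whether the request crosses a chunk boundary. A small self-contained illustration of the same arithmetic (all values invented):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long chunk_sectors = 128; /* 64 KiB chunks */
		unsigned long long sector = 1000;       /* request start */
		unsigned long long bio_sectors = 16;    /* request length */

		/* Offset inside the chunk; valid only because
		 * chunk_sectors is a power of two. */
		unsigned long long in_chunk = sector & (chunk_sectors - 1);

		/* raid5_mergeable_bvec (hit 4136): bytes of headroom
		 * before the chunk boundary. */
		long long max =
			(long long)(chunk_sectors - (in_chunk + bio_sectors)) << 9;

		/* in_chunk_boundary (hit 4153): does the request fit? */
		int fits = chunk_sectors >= in_chunk + bio_sectors;

		printf("offset %llu in chunk, %lld bytes to spare, fits=%d\n",
		       in_chunk, max, fits);
		return 0;
	}

With these values: 1000 & 127 = 104, and 104 + 16 = 120 <= 128, so the request stays inside its chunk with 8 sectors (4096 bytes) of headroom.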
4567 sh->sector, in make_discard_request()
5106 sector_t sector, logical_sector, last_sector; in retry_aligned_read() local
5113 sector = raid5_compute_sector(conf, logical_sector, in retry_aligned_read()
5119 sector += STRIPE_SECTORS, in retry_aligned_read()
5126 sh = get_active_stripe(conf, sector, 0, 1, 1); in retry_aligned_read()
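
Hits 5106-5126 retry a failed aligned read through the stripe cache, one stripe at a time: logical_sector steps through the original bio in STRIPE_SECTORS increments while sector (the array-internal position from raid5_compute_sector(), hit 5113) advances in lockstep, and each step pins the covering stripe with get_active_stripe() (hit 5126). The loop skeleton, as the fragments suggest:

	logical_sector = raid_bio->bi_iter.bi_sector &
			 ~((sector_t)STRIPE_SECTORS - 1);
	sector = raid5_compute_sector(conf, logical_sector, 0,
				      &dd_idx, NULL);
	last_sector = bio_end_sector(raid_bio);

	for (; logical_sector < last_sector;
	     logical_sector += STRIPE_SECTORS,
		     sector += STRIPE_SECTORS,
		     scnt++) {
		sh = get_active_stripe(conf, sector, 0, 1, 1);
		/* re-read this STRIPE_SECTORS chunk via the cache */
	}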