• Home
  • Raw
  • Download

Lines matching references to `devs` (search results from raid10.c; each entry shows the original line number, the matching code fragment, and the enclosing function)

94 	int size = offsetof(struct r10bio, devs[conf->copies]);  in r10bio_pool_alloc()
151 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
157 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
164 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
171 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
199 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
200 bio_put(r10_bio->devs[j].bio); in r10buf_pool_alloc()
201 if (r10_bio->devs[j].repl_bio) in r10buf_pool_alloc()
202 bio_put(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
218 struct bio *bio = r10bio->devs[j].bio; in r10buf_pool_free()
226 bio = r10bio->devs[j].repl_bio; in r10buf_pool_free()
242 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios()
246 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
317 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
318 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
331 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
333 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
346 return r10_bio->devs[slot].devnum; in find_bio_disk()
358 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
479 r10_bio->devs[slot].bio = NULL; in raid10_end_write_request()
511 r10_bio->devs[slot].addr, in raid10_end_write_request()
516 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; in raid10_end_write_request()
518 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
594 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
595 r10bio->devs[slot].addr = s; in __raid10_find_phys()
612 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
613 r10bio->devs[slot].addr = s; in __raid10_find_phys()
749 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
751 disk = r10_bio->devs[slot].devnum; in read_balance()
754 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
760 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
763 dev_sector = r10_bio->devs[slot].addr; in read_balance()
819 new_distance = r10_bio->devs[slot].addr; in read_balance()
821 new_distance = abs(r10_bio->devs[slot].addr - in read_balance()
1131 if (slot >= 0 && r10_bio->devs[slot].rdev) { in raid10_read_request()
1147 disk = r10_bio->devs[slot].devnum; in raid10_read_request()
1154 err_rdev = r10_bio->devs[slot].rdev; in raid10_read_request()
1190 r10_bio->devs[slot].bio = read_bio; in raid10_read_request()
1191 r10_bio->devs[slot].rdev = rdev; in raid10_read_request()
1193 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + in raid10_read_request()
1223 int devnum = r10_bio->devs[n_copy].devnum; in raid10_write_one_disk()
1238 r10_bio->devs[n_copy].repl_bio = mbio; in raid10_write_one_disk()
1240 r10_bio->devs[n_copy].bio = mbio; in raid10_write_one_disk()
1242 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + in raid10_write_one_disk()
1348 int d = r10_bio->devs[i].devnum; in raid10_write_request()
1369 r10_bio->devs[i].bio = NULL; in raid10_write_request()
1370 r10_bio->devs[i].repl_bio = NULL; in raid10_write_request()
1378 sector_t dev_sector = r10_bio->devs[i].addr; in raid10_write_request()
1418 r10_bio->devs[i].bio = bio; in raid10_write_request()
1422 r10_bio->devs[i].repl_bio = bio; in raid10_write_request()
1434 if (r10_bio->devs[j].bio) { in raid10_write_request()
1435 d = r10_bio->devs[j].devnum; in raid10_write_request()
1438 if (r10_bio->devs[j].repl_bio) { in raid10_write_request()
1440 d = r10_bio->devs[j].devnum; in raid10_write_request()
1475 if (r10_bio->devs[i].bio) in raid10_write_request()
1477 if (r10_bio->devs[i].repl_bio) in raid10_write_request()
1497 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies); in __make_request()
1969 r10_bio->devs[slot].addr, in end_sync_write()
2007 if (!r10_bio->devs[i].bio->bi_status) in sync_request_write()
2014 fbio = r10_bio->devs[i].bio; in sync_request_write()
2026 tbio = r10_bio->devs[i].bio; in sync_request_write()
2034 d = r10_bio->devs[i].devnum; in sync_request_write()
2036 if (!r10_bio->devs[i].bio->bi_status) { in sync_request_write()
2075 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; in sync_request_write()
2098 tbio = r10_bio->devs[i].repl_bio; in sync_request_write()
2101 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2102 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2104 d = r10_bio->devs[i].devnum; in sync_request_write()
2139 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error()
2143 int dr = r10_bio->devs[0].devnum; in fix_recovery_read_error()
2144 int dw = r10_bio->devs[1].devnum; in fix_recovery_read_error()
2157 addr = r10_bio->devs[0].addr + sect, in fix_recovery_read_error()
2165 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2189 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2215 struct bio *wbio = r10_bio->devs[1].bio; in recovery_request_write()
2216 struct bio *wbio2 = r10_bio->devs[1].repl_bio; in recovery_request_write()
2238 d = r10_bio->devs[1].devnum; in recovery_request_write()
2326 int d = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2350 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; in fix_read_error()
2368 d = r10_bio->devs[sl].devnum; in fix_read_error()
2373 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, in fix_read_error()
2378 r10_bio->devs[sl].addr + in fix_read_error()
2399 int dn = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2404 r10_bio->devs[r10_bio->read_slot].addr in fix_read_error()
2408 r10_bio->devs[r10_bio->read_slot].bio in fix_read_error()
2423 d = r10_bio->devs[sl].devnum; in fix_read_error()
2433 r10_bio->devs[sl].addr + in fix_read_error()
2459 d = r10_bio->devs[sl].devnum; in fix_read_error()
2469 r10_bio->devs[sl].addr + in fix_read_error()
2510 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2546 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); in narrow_write_error()
2571 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error()
2581 bio = r10_bio->devs[slot].bio; in handle_read_error()
2583 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2586 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2614 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2616 if (r10_bio->devs[m].bio == NULL || in handle_write_completed()
2617 r10_bio->devs[m].bio->bi_end_io == NULL) in handle_write_completed()
2619 if (!r10_bio->devs[m].bio->bi_status) { in handle_write_completed()
2622 r10_bio->devs[m].addr, in handle_write_completed()
2627 r10_bio->devs[m].addr, in handle_write_completed()
2632 if (r10_bio->devs[m].repl_bio == NULL || in handle_write_completed()
2633 r10_bio->devs[m].repl_bio->bi_end_io == NULL) in handle_write_completed()
2636 if (!r10_bio->devs[m].repl_bio->bi_status) { in handle_write_completed()
2639 r10_bio->devs[m].addr, in handle_write_completed()
2644 r10_bio->devs[m].addr, in handle_write_completed()
2653 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2654 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed()
2659 r10_bio->devs[m].addr, in handle_write_completed()
2671 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
2676 r10_bio->devs[m].addr, in handle_write_completed()
2808 bio = r10bio->devs[i].bio; in raid10_alloc_init_r10buf()
2812 bio = r10bio->devs[i].repl_bio; in raid10_alloc_init_r10buf()
3130 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3141 sector = r10_bio->devs[j].addr; in raid10_sync_request()
3155 bio = r10_bio->devs[0].bio; in raid10_sync_request()
3162 from_addr = r10_bio->devs[j].addr; in raid10_sync_request()
3170 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3173 to_addr = r10_bio->devs[k].addr; in raid10_sync_request()
3174 r10_bio->devs[0].devnum = d; in raid10_sync_request()
3175 r10_bio->devs[0].addr = from_addr; in raid10_sync_request()
3176 r10_bio->devs[1].devnum = i; in raid10_sync_request()
3177 r10_bio->devs[1].addr = to_addr; in raid10_sync_request()
3180 bio = r10_bio->devs[1].bio; in raid10_sync_request()
3190 r10_bio->devs[1].bio->bi_end_io = NULL; in raid10_sync_request()
3193 bio = r10_bio->devs[1].repl_bio; in raid10_sync_request()
3222 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3228 r10_bio->devs[k].addr, in raid10_sync_request()
3234 r10_bio->devs[k].addr, in raid10_sync_request()
3258 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { in raid10_sync_request()
3265 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3272 r10_bio->devs[0].bio->bi_opf in raid10_sync_request()
3325 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3330 if (r10_bio->devs[i].repl_bio) in raid10_sync_request()
3331 r10_bio->devs[i].repl_bio->bi_end_io = NULL; in raid10_sync_request()
3333 bio = r10_bio->devs[i].bio; in raid10_sync_request()
3341 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3374 bio = r10_bio->devs[i].repl_bio; in raid10_sync_request()
3377 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3392 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3393 if (r10_bio->devs[i].bio->bi_end_io) in raid10_sync_request()
3396 if (r10_bio->devs[i].repl_bio && in raid10_sync_request()
3397 r10_bio->devs[i].repl_bio->bi_end_io) in raid10_sync_request()
3989 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) in raid10_takeover_raid0() argument
3999 sector_div(size, devs); in raid10_takeover_raid0()
4542 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr in reshape_request()
4552 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; in reshape_request()
4589 int d = r10_bio->devs[s/2].devnum; in reshape_request()
4593 b = r10_bio->devs[s/2].repl_bio; in reshape_request()
4596 b = r10_bio->devs[s/2].bio; in reshape_request()
4602 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + in reshape_request()
4613 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in reshape_request()
4680 int d = r10_bio->devs[s/2].devnum; in reshape_request_write()
4685 b = r10_bio->devs[s/2].repl_bio; in reshape_request_write()
4688 b = r10_bio->devs[s/2].bio; in reshape_request_write()
4746 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO); in handle_reshape_read_error()
4753 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in handle_reshape_read_error()
4768 int d = r10b->devs[slot].devnum; in handle_reshape_read_error()
4776 addr = r10b->devs[slot].addr + idx * PAGE_SIZE; in handle_reshape_read_error()