Lines matching refs:devs in drivers/md/raid10.c (the MD RAID10 driver). Each entry gives the source line number, the matching code, and the enclosing function; a short sketch of the devs[] allocation pattern these matches share follows the listing.
109 int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]); in r10bio_pool_alloc()
167 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
174 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
181 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
188 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
216 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
217 bio_uninit(r10_bio->devs[j].bio); in r10buf_pool_alloc()
218 kfree(r10_bio->devs[j].bio); in r10buf_pool_alloc()
219 if (r10_bio->devs[j].repl_bio) in r10buf_pool_alloc()
220 bio_uninit(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
221 kfree(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
237 struct bio *bio = r10bio->devs[j].bio; in r10buf_pool_free()
246 bio = r10bio->devs[j].repl_bio; in r10buf_pool_free()
264 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios()
268 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
345 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
346 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
359 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
361 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
373 return r10_bio->devs[slot].devnum; in find_bio_disk()
385 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
502 r10_bio->devs[slot].bio = NULL; in raid10_end_write_request()
529 if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr, in raid10_end_write_request()
534 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; in raid10_end_write_request()
536 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
612 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
613 r10bio->devs[slot].addr = s; in __raid10_find_phys()
630 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
631 r10bio->devs[slot].addr = s; in __raid10_find_phys()
757 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
759 disk = r10_bio->devs[slot].devnum; in read_balance()
762 r10_bio->devs[slot].addr + sectors > in read_balance()
769 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
772 dev_sector = r10_bio->devs[slot].addr; in read_balance()
828 new_distance = r10_bio->devs[slot].addr; in read_balance()
830 new_distance = abs(r10_bio->devs[slot].addr - in read_balance()
1159 if (slot >= 0 && r10_bio->devs[slot].rdev) { in raid10_read_request()
1174 disk = r10_bio->devs[slot].devnum; in raid10_read_request()
1181 err_rdev = r10_bio->devs[slot].rdev; in raid10_read_request()
1229 r10_bio->devs[slot].bio = read_bio; in raid10_read_request()
1230 r10_bio->devs[slot].rdev = rdev; in raid10_read_request()
1232 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + in raid10_read_request()
1256 int devnum = r10_bio->devs[n_copy].devnum; in raid10_write_one_disk()
1265 r10_bio->devs[n_copy].repl_bio = mbio; in raid10_write_one_disk()
1267 r10_bio->devs[n_copy].bio = mbio; in raid10_write_one_disk()
1269 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + in raid10_write_one_disk()
1304 sector_t dev_sector = r10_bio->devs[i].addr; in wait_blocked_dev()
1425 int d = r10_bio->devs[i].devnum; in raid10_write_request()
1435 r10_bio->devs[i].bio = NULL; in raid10_write_request()
1436 r10_bio->devs[i].repl_bio = NULL; in raid10_write_request()
1442 sector_t dev_sector = r10_bio->devs[i].addr; in raid10_write_request()
1479 r10_bio->devs[i].bio = bio; in raid10_write_request()
1483 r10_bio->devs[i].repl_bio = bio; in raid10_write_request()
1511 if (r10_bio->devs[i].bio) in raid10_write_request()
1513 if (r10_bio->devs[i].repl_bio) in raid10_write_request()
1520 int d = r10_bio->devs[k].devnum; in raid10_write_request()
1524 if (r10_bio->devs[k].bio) { in raid10_write_request()
1526 r10_bio->devs[k].bio = NULL; in raid10_write_request()
1528 if (r10_bio->devs[k].repl_bio) { in raid10_write_request()
1530 r10_bio->devs[k].repl_bio = NULL; in raid10_write_request()
1553 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * in __make_request()
1736 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks); in raid10_handle_discard()
1765 r10_bio->devs[disk].bio = NULL; in raid10_handle_discard()
1766 r10_bio->devs[disk].repl_bio = NULL; in raid10_handle_discard()
1776 r10_bio->devs[disk].bio = bio; in raid10_handle_discard()
1780 r10_bio->devs[disk].repl_bio = bio; in raid10_handle_discard()
1822 if (r10_bio->devs[disk].bio) { in raid10_handle_discard()
1828 r10_bio->devs[disk].bio = mbio; in raid10_handle_discard()
1829 r10_bio->devs[disk].devnum = disk; in raid10_handle_discard()
1836 if (r10_bio->devs[disk].repl_bio) { in raid10_handle_discard()
1842 r10_bio->devs[disk].repl_bio = rbio; in raid10_handle_discard()
1843 r10_bio->devs[disk].devnum = disk; in raid10_handle_discard()
2325 } else if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr, in end_sync_write()
2363 if (!r10_bio->devs[i].bio->bi_status) in sync_request_write()
2370 fbio = r10_bio->devs[i].bio; in sync_request_write()
2382 tbio = r10_bio->devs[i].bio; in sync_request_write()
2390 d = r10_bio->devs[i].devnum; in sync_request_write()
2392 if (!r10_bio->devs[i].bio->bi_status) { in sync_request_write()
2431 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; in sync_request_write()
2449 tbio = r10_bio->devs[i].repl_bio; in sync_request_write()
2452 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2453 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2487 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error()
2491 int dr = r10_bio->devs[0].devnum; in fix_recovery_read_error()
2492 int dw = r10_bio->devs[1].devnum; in fix_recovery_read_error()
2505 addr = r10_bio->devs[0].addr + sect; in fix_recovery_read_error()
2513 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2537 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2563 struct bio *wbio = r10_bio->devs[1].bio; in recovery_request_write()
2564 struct bio *wbio2 = r10_bio->devs[1].repl_bio; in recovery_request_write()
2586 d = r10_bio->devs[1].devnum; in recovery_request_write()
2631 int d = r10_bio->devs[slot].devnum; in fix_read_error()
2644 r10_bio->devs[slot].bio = IO_BLOCKED; in fix_read_error()
2658 d = r10_bio->devs[sl].devnum; in fix_read_error()
2664 r10_bio->devs[sl].addr + sect, in fix_read_error()
2668 r10_bio->devs[sl].addr + in fix_read_error()
2687 int dn = r10_bio->devs[slot].devnum; in fix_read_error()
2692 r10_bio->devs[slot].addr in fix_read_error()
2696 r10_bio->devs[slot].bio in fix_read_error()
2708 d = r10_bio->devs[sl].devnum; in fix_read_error()
2717 r10_bio->devs[sl].addr + in fix_read_error()
2740 d = r10_bio->devs[sl].devnum; in fix_read_error()
2749 r10_bio->devs[sl].addr + in fix_read_error()
2787 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2824 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); in narrow_write_error()
2848 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error()
2858 bio = r10_bio->devs[slot].bio; in handle_read_error()
2860 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2863 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2895 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2897 if (r10_bio->devs[m].bio == NULL || in handle_write_completed()
2898 r10_bio->devs[m].bio->bi_end_io == NULL) in handle_write_completed()
2900 if (!r10_bio->devs[m].bio->bi_status) { in handle_write_completed()
2903 r10_bio->devs[m].addr, in handle_write_completed()
2908 r10_bio->devs[m].addr, in handle_write_completed()
2913 if (r10_bio->devs[m].repl_bio == NULL || in handle_write_completed()
2914 r10_bio->devs[m].repl_bio->bi_end_io == NULL) in handle_write_completed()
2917 if (!r10_bio->devs[m].repl_bio->bi_status) { in handle_write_completed()
2920 r10_bio->devs[m].addr, in handle_write_completed()
2925 r10_bio->devs[m].addr, in handle_write_completed()
2934 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2935 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed()
2940 r10_bio->devs[m].addr, in handle_write_completed()
2949 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
2954 r10_bio->devs[m].addr, in handle_write_completed()
3084 bio = r10bio->devs[i].bio; in raid10_alloc_init_r10buf()
3088 bio = r10bio->devs[i].repl_bio; in raid10_alloc_init_r10buf()
3405 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3415 sector = r10_bio->devs[j].addr; in raid10_sync_request()
3429 bio = r10_bio->devs[0].bio; in raid10_sync_request()
3436 from_addr = r10_bio->devs[j].addr; in raid10_sync_request()
3444 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3447 to_addr = r10_bio->devs[k].addr; in raid10_sync_request()
3448 r10_bio->devs[0].devnum = d; in raid10_sync_request()
3449 r10_bio->devs[0].addr = from_addr; in raid10_sync_request()
3450 r10_bio->devs[1].devnum = i; in raid10_sync_request()
3451 r10_bio->devs[1].addr = to_addr; in raid10_sync_request()
3454 bio = r10_bio->devs[1].bio; in raid10_sync_request()
3464 r10_bio->devs[1].bio->bi_end_io = NULL; in raid10_sync_request()
3467 bio = r10_bio->devs[1].repl_bio; in raid10_sync_request()
3495 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3501 r10_bio->devs[k].addr, in raid10_sync_request()
3507 r10_bio->devs[k].addr, in raid10_sync_request()
3535 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { in raid10_sync_request()
3542 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3549 r10_bio->devs[0].bio->bi_opf in raid10_sync_request()
3603 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3608 if (r10_bio->devs[i].repl_bio) in raid10_sync_request()
3609 r10_bio->devs[i].repl_bio->bi_end_io = NULL; in raid10_sync_request()
3611 bio = r10_bio->devs[i].bio; in raid10_sync_request()
3617 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3648 bio = r10_bio->devs[i].repl_bio; in raid10_sync_request()
3651 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3665 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3666 if (r10_bio->devs[i].bio->bi_end_io) in raid10_sync_request()
3669 if (r10_bio->devs[i].repl_bio && in raid10_sync_request()
3670 r10_bio->devs[i].repl_bio->bi_end_io) in raid10_sync_request()
4258 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) in raid10_takeover_raid0() argument
4268 sector_div(size, devs); in raid10_takeover_raid0()
4799 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr in reshape_request()
4804 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; in reshape_request()
4840 int d = r10_bio->devs[s/2].devnum; in reshape_request()
4844 b = r10_bio->devs[s/2].repl_bio; in reshape_request()
4847 b = r10_bio->devs[s/2].bio; in reshape_request()
4853 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + in reshape_request()
4864 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in reshape_request()
4929 int d = r10_bio->devs[s/2].devnum; in reshape_request_write()
4933 b = r10_bio->devs[s/2].repl_bio; in reshape_request_write()
4936 b = r10_bio->devs[s/2].bio; in reshape_request_write()
4990 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO); in handle_reshape_read_error()
4997 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in handle_reshape_read_error()
5011 int d = r10b->devs[slot].devnum; in handle_reshape_read_error()
5019 addr = r10b->devs[slot].addr + idx * PAGE_SIZE; in handle_reshape_read_error()
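
Nearly all of these matches go through the devs[] flexible array member at the tail of struct r10bio (defined in drivers/md/raid10.h): one slot per copy, each recording which device holds the copy (devnum), the sector on that device (addr), and the in-flight bio (with repl_bio for replacement-device writes, or rdev on the read path). The two sizing idioms visible above, offsetof(struct r10bio, devs[conf->geo.raid_disks]) at line 109 and struct_size(r10b, devs, conf->copies) at line 4990, both compute the struct header plus the trailing slots. The only exceptions in the listing are lines 4258 and 4268, where devs is a function parameter of raid10_takeover_raid0(), not the struct member.

The following is a minimal userspace sketch of that allocation pattern, under assumed, simplified stand-in types (long long in place of the kernel's sector_t, most bookkeeping fields trimmed); it is not the kernel definition.

    /*
     * Sketch only: simplified stand-ins, NOT the kernel's struct r10bio.
     * Build with a C11 compiler (anonymous union, flexible array member).
     */
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct bio;                             /* opaque stand-ins for kernel types */
    struct md_rdev;

    struct r10bio {
            long long sector;               /* virtual sector of the array */
            int sectors;
            /* ... other bookkeeping fields elided ... */
            struct r10dev {
                    struct bio *bio;
                    union {
                            struct bio *repl_bio;   /* resync and writes */
                            struct md_rdev *rdev;   /* reads */
                    };
                    long long addr;         /* sector on this device */
                    int devnum;             /* index into conf->mirrors */
            } devs[];                       /* flexible array, one slot per copy */
    };

    int main(void)
    {
            int raid_disks = 4;

            /*
             * The sizing idiom from line 109: struct header plus
             * raid_disks trailing slots. GCC/Clang accept a non-constant
             * index inside offsetof(); the kernel's struct_size() at
             * line 4990 computes the same size with overflow checking.
             */
            size_t size = offsetof(struct r10bio, devs[raid_disks]);
            struct r10bio *r10_bio = calloc(1, size);

            if (!r10_bio)
                    return 1;

            /*
             * Fill each slot the way __raid10_find_phys() does (lines
             * 612-631): record which device holds this copy and at
             * which device-local sector it lives.
             */
            for (int slot = 0; slot < raid_disks; slot++) {
                    r10_bio->devs[slot].devnum = slot;
                    r10_bio->devs[slot].addr = 1000 + slot * 8;
            }

            printf("%zu bytes for %d slots\n", size, raid_disks);
            free(r10_bio);
            return 0;
    }

Note also that the slot bio pointers double as state markers: IO_BLOCKED and IO_MADE_GOOD are stored directly into devs[slot].bio/repl_bio (lines 534-536, 757, 2644), so teardown paths such as put_all_bios() (lines 264-268) filter those special values out rather than passing them to bio_put().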