Lines Matching refs:devs in drivers/md/raid10.c
108 int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]); in r10bio_pool_alloc()
166 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
173 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
180 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
187 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
215 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
216 bio_uninit(r10_bio->devs[j].bio); in r10buf_pool_alloc()
217 kfree(r10_bio->devs[j].bio); in r10buf_pool_alloc()
218 if (r10_bio->devs[j].repl_bio) in r10buf_pool_alloc()
219 bio_uninit(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
220 kfree(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
236 struct bio *bio = r10bio->devs[j].bio; in r10buf_pool_free()
245 bio = r10bio->devs[j].repl_bio; in r10buf_pool_free()
263 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios()
267 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
344 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
345 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
358 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
360 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
372 return r10_bio->devs[slot].devnum; in find_bio_disk()
384 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
505 r10_bio->devs[slot].bio = NULL; in raid10_end_write_request()
532 if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr, in raid10_end_write_request()
537 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; in raid10_end_write_request()
539 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
615 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
616 r10bio->devs[slot].addr = s; in __raid10_find_phys()
633 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
634 r10bio->devs[slot].addr = s; in __raid10_find_phys()
760 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
762 disk = r10_bio->devs[slot].devnum; in read_balance()
765 r10_bio->devs[slot].addr + sectors > in read_balance()
772 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
775 dev_sector = r10_bio->devs[slot].addr; in read_balance()
831 new_distance = r10_bio->devs[slot].addr; in read_balance()
833 new_distance = abs(r10_bio->devs[slot].addr - in read_balance()
1163 if (slot >= 0 && r10_bio->devs[slot].rdev) { in raid10_read_request()
1178 disk = r10_bio->devs[slot].devnum; in raid10_read_request()
1185 err_rdev = r10_bio->devs[slot].rdev; in raid10_read_request()
1225 r10_bio->devs[slot].bio = read_bio; in raid10_read_request()
1226 r10_bio->devs[slot].rdev = rdev; in raid10_read_request()
1228 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + in raid10_read_request()
1251 int devnum = r10_bio->devs[n_copy].devnum; in raid10_write_one_disk()
1259 r10_bio->devs[n_copy].repl_bio = mbio; in raid10_write_one_disk()
1261 r10_bio->devs[n_copy].bio = mbio; in raid10_write_one_disk()
1263 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + in raid10_write_one_disk()
1311 sector_t dev_sector = r10_bio->devs[i].addr; in wait_blocked_dev()
1420 int d = r10_bio->devs[i].devnum; in raid10_write_request()
1430 r10_bio->devs[i].bio = NULL; in raid10_write_request()
1431 r10_bio->devs[i].repl_bio = NULL; in raid10_write_request()
1439 sector_t dev_sector = r10_bio->devs[i].addr; in raid10_write_request()
1470 r10_bio->devs[i].bio = bio; in raid10_write_request()
1474 r10_bio->devs[i].repl_bio = bio; in raid10_write_request()
1500 if (r10_bio->devs[i].bio) in raid10_write_request()
1502 if (r10_bio->devs[i].repl_bio) in raid10_write_request()
1522 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * in __make_request()
1696 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks); in raid10_handle_discard()
1724 r10_bio->devs[disk].bio = NULL; in raid10_handle_discard()
1725 r10_bio->devs[disk].repl_bio = NULL; in raid10_handle_discard()
1735 r10_bio->devs[disk].bio = bio; in raid10_handle_discard()
1739 r10_bio->devs[disk].repl_bio = bio; in raid10_handle_discard()
1781 if (r10_bio->devs[disk].bio) { in raid10_handle_discard()
1787 r10_bio->devs[disk].bio = mbio; in raid10_handle_discard()
1788 r10_bio->devs[disk].devnum = disk; in raid10_handle_discard()
1795 if (r10_bio->devs[disk].repl_bio) { in raid10_handle_discard()
1801 r10_bio->devs[disk].repl_bio = rbio; in raid10_handle_discard()
1802 r10_bio->devs[disk].devnum = disk; in raid10_handle_discard()
2284 } else if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr, in end_sync_write()
2322 if (!r10_bio->devs[i].bio->bi_status) in sync_request_write()
2329 fbio = r10_bio->devs[i].bio; in sync_request_write()
2341 tbio = r10_bio->devs[i].bio; in sync_request_write()
2349 d = r10_bio->devs[i].devnum; in sync_request_write()
2351 if (!r10_bio->devs[i].bio->bi_status) { in sync_request_write()
2390 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; in sync_request_write()
2411 tbio = r10_bio->devs[i].repl_bio; in sync_request_write()
2414 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2415 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2417 d = r10_bio->devs[i].devnum; in sync_request_write()
2452 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error()
2456 int dr = r10_bio->devs[0].devnum; in fix_recovery_read_error()
2457 int dw = r10_bio->devs[1].devnum; in fix_recovery_read_error()
2470 addr = r10_bio->devs[0].addr + sect; in fix_recovery_read_error()
2478 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2502 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2528 struct bio *wbio = r10_bio->devs[1].bio; in recovery_request_write()
2529 struct bio *wbio2 = r10_bio->devs[1].repl_bio; in recovery_request_write()
2551 d = r10_bio->devs[1].devnum; in recovery_request_write()
2599 int d = r10_bio->devs[slot].devnum; in fix_read_error()
2612 r10_bio->devs[slot].bio = IO_BLOCKED; in fix_read_error()
2626 d = r10_bio->devs[sl].devnum; in fix_read_error()
2632 r10_bio->devs[sl].addr + sect, in fix_read_error()
2636 r10_bio->devs[sl].addr + in fix_read_error()
2655 int dn = r10_bio->devs[slot].devnum; in fix_read_error()
2660 r10_bio->devs[slot].addr in fix_read_error()
2664 r10_bio->devs[slot].bio in fix_read_error()
2676 d = r10_bio->devs[sl].devnum; in fix_read_error()
2685 r10_bio->devs[sl].addr + in fix_read_error()
2708 d = r10_bio->devs[sl].devnum; in fix_read_error()
2717 r10_bio->devs[sl].addr + in fix_read_error()
2755 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2792 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); in narrow_write_error()
2816 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error()
2826 bio = r10_bio->devs[slot].bio; in handle_read_error()
2828 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2831 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2863 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2865 if (r10_bio->devs[m].bio == NULL || in handle_write_completed()
2866 r10_bio->devs[m].bio->bi_end_io == NULL) in handle_write_completed()
2868 if (!r10_bio->devs[m].bio->bi_status) { in handle_write_completed()
2871 r10_bio->devs[m].addr, in handle_write_completed()
2876 r10_bio->devs[m].addr, in handle_write_completed()
2881 if (r10_bio->devs[m].repl_bio == NULL || in handle_write_completed()
2882 r10_bio->devs[m].repl_bio->bi_end_io == NULL) in handle_write_completed()
2885 if (!r10_bio->devs[m].repl_bio->bi_status) { in handle_write_completed()
2888 r10_bio->devs[m].addr, in handle_write_completed()
2893 r10_bio->devs[m].addr, in handle_write_completed()
2902 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2903 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed()
2908 r10_bio->devs[m].addr, in handle_write_completed()
2920 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
2925 r10_bio->devs[m].addr, in handle_write_completed()
3057 bio = r10bio->devs[i].bio; in raid10_alloc_init_r10buf()
3061 bio = r10bio->devs[i].repl_bio; in raid10_alloc_init_r10buf()
3378 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3388 sector = r10_bio->devs[j].addr; in raid10_sync_request()
3402 bio = r10_bio->devs[0].bio; in raid10_sync_request()
3409 from_addr = r10_bio->devs[j].addr; in raid10_sync_request()
3417 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3420 to_addr = r10_bio->devs[k].addr; in raid10_sync_request()
3421 r10_bio->devs[0].devnum = d; in raid10_sync_request()
3422 r10_bio->devs[0].addr = from_addr; in raid10_sync_request()
3423 r10_bio->devs[1].devnum = i; in raid10_sync_request()
3424 r10_bio->devs[1].addr = to_addr; in raid10_sync_request()
3427 bio = r10_bio->devs[1].bio; in raid10_sync_request()
3437 r10_bio->devs[1].bio->bi_end_io = NULL; in raid10_sync_request()
3440 bio = r10_bio->devs[1].repl_bio; in raid10_sync_request()
3468 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3474 r10_bio->devs[k].addr, in raid10_sync_request()
3480 r10_bio->devs[k].addr, in raid10_sync_request()
3508 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { in raid10_sync_request()
3515 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3522 r10_bio->devs[0].bio->bi_opf in raid10_sync_request()
3576 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3581 if (r10_bio->devs[i].repl_bio) in raid10_sync_request()
3582 r10_bio->devs[i].repl_bio->bi_end_io = NULL; in raid10_sync_request()
3584 bio = r10_bio->devs[i].bio; in raid10_sync_request()
3590 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3621 bio = r10_bio->devs[i].repl_bio; in raid10_sync_request()
3624 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3638 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3639 if (r10_bio->devs[i].bio->bi_end_io) in raid10_sync_request()
3642 if (r10_bio->devs[i].repl_bio && in raid10_sync_request()
3643 r10_bio->devs[i].repl_bio->bi_end_io) in raid10_sync_request()
4232 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) in raid10_takeover_raid0() argument
4242 sector_div(size, devs); in raid10_takeover_raid0()
4773 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr in reshape_request()
4778 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; in reshape_request()
4814 int d = r10_bio->devs[s/2].devnum; in reshape_request()
4818 b = r10_bio->devs[s/2].repl_bio; in reshape_request()
4821 b = r10_bio->devs[s/2].bio; in reshape_request()
4827 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + in reshape_request()
4838 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in reshape_request()
4904 int d = r10_bio->devs[s/2].devnum; in reshape_request_write()
4908 b = r10_bio->devs[s/2].repl_bio; in reshape_request_write()
4911 b = r10_bio->devs[s/2].bio; in reshape_request_write()
4966 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO); in handle_reshape_read_error()
4973 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in handle_reshape_read_error()
4987 int d = r10b->devs[slot].devnum; in handle_reshape_read_error()
4995 addr = r10b->devs[slot].addr + idx * PAGE_SIZE; in handle_reshape_read_error()
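
Nearly every match above dereferences devs[], the flexible array member at the tail of struct r10bio (defined in drivers/md/raid10.h): one slot per copy, each holding the bio, replacement bio, rdev, device sector, and device number for that mirror. The match at line 108 shows how the allocation is sized with offsetof over the trailing array. What follows is a minimal, self-contained userspace sketch of that pattern, not the kernel's actual layout; struct r10bio_sketch and its trimmed fields are stand-ins for illustration only.

/*
 * Sketch of the header-plus-trailing-array allocation pattern used by
 * r10bio_pool_alloc() (raid10.c:108). Hypothetical types; the real
 * definitions live in drivers/md/raid10.h.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct r10dev {
	void	*bio;		/* stand-in for struct bio * */
	void	*rdev;		/* stand-in for struct md_rdev * */
	void	*repl_bio;	/* bio for the replacement device */
	long	addr;		/* device sector for this copy */
	int	devnum;		/* which mirror addr applies to */
};

struct r10bio_sketch {
	long		sector;	/* virtual sector of the request */
	int		sectors;
	struct r10dev	devs[];	/* one slot per copy: flexible array */
};

int main(void)
{
	int raid_disks = 4;

	/*
	 * offsetof(type, devs[n]) yields the header size plus n trailing
	 * slots, mirroring raid10.c:108. A variable index inside offsetof
	 * is a GCC/Clang extension, which the kernel relies on here.
	 */
	size_t size = offsetof(struct r10bio_sketch, devs[raid_disks]);
	struct r10bio_sketch *r10_bio = calloc(1, size);

	if (!r10_bio)
		return 1;

	/* Each copy is then addressed as r10_bio->devs[slot], as in the
	 * matches above. */
	for (int slot = 0; slot < raid_disks; slot++)
		r10_bio->devs[slot].devnum = slot;

	printf("allocated %zu bytes for %d slots\n", size, raid_disks);
	free(r10_bio);
	return 0;
}

The match at line 4966 computes the same quantity with struct_size(r10b, devs, conf->copies), the kernel's overflow-checked helper for sizing a struct with a trailing flexible array, which is why newer call sites prefer it over open-coded offsetof arithmetic.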