Lines matching refs:devs (all hits are in drivers/md/raid10.c). Here devs[] is the flexible array of per-copy slots at the tail of struct r10bio: each slot carries a bio pointer, a repl_bio/rdev union, the device-local sector (addr), and the mirror index (devnum).
109 int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]); in r10bio_pool_alloc()
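Line 109 sizes struct r10bio so that its trailing devs[] flexible array gets one slot per raid disk. A minimal standalone sketch of that allocation idiom; the simplified struct below is illustrative, not the kernel's real layout:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stddef.h>

	/* Simplified stand-ins for the kernel types; fields are illustrative. */
	struct copy_slot {
		void *bio;		/* struct bio * in the kernel */
		long long addr;		/* sector on that member device */
		int devnum;		/* index into conf->mirrors[] */
	};

	struct r10bio_like {
		long long sector;
		int sectors;
		struct copy_slot devs[];	/* flexible array, sized at alloc time */
	};

	int main(void)
	{
		int raid_disks = 4;
		/* Same idiom as line 109: header plus raid_disks trailing
		 * slots in a single allocation. */
		size_t size = offsetof(struct r10bio_like, devs[raid_disks]);
		struct r10bio_like *r10 = calloc(1, size);

		if (!r10)
			return 1;
		r10->devs[raid_disks - 1].devnum = raid_disks - 1;
		printf("allocated %zu bytes for %d slots\n", size, raid_disks);
		free(r10);
		return 0;
	}

Line 5114 below computes the same thing with struct_size(), which additionally checks the multiplication for overflow.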
167 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
174 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
181 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
188 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
216 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
217 bio_uninit(r10_bio->devs[j].bio); in r10buf_pool_alloc()
218 kfree(r10_bio->devs[j].bio); in r10buf_pool_alloc()
219 if (r10_bio->devs[j].repl_bio) in r10buf_pool_alloc()
220 bio_uninit(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
221 kfree(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
237 struct bio *bio = r10bio->devs[j].bio; in r10buf_pool_free()
246 bio = r10bio->devs[j].repl_bio; in r10buf_pool_free()
264 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios()
268 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
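Lines 264 and 268 walk both pointers of every copy slot when dropping an r10_bio. A hedged sketch of that drop-both pattern, assuming sentinel pointer values like the IO_BLOCKED/IO_MADE_GOOD seen elsewhere in this listing (names below are illustrative):

	#include <stddef.h>

	/* Sentinel values modeled on raid10.h's IO_BLOCKED/IO_MADE_GOOD. */
	#define IO_BLOCKED	((void *)1)
	#define IO_MADE_GOOD	((void *)2)

	struct copy_slot { void *bio; void *repl_bio; };

	static int is_real_bio(void *bio)
	{
		return bio != NULL && bio != IO_BLOCKED && bio != IO_MADE_GOOD;
	}

	/* Drop every bio of every copy slot, skipping sentinels that were
	 * never real bios, and leave the slots cleared for reuse. */
	static void put_all_bios_like(struct copy_slot *devs, int copies,
				      void (*bio_put)(void *bio))
	{
		for (int i = 0; i < copies; i++) {
			if (is_real_bio(devs[i].bio))
				bio_put(devs[i].bio);
			devs[i].bio = NULL;
			if (is_real_bio(devs[i].repl_bio))
				bio_put(devs[i].repl_bio);
			devs[i].repl_bio = NULL;
		}
	}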
347 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
348 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
361 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
363 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
375 return r10_bio->devs[slot].devnum; in find_bio_disk()
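Lines 361-375 map a completed bio back to the copy slot it was issued for, distinguishing primary from replacement submissions. A sketch of that reverse lookup (the types and the -1 fallback are mine; the kernel treats a miss as a bug):

	#include <stdbool.h>
	#include <stddef.h>

	struct copy_slot { void *bio; void *repl_bio; int devnum; };

	/* Scan the slots for the bio that just completed; report which slot
	 * it occupied and whether it was the replacement submission. */
	static int find_bio_disk_like(const struct copy_slot *devs, int copies,
				      const void *bio, int *slotp,
				      bool *replacement)
	{
		for (int slot = 0; slot < copies; slot++) {
			if (devs[slot].bio == bio)
				*replacement = false;
			else if (devs[slot].repl_bio == bio)
				*replacement = true;
			else
				continue;
			if (slotp)
				*slotp = slot;
			return devs[slot].devnum;
		}
		return -1;	/* not found */
	}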
387 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
507 r10_bio->devs[slot].bio = NULL; in raid10_end_write_request()
539 r10_bio->devs[slot].addr, in raid10_end_write_request()
544 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; in raid10_end_write_request()
546 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
622 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
623 r10bio->devs[slot].addr = s; in __raid10_find_phys()
640 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
641 r10bio->devs[slot].addr = s; in __raid10_find_phys()
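Lines 622-641 are the heart of the geometry code: they fill devs[slot].devnum/addr with the physical location of each copy. A runnable model of the "near" layout only (the far and offset layouts add further slots per copy; all names are illustrative), assuming each logical chunk is replicated on near_copies consecutive devices:

	#include <stdio.h>

	struct geom { int raid_disks, near_copies, chunk_shift; };
	struct slot { int devnum; long long addr; };

	/* Map one logical sector to its near-layout copies: consecutive
	 * devices, same device-local sector, advancing one chunk when the
	 * device index wraps around. */
	static void find_phys_near(const struct geom *geo, long long lsector,
				   struct slot *slots)
	{
		long long chunk = lsector >> geo->chunk_shift;
		long long offset = lsector & ((1LL << geo->chunk_shift) - 1);
		long long vchunk = chunk * geo->near_copies;
		int dev = (int)(vchunk % geo->raid_disks);
		long long s = ((vchunk / geo->raid_disks) << geo->chunk_shift)
			      + offset;

		for (int n = 0; n < geo->near_copies; n++) {
			slots[n].devnum = dev;
			slots[n].addr = s;
			if (++dev >= geo->raid_disks) {
				dev = 0;
				s += 1LL << geo->chunk_shift;
			}
		}
	}

	int main(void)
	{
		struct geom geo = { .raid_disks = 4, .near_copies = 2,
				    .chunk_shift = 7 };
		struct slot slots[2];

		find_phys_near(&geo, 1000, slots);
		for (int n = 0; n < geo.near_copies; n++)
			printf("copy %d -> dev %d, sector %lld\n",
			       n, slots[n].devnum, slots[n].addr);
		return 0;
	}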
777 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
779 disk = r10_bio->devs[slot].devnum; in read_balance()
782 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
788 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
791 dev_sector = r10_bio->devs[slot].addr; in read_balance()
847 new_distance = r10_bio->devs[slot].addr; in read_balance()
849 new_distance = abs(r10_bio->devs[slot].addr - in read_balance()
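Lines 846-849 feed the read balancer: a candidate copy's cost is the gap between its target sector (devs[slot].addr) and the device's last known head_position, which lines 347-348 update after every I/O. A sketch of the pick-nearest loop, with device availability reduced to a single illustrative "usable" flag:

	#include <stdlib.h>

	struct mirror_like { long long head_position; int usable; };

	/* Among the readable copies, pick the slot whose device head is
	 * closest to the target sector. */
	static int read_balance_like(const struct mirror_like *mirrors,
				     const long long *addr, const int *devnum,
				     int copies)
	{
		int best = -1;
		long long best_dist = 0;

		for (int slot = 0; slot < copies; slot++) {
			int d = devnum[slot];
			long long dist;

			if (!mirrors[d].usable)
				continue;
			dist = llabs(addr[slot] - mirrors[d].head_position);
			if (best < 0 || dist < best_dist) {
				best = slot;
				best_dist = dist;
			}
		}
		return best;	/* slot to read from, or -1 if none usable */
	}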
1189 if (slot >= 0 && r10_bio->devs[slot].rdev) { in raid10_read_request()
1205 disk = r10_bio->devs[slot].devnum; in raid10_read_request()
1212 err_rdev = r10_bio->devs[slot].rdev; in raid10_read_request()
1251 r10_bio->devs[slot].bio = read_bio; in raid10_read_request()
1252 r10_bio->devs[slot].rdev = rdev; in raid10_read_request()
1254 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + in raid10_read_request()
1282 int devnum = r10_bio->devs[n_copy].devnum; in raid10_write_one_disk()
1297 r10_bio->devs[n_copy].repl_bio = mbio; in raid10_write_one_disk()
1299 r10_bio->devs[n_copy].bio = mbio; in raid10_write_one_disk()
1301 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + in raid10_write_one_disk()
1362 sector_t dev_sector = r10_bio->devs[i].addr; in wait_blocked_dev()
1474 int d = r10_bio->devs[i].devnum; in raid10_write_request()
1485 r10_bio->devs[i].bio = NULL; in raid10_write_request()
1486 r10_bio->devs[i].repl_bio = NULL; in raid10_write_request()
1494 sector_t dev_sector = r10_bio->devs[i].addr; in raid10_write_request()
1525 r10_bio->devs[i].bio = bio; in raid10_write_request()
1529 r10_bio->devs[i].repl_bio = bio; in raid10_write_request()
1555 if (r10_bio->devs[i].bio) in raid10_write_request()
1557 if (r10_bio->devs[i].repl_bio) in raid10_write_request()
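Lines 1474-1557 show the write path's two phases: first record per slot which of the primary and replacement devices should receive the write (lines 1485-1529, NULL meaning skip), then issue one clone per recorded target (lines 1555-1557, via raid10_write_one_disk above). A sketch of that plan-then-submit shape, with callbacks standing in for the kernel machinery:

	#include <stddef.h>

	struct copy_slot { void *bio; void *repl_bio; int devnum; };

	/* Phase 1: record, per copy slot, whether the primary and/or
	 * replacement device takes this write; NULL means the copy is
	 * skipped (e.g. faulty device). */
	static void plan_writes(struct copy_slot *devs, int copies,
				const int *dev_ok, const int *repl_ok,
				void *master_bio)
	{
		for (int i = 0; i < copies; i++) {
			devs[i].bio =
				dev_ok[devs[i].devnum] ? master_bio : NULL;
			devs[i].repl_bio =
				repl_ok[devs[i].devnum] ? master_bio : NULL;
		}
	}

	/* Phase 2: issue one clone for every recorded target. */
	static void submit_planned(const struct copy_slot *devs, int copies,
				   void (*write_one)(int slot, int replacement))
	{
		for (int i = 0; i < copies; i++) {
			if (devs[i].bio)
				write_one(i, 0);
			if (devs[i].repl_bio)
				write_one(i, 1);
		}
	}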
1577 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * in __make_request()
1760 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks); in raid10_handle_discard()
1789 r10_bio->devs[disk].bio = NULL; in raid10_handle_discard()
1790 r10_bio->devs[disk].repl_bio = NULL; in raid10_handle_discard()
1800 r10_bio->devs[disk].bio = bio; in raid10_handle_discard()
1804 r10_bio->devs[disk].repl_bio = bio; in raid10_handle_discard()
1847 if (r10_bio->devs[disk].bio) { in raid10_handle_discard()
1853 r10_bio->devs[disk].bio = mbio; in raid10_handle_discard()
1854 r10_bio->devs[disk].devnum = disk; in raid10_handle_discard()
1861 if (r10_bio->devs[disk].repl_bio) { in raid10_handle_discard()
1867 r10_bio->devs[disk].repl_bio = rbio; in raid10_handle_discard()
1868 r10_bio->devs[disk].devnum = disk; in raid10_handle_discard()
2368 r10_bio->devs[slot].addr, in end_sync_write()
2406 if (!r10_bio->devs[i].bio->bi_status) in sync_request_write()
2413 fbio = r10_bio->devs[i].bio; in sync_request_write()
2425 tbio = r10_bio->devs[i].bio; in sync_request_write()
2433 d = r10_bio->devs[i].devnum; in sync_request_write()
2435 if (!r10_bio->devs[i].bio->bi_status) { in sync_request_write()
2474 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; in sync_request_write()
2495 tbio = r10_bio->devs[i].repl_bio; in sync_request_write()
2498 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2499 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2501 d = r10_bio->devs[i].devnum; in sync_request_write()
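Lines 2406-2501 implement resync repair: the first copy that read back cleanly becomes the reference (fbio), and every other copy is rewritten if its read failed or its data differs. A sketch under the assumption that each copy's data sits in one flat buffer (the kernel compares per-page vectors):

	#include <string.h>

	/* Pick the first copy that read back without error. */
	static int pick_reference(const int *read_ok, int copies)
	{
		for (int i = 0; i < copies; i++)
			if (read_ok[i])
				return i;
		return -1;
	}

	/* Rewrite every other copy whose read failed or whose data differs
	 * from the reference copy. */
	static void repair_copies(char *const *pages, const int *read_ok,
				  int copies, size_t len,
				  void (*rewrite)(int slot))
	{
		int first = pick_reference(read_ok, copies);

		if (first < 0)
			return;		/* nothing readable to repair from */
		for (int i = 0; i < copies; i++) {
			if (i == first)
				continue;
			if (!read_ok[i] ||
			    memcmp(pages[first], pages[i], len) != 0)
				rewrite(i);
		}
	}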
2536 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error()
2540 int dr = r10_bio->devs[0].devnum; in fix_recovery_read_error()
2541 int dw = r10_bio->devs[1].devnum; in fix_recovery_read_error()
2554 addr = r10_bio->devs[0].addr + sect, in fix_recovery_read_error()
2562 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2586 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2624 d = r10_bio->devs[1].devnum; in recovery_request_write()
2625 wbio = r10_bio->devs[1].bio; in recovery_request_write()
2626 wbio2 = r10_bio->devs[1].repl_bio; in recovery_request_write()
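During recovery the slot array is repurposed: devs[0] describes the source being read and devs[1] the device being rebuilt, set up at lines 3560-3563 below and consumed at lines 2540-2541 and 2624-2626. A sketch of that fixed two-slot convention:

	struct copy_slot { int devnum; long long addr; };

	/* Recovery repurposes the first two slots: slot 0 is the healthy
	 * mirror read from, slot 1 the spare or replacement being rebuilt. */
	static void setup_recovery(struct copy_slot devs[2],
				   int from_dev, long long from_addr,
				   int to_dev, long long to_addr)
	{
		devs[0].devnum = from_dev;
		devs[0].addr = from_addr;
		devs[1].devnum = to_dev;
		devs[1].addr = to_addr;
	}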
2720 int d = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2741 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; in fix_read_error()
2759 d = r10_bio->devs[sl].devnum; in fix_read_error()
2764 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, in fix_read_error()
2769 r10_bio->devs[sl].addr + in fix_read_error()
2790 int dn = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2795 r10_bio->devs[r10_bio->read_slot].addr in fix_read_error()
2799 r10_bio->devs[r10_bio->read_slot].bio in fix_read_error()
2812 d = r10_bio->devs[sl].devnum; in fix_read_error()
2822 r10_bio->devs[sl].addr + in fix_read_error()
2846 d = r10_bio->devs[sl].devnum; in fix_read_error()
2856 r10_bio->devs[sl].addr + in fix_read_error()
2896 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2933 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); in narrow_write_error()
2957 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error()
2967 bio = r10_bio->devs[slot].bio; in handle_read_error()
2969 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2972 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
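Lines 777, 2741, and 2972 rely on a sentinel trick: storing a small fake pointer in devs[slot].bio marks a copy as not retryable without needing a separate flag. raid10.h defines IO_BLOCKED as ((struct bio *)1) and IO_MADE_GOOD as ((struct bio *)2). A sketch of the idea:

	/* Fake pointer values that can never collide with a real bio. */
	#define IO_BLOCKED	((void *)1)
	#define IO_MADE_GOOD	((void *)2)

	/* A slot holding IO_BLOCKED must not be read from again; a NULL
	 * slot or a real bio is fair game for the balancer. */
	static int slot_is_readable(void *bio)
	{
		return bio != IO_BLOCKED;
	}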
3000 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
3002 if (r10_bio->devs[m].bio == NULL || in handle_write_completed()
3003 r10_bio->devs[m].bio->bi_end_io == NULL) in handle_write_completed()
3005 if (!r10_bio->devs[m].bio->bi_status) { in handle_write_completed()
3008 r10_bio->devs[m].addr, in handle_write_completed()
3013 r10_bio->devs[m].addr, in handle_write_completed()
3018 if (r10_bio->devs[m].repl_bio == NULL || in handle_write_completed()
3019 r10_bio->devs[m].repl_bio->bi_end_io == NULL) in handle_write_completed()
3022 if (!r10_bio->devs[m].repl_bio->bi_status) { in handle_write_completed()
3025 r10_bio->devs[m].addr, in handle_write_completed()
3030 r10_bio->devs[m].addr, in handle_write_completed()
3039 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
3040 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed()
3045 r10_bio->devs[m].addr, in handle_write_completed()
3057 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
3062 r10_bio->devs[m].addr, in handle_write_completed()
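Lines 3000-3063 do post-write bookkeeping per slot: a write that "made good" over a known bad block clears the bad-block record, a NULL bio is skipped, and a failed write records fresh bad blocks or, if that is impossible, takes the device out. A sketch with illustrative callbacks standing in for the md badblocks API:

	#include <stddef.h>

	#define IO_MADE_GOOD	((void *)2)

	struct copy_slot { void *bio; long long addr; int devnum; };

	/* Per-slot accounting after writes complete. */
	static void account_writes(const struct copy_slot *devs, int copies,
				   int sectors,
				   int (*bio_failed)(void *bio),
				   void (*clear_bad)(int dev, long long s, int n),
				   int (*record_bad)(int dev, long long s, int n),
				   void (*fail_device)(int dev))
	{
		for (int m = 0; m < copies; m++) {
			if (devs[m].bio == IO_MADE_GOOD)
				clear_bad(devs[m].devnum, devs[m].addr,
					  sectors);
			else if (devs[m].bio && bio_failed(devs[m].bio) &&
				 !record_bad(devs[m].devnum, devs[m].addr,
					     sectors))
				fail_device(devs[m].devnum);
		}
	}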
3194 bio = r10bio->devs[i].bio; in raid10_alloc_init_r10buf()
3198 bio = r10bio->devs[i].repl_bio; in raid10_alloc_init_r10buf()
3516 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3527 sector = r10_bio->devs[j].addr; in raid10_sync_request()
3541 bio = r10_bio->devs[0].bio; in raid10_sync_request()
3548 from_addr = r10_bio->devs[j].addr; in raid10_sync_request()
3556 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3559 to_addr = r10_bio->devs[k].addr; in raid10_sync_request()
3560 r10_bio->devs[0].devnum = d; in raid10_sync_request()
3561 r10_bio->devs[0].addr = from_addr; in raid10_sync_request()
3562 r10_bio->devs[1].devnum = i; in raid10_sync_request()
3563 r10_bio->devs[1].addr = to_addr; in raid10_sync_request()
3566 bio = r10_bio->devs[1].bio; in raid10_sync_request()
3576 r10_bio->devs[1].bio->bi_end_io = NULL; in raid10_sync_request()
3579 bio = r10_bio->devs[1].repl_bio; in raid10_sync_request()
3608 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3614 r10_bio->devs[k].addr, in raid10_sync_request()
3620 r10_bio->devs[k].addr, in raid10_sync_request()
3644 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { in raid10_sync_request()
3651 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3658 r10_bio->devs[0].bio->bi_opf in raid10_sync_request()
3711 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3716 if (r10_bio->devs[i].repl_bio) in raid10_sync_request()
3717 r10_bio->devs[i].repl_bio->bi_end_io = NULL; in raid10_sync_request()
3719 bio = r10_bio->devs[i].bio; in raid10_sync_request()
3727 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3760 bio = r10_bio->devs[i].repl_bio; in raid10_sync_request()
3763 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3778 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3779 if (r10_bio->devs[i].bio->bi_end_io) in raid10_sync_request()
3782 if (r10_bio->devs[i].repl_bio && in raid10_sync_request()
3783 r10_bio->devs[i].repl_bio->bi_end_io) in raid10_sync_request()
4363 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) in raid10_takeover_raid0() argument
4373 sector_div(size, devs); in raid10_takeover_raid0()
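Line 4373 uses sector_div(), the kernel's divide-in-place helper for sector_t: it divides its first argument by the second and returns the remainder, so 32-bit builds avoid a direct 64-bit '/'. A userspace model of those semantics (a GNU C statement expression mirroring how the macro behaves, not its per-architecture definition):

	#include <stdio.h>

	/* Divide-in-place with the remainder returned. */
	#define sector_div_like(n, base) ({			\
		unsigned long long __rem = (n) % (base);	\
		(n) /= (base);					\
		(unsigned int)__rem;				\
	})

	int main(void)
	{
		unsigned long long size = 1000003;	/* raid0 total sectors */
		int devs = 4;				/* member devices */
		unsigned int rem = sector_div_like(size, devs);

		/* size now holds the per-device share, as at line 4373 */
		printf("per-device size %llu, remainder %u\n", size, rem);
		return 0;
	}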
4915 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr in reshape_request()
4920 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; in reshape_request()
4957 int d = r10_bio->devs[s/2].devnum; in reshape_request()
4961 b = r10_bio->devs[s/2].repl_bio; in reshape_request()
4964 b = r10_bio->devs[s/2].bio; in reshape_request()
4970 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + in reshape_request()
4981 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in reshape_request()
5048 int d = r10_bio->devs[s/2].devnum; in reshape_request_write()
5053 b = r10_bio->devs[s/2].repl_bio; in reshape_request_write()
5056 b = r10_bio->devs[s/2].bio; in reshape_request_write()
5114 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO); in handle_reshape_read_error()
5121 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in handle_reshape_read_error()
5136 int d = r10b->devs[slot].devnum; in handle_reshape_read_error()
5144 addr = r10b->devs[slot].addr + idx * PAGE_SIZE; in handle_reshape_read_error()
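Lines 5114-5144 retry a failed reshape read: a scratch r10bio, sized for conf->copies slots via struct_size() (an overflow-checked form of the offsetof idiom from line 109), holds the old-geometry mapping, and each copy is tried until one read succeeds. A sketch with an illustrative read_page callback:

	#include <stdlib.h>
	#include <stddef.h>

	struct copy_slot { int devnum; long long addr; };
	struct r10bio_like {
		long long sector;
		struct copy_slot devs[];
	};

	/* Scratch object sized for 'copies' slots, like line 5114. */
	static struct r10bio_like *alloc_scratch(int copies)
	{
		return malloc(offsetof(struct r10bio_like, devs[copies]));
	}

	/* Try each copy in turn until one read succeeds. */
	static int retry_read(const struct r10bio_like *r10b, int copies,
			      long long idx, long long page_size,
			      int (*read_page)(int dev, long long addr))
	{
		for (int slot = 0; slot < copies; slot++) {
			long long addr = r10b->devs[slot].addr
					 + idx * page_size;

			if (read_page(r10b->devs[slot].devnum, addr))
				return 0;	/* one good copy is enough */
		}
		return -1;	/* every copy failed: fatal for the reshape */
	}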