Lines Matching refs:sector

427 		(unsigned long long)sh->sector); in remove_hash()
434 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
437 (unsigned long long)sh->sector); in insert_hash()
576 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) in init_stripe() argument
587 (unsigned long long)sector); in init_stripe()
592 sh->sector = sector; in init_stripe()
593 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
602 (unsigned long long)sh->sector, i, dev->toread, in init_stripe()
608 dev->sector = raid5_compute_blocknr(sh, i, previous); in init_stripe()
618 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, in __find_stripe() argument
623 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); in __find_stripe()
624 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
625 if (sh->sector == sector && sh->generation == generation) in __find_stripe()
627 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); in __find_stripe()
632 sector_t sector, short generation, int hash) in find_get_stripe() argument
637 sh = __find_stripe(conf, sector, generation); in find_get_stripe()
804 struct stripe_request_ctx *ctx, sector_t sector, in raid5_get_active_stripe() argument
808 int hash = stripe_hash_locks_hash(conf, sector); in raid5_get_active_stripe()
811 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); in raid5_get_active_stripe()
834 sh = find_get_stripe(conf, sector, conf->generation - previous, in raid5_get_active_stripe()
843 init_stripe(sh, sector, previous); in raid5_get_active_stripe()
922 tmp_sec = sh->sector; in stripe_add_to_batch_list()
925 head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf); in stripe_add_to_batch_list()
927 if (last_sh && head_sector == last_sh->sector) { in stripe_add_to_batch_list()
1044 if (da->sector > db->sector) in cmp_stripe()
1046 if (da->sector < db->sector) in cmp_stripe()
1111 static void defer_issue_bios(struct r5conf *conf, sector_t sector, in defer_issue_bios() argument
1121 ent->sector = sector; in defer_issue_bios()
1213 int bad = rdev_has_badblock(rdev, sh->sector, in ops_run_io()
1252 __func__, (unsigned long long)sh->sector, in ops_run_io()
1258 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1261 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1292 mddev_trace_remap(conf->mddev, bi, sh->dev[i].sector); in ops_run_io()
1308 __func__, (unsigned long long)sh->sector, in ops_run_io()
1314 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1317 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1332 mddev_trace_remap(conf->mddev, rbi, sh->dev[i].sector); in ops_run_io()
1340 bi->bi_opf, i, (unsigned long long)sh->sector); in ops_run_io()
1354 defer_issue_bios(conf, head_sh->sector, &pending_bios); in ops_run_io()
1359 unsigned int poff, sector_t sector, struct dma_async_tx_descriptor *tx, in async_copy_data() argument
1370 if (bio->bi_iter.bi_sector >= sector) in async_copy_data()
1371 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; in async_copy_data()
1373 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; in async_copy_data()
1429 (unsigned long long)sh->sector); in ops_complete_biofill()
1447 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in ops_complete_biofill()
1448 rbi2 = r5_next_bio(conf, rbi, dev->sector); in ops_complete_biofill()
1469 (unsigned long long)sh->sector); in ops_run_biofill()
1480 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in ops_run_biofill()
1483 dev->sector, tx, sh, 0); in ops_run_biofill()
1484 rbi = r5_next_bio(conf, rbi, dev->sector); in ops_run_biofill()
1512 (unsigned long long)sh->sector); in ops_complete_compute()
1565 __func__, (unsigned long long)sh->sector, target); in ops_run_compute5()
1670 __func__, (unsigned long long)sh->sector, target); in ops_run_compute6_1()
1726 __func__, (unsigned long long)sh->sector, target, target2); in ops_run_compute6_2()
1757 __func__, (unsigned long long)sh->sector, faila, failb); in ops_run_compute6_2()
1833 (unsigned long long)sh->sector); in ops_complete_prexor()
1859 (unsigned long long)sh->sector); in ops_run_prexor5()
1895 (unsigned long long)sh->sector); in ops_run_prexor6()
1916 (unsigned long long)sh->sector); in ops_run_biodrain()
1943 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in ops_run_biodrain()
1953 dev->sector, tx, sh, in ops_run_biodrain()
1962 wbi = r5_next_bio(conf, wbi, dev->sector); in ops_run_biodrain()
1989 (unsigned long long)sh->sector); in ops_complete_reconstruct()
2044 (unsigned long long)sh->sector); in ops_run_reconstruct5()
2137 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); in ops_run_reconstruct6()
2192 (unsigned long long)sh->sector); in ops_complete_check()
2214 (unsigned long long)sh->sector); in ops_run_check_p()
2248 (unsigned long long)sh->sector, checkp); in ops_run_check_pq()
2719 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_read_request()
2736 s = sh->sector + rdev->new_data_offset; in raid5_end_read_request()
2738 s = sh->sector + rdev->data_offset; in raid5_end_read_request()
2823 rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0))) in raid5_end_read_request()
2861 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_write_request()
2871 else if (rdev_has_badblock(rdev, sh->sector, in raid5_end_write_request()
2881 } else if (rdev_has_badblock(rdev, sh->sector, in raid5_end_write_request()
3152 sector_t new_sector = sh->sector, check; in raid5_compute_blocknr()
3258 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in raid5_compute_blocknr()
3428 __func__, (unsigned long long)sh->sector, in schedule_reconstruction()
3439 bi->bi_iter.bi_sector, sh->sector); in stripe_bio_overlaps()
3467 sector_t sector; in stripe_bio_overlaps() local
3476 sector = sh->dev[i].sector; in stripe_bio_overlaps()
3477 if (count == 0 || sector < first) in stripe_bio_overlaps()
3478 first = sector; in stripe_bio_overlaps()
3479 if (sector > last) in stripe_bio_overlaps()
3480 last = sector; in stripe_bio_overlaps()
3522 sector_t sector = sh->dev[dd_idx].sector; in __add_stripe_bio() local
3524 sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) && in __add_stripe_bio()
3525 bi && bi->bi_iter.bi_sector <= sector; in __add_stripe_bio()
3526 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) { in __add_stripe_bio()
3527 if (bio_end_sector(bi) >= sector) in __add_stripe_bio()
3528 sector = bio_end_sector(bi); in __add_stripe_bio()
3530 if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf)) in __add_stripe_bio()
3536 (*bip)->bi_iter.bi_sector, sh->sector, dd_idx, in __add_stripe_bio()
3537 sh->dev[dd_idx].sector); in __add_stripe_bio()
3604 sh->sector, in handle_failed_stripe()
3623 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3624 struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3639 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3640 struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3663 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3665 r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3714 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3722 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3742 && (rdev->recovery_offset <= sh->sector in want_replace()
3743 || rdev->mddev->resync_offset <= sh->sector)) in want_replace()
3835 sh->sector < sh->raid_conf->mddev->resync_offset) in need_this_block()
3886 (unsigned long long)sh->sector, disk_idx); in fetch_block()
3915 (unsigned long long)sh->sector, in fetch_block()
4013 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in handle_stripe_clean_event()
4014 wbi2 = r5_next_bio(conf, wbi, dev->sector); in handle_stripe_clean_event()
4110 (resync_offset < MaxSector && sh->sector >= resync_offset && in handle_stripe_dirtying()
4118 (unsigned long long)sh->sector); in handle_stripe_dirtying()
4147 (unsigned long long)sh->sector, sh->state, rmw, rcw); in handle_stripe_dirtying()
4152 sh->sector, rmw); in handle_stripe_dirtying()
4233 (unsigned long long)sh->sector, rcw, qread, in handle_stripe_dirtying()
4325 (unsigned long long) sh->sector, in handle_parity_checks5()
4326 (unsigned long long) sh->sector + in handle_parity_checks5()
4345 (unsigned long long) sh->sector); in handle_parity_checks5()
4489 (unsigned long long) sh->sector, in handle_parity_checks6()
4490 (unsigned long long) sh->sector + in handle_parity_checks6()
4521 (unsigned long long) sh->sector); in handle_parity_checks6()
4660 rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) && in analyse_stripe()
4661 !rdev_has_badblock(rdev, sh->sector, in analyse_stripe()
4675 is_bad = rdev_has_badblock(rdev, sh->sector, in analyse_stripe()
4701 else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset) in analyse_stripe()
4780 sh->sector >= conf->mddev->resync_offset || in analyse_stripe()
4931 (unsigned long long)sh->sector, sh->state, in handle_stripe()
5171 = raid5_get_active_stripe(conf, NULL, sh->sector, in handle_stripe()
5202 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
5237 if (!rdev_set_badblocks(rdev, sh->sector, in handle_stripe()
5244 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
5253 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
5316 sector_t sector = bio->bi_iter.bi_sector; in in_chunk_boundary() local
5322 ((sector & (chunk_sectors - 1)) + bio_sectors); in in_chunk_boundary()
5398 sector_t sector, end_sector; in raid5_read_one_chunk() local
5407 sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0, in raid5_read_one_chunk()
5409 end_sector = sector + bio_sectors(raid_bio); in raid5_read_one_chunk()
5411 if (r5c_big_stripe_cached(conf, sector)) in raid5_read_one_chunk()
5428 if (rdev_has_badblock(rdev, sector, bio_sectors(raid_bio))) { in raid5_read_one_chunk()
5440 align_bio->bi_iter.bi_sector = sector; in raid5_read_one_chunk()
5472 sector_t sector = raid_bio->bi_iter.bi_sector; in chunk_aligned_read() local
5474 unsigned sectors = chunk_sects - (sector & (chunk_sects-1)); in chunk_aligned_read()
5749 static bool ahead_of_reshape(struct mddev *mddev, sector_t sector, in ahead_of_reshape() argument
5752 return mddev->reshape_backwards ? sector < reshape_sector : in ahead_of_reshape()
5753 sector >= reshape_sector; in ahead_of_reshape()
5774 min_sector = min(min_sector, sh->dev[dd_idx].sector); in stripe_ahead_of_reshape()
5775 max_sector = max(max_sector, sh->dev[dd_idx].sector); in stripe_ahead_of_reshape()
5804 if (dev->sector < ctx->first_sector || in add_all_stripe_bios()
5805 dev->sector >= ctx->last_sector) in add_all_stripe_bios()
5829 if (dev->sector < ctx->first_sector || in add_all_stripe_bios()
5830 dev->sector >= ctx->last_sector) in add_all_stripe_bios()
5834 clear_bit((dev->sector - ctx->first_sector) >> in add_all_stripe_bios()
6042 sector_t sector; in raid5_bio_lowest_chunk_sector() local
6045 sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh); in raid5_bio_lowest_chunk_sector()
6046 chunk_offset = sector_div(sector, sectors_per_chunk); in raid5_bio_lowest_chunk_sector()
6586 sector_t sector, logical_sector, last_sector; in retry_aligned_read() local
6592 sector = raid5_compute_sector(conf, logical_sector, in retry_aligned_read()
6598 sector += RAID5_STRIPE_SECTORS(conf), in retry_aligned_read()
6605 sh = raid5_get_active_stripe(conf, NULL, sector, in retry_aligned_read()