Lines Matching refs:dio

365 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
366 static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map);
1556 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) in submit_flush_bio() argument
1562 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); in submit_flush_bio()
1588 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio) in do_endio_flush() argument
1590 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); in do_endio_flush()
1592 if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic))) in do_endio_flush()
1593 submit_flush_bio(ic, dio); in do_endio_flush()
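
The pair above implements FUA emulation: dm_integrity_map() latches REQ_FUA into dio->fua (line 1982 below), and do_endio_flush() converts that flag into an explicit flush once both data and metadata have completed. A hedged user-space model of the decision, with illustrative names only:

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of do_endio_flush() (line 1592): the extra flush is issued
     * only when the bio asked for FUA, completed without error, and the
     * target has not failed; otherwise the bio ends immediately. */
    struct model_dio { bool fua; int bi_status; };

    void model_do_endio_flush(struct model_dio *dio, bool target_failed)
    {
        if (dio->fua && dio->bi_status == 0 && !target_failed)
            puts("submit flush; end the bio from the flush completion");
        else
            puts("end the bio now");
    }
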
1598 static void dec_in_flight(struct dm_integrity_io *dio) in dec_in_flight() argument
1600 if (atomic_dec_and_test(&dio->in_flight)) { in dec_in_flight()
1601 struct dm_integrity_c *ic = dio->ic; in dec_in_flight()
1604 remove_range(ic, &dio->range); in dec_in_flight()
1606 if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD)) in dec_in_flight()
1609 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); in dec_in_flight()
1610 if (unlikely(dio->bi_status) && !bio->bi_status) in dec_in_flight()
1611 bio->bi_status = dio->bi_status; in dec_in_flight()
1612 if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) { in dec_in_flight()
1613 dio->range.logical_sector += dio->range.n_sectors; in dec_in_flight()
1614 bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); in dec_in_flight()
1615 INIT_WORK(&dio->work, integrity_bio_wait); in dec_in_flight()
1616 queue_work(ic->offload_wq, &dio->work); in dec_in_flight()
1619 do_endio_flush(ic, dio); in dec_in_flight()
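
dec_in_flight() pairs with the ATOMIC_INIT(2)/ATOMIC_INIT(1) assignments at lines 2350 and 2369: the data I/O and the metadata worker each hold a reference, and whichever drops the last one unlocks the range and completes (or, per lines 1613-1616, advances and requeues the bio when it was larger than the locked range). A minimal user-space model of the last-reference pattern, assuming C11 atomics and illustrative names:

    #include <stdatomic.h>
    #include <stdio.h>

    struct model_io {
        atomic_int in_flight;   /* starts at the number of concurrent users */
        int status;             /* first error wins, like dio->bi_status */
    };

    /* atomic_fetch_sub returns the previous value, so seeing 1 here is
     * the equivalent of atomic_dec_and_test() returning true. */
    void model_dec_in_flight(struct model_io *io)
    {
        if (atomic_fetch_sub(&io->in_flight, 1) == 1)
            printf("last ref: unlock range, complete, status=%d\n", io->status);
    }

    int main(void)
    {
        struct model_io io = { .in_flight = 2, .status = 0 };
        model_dec_in_flight(&io);   /* e.g. the metadata worker finishes */
        model_dec_in_flight(&io);   /* e.g. the data I/O finishes -> completes */
        return 0;
    }
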
1625 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); in integrity_end_io() local
1627 dm_bio_restore(&dio->bio_details, bio); in integrity_end_io()
1631 if (dio->completion) in integrity_end_io()
1632 complete(dio->completion); in integrity_end_io()
1634 dec_in_flight(dio); in integrity_end_io()
1690 static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum) in integrity_recheck() argument
1692 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); in integrity_recheck()
1693 struct dm_integrity_c *ic = dio->ic; in integrity_recheck()
1699 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); in integrity_recheck()
1700 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, in integrity_recheck()
1701 &dio->metadata_offset); in integrity_recheck()
1703 logical_sector = dio->range.logical_sector; in integrity_recheck()
1707 __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) { in integrity_recheck()
1727 alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT); in integrity_recheck()
1736 dio->bi_status = errno_to_blk_status(r); in integrity_recheck()
1741 r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block, in integrity_recheck()
1742 &dio->metadata_offset, ic->tag_size, TAG_CMP); in integrity_recheck()
1752 dio->bi_status = errno_to_blk_status(r); in integrity_recheck()
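
Line 1727 ORs together the start sector, the bio size, and the page size in sectors; the statement that follows it in the source (not matched by this search, since it does not mention dio) isolates the lowest set bit, yielding the largest power-of-two chunk that divides all three, which integrity_recheck() uses as its re-read granularity. A standalone illustration, assuming 4 KiB pages and 512-byte sectors:

    #include <stdint.h>
    #include <stdio.h>

    /* Largest power-of-two sector count dividing the start sector, the
     * I/O size, and one page worth of sectors. */
    uint64_t recheck_chunk_sectors(uint64_t start_sector, uint64_t bio_sectors)
    {
        uint64_t x = start_sector | bio_sectors | (4096 / 512);
        return x & (~x + 1);   /* x & -x: isolate the lowest set bit */
    }

    int main(void)
    {
        /* start 24, 16 sectors, 8 sectors/page -> 8-sector chunks */
        printf("%llu\n", (unsigned long long)recheck_chunk_sectors(24, 16));
        return 0;
    }
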
1771 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); in integrity_metadata() local
1772 struct dm_integrity_c *ic = dio->ic; in integrity_metadata()
1780 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); in integrity_metadata()
1790 if (likely(dio->op != REQ_OP_DISCARD)) in integrity_metadata()
1804 if (unlikely(dio->op == REQ_OP_DISCARD)) { in integrity_metadata()
1805 unsigned int bi_size = dio->bio_details.bi_iter.bi_size; in integrity_metadata()
1815 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, in integrity_metadata()
1831 sector = dio->range.logical_sector; in integrity_metadata()
1832 sectors_to_process = dio->range.n_sectors; in integrity_metadata()
1834 __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) { in integrity_metadata()
1852 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, in integrity_metadata()
1853 checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE); in integrity_metadata()
1858 integrity_recheck(dio, checksums_onstack); in integrity_metadata()
1877 struct bio_integrity_payload *bip = dio->bio_details.bi_integrity; in integrity_metadata()
1882 unsigned int data_to_process = dio->range.n_sectors; in integrity_metadata()
1894 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset, in integrity_metadata()
1895 this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE); in integrity_metadata()
1905 dec_in_flight(dio); in integrity_metadata()
1908 dio->bi_status = errno_to_blk_status(r); in integrity_metadata()
1909 dec_in_flight(dio); in integrity_metadata()
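
integrity_metadata() is the worker that walks the bio segment by segment (line 1834), computes one checksum per block into a scratch buffer, and hands batches to dm_integrity_rw_tag() — comparing on reads (TAG_CMP), storing on writes (TAG_WRITE). A self-contained model of the verify side; the trivial XOR below stands in for the target's configured internal hash and all sizes are assumptions:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SECTOR_SIZE 512
    #define TAG_SIZE 4

    /* Stand-in for integrity_sector_checksum(). */
    static void sector_checksum(const uint8_t *data, uint8_t tag[TAG_SIZE])
    {
        memset(tag, 0, TAG_SIZE);
        for (size_t i = 0; i < SECTOR_SIZE; i++)
            tag[i % TAG_SIZE] ^= data[i];
    }

    /* 0 if every sector's computed tag matches the stored tag, -1 on the
     * first mismatch (the kernel then tries integrity_recheck(), line
     * 1858, before failing the bio with a checksum error). */
    static int verify_tags(const uint8_t *data, const uint8_t *tags, size_t n_sectors)
    {
        uint8_t computed[TAG_SIZE];
        for (size_t s = 0; s < n_sectors; s++) {
            sector_checksum(data + s * SECTOR_SIZE, computed);
            if (memcmp(computed, tags + s * TAG_SIZE, TAG_SIZE) != 0)
                return -1;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t data[2 * SECTOR_SIZE] = { 1, 2, 3 };
        uint8_t tags[2 * TAG_SIZE];
        for (size_t s = 0; s < 2; s++)
            sector_checksum(data + s * SECTOR_SIZE, tags + s * TAG_SIZE);
        printf("verify: %d\n", verify_tags(data, tags, 2));   /* -> 0 */
        return 0;
    }
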
1944 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); in dm_integrity_map() local
1949 dio->ic = ic; in dm_integrity_map()
1950 dio->bi_status = 0; in dm_integrity_map()
1951 dio->op = bio_op(bio); in dm_integrity_map()
1955 dio->integrity_payload = NULL; in dm_integrity_map()
1956 dio->integrity_payload_from_mempool = false; in dm_integrity_map()
1957 dio->integrity_range_locked = false; in dm_integrity_map()
1958 return dm_integrity_map_inline(dio, true); in dm_integrity_map()
1961 if (unlikely(dio->op == REQ_OP_DISCARD)) { in dm_integrity_map()
1977 submit_flush_bio(ic, dio); in dm_integrity_map()
1981 dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); in dm_integrity_map()
1982 dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA; in dm_integrity_map()
1983 if (unlikely(dio->fua)) { in dm_integrity_map()
1990 if (unlikely(!dm_integrity_check_limits(ic, dio->range.logical_sector, bio))) in dm_integrity_map()
2015 if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ)) in dm_integrity_map()
2018 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); in dm_integrity_map()
2019 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); in dm_integrity_map()
2022 dm_integrity_map_continue(dio, true); in dm_integrity_map()
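
Lines 2018-2019 translate the target-relative sector into an (area, offset) pair and from there into a metadata sector plus byte offset. With the default interleaved layout each area carries its own metadata region, so the area is just the high bits of the sector. A hedged sketch of that first step, assuming a 2^15-sector interleave (the superblock stores the real value):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;
    #define LOG2_INTERLEAVE_SECTORS 15   /* assumed for illustration */

    static void model_get_area_and_offset(sector_t data_sector,
                                          sector_t *area, sector_t *offset)
    {
        *area = data_sector >> LOG2_INTERLEAVE_SECTORS;
        *offset = data_sector & ((1ULL << LOG2_INTERLEAVE_SECTORS) - 1);
    }

    int main(void)
    {
        sector_t area, offset;
        model_get_area_and_offset(100000, &area, &offset);
        printf("area=%llu offset=%llu\n",   /* -> area=3 offset=1696 */
               (unsigned long long)area, (unsigned long long)offset);
        return 0;
    }
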
2026 static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio, in __journal_read_write() argument
2029 struct dm_integrity_c *ic = dio->ic; in __journal_read_write()
2033 logical_sector = dio->range.logical_sector; in __journal_read_write()
2034 n_sectors = dio->range.n_sectors; in __journal_read_write()
2045 if (likely(dio->op == REQ_OP_WRITE)) in __journal_read_write()
2051 if (unlikely(dio->op == REQ_OP_READ)) { in __journal_read_write()
2102 if (likely(dio->op == REQ_OP_WRITE)) in __journal_read_write()
2110 } else if (likely(dio->op == REQ_OP_WRITE)) in __journal_read_write()
2114 if (likely(dio->op == REQ_OP_WRITE)) { in __journal_read_write()
2152 if (unlikely(dio->op == REQ_OP_READ)) in __journal_read_write()
2157 if (likely(dio->op == REQ_OP_WRITE)) { in __journal_read_write()
2166 remove_range(ic, &dio->range); in __journal_read_write()
2171 dio->range.logical_sector = logical_sector; in __journal_read_write()
2172 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); in __journal_read_write()
2173 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); in __journal_read_write()
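
__journal_read_write() services the I/O entirely through the journal: writes copy the data into in-memory journal entries together with the freshly computed tag, and reads that hit journaled blocks are copied back out; the commit path later moves entries to their final location. A toy write-ahead sketch with assumed, heavily simplified structures (the real entry also carries per-sector layout and optional encryption):

    #include <stdint.h>
    #include <string.h>

    #define SECTOR 512

    struct model_journal_entry {
        uint64_t sector;          /* final on-disk destination */
        uint8_t  data[SECTOR];
        uint8_t  tag[4];
    };

    /* Write path: the payload lands in the journal first; the bio can
     * complete once the journal section commits, and a background copy
     * later writes data+tag to their final locations. */
    void model_journal_write(struct model_journal_entry *je, uint64_t sector,
                             const void *data, const uint8_t tag[4])
    {
        je->sector = sector;
        memcpy(je->data, data, SECTOR);
        memcpy(je->tag, tag, 4);
    }
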
2180 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map) in dm_integrity_map_continue() argument
2182 struct dm_integrity_c *ic = dio->ic; in dm_integrity_map_continue()
2183 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); in dm_integrity_map_continue()
2189 bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ; in dm_integrity_map_continue()
2191 if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D') in dm_integrity_map_continue()
2195 INIT_WORK(&dio->work, integrity_bio_wait); in dm_integrity_map_continue()
2196 queue_work(ic->offload_wq, &dio->work); in dm_integrity_map_continue()
2208 dio->range.n_sectors = bio_sectors(bio); in dm_integrity_map_continue()
2210 if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) { in dm_integrity_map_continue()
2211 if (dio->op == REQ_OP_WRITE) { in dm_integrity_map_continue()
2215 dio->range.n_sectors = min(dio->range.n_sectors, in dm_integrity_map_continue()
2217 if (unlikely(!dio->range.n_sectors)) { in dm_integrity_map_continue()
2223 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block; in dm_integrity_map_continue()
2241 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i); in dm_integrity_map_continue()
2255 } while ((i += ic->sectors_per_block) < dio->range.n_sectors); in dm_integrity_map_continue()
2262 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); in dm_integrity_map_continue()
2264 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector)) in dm_integrity_map_continue()
2265 dio->range.n_sectors = next_sector - dio->range.logical_sector; in dm_integrity_map_continue()
2270 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) { in dm_integrity_map_continue()
2271 if (!test_journal_node(ic, jp, dio->range.logical_sector + i)) in dm_integrity_map_continue()
2274 dio->range.n_sectors = i; in dm_integrity_map_continue()
2278 if (unlikely(!add_new_range(ic, &dio->range, true))) { in dm_integrity_map_continue()
2287 INIT_WORK(&dio->work, integrity_bio_wait); in dm_integrity_map_continue()
2288 queue_work(ic->wait_wq, &dio->work); in dm_integrity_map_continue()
2292 dio->range.n_sectors = ic->sectors_per_block; in dm_integrity_map_continue()
2293 wait_and_add_new_range(ic, &dio->range); in dm_integrity_map_continue()
2303 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); in dm_integrity_map_continue()
2305 remove_range_unlocked(ic, &dio->range); in dm_integrity_map_continue()
2310 if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) { in dm_integrity_map_continue()
2314 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); in dm_integrity_map_continue()
2316 unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) { in dm_integrity_map_continue()
2317 remove_range_unlocked(ic, &dio->range); in dm_integrity_map_continue()
2336 if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) { in dm_integrity_map_continue()
2337 if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, in dm_integrity_map_continue()
2338 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) { in dm_integrity_map_continue()
2341 bbs = sector_to_bitmap_block(ic, dio->range.logical_sector); in dm_integrity_map_continue()
2350 dio->in_flight = (atomic_t)ATOMIC_INIT(2); in dm_integrity_map_continue()
2354 dio->completion = &read_comp; in dm_integrity_map_continue()
2356 dio->completion = NULL; in dm_integrity_map_continue()
2358 dm_bio_record(&dio->bio_details, bio); in dm_integrity_map_continue()
2363 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT; in dm_integrity_map_continue()
2365 if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) { in dm_integrity_map_continue()
2366 integrity_metadata(&dio->work); in dm_integrity_map_continue()
2369 dio->in_flight = (atomic_t)ATOMIC_INIT(1); in dm_integrity_map_continue()
2370 dio->completion = NULL; in dm_integrity_map_continue()
2382 dio->range.logical_sector + dio->range.n_sectors > recalc_sector) in dm_integrity_map_continue()
2385 if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector, in dm_integrity_map_continue()
2386 dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) in dm_integrity_map_continue()
2391 integrity_metadata(&dio->work); in dm_integrity_map_continue()
2394 dec_in_flight(dio); in dm_integrity_map_continue()
2396 INIT_WORK(&dio->work, integrity_metadata); in dm_integrity_map_continue()
2397 queue_work(ic->metadata_wq, &dio->work); in dm_integrity_map_continue()
2403 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry))) in dm_integrity_map_continue()
2406 do_endio_flush(ic, dio); in dm_integrity_map_continue()
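
Before any data movement, dm_integrity_map_continue() carves out and locks the logical range it will touch (line 2278); on conflict the bio is queued to wait_wq (lines 2287-2288) rather than blocking the map path. A minimal single-threaded model of the overlap test behind add_new_range(), with assumed structures — the kernel version additionally manages waiters under a spinlock:

    #include <stdint.h>

    struct range { uint64_t start, n; };

    static struct range locked[16];
    static int n_locked;

    static int ranges_overlap(struct range a, struct range b)
    {
        return a.start < b.start + b.n && b.start < a.start + a.n;
    }

    /* 1 on success; 0 means the caller must wait and retry, as
     * wait_and_add_new_range() (line 2293) does. */
    int model_add_new_range(struct range r)
    {
        for (int i = 0; i < n_locked; i++)
            if (ranges_overlap(locked[i], r))
                return 0;
        locked[n_locked++] = r;
        return 1;
    }
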
2409 static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map) in dm_integrity_map_inline() argument
2411 struct dm_integrity_c *ic = dio->ic; in dm_integrity_map_inline()
2412 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); in dm_integrity_map_inline()
2428 if (!dio->integrity_payload) { in dm_integrity_map_inline()
2430 dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block); in dm_integrity_map_inline()
2433 dio->payload_len += extra_size; in dm_integrity_map_inline()
2434 dio->integrity_payload = kmalloc(dio->payload_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | _… in dm_integrity_map_inline()
2435 if (unlikely(!dio->integrity_payload)) { in dm_integrity_map_inline()
2437 if (dio->payload_len > x_size) { in dm_integrity_map_inline()
2450 dio->range.logical_sector = bio->bi_iter.bi_sector; in dm_integrity_map_inline()
2451 dio->range.n_sectors = bio_sectors(bio); in dm_integrity_map_inline()
2463 if (likely(dio->range.logical_sector + dio->range.n_sectors <= recalc_sector)) in dm_integrity_map_inline()
2468 if (dio->range.logical_sector + dio->range.n_sectors <= recalc_sector) in dm_integrity_map_inline()
2470 if (unlikely(!add_new_range(ic, &dio->range, true))) { in dm_integrity_map_inline()
2473 INIT_WORK(&dio->work, integrity_bio_wait); in dm_integrity_map_inline()
2474 queue_work(ic->wait_wq, &dio->work); in dm_integrity_map_inline()
2477 wait_and_add_new_range(ic, &dio->range); in dm_integrity_map_inline()
2479 dio->integrity_range_locked = true; in dm_integrity_map_inline()
2484 if (unlikely(!dio->integrity_payload)) { in dm_integrity_map_inline()
2485 dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO)); in dm_integrity_map_inline()
2486 dio->integrity_payload_from_mempool = true; in dm_integrity_map_inline()
2489 dio->bio_details.bi_iter = bio->bi_iter; in dm_integrity_map_inline()
2504 if (dio->op == REQ_OP_WRITE) { in dm_integrity_map_inline()
2506 while (dio->bio_details.bi_iter.bi_size) { in dm_integrity_map_inline()
2507 struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter); in dm_integrity_map_inline()
2510 memset(dio->integrity_payload + pos + ic->tag_size, 0, ic->tuple_size - ic->tag_size); in dm_integrity_map_inline()
2511 …integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, dio->integrity_payload + po… in dm_integrity_map_inline()
2514 bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_map_inline()
2518 ret = bio_integrity_add_page(bio, virt_to_page(dio->integrity_payload), in dm_integrity_map_inline()
2519 dio->payload_len, offset_in_page(dio->integrity_payload)); in dm_integrity_map_inline()
2520 if (unlikely(ret != dio->payload_len)) { in dm_integrity_map_inline()
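
Lines 2428-2437 size and allocate the inline payload — one tuple per data block — kmalloc'd opportunistically and falling back to the recheck mempool (lines 2484-2486) when that fails. A quick arithmetic check of the sizing on line 2430, under assumed geometry:

    #include <assert.h>

    int main(void)
    {
        /* payload_len = tuple_size * (bio_sectors >> log2_sectors_per_block);
         * the values below are illustrative, not the target's actual config. */
        unsigned tuple_size = 16, log2_sectors_per_block = 3;  /* 4 KiB blocks */
        unsigned bio_sectors = 256;                            /* 128 KiB bio  */
        unsigned payload_len = tuple_size * (bio_sectors >> log2_sectors_per_block);
        assert(payload_len == 16 * 32);   /* 32 blocks -> 512 bytes of tuples */
        return 0;
    }
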
2529 static inline void dm_integrity_free_payload(struct dm_integrity_io *dio) in dm_integrity_free_payload() argument
2531 struct dm_integrity_c *ic = dio->ic; in dm_integrity_free_payload()
2532 if (unlikely(dio->integrity_payload_from_mempool)) in dm_integrity_free_payload()
2533 mempool_free(virt_to_page(dio->integrity_payload), &ic->recheck_pool); in dm_integrity_free_payload()
2535 kfree(dio->integrity_payload); in dm_integrity_free_payload()
2536 dio->integrity_payload = NULL; in dm_integrity_free_payload()
2537 dio->integrity_payload_from_mempool = false; in dm_integrity_free_payload()
2542 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); in dm_integrity_inline_recheck() local
2543 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); in dm_integrity_inline_recheck()
2544 struct dm_integrity_c *ic = dio->ic; in dm_integrity_inline_recheck()
2548 dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO)); in dm_integrity_inline_recheck()
2549 dio->integrity_payload_from_mempool = true; in dm_integrity_inline_recheck()
2551 outgoing_data = dio->integrity_payload + PAGE_SIZE; in dm_integrity_inline_recheck()
2553 while (dio->bio_details.bi_iter.bi_size) { in dm_integrity_inline_recheck()
2572 r = bio_integrity_add_page(outgoing_bio, virt_to_page(dio->integrity_payload), ic->tuple_size, 0); in dm_integrity_inline_recheck()
2580 outgoing_bio->bi_iter.bi_sector = dio->bio_details.bi_iter.bi_sector + ic->start + SB_SECTORS; in dm_integrity_inline_recheck()
2591 integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, outgoing_data, digest); in dm_integrity_inline_recheck()
2592 if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->interna… in dm_integrity_inline_recheck()
2594 ic->dev->bdev, dio->bio_details.bi_iter.bi_sector); in dm_integrity_inline_recheck()
2597 bio, dio->bio_details.bi_iter.bi_sector, 0); in dm_integrity_inline_recheck()
2604 bv = bio_iter_iovec(bio, dio->bio_details.bi_iter); in dm_integrity_inline_recheck()
2609 bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_inline_recheck()
2619 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); in dm_integrity_end_io() local
2620 if (dio->op == REQ_OP_READ && likely(*status == BLK_STS_OK)) { in dm_integrity_end_io()
2623 unlikely(dio->integrity_range_locked)) in dm_integrity_end_io()
2625 while (dio->bio_details.bi_iter.bi_size) { in dm_integrity_end_io()
2627 struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter); in dm_integrity_end_io()
2630 integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, digest); in dm_integrity_end_io()
2631 if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos, in dm_integrity_end_io()
2634 dm_integrity_free_payload(dio); in dm_integrity_end_io()
2635 INIT_WORK(&dio->work, dm_integrity_inline_recheck); in dm_integrity_end_io()
2636 queue_work(ic->offload_wq, &dio->work); in dm_integrity_end_io()
2641 bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_end_io()
2645 dm_integrity_free_payload(dio); in dm_integrity_end_io()
2646 if (unlikely(dio->integrity_range_locked)) in dm_integrity_end_io()
2647 remove_range(ic, &dio->range); in dm_integrity_end_io()
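
On read completion, dm_integrity_end_io() verifies each block against the returned bio_integrity payload, and on the first mismatch it does not fail the bio outright: it frees the payload and defers to dm_integrity_inline_recheck() on offload_wq (lines 2634-2636), which re-reads the data synchronously before deciding between success and a checksum error. A control-flow sketch with stubbed, assumed helpers:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs: the real code recomputes each block's checksum and compares
     * it with crypto_memneq() against the payload (lines 2630-2631). */
    static bool verify_block(unsigned i) { (void)i; return true; }
    static void queue_recheck(void)      { puts("queued for recheck"); }

    void model_on_read_complete(unsigned nr_blocks)
    {
        for (unsigned i = 0; i < nr_blocks; i++) {
            if (!verify_block(i)) {
                queue_recheck();   /* recheck decides OK vs checksum error */
                return;
            }
        }
        /* all blocks verified: free the payload and complete normally */
    }

    int main(void) { model_on_read_complete(8); return 0; }
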
2654 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); in integrity_bio_wait() local
2655 struct dm_integrity_c *ic = dio->ic; in integrity_bio_wait()
2658 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); in integrity_bio_wait()
2659 int r = dm_integrity_map_inline(dio, false); in integrity_bio_wait()
2673 dm_integrity_map_continue(dio, false); in integrity_bio_wait()
3291 struct dm_integrity_io *dio; in bitmap_block_work() local
3293 dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); in bitmap_block_work()
3295 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, in bitmap_block_work()
3296 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) { in bitmap_block_work()
3297 remove_range(ic, &dio->range); in bitmap_block_work()
3298 INIT_WORK(&dio->work, integrity_bio_wait); in bitmap_block_work()
3299 queue_work(ic->offload_wq, &dio->work); in bitmap_block_work()
3301 block_bitmap_op(ic, ic->journal, dio->range.logical_sector, in bitmap_block_work()
3302 dio->range.n_sectors, BITMAP_OP_SET); in bitmap_block_work()
3315 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); in bitmap_block_work() local
3317 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, in bitmap_block_work()
3318 dio->range.n_sectors, BITMAP_OP_SET); in bitmap_block_work()
3320 remove_range(ic, &dio->range); in bitmap_block_work()
3321 INIT_WORK(&dio->work, integrity_bio_wait); in bitmap_block_work()
3322 queue_work(ic->offload_wq, &dio->work); in bitmap_block_work()
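
bitmap_block_work() implements mode 'B': a held-back write is released as soon as every bit covering its range is already set in may_write_bitmap; otherwise the bits are set (in the journal bitmap too, lines 3301-3302 and 3317-3318) and the bio is requeued via integrity_bio_wait. A minimal model of the two bitmap operations involved, assuming one bit per block for illustration (the kernel converts sectors to bitmap bits through a log2 granularity):

    #include <stdbool.h>
    #include <stdint.h>

    /* BITMAP_OP_TEST_ALL_SET: true iff every block in [first, first+count)
     * is marked. */
    bool model_test_all_set(const uint8_t *bm, uint64_t first, uint64_t count)
    {
        for (uint64_t b = first; b < first + count; b++)
            if (!(bm[b >> 3] & (1u << (b & 7))))
                return false;
        return true;
    }

    /* BITMAP_OP_SET: mark every block in the range. */
    void model_set(uint8_t *bm, uint64_t first, uint64_t count)
    {
        for (uint64_t b = first; b < first + count; b++)
            bm[b >> 3] |= 1u << (b & 7);
    }
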