Lines matching references to 's' (struct dm_snapshot *); a short sketch of the chunk-tracking pattern these lines share follows the listing.
172 struct dm_dev *dm_snap_origin(struct dm_snapshot *s) in dm_snap_origin() argument
174 return s->origin; in dm_snap_origin()
178 struct dm_dev *dm_snap_cow(struct dm_snapshot *s) in dm_snap_cow() argument
180 return s->cow; in dm_snap_cow()
259 static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk) in track_chunk() argument
265 spin_lock_irq(&s->tracked_chunk_lock); in track_chunk()
267 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]); in track_chunk()
268 spin_unlock_irq(&s->tracked_chunk_lock); in track_chunk()
271 static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio) in stop_tracking_chunk() argument
276 spin_lock_irqsave(&s->tracked_chunk_lock, flags); in stop_tracking_chunk()
278 spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); in stop_tracking_chunk()
281 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) in __chunk_is_tracked() argument
286 spin_lock_irq(&s->tracked_chunk_lock); in __chunk_is_tracked()
289 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) { in __chunk_is_tracked()
296 spin_unlock_irq(&s->tracked_chunk_lock); in __chunk_is_tracked()
305 static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk) in __check_for_conflicting_io() argument
307 while (__chunk_is_tracked(s, chunk)) in __check_for_conflicting_io()
453 struct dm_snapshot *s; in __find_snapshots_sharing_cow() local
462 list_for_each_entry(s, &o->snapshots, list) { in __find_snapshots_sharing_cow()
463 if (dm_target_is_snapshot_merge(s->ti) && snap_merge) in __find_snapshots_sharing_cow()
464 *snap_merge = s; in __find_snapshots_sharing_cow()
465 if (!bdev_equal(s->cow->bdev, snap->cow->bdev)) in __find_snapshots_sharing_cow()
468 down_read(&s->lock); in __find_snapshots_sharing_cow()
469 active = s->active; in __find_snapshots_sharing_cow()
470 up_read(&s->lock); in __find_snapshots_sharing_cow()
474 *snap_src = s; in __find_snapshots_sharing_cow()
476 *snap_dest = s; in __find_snapshots_sharing_cow()
532 static void __insert_snapshot(struct origin *o, struct dm_snapshot *s) in __insert_snapshot() argument
538 if (l->store->chunk_size < s->store->chunk_size) in __insert_snapshot()
540 list_add_tail(&s->list, &l->list); in __insert_snapshot()
594 static void reregister_snapshot(struct dm_snapshot *s) in reregister_snapshot() argument
596 struct block_device *bdev = s->origin->bdev; in reregister_snapshot()
600 list_del(&s->list); in reregister_snapshot()
601 __insert_snapshot(__lookup_origin(bdev), s); in reregister_snapshot()
606 static void unregister_snapshot(struct dm_snapshot *s) in unregister_snapshot() argument
611 o = __lookup_origin(s->origin->bdev); in unregister_snapshot()
613 list_del(&s->list); in unregister_snapshot()
635 static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk, in dm_exception_table_lock_init() argument
638 struct dm_exception_table *complete = &s->complete; in dm_exception_table_lock_init()
639 struct dm_exception_table *pending = &s->pending; in dm_exception_table_lock_init()
742 static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s) in alloc_pending_exception() argument
744 struct dm_snap_pending_exception *pe = mempool_alloc(&s->pending_pool, in alloc_pending_exception()
747 atomic_inc(&s->pending_exceptions_count); in alloc_pending_exception()
748 pe->snap = s; in alloc_pending_exception()
755 struct dm_snapshot *s = pe->snap; in free_pending_exception() local
757 mempool_free(pe, &s->pending_pool); in free_pending_exception()
759 atomic_dec(&s->pending_exceptions_count); in free_pending_exception()
824 struct dm_snapshot *s = context; in dm_add_exception() local
842 dm_exception_table_lock_init(s, old, &lock); in dm_add_exception()
845 dm_insert_exception(&s->complete, e); in dm_add_exception()
884 static int init_hash_tables(struct dm_snapshot *s) in init_hash_tables() argument
892 cow_dev_size = get_dev_size(s->cow->bdev); in init_hash_tables()
895 hash_size = cow_dev_size >> s->store->chunk_shift; in init_hash_tables()
901 if (dm_exception_table_init(&s->complete, hash_size, in init_hash_tables()
913 if (dm_exception_table_init(&s->pending, hash_size, 0)) { in init_hash_tables()
914 dm_exception_table_exit(&s->complete, exception_cache); in init_hash_tables()
921 static void merge_shutdown(struct dm_snapshot *s) in merge_shutdown() argument
923 clear_bit_unlock(RUNNING_MERGE, &s->state_bits); in merge_shutdown()
925 wake_up_bit(&s->state_bits, RUNNING_MERGE); in merge_shutdown()
928 static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s) in __release_queued_bios_after_merge() argument
930 s->first_merging_chunk = 0; in __release_queued_bios_after_merge()
931 s->num_merging_chunks = 0; in __release_queued_bios_after_merge()
933 return bio_list_get(&s->bios_queued_during_merge); in __release_queued_bios_after_merge()
939 static int __remove_single_exception_chunk(struct dm_snapshot *s, in __remove_single_exception_chunk() argument
944 e = dm_lookup_exception(&s->complete, old_chunk); in __remove_single_exception_chunk()
988 static int remove_single_exception_chunk(struct dm_snapshot *s) in remove_single_exception_chunk() argument
992 chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1; in remove_single_exception_chunk()
994 down_write(&s->lock); in remove_single_exception_chunk()
1001 r = __remove_single_exception_chunk(s, old_chunk); in remove_single_exception_chunk()
1004 } while (old_chunk-- > s->first_merging_chunk); in remove_single_exception_chunk()
1006 b = __release_queued_bios_after_merge(s); in remove_single_exception_chunk()
1009 up_write(&s->lock); in remove_single_exception_chunk()
1042 static void snapshot_merge_next_chunks(struct dm_snapshot *s) in snapshot_merge_next_chunks() argument
1050 BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits)); in snapshot_merge_next_chunks()
1051 if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits))) in snapshot_merge_next_chunks()
1057 if (!s->valid) { in snapshot_merge_next_chunks()
1062 linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk, in snapshot_merge_next_chunks()
1067 down_write(&s->lock); in snapshot_merge_next_chunks()
1068 s->merge_failed = true; in snapshot_merge_next_chunks()
1069 up_write(&s->lock); in snapshot_merge_next_chunks()
1082 io_size = linear_chunks * s->store->chunk_size; in snapshot_merge_next_chunks()
1084 dest.bdev = s->origin->bdev; in snapshot_merge_next_chunks()
1085 dest.sector = chunk_to_sector(s->store, old_chunk); in snapshot_merge_next_chunks()
1088 src.bdev = s->cow->bdev; in snapshot_merge_next_chunks()
1089 src.sector = chunk_to_sector(s->store, new_chunk); in snapshot_merge_next_chunks()
1102 while (origin_write_extent(s, dest.sector, io_size)) { in snapshot_merge_next_chunks()
1110 down_write(&s->lock); in snapshot_merge_next_chunks()
1111 s->first_merging_chunk = old_chunk; in snapshot_merge_next_chunks()
1112 s->num_merging_chunks = linear_chunks; in snapshot_merge_next_chunks()
1113 up_write(&s->lock); in snapshot_merge_next_chunks()
1117 __check_for_conflicting_io(s, old_chunk + i); in snapshot_merge_next_chunks()
1119 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s); in snapshot_merge_next_chunks()
1123 merge_shutdown(s); in snapshot_merge_next_chunks()
1130 struct dm_snapshot *s = context; in merge_callback() local
1141 if (blkdev_issue_flush(s->origin->bdev) < 0) { in merge_callback()
1146 if (s->store->type->commit_merge(s->store, in merge_callback()
1147 s->num_merging_chunks) < 0) { in merge_callback()
1152 if (remove_single_exception_chunk(s) < 0) in merge_callback()
1155 snapshot_merge_next_chunks(s); in merge_callback()
1160 down_write(&s->lock); in merge_callback()
1161 s->merge_failed = true; in merge_callback()
1162 b = __release_queued_bios_after_merge(s); in merge_callback()
1163 up_write(&s->lock); in merge_callback()
1166 merge_shutdown(s); in merge_callback()
1169 static void start_merge(struct dm_snapshot *s) in start_merge() argument
1171 if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits)) in start_merge()
1172 snapshot_merge_next_chunks(s); in start_merge()
1178 static void stop_merge(struct dm_snapshot *s) in stop_merge() argument
1180 set_bit(SHUTDOWN_MERGE, &s->state_bits); in stop_merge()
1181 wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE); in stop_merge()
1182 clear_bit(SHUTDOWN_MERGE, &s->state_bits); in stop_merge()
1185 static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s, in parse_snapshot_features() argument
1211 s->discard_zeroes_cow = true; in parse_snapshot_features()
1214 s->discard_passdown_origin = true; in parse_snapshot_features()
1223 if (!s->discard_zeroes_cow && s->discard_passdown_origin) { in parse_snapshot_features()
1241 struct dm_snapshot *s; in snapshot_ctr() local
1260 s = kzalloc(sizeof(*s), GFP_KERNEL); in snapshot_ctr()
1261 if (!s) { in snapshot_ctr()
1270 r = parse_snapshot_features(&as, s, ti); in snapshot_ctr()
1278 r = dm_get_device(ti, origin_path, origin_mode, &s->origin); in snapshot_ctr()
1288 r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow); in snapshot_ctr()
1293 if (s->cow->bdev && s->cow->bdev == s->origin->bdev) { in snapshot_ctr()
1299 r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store); in snapshot_ctr()
1309 s->ti = ti; in snapshot_ctr()
1310 s->valid = 1; in snapshot_ctr()
1311 s->snapshot_overflowed = 0; in snapshot_ctr()
1312 s->active = 0; in snapshot_ctr()
1313 atomic_set(&s->pending_exceptions_count, 0); in snapshot_ctr()
1314 spin_lock_init(&s->pe_allocation_lock); in snapshot_ctr()
1315 s->exception_start_sequence = 0; in snapshot_ctr()
1316 s->exception_complete_sequence = 0; in snapshot_ctr()
1317 s->out_of_order_tree = RB_ROOT; in snapshot_ctr()
1318 init_rwsem(&s->lock); in snapshot_ctr()
1319 INIT_LIST_HEAD(&s->list); in snapshot_ctr()
1320 spin_lock_init(&s->pe_lock); in snapshot_ctr()
1321 s->state_bits = 0; in snapshot_ctr()
1322 s->merge_failed = false; in snapshot_ctr()
1323 s->first_merging_chunk = 0; in snapshot_ctr()
1324 s->num_merging_chunks = 0; in snapshot_ctr()
1325 bio_list_init(&s->bios_queued_during_merge); in snapshot_ctr()
1328 if (init_hash_tables(s)) { in snapshot_ctr()
1334 init_waitqueue_head(&s->in_progress_wait); in snapshot_ctr()
1336 s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); in snapshot_ctr()
1337 if (IS_ERR(s->kcopyd_client)) { in snapshot_ctr()
1338 r = PTR_ERR(s->kcopyd_client); in snapshot_ctr()
1343 r = mempool_init_slab_pool(&s->pending_pool, MIN_IOS, pending_cache); in snapshot_ctr()
1350 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]); in snapshot_ctr()
1352 spin_lock_init(&s->tracked_chunk_lock); in snapshot_ctr()
1354 ti->private = s; in snapshot_ctr()
1356 if (s->discard_zeroes_cow) in snapshot_ctr()
1357 ti->num_discard_bios = (s->discard_passdown_origin ? 2 : 1); in snapshot_ctr()
1362 r = register_snapshot(s); in snapshot_ctr()
1378 s->store->chunk_size = 0; in snapshot_ctr()
1382 r = s->store->type->read_metadata(s->store, dm_add_exception, in snapshot_ctr()
1383 (void *)s); in snapshot_ctr()
1388 s->valid = 0; in snapshot_ctr()
1392 if (!s->store->chunk_size) { in snapshot_ctr()
1398 r = dm_set_target_max_io_len(ti, s->store->chunk_size); in snapshot_ctr()
1405 unregister_snapshot(s); in snapshot_ctr()
1407 mempool_exit(&s->pending_pool); in snapshot_ctr()
1409 dm_kcopyd_client_destroy(s->kcopyd_client); in snapshot_ctr()
1411 dm_exception_table_exit(&s->pending, pending_cache); in snapshot_ctr()
1412 dm_exception_table_exit(&s->complete, exception_cache); in snapshot_ctr()
1414 dm_exception_store_destroy(s->store); in snapshot_ctr()
1416 dm_put_device(ti, s->cow); in snapshot_ctr()
1418 dm_put_device(ti, s->origin); in snapshot_ctr()
1421 kfree(s); in snapshot_ctr()
1426 static void __free_exceptions(struct dm_snapshot *s) in __free_exceptions() argument
1428 dm_kcopyd_client_destroy(s->kcopyd_client); in __free_exceptions()
1429 s->kcopyd_client = NULL; in __free_exceptions()
1431 dm_exception_table_exit(&s->pending, pending_cache); in __free_exceptions()
1432 dm_exception_table_exit(&s->complete, exception_cache); in __free_exceptions()
1473 struct dm_snapshot *s = ti->private; in snapshot_dtr() local
1478 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); in snapshot_dtr()
1479 if (snap_src && snap_dest && (s == snap_src)) { in snapshot_dtr()
1488 stop_merge(s); in snapshot_dtr()
1492 unregister_snapshot(s); in snapshot_dtr()
1494 while (atomic_read(&s->pending_exceptions_count)) in snapshot_dtr()
1504 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i])); in snapshot_dtr()
1507 __free_exceptions(s); in snapshot_dtr()
1509 mempool_exit(&s->pending_pool); in snapshot_dtr()
1511 dm_exception_store_destroy(s->store); in snapshot_dtr()
1513 dm_put_device(ti, s->cow); in snapshot_dtr()
1515 dm_put_device(ti, s->origin); in snapshot_dtr()
1517 WARN_ON(s->in_progress); in snapshot_dtr()
1519 kfree(s); in snapshot_dtr()
1522 static void account_start_copy(struct dm_snapshot *s) in account_start_copy() argument
1524 spin_lock(&s->in_progress_wait.lock); in account_start_copy()
1525 s->in_progress++; in account_start_copy()
1526 spin_unlock(&s->in_progress_wait.lock); in account_start_copy()
1529 static void account_end_copy(struct dm_snapshot *s) in account_end_copy() argument
1531 spin_lock(&s->in_progress_wait.lock); in account_end_copy()
1532 BUG_ON(!s->in_progress); in account_end_copy()
1533 s->in_progress--; in account_end_copy()
1534 if (likely(s->in_progress <= cow_threshold) && in account_end_copy()
1535 unlikely(waitqueue_active(&s->in_progress_wait))) in account_end_copy()
1536 wake_up_locked(&s->in_progress_wait); in account_end_copy()
1537 spin_unlock(&s->in_progress_wait.lock); in account_end_copy()
1540 static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins) in wait_for_in_progress() argument
1542 if (unlikely(s->in_progress > cow_threshold)) { in wait_for_in_progress()
1543 spin_lock(&s->in_progress_wait.lock); in wait_for_in_progress()
1544 if (likely(s->in_progress > cow_threshold)) { in wait_for_in_progress()
1554 __add_wait_queue(&s->in_progress_wait, &wait); in wait_for_in_progress()
1556 spin_unlock(&s->in_progress_wait.lock); in wait_for_in_progress()
1560 remove_wait_queue(&s->in_progress_wait, &wait); in wait_for_in_progress()
1563 spin_unlock(&s->in_progress_wait.lock); in wait_for_in_progress()
1588 static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) in retry_origin_bios() argument
1596 r = do_origin(s->origin, bio, false); in retry_origin_bios()
1618 static void __invalidate_snapshot(struct dm_snapshot *s, int err) in __invalidate_snapshot() argument
1620 if (!s->valid) in __invalidate_snapshot()
1628 if (s->store->type->drop_snapshot) in __invalidate_snapshot()
1629 s->store->type->drop_snapshot(s->store); in __invalidate_snapshot()
1631 s->valid = 0; in __invalidate_snapshot()
1633 dm_table_event(s->ti->table); in __invalidate_snapshot()
1636 static void invalidate_snapshot(struct dm_snapshot *s, int err) in invalidate_snapshot() argument
1638 down_write(&s->lock); in invalidate_snapshot()
1639 __invalidate_snapshot(s, err); in invalidate_snapshot()
1640 up_write(&s->lock); in invalidate_snapshot()
1647 struct dm_snapshot *s = pe->snap; in pending_complete() local
1654 dm_exception_table_lock_init(s, pe->e.old_chunk, &lock); in pending_complete()
1658 invalidate_snapshot(s, -EIO); in pending_complete()
1667 invalidate_snapshot(s, -ENOMEM); in pending_complete()
1675 down_read(&s->lock); in pending_complete()
1677 if (!s->valid) { in pending_complete()
1678 up_read(&s->lock); in pending_complete()
1692 dm_insert_exception(&s->complete, e); in pending_complete()
1693 up_read(&s->lock); in pending_complete()
1696 if (__chunk_is_tracked(s, pe->e.old_chunk)) { in pending_complete()
1698 __check_for_conflicting_io(s, pe->e.old_chunk); in pending_complete()
1726 retry_origin_bios(s, origin_bios); in pending_complete()
1733 struct dm_snapshot *s = pe->snap; in complete_exception() local
1736 s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error, in complete_exception()
1747 struct dm_snapshot *s = pe->snap; in copy_callback() local
1751 if (pe->exception_sequence == s->exception_complete_sequence) { in copy_callback()
1754 s->exception_complete_sequence++; in copy_callback()
1757 next = rb_first(&s->out_of_order_tree); in copy_callback()
1761 if (pe->exception_sequence != s->exception_complete_sequence) in copy_callback()
1764 s->exception_complete_sequence++; in copy_callback()
1765 rb_erase(&pe->out_of_order_node, &s->out_of_order_tree); in copy_callback()
1771 struct rb_node **p = &s->out_of_order_tree.rb_node; in copy_callback()
1786 rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree); in copy_callback()
1788 account_end_copy(s); in copy_callback()
1796 struct dm_snapshot *s = pe->snap; in start_copy() local
1798 struct block_device *bdev = s->origin->bdev; in start_copy()
1804 src.sector = chunk_to_sector(s->store, pe->e.old_chunk); in start_copy()
1805 src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector); in start_copy()
1807 dest.bdev = s->cow->bdev; in start_copy()
1808 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk); in start_copy()
1812 account_start_copy(s); in start_copy()
1813 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); in start_copy()
1826 struct dm_snapshot *s = pe->snap; in start_full_bio() local
1832 account_start_copy(s); in start_full_bio()
1833 callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client, in start_full_bio()
1843 __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) in __lookup_pending_exception() argument
1845 struct dm_exception *e = dm_lookup_exception(&s->pending, chunk); in __lookup_pending_exception()
1860 __insert_pending_exception(struct dm_snapshot *s, in __insert_pending_exception() argument
1869 spin_lock(&s->pe_allocation_lock); in __insert_pending_exception()
1870 if (s->store->type->prepare_exception(s->store, &pe->e)) { in __insert_pending_exception()
1871 spin_unlock(&s->pe_allocation_lock); in __insert_pending_exception()
1876 pe->exception_sequence = s->exception_start_sequence++; in __insert_pending_exception()
1877 spin_unlock(&s->pe_allocation_lock); in __insert_pending_exception()
1879 dm_insert_exception(&s->pending, &pe->e); in __insert_pending_exception()
1893 __find_pending_exception(struct dm_snapshot *s, in __find_pending_exception() argument
1898 pe2 = __lookup_pending_exception(s, chunk); in __find_pending_exception()
1904 return __insert_pending_exception(s, pe, chunk); in __find_pending_exception()
1907 static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, in remap_exception() argument
1910 bio_set_dev(bio, s->cow->bdev); in remap_exception()
1912 chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + in remap_exception()
1914 (bio->bi_iter.bi_sector & s->store->chunk_mask); in remap_exception()
1920 struct dm_snapshot *s = bio->bi_private; in zero_callback() local
1922 account_end_copy(s); in zero_callback()
1927 static void zero_exception(struct dm_snapshot *s, struct dm_exception *e, in zero_exception() argument
1932 dest.bdev = s->cow->bdev; in zero_exception()
1934 dest.count = s->store->chunk_size; in zero_exception()
1936 account_start_copy(s); in zero_exception()
1938 bio->bi_private = s; in zero_exception()
1939 dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio); in zero_exception()
1942 static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio) in io_overlaps_chunk() argument
1945 (s->store->chunk_size << SECTOR_SHIFT); in io_overlaps_chunk()
1951 struct dm_snapshot *s = ti->private; in snapshot_map() local
1960 bio_set_dev(bio, s->cow->bdev); in snapshot_map()
1964 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); in snapshot_map()
1965 dm_exception_table_lock_init(s, chunk, &lock); in snapshot_map()
1969 if (!s->valid) in snapshot_map()
1973 while (unlikely(!wait_for_in_progress(s, false))) in snapshot_map()
1977 down_read(&s->lock); in snapshot_map()
1980 if (!s->valid || (unlikely(s->snapshot_overflowed) && in snapshot_map()
1987 if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) { in snapshot_map()
1994 bio_set_dev(bio, s->origin->bdev); in snapshot_map()
1995 track_chunk(s, bio, chunk); in snapshot_map()
2002 e = dm_lookup_exception(&s->complete, chunk); in snapshot_map()
2004 remap_exception(s, e, bio, chunk); in snapshot_map()
2006 io_overlaps_chunk(s, bio)) { in snapshot_map()
2008 up_read(&s->lock); in snapshot_map()
2009 zero_exception(s, e, bio, chunk); in snapshot_map()
2032 pe = __lookup_pending_exception(s, chunk); in snapshot_map()
2035 pe = alloc_pending_exception(s); in snapshot_map()
2038 e = dm_lookup_exception(&s->complete, chunk); in snapshot_map()
2041 remap_exception(s, e, bio, chunk); in snapshot_map()
2045 pe = __find_pending_exception(s, pe, chunk); in snapshot_map()
2048 up_read(&s->lock); in snapshot_map()
2050 down_write(&s->lock); in snapshot_map()
2052 if (s->store->userspace_supports_overflow) { in snapshot_map()
2053 if (s->valid && !s->snapshot_overflowed) { in snapshot_map()
2054 s->snapshot_overflowed = 1; in snapshot_map()
2058 __invalidate_snapshot(s, -ENOMEM); in snapshot_map()
2059 up_write(&s->lock); in snapshot_map()
2066 remap_exception(s, &pe->e, bio, chunk); in snapshot_map()
2070 if (!pe->started && io_overlaps_chunk(s, bio)) { in snapshot_map()
2074 up_read(&s->lock); in snapshot_map()
2087 up_read(&s->lock); in snapshot_map()
2093 bio_set_dev(bio, s->origin->bdev); in snapshot_map()
2094 track_chunk(s, bio, chunk); in snapshot_map()
2099 up_read(&s->lock); in snapshot_map()
2119 struct dm_snapshot *s = ti->private; in snapshot_merge_map() local
2127 bio_set_dev(bio, s->origin->bdev); in snapshot_merge_map()
2129 bio_set_dev(bio, s->cow->bdev); in snapshot_merge_map()
2139 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); in snapshot_merge_map()
2141 down_write(&s->lock); in snapshot_merge_map()
2144 if (!s->valid) in snapshot_merge_map()
2148 e = dm_lookup_exception(&s->complete, chunk); in snapshot_merge_map()
2152 chunk >= s->first_merging_chunk && in snapshot_merge_map()
2153 chunk < (s->first_merging_chunk + in snapshot_merge_map()
2154 s->num_merging_chunks)) { in snapshot_merge_map()
2155 bio_set_dev(bio, s->origin->bdev); in snapshot_merge_map()
2156 bio_list_add(&s->bios_queued_during_merge, bio); in snapshot_merge_map()
2161 remap_exception(s, e, bio, chunk); in snapshot_merge_map()
2164 track_chunk(s, bio, chunk); in snapshot_merge_map()
2169 bio_set_dev(bio, s->origin->bdev); in snapshot_merge_map()
2172 up_write(&s->lock); in snapshot_merge_map()
2173 return do_origin(s->origin, bio, false); in snapshot_merge_map()
2177 up_write(&s->lock); in snapshot_merge_map()
2185 struct dm_snapshot *s = ti->private; in snapshot_end_io() local
2188 stop_tracking_chunk(s, bio); in snapshot_end_io()
2195 struct dm_snapshot *s = ti->private; in snapshot_merge_presuspend() local
2197 stop_merge(s); in snapshot_merge_presuspend()
2203 struct dm_snapshot *s = ti->private; in snapshot_preresume() local
2207 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); in snapshot_preresume()
2210 if (s == snap_src) { in snapshot_preresume()
2226 struct dm_snapshot *s = ti->private; in snapshot_resume() local
2234 o = __lookup_dm_origin(s->origin->bdev); in snapshot_resume()
2238 (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging); in snapshot_resume()
2261 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); in snapshot_resume()
2280 reregister_snapshot(s); in snapshot_resume()
2282 down_write(&s->lock); in snapshot_resume()
2283 s->active = 1; in snapshot_resume()
2284 up_write(&s->lock); in snapshot_resume()
2300 struct dm_snapshot *s = ti->private; in snapshot_merge_resume() local
2310 ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev); in snapshot_merge_resume()
2312 start_merge(s); in snapshot_merge_resume()
2566 struct dm_snapshot *s; in do_origin() local
2568 list_for_each_entry(s, &o->snapshots, list) in do_origin()
2569 if (unlikely(!wait_for_in_progress(s, true))) in do_origin()
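
The chunk-tracking references listed above (track_chunk(), stop_tracking_chunk(), __chunk_is_tracked(), the hash-bucket initialisation in snapshot_ctr(), and the polling loop in __check_for_conflicting_io()) all revolve around one idea: a fixed-size hash of chunks with reads still in flight, guarded by a single lock, which the copy path polls until a conflicting chunk drains. Below is a minimal userspace sketch of that pattern only, not the kernel code itself: the pthread mutex standing in for the irq-safe tracked_chunk_lock, the 16-bucket hash, and the simplified struct names are illustrative assumptions. Build with: cc -pthread sketch.c

/* Illustrative sketch of the tracked-chunk pattern; simplified, not dm-snap.c. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TRACKED_HASH_SIZE 16                      /* assumed bucket count */
#define TRACKED_HASH(c)   ((c) & (TRACKED_HASH_SIZE - 1))

typedef uint64_t chunk_t;

struct tracked_chunk {
	struct tracked_chunk *next;               /* singly linked hash chain */
	chunk_t chunk;
};

struct snapshot_tracking {
	pthread_mutex_t lock;                     /* stands in for tracked_chunk_lock */
	struct tracked_chunk *hash[TRACKED_HASH_SIZE];
};

/* Record that a read of 'chunk' is in flight (cf. track_chunk()). */
static void track_chunk(struct snapshot_tracking *t, struct tracked_chunk *c,
			chunk_t chunk)
{
	c->chunk = chunk;
	pthread_mutex_lock(&t->lock);
	c->next = t->hash[TRACKED_HASH(chunk)];
	t->hash[TRACKED_HASH(chunk)] = c;
	pthread_mutex_unlock(&t->lock);
}

/* The read completed; unlink its entry (cf. stop_tracking_chunk()). */
static void stop_tracking_chunk(struct snapshot_tracking *t,
				struct tracked_chunk *c)
{
	struct tracked_chunk **p;

	pthread_mutex_lock(&t->lock);
	for (p = &t->hash[TRACKED_HASH(c->chunk)]; *p; p = &(*p)->next) {
		if (*p == c) {
			*p = c->next;
			break;
		}
	}
	pthread_mutex_unlock(&t->lock);
}

/* Is any I/O to 'chunk' still in flight? (cf. __chunk_is_tracked()). */
static bool chunk_is_tracked(struct snapshot_tracking *t, chunk_t chunk)
{
	struct tracked_chunk *c;
	bool found = false;

	pthread_mutex_lock(&t->lock);
	for (c = t->hash[TRACKED_HASH(chunk)]; c; c = c->next) {
		if (c->chunk == chunk) {
			found = true;
			break;
		}
	}
	pthread_mutex_unlock(&t->lock);
	return found;
}

int main(void)
{
	struct snapshot_tracking t = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct tracked_chunk c;

	track_chunk(&t, &c, 42);
	printf("chunk 42 tracked: %d\n", chunk_is_tracked(&t, 42));   /* 1 */
	stop_tracking_chunk(&t, &c);
	printf("chunk 42 tracked: %d\n", chunk_is_tracked(&t, 42));   /* 0 */
	return 0;
}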