1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
4 *
5 * This file is released under the GPL.
6 */
7
8 #include <linux/blkdev.h>
9 #include <linux/device-mapper.h>
10 #include <linux/delay.h>
11 #include <linux/fs.h>
12 #include <linux/init.h>
13 #include <linux/kdev_t.h>
14 #include <linux/list.h>
15 #include <linux/list_bl.h>
16 #include <linux/mempool.h>
17 #include <linux/module.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/log2.h>
21 #include <linux/dm-kcopyd.h>
22
23 #include "dm.h"
24
25 #include "dm-exception-store.h"
26
27 #define DM_MSG_PREFIX "snapshots"
28
29 static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
30
31 #define dm_target_is_snapshot_merge(ti) \
32 ((ti)->type->name == dm_snapshot_merge_target_name)
33
34 /*
35 * The size of the mempool used to track chunks in use.
36 */
37 #define MIN_IOS 256
38
39 #define DM_TRACKED_CHUNK_HASH_SIZE 16
40 #define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \
41 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
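/*
 * Example (a sketch of how the hash behaves): with a table size of 16 the
 * hash keeps only the low four bits of the chunk number, so chunks 5, 21
 * and 37 all land in bucket 5.  Collisions are harmless because each
 * bucket only holds chunks with reads currently in flight.
 */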
42
43 struct dm_exception_table {
44 uint32_t hash_mask;
45 unsigned int hash_shift;
46 struct hlist_bl_head *table;
47 };
48
49 struct dm_snapshot {
50 struct rw_semaphore lock;
51
52 struct dm_dev *origin;
53 struct dm_dev *cow;
54
55 struct dm_target *ti;
56
57 /* List of snapshots per Origin */
58 struct list_head list;
59
60 /*
61 * You can't use a snapshot if this is 0 (e.g. if full).
62 * A snapshot-merge target never clears this.
63 */
64 int valid;
65
66 /*
67 * The snapshot overflowed because of a write to the snapshot device.
68 * We don't have to invalidate the snapshot in this case, but we need
69 * to prevent further writes.
70 */
71 int snapshot_overflowed;
72
73 /* Origin writes don't trigger exceptions until this is set */
74 int active;
75
76 atomic_t pending_exceptions_count;
77
78 spinlock_t pe_allocation_lock;
79
80 /* Protected by "pe_allocation_lock" */
81 sector_t exception_start_sequence;
82
83 /* Protected by kcopyd single-threaded callback */
84 sector_t exception_complete_sequence;
85
86 /*
87 * A list of pending exceptions that completed out of order.
88 * Protected by kcopyd single-threaded callback.
89 */
90 struct rb_root out_of_order_tree;
91
92 mempool_t pending_pool;
93
94 struct dm_exception_table pending;
95 struct dm_exception_table complete;
96
97 /*
98 * pe_lock protects all pending_exception operations and access
99 * as well as the snapshot_bios list.
100 */
101 spinlock_t pe_lock;
102
103 /* Chunks with outstanding reads */
104 spinlock_t tracked_chunk_lock;
105 struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
106
107 /* The on disk metadata handler */
108 struct dm_exception_store *store;
109
110 unsigned int in_progress;
111 struct wait_queue_head in_progress_wait;
112
113 struct dm_kcopyd_client *kcopyd_client;
114
115 /* Wait for events based on state_bits */
116 unsigned long state_bits;
117
118 /* Range of chunks currently being merged. */
119 chunk_t first_merging_chunk;
120 int num_merging_chunks;
121
122 /*
123 * The merge operation failed if this flag is set.
124 * Failure modes are handled as follows:
125 * - I/O error reading the header
126 * => don't load the target; abort.
127 * - Header does not have "valid" flag set
128 * => use the origin; forget about the snapshot.
129 * - I/O error when reading exceptions
130 * => don't load the target; abort.
131 * (We can't use the intermediate origin state.)
132 * - I/O error while merging
133 * => stop merging; set merge_failed; process I/O normally.
134 */
135 bool merge_failed:1;
136
137 bool discard_zeroes_cow:1;
138 bool discard_passdown_origin:1;
139
140 /*
141 * Incoming bios that overlap with chunks being merged must wait
142 * for them to be committed.
143 */
144 struct bio_list bios_queued_during_merge;
145 };
146
147 /*
148 * state_bits:
149 * RUNNING_MERGE - Merge operation is in progress.
150 * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
151 * cleared afterwards.
152 */
153 #define RUNNING_MERGE 0
154 #define SHUTDOWN_MERGE 1
155
156 /*
157 * Maximum number of chunks being copied on write.
158 *
159 * The value was decided experimentally as a trade-off between memory
160 * consumption, stalling the kernel's workqueues and maintaining a high enough
161 * throughput.
162 */
163 #define DEFAULT_COW_THRESHOLD 2048
164
165 static unsigned int cow_threshold = DEFAULT_COW_THRESHOLD;
166 module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
167 MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
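/*
 * The threshold is tunable at runtime via the module parameter, e.g.
 * (path assumes the code is loaded as the dm_snapshot module):
 *   echo 1024 > /sys/module/dm_snapshot/parameters/snapshot_cow_threshold
 */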
168
169 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
170 "A percentage of time allocated for copy on write");
171
172 struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
173 {
174 return s->origin;
175 }
176 EXPORT_SYMBOL(dm_snap_origin);
177
178 struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
179 {
180 return s->cow;
181 }
182 EXPORT_SYMBOL(dm_snap_cow);
183
184 static sector_t chunk_to_sector(struct dm_exception_store *store,
185 chunk_t chunk)
186 {
187 return chunk << store->chunk_shift;
188 }
189
190 static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
191 {
192 /*
193 * There is only ever one instance of a particular block
194 * device so we can compare pointers safely.
195 */
196 return lhs == rhs;
197 }
198
199 struct dm_snap_pending_exception {
200 struct dm_exception e;
201
202 /*
203 * Origin buffers waiting for this to complete are held
204 * in a bio list
205 */
206 struct bio_list origin_bios;
207 struct bio_list snapshot_bios;
208
209 /* Pointer back to snapshot context */
210 struct dm_snapshot *snap;
211
212 /*
213 * 1 indicates the exception has already been sent to
214 * kcopyd.
215 */
216 int started;
217
218 /* There was copying error. */
219 int copy_error;
220
221 /* A sequence number, it is used for in-order completion. */
222 sector_t exception_sequence;
223
224 struct rb_node out_of_order_node;
225
226 /*
227 * For writing a complete chunk, bypassing the copy.
228 */
229 struct bio *full_bio;
230 bio_end_io_t *full_bio_end_io;
231 };
232
233 /*
234 * Hash table mapping origin volumes to lists of snapshots and
235 * a lock to protect it
236 */
237 static struct kmem_cache *exception_cache;
238 static struct kmem_cache *pending_cache;
239
240 struct dm_snap_tracked_chunk {
241 struct hlist_node node;
242 chunk_t chunk;
243 };
244
245 static void init_tracked_chunk(struct bio *bio)
246 {
247 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
248
249 INIT_HLIST_NODE(&c->node);
250 }
251
252 static bool is_bio_tracked(struct bio *bio)
253 {
254 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
255
256 return !hlist_unhashed(&c->node);
257 }
258
259 static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
260 {
261 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
262
263 c->chunk = chunk;
264
265 spin_lock_irq(&s->tracked_chunk_lock);
266 hlist_add_head(&c->node,
267 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
268 spin_unlock_irq(&s->tracked_chunk_lock);
269 }
270
271 static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
272 {
273 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
274 unsigned long flags;
275
276 spin_lock_irqsave(&s->tracked_chunk_lock, flags);
277 hlist_del(&c->node);
278 spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
279 }
280
281 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
282 {
283 struct dm_snap_tracked_chunk *c;
284 int found = 0;
285
286 spin_lock_irq(&s->tracked_chunk_lock);
287
288 hlist_for_each_entry(c,
289 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
290 if (c->chunk == chunk) {
291 found = 1;
292 break;
293 }
294 }
295
296 spin_unlock_irq(&s->tracked_chunk_lock);
297
298 return found;
299 }
300
301 /*
302 * This conflicting I/O is extremely improbable in the caller,
303 * so fsleep(1000) is sufficient and there is no need for a wait queue.
304 */
305 static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
306 {
307 while (__chunk_is_tracked(s, chunk))
308 fsleep(1000);
309 }
310
311 /*
312 * One of these per registered origin, held in the snapshot_origins hash
313 */
314 struct origin {
315 /* The origin device */
316 struct block_device *bdev;
317
318 struct list_head hash_list;
319
320 /* List of snapshots for this origin */
321 struct list_head snapshots;
322 };
323
324 /*
325 * This structure is allocated for each origin target
326 */
327 struct dm_origin {
328 struct dm_dev *dev;
329 struct dm_target *ti;
330 unsigned int split_boundary;
331 struct list_head hash_list;
332 };
333
334 /*
335 * Size of the hash table for origin volumes. If we make this
336 * the size of the minors list then it should be nearly perfect
337 */
338 #define ORIGIN_HASH_SIZE 256
339 #define ORIGIN_MASK 0xFF
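/*
 * origin_hash() below keys on the low eight bits of the origin's dev_t
 * (effectively the low bits of the minor number), so up to 256 origins
 * hash without collision; minors 3 and 259, for example, share a bucket.
 */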
340 static struct list_head *_origins;
341 static struct list_head *_dm_origins;
342 static struct rw_semaphore _origins_lock;
343
344 static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
345 static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
346 static uint64_t _pending_exceptions_done_count;
347
348 static int init_origin_hash(void)
349 {
350 int i;
351
352 _origins = kmalloc_array(ORIGIN_HASH_SIZE, sizeof(struct list_head),
353 GFP_KERNEL);
354 if (!_origins) {
355 DMERR("unable to allocate memory for _origins");
356 return -ENOMEM;
357 }
358 for (i = 0; i < ORIGIN_HASH_SIZE; i++)
359 INIT_LIST_HEAD(_origins + i);
360
361 _dm_origins = kmalloc_array(ORIGIN_HASH_SIZE,
362 sizeof(struct list_head),
363 GFP_KERNEL);
364 if (!_dm_origins) {
365 DMERR("unable to allocate memory for _dm_origins");
366 kfree(_origins);
367 return -ENOMEM;
368 }
369 for (i = 0; i < ORIGIN_HASH_SIZE; i++)
370 INIT_LIST_HEAD(_dm_origins + i);
371
372 init_rwsem(&_origins_lock);
373
374 return 0;
375 }
376
377 static void exit_origin_hash(void)
378 {
379 kfree(_origins);
380 kfree(_dm_origins);
381 }
382
383 static unsigned int origin_hash(struct block_device *bdev)
384 {
385 return bdev->bd_dev & ORIGIN_MASK;
386 }
387
388 static struct origin *__lookup_origin(struct block_device *origin)
389 {
390 struct list_head *ol;
391 struct origin *o;
392
393 ol = &_origins[origin_hash(origin)];
394 list_for_each_entry(o, ol, hash_list)
395 if (bdev_equal(o->bdev, origin))
396 return o;
397
398 return NULL;
399 }
400
401 static void __insert_origin(struct origin *o)
402 {
403 struct list_head *sl = &_origins[origin_hash(o->bdev)];
404
405 list_add_tail(&o->hash_list, sl);
406 }
407
408 static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
409 {
410 struct list_head *ol;
411 struct dm_origin *o;
412
413 ol = &_dm_origins[origin_hash(origin)];
414 list_for_each_entry(o, ol, hash_list)
415 if (bdev_equal(o->dev->bdev, origin))
416 return o;
417
418 return NULL;
419 }
420
421 static void __insert_dm_origin(struct dm_origin *o)
422 {
423 struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
424
425 list_add_tail(&o->hash_list, sl);
426 }
427
428 static void __remove_dm_origin(struct dm_origin *o)
429 {
430 list_del(&o->hash_list);
431 }
432
433 /*
434 * _origins_lock must be held when calling this function.
435 * Returns number of snapshots registered using the supplied cow device, plus:
436 * snap_src - a snapshot suitable for use as a source of exception handover
437 * snap_dest - a snapshot capable of receiving exception handover.
438 * snap_merge - an existing snapshot-merge target linked to the same origin.
439 * There can be at most one snapshot-merge target. The parameter is optional.
440 *
441 * Possible return values and states of snap_src and snap_dest.
442 * 0: NULL, NULL - first new snapshot
443 * 1: snap_src, NULL - normal snapshot
444 * 2: snap_src, snap_dest - waiting for handover
445 * 2: snap_src, NULL - handed over, waiting for old to be deleted
446 * 1: NULL, snap_dest - source got destroyed without handover
447 */
448 static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
449 struct dm_snapshot **snap_src,
450 struct dm_snapshot **snap_dest,
451 struct dm_snapshot **snap_merge)
452 {
453 struct dm_snapshot *s;
454 struct origin *o;
455 int count = 0;
456 int active;
457
458 o = __lookup_origin(snap->origin->bdev);
459 if (!o)
460 goto out;
461
462 list_for_each_entry(s, &o->snapshots, list) {
463 if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
464 *snap_merge = s;
465 if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
466 continue;
467
468 down_read(&s->lock);
469 active = s->active;
470 up_read(&s->lock);
471
472 if (active) {
473 if (snap_src)
474 *snap_src = s;
475 } else if (snap_dest)
476 *snap_dest = s;
477
478 count++;
479 }
480
481 out:
482 return count;
483 }
484
485 /*
486 * On success, returns 1 if this snapshot is a handover destination,
487 * otherwise returns 0.
488 */
489 static int __validate_exception_handover(struct dm_snapshot *snap)
490 {
491 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
492 struct dm_snapshot *snap_merge = NULL;
493
494 /* Does snapshot need exceptions handed over to it? */
495 if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
496 &snap_merge) == 2) ||
497 snap_dest) {
498 snap->ti->error = "Snapshot cow pairing for exception table handover failed";
499 return -EINVAL;
500 }
501
502 /*
503 * If no snap_src was found, snap cannot become a handover
504 * destination.
505 */
506 if (!snap_src)
507 return 0;
508
509 /*
510 * Non-snapshot-merge handover?
511 */
512 if (!dm_target_is_snapshot_merge(snap->ti))
513 return 1;
514
515 /*
516 * Do not allow more than one merging snapshot.
517 */
518 if (snap_merge) {
519 snap->ti->error = "A snapshot is already merging.";
520 return -EINVAL;
521 }
522
523 if (!snap_src->store->type->prepare_merge ||
524 !snap_src->store->type->commit_merge) {
525 snap->ti->error = "Snapshot exception store does not support snapshot-merge.";
526 return -EINVAL;
527 }
528
529 return 1;
530 }
531
532 static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
533 {
534 struct dm_snapshot *l;
535
536 /* Sort the list according to chunk size, largest-first smallest-last */
537 list_for_each_entry(l, &o->snapshots, list)
538 if (l->store->chunk_size < s->store->chunk_size)
539 break;
540 list_add_tail(&s->list, &l->list);
541 }
542
543 /*
544 * Make a note of the snapshot and its origin so we can look it
545 * up when the origin has a write on it.
546 *
547 * Also validate snapshot exception store handovers.
548 * On success, returns 1 if this registration is a handover destination,
549 * otherwise returns 0.
550 */
551 static int register_snapshot(struct dm_snapshot *snap)
552 {
553 struct origin *o, *new_o = NULL;
554 struct block_device *bdev = snap->origin->bdev;
555 int r = 0;
556
557 new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
558 if (!new_o)
559 return -ENOMEM;
560
561 down_write(&_origins_lock);
562
563 r = __validate_exception_handover(snap);
564 if (r < 0) {
565 kfree(new_o);
566 goto out;
567 }
568
569 o = __lookup_origin(bdev);
570 if (o)
571 kfree(new_o);
572 else {
573 /* New origin */
574 o = new_o;
575
576 /* Initialise the struct */
577 INIT_LIST_HEAD(&o->snapshots);
578 o->bdev = bdev;
579
580 __insert_origin(o);
581 }
582
583 __insert_snapshot(o, snap);
584
585 out:
586 up_write(&_origins_lock);
587
588 return r;
589 }
590
591 /*
592 * Move snapshot to correct place in list according to chunk size.
593 */
594 static void reregister_snapshot(struct dm_snapshot *s)
595 {
596 struct block_device *bdev = s->origin->bdev;
597
598 down_write(&_origins_lock);
599
600 list_del(&s->list);
601 __insert_snapshot(__lookup_origin(bdev), s);
602
603 up_write(&_origins_lock);
604 }
605
606 static void unregister_snapshot(struct dm_snapshot *s)
607 {
608 struct origin *o;
609
610 down_write(&_origins_lock);
611 o = __lookup_origin(s->origin->bdev);
612
613 list_del(&s->list);
614 if (o && list_empty(&o->snapshots)) {
615 list_del(&o->hash_list);
616 kfree(o);
617 }
618
619 up_write(&_origins_lock);
620 }
621
622 /*
623 * Implementation of the exception hash tables.
624 * The lowest hash_shift bits of the chunk number are ignored, allowing
625 * some consecutive chunks to be grouped together.
626 */
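/*
 * For example, with a hash_shift of 3, chunks 0-7 all fall into bucket 0
 * and chunks 8-15 into bucket 1, so a run of consecutive chunks (and the
 * single dm_exception describing it) stays within one bucket.
 */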
627 static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);
628
629 /* Lock to protect access to the completed and pending exception hash tables. */
630 struct dm_exception_table_lock {
631 struct hlist_bl_head *complete_slot;
632 struct hlist_bl_head *pending_slot;
633 };
634
635 static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
636 struct dm_exception_table_lock *lock)
637 {
638 struct dm_exception_table *complete = &s->complete;
639 struct dm_exception_table *pending = &s->pending;
640
641 lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
642 lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
643 }
644
645 static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
646 {
647 hlist_bl_lock(lock->complete_slot);
648 hlist_bl_lock(lock->pending_slot);
649 }
650
651 static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
652 {
653 hlist_bl_unlock(lock->pending_slot);
654 hlist_bl_unlock(lock->complete_slot);
655 }
656
657 static int dm_exception_table_init(struct dm_exception_table *et,
658 uint32_t size, unsigned int hash_shift)
659 {
660 unsigned int i;
661
662 et->hash_shift = hash_shift;
663 et->hash_mask = size - 1;
664 et->table = kvmalloc_array(size, sizeof(struct hlist_bl_head),
665 GFP_KERNEL);
666 if (!et->table)
667 return -ENOMEM;
668
669 for (i = 0; i < size; i++)
670 INIT_HLIST_BL_HEAD(et->table + i);
671
672 return 0;
673 }
674
675 static void dm_exception_table_exit(struct dm_exception_table *et,
676 struct kmem_cache *mem)
677 {
678 struct hlist_bl_head *slot;
679 struct dm_exception *ex;
680 struct hlist_bl_node *pos, *n;
681 int i, size;
682
683 size = et->hash_mask + 1;
684 for (i = 0; i < size; i++) {
685 slot = et->table + i;
686
687 hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
688 kmem_cache_free(mem, ex);
689 }
690
691 kvfree(et->table);
692 }
693
694 static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
695 {
696 return (chunk >> et->hash_shift) & et->hash_mask;
697 }
698
699 static void dm_remove_exception(struct dm_exception *e)
700 {
701 hlist_bl_del(&e->hash_list);
702 }
703
704 /*
705 * Return the exception data for a sector, or NULL if not
706 * remapped.
707 */
708 static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
709 chunk_t chunk)
710 {
711 struct hlist_bl_head *slot;
712 struct hlist_bl_node *pos;
713 struct dm_exception *e;
714
715 slot = &et->table[exception_hash(et, chunk)];
716 hlist_bl_for_each_entry(e, pos, slot, hash_list)
717 if (chunk >= e->old_chunk &&
718 chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
719 return e;
720
721 return NULL;
722 }
723
724 static struct dm_exception *alloc_completed_exception(gfp_t gfp)
725 {
726 struct dm_exception *e;
727
728 e = kmem_cache_alloc(exception_cache, gfp);
729 if (!e && gfp == GFP_NOIO)
730 e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
731
732 return e;
733 }
734
735 static void free_completed_exception(struct dm_exception *e)
736 {
737 kmem_cache_free(exception_cache, e);
738 }
739
740 static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
741 {
742 struct dm_snap_pending_exception *pe = mempool_alloc(&s->pending_pool,
743 GFP_NOIO);
744
745 atomic_inc(&s->pending_exceptions_count);
746 pe->snap = s;
747
748 return pe;
749 }
750
751 static void free_pending_exception(struct dm_snap_pending_exception *pe)
752 {
753 struct dm_snapshot *s = pe->snap;
754
755 mempool_free(pe, &s->pending_pool);
756 smp_mb__before_atomic();
757 atomic_dec(&s->pending_exceptions_count);
758 }
759
760 static void dm_insert_exception(struct dm_exception_table *eh,
761 struct dm_exception *new_e)
762 {
763 struct hlist_bl_head *l;
764 struct hlist_bl_node *pos;
765 struct dm_exception *e = NULL;
766
767 l = &eh->table[exception_hash(eh, new_e->old_chunk)];
768
769 /* Add immediately if this table doesn't support consecutive chunks */
770 if (!eh->hash_shift)
771 goto out;
772
773 /* List is ordered by old_chunk */
774 hlist_bl_for_each_entry(e, pos, l, hash_list) {
775 /* Insert after an existing chunk? */
776 if (new_e->old_chunk == (e->old_chunk +
777 dm_consecutive_chunk_count(e) + 1) &&
778 new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
779 dm_consecutive_chunk_count(e) + 1)) {
780 dm_consecutive_chunk_count_inc(e);
781 free_completed_exception(new_e);
782 return;
783 }
784
785 /* Insert before an existing chunk? */
786 if (new_e->old_chunk == (e->old_chunk - 1) &&
787 new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
788 dm_consecutive_chunk_count_inc(e);
789 e->old_chunk--;
790 e->new_chunk--;
791 free_completed_exception(new_e);
792 return;
793 }
794
795 if (new_e->old_chunk < e->old_chunk)
796 break;
797 }
798
799 out:
800 if (!e) {
801 /*
802 * Either the table doesn't support consecutive chunks or slot
803 * l is empty.
804 */
805 hlist_bl_add_head(&new_e->hash_list, l);
806 } else if (new_e->old_chunk < e->old_chunk) {
807 /* Add before an existing exception */
808 hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
809 } else {
810 /* Add to l's tail: e is the last exception in this slot */
811 hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
812 }
813 }
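/*
 * For illustration: if a bucket already holds an exception mapping old
 * chunks 10-12 to new chunks 20-22 (consecutive count 2), inserting old
 * chunk 13 -> new chunk 23 just bumps the count to 3, and inserting old
 * chunk 9 -> new chunk 19 would instead extend the range downwards.
 * Anything that doesn't extend an existing run is linked into the bucket
 * in old_chunk order.
 */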
814
815 /*
816 * Callback used by the exception stores to load exceptions when
817 * initialising.
818 */
819 static int dm_add_exception(void *context, chunk_t old, chunk_t new)
820 {
821 struct dm_exception_table_lock lock;
822 struct dm_snapshot *s = context;
823 struct dm_exception *e;
824
825 e = alloc_completed_exception(GFP_KERNEL);
826 if (!e)
827 return -ENOMEM;
828
829 e->old_chunk = old;
830
831 /* Consecutive_count is implicitly initialised to zero */
832 e->new_chunk = new;
833
834 /*
835 * Although there is no need to lock access to the exception tables
836 * here, if we don't then hlist_bl_add_head(), called by
837 * dm_insert_exception(), will complain about accessing the
838 * corresponding list without locking it first.
839 */
840 dm_exception_table_lock_init(s, old, &lock);
841
842 dm_exception_table_lock(&lock);
843 dm_insert_exception(&s->complete, e);
844 dm_exception_table_unlock(&lock);
845
846 return 0;
847 }
848
849 /*
850 * Return a minimum chunk size of all snapshots that have the specified origin.
851 * Return zero if the origin has no snapshots.
852 */
853 static uint32_t __minimum_chunk_size(struct origin *o)
854 {
855 struct dm_snapshot *snap;
856 unsigned int chunk_size = rounddown_pow_of_two(UINT_MAX);
857
858 if (o)
859 list_for_each_entry(snap, &o->snapshots, list)
860 chunk_size = min_not_zero(chunk_size,
861 snap->store->chunk_size);
862
863 return (uint32_t) chunk_size;
864 }
865
866 /*
867 * Hard coded magic.
868 */
869 static int calc_max_buckets(void)
870 {
871 /* use a fixed size of 2MB */
872 unsigned long mem = 2 * 1024 * 1024;
873
874 mem /= sizeof(struct hlist_bl_head);
875
876 return mem;
877 }
878
879 /*
880 * Allocate room for a suitable hash table.
881 */
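/*
 * Worked example (assuming 64-bit pointers, i.e. an 8-byte hlist_bl_head):
 * calc_max_buckets() permits 2MB / 8 = 262144 buckets.  An 8GiB COW device
 * with 4KiB chunks holds 2097152 chunks, so the completed table is clamped
 * to 262144 buckets and the pending table to 262144 >> 3 = 32768 buckets.
 */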
882 static int init_hash_tables(struct dm_snapshot *s)
883 {
884 sector_t hash_size, cow_dev_size, max_buckets;
885
886 /*
887 * Calculate based on the size of the original volume or
888 * the COW volume...
889 */
890 cow_dev_size = get_dev_size(s->cow->bdev);
891 max_buckets = calc_max_buckets();
892
893 hash_size = cow_dev_size >> s->store->chunk_shift;
894 hash_size = min(hash_size, max_buckets);
895
896 if (hash_size < 64)
897 hash_size = 64;
898 hash_size = rounddown_pow_of_two(hash_size);
899 if (dm_exception_table_init(&s->complete, hash_size,
900 DM_CHUNK_CONSECUTIVE_BITS))
901 return -ENOMEM;
902
903 /*
904 * Allocate hash table for in-flight exceptions
905 * Make this smaller than the real hash table
906 */
907 hash_size >>= 3;
908 if (hash_size < 64)
909 hash_size = 64;
910
911 if (dm_exception_table_init(&s->pending, hash_size, 0)) {
912 dm_exception_table_exit(&s->complete, exception_cache);
913 return -ENOMEM;
914 }
915
916 return 0;
917 }
918
919 static void merge_shutdown(struct dm_snapshot *s)
920 {
921 clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
922 smp_mb__after_atomic();
923 wake_up_bit(&s->state_bits, RUNNING_MERGE);
924 }
925
926 static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
927 {
928 s->first_merging_chunk = 0;
929 s->num_merging_chunks = 0;
930
931 return bio_list_get(&s->bios_queued_during_merge);
932 }
933
934 /*
935 * Remove one chunk from the index of completed exceptions.
936 */
937 static int __remove_single_exception_chunk(struct dm_snapshot *s,
938 chunk_t old_chunk)
939 {
940 struct dm_exception *e;
941
942 e = dm_lookup_exception(&s->complete, old_chunk);
943 if (!e) {
944 DMERR("Corruption detected: exception for block %llu is on disk but not in memory",
945 (unsigned long long)old_chunk);
946 return -EINVAL;
947 }
948
949 /*
950 * If this is the only chunk using this exception, remove exception.
951 */
952 if (!dm_consecutive_chunk_count(e)) {
953 dm_remove_exception(e);
954 free_completed_exception(e);
955 return 0;
956 }
957
958 /*
959 * The chunk may be either at the beginning or the end of a
960 * group of consecutive chunks - never in the middle. We are
961 * removing chunks in the opposite order to that in which they
962 * were added, so this should always be true.
963 * Decrement the consecutive chunk counter and adjust the
964 * starting point if necessary.
965 */
966 if (old_chunk == e->old_chunk) {
967 e->old_chunk++;
968 e->new_chunk++;
969 } else if (old_chunk != e->old_chunk +
970 dm_consecutive_chunk_count(e)) {
971 DMERR("Attempt to merge block %llu from the middle of a chunk range [%llu - %llu]",
972 (unsigned long long)old_chunk,
973 (unsigned long long)e->old_chunk,
974 (unsigned long long)
975 e->old_chunk + dm_consecutive_chunk_count(e));
976 return -EINVAL;
977 }
978
979 dm_consecutive_chunk_count_dec(e);
980
981 return 0;
982 }
983
984 static void flush_bios(struct bio *bio);
985
986 static int remove_single_exception_chunk(struct dm_snapshot *s)
987 {
988 struct bio *b = NULL;
989 int r;
990 chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
991
992 down_write(&s->lock);
993
994 /*
995 * Process chunks (and associated exceptions) in reverse order
996 * so that dm_consecutive_chunk_count_dec() accounting works.
997 */
998 do {
999 r = __remove_single_exception_chunk(s, old_chunk);
1000 if (r)
1001 goto out;
1002 } while (old_chunk-- > s->first_merging_chunk);
1003
1004 b = __release_queued_bios_after_merge(s);
1005
1006 out:
1007 up_write(&s->lock);
1008 if (b)
1009 flush_bios(b);
1010
1011 return r;
1012 }
1013
1014 static int origin_write_extent(struct dm_snapshot *merging_snap,
1015 sector_t sector, unsigned int chunk_size);
1016
1017 static void merge_callback(int read_err, unsigned long write_err,
1018 void *context);
1019
1020 static uint64_t read_pending_exceptions_done_count(void)
1021 {
1022 uint64_t pending_exceptions_done;
1023
1024 spin_lock(&_pending_exceptions_done_spinlock);
1025 pending_exceptions_done = _pending_exceptions_done_count;
1026 spin_unlock(&_pending_exceptions_done_spinlock);
1027
1028 return pending_exceptions_done;
1029 }
1030
1031 static void increment_pending_exceptions_done_count(void)
1032 {
1033 spin_lock(&_pending_exceptions_done_spinlock);
1034 _pending_exceptions_done_count++;
1035 spin_unlock(&_pending_exceptions_done_spinlock);
1036
1037 wake_up_all(&_pending_exceptions_done);
1038 }
1039
1040 static void snapshot_merge_next_chunks(struct dm_snapshot *s)
1041 {
1042 int i, linear_chunks;
1043 chunk_t old_chunk, new_chunk;
1044 struct dm_io_region src, dest;
1045 sector_t io_size;
1046 uint64_t previous_count;
1047
1048 BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
1049 if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
1050 goto shut;
1051
1052 /*
1053 * valid flag never changes during merge, so no lock required.
1054 */
1055 if (!s->valid) {
1056 DMERR("Snapshot is invalid: can't merge");
1057 goto shut;
1058 }
1059
1060 linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
1061 &new_chunk);
1062 if (linear_chunks <= 0) {
1063 if (linear_chunks < 0) {
1064 DMERR("Read error in exception store: shutting down merge");
1065 down_write(&s->lock);
1066 s->merge_failed = true;
1067 up_write(&s->lock);
1068 }
1069 goto shut;
1070 }
1071
1072 /* Adjust old_chunk and new_chunk to reflect start of linear region */
1073 old_chunk = old_chunk + 1 - linear_chunks;
1074 new_chunk = new_chunk + 1 - linear_chunks;
1075
1076 /*
1077 * Use one (potentially large) I/O to copy all 'linear_chunks'
1078 * from the exception store to the origin
1079 */
1080 io_size = linear_chunks * s->store->chunk_size;
1081
1082 dest.bdev = s->origin->bdev;
1083 dest.sector = chunk_to_sector(s->store, old_chunk);
1084 dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
1085
1086 src.bdev = s->cow->bdev;
1087 src.sector = chunk_to_sector(s->store, new_chunk);
1088 src.count = dest.count;
1089
1090 /*
1091 * Reallocate any exceptions needed in other snapshots then
1092 * wait for the pending exceptions to complete.
1093 * Each time any pending exception (globally on the system)
1094 * completes we are woken and repeat the process to find out
1095 * if we can proceed. While this may not seem a particularly
1096 * efficient algorithm, it is not expected to have any
1097 * significant impact on performance.
1098 */
1099 previous_count = read_pending_exceptions_done_count();
1100 while (origin_write_extent(s, dest.sector, io_size)) {
1101 wait_event(_pending_exceptions_done,
1102 (read_pending_exceptions_done_count() !=
1103 previous_count));
1104 /* Retry after the wait, until all exceptions are done. */
1105 previous_count = read_pending_exceptions_done_count();
1106 }
1107
1108 down_write(&s->lock);
1109 s->first_merging_chunk = old_chunk;
1110 s->num_merging_chunks = linear_chunks;
1111 up_write(&s->lock);
1112
1113 /* Wait until writes to all 'linear_chunks' drain */
1114 for (i = 0; i < linear_chunks; i++)
1115 __check_for_conflicting_io(s, old_chunk + i);
1116
1117 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
1118 return;
1119
1120 shut:
1121 merge_shutdown(s);
1122 }
1123
1124 static void error_bios(struct bio *bio);
1125
1126 static void merge_callback(int read_err, unsigned long write_err, void *context)
1127 {
1128 struct dm_snapshot *s = context;
1129 struct bio *b = NULL;
1130
1131 if (read_err || write_err) {
1132 if (read_err)
1133 DMERR("Read error: shutting down merge.");
1134 else
1135 DMERR("Write error: shutting down merge.");
1136 goto shut;
1137 }
1138
1139 if (blkdev_issue_flush(s->origin->bdev) < 0) {
1140 DMERR("Flush after merge failed: shutting down merge");
1141 goto shut;
1142 }
1143
1144 if (s->store->type->commit_merge(s->store,
1145 s->num_merging_chunks) < 0) {
1146 DMERR("Write error in exception store: shutting down merge");
1147 goto shut;
1148 }
1149
1150 if (remove_single_exception_chunk(s) < 0)
1151 goto shut;
1152
1153 snapshot_merge_next_chunks(s);
1154
1155 return;
1156
1157 shut:
1158 down_write(&s->lock);
1159 s->merge_failed = true;
1160 b = __release_queued_bios_after_merge(s);
1161 up_write(&s->lock);
1162 error_bios(b);
1163
1164 merge_shutdown(s);
1165 }
1166
1167 static void start_merge(struct dm_snapshot *s)
1168 {
1169 if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
1170 snapshot_merge_next_chunks(s);
1171 }
1172
1173 /*
1174 * Stop the merging process and wait until it finishes.
1175 */
1176 static void stop_merge(struct dm_snapshot *s)
1177 {
1178 set_bit(SHUTDOWN_MERGE, &s->state_bits);
1179 wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
1180 clear_bit(SHUTDOWN_MERGE, &s->state_bits);
1181 }
1182
1183 static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
1184 struct dm_target *ti)
1185 {
1186 int r;
1187 unsigned int argc;
1188 const char *arg_name;
1189
1190 static const struct dm_arg _args[] = {
1191 {0, 2, "Invalid number of feature arguments"},
1192 };
1193
1194 /*
1195 * No feature arguments supplied.
1196 */
1197 if (!as->argc)
1198 return 0;
1199
1200 r = dm_read_arg_group(_args, as, &argc, &ti->error);
1201 if (r)
1202 return -EINVAL;
1203
1204 while (argc && !r) {
1205 arg_name = dm_shift_arg(as);
1206 argc--;
1207
1208 if (!strcasecmp(arg_name, "discard_zeroes_cow"))
1209 s->discard_zeroes_cow = true;
1210
1211 else if (!strcasecmp(arg_name, "discard_passdown_origin"))
1212 s->discard_passdown_origin = true;
1213
1214 else {
1215 ti->error = "Unrecognised feature requested";
1216 r = -EINVAL;
1217 break;
1218 }
1219 }
1220
1221 if (!s->discard_zeroes_cow && s->discard_passdown_origin) {
1222 /*
1223 * TODO: really these are disjoint.. but ti->num_discard_bios
1224 * and dm_bio_get_target_bio_nr() require rigid constraints.
1225 */
1226 ti->error = "discard_passdown_origin feature depends on discard_zeroes_cow";
1227 r = -EINVAL;
1228 }
1229
1230 return r;
1231 }
1232
1233 /*
1234 * Construct a snapshot mapping:
1235 * <origin_dev> <COW-dev> <p|po|n> <chunk-size> [<# feature args> [<arg>]*]
1236 */
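/*
 * Example table line (device names are illustrative only):
 *   0 2097152 snapshot /dev/vg0/base /dev/vg0/base-cow P 8
 * i.e. snapshot a 1GiB origin into a persistent COW store using 8-sector
 * (4KiB) chunks.
 */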
1237 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1238 {
1239 struct dm_snapshot *s;
1240 struct dm_arg_set as;
1241 int i;
1242 int r = -EINVAL;
1243 char *origin_path, *cow_path;
1244 dev_t origin_dev, cow_dev;
1245 unsigned int args_used, num_flush_bios = 1;
1246 fmode_t origin_mode = FMODE_READ;
1247
1248 if (argc < 4) {
1249 ti->error = "requires 4 or more arguments";
1250 r = -EINVAL;
1251 goto bad;
1252 }
1253
1254 if (dm_target_is_snapshot_merge(ti)) {
1255 num_flush_bios = 2;
1256 origin_mode = FMODE_WRITE;
1257 }
1258
1259 s = kzalloc(sizeof(*s), GFP_KERNEL);
1260 if (!s) {
1261 ti->error = "Cannot allocate private snapshot structure";
1262 r = -ENOMEM;
1263 goto bad;
1264 }
1265
1266 as.argc = argc;
1267 as.argv = argv;
1268 dm_consume_args(&as, 4);
1269 r = parse_snapshot_features(&as, s, ti);
1270 if (r)
1271 goto bad_features;
1272
1273 origin_path = argv[0];
1274 argv++;
1275 argc--;
1276
1277 r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
1278 if (r) {
1279 ti->error = "Cannot get origin device";
1280 goto bad_origin;
1281 }
1282 origin_dev = s->origin->bdev->bd_dev;
1283
1284 cow_path = argv[0];
1285 argv++;
1286 argc--;
1287
1288 cow_dev = dm_get_dev_t(cow_path);
1289 if (cow_dev && cow_dev == origin_dev) {
1290 ti->error = "COW device cannot be the same as origin device";
1291 r = -EINVAL;
1292 goto bad_cow;
1293 }
1294
1295 r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
1296 if (r) {
1297 ti->error = "Cannot get COW device";
1298 goto bad_cow;
1299 }
1300
1301 r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
1302 if (r) {
1303 ti->error = "Couldn't create exception store";
1304 r = -EINVAL;
1305 goto bad_store;
1306 }
1307
1308 argv += args_used;
1309 argc -= args_used;
1310
1311 s->ti = ti;
1312 s->valid = 1;
1313 s->snapshot_overflowed = 0;
1314 s->active = 0;
1315 atomic_set(&s->pending_exceptions_count, 0);
1316 spin_lock_init(&s->pe_allocation_lock);
1317 s->exception_start_sequence = 0;
1318 s->exception_complete_sequence = 0;
1319 s->out_of_order_tree = RB_ROOT;
1320 init_rwsem(&s->lock);
1321 INIT_LIST_HEAD(&s->list);
1322 spin_lock_init(&s->pe_lock);
1323 s->state_bits = 0;
1324 s->merge_failed = false;
1325 s->first_merging_chunk = 0;
1326 s->num_merging_chunks = 0;
1327 bio_list_init(&s->bios_queued_during_merge);
1328
1329 /* Allocate hash table for COW data */
1330 if (init_hash_tables(s)) {
1331 ti->error = "Unable to allocate hash table space";
1332 r = -ENOMEM;
1333 goto bad_hash_tables;
1334 }
1335
1336 init_waitqueue_head(&s->in_progress_wait);
1337
1338 s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1339 if (IS_ERR(s->kcopyd_client)) {
1340 r = PTR_ERR(s->kcopyd_client);
1341 ti->error = "Could not create kcopyd client";
1342 goto bad_kcopyd;
1343 }
1344
1345 r = mempool_init_slab_pool(&s->pending_pool, MIN_IOS, pending_cache);
1346 if (r) {
1347 ti->error = "Could not allocate mempool for pending exceptions";
1348 goto bad_pending_pool;
1349 }
1350
1351 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1352 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
1353
1354 spin_lock_init(&s->tracked_chunk_lock);
1355
1356 ti->private = s;
1357 ti->num_flush_bios = num_flush_bios;
1358 if (s->discard_zeroes_cow)
1359 ti->num_discard_bios = (s->discard_passdown_origin ? 2 : 1);
1360 ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);
1361
1362 /* Add snapshot to the list of snapshots for this origin */
1363 /* Exceptions aren't triggered till snapshot_resume() is called */
1364 r = register_snapshot(s);
1365 if (r == -ENOMEM) {
1366 ti->error = "Snapshot origin struct allocation failed";
1367 goto bad_load_and_register;
1368 } else if (r < 0) {
1369 /* invalid handover, register_snapshot has set ti->error */
1370 goto bad_load_and_register;
1371 }
1372
1373 /*
1374 * Metadata must only be loaded into one table at once, so skip this
1375 * if metadata will be handed over during resume.
1376 * Chunk size will be set during the handover - set it to zero to
1377 * ensure it's ignored.
1378 */
1379 if (r > 0) {
1380 s->store->chunk_size = 0;
1381 return 0;
1382 }
1383
1384 r = s->store->type->read_metadata(s->store, dm_add_exception,
1385 (void *)s);
1386 if (r < 0) {
1387 ti->error = "Failed to read snapshot metadata";
1388 goto bad_read_metadata;
1389 } else if (r > 0) {
1390 s->valid = 0;
1391 DMWARN("Snapshot is marked invalid.");
1392 }
1393
1394 if (!s->store->chunk_size) {
1395 ti->error = "Chunk size not set";
1396 r = -EINVAL;
1397 goto bad_read_metadata;
1398 }
1399
1400 r = dm_set_target_max_io_len(ti, s->store->chunk_size);
1401 if (r)
1402 goto bad_read_metadata;
1403
1404 return 0;
1405
1406 bad_read_metadata:
1407 unregister_snapshot(s);
1408 bad_load_and_register:
1409 mempool_exit(&s->pending_pool);
1410 bad_pending_pool:
1411 dm_kcopyd_client_destroy(s->kcopyd_client);
1412 bad_kcopyd:
1413 dm_exception_table_exit(&s->pending, pending_cache);
1414 dm_exception_table_exit(&s->complete, exception_cache);
1415 bad_hash_tables:
1416 dm_exception_store_destroy(s->store);
1417 bad_store:
1418 dm_put_device(ti, s->cow);
1419 bad_cow:
1420 dm_put_device(ti, s->origin);
1421 bad_origin:
1422 bad_features:
1423 kfree(s);
1424 bad:
1425 return r;
1426 }
1427
1428 static void __free_exceptions(struct dm_snapshot *s)
1429 {
1430 dm_kcopyd_client_destroy(s->kcopyd_client);
1431 s->kcopyd_client = NULL;
1432
1433 dm_exception_table_exit(&s->pending, pending_cache);
1434 dm_exception_table_exit(&s->complete, exception_cache);
1435 }
1436
1437 static void __handover_exceptions(struct dm_snapshot *snap_src,
1438 struct dm_snapshot *snap_dest)
1439 {
1440 union {
1441 struct dm_exception_table table_swap;
1442 struct dm_exception_store *store_swap;
1443 } u;
1444
1445 /*
1446 * Swap all snapshot context information between the two instances.
1447 */
1448 u.table_swap = snap_dest->complete;
1449 snap_dest->complete = snap_src->complete;
1450 snap_src->complete = u.table_swap;
1451
1452 u.store_swap = snap_dest->store;
1453 snap_dest->store = snap_src->store;
1454 snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
1455 snap_src->store = u.store_swap;
1456
1457 snap_dest->store->snap = snap_dest;
1458 snap_src->store->snap = snap_src;
1459
1460 snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
1461 snap_dest->valid = snap_src->valid;
1462 snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;
1463
1464 /*
1465 * Set source invalid to ensure it receives no further I/O.
1466 */
1467 snap_src->valid = 0;
1468 }
1469
1470 static void snapshot_dtr(struct dm_target *ti)
1471 {
1472 #ifdef CONFIG_DM_DEBUG
1473 int i;
1474 #endif
1475 struct dm_snapshot *s = ti->private;
1476 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1477
1478 down_read(&_origins_lock);
1479 /* Check whether exception handover must be cancelled */
1480 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1481 if (snap_src && snap_dest && (s == snap_src)) {
1482 down_write(&snap_dest->lock);
1483 snap_dest->valid = 0;
1484 up_write(&snap_dest->lock);
1485 DMERR("Cancelling snapshot handover.");
1486 }
1487 up_read(&_origins_lock);
1488
1489 if (dm_target_is_snapshot_merge(ti))
1490 stop_merge(s);
1491
1492 /* Prevent further origin writes from using this snapshot. */
1493 /* After this returns there can be no new kcopyd jobs. */
1494 unregister_snapshot(s);
1495
1496 while (atomic_read(&s->pending_exceptions_count))
1497 fsleep(1000);
1498 /*
1499 * Ensure instructions in mempool_exit aren't reordered
1500 * before atomic_read.
1501 */
1502 smp_mb();
1503
1504 #ifdef CONFIG_DM_DEBUG
1505 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1506 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
1507 #endif
1508
1509 __free_exceptions(s);
1510
1511 mempool_exit(&s->pending_pool);
1512
1513 dm_exception_store_destroy(s->store);
1514
1515 dm_put_device(ti, s->cow);
1516
1517 dm_put_device(ti, s->origin);
1518
1519 WARN_ON(s->in_progress);
1520
1521 kfree(s);
1522 }
1523
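/*
 * account_start_copy(), account_end_copy() and wait_for_in_progress()
 * below bound the number of chunk copies in flight: once in_progress
 * exceeds cow_threshold (default 2048 chunks), writers sleep on
 * in_progress_wait and are woken again as copies complete.
 */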
1524 static void account_start_copy(struct dm_snapshot *s)
1525 {
1526 spin_lock(&s->in_progress_wait.lock);
1527 s->in_progress++;
1528 spin_unlock(&s->in_progress_wait.lock);
1529 }
1530
1531 static void account_end_copy(struct dm_snapshot *s)
1532 {
1533 spin_lock(&s->in_progress_wait.lock);
1534 BUG_ON(!s->in_progress);
1535 s->in_progress--;
1536 if (likely(s->in_progress <= cow_threshold) &&
1537 unlikely(waitqueue_active(&s->in_progress_wait)))
1538 wake_up_locked(&s->in_progress_wait);
1539 spin_unlock(&s->in_progress_wait.lock);
1540 }
1541
1542 static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
1543 {
1544 if (unlikely(s->in_progress > cow_threshold)) {
1545 spin_lock(&s->in_progress_wait.lock);
1546 if (likely(s->in_progress > cow_threshold)) {
1547 /*
1548 * NOTE: this throttle doesn't account for whether
1549 * the caller is servicing an IO that will trigger a COW
1550 * so excess throttling may result for chunks not required
1551 * to be COW'd. But if cow_threshold was reached, extra
1552 * throttling is unlikely to negatively impact performance.
1553 */
1554 DECLARE_WAITQUEUE(wait, current);
1555
1556 __add_wait_queue(&s->in_progress_wait, &wait);
1557 __set_current_state(TASK_UNINTERRUPTIBLE);
1558 spin_unlock(&s->in_progress_wait.lock);
1559 if (unlock_origins)
1560 up_read(&_origins_lock);
1561 io_schedule();
1562 remove_wait_queue(&s->in_progress_wait, &wait);
1563 return false;
1564 }
1565 spin_unlock(&s->in_progress_wait.lock);
1566 }
1567 return true;
1568 }
1569
1570 /*
1571 * Flush a list of buffers.
1572 */
1573 static void flush_bios(struct bio *bio)
1574 {
1575 struct bio *n;
1576
1577 while (bio) {
1578 n = bio->bi_next;
1579 bio->bi_next = NULL;
1580 submit_bio_noacct(bio);
1581 bio = n;
1582 }
1583 }
1584
1585 static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
1586
1587 /*
1588 * Flush a list of buffers.
1589 */
1590 static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1591 {
1592 struct bio *n;
1593 int r;
1594
1595 while (bio) {
1596 n = bio->bi_next;
1597 bio->bi_next = NULL;
1598 r = do_origin(s->origin, bio, false);
1599 if (r == DM_MAPIO_REMAPPED)
1600 submit_bio_noacct(bio);
1601 bio = n;
1602 }
1603 }
1604
1605 /*
1606 * Error a list of buffers.
1607 */
1608 static void error_bios(struct bio *bio)
1609 {
1610 struct bio *n;
1611
1612 while (bio) {
1613 n = bio->bi_next;
1614 bio->bi_next = NULL;
1615 bio_io_error(bio);
1616 bio = n;
1617 }
1618 }
1619
1620 static void __invalidate_snapshot(struct dm_snapshot *s, int err)
1621 {
1622 if (!s->valid)
1623 return;
1624
1625 if (err == -EIO)
1626 DMERR("Invalidating snapshot: Error reading/writing.");
1627 else if (err == -ENOMEM)
1628 DMERR("Invalidating snapshot: Unable to allocate exception.");
1629
1630 if (s->store->type->drop_snapshot)
1631 s->store->type->drop_snapshot(s->store);
1632
1633 s->valid = 0;
1634
1635 dm_table_event(s->ti->table);
1636 }
1637
1638 static void invalidate_snapshot(struct dm_snapshot *s, int err)
1639 {
1640 down_write(&s->lock);
1641 __invalidate_snapshot(s, err);
1642 up_write(&s->lock);
1643 }
1644
1645 static void pending_complete(void *context, int success)
1646 {
1647 struct dm_snap_pending_exception *pe = context;
1648 struct dm_exception *e;
1649 struct dm_snapshot *s = pe->snap;
1650 struct bio *origin_bios = NULL;
1651 struct bio *snapshot_bios = NULL;
1652 struct bio *full_bio = NULL;
1653 struct dm_exception_table_lock lock;
1654 int error = 0;
1655
1656 dm_exception_table_lock_init(s, pe->e.old_chunk, &lock);
1657
1658 if (!success) {
1659 /* Read/write error - snapshot is unusable */
1660 invalidate_snapshot(s, -EIO);
1661 error = 1;
1662
1663 dm_exception_table_lock(&lock);
1664 goto out;
1665 }
1666
1667 e = alloc_completed_exception(GFP_NOIO);
1668 if (!e) {
1669 invalidate_snapshot(s, -ENOMEM);
1670 error = 1;
1671
1672 dm_exception_table_lock(&lock);
1673 goto out;
1674 }
1675 *e = pe->e;
1676
1677 down_read(&s->lock);
1678 dm_exception_table_lock(&lock);
1679 if (!s->valid) {
1680 up_read(&s->lock);
1681 free_completed_exception(e);
1682 error = 1;
1683
1684 goto out;
1685 }
1686
1687 /*
1688 * Add a proper exception. After inserting the completed exception all
1689 * subsequent snapshot reads to this chunk will be redirected to the
1690 * COW device. This ensures that we do not starve. Moreover, as long
1691 * as the pending exception exists, neither origin writes nor snapshot
1692 * merging can overwrite the chunk in origin.
1693 */
1694 dm_insert_exception(&s->complete, e);
1695 up_read(&s->lock);
1696
1697 /* Wait for conflicting reads to drain */
1698 if (__chunk_is_tracked(s, pe->e.old_chunk)) {
1699 dm_exception_table_unlock(&lock);
1700 __check_for_conflicting_io(s, pe->e.old_chunk);
1701 dm_exception_table_lock(&lock);
1702 }
1703
1704 out:
1705 /* Remove the in-flight exception from the list */
1706 dm_remove_exception(&pe->e);
1707
1708 dm_exception_table_unlock(&lock);
1709
1710 snapshot_bios = bio_list_get(&pe->snapshot_bios);
1711 origin_bios = bio_list_get(&pe->origin_bios);
1712 full_bio = pe->full_bio;
1713 if (full_bio)
1714 full_bio->bi_end_io = pe->full_bio_end_io;
1715 increment_pending_exceptions_done_count();
1716
1717 /* Submit any pending write bios */
1718 if (error) {
1719 if (full_bio)
1720 bio_io_error(full_bio);
1721 error_bios(snapshot_bios);
1722 } else {
1723 if (full_bio)
1724 bio_endio(full_bio);
1725 flush_bios(snapshot_bios);
1726 }
1727
1728 retry_origin_bios(s, origin_bios);
1729
1730 free_pending_exception(pe);
1731 }
1732
1733 static void complete_exception(struct dm_snap_pending_exception *pe)
1734 {
1735 struct dm_snapshot *s = pe->snap;
1736
1737 /* Update the metadata if we are persistent */
1738 s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
1739 pending_complete, pe);
1740 }
1741
1742 /*
1743 * Called when the copy I/O has finished. kcopyd actually runs
1744 * this code so don't block.
1745 */
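/*
 * Completions are forced back into submission order: an exception is only
 * committed once every exception with a lower exception_sequence has
 * completed; anything finishing early is parked in out_of_order_tree
 * (keyed by sequence number) until its turn comes.
 */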
1746 static void copy_callback(int read_err, unsigned long write_err, void *context)
1747 {
1748 struct dm_snap_pending_exception *pe = context;
1749 struct dm_snapshot *s = pe->snap;
1750
1751 pe->copy_error = read_err || write_err;
1752
1753 if (pe->exception_sequence == s->exception_complete_sequence) {
1754 struct rb_node *next;
1755
1756 s->exception_complete_sequence++;
1757 complete_exception(pe);
1758
1759 next = rb_first(&s->out_of_order_tree);
1760 while (next) {
1761 pe = rb_entry(next, struct dm_snap_pending_exception,
1762 out_of_order_node);
1763 if (pe->exception_sequence != s->exception_complete_sequence)
1764 break;
1765 next = rb_next(next);
1766 s->exception_complete_sequence++;
1767 rb_erase(&pe->out_of_order_node, &s->out_of_order_tree);
1768 complete_exception(pe);
1769 cond_resched();
1770 }
1771 } else {
1772 struct rb_node *parent = NULL;
1773 struct rb_node **p = &s->out_of_order_tree.rb_node;
1774 struct dm_snap_pending_exception *pe2;
1775
1776 while (*p) {
1777 pe2 = rb_entry(*p, struct dm_snap_pending_exception, out_of_order_node);
1778 parent = *p;
1779
1780 BUG_ON(pe->exception_sequence == pe2->exception_sequence);
1781 if (pe->exception_sequence < pe2->exception_sequence)
1782 p = &((*p)->rb_left);
1783 else
1784 p = &((*p)->rb_right);
1785 }
1786
1787 rb_link_node(&pe->out_of_order_node, parent, p);
1788 rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
1789 }
1790 account_end_copy(s);
1791 }
1792
1793 /*
1794 * Dispatches the copy operation to kcopyd.
1795 */
1796 static void start_copy(struct dm_snap_pending_exception *pe)
1797 {
1798 struct dm_snapshot *s = pe->snap;
1799 struct dm_io_region src, dest;
1800 struct block_device *bdev = s->origin->bdev;
1801 sector_t dev_size;
1802
1803 dev_size = get_dev_size(bdev);
1804
1805 src.bdev = bdev;
1806 src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
1807 src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
1808
1809 dest.bdev = s->cow->bdev;
1810 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
1811 dest.count = src.count;
1812
1813 /* Hand over to kcopyd */
1814 account_start_copy(s);
1815 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
1816 }
1817
1818 static void full_bio_end_io(struct bio *bio)
1819 {
1820 void *callback_data = bio->bi_private;
1821
1822 dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
1823 }
1824
1825 static void start_full_bio(struct dm_snap_pending_exception *pe,
1826 struct bio *bio)
1827 {
1828 struct dm_snapshot *s = pe->snap;
1829 void *callback_data;
1830
1831 pe->full_bio = bio;
1832 pe->full_bio_end_io = bio->bi_end_io;
1833
1834 account_start_copy(s);
1835 callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
1836 copy_callback, pe);
1837
1838 bio->bi_end_io = full_bio_end_io;
1839 bio->bi_private = callback_data;
1840
1841 submit_bio_noacct(bio);
1842 }
1843
1844 static struct dm_snap_pending_exception *
1845 __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
1846 {
1847 struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
1848
1849 if (!e)
1850 return NULL;
1851
1852 return container_of(e, struct dm_snap_pending_exception, e);
1853 }
1854
1855 /*
1856 * Inserts a pending exception into the pending table.
1857 *
1858 * NOTE: a write lock must be held on the chunk's pending exception table slot
1859 * before calling this.
1860 */
1861 static struct dm_snap_pending_exception *
1862 __insert_pending_exception(struct dm_snapshot *s,
1863 struct dm_snap_pending_exception *pe, chunk_t chunk)
1864 {
1865 pe->e.old_chunk = chunk;
1866 bio_list_init(&pe->origin_bios);
1867 bio_list_init(&pe->snapshot_bios);
1868 pe->started = 0;
1869 pe->full_bio = NULL;
1870
1871 spin_lock(&s->pe_allocation_lock);
1872 if (s->store->type->prepare_exception(s->store, &pe->e)) {
1873 spin_unlock(&s->pe_allocation_lock);
1874 free_pending_exception(pe);
1875 return NULL;
1876 }
1877
1878 pe->exception_sequence = s->exception_start_sequence++;
1879 spin_unlock(&s->pe_allocation_lock);
1880
1881 dm_insert_exception(&s->pending, &pe->e);
1882
1883 return pe;
1884 }
1885
1886 /*
1887 * Looks to see if this snapshot already has a pending exception
1888 * for this chunk, otherwise it allocates a new one and inserts
1889 * it into the pending table.
1890 *
1891 * NOTE: a write lock must be held on the chunk's pending exception table slot
1892 * before calling this.
1893 */
1894 static struct dm_snap_pending_exception *
1895 __find_pending_exception(struct dm_snapshot *s,
1896 struct dm_snap_pending_exception *pe, chunk_t chunk)
1897 {
1898 struct dm_snap_pending_exception *pe2;
1899
1900 pe2 = __lookup_pending_exception(s, chunk);
1901 if (pe2) {
1902 free_pending_exception(pe);
1903 return pe2;
1904 }
1905
1906 return __insert_pending_exception(s, pe, chunk);
1907 }
1908
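/*
 * Redirect a snapshot bio into the COW device.  For illustration, with
 * 8-sector chunks (chunk_mask 7): a bio at sector 75 lies in chunk 9 at
 * offset 3; if that chunk is remapped to new chunk 100, the bio is sent
 * to sector 100 * 8 + 3 = 803 of the COW device.
 */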
1909 static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
1910 struct bio *bio, chunk_t chunk)
1911 {
1912 bio_set_dev(bio, s->cow->bdev);
1913 bio->bi_iter.bi_sector =
1914 chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
1915 (chunk - e->old_chunk)) +
1916 (bio->bi_iter.bi_sector & s->store->chunk_mask);
1917 }
1918
1919 static void zero_callback(int read_err, unsigned long write_err, void *context)
1920 {
1921 struct bio *bio = context;
1922 struct dm_snapshot *s = bio->bi_private;
1923
1924 account_end_copy(s);
1925 bio->bi_status = write_err ? BLK_STS_IOERR : 0;
1926 bio_endio(bio);
1927 }
1928
zero_exception(struct dm_snapshot * s,struct dm_exception * e,struct bio * bio,chunk_t chunk)1929 static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
1930 struct bio *bio, chunk_t chunk)
1931 {
1932 struct dm_io_region dest;
1933
1934 dest.bdev = s->cow->bdev;
1935 dest.sector = bio->bi_iter.bi_sector;
1936 dest.count = s->store->chunk_size;
1937
1938 account_start_copy(s);
1939 WARN_ON_ONCE(bio->bi_private);
1940 bio->bi_private = s;
1941 dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
1942 }
1943
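/*
 * True if the bio spans exactly one whole chunk.  Full-chunk writes can be
 * handled by start_full_bio() and full-chunk discards by zero_exception().
 */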
static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(s->store->chunk_size << SECTOR_SHIFT);
}

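/*
 * Map a bio submitted to the snapshot device.  Chunks that are already
 * remapped are redirected to the COW device; reads of chunks that are not
 * yet remapped fall through to the origin.  A write to an un-remapped
 * chunk first creates a pending exception so the original data is
 * preserved (or, for a full-chunk write, the bio itself is used via
 * start_full_bio()).
 */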
static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;
	struct dm_exception_table_lock lock;

	init_tracked_chunk(bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
	dm_exception_table_lock_init(s, chunk, &lock);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return DM_MAPIO_KILL;

	if (bio_data_dir(bio) == WRITE) {
		while (unlikely(!wait_for_in_progress(s, false)))
			; /* wait_for_in_progress() has slept */
	}

	down_read(&s->lock);
	dm_exception_table_lock(&lock);

	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
	    bio_data_dir(bio) == WRITE)) {
		r = DM_MAPIO_KILL;
		goto out_unlock;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
			/*
			 * passdown discard to origin (without triggering
			 * snapshot exceptions via do_origin; doing so would
			 * defeat the goal of freeing space in origin that is
			 * implied by the "discard_passdown_origin" feature)
			 */
			bio_set_dev(bio, s->origin->bdev);
			track_chunk(s, bio, chunk);
			goto out_unlock;
		}
		/* discard to snapshot (target_bio_nr == 0) zeroes exceptions */
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
		    io_overlaps_chunk(s, bio)) {
			dm_exception_table_unlock(&lock);
			up_read(&s->lock);
			zero_exception(s, e, bio, chunk);
			r = DM_MAPIO_SUBMITTED; /* discard is not issued */
			goto out;
		}
		goto out_unlock;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		/*
		 * If no exception exists, complete discard immediately
		 * otherwise it'll trigger copy-out.
		 */
		bio_endio(bio);
		r = DM_MAPIO_SUBMITTED;
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writable.
	 */
	if (bio_data_dir(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			dm_exception_table_unlock(&lock);
			pe = alloc_pending_exception(s);
			dm_exception_table_lock(&lock);

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				dm_exception_table_unlock(&lock);
				up_read(&s->lock);

				down_write(&s->lock);

				if (s->store->userspace_supports_overflow) {
					if (s->valid && !s->snapshot_overflowed) {
						s->snapshot_overflowed = 1;
						DMERR("Snapshot overflowed: Unable to allocate exception.");
					}
				} else
					__invalidate_snapshot(s, -ENOMEM);
				up_write(&s->lock);

				r = DM_MAPIO_KILL;
				goto out;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started && io_overlaps_chunk(s, bio)) {
			pe->started = 1;

			dm_exception_table_unlock(&lock);
			up_read(&s->lock);

			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by the exception table lock */
			pe->started = 1;

			dm_exception_table_unlock(&lock);
			up_read(&s->lock);

			start_copy(pe);
			goto out;
		}
	} else {
		bio_set_dev(bio, s->origin->bdev);
		track_chunk(s, bio, chunk);
	}

out_unlock:
	dm_exception_table_unlock(&lock);
	up_read(&s->lock);
out:
	return r;
}

/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target. It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device. Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		if (!dm_bio_get_target_bio_nr(bio))
			bio_set_dev(bio, s->origin->bdev);
		else
			bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		/* Once merging, discards no longer effect change */
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_data_dir(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio_set_dev(bio, s->origin->bdev);
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_data_dir(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio_set_dev(bio, s->origin->bdev);

	if (bio_data_dir(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio, false);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}

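/*
 * Drop the chunk-tracking reference taken by track_chunk() once the bio
 * completes.
 */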
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   blk_status_t *error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return DM_ENDIO_DONE;
}

static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}

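/*
 * Before resuming, check that a pending exception handover (from another
 * snapshot target sharing this COW device) can actually take place: the
 * handover source must not be this target and must already be suspended.
 */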
static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
	struct dm_origin *o;
	struct mapped_device *origin_md = NULL;
	bool must_restart_merging = false;

	down_read(&_origins_lock);

	o = __lookup_dm_origin(s->origin->bdev);
	if (o)
		origin_md = dm_table_get_md(o->ti->table);
	if (!origin_md) {
		(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
		if (snap_merging)
			origin_md = dm_table_get_md(snap_merging->ti->table);
	}
	if (origin_md == dm_table_get_md(ti->table))
		origin_md = NULL;
	if (origin_md) {
		if (dm_hold(origin_md))
			origin_md = NULL;
	}

	up_read(&_origins_lock);

	if (origin_md) {
		dm_internal_suspend_fast(origin_md);
		if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
			must_restart_merging = true;
			stop_merge(snap_merging);
		}
	}

	down_read(&_origins_lock);

	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}

	up_read(&_origins_lock);

	if (origin_md) {
		if (must_restart_merging)
			start_merge(snap_merging);
		dm_internal_resume_fast(origin_md);
		dm_put(origin_md);
	}

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}

static void snapshot_status(struct dm_target *ti, status_type_t type,
			    unsigned int status_flags, char *result, unsigned int maxlen)
{
	unsigned int sz = 0;
	struct dm_snapshot *snap = ti->private;
	unsigned int num_features;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else if (snap->snapshot_overflowed)
			DMEMIT("Overflow");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			} else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		sz += snap->store->type->status(snap->store, type, result + sz,
						maxlen - sz);
		num_features = snap->discard_zeroes_cow + snap->discard_passdown_origin;
		if (num_features) {
			DMEMIT(" %u", num_features);
			if (snap->discard_zeroes_cow)
				DMEMIT(" discard_zeroes_cow");
			if (snap->discard_passdown_origin)
				DMEMIT(" discard_passdown_origin");
		}
		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",snap_origin_name=%s", snap->origin->name);
		DMEMIT(",snap_cow_name=%s", snap->cow->name);
		DMEMIT(",snap_valid=%c", snap->valid ? 'y' : 'n');
		DMEMIT(",snap_merge_failed=%c", snap->merge_failed ? 'y' : 'n');
		DMEMIT(",snapshot_overflowed=%c", snap->snapshot_overflowed ? 'y' : 'n');
		DMEMIT(";");
		break;
	}
}

static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}

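/*
 * When discard_zeroes_cow is enabled, advertise chunk-sized discard limits
 * so incoming discards are split on chunk boundaries before they reach
 * snapshot_map() and snapshot_merge_map().
 */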
static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_snapshot *snap = ti->private;

	if (snap->discard_zeroes_cow) {
		struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

		down_read(&_origins_lock);

		(void) __find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, NULL);
		if (snap_src && snap_dest)
			snap = snap_src;

		/* All discards are split on chunk_size boundary */
		limits->discard_granularity = snap->store->chunk_size;
		limits->max_discard_sectors = snap->store->chunk_size;

		up_read(&_origins_lock);
	}
}

/*
 *---------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------
 */
/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored. The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe, *pe2;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	struct dm_exception_table_lock lock;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry(snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			continue;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);
		dm_exception_table_lock_init(snap, chunk, &lock);

		down_read(&snap->lock);
		dm_exception_table_lock(&lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			/*
			 * Check exception table to see if block is already
			 * remapped in this snapshot and trigger an exception
			 * if not.
			 */
			e = dm_lookup_exception(&snap->complete, chunk);
			if (e)
				goto next_snapshot;

			dm_exception_table_unlock(&lock);
			pe = alloc_pending_exception(snap);
			dm_exception_table_lock(&lock);

			pe2 = __lookup_pending_exception(snap, chunk);

			if (!pe2) {
				e = dm_lookup_exception(&snap->complete, chunk);
				if (e) {
					free_pending_exception(pe);
					goto next_snapshot;
				}

				pe = __insert_pending_exception(snap, pe, chunk);
				if (!pe) {
					dm_exception_table_unlock(&lock);
					up_read(&snap->lock);

					invalidate_snapshot(snap, -ENOMEM);
					continue;
				}
			} else {
				free_pending_exception(pe);
				pe = pe2;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		dm_exception_table_unlock(&lock);
		up_read(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
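/*
 * If a snapshot has too much COW I/O in flight, wait_for_in_progress()
 * releases _origins_lock and sleeps, so the lookup is restarted from
 * scratch: the set of snapshots on the origin may have changed meanwhile.
 */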
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

again:
	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o) {
		if (limit) {
			struct dm_snapshot *s;

			list_for_each_entry(s, &o->snapshots, list)
				if (unlikely(!wait_for_in_progress(s, true)))
					goto again;
		}

		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	}
	up_read(&_origins_lock);

	return r;
}

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned int size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
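/*
 * Illustrative example (device names are placeholders):
 *
 *   dmsetup create base-origin --table \
 *     "0 $(blockdev --getsz /dev/vg/base) snapshot-origin /dev/vg/base"
 */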
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_origin *o;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
	if (!o) {
		ti->error = "Cannot allocate private origin structure";
		r = -ENOMEM;
		goto bad_alloc;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
	if (r) {
		ti->error = "Cannot get target device";
		goto bad_open;
	}

	o->ti = ti;
	ti->private = o;
	ti->num_flush_bios = 1;

	return 0;

bad_open:
	kfree(o);
bad_alloc:
	return r;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	dm_put_device(ti, o->dev);
	kfree(o);
}

static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_origin *o = ti->private;
	unsigned int available_sectors;

	bio_set_dev(bio, o->dev->bdev);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH))
		return DM_MAPIO_REMAPPED;

	if (bio_data_dir(bio) != WRITE)
		return DM_MAPIO_REMAPPED;

	available_sectors = o->split_boundary -
		((unsigned int)bio->bi_iter.bi_sector & (o->split_boundary - 1));

	if (bio_sectors(bio) > available_sectors)
		dm_accept_partial_bio(bio, available_sectors);

	/* Only tell snapshots if this is a write */
	return do_origin(o->dev, bio, true);
}

/*
 * Set the target "max_io_len" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);

	down_write(&_origins_lock);
	__insert_dm_origin(o);
	up_write(&_origins_lock);
}

static void origin_postsuspend(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	down_write(&_origins_lock);
	__remove_dm_origin(o);
	up_write(&_origins_lock);
}

static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct dm_origin *o = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", o->dev->name);
		break;
	case STATUSTYPE_IMA:
		result[0] = '\0';
		break;
	}
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_origin *o = ti->private;

	return fn(ti, o->dev, 0, ti->len, data);
}

static struct target_type origin_target = {
	.name = "snapshot-origin",
	.version = {1, 9, 0},
	.module = THIS_MODULE,
	.ctr = origin_ctr,
	.dtr = origin_dtr,
	.map = origin_map,
	.resume = origin_resume,
	.postsuspend = origin_postsuspend,
	.status = origin_status,
	.iterate_devices = origin_iterate_devices,
};

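/*
 * Illustrative snapshot table line (arguments are
 * <origin> <COW device> <persistent?> <chunksize>, per
 * Documentation/admin-guide/device-mapper/snapshot.rst):
 *
 *   "0 <origin sectors> snapshot /dev/vg/base /dev/vg/base-cow P 8"
 */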
static struct target_type snapshot_target = {
	.name = "snapshot",
	.version = {1, 16, 0},
	.module = THIS_MODULE,
	.ctr = snapshot_ctr,
	.dtr = snapshot_dtr,
	.map = snapshot_map,
	.end_io = snapshot_end_io,
	.preresume = snapshot_preresume,
	.resume = snapshot_resume,
	.status = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
	.io_hints = snapshot_io_hints,
};

static struct target_type merge_target = {
	.name = dm_snapshot_merge_target_name,
	.version = {1, 5, 0},
	.module = THIS_MODULE,
	.ctr = snapshot_ctr,
	.dtr = snapshot_dtr,
	.map = snapshot_merge_map,
	.end_io = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume = snapshot_preresume,
	.resume = snapshot_merge_resume,
	.status = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
	.io_hints = snapshot_io_hints,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	return 0;

bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	kmem_cache_destroy(pending_cache);
bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_exception_store_exit();

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");