1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2001 Sistina Software (UK) Limited.
4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5 *
6 * This file is released under the GPL.
7 */
8
9 #include "dm-core.h"
10 #include "dm-rq.h"
11
12 #include <linux/module.h>
13 #include <linux/vmalloc.h>
14 #include <linux/blkdev.h>
15 #include <linux/blk-integrity.h>
16 #include <linux/namei.h>
17 #include <linux/ctype.h>
18 #include <linux/string.h>
19 #include <linux/slab.h>
20 #include <linux/interrupt.h>
21 #include <linux/mutex.h>
22 #include <linux/delay.h>
23 #include <linux/atomic.h>
24 #include <linux/blk-mq.h>
25 #include <linux/mount.h>
26 #include <linux/dax.h>
27
28 #define DM_MSG_PREFIX "table"
29
30 #define NODE_SIZE L1_CACHE_BYTES
31 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
32 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
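
/*
 * Each btree node packs its keys into one cache line. As a sketch (the
 * exact numbers depend on L1_CACHE_BYTES and sizeof(sector_t)): with a
 * 64-byte cache line and 8-byte sectors a node holds 8 keys and fans out
 * to 9 children.
 */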
33
34 /*
35 * Similar to ceiling(log_base(n))
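 * e.g. int_log(64, 4) == 3: 64 -> 16 -> 4 -> 1 in three divisions.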
36 */
37 static unsigned int int_log(unsigned int n, unsigned int base)
38 {
39 int result = 0;
40
41 while (n > 1) {
42 n = dm_div_up(n, base);
43 result++;
44 }
45
46 return result;
47 }
48
49 /*
50 * Calculate the index of the child node for the n'th node's k'th key.
51 */
52 static inline unsigned int get_child(unsigned int n, unsigned int k)
53 {
54 return (n * CHILDREN_PER_NODE) + k;
55 }
56
57 /*
58 * Return the n'th node of level l from table t.
59 */
60 static inline sector_t *get_node(struct dm_table *t,
61 unsigned int l, unsigned int n)
62 {
63 return t->index[l] + (n * KEYS_PER_NODE);
64 }
65
66 /*
67 * Return the highest key that you could lookup from the n'th
68 * node on level l of the btree.
69 */
70 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
71 {
72 for (; l < t->depth - 1; l++)
73 n = get_child(n, CHILDREN_PER_NODE - 1);
74
75 if (n >= t->counts[l])
76 return (sector_t) -1;
77
78 return get_node(t, l, n)[KEYS_PER_NODE - 1];
79 }
80
81 /*
82 * Fills in a level of the btree based on the highs of the level
83 * below it.
84 */
85 static int setup_btree_index(unsigned int l, struct dm_table *t)
86 {
87 unsigned int n, k;
88 sector_t *node;
89
90 for (n = 0U; n < t->counts[l]; n++) {
91 node = get_node(t, l, n);
92
93 for (k = 0U; k < KEYS_PER_NODE; k++)
94 node[k] = high(t, l + 1, get_child(n, k));
95 }
96
97 return 0;
98 }
99
100 /*
101 * highs and targets are managed as dynamic arrays during a
102 * table load.
103 */
104 static int alloc_targets(struct dm_table *t, unsigned int num)
105 {
106 sector_t *n_highs;
107 struct dm_target *n_targets;
108
109 /*
110 * Allocate both the target array and offset array at once.
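 * The layout is num sector_t "highs" entries followed by num
 * struct dm_target entries; t->targets points just past the highs array.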
111 */
112 n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
113 GFP_KERNEL);
114 if (!n_highs)
115 return -ENOMEM;
116
117 n_targets = (struct dm_target *) (n_highs + num);
118
119 memset(n_highs, -1, sizeof(*n_highs) * num);
120 kvfree(t->highs);
121
122 t->num_allocated = num;
123 t->highs = n_highs;
124 t->targets = n_targets;
125
126 return 0;
127 }
128
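/*
 * Allocate an empty table. num_targets is only a sizing hint: zero means
 * "use the default" and any value is rounded up to a multiple of
 * KEYS_PER_NODE before the highs/targets arrays are allocated.
 */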
129 int dm_table_create(struct dm_table **result, fmode_t mode,
130 unsigned int num_targets, struct mapped_device *md)
131 {
132 struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
133
134 if (!t)
135 return -ENOMEM;
136
137 INIT_LIST_HEAD(&t->devices);
138
139 if (!num_targets)
140 num_targets = KEYS_PER_NODE;
141
142 num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
143
144 if (!num_targets) {
145 kfree(t);
146 return -ENOMEM;
147 }
148
149 if (alloc_targets(t, num_targets)) {
150 kfree(t);
151 return -ENOMEM;
152 }
153
154 t->type = DM_TYPE_NONE;
155 t->mode = mode;
156 t->md = md;
157 *result = t;
158 return 0;
159 }
160
161 static void free_devices(struct list_head *devices, struct mapped_device *md)
162 {
163 struct list_head *tmp, *next;
164
165 list_for_each_safe(tmp, next, devices) {
166 struct dm_dev_internal *dd =
167 list_entry(tmp, struct dm_dev_internal, list);
168 DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
169 dm_device_name(md), dd->dm_dev->name);
170 dm_put_table_device(md, dd->dm_dev);
171 kfree(dd);
172 }
173 }
174
175 static void dm_table_destroy_crypto_profile(struct dm_table *t);
176
177 void dm_table_destroy(struct dm_table *t)
178 {
179 if (!t)
180 return;
181
182 /* free the indexes */
183 if (t->depth >= 2)
184 kvfree(t->index[t->depth - 2]);
185
186 /* free the targets */
187 for (unsigned int i = 0; i < t->num_targets; i++) {
188 struct dm_target *ti = dm_table_get_target(t, i);
189
190 if (ti->type->dtr)
191 ti->type->dtr(ti);
192
193 dm_put_target_type(ti->type);
194 }
195
196 kvfree(t->highs);
197
198 /* free the device list */
199 free_devices(&t->devices, t->md);
200
201 dm_free_md_mempools(t->mempools);
202
203 dm_table_destroy_crypto_profile(t);
204
205 kfree(t);
206 }
207
208 /*
209 * See if we've already got a device in the list.
210 */
211 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
212 {
213 struct dm_dev_internal *dd;
214
215 list_for_each_entry(dd, l, list)
216 if (dd->dm_dev->bdev->bd_dev == dev)
217 return dd;
218
219 return NULL;
220 }
221
222 /*
223 * If possible, this checks whether an area of a destination device is invalid.
224 */
225 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
226 sector_t start, sector_t len, void *data)
227 {
228 struct queue_limits *limits = data;
229 struct block_device *bdev = dev->bdev;
230 sector_t dev_size = bdev_nr_sectors(bdev);
231 unsigned short logical_block_size_sectors =
232 limits->logical_block_size >> SECTOR_SHIFT;
233
234 if (!dev_size)
235 return 0;
236
237 if ((start >= dev_size) || (start + len > dev_size)) {
238 DMERR("%s: %pg too small for target: start=%llu, len=%llu, dev_size=%llu",
239 dm_device_name(ti->table->md), bdev,
240 (unsigned long long)start,
241 (unsigned long long)len,
242 (unsigned long long)dev_size);
243 return 1;
244 }
245
246 /*
247 * If the target is mapped to zoned block device(s), check
248 * that the zones are not partially mapped.
249 */
250 if (bdev_is_zoned(bdev)) {
251 unsigned int zone_sectors = bdev_zone_sectors(bdev);
252
253 if (start & (zone_sectors - 1)) {
254 DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
255 dm_device_name(ti->table->md),
256 (unsigned long long)start,
257 zone_sectors, bdev);
258 return 1;
259 }
260
261 /*
262 * Note: The last zone of a zoned block device may be smaller
263 * than other zones. So for a target mapping the end of a
264 * zoned block device with such a zone, len would not be zone
265 * aligned. We do not allow such last smaller zone to be part
266 * of the mapping here to ensure that mappings with multiple
267 * devices do not end up with a smaller zone in the middle of
268 * the sector range.
269 */
270 if (len & (zone_sectors - 1)) {
271 DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
272 dm_device_name(ti->table->md),
273 (unsigned long long)len,
274 zone_sectors, bdev);
275 return 1;
276 }
277 }
278
279 if (logical_block_size_sectors <= 1)
280 return 0;
281
282 if (start & (logical_block_size_sectors - 1)) {
283 DMERR("%s: start=%llu not aligned to h/w logical block size %u of %pg",
284 dm_device_name(ti->table->md),
285 (unsigned long long)start,
286 limits->logical_block_size, bdev);
287 return 1;
288 }
289
290 if (len & (logical_block_size_sectors - 1)) {
291 DMERR("%s: len=%llu not aligned to h/w logical block size %u of %pg",
292 dm_device_name(ti->table->md),
293 (unsigned long long)len,
294 limits->logical_block_size, bdev);
295 return 1;
296 }
297
298 return 0;
299 }
300
301 /*
302 * This upgrades the mode on an already open dm_dev, being
303 * careful to leave things as they were if we fail to reopen the
304 * device and not to touch the existing bdev field in case
305 * it is accessed concurrently.
306 */
307 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
308 struct mapped_device *md)
309 {
310 int r;
311 struct dm_dev *old_dev, *new_dev;
312
313 old_dev = dd->dm_dev;
314
315 r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
316 dd->dm_dev->mode | new_mode, &new_dev);
317 if (r)
318 return r;
319
320 dd->dm_dev = new_dev;
321 dm_put_table_device(md, old_dev);
322
323 return 0;
324 }
325
326 /*
327 * Convert the path to a device
328 */
329 dev_t dm_get_dev_t(const char *path)
330 {
331 dev_t dev;
332
333 if (lookup_bdev(path, &dev))
334 dev = name_to_dev_t(path);
335 return dev;
336 }
337 EXPORT_SYMBOL_GPL(dm_get_dev_t);
338
339 /*
340 * Add a device to the list, or just increment the usage count if
341 * it's already present.
342 */
343 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
344 struct dm_dev **result)
345 {
346 int r;
347 dev_t dev;
348 unsigned int major, minor;
349 char dummy;
350 struct dm_dev_internal *dd;
351 struct dm_table *t = ti->table;
352
353 BUG_ON(!t);
354
355 if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
356 /* Extract the major/minor numbers */
357 dev = MKDEV(major, minor);
358 if (MAJOR(dev) != major || MINOR(dev) != minor)
359 return -EOVERFLOW;
360 } else {
361 dev = dm_get_dev_t(path);
362 if (!dev)
363 return -ENODEV;
364 }
365 if (dev == disk_devt(t->md->disk))
366 return -EINVAL;
367
368 dd = find_device(&t->devices, dev);
369 if (!dd) {
370 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
371 if (!dd)
372 return -ENOMEM;
373
374 r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
375 if (r) {
376 kfree(dd);
377 return r;
378 }
379
380 refcount_set(&dd->count, 1);
381 list_add(&dd->list, &t->devices);
382 goto out;
383
384 } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
385 r = upgrade_mode(dd, mode, t->md);
386 if (r)
387 return r;
388 }
389 refcount_inc(&dd->count);
390 out:
391 *result = dd->dm_dev;
392 return 0;
393 }
394 EXPORT_SYMBOL(dm_get_device);
395
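/*
 * iterate_devices callback that stacks one underlying device's queue
 * limits into the queue_limits passed via @data, warning if the result
 * is misaligned.
 */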
396 static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
397 sector_t start, sector_t len, void *data)
398 {
399 struct queue_limits *limits = data;
400 struct block_device *bdev = dev->bdev;
401 struct request_queue *q = bdev_get_queue(bdev);
402
403 if (unlikely(!q)) {
404 DMWARN("%s: Cannot set limits for nonexistent device %pg",
405 dm_device_name(ti->table->md), bdev);
406 return 0;
407 }
408
409 if (blk_stack_limits(limits, &q->limits,
410 get_start_sect(bdev) + start) < 0)
411 DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
412 "physical_block_size=%u, logical_block_size=%u, "
413 "alignment_offset=%u, start=%llu",
414 dm_device_name(ti->table->md), bdev,
415 q->limits.physical_block_size,
416 q->limits.logical_block_size,
417 q->limits.alignment_offset,
418 (unsigned long long) start << SECTOR_SHIFT);
419 return 0;
420 }
421
422 /*
423 * Decrement a device's use count and remove it if necessary.
424 */
425 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
426 {
427 int found = 0;
428 struct list_head *devices = &ti->table->devices;
429 struct dm_dev_internal *dd;
430
431 list_for_each_entry(dd, devices, list) {
432 if (dd->dm_dev == d) {
433 found = 1;
434 break;
435 }
436 }
437 if (!found) {
438 DMERR("%s: device %s not in table devices list",
439 dm_device_name(ti->table->md), d->name);
440 return;
441 }
442 if (refcount_dec_and_test(&dd->count)) {
443 dm_put_table_device(ti->table->md, d);
444 list_del(&dd->list);
445 kfree(dd);
446 }
447 }
448 EXPORT_SYMBOL(dm_put_device);
449
450 /*
451 * Checks to see if the target joins onto the end of the table.
452 */
453 static int adjoin(struct dm_table *t, struct dm_target *ti)
454 {
455 struct dm_target *prev;
456
457 if (!t->num_targets)
458 return !ti->begin;
459
460 prev = &t->targets[t->num_targets - 1];
461 return (ti->begin == (prev->begin + prev->len));
462 }
463
464 /*
465 * Used to dynamically allocate the arg array.
466 *
467 * We do the first allocation with GFP_NOIO because dm-mpath and dm-thin must
468 * process messages even if some device is suspended. These messages have a
469 * small fixed number of arguments.
470 *
471 * On the other hand, dm-switch needs to process bulk data using messages and
472 * excessive use of GFP_NOIO could cause trouble.
473 */
474 static char **realloc_argv(unsigned int *size, char **old_argv)
475 {
476 char **argv;
477 unsigned int new_size;
478 gfp_t gfp;
479
480 if (*size) {
481 new_size = *size * 2;
482 gfp = GFP_KERNEL;
483 } else {
484 new_size = 8;
485 gfp = GFP_NOIO;
486 }
487 argv = kmalloc_array(new_size, sizeof(*argv), gfp);
488 if (argv && old_argv) {
489 memcpy(argv, old_argv, *size * sizeof(*argv));
490 *size = new_size;
491 }
492
493 kfree(old_argv);
494 return argv;
495 }
496
497 /*
498 * Destructively splits up the argument list to pass to ctr.
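 * e.g. the params string "0 rw verify\ me" becomes argv = {"0", "rw",
 * "verify me"} with *argc == 3 (a backslash quotes the next character).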
499 */
500 int dm_split_args(int *argc, char ***argvp, char *input)
501 {
502 char *start, *end = input, *out, **argv = NULL;
503 unsigned int array_size = 0;
504
505 *argc = 0;
506
507 if (!input) {
508 *argvp = NULL;
509 return 0;
510 }
511
512 argv = realloc_argv(&array_size, argv);
513 if (!argv)
514 return -ENOMEM;
515
516 while (1) {
517 /* Skip whitespace */
518 start = skip_spaces(end);
519
520 if (!*start)
521 break; /* success, we hit the end */
522
523 /* 'out' is used to remove any back-quotes */
524 end = out = start;
525 while (*end) {
526 /* Everything apart from '\0' can be quoted */
527 if (*end == '\\' && *(end + 1)) {
528 *out++ = *(end + 1);
529 end += 2;
530 continue;
531 }
532
533 if (isspace(*end))
534 break; /* end of token */
535
536 *out++ = *end++;
537 }
538
539 /* have we already filled the array ? */
540 if ((*argc + 1) > array_size) {
541 argv = realloc_argv(&array_size, argv);
542 if (!argv)
543 return -ENOMEM;
544 }
545
546 /* we know this is whitespace */
547 if (*end)
548 end++;
549
550 /* terminate the string and put it in the array */
551 *out = '\0';
552 argv[*argc] = start;
553 (*argc)++;
554 }
555
556 *argvp = argv;
557 return 0;
558 }
559
560 /*
561 * Impose necessary and sufficient conditions on a device's table such
562 * that any incoming bio which respects its logical_block_size can be
563 * processed successfully. If it falls across the boundary between
564 * two or more targets, the size of each piece it gets split into must
565 * be compatible with the logical_block_size of the target processing it.
566 */
567 static int validate_hardware_logical_block_alignment(struct dm_table *t,
568 struct queue_limits *limits)
569 {
570 /*
571 * This function uses arithmetic modulo the logical_block_size
572 * (in units of 512-byte sectors).
573 */
574 unsigned short device_logical_block_size_sects =
575 limits->logical_block_size >> SECTOR_SHIFT;
576
577 /*
578 * Offset of the start of the next table entry, mod logical_block_size.
579 */
580 unsigned short next_target_start = 0;
581
582 /*
583 * Given an aligned bio that extends beyond the end of a
584 * target, how many sectors must the next target handle?
585 */
586 unsigned short remaining = 0;
587
588 struct dm_target *ti;
589 struct queue_limits ti_limits;
590 unsigned int i;
591
592 /*
593 * Check each entry in the table in turn.
594 */
595 for (i = 0; i < t->num_targets; i++) {
596 ti = dm_table_get_target(t, i);
597
598 blk_set_stacking_limits(&ti_limits);
599
600 /* combine all target devices' limits */
601 if (ti->type->iterate_devices)
602 ti->type->iterate_devices(ti, dm_set_device_limits,
603 &ti_limits);
604
605 /*
606 * If the remaining sectors fall entirely within this
607 * table entry, are they compatible with its logical_block_size?
608 */
609 if (remaining < ti->len &&
610 remaining & ((ti_limits.logical_block_size >>
611 SECTOR_SHIFT) - 1))
612 break; /* Error */
613
614 next_target_start =
615 (unsigned short) ((next_target_start + ti->len) &
616 (device_logical_block_size_sects - 1));
617 remaining = next_target_start ?
618 device_logical_block_size_sects - next_target_start : 0;
619 }
620
621 if (remaining) {
622 DMERR("%s: table line %u (start sect %llu len %llu) "
623 "not aligned to h/w logical block size %u",
624 dm_device_name(t->md), i,
625 (unsigned long long) ti->begin,
626 (unsigned long long) ti->len,
627 limits->logical_block_size);
628 return -EINVAL;
629 }
630
631 return 0;
632 }
633
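/*
 * Add one target (one "start len type args..." table line) to the table:
 * look up the target type, enforce the singleton/immutable/writeable
 * constraints, check that the target adjoins the previous one, then run
 * the target's constructor on the split argument list.
 */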
634 int dm_table_add_target(struct dm_table *t, const char *type,
635 sector_t start, sector_t len, char *params)
636 {
637 int r = -EINVAL, argc;
638 char **argv;
639 struct dm_target *ti;
640
641 if (t->singleton) {
642 DMERR("%s: target type %s must appear alone in table",
643 dm_device_name(t->md), t->targets->type->name);
644 return -EINVAL;
645 }
646
647 BUG_ON(t->num_targets >= t->num_allocated);
648
649 ti = t->targets + t->num_targets;
650 memset(ti, 0, sizeof(*ti));
651
652 if (!len) {
653 DMERR("%s: zero-length target", dm_device_name(t->md));
654 return -EINVAL;
655 }
656
657 ti->type = dm_get_target_type(type);
658 if (!ti->type) {
659 DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
660 return -EINVAL;
661 }
662
663 if (dm_target_needs_singleton(ti->type)) {
664 if (t->num_targets) {
665 ti->error = "singleton target type must appear alone in table";
666 goto bad;
667 }
668 t->singleton = true;
669 }
670
671 if (dm_target_always_writeable(ti->type) && !(t->mode & FMODE_WRITE)) {
672 ti->error = "target type may not be included in a read-only table";
673 goto bad;
674 }
675
676 if (t->immutable_target_type) {
677 if (t->immutable_target_type != ti->type) {
678 ti->error = "immutable target type cannot be mixed with other target types";
679 goto bad;
680 }
681 } else if (dm_target_is_immutable(ti->type)) {
682 if (t->num_targets) {
683 ti->error = "immutable target type cannot be mixed with other target types";
684 goto bad;
685 }
686 t->immutable_target_type = ti->type;
687 }
688
689 if (dm_target_has_integrity(ti->type))
690 t->integrity_added = 1;
691
692 ti->table = t;
693 ti->begin = start;
694 ti->len = len;
695 ti->error = "Unknown error";
696
697 /*
698 * Does this target adjoin the previous one ?
699 */
700 if (!adjoin(t, ti)) {
701 ti->error = "Gap in table";
702 goto bad;
703 }
704
705 r = dm_split_args(&argc, &argv, params);
706 if (r) {
707 ti->error = "couldn't split parameters";
708 goto bad;
709 }
710
711 r = ti->type->ctr(ti, argc, argv);
712 kfree(argv);
713 if (r)
714 goto bad;
715
716 t->highs[t->num_targets++] = ti->begin + ti->len - 1;
717
718 if (!ti->num_discard_bios && ti->discards_supported)
719 DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
720 dm_device_name(t->md), type);
721
722 if (ti->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
723 static_branch_enable(&swap_bios_enabled);
724
725 return 0;
726
727 bad:
728 DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, ti->error, ERR_PTR(r));
729 dm_put_target_type(ti->type);
730 return r;
731 }
732
733 /*
734 * Target argument parsing helpers.
735 */
736 static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
737 unsigned int *value, char **error, unsigned int grouped)
738 {
739 const char *arg_str = dm_shift_arg(arg_set);
740 char dummy;
741
742 if (!arg_str ||
743 (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
744 (*value < arg->min) ||
745 (*value > arg->max) ||
746 (grouped && arg_set->argc < *value)) {
747 *error = arg->error;
748 return -EINVAL;
749 }
750
751 return 0;
752 }
753
754 int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
755 unsigned int *value, char **error)
756 {
757 return validate_next_arg(arg, arg_set, value, error, 0);
758 }
759 EXPORT_SYMBOL(dm_read_arg);
760
761 int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
762 unsigned int *value, char **error)
763 {
764 return validate_next_arg(arg, arg_set, value, error, 1);
765 }
766 EXPORT_SYMBOL(dm_read_arg_group);
767
768 const char *dm_shift_arg(struct dm_arg_set *as)
769 {
770 char *r;
771
772 if (as->argc) {
773 as->argc--;
774 r = *as->argv;
775 as->argv++;
776 return r;
777 }
778
779 return NULL;
780 }
781 EXPORT_SYMBOL(dm_shift_arg);
782
783 void dm_consume_args(struct dm_arg_set *as, unsigned int num_args)
784 {
785 BUG_ON(as->argc < num_args);
786 as->argc -= num_args;
787 as->argv += num_args;
788 }
789 EXPORT_SYMBOL(dm_consume_args);
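
/*
 * A minimal sketch of how a target constructor might consume optional
 * feature arguments with the helpers above (the bounds and names below
 * are illustrative, not taken from any particular target):
 *
 *	static const struct dm_arg _args[] = {
 *		{ 0, 4, "Invalid number of feature args" },
 *	};
 *	unsigned int num_features;
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *
 *	if (dm_read_arg_group(_args, &as, &num_features, &ti->error))
 *		return -EINVAL;
 *	while (num_features--) {
 *		const char *feature = dm_shift_arg(&as);
 *		...
 *	}
 */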
790
791 static bool __table_type_bio_based(enum dm_queue_mode table_type)
792 {
793 return (table_type == DM_TYPE_BIO_BASED ||
794 table_type == DM_TYPE_DAX_BIO_BASED);
795 }
796
797 static bool __table_type_request_based(enum dm_queue_mode table_type)
798 {
799 return table_type == DM_TYPE_REQUEST_BASED;
800 }
801
802 void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
803 {
804 t->type = type;
805 }
806 EXPORT_SYMBOL_GPL(dm_table_set_type);
807
808 /* validate the dax capability of the target device span */
809 static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
810 sector_t start, sector_t len, void *data)
811 {
812 if (dev->dax_dev)
813 return false;
814
815 DMDEBUG("%pg: error: dax unsupported by block device", dev->bdev);
816 return true;
817 }
818
819 /* Check devices support synchronous DAX */
820 static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
821 sector_t start, sector_t len, void *data)
822 {
823 return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
824 }
825
826 static bool dm_table_supports_dax(struct dm_table *t,
827 iterate_devices_callout_fn iterate_fn)
828 {
829 /* Ensure that all targets support DAX. */
830 for (unsigned int i = 0; i < t->num_targets; i++) {
831 struct dm_target *ti = dm_table_get_target(t, i);
832
833 if (!ti->type->direct_access)
834 return false;
835
836 if (!ti->type->iterate_devices ||
837 ti->type->iterate_devices(ti, iterate_fn, NULL))
838 return false;
839 }
840
841 return true;
842 }
843
844 static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
845 sector_t start, sector_t len, void *data)
846 {
847 struct block_device *bdev = dev->bdev;
848 struct request_queue *q = bdev_get_queue(bdev);
849
850 /* request-based cannot stack on partitions! */
851 if (bdev_is_partition(bdev))
852 return false;
853
854 return queue_is_mq(q);
855 }
856
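/*
 * Decide whether this table is bio-based or request-based (or verify a
 * type a target already set). Bio-based tables are upgraded to DAX
 * bio-based when every target and device supports DAX; request-based
 * tables must consist of a single immutable target stacked on whole
 * blk-mq devices.
 */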
857 static int dm_table_determine_type(struct dm_table *t)
858 {
859 unsigned int bio_based = 0, request_based = 0, hybrid = 0;
860 struct dm_target *ti;
861 struct list_head *devices = dm_table_get_devices(t);
862 enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
863
864 if (t->type != DM_TYPE_NONE) {
865 /* target already set the table's type */
866 if (t->type == DM_TYPE_BIO_BASED) {
867 /* possibly upgrade to a variant of bio-based */
868 goto verify_bio_based;
869 }
870 BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
871 goto verify_rq_based;
872 }
873
874 for (unsigned int i = 0; i < t->num_targets; i++) {
875 ti = dm_table_get_target(t, i);
876 if (dm_target_hybrid(ti))
877 hybrid = 1;
878 else if (dm_target_request_based(ti))
879 request_based = 1;
880 else
881 bio_based = 1;
882
883 if (bio_based && request_based) {
884 DMERR("Inconsistent table: different target types can't be mixed up");
885 return -EINVAL;
886 }
887 }
888
889 if (hybrid && !bio_based && !request_based) {
890 /*
891 * The targets can work either way.
892 * Determine the type from the live device.
893 * Default to bio-based if device is new.
894 */
895 if (__table_type_request_based(live_md_type))
896 request_based = 1;
897 else
898 bio_based = 1;
899 }
900
901 if (bio_based) {
902 verify_bio_based:
903 /* We must use this table as bio-based */
904 t->type = DM_TYPE_BIO_BASED;
905 if (dm_table_supports_dax(t, device_not_dax_capable) ||
906 (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
907 t->type = DM_TYPE_DAX_BIO_BASED;
908 }
909 return 0;
910 }
911
912 BUG_ON(!request_based); /* No targets in this table */
913
914 t->type = DM_TYPE_REQUEST_BASED;
915
916 verify_rq_based:
917 /*
918 * Request-based dm supports only tables that have a single target now.
919 * To support multiple targets, request splitting support is needed,
920 * and that needs lots of changes in the block-layer.
921 * (e.g. request completion process for partial completion.)
922 */
923 if (t->num_targets > 1) {
924 DMERR("request-based DM doesn't support multiple targets");
925 return -EINVAL;
926 }
927
928 if (list_empty(devices)) {
929 int srcu_idx;
930 struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
931
932 /* inherit live table's type */
933 if (live_table)
934 t->type = live_table->type;
935 dm_put_live_table(t->md, srcu_idx);
936 return 0;
937 }
938
939 ti = dm_table_get_immutable_target(t);
940 if (!ti) {
941 DMERR("table load rejected: immutable target is required");
942 return -EINVAL;
943 } else if (ti->max_io_len) {
944 DMERR("table load rejected: immutable target that splits IO is not supported");
945 return -EINVAL;
946 }
947
948 /* Non-request-stackable devices can't be used for request-based dm */
949 if (!ti->type->iterate_devices ||
950 !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
951 DMERR("table load rejected: including non-request-stackable devices");
952 return -EINVAL;
953 }
954
955 return 0;
956 }
957
958 enum dm_queue_mode dm_table_get_type(struct dm_table *t)
959 {
960 return t->type;
961 }
962
963 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
964 {
965 return t->immutable_target_type;
966 }
967
968 struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
969 {
970 /* Immutable target is implicitly a singleton */
971 if (t->num_targets > 1 ||
972 !dm_target_is_immutable(t->targets[0].type))
973 return NULL;
974
975 return t->targets;
976 }
977
978 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
979 {
980 for (unsigned int i = 0; i < t->num_targets; i++) {
981 struct dm_target *ti = dm_table_get_target(t, i);
982
983 if (dm_target_is_wildcard(ti->type))
984 return ti;
985 }
986
987 return NULL;
988 }
989
990 bool dm_table_bio_based(struct dm_table *t)
991 {
992 return __table_type_bio_based(dm_table_get_type(t));
993 }
994
995 bool dm_table_request_based(struct dm_table *t)
996 {
997 return __table_type_request_based(dm_table_get_type(t));
998 }
999
1000 static bool dm_table_supports_poll(struct dm_table *t);
1001
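/*
 * Allocate the table's bio sets. Request-based tables only initialise
 * pools->bs (front-padded for struct dm_rq_clone_bio_info); bio-based
 * tables initialise both pools->io_bs and pools->bs, with front padding
 * sized for the largest per_io_data_size of any target.
 */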
1002 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
1003 {
1004 enum dm_queue_mode type = dm_table_get_type(t);
1005 unsigned int per_io_data_size = 0, front_pad, io_front_pad;
1006 unsigned int min_pool_size = 0, pool_size;
1007 struct dm_md_mempools *pools;
1008
1009 if (unlikely(type == DM_TYPE_NONE)) {
1010 DMERR("no table type is set, can't allocate mempools");
1011 return -EINVAL;
1012 }
1013
1014 pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
1015 if (!pools)
1016 return -ENOMEM;
1017
1018 if (type == DM_TYPE_REQUEST_BASED) {
1019 pool_size = dm_get_reserved_rq_based_ios();
1020 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
1021 goto init_bs;
1022 }
1023
1024 for (unsigned int i = 0; i < t->num_targets; i++) {
1025 struct dm_target *ti = dm_table_get_target(t, i);
1026
1027 per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
1028 min_pool_size = max(min_pool_size, ti->num_flush_bios);
1029 }
1030 pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
1031 front_pad = roundup(per_io_data_size,
1032 __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
1033
1034 io_front_pad = roundup(per_io_data_size,
1035 __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
1036 if (bioset_init(&pools->io_bs, pool_size, io_front_pad,
1037 dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0))
1038 goto out_free_pools;
1039 if (t->integrity_supported &&
1040 bioset_integrity_create(&pools->io_bs, pool_size))
1041 goto out_free_pools;
1042 init_bs:
1043 if (bioset_init(&pools->bs, pool_size, front_pad, 0))
1044 goto out_free_pools;
1045 if (t->integrity_supported &&
1046 bioset_integrity_create(&pools->bs, pool_size))
1047 goto out_free_pools;
1048
1049 t->mempools = pools;
1050 return 0;
1051
1052 out_free_pools:
1053 dm_free_md_mempools(pools);
1054 return -ENOMEM;
1055 }
1056
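/*
 * Allocate all internal (non-leaf) levels of the btree in one array and
 * fill them in bottom-up from the leaf level's highs.
 */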
1057 static int setup_indexes(struct dm_table *t)
1058 {
1059 int i;
1060 unsigned int total = 0;
1061 sector_t *indexes;
1062
1063 /* allocate the space for *all* the indexes */
1064 for (i = t->depth - 2; i >= 0; i--) {
1065 t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
1066 total += t->counts[i];
1067 }
1068
1069 indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
1070 if (!indexes)
1071 return -ENOMEM;
1072
1073 /* set up internal nodes, bottom-up */
1074 for (i = t->depth - 2; i >= 0; i--) {
1075 t->index[i] = indexes;
1076 indexes += (KEYS_PER_NODE * t->counts[i]);
1077 setup_btree_index(i, t);
1078 }
1079
1080 return 0;
1081 }
1082
1083 /*
1084 * Builds the btree to index the map.
1085 */
1086 static int dm_table_build_index(struct dm_table *t)
1087 {
1088 int r = 0;
1089 unsigned int leaf_nodes;
1090
1091 /* how many indexes will the btree have ? */
1092 leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
1093 t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1094
1095 /* leaf layer has already been set up */
1096 t->counts[t->depth - 1] = leaf_nodes;
1097 t->index[t->depth - 1] = t->highs;
1098
1099 if (t->depth >= 2)
1100 r = setup_indexes(t);
1101
1102 return r;
1103 }
1104
1105 static bool integrity_profile_exists(struct gendisk *disk)
1106 {
1107 return !!blk_get_integrity(disk);
1108 }
1109
1110 /*
1111 * Get a disk whose integrity profile reflects the table's profile.
1112 * Returns NULL if integrity support was inconsistent or unavailable.
1113 */
1114 static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
1115 {
1116 struct list_head *devices = dm_table_get_devices(t);
1117 struct dm_dev_internal *dd = NULL;
1118 struct gendisk *prev_disk = NULL, *template_disk = NULL;
1119
1120 for (unsigned int i = 0; i < t->num_targets; i++) {
1121 struct dm_target *ti = dm_table_get_target(t, i);
1122
1123 if (!dm_target_passes_integrity(ti->type))
1124 goto no_integrity;
1125 }
1126
1127 list_for_each_entry(dd, devices, list) {
1128 template_disk = dd->dm_dev->bdev->bd_disk;
1129 if (!integrity_profile_exists(template_disk))
1130 goto no_integrity;
1131 else if (prev_disk &&
1132 blk_integrity_compare(prev_disk, template_disk) < 0)
1133 goto no_integrity;
1134 prev_disk = template_disk;
1135 }
1136
1137 return template_disk;
1138
1139 no_integrity:
1140 if (prev_disk)
1141 DMWARN("%s: integrity not set: %s and %s profile mismatch",
1142 dm_device_name(t->md),
1143 prev_disk->disk_name,
1144 template_disk->disk_name);
1145 return NULL;
1146 }
1147
1148 /*
1149 * Register the mapped device for blk_integrity support if the
1150 * underlying devices have an integrity profile. But all devices may
1151 * not have matching profiles (checking all devices isn't reliable
1152 * during table load because this table may use other DM device(s) which
1153 * must be resumed before they will have an initialized integrity
1154 * profile). Consequently, stacked DM devices force a 2 stage integrity
1155 * profile validation: First pass during table load, final pass during
1156 * resume.
1157 */
1158 static int dm_table_register_integrity(struct dm_table *t)
1159 {
1160 struct mapped_device *md = t->md;
1161 struct gendisk *template_disk = NULL;
1162
1163 /* If target handles integrity itself do not register it here. */
1164 if (t->integrity_added)
1165 return 0;
1166
1167 template_disk = dm_table_get_integrity_disk(t);
1168 if (!template_disk)
1169 return 0;
1170
1171 if (!integrity_profile_exists(dm_disk(md))) {
1172 t->integrity_supported = true;
1173 /*
1174 * Register integrity profile during table load; we can do
1175 * this because the final profile must match during resume.
1176 */
1177 blk_integrity_register(dm_disk(md),
1178 blk_get_integrity(template_disk));
1179 return 0;
1180 }
1181
1182 /*
1183 * If DM device already has an initialized integrity
1184 * profile the new profile should not conflict.
1185 */
1186 if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
1187 DMERR("%s: conflict with existing integrity profile: %s profile mismatch",
1188 dm_device_name(t->md),
1189 template_disk->disk_name);
1190 return 1;
1191 }
1192
1193 /* Preserve existing integrity profile */
1194 t->integrity_supported = true;
1195 return 0;
1196 }
1197
1198 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
1199
1200 struct dm_crypto_profile {
1201 struct blk_crypto_profile profile;
1202 struct mapped_device *md;
1203 };
1204
1205 struct dm_keyslot_evict_args {
1206 const struct blk_crypto_key *key;
1207 int err;
1208 };
1209
1210 static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
1211 sector_t start, sector_t len, void *data)
1212 {
1213 struct dm_keyslot_evict_args *args = data;
1214 int err;
1215
1216 err = blk_crypto_evict_key(dev->bdev, args->key);
1217 if (!args->err)
1218 args->err = err;
1219 /* Always try to evict the key from all devices. */
1220 return 0;
1221 }
1222
1223 /*
1224 * When an inline encryption key is evicted from a device-mapper device, evict
1225 * it from all the underlying devices.
1226 */
1227 static int dm_keyslot_evict(struct blk_crypto_profile *profile,
1228 const struct blk_crypto_key *key, unsigned int slot)
1229 {
1230 struct mapped_device *md =
1231 container_of(profile, struct dm_crypto_profile, profile)->md;
1232 struct dm_keyslot_evict_args args = { key };
1233 struct dm_table *t;
1234 int srcu_idx;
1235
1236 t = dm_get_live_table(md, &srcu_idx);
1237 if (!t)
1238 return 0;
1239
1240 for (unsigned int i = 0; i < t->num_targets; i++) {
1241 struct dm_target *ti = dm_table_get_target(t, i);
1242
1243 if (!ti->type->iterate_devices)
1244 continue;
1245 ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
1246 }
1247
1248 dm_put_live_table(md, srcu_idx);
1249 return args.err;
1250 }
1251
1252 static int
1253 device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
1254 sector_t start, sector_t len, void *data)
1255 {
1256 struct blk_crypto_profile *parent = data;
1257 struct blk_crypto_profile *child =
1258 bdev_get_queue(dev->bdev)->crypto_profile;
1259
1260 blk_crypto_intersect_capabilities(parent, child);
1261 return 0;
1262 }
1263
1264 void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
1265 {
1266 struct dm_crypto_profile *dmcp = container_of(profile,
1267 struct dm_crypto_profile,
1268 profile);
1269
1270 if (!profile)
1271 return;
1272
1273 blk_crypto_profile_destroy(profile);
1274 kfree(dmcp);
1275 }
1276
1277 static void dm_table_destroy_crypto_profile(struct dm_table *t)
1278 {
1279 dm_destroy_crypto_profile(t->crypto_profile);
1280 t->crypto_profile = NULL;
1281 }
1282
1283 /*
1284 * Constructs and initializes t->crypto_profile with a crypto profile that
1285 * represents the common set of crypto capabilities of the devices described by
1286 * the dm_table. However, if the constructed crypto profile doesn't support all
1287 * crypto capabilities that are supported by the current mapped_device, it
1288 * returns an error instead, since we don't support removing crypto capabilities
1289 * on table changes. Finally, if the constructed crypto profile is "empty" (has
1290 * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
1291 */
1292 static int dm_table_construct_crypto_profile(struct dm_table *t)
1293 {
1294 struct dm_crypto_profile *dmcp;
1295 struct blk_crypto_profile *profile;
1296 unsigned int i;
1297 bool empty_profile = true;
1298
1299 dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL);
1300 if (!dmcp)
1301 return -ENOMEM;
1302 dmcp->md = t->md;
1303
1304 profile = &dmcp->profile;
1305 blk_crypto_profile_init(profile, 0);
1306 profile->ll_ops.keyslot_evict = dm_keyslot_evict;
1307 profile->max_dun_bytes_supported = UINT_MAX;
1308 memset(profile->modes_supported, 0xFF,
1309 sizeof(profile->modes_supported));
1310
1311 for (i = 0; i < t->num_targets; i++) {
1312 struct dm_target *ti = dm_table_get_target(t, i);
1313
1314 if (!dm_target_passes_crypto(ti->type)) {
1315 blk_crypto_intersect_capabilities(profile, NULL);
1316 break;
1317 }
1318 if (!ti->type->iterate_devices)
1319 continue;
1320 ti->type->iterate_devices(ti,
1321 device_intersect_crypto_capabilities,
1322 profile);
1323 }
1324
1325 if (t->md->queue &&
1326 !blk_crypto_has_capabilities(profile,
1327 t->md->queue->crypto_profile)) {
1328 DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
1329 dm_destroy_crypto_profile(profile);
1330 return -EINVAL;
1331 }
1332
1333 /*
1334 * If the new profile doesn't actually support any crypto capabilities,
1335 * we may as well represent it with a NULL profile.
1336 */
1337 for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) {
1338 if (profile->modes_supported[i]) {
1339 empty_profile = false;
1340 break;
1341 }
1342 }
1343
1344 if (empty_profile) {
1345 dm_destroy_crypto_profile(profile);
1346 profile = NULL;
1347 }
1348
1349 /*
1350 * t->crypto_profile is only set temporarily while the table is being
1351 * set up, and it gets set to NULL after the profile has been
1352 * transferred to the request_queue.
1353 */
1354 t->crypto_profile = profile;
1355
1356 return 0;
1357 }
1358
1359 static void dm_update_crypto_profile(struct request_queue *q,
1360 struct dm_table *t)
1361 {
1362 if (!t->crypto_profile)
1363 return;
1364
1365 /* Make the crypto profile less restrictive. */
1366 if (!q->crypto_profile) {
1367 blk_crypto_register(t->crypto_profile, q);
1368 } else {
1369 blk_crypto_update_capabilities(q->crypto_profile,
1370 t->crypto_profile);
1371 dm_destroy_crypto_profile(t->crypto_profile);
1372 }
1373 t->crypto_profile = NULL;
1374 }
1375
1376 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1377
1378 static int dm_table_construct_crypto_profile(struct dm_table *t)
1379 {
1380 return 0;
1381 }
1382
1383 void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
1384 {
1385 }
1386
1387 static void dm_table_destroy_crypto_profile(struct dm_table *t)
1388 {
1389 }
1390
1391 static void dm_update_crypto_profile(struct request_queue *q,
1392 struct dm_table *t)
1393 {
1394 }
1395
1396 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
1397
1398 /*
1399 * Prepares the table for use by building the indices,
1400 * setting the type, and allocating mempools.
1401 */
1402 int dm_table_complete(struct dm_table *t)
1403 {
1404 int r;
1405
1406 r = dm_table_determine_type(t);
1407 if (r) {
1408 DMERR("unable to determine table type");
1409 return r;
1410 }
1411
1412 r = dm_table_build_index(t);
1413 if (r) {
1414 DMERR("unable to build btrees");
1415 return r;
1416 }
1417
1418 r = dm_table_register_integrity(t);
1419 if (r) {
1420 DMERR("could not register integrity profile.");
1421 return r;
1422 }
1423
1424 r = dm_table_construct_crypto_profile(t);
1425 if (r) {
1426 DMERR("could not construct crypto profile.");
1427 return r;
1428 }
1429
1430 r = dm_table_alloc_md_mempools(t, t->md);
1431 if (r)
1432 DMERR("unable to allocate mempools");
1433
1434 return r;
1435 }
1436
1437 static DEFINE_MUTEX(_event_lock);
1438 void dm_table_event_callback(struct dm_table *t,
1439 void (*fn)(void *), void *context)
1440 {
1441 mutex_lock(&_event_lock);
1442 t->event_fn = fn;
1443 t->event_context = context;
1444 mutex_unlock(&_event_lock);
1445 }
1446
1447 void dm_table_event(struct dm_table *t)
1448 {
1449 mutex_lock(&_event_lock);
1450 if (t->event_fn)
1451 t->event_fn(t->event_context);
1452 mutex_unlock(&_event_lock);
1453 }
1454 EXPORT_SYMBOL(dm_table_event);
1455
1456 inline sector_t dm_table_get_size(struct dm_table *t)
1457 {
1458 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1459 }
1460 EXPORT_SYMBOL(dm_table_get_size);
1461
1462 /*
1463 * Search the btree for the correct target.
1464 *
1465 * Caller should check returned pointer for NULL
1466 * to trap I/O beyond end of device.
1467 */
1468 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1469 {
1470 unsigned int l, n = 0, k = 0;
1471 sector_t *node;
1472
1473 if (unlikely(sector >= dm_table_get_size(t)))
1474 return NULL;
1475
1476 for (l = 0; l < t->depth; l++) {
1477 n = get_child(n, k);
1478 node = get_node(t, l, n);
1479
1480 for (k = 0; k < KEYS_PER_NODE; k++)
1481 if (node[k] >= sector)
1482 break;
1483 }
1484
1485 return &t->targets[(KEYS_PER_NODE * n) + k];
1486 }
1487
1488 static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
1489 sector_t start, sector_t len, void *data)
1490 {
1491 struct request_queue *q = bdev_get_queue(dev->bdev);
1492
1493 return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
1494 }
1495
1496 /*
1497 * type->iterate_devices() should be called when the sanity check needs to
1498 * iterate and check all underlying data devices. iterate_devices() will
1499 * iterate all underlying data devices until it encounters a non-zero return
1500 * code, returned either by the input iterate_devices_callout_fn or by
1501 * iterate_devices() itself internally.
1502 *
1503 * For some target types (e.g. dm-stripe), one call of iterate_devices() may
1504 * iterate multiple underlying devices internally, in which case a non-zero
1505 * return code returned by iterate_devices_callout_fn will stop the iteration
1506 * in advance.
1507 *
1508 * Cases requiring _any_ underlying device supporting some kind of attribute,
1509 * should use the iteration structure like dm_table_any_dev_attr(), or call
1510 * it directly. @func should handle semantics of positive examples, e.g.
1511 * capable of something.
1512 *
1513 * Cases requiring _all_ underlying devices supporting some kind of attribute,
1514 * should use the iteration structure like dm_table_supports_nowait() or
1515 * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
1516 * uses an @anti_func that handles semantics of counterexamples, e.g. not
1517 * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
1518 */
1519 static bool dm_table_any_dev_attr(struct dm_table *t,
1520 iterate_devices_callout_fn func, void *data)
1521 {
1522 for (unsigned int i = 0; i < t->num_targets; i++) {
1523 struct dm_target *ti = dm_table_get_target(t, i);
1524
1525 if (ti->type->iterate_devices &&
1526 ti->type->iterate_devices(ti, func, data))
1527 return true;
1528 }
1529
1530 return false;
1531 }
1532
1533 static int count_device(struct dm_target *ti, struct dm_dev *dev,
1534 sector_t start, sector_t len, void *data)
1535 {
1536 unsigned int *num_devices = data;
1537
1538 (*num_devices)++;
1539
1540 return 0;
1541 }
1542
1543 static bool dm_table_supports_poll(struct dm_table *t)
1544 {
1545 for (unsigned int i = 0; i < t->num_targets; i++) {
1546 struct dm_target *ti = dm_table_get_target(t, i);
1547
1548 if (!ti->type->iterate_devices ||
1549 ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
1550 return false;
1551 }
1552
1553 return true;
1554 }
1555
1556 /*
1557 * Check whether a table has no data devices attached using each
1558 * target's iterate_devices method.
1559 * Returns false if the result is unknown because a target doesn't
1560 * support iterate_devices.
1561 */
1562 bool dm_table_has_no_data_devices(struct dm_table *t)
1563 {
1564 for (unsigned int i = 0; i < t->num_targets; i++) {
1565 struct dm_target *ti = dm_table_get_target(t, i);
1566 unsigned int num_devices = 0;
1567
1568 if (!ti->type->iterate_devices)
1569 return false;
1570
1571 ti->type->iterate_devices(ti, count_device, &num_devices);
1572 if (num_devices)
1573 return false;
1574 }
1575
1576 return true;
1577 }
1578
1579 static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
1580 sector_t start, sector_t len, void *data)
1581 {
1582 struct request_queue *q = bdev_get_queue(dev->bdev);
1583 enum blk_zoned_model *zoned_model = data;
1584
1585 return blk_queue_zoned_model(q) != *zoned_model;
1586 }
1587
1588 /*
1589 * Check the device zoned model based on the target feature flag. If the target
1590 * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
1591 * also accepted but all devices must have the same zoned model. If the target
1592 * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
1593 * zoned model with all zoned devices having the same zone size.
1594 */
1595 static bool dm_table_supports_zoned_model(struct dm_table *t,
1596 enum blk_zoned_model zoned_model)
1597 {
1598 for (unsigned int i = 0; i < t->num_targets; i++) {
1599 struct dm_target *ti = dm_table_get_target(t, i);
1600
1601 if (dm_target_supports_zoned_hm(ti->type)) {
1602 if (!ti->type->iterate_devices ||
1603 ti->type->iterate_devices(ti, device_not_zoned_model,
1604 &zoned_model))
1605 return false;
1606 } else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
1607 if (zoned_model == BLK_ZONED_HM)
1608 return false;
1609 }
1610 }
1611
1612 return true;
1613 }
1614
1615 static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
1616 sector_t start, sector_t len, void *data)
1617 {
1618 unsigned int *zone_sectors = data;
1619
1620 if (!bdev_is_zoned(dev->bdev))
1621 return 0;
1622 return bdev_zone_sectors(dev->bdev) != *zone_sectors;
1623 }
1624
1625 /*
1626 * Check consistency of zoned model and zone sectors across all targets. For
1627 * zone sectors, if the destination device is a zoned block device, it shall
1628 * have the specified zone_sectors.
1629 */
1630 static int validate_hardware_zoned_model(struct dm_table *t,
1631 enum blk_zoned_model zoned_model,
1632 unsigned int zone_sectors)
1633 {
1634 if (zoned_model == BLK_ZONED_NONE)
1635 return 0;
1636
1637 if (!dm_table_supports_zoned_model(t, zoned_model)) {
1638 DMERR("%s: zoned model is not consistent across all devices",
1639 dm_device_name(t->md));
1640 return -EINVAL;
1641 }
1642
1643 /* Check zone size validity and compatibility */
1644 if (!zone_sectors || !is_power_of_2(zone_sectors))
1645 return -EINVAL;
1646
1647 if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) {
1648 DMERR("%s: zone sectors is not consistent across all zoned devices",
1649 dm_device_name(t->md));
1650 return -EINVAL;
1651 }
1652
1653 return 0;
1654 }
1655
1656 /*
1657 * Establish the new table's queue_limits and validate them.
1658 */
1659 int dm_calculate_queue_limits(struct dm_table *t,
1660 struct queue_limits *limits)
1661 {
1662 struct queue_limits ti_limits;
1663 enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
1664 unsigned int zone_sectors = 0;
1665
1666 blk_set_stacking_limits(limits);
1667
1668 for (unsigned int i = 0; i < t->num_targets; i++) {
1669 struct dm_target *ti = dm_table_get_target(t, i);
1670
1671 blk_set_stacking_limits(&ti_limits);
1672
1673 if (!ti->type->iterate_devices)
1674 goto combine_limits;
1675
1676 /*
1677 * Combine queue limits of all the devices this target uses.
1678 */
1679 ti->type->iterate_devices(ti, dm_set_device_limits,
1680 &ti_limits);
1681
1682 if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
1683 /*
1684 * After stacking all limits, validate all devices
1685 * in table support this zoned model and zone sectors.
1686 */
1687 zoned_model = ti_limits.zoned;
1688 zone_sectors = ti_limits.chunk_sectors;
1689 }
1690
1691 /* Set I/O hints portion of queue limits */
1692 if (ti->type->io_hints)
1693 ti->type->io_hints(ti, &ti_limits);
1694
1695 /*
1696 * Check each device area is consistent with the target's
1697 * overall queue limits.
1698 */
1699 if (ti->type->iterate_devices(ti, device_area_is_invalid,
1700 &ti_limits))
1701 return -EINVAL;
1702
1703 combine_limits:
1704 /*
1705 * Merge this target's queue limits into the overall limits
1706 * for the table.
1707 */
1708 if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1709 DMWARN("%s: adding target device (start sect %llu len %llu) "
1710 "caused an alignment inconsistency",
1711 dm_device_name(t->md),
1712 (unsigned long long) ti->begin,
1713 (unsigned long long) ti->len);
1714 }
1715
1716 /*
1717 * Verify that the zoned model and zone sectors, as determined before
1718 * any .io_hints override, are the same across all devices in the table.
1719 * - this is especially relevant if .io_hints is emulating a disk-managed
1720 * zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
1721 * BUT...
1722 */
1723 if (limits->zoned != BLK_ZONED_NONE) {
1724 /*
1725 * ...IF the above limits stacking determined a zoned model
1726 * validate that all of the table's devices conform to it.
1727 */
1728 zoned_model = limits->zoned;
1729 zone_sectors = limits->chunk_sectors;
1730 }
1731 if (validate_hardware_zoned_model(t, zoned_model, zone_sectors))
1732 return -EINVAL;
1733
1734 return validate_hardware_logical_block_alignment(t, limits);
1735 }
1736
1737 /*
1738 * Verify that all devices have an integrity profile that matches the
1739 * DM device's registered integrity profile. If the profiles don't
1740 * match then unregister the DM device's integrity profile.
1741 */
1742 static void dm_table_verify_integrity(struct dm_table *t)
1743 {
1744 struct gendisk *template_disk = NULL;
1745
1746 if (t->integrity_added)
1747 return;
1748
1749 if (t->integrity_supported) {
1750 /*
1751 * Verify that the original integrity profile
1752 * matches all the devices in this table.
1753 */
1754 template_disk = dm_table_get_integrity_disk(t);
1755 if (template_disk &&
1756 blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
1757 return;
1758 }
1759
1760 if (integrity_profile_exists(dm_disk(t->md))) {
1761 DMWARN("%s: unable to establish an integrity profile",
1762 dm_device_name(t->md));
1763 blk_integrity_unregister(dm_disk(t->md));
1764 }
1765 }
1766
1767 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1768 sector_t start, sector_t len, void *data)
1769 {
1770 unsigned long flush = (unsigned long) data;
1771 struct request_queue *q = bdev_get_queue(dev->bdev);
1772
1773 return (q->queue_flags & flush);
1774 }
1775
1776 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
1777 {
1778 /*
1779 * Require at least one underlying device to support flushes.
1780 * t->devices includes internal dm devices such as mirror logs
1781 * so we need to use iterate_devices here, which targets
1782 * supporting flushes must provide.
1783 */
1784 for (unsigned int i = 0; i < t->num_targets; i++) {
1785 struct dm_target *ti = dm_table_get_target(t, i);
1786
1787 if (!ti->num_flush_bios)
1788 continue;
1789
1790 if (ti->flush_supported)
1791 return true;
1792
1793 if (ti->type->iterate_devices &&
1794 ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
1795 return true;
1796 }
1797
1798 return false;
1799 }
1800
1801 static int device_dax_write_cache_enabled(struct dm_target *ti,
1802 struct dm_dev *dev, sector_t start,
1803 sector_t len, void *data)
1804 {
1805 struct dax_device *dax_dev = dev->dax_dev;
1806
1807 if (!dax_dev)
1808 return false;
1809
1810 if (dax_write_cache_enabled(dax_dev))
1811 return true;
1812 return false;
1813 }
1814
1815 static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
1816 sector_t start, sector_t len, void *data)
1817 {
1818 return !bdev_nonrot(dev->bdev);
1819 }
1820
1821 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1822 sector_t start, sector_t len, void *data)
1823 {
1824 struct request_queue *q = bdev_get_queue(dev->bdev);
1825
1826 return !blk_queue_add_random(q);
1827 }
1828
1829 static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
1830 sector_t start, sector_t len, void *data)
1831 {
1832 struct request_queue *q = bdev_get_queue(dev->bdev);
1833
1834 return !q->limits.max_write_zeroes_sectors;
1835 }
1836
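/*
 * A table supports WRITE ZEROES only if every target issues write zeroes
 * bios and all of its underlying devices support the operation.
 */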
static bool dm_table_supports_write_zeroes(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_write_zeroes_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
			return false;
	}

	return true;
}

static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{
	return !bdev_nowait(dev->bdev);
}

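/*
 * A table supports REQ_NOWAIT only if every target type supports it and all
 * of its underlying devices can service nowait requests.
 */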
static bool dm_table_supports_nowait(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!dm_target_supports_nowait(ti->type))
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
			return false;
	}

	return true;
}

static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				      sector_t start, sector_t len, void *data)
{
	return !bdev_max_discard_sectors(dev->bdev);
}

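/*
 * A table supports discards only if every target issues discard bios and
 * either handles discards itself or maps exclusively to devices that do.
 */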
static bool dm_table_supports_discards(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_discard_bios)
			return false;

		/*
		 * Either the target provides discard support (as implied by setting
		 * 'discards_supported') or it relies on _all_ data devices having
		 * discard support.
		 */
		if (!ti->discards_supported &&
		    (!ti->type->iterate_devices ||
		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
			return false;
	}

	return true;
}

static int device_not_secure_erase_capable(struct dm_target *ti,
					   struct dm_dev *dev, sector_t start,
					   sector_t len, void *data)
{
	return !bdev_max_secure_erase_sectors(dev->bdev);
}

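/*
 * A table supports secure erase only if every target issues secure erase
 * bios and all of its underlying devices support the operation.
 */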
static bool dm_table_supports_secure_erase(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_secure_erase_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
			return false;
	}

	return true;
}

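/* iterate_devices callback: report whether @dev requires stable pages. */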
static int device_requires_stable_pages(struct dm_target *ti,
					struct dm_dev *dev, sector_t start,
					sector_t len, void *data)
{
	return bdev_stable_writes(dev->bdev);
}

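/*
 * Set up the mapped device's request_queue from the table: copy the computed
 * queue limits and set or clear feature flags based on what the table's
 * targets and underlying devices support.
 */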
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			      struct queue_limits *limits)
{
	bool wc = false, fua = false;
	int r;

	/*
	 * Copy table's limits to the DM device's request_queue
	 */
	q->limits = *limits;

	if (dm_table_supports_nowait(t))
		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);

	if (!dm_table_supports_discards(t)) {
		q->limits.max_discard_sectors = 0;
		q->limits.max_hw_discard_sectors = 0;
		q->limits.discard_granularity = 0;
		q->limits.discard_alignment = 0;
		q->limits.discard_misaligned = 0;
	}

	if (!dm_table_supports_secure_erase(t))
		q->limits.max_secure_erase_sectors = 0;

	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
		wc = true;
		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
			fua = true;
	}
	blk_queue_write_cache(q, wc, fua);

	if (dm_table_supports_dax(t, device_not_dax_capable)) {
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
			set_dax_synchronous(t->md->dax_dev);
	} else
		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);

	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
		dax_write_cache(t->md->dax_dev, true);

	/* Ensure that all underlying devices are non-rotational. */
	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

	if (!dm_table_supports_write_zeroes(t))
		q->limits.max_write_zeroes_sectors = 0;

	dm_table_verify_integrity(t);

	/*
	 * Some devices don't use blk_integrity but still want stable pages
	 * because they do their own checksumming.
	 * If any underlying device requires stable pages, a table must require
	 * them as well. Only targets that support iterate_devices are considered:
	 * we don't want error, zero, etc. to require stable pages.
	 */
	if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);

	/*
	 * Determine whether or not this queue's I/O timings contribute
	 * to the entropy pool. Only request-based targets use this.
	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
	 * have it set.
	 */
	if (blk_queue_add_random(q) &&
	    dm_table_any_dev_attr(t, device_is_not_random, NULL))
		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);

	/*
	 * For a zoned target, set up the zone-related queue attributes and,
	 * if needed, the resources necessary for zone append emulation.
	 */
	if (blk_queue_is_zoned(q)) {
		r = dm_set_zones_restrictions(t, q);
		if (r)
			return r;
		if (!static_key_enabled(&zoned_enabled.key))
			static_branch_enable(&zoned_enabled);
	}

	dm_update_crypto_profile(q, t);
	disk_update_readahead(t->md->disk);

	/*
	 * Checking for a request-based device is left to
	 * dm_mq_init_request_queue()->blk_mq_init_allocated_queue().
	 *
	 * For a bio-based device, only set QUEUE_FLAG_POLL when all
	 * underlying devices support polling.
	 */
	if (__table_type_bio_based(t->type)) {
		if (dm_table_supports_poll(t))
			blk_queue_flag_set(QUEUE_FLAG_POLL, q);
		else
			blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
	}

	return 0;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}
EXPORT_SYMBOL(dm_table_get_mode);

enum suspend_mode {
	PRESUSPEND,
	PRESUSPEND_UNDO,
	POSTSUSPEND,
};

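/*
 * Invoke the presuspend, presuspend_undo or postsuspend hook of every target
 * in the table, according to @mode.
 */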
static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{
	lockdep_assert_held(&t->md->suspend_lock);

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		switch (mode) {
		case PRESUSPEND:
			if (ti->type->presuspend)
				ti->type->presuspend(ti);
			break;
		case PRESUSPEND_UNDO:
			if (ti->type->presuspend_undo)
				ti->type->presuspend_undo(ti);
			break;
		case POSTSUSPEND:
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
			break;
		}
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND);
}

void dm_table_presuspend_undo_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND_UNDO);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, POSTSUSPEND);
}

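/*
 * Call every target's preresume hook, failing the resume if any of them
 * returns an error, and then call every target's resume hook.
 */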
int dm_table_resume_targets(struct dm_table *t)
{
	unsigned int i;
	int r = 0;

	lockdep_assert_held(&t->md->suspend_lock);

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r) {
			DMERR("%s: %s: preresume failed, error = %d",
			      dm_device_name(t->md), ti->type->name, r);
			return r;
		}
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}
EXPORT_SYMBOL(dm_table_get_md);

const char *dm_table_device_name(struct dm_table *t)
{
	return dm_device_name(t->md);
}
EXPORT_SYMBOL_GPL(dm_table_device_name);

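/*
 * For a request-based table, asynchronously kick the mapped device's
 * hardware queues; this is a no-op for bio-based tables.
 */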
void dm_table_run_md_queue_async(struct dm_table *t)
{
	if (!dm_table_request_based(t))
		return;

	if (t->md->queue)
		blk_mq_run_hw_queues(t->md->queue, true);
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);