1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2001 Sistina Software (UK) Limited. |
4 | * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. |
5 | * |
6 | * This file is released under the GPL. |
7 | */ |
8 | |
9 | #include "dm-core.h" |
10 | #include "dm-rq.h" |
11 | |
12 | #include <linux/module.h> |
13 | #include <linux/vmalloc.h> |
14 | #include <linux/blkdev.h> |
15 | #include <linux/blk-integrity.h> |
16 | #include <linux/namei.h> |
17 | #include <linux/ctype.h> |
18 | #include <linux/string.h> |
19 | #include <linux/slab.h> |
20 | #include <linux/interrupt.h> |
21 | #include <linux/mutex.h> |
22 | #include <linux/delay.h> |
23 | #include <linux/atomic.h> |
24 | #include <linux/blk-mq.h> |
25 | #include <linux/mount.h> |
26 | #include <linux/dax.h> |
27 | |
28 | #define DM_MSG_PREFIX "table" |
29 | |
30 | #define NODE_SIZE L1_CACHE_BYTES |
31 | #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t)) |
32 | #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) |
33 | |
34 | /* |
35 | * Similar to ceiling(log_size(n)) |
36 | */ |
37 | static unsigned int int_log(unsigned int n, unsigned int base) |
38 | { |
39 | int result = 0; |
40 | |
41 | while (n > 1) { |
42 | n = dm_div_up(n, base); |
43 | result++; |
44 | } |
45 | |
46 | return result; |
47 | } |
48 | |
49 | /* |
50 | * Calculate the index of the child node for the k'th key of the n'th node.
51 | */ |
52 | static inline unsigned int get_child(unsigned int n, unsigned int k) |
53 | { |
54 | return (n * CHILDREN_PER_NODE) + k; |
55 | } |
56 | |
57 | /* |
58 | * Return the n'th node of level l from table t. |
59 | */ |
60 | static inline sector_t *get_node(struct dm_table *t, |
61 | unsigned int l, unsigned int n) |
62 | { |
63 | return t->index[l] + (n * KEYS_PER_NODE); |
64 | } |
65 | |
66 | /* |
67 | * Return the highest key that you could lookup from the n'th |
68 | * node on level l of the btree. |
69 | */ |
70 | static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) |
71 | { |
72 | for (; l < t->depth - 1; l++) |
73 | n = get_child(n, CHILDREN_PER_NODE - 1); |
74 | |
75 | if (n >= t->counts[l]) |
76 | return (sector_t) -1; |
77 | |
78 | return get_node(t, l, n)[KEYS_PER_NODE - 1]; |
79 | } |
80 | |
81 | /* |
82 | * Fills in a level of the btree based on the highs of the level |
83 | * below it. |
84 | */ |
85 | static int setup_btree_index(unsigned int l, struct dm_table *t) |
86 | { |
87 | unsigned int n, k; |
88 | sector_t *node; |
89 | |
90 | for (n = 0U; n < t->counts[l]; n++) { |
91 | node = get_node(t, l, n); |
92 | |
93 | for (k = 0U; k < KEYS_PER_NODE; k++) |
94 | node[k] = high(t, l + 1, get_child(n, k));
95 | } |
96 | |
97 | return 0; |
98 | } |
99 | |
100 | /* |
101 | * highs, and targets are managed as dynamic arrays during a |
102 | * table load. |
103 | */ |
104 | static int alloc_targets(struct dm_table *t, unsigned int num) |
105 | { |
106 | sector_t *n_highs; |
107 | struct dm_target *n_targets; |
108 | |
109 | /* |
110 | * Allocate both the target array and offset array at once. |
111 | */ |
112 | n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
113 | GFP_KERNEL); |
114 | if (!n_highs) |
115 | return -ENOMEM; |
116 | |
117 | n_targets = (struct dm_target *) (n_highs + num); |
118 | |
119 | memset(n_highs, -1, sizeof(*n_highs) * num); |
120 | kvfree(t->highs);
121 | |
122 | t->num_allocated = num; |
123 | t->highs = n_highs; |
124 | t->targets = n_targets; |
125 | |
126 | return 0; |
127 | } |
128 | |
129 | int dm_table_create(struct dm_table **result, blk_mode_t mode, |
130 | unsigned int num_targets, struct mapped_device *md) |
131 | { |
132 | struct dm_table *t; |
133 | |
134 | if (num_targets > DM_MAX_TARGETS) |
135 | return -EOVERFLOW; |
136 | |
137 | t = kzalloc(sizeof(*t), GFP_KERNEL);
138 | |
139 | if (!t) |
140 | return -ENOMEM; |
141 | |
142 | INIT_LIST_HEAD(&t->devices);
143 | init_rwsem(&t->devices_lock); |
144 | |
145 | if (!num_targets) |
146 | num_targets = KEYS_PER_NODE; |
147 | |
148 | num_targets = dm_round_up(num_targets, KEYS_PER_NODE); |
149 | |
150 | if (!num_targets) { |
151 | kfree(t);
152 | return -EOVERFLOW; |
153 | } |
154 | |
155 | if (alloc_targets(t, num_targets)) {
156 | kfree(t);
157 | return -ENOMEM; |
158 | } |
159 | |
160 | t->type = DM_TYPE_NONE; |
161 | t->mode = mode; |
162 | t->md = md; |
163 | *result = t; |
164 | return 0; |
165 | } |
166 | |
167 | static void free_devices(struct list_head *devices, struct mapped_device *md) |
168 | { |
169 | struct list_head *tmp, *next; |
170 | |
171 | list_for_each_safe(tmp, next, devices) { |
172 | struct dm_dev_internal *dd = |
173 | list_entry(tmp, struct dm_dev_internal, list); |
174 | DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s" , |
175 | dm_device_name(md), dd->dm_dev->name); |
176 | dm_put_table_device(md, d: dd->dm_dev); |
177 | kfree(objp: dd); |
178 | } |
179 | } |
180 | |
181 | static void dm_table_destroy_crypto_profile(struct dm_table *t); |
182 | |
183 | void dm_table_destroy(struct dm_table *t) |
184 | { |
185 | if (!t) |
186 | return; |
187 | |
188 | /* free the indexes */ |
189 | if (t->depth >= 2) |
190 | kvfree(t->index[t->depth - 2]);
191 | |
192 | /* free the targets */ |
193 | for (unsigned int i = 0; i < t->num_targets; i++) { |
194 | struct dm_target *ti = dm_table_get_target(t, i);
195 | |
196 | if (ti->type->dtr) |
197 | ti->type->dtr(ti); |
198 | |
199 | dm_put_target_type(ti->type);
200 | } |
201 | |
202 | kvfree(t->highs);
203 | |
204 | /* free the device list */ |
205 | free_devices(&t->devices, t->md);
206 | |
207 | dm_free_md_mempools(t->mempools);
208 | |
209 | dm_table_destroy_crypto_profile(t); |
210 | |
211 | kfree(t);
212 | } |
213 | |
214 | /* |
215 | * See if we've already got a device in the list. |
216 | */ |
217 | static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev) |
218 | { |
219 | struct dm_dev_internal *dd; |
220 | |
221 | list_for_each_entry(dd, l, list) |
222 | if (dd->dm_dev->bdev->bd_dev == dev) |
223 | return dd; |
224 | |
225 | return NULL; |
226 | } |
227 | |
228 | /* |
229 | * If possible, this checks whether an area of a destination device is invalid.
230 | */ |
231 | static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, |
232 | sector_t start, sector_t len, void *data) |
233 | { |
234 | struct queue_limits *limits = data; |
235 | struct block_device *bdev = dev->bdev; |
236 | sector_t dev_size = bdev_nr_sectors(bdev); |
237 | unsigned short logical_block_size_sectors = |
238 | limits->logical_block_size >> SECTOR_SHIFT; |
239 | |
240 | if (!dev_size) |
241 | return 0; |
242 | |
243 | if ((start >= dev_size) || (start + len > dev_size)) { |
244 | DMERR("%s: %pg too small for target: start=%llu, len=%llu, dev_size=%llu" , |
245 | dm_device_name(ti->table->md), bdev, |
246 | (unsigned long long)start, |
247 | (unsigned long long)len, |
248 | (unsigned long long)dev_size); |
249 | return 1; |
250 | } |
251 | |
252 | /* |
253 | * If the target is mapped to zoned block device(s), check |
254 | * that the zones are not partially mapped. |
255 | */ |
256 | if (bdev_is_zoned(bdev)) { |
257 | unsigned int zone_sectors = bdev_zone_sectors(bdev); |
258 | |
259 | if (start & (zone_sectors - 1)) { |
260 | DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg" , |
261 | dm_device_name(ti->table->md), |
262 | (unsigned long long)start, |
263 | zone_sectors, bdev); |
264 | return 1; |
265 | } |
266 | |
267 | /* |
268 | * Note: The last zone of a zoned block device may be smaller |
269 | * than other zones. So for a target mapping the end of a |
270 | * zoned block device with such a zone, len would not be zone |
271 | * aligned. We do not allow such last smaller zone to be part |
272 | * of the mapping here to ensure that mappings with multiple |
273 | * devices do not end up with a smaller zone in the middle of |
274 | * the sector range. |
275 | */ |
276 | if (len & (zone_sectors - 1)) { |
277 | DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg" , |
278 | dm_device_name(ti->table->md), |
279 | (unsigned long long)len, |
280 | zone_sectors, bdev); |
281 | return 1; |
282 | } |
283 | } |
284 | |
285 | if (logical_block_size_sectors <= 1) |
286 | return 0; |
287 | |
288 | if (start & (logical_block_size_sectors - 1)) { |
289 | DMERR("%s: start=%llu not aligned to h/w logical block size %u of %pg" , |
290 | dm_device_name(ti->table->md), |
291 | (unsigned long long)start, |
292 | limits->logical_block_size, bdev); |
293 | return 1; |
294 | } |
295 | |
296 | if (len & (logical_block_size_sectors - 1)) { |
297 | DMERR("%s: len=%llu not aligned to h/w logical block size %u of %pg" , |
298 | dm_device_name(ti->table->md), |
299 | (unsigned long long)len, |
300 | limits->logical_block_size, bdev); |
301 | return 1; |
302 | } |
303 | |
304 | return 0; |
305 | } |
306 | |
307 | /* |
308 | * This upgrades the mode on an already open dm_dev, being |
309 | * careful to leave things as they were if we fail to reopen the |
310 | * device and not to touch the existing bdev field in case |
311 | * it is accessed concurrently. |
312 | */ |
313 | static int upgrade_mode(struct dm_dev_internal *dd, blk_mode_t new_mode, |
314 | struct mapped_device *md) |
315 | { |
316 | int r; |
317 | struct dm_dev *old_dev, *new_dev; |
318 | |
319 | old_dev = dd->dm_dev; |
320 | |
321 | r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
322 | dd->dm_dev->mode | new_mode, &new_dev);
323 | if (r) |
324 | return r; |
325 | |
326 | dd->dm_dev = new_dev; |
327 | dm_put_table_device(md, old_dev);
328 | |
329 | return 0; |
330 | } |
331 | |
332 | /* |
333 | * Add a device to the list, or just increment the usage count if |
334 | * it's already present. |
335 | * |
336 | * Note: the __ref annotation is because this function can call the __init |
337 | * marked early_lookup_bdev when called during early boot code from dm-init.c. |
338 | */ |
339 | int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode, |
340 | struct dm_dev **result) |
341 | { |
342 | int r; |
343 | dev_t dev; |
344 | unsigned int major, minor; |
345 | char dummy; |
346 | struct dm_dev_internal *dd; |
347 | struct dm_table *t = ti->table; |
348 | |
349 | BUG_ON(!t); |
350 | |
351 | if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
352 | /* Extract the major/minor numbers */ |
353 | dev = MKDEV(major, minor); |
354 | if (MAJOR(dev) != major || MINOR(dev) != minor) |
355 | return -EOVERFLOW; |
356 | } else { |
357 | r = lookup_bdev(path, &dev);
358 | #ifndef MODULE
359 | if (r && system_state < SYSTEM_RUNNING)
360 | r = early_lookup_bdev(path, &dev);
361 | #endif |
362 | if (r) |
363 | return r; |
364 | } |
365 | if (dev == disk_devt(t->md->disk))
366 | return -EINVAL; |
367 | |
368 | down_write(&t->devices_lock);
369 | |
370 | dd = find_device(&t->devices, dev);
371 | if (!dd) {
372 | dd = kmalloc(sizeof(*dd), GFP_KERNEL);
373 | if (!dd) { |
374 | r = -ENOMEM; |
375 | goto unlock_ret_r; |
376 | } |
377 | |
378 | r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
379 | if (r) {
380 | kfree(dd);
381 | goto unlock_ret_r; |
382 | } |
383 | |
384 | refcount_set(&dd->count, 1);
385 | list_add(&dd->list, &t->devices);
386 | goto out; |
387 | |
388 | } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) { |
389 | r = upgrade_mode(dd, mode, t->md);
390 | if (r) |
391 | goto unlock_ret_r; |
392 | } |
393 | refcount_inc(&dd->count);
394 | out:
395 | up_write(&t->devices_lock);
396 | *result = dd->dm_dev; |
397 | return 0; |
398 | |
399 | unlock_ret_r: |
400 | up_write(&t->devices_lock);
401 | return r; |
402 | } |
403 | EXPORT_SYMBOL(dm_get_device); |
404 | |
405 | static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, |
406 | sector_t start, sector_t len, void *data) |
407 | { |
408 | struct queue_limits *limits = data; |
409 | struct block_device *bdev = dev->bdev; |
410 | struct request_queue *q = bdev_get_queue(bdev); |
411 | |
412 | if (unlikely(!q)) { |
413 | DMWARN("%s: Cannot set limits for nonexistent device %pg" , |
414 | dm_device_name(ti->table->md), bdev); |
415 | return 0; |
416 | } |
417 | |
418 | if (blk_stack_limits(limits, &q->limits,
419 | get_start_sect(bdev) + start) < 0)
420 | DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
421 | "physical_block_size=%u, logical_block_size=%u, "
422 | "alignment_offset=%u, start=%llu",
423 | dm_device_name(ti->table->md), bdev, |
424 | q->limits.physical_block_size, |
425 | q->limits.logical_block_size, |
426 | q->limits.alignment_offset, |
427 | (unsigned long long) start << SECTOR_SHIFT); |
428 | return 0; |
429 | } |
430 | |
431 | /* |
432 | * Decrement a device's use count and remove it if necessary. |
433 | */ |
434 | void dm_put_device(struct dm_target *ti, struct dm_dev *d) |
435 | { |
436 | int found = 0; |
437 | struct dm_table *t = ti->table; |
438 | struct list_head *devices = &t->devices; |
439 | struct dm_dev_internal *dd; |
440 | |
441 | down_write(&t->devices_lock);
442 | |
443 | list_for_each_entry(dd, devices, list) { |
444 | if (dd->dm_dev == d) { |
445 | found = 1; |
446 | break; |
447 | } |
448 | } |
449 | if (!found) { |
450 | DMERR("%s: device %s not in table devices list" , |
451 | dm_device_name(t->md), d->name); |
452 | goto unlock_ret; |
453 | } |
454 | if (refcount_dec_and_test(&dd->count)) {
455 | dm_put_table_device(t->md, d);
456 | list_del(&dd->list);
457 | kfree(dd);
458 | } |
459 | |
460 | unlock_ret: |
461 | up_write(&t->devices_lock);
462 | } |
463 | EXPORT_SYMBOL(dm_put_device); |
464 | |
465 | /* |
466 | * Checks to see if the target joins onto the end of the table. |
467 | */ |
468 | static int adjoin(struct dm_table *t, struct dm_target *ti) |
469 | { |
470 | struct dm_target *prev; |
471 | |
472 | if (!t->num_targets) |
473 | return !ti->begin; |
474 | |
475 | prev = &t->targets[t->num_targets - 1]; |
476 | return (ti->begin == (prev->begin + prev->len)); |
477 | } |
478 | |
479 | /* |
480 | * Used to dynamically allocate the arg array. |
481 | * |
482 | * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must |
483 | * process messages even if some device is suspended. These messages have a |
484 | * small fixed number of arguments. |
485 | * |
486 | * On the other hand, dm-switch needs to process bulk data using messages and |
487 | * excessive use of GFP_NOIO could cause trouble. |
488 | */ |
489 | static char **realloc_argv(unsigned int *size, char **old_argv) |
490 | { |
491 | char **argv; |
492 | unsigned int new_size; |
493 | gfp_t gfp; |
494 | |
495 | if (*size) { |
496 | new_size = *size * 2; |
497 | gfp = GFP_KERNEL; |
498 | } else { |
499 | new_size = 8; |
500 | gfp = GFP_NOIO; |
501 | } |
502 | argv = kmalloc_array(new_size, sizeof(*argv), gfp);
503 | if (argv && old_argv) { |
504 | memcpy(argv, old_argv, *size * sizeof(*argv)); |
505 | *size = new_size; |
506 | } |
507 | |
508 | kfree(old_argv);
509 | return argv; |
510 | } |
511 | |
512 | /* |
513 | * Destructively splits up the argument list to pass to ctr. |
514 | */ |
515 | int dm_split_args(int *argc, char ***argvp, char *input) |
516 | { |
517 | char *start, *end = input, *out, **argv = NULL; |
518 | unsigned int array_size = 0; |
519 | |
520 | *argc = 0; |
521 | |
522 | if (!input) { |
523 | *argvp = NULL; |
524 | return 0; |
525 | } |
526 | |
527 | argv = realloc_argv(&array_size, argv);
528 | if (!argv) |
529 | return -ENOMEM; |
530 | |
531 | while (1) { |
532 | /* Skip whitespace */ |
533 | start = skip_spaces(end); |
534 | |
535 | if (!*start) |
536 | break; /* success, we hit the end */ |
537 | |
538 | /* 'out' is used to strip the escaping backslashes */
539 | end = out = start; |
540 | while (*end) { |
541 | /* Everything apart from '\0' can be quoted */ |
542 | if (*end == '\\' && *(end + 1)) { |
543 | *out++ = *(end + 1); |
544 | end += 2; |
545 | continue; |
546 | } |
547 | |
548 | if (isspace(*end)) |
549 | break; /* end of token */ |
550 | |
551 | *out++ = *end++; |
552 | } |
553 | |
554 | /* have we already filled the array ? */ |
555 | if ((*argc + 1) > array_size) { |
556 | argv = realloc_argv(&array_size, argv);
557 | if (!argv) |
558 | return -ENOMEM; |
559 | } |
560 | |
561 | /* we know this is whitespace */ |
562 | if (*end) |
563 | end++; |
564 | |
565 | /* terminate the string and put it in the array */ |
566 | *out = '\0'; |
567 | argv[*argc] = start; |
568 | (*argc)++; |
569 | } |
570 | |
571 | *argvp = argv; |
572 | return 0; |
573 | } |
574 | |
575 | /* |
576 | * Impose necessary and sufficient conditions on a device's table such
577 | * that any incoming bio which respects its logical_block_size can be |
578 | * processed successfully. If it falls across the boundary between |
579 | * two or more targets, the size of each piece it gets split into must |
580 | * be compatible with the logical_block_size of the target processing it. |
581 | */ |
582 | static int validate_hardware_logical_block_alignment(struct dm_table *t, |
583 | struct queue_limits *limits) |
584 | { |
585 | /* |
586 | * This function uses arithmetic modulo the logical_block_size |
587 | * (in units of 512-byte sectors). |
588 | */ |
589 | unsigned short device_logical_block_size_sects = |
590 | limits->logical_block_size >> SECTOR_SHIFT; |
591 | |
592 | /* |
593 | * Offset of the start of the next table entry, mod logical_block_size. |
594 | */ |
595 | unsigned short next_target_start = 0; |
596 | |
597 | /* |
598 | * Given an aligned bio that extends beyond the end of a |
599 | * target, how many sectors must the next target handle? |
600 | */ |
601 | unsigned short remaining = 0; |
602 | |
603 | struct dm_target *ti; |
604 | struct queue_limits ti_limits; |
605 | unsigned int i; |
606 | |
607 | /* |
608 | * Check each entry in the table in turn. |
609 | */ |
610 | for (i = 0; i < t->num_targets; i++) { |
611 | ti = dm_table_get_target(t, i);
612 | 
613 | blk_set_stacking_limits(&ti_limits);
614 | |
615 | /* combine all target devices' limits */ |
616 | if (ti->type->iterate_devices) |
617 | ti->type->iterate_devices(ti, dm_set_device_limits, |
618 | &ti_limits); |
619 | |
620 | /* |
621 | * If the remaining sectors fall entirely within this |
622 | * table entry are they compatible with its logical_block_size? |
623 | */ |
624 | if (remaining < ti->len && |
625 | remaining & ((ti_limits.logical_block_size >> |
626 | SECTOR_SHIFT) - 1)) |
627 | break; /* Error */ |
628 | |
629 | next_target_start = |
630 | (unsigned short) ((next_target_start + ti->len) & |
631 | (device_logical_block_size_sects - 1)); |
632 | remaining = next_target_start ? |
633 | device_logical_block_size_sects - next_target_start : 0; |
634 | } |
635 | |
636 | if (remaining) { |
637 | DMERR("%s: table line %u (start sect %llu len %llu) " |
638 | "not aligned to h/w logical block size %u" , |
639 | dm_device_name(t->md), i, |
640 | (unsigned long long) ti->begin, |
641 | (unsigned long long) ti->len, |
642 | limits->logical_block_size); |
643 | return -EINVAL; |
644 | } |
645 | |
646 | return 0; |
647 | } |
648 | |
649 | int dm_table_add_target(struct dm_table *t, const char *type, |
650 | sector_t start, sector_t len, char *params) |
651 | { |
652 | int r = -EINVAL, argc; |
653 | char **argv; |
654 | struct dm_target *ti; |
655 | |
656 | if (t->singleton) { |
657 | DMERR("%s: target type %s must appear alone in table" , |
658 | dm_device_name(t->md), t->targets->type->name); |
659 | return -EINVAL; |
660 | } |
661 | |
662 | BUG_ON(t->num_targets >= t->num_allocated); |
663 | |
664 | ti = t->targets + t->num_targets; |
665 | memset(ti, 0, sizeof(*ti)); |
666 | |
667 | if (!len) { |
668 | DMERR("%s: zero-length target" , dm_device_name(t->md)); |
669 | return -EINVAL; |
670 | } |
671 | |
672 | ti->type = dm_get_target_type(type);
673 | if (!ti->type) {
674 | DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
675 | return -EINVAL; |
676 | } |
677 | |
678 | if (dm_target_needs_singleton(ti->type)) { |
679 | if (t->num_targets) { |
680 | ti->error = "singleton target type must appear alone in table" ; |
681 | goto bad; |
682 | } |
683 | t->singleton = true; |
684 | } |
685 | |
686 | if (dm_target_always_writeable(ti->type) && |
687 | !(t->mode & BLK_OPEN_WRITE)) { |
688 | ti->error = "target type may not be included in a read-only table" ; |
689 | goto bad; |
690 | } |
691 | |
692 | if (t->immutable_target_type) { |
693 | if (t->immutable_target_type != ti->type) { |
694 | ti->error = "immutable target type cannot be mixed with other target types" ; |
695 | goto bad; |
696 | } |
697 | } else if (dm_target_is_immutable(ti->type)) { |
698 | if (t->num_targets) { |
699 | ti->error = "immutable target type cannot be mixed with other target types" ; |
700 | goto bad; |
701 | } |
702 | t->immutable_target_type = ti->type; |
703 | } |
704 | |
705 | if (dm_target_has_integrity(ti->type)) |
706 | t->integrity_added = 1; |
707 | |
708 | ti->table = t; |
709 | ti->begin = start; |
710 | ti->len = len; |
711 | ti->error = "Unknown error" ; |
712 | |
713 | /* |
714 | * Does this target adjoin the previous one ? |
715 | */ |
716 | if (!adjoin(t, ti)) { |
717 | ti->error = "Gap in table" ; |
718 | goto bad; |
719 | } |
720 | |
721 | r = dm_split_args(&argc, &argv, params);
722 | if (r) {
723 | ti->error = "couldn't split parameters";
724 | goto bad; |
725 | } |
726 | |
727 | r = ti->type->ctr(ti, argc, argv); |
728 | kfree(argv);
729 | if (r) |
730 | goto bad; |
731 | |
732 | t->highs[t->num_targets++] = ti->begin + ti->len - 1; |
733 | |
734 | if (!ti->num_discard_bios && ti->discards_supported) |
735 | DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero." , |
736 | dm_device_name(t->md), type); |
737 | |
738 | if (ti->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key)) |
739 | static_branch_enable(&swap_bios_enabled); |
740 | |
741 | return 0; |
742 | |
743 | bad: |
744 | DMERR("%s: %s: %s (%pe)" , dm_device_name(t->md), type, ti->error, ERR_PTR(r)); |
745 | dm_put_target_type(tt: ti->type); |
746 | return r; |
747 | } |
748 | |
749 | /* |
750 | * Target argument parsing helpers. |
751 | */ |
752 | static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set, |
753 | unsigned int *value, char **error, unsigned int grouped) |
754 | { |
755 | const char *arg_str = dm_shift_arg(arg_set);
756 | char dummy; |
757 | |
758 | if (!arg_str || |
759 | (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
760 | (*value < arg->min) || |
761 | (*value > arg->max) || |
762 | (grouped && arg_set->argc < *value)) { |
763 | *error = arg->error; |
764 | return -EINVAL; |
765 | } |
766 | |
767 | return 0; |
768 | } |
769 | |
770 | int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set, |
771 | unsigned int *value, char **error) |
772 | { |
773 | return validate_next_arg(arg, arg_set, value, error, 0);
774 | } |
775 | EXPORT_SYMBOL(dm_read_arg); |
776 | |
777 | int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set, |
778 | unsigned int *value, char **error) |
779 | { |
780 | return validate_next_arg(arg, arg_set, value, error, 1);
781 | } |
782 | EXPORT_SYMBOL(dm_read_arg_group); |
783 | |
784 | const char *dm_shift_arg(struct dm_arg_set *as) |
785 | { |
786 | char *r; |
787 | |
788 | if (as->argc) { |
789 | as->argc--; |
790 | r = *as->argv; |
791 | as->argv++; |
792 | return r; |
793 | } |
794 | |
795 | return NULL; |
796 | } |
797 | EXPORT_SYMBOL(dm_shift_arg); |
798 | |
799 | void dm_consume_args(struct dm_arg_set *as, unsigned int num_args) |
800 | { |
801 | BUG_ON(as->argc < num_args); |
802 | as->argc -= num_args; |
803 | as->argv += num_args; |
804 | } |
805 | EXPORT_SYMBOL(dm_consume_args); |
806 | |
807 | static bool __table_type_bio_based(enum dm_queue_mode table_type) |
808 | { |
809 | return (table_type == DM_TYPE_BIO_BASED || |
810 | table_type == DM_TYPE_DAX_BIO_BASED); |
811 | } |
812 | |
813 | static bool __table_type_request_based(enum dm_queue_mode table_type) |
814 | { |
815 | return table_type == DM_TYPE_REQUEST_BASED; |
816 | } |
817 | |
818 | void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type) |
819 | { |
820 | t->type = type; |
821 | } |
822 | EXPORT_SYMBOL_GPL(dm_table_set_type); |
823 | |
824 | /* validate the dax capability of the target device span */ |
825 | static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev, |
826 | sector_t start, sector_t len, void *data) |
827 | { |
828 | if (dev->dax_dev) |
829 | return false; |
830 | |
831 | DMDEBUG("%pg: error: dax unsupported by block device" , dev->bdev); |
832 | return true; |
833 | } |
834 | |
835 | /* Check devices support synchronous DAX */ |
836 | static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev, |
837 | sector_t start, sector_t len, void *data) |
838 | { |
839 | return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
840 | } |
841 | |
842 | static bool dm_table_supports_dax(struct dm_table *t, |
843 | iterate_devices_callout_fn iterate_fn) |
844 | { |
845 | /* Ensure that all targets support DAX. */ |
846 | for (unsigned int i = 0; i < t->num_targets; i++) { |
847 | struct dm_target *ti = dm_table_get_target(t, i);
848 | |
849 | if (!ti->type->direct_access) |
850 | return false; |
851 | |
852 | if (dm_target_is_wildcard(ti->type) || |
853 | !ti->type->iterate_devices || |
854 | ti->type->iterate_devices(ti, iterate_fn, NULL)) |
855 | return false; |
856 | } |
857 | |
858 | return true; |
859 | } |
860 | |
861 | static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev, |
862 | sector_t start, sector_t len, void *data) |
863 | { |
864 | struct block_device *bdev = dev->bdev; |
865 | struct request_queue *q = bdev_get_queue(bdev); |
866 | |
867 | /* request-based cannot stack on partitions! */ |
868 | if (bdev_is_partition(bdev)) |
869 | return false; |
870 | |
871 | return queue_is_mq(q); |
872 | } |
873 | |
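/*
 * Determine whether this table is bio-based, DAX bio-based or request-based
 * from its targets (or verify a type a target already set), and enforce the
 * single-immutable-target constraint for request-based tables.
 */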
874 | static int dm_table_determine_type(struct dm_table *t) |
875 | { |
876 | unsigned int bio_based = 0, request_based = 0, hybrid = 0; |
877 | struct dm_target *ti; |
878 | struct list_head *devices = dm_table_get_devices(t); |
879 | enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
880 | |
881 | if (t->type != DM_TYPE_NONE) { |
882 | /* target already set the table's type */ |
883 | if (t->type == DM_TYPE_BIO_BASED) { |
884 | /* possibly upgrade to a variant of bio-based */ |
885 | goto verify_bio_based; |
886 | } |
887 | BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); |
888 | goto verify_rq_based; |
889 | } |
890 | |
891 | for (unsigned int i = 0; i < t->num_targets; i++) { |
892 | ti = dm_table_get_target(t, i);
893 | if (dm_target_hybrid(ti)) |
894 | hybrid = 1; |
895 | else if (dm_target_request_based(ti)) |
896 | request_based = 1; |
897 | else |
898 | bio_based = 1; |
899 | |
900 | if (bio_based && request_based) { |
901 | DMERR("Inconsistent table: different target types can't be mixed up" ); |
902 | return -EINVAL; |
903 | } |
904 | } |
905 | |
906 | if (hybrid && !bio_based && !request_based) { |
907 | /* |
908 | * The targets can work either way. |
909 | * Determine the type from the live device. |
910 | * Default to bio-based if device is new. |
911 | */ |
912 | if (__table_type_request_based(live_md_type))
913 | request_based = 1; |
914 | else |
915 | bio_based = 1; |
916 | } |
917 | |
918 | if (bio_based) { |
919 | verify_bio_based: |
920 | /* We must use this table as bio-based */ |
921 | t->type = DM_TYPE_BIO_BASED; |
922 | if (dm_table_supports_dax(t, device_not_dax_capable) ||
923 | (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
924 | t->type = DM_TYPE_DAX_BIO_BASED; |
925 | } |
926 | return 0; |
927 | } |
928 | |
929 | BUG_ON(!request_based); /* No targets in this table */ |
930 | |
931 | t->type = DM_TYPE_REQUEST_BASED; |
932 | |
933 | verify_rq_based: |
934 | /* |
935 | * Request-based dm supports only tables that have a single target now. |
936 | * To support multiple targets, request splitting support is needed, |
937 | * and that needs lots of changes in the block-layer. |
938 | * (e.g. request completion process for partial completion.) |
939 | */ |
940 | if (t->num_targets > 1) { |
941 | DMERR("request-based DM doesn't support multiple targets" ); |
942 | return -EINVAL; |
943 | } |
944 | |
945 | if (list_empty(devices)) {
946 | int srcu_idx;
947 | struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
948 | |
949 | /* inherit live table's type */ |
950 | if (live_table) |
951 | t->type = live_table->type; |
952 | dm_put_live_table(t->md, srcu_idx);
953 | return 0; |
954 | } |
955 | |
956 | ti = dm_table_get_immutable_target(t); |
957 | if (!ti) { |
958 | DMERR("table load rejected: immutable target is required" ); |
959 | return -EINVAL; |
960 | } else if (ti->max_io_len) { |
961 | DMERR("table load rejected: immutable target that splits IO is not supported" ); |
962 | return -EINVAL; |
963 | } |
964 | |
965 | /* Non-request-stackable devices can't be used for request-based dm */ |
966 | if (!ti->type->iterate_devices || |
967 | !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) { |
968 | DMERR("table load rejected: including non-request-stackable devices" ); |
969 | return -EINVAL; |
970 | } |
971 | |
972 | return 0; |
973 | } |
974 | |
975 | enum dm_queue_mode dm_table_get_type(struct dm_table *t) |
976 | { |
977 | return t->type; |
978 | } |
979 | |
980 | struct target_type *dm_table_get_immutable_target_type(struct dm_table *t) |
981 | { |
982 | return t->immutable_target_type; |
983 | } |
984 | |
985 | struct dm_target *dm_table_get_immutable_target(struct dm_table *t) |
986 | { |
987 | /* Immutable target is implicitly a singleton */ |
988 | if (t->num_targets > 1 || |
989 | !dm_target_is_immutable(t->targets[0].type)) |
990 | return NULL; |
991 | |
992 | return t->targets; |
993 | } |
994 | |
995 | struct dm_target *dm_table_get_wildcard_target(struct dm_table *t) |
996 | { |
997 | for (unsigned int i = 0; i < t->num_targets; i++) { |
998 | struct dm_target *ti = dm_table_get_target(t, i);
999 | |
1000 | if (dm_target_is_wildcard(ti->type)) |
1001 | return ti; |
1002 | } |
1003 | |
1004 | return NULL; |
1005 | } |
1006 | |
1007 | bool dm_table_bio_based(struct dm_table *t) |
1008 | { |
1009 | return __table_type_bio_based(dm_table_get_type(t));
1010 | } |
1011 | |
1012 | bool dm_table_request_based(struct dm_table *t) |
1013 | { |
1014 | return __table_type_request_based(dm_table_get_type(t));
1015 | } |
1016 | |
1017 | static bool dm_table_supports_poll(struct dm_table *t); |
1018 | |
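/*
 * Allocate the front-padded biosets used to clone I/O for this table.
 * Request-based tables only need the clone bioset; bio-based tables size
 * both biosets from the targets' per_io_data_size and flush requirements.
 */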
1019 | static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) |
1020 | { |
1021 | enum dm_queue_mode type = dm_table_get_type(t); |
1022 | unsigned int per_io_data_size = 0, front_pad, io_front_pad; |
1023 | unsigned int min_pool_size = 0, pool_size; |
1024 | struct dm_md_mempools *pools; |
1025 | |
1026 | if (unlikely(type == DM_TYPE_NONE)) { |
1027 | DMERR("no table type is set, can't allocate mempools" ); |
1028 | return -EINVAL; |
1029 | } |
1030 | |
1031 | pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
1032 | if (!pools) |
1033 | return -ENOMEM; |
1034 | |
1035 | if (type == DM_TYPE_REQUEST_BASED) { |
1036 | pool_size = dm_get_reserved_rq_based_ios(); |
1037 | front_pad = offsetof(struct dm_rq_clone_bio_info, clone); |
1038 | goto init_bs; |
1039 | } |
1040 | |
1041 | for (unsigned int i = 0; i < t->num_targets; i++) { |
1042 | struct dm_target *ti = dm_table_get_target(t, i);
1043 | |
1044 | per_io_data_size = max(per_io_data_size, ti->per_io_data_size); |
1045 | min_pool_size = max(min_pool_size, ti->num_flush_bios); |
1046 | } |
1047 | pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); |
1048 | front_pad = roundup(per_io_data_size, |
1049 | __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET; |
1050 | |
1051 | io_front_pad = roundup(per_io_data_size, |
1052 | __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET; |
1053 | if (bioset_init(&pools->io_bs, pool_size, io_front_pad, |
1054 | dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0))
1055 | goto out_free_pools; |
1056 | if (t->integrity_supported && |
1057 | bioset_integrity_create(&pools->io_bs, pool_size)) |
1058 | goto out_free_pools; |
1059 | init_bs: |
1060 | if (bioset_init(&pools->bs, pool_size, front_pad, 0))
1061 | goto out_free_pools; |
1062 | if (t->integrity_supported && |
1063 | bioset_integrity_create(&pools->bs, pool_size)) |
1064 | goto out_free_pools; |
1065 | |
1066 | t->mempools = pools; |
1067 | return 0; |
1068 | |
1069 | out_free_pools: |
1070 | dm_free_md_mempools(pools); |
1071 | return -ENOMEM; |
1072 | } |
1073 | |
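/*
 * Allocate all internal (non-leaf) btree nodes in one block and fill each
 * level, bottom-up, from the highs of the level below it.
 */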
1074 | static int setup_indexes(struct dm_table *t) |
1075 | { |
1076 | int i; |
1077 | unsigned int total = 0; |
1078 | sector_t *indexes; |
1079 | |
1080 | /* allocate the space for *all* the indexes */ |
1081 | for (i = t->depth - 2; i >= 0; i--) { |
1082 | t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE); |
1083 | total += t->counts[i]; |
1084 | } |
1085 | |
1086 | indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
1087 | if (!indexes) |
1088 | return -ENOMEM; |
1089 | |
1090 | /* set up internal nodes, bottom-up */ |
1091 | for (i = t->depth - 2; i >= 0; i--) { |
1092 | t->index[i] = indexes; |
1093 | indexes += (KEYS_PER_NODE * t->counts[i]); |
1094 | setup_btree_index(i, t);
1095 | } |
1096 | |
1097 | return 0; |
1098 | } |
1099 | |
1100 | /* |
1101 | * Builds the btree to index the map. |
1102 | */ |
1103 | static int dm_table_build_index(struct dm_table *t) |
1104 | { |
1105 | int r = 0; |
1106 | unsigned int leaf_nodes; |
1107 | |
1108 | /* how many indexes will the btree have ? */ |
1109 | leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); |
1110 | t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1111 | |
1112 | /* leaf layer has already been set up */ |
1113 | t->counts[t->depth - 1] = leaf_nodes; |
1114 | t->index[t->depth - 1] = t->highs; |
1115 | |
1116 | if (t->depth >= 2) |
1117 | r = setup_indexes(t); |
1118 | |
1119 | return r; |
1120 | } |
1121 | |
1122 | static bool integrity_profile_exists(struct gendisk *disk) |
1123 | { |
1124 | return !!blk_get_integrity(disk); |
1125 | } |
1126 | |
1127 | /* |
1128 | * Get a disk whose integrity profile reflects the table's profile. |
1129 | * Returns NULL if integrity support was inconsistent or unavailable. |
1130 | */ |
1131 | static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t) |
1132 | { |
1133 | struct list_head *devices = dm_table_get_devices(t); |
1134 | struct dm_dev_internal *dd = NULL; |
1135 | struct gendisk *prev_disk = NULL, *template_disk = NULL; |
1136 | |
1137 | for (unsigned int i = 0; i < t->num_targets; i++) { |
1138 | struct dm_target *ti = dm_table_get_target(t, i);
1139 | |
1140 | if (!dm_target_passes_integrity(ti->type)) |
1141 | goto no_integrity; |
1142 | } |
1143 | |
1144 | list_for_each_entry(dd, devices, list) { |
1145 | template_disk = dd->dm_dev->bdev->bd_disk; |
1146 | if (!integrity_profile_exists(template_disk))
1147 | goto no_integrity; |
1148 | else if (prev_disk && |
1149 | blk_integrity_compare(prev_disk, template_disk) < 0) |
1150 | goto no_integrity; |
1151 | prev_disk = template_disk; |
1152 | } |
1153 | |
1154 | return template_disk; |
1155 | |
1156 | no_integrity: |
1157 | if (prev_disk) |
1158 | DMWARN("%s: integrity not set: %s and %s profile mismatch" , |
1159 | dm_device_name(t->md), |
1160 | prev_disk->disk_name, |
1161 | template_disk->disk_name); |
1162 | return NULL; |
1163 | } |
1164 | |
1165 | /* |
1166 | * Register the mapped device for blk_integrity support if the |
1167 | * underlying devices have an integrity profile. But all devices may |
1168 | * not have matching profiles (checking all devices isn't reliable |
1169 | * during table load because this table may use other DM device(s) which |
1170 | * must be resumed before they will have an initialized integrity
1171 | * profile). Consequently, stacked DM devices force a 2 stage integrity |
1172 | * profile validation: First pass during table load, final pass during |
1173 | * resume. |
1174 | */ |
1175 | static int dm_table_register_integrity(struct dm_table *t) |
1176 | { |
1177 | struct mapped_device *md = t->md; |
1178 | struct gendisk *template_disk = NULL; |
1179 | |
1180 | /* If target handles integrity itself do not register it here. */ |
1181 | if (t->integrity_added) |
1182 | return 0; |
1183 | |
1184 | template_disk = dm_table_get_integrity_disk(t); |
1185 | if (!template_disk) |
1186 | return 0; |
1187 | |
1188 | if (!integrity_profile_exists(dm_disk(md))) {
1189 | t->integrity_supported = true; |
1190 | /* |
1191 | * Register integrity profile during table load; we can do |
1192 | * this because the final profile must match during resume. |
1193 | */ |
1194 | blk_integrity_register(dm_disk(md), |
1195 | blk_get_integrity(template_disk));
1196 | return 0; |
1197 | } |
1198 | |
1199 | /* |
1200 | * If DM device already has an initialized integrity |
1201 | * profile the new profile should not conflict. |
1202 | */ |
1203 | if (blk_integrity_compare(dm_disk(md), template_disk) < 0) { |
1204 | DMERR("%s: conflict with existing integrity profile: %s profile mismatch" , |
1205 | dm_device_name(t->md), |
1206 | template_disk->disk_name); |
1207 | return 1; |
1208 | } |
1209 | |
1210 | /* Preserve existing integrity profile */ |
1211 | t->integrity_supported = true; |
1212 | return 0; |
1213 | } |
1214 | |
1215 | #ifdef CONFIG_BLK_INLINE_ENCRYPTION |
1216 | |
1217 | struct dm_crypto_profile { |
1218 | struct blk_crypto_profile profile; |
1219 | struct mapped_device *md; |
1220 | }; |
1221 | |
1222 | static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev, |
1223 | sector_t start, sector_t len, void *data) |
1224 | { |
1225 | const struct blk_crypto_key *key = data; |
1226 | |
1227 | blk_crypto_evict_key(dev->bdev, key);
1228 | return 0; |
1229 | } |
1230 | |
1231 | /* |
1232 | * When an inline encryption key is evicted from a device-mapper device, evict |
1233 | * it from all the underlying devices. |
1234 | */ |
1235 | static int dm_keyslot_evict(struct blk_crypto_profile *profile, |
1236 | const struct blk_crypto_key *key, unsigned int slot) |
1237 | { |
1238 | struct mapped_device *md = |
1239 | container_of(profile, struct dm_crypto_profile, profile)->md; |
1240 | struct dm_table *t; |
1241 | int srcu_idx; |
1242 | |
1243 | t = dm_get_live_table(md, &srcu_idx);
1244 | if (!t) |
1245 | return 0; |
1246 | |
1247 | for (unsigned int i = 0; i < t->num_targets; i++) { |
1248 | struct dm_target *ti = dm_table_get_target(t, i);
1249 | |
1250 | if (!ti->type->iterate_devices) |
1251 | continue; |
1252 | ti->type->iterate_devices(ti, dm_keyslot_evict_callback, |
1253 | (void *)key); |
1254 | } |
1255 | |
1256 | dm_put_live_table(md, srcu_idx); |
1257 | return 0; |
1258 | } |
1259 | |
1260 | static int |
1261 | device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev, |
1262 | sector_t start, sector_t len, void *data) |
1263 | { |
1264 | struct blk_crypto_profile *parent = data; |
1265 | struct blk_crypto_profile *child = |
1266 | bdev_get_queue(dev->bdev)->crypto_profile;
1267 | |
1268 | blk_crypto_intersect_capabilities(parent, child); |
1269 | return 0; |
1270 | } |
1271 | |
1272 | void dm_destroy_crypto_profile(struct blk_crypto_profile *profile) |
1273 | { |
1274 | struct dm_crypto_profile *dmcp = container_of(profile, |
1275 | struct dm_crypto_profile, |
1276 | profile); |
1277 | |
1278 | if (!profile) |
1279 | return; |
1280 | |
1281 | blk_crypto_profile_destroy(profile); |
1282 | kfree(dmcp);
1283 | } |
1284 | |
1285 | static void dm_table_destroy_crypto_profile(struct dm_table *t) |
1286 | { |
1287 | dm_destroy_crypto_profile(t->crypto_profile);
1288 | t->crypto_profile = NULL; |
1289 | } |
1290 | |
1291 | /* |
1292 | * Constructs and initializes t->crypto_profile with a crypto profile that |
1293 | * represents the common set of crypto capabilities of the devices described by |
1294 | * the dm_table. However, if the constructed crypto profile doesn't support all |
1295 | * crypto capabilities that are supported by the current mapped_device, it |
1296 | * returns an error instead, since we don't support removing crypto capabilities |
1297 | * on table changes. Finally, if the constructed crypto profile is "empty" (has |
1298 | * no crypto capabilities at all), it just sets t->crypto_profile to NULL. |
1299 | */ |
1300 | static int dm_table_construct_crypto_profile(struct dm_table *t) |
1301 | { |
1302 | struct dm_crypto_profile *dmcp; |
1303 | struct blk_crypto_profile *profile; |
1304 | unsigned int i; |
1305 | bool empty_profile = true; |
1306 | |
1307 | dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL);
1308 | if (!dmcp) |
1309 | return -ENOMEM; |
1310 | dmcp->md = t->md; |
1311 | |
1312 | profile = &dmcp->profile; |
1313 | blk_crypto_profile_init(profile, 0);
1314 | profile->ll_ops.keyslot_evict = dm_keyslot_evict; |
1315 | profile->max_dun_bytes_supported = UINT_MAX; |
1316 | memset(profile->modes_supported, 0xFF, |
1317 | sizeof(profile->modes_supported)); |
1318 | |
1319 | for (i = 0; i < t->num_targets; i++) { |
1320 | struct dm_target *ti = dm_table_get_target(t, i);
1321 | 
1322 | if (!dm_target_passes_crypto(ti->type)) {
1323 | blk_crypto_intersect_capabilities(profile, NULL);
1324 | break; |
1325 | } |
1326 | if (!ti->type->iterate_devices) |
1327 | continue; |
1328 | ti->type->iterate_devices(ti, |
1329 | device_intersect_crypto_capabilities, |
1330 | profile); |
1331 | } |
1332 | |
1333 | if (t->md->queue && |
1334 | !blk_crypto_has_capabilities(profile,
1335 | t->md->queue->crypto_profile)) {
1336 | DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
1337 | dm_destroy_crypto_profile(profile); |
1338 | return -EINVAL; |
1339 | } |
1340 | |
1341 | /* |
1342 | * If the new profile doesn't actually support any crypto capabilities, |
1343 | * we may as well represent it with a NULL profile. |
1344 | */ |
1345 | for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) { |
1346 | if (profile->modes_supported[i]) { |
1347 | empty_profile = false; |
1348 | break; |
1349 | } |
1350 | } |
1351 | |
1352 | if (empty_profile) { |
1353 | dm_destroy_crypto_profile(profile); |
1354 | profile = NULL; |
1355 | } |
1356 | |
1357 | /* |
1358 | * t->crypto_profile is only set temporarily while the table is being |
1359 | * set up, and it gets set to NULL after the profile has been |
1360 | * transferred to the request_queue. |
1361 | */ |
1362 | t->crypto_profile = profile; |
1363 | |
1364 | return 0; |
1365 | } |
1366 | |
1367 | static void dm_update_crypto_profile(struct request_queue *q, |
1368 | struct dm_table *t) |
1369 | { |
1370 | if (!t->crypto_profile) |
1371 | return; |
1372 | |
1373 | /* Make the crypto profile less restrictive. */ |
1374 | if (!q->crypto_profile) { |
1375 | blk_crypto_register(t->crypto_profile, q);
1376 | } else {
1377 | blk_crypto_update_capabilities(q->crypto_profile,
1378 | t->crypto_profile);
1379 | dm_destroy_crypto_profile(t->crypto_profile);
1380 | } |
1381 | t->crypto_profile = NULL; |
1382 | } |
1383 | |
1384 | #else /* CONFIG_BLK_INLINE_ENCRYPTION */ |
1385 | |
1386 | static int dm_table_construct_crypto_profile(struct dm_table *t) |
1387 | { |
1388 | return 0; |
1389 | } |
1390 | |
1391 | void dm_destroy_crypto_profile(struct blk_crypto_profile *profile) |
1392 | { |
1393 | } |
1394 | |
1395 | static void dm_table_destroy_crypto_profile(struct dm_table *t) |
1396 | { |
1397 | } |
1398 | |
1399 | static void dm_update_crypto_profile(struct request_queue *q, |
1400 | struct dm_table *t) |
1401 | { |
1402 | } |
1403 | |
1404 | #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ |
1405 | |
1406 | /* |
1407 | * Prepares the table for use by building the indices, |
1408 | * setting the type, and allocating mempools. |
1409 | */ |
1410 | int dm_table_complete(struct dm_table *t) |
1411 | { |
1412 | int r; |
1413 | |
1414 | r = dm_table_determine_type(t); |
1415 | if (r) { |
1416 | DMERR("unable to determine table type" ); |
1417 | return r; |
1418 | } |
1419 | |
1420 | r = dm_table_build_index(t); |
1421 | if (r) { |
1422 | DMERR("unable to build btrees" ); |
1423 | return r; |
1424 | } |
1425 | |
1426 | r = dm_table_register_integrity(t); |
1427 | if (r) { |
1428 | DMERR("could not register integrity profile." ); |
1429 | return r; |
1430 | } |
1431 | |
1432 | r = dm_table_construct_crypto_profile(t); |
1433 | if (r) { |
1434 | DMERR("could not construct crypto profile." ); |
1435 | return r; |
1436 | } |
1437 | |
1438 | r = dm_table_alloc_md_mempools(t, t->md);
1439 | if (r)
1440 | DMERR("unable to allocate mempools");
1441 | |
1442 | return r; |
1443 | } |
1444 | |
1445 | static DEFINE_MUTEX(_event_lock); |
1446 | void dm_table_event_callback(struct dm_table *t, |
1447 | void (*fn)(void *), void *context) |
1448 | { |
1449 | mutex_lock(&_event_lock); |
1450 | t->event_fn = fn; |
1451 | t->event_context = context; |
1452 | mutex_unlock(&_event_lock);
1453 | } |
1454 | |
1455 | void dm_table_event(struct dm_table *t) |
1456 | { |
1457 | mutex_lock(&_event_lock); |
1458 | if (t->event_fn) |
1459 | t->event_fn(t->event_context); |
1460 | mutex_unlock(&_event_lock);
1461 | } |
1462 | EXPORT_SYMBOL(dm_table_event); |
1463 | |
1464 | inline sector_t dm_table_get_size(struct dm_table *t) |
1465 | { |
1466 | return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; |
1467 | } |
1468 | EXPORT_SYMBOL(dm_table_get_size); |
1469 | |
1470 | /* |
1471 | * Search the btree for the correct target. |
1472 | * |
1473 | * Caller should check returned pointer for NULL |
1474 | * to trap I/O beyond end of device. |
1475 | */ |
1476 | struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) |
1477 | { |
1478 | unsigned int l, n = 0, k = 0; |
1479 | sector_t *node; |
1480 | |
1481 | if (unlikely(sector >= dm_table_get_size(t))) |
1482 | return NULL; |
1483 | |
1484 | for (l = 0; l < t->depth; l++) { |
1485 | n = get_child(n, k); |
1486 | node = get_node(t, l, n); |
1487 | |
1488 | for (k = 0; k < KEYS_PER_NODE; k++) |
1489 | if (node[k] >= sector) |
1490 | break; |
1491 | } |
1492 | |
1493 | return &t->targets[(KEYS_PER_NODE * n) + k]; |
1494 | } |
1495 | |
1496 | static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev, |
1497 | sector_t start, sector_t len, void *data) |
1498 | { |
1499 | struct request_queue *q = bdev_get_queue(dev->bdev);
1500 | |
1501 | return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags); |
1502 | } |
1503 | |
1504 | /* |
1505 | * type->iterate_devices() should be called when the sanity check needs to |
1506 | * iterate and check all underlying data devices. iterate_devices() will |
1507 | * iterate all underlying data devices until it encounters a non-zero return |
1508 | * code, returned by whether the input iterate_devices_callout_fn, or |
1509 | * iterate_devices() itself internally. |
1510 | * |
1511 | * For some target type (e.g. dm-stripe), one call of iterate_devices() may |
1512 | * iterate multiple underlying devices internally, in which case a non-zero |
1513 | * return code returned by iterate_devices_callout_fn will stop the iteration |
1514 | * in advance. |
1515 | * |
1516 | * Cases requiring _any_ underlying device supporting some kind of attribute, |
1517 | * should use the iteration structure like dm_table_any_dev_attr(), or call |
1518 | * it directly. @func should handle semantics of positive examples, e.g. |
1519 | * capable of something. |
1520 | * |
1521 | * Cases requiring _all_ underlying devices supporting some kind of attribute, |
1522 | * should use the iteration structure like dm_table_supports_nowait() or |
1523 | * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that |
1524 | * uses an @anti_func that handles semantics of counter examples, e.g. not
1525 | * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data); |
1526 | */ |
1527 | static bool dm_table_any_dev_attr(struct dm_table *t, |
1528 | iterate_devices_callout_fn func, void *data) |
1529 | { |
1530 | for (unsigned int i = 0; i < t->num_targets; i++) { |
1531 | struct dm_target *ti = dm_table_get_target(t, i);
1532 | |
1533 | if (ti->type->iterate_devices && |
1534 | ti->type->iterate_devices(ti, func, data)) |
1535 | return true; |
1536 | } |
1537 | |
1538 | return false; |
1539 | } |
1540 | |
1541 | static int count_device(struct dm_target *ti, struct dm_dev *dev, |
1542 | sector_t start, sector_t len, void *data) |
1543 | { |
1544 | unsigned int *num_devices = data; |
1545 | |
1546 | (*num_devices)++; |
1547 | |
1548 | return 0; |
1549 | } |
1550 | |
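/* Polled I/O is supported only if every underlying device queue supports it. */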
1551 | static bool dm_table_supports_poll(struct dm_table *t) |
1552 | { |
1553 | for (unsigned int i = 0; i < t->num_targets; i++) { |
1554 | struct dm_target *ti = dm_table_get_target(t, i);
1555 | |
1556 | if (!ti->type->iterate_devices || |
1557 | ti->type->iterate_devices(ti, device_not_poll_capable, NULL)) |
1558 | return false; |
1559 | } |
1560 | |
1561 | return true; |
1562 | } |
1563 | |
1564 | /* |
1565 | * Check whether a table has no data devices attached using each |
1566 | * target's iterate_devices method. |
1567 | * Returns false if the result is unknown because a target doesn't |
1568 | * support iterate_devices. |
1569 | */ |
1570 | bool dm_table_has_no_data_devices(struct dm_table *t) |
1571 | { |
1572 | for (unsigned int i = 0; i < t->num_targets; i++) { |
1573 | struct dm_target *ti = dm_table_get_target(t, i);
1574 | unsigned int num_devices = 0; |
1575 | |
1576 | if (!ti->type->iterate_devices) |
1577 | return false; |
1578 | |
1579 | ti->type->iterate_devices(ti, count_device, &num_devices); |
1580 | if (num_devices) |
1581 | return false; |
1582 | } |
1583 | |
1584 | return true; |
1585 | } |
1586 | |
1587 | static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev, |
1588 | sector_t start, sector_t len, void *data) |
1589 | { |
1590 | bool *zoned = data; |
1591 | |
1592 | return bdev_is_zoned(dev->bdev) != *zoned;
1593 | } |
1594 | |
1595 | static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev, |
1596 | sector_t start, sector_t len, void *data) |
1597 | { |
1598 | return bdev_is_zoned(dev->bdev);
1599 | } |
1600 | |
1601 | /* |
1602 | * Check the device zoned model based on the target feature flag. If the target |
1603 | * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are |
1604 | * also accepted but all devices must have the same zoned model. If the target |
1605 | * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any |
1606 | * zoned model with all zoned devices having the same zone size. |
1607 | */ |
1608 | static bool dm_table_supports_zoned(struct dm_table *t, bool zoned) |
1609 | { |
1610 | for (unsigned int i = 0; i < t->num_targets; i++) { |
1611 | struct dm_target *ti = dm_table_get_target(t, i);
1612 | |
1613 | /* |
1614 | * For the wildcard target (dm-error), if we do not have a |
1615 | * backing device, we must always return false. If we have a |
1616 | * backing device, the result must depend on checking zoned |
1617 | * model, like for any other target. So for this, check directly |
1618 | * if the target backing device is zoned as we get "false" when |
1619 | * dm-error was set without a backing device. |
1620 | */ |
1621 | if (dm_target_is_wildcard(ti->type) && |
1622 | !ti->type->iterate_devices(ti, device_is_zoned_model, NULL)) |
1623 | return false; |
1624 | |
1625 | if (dm_target_supports_zoned_hm(ti->type)) { |
1626 | if (!ti->type->iterate_devices || |
1627 | ti->type->iterate_devices(ti, device_not_zoned, |
1628 | &zoned)) |
1629 | return false; |
1630 | } else if (!dm_target_supports_mixed_zoned_model(ti->type)) { |
1631 | if (zoned) |
1632 | return false; |
1633 | } |
1634 | } |
1635 | |
1636 | return true; |
1637 | } |
1638 | |
1639 | static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev, |
1640 | sector_t start, sector_t len, void *data) |
1641 | { |
1642 | unsigned int *zone_sectors = data; |
1643 | |
1644 | if (!bdev_is_zoned(dev->bdev))
1645 | return 0;
1646 | return bdev_zone_sectors(dev->bdev) != *zone_sectors;
1647 | } |
1648 | |
1649 | /* |
1650 | * Check consistency of zoned model and zone sectors across all targets. For |
1651 | * zone sectors, if the destination device is a zoned block device, it shall |
1652 | * have the specified zone_sectors. |
1653 | */ |
1654 | static int validate_hardware_zoned(struct dm_table *t, bool zoned, |
1655 | unsigned int zone_sectors) |
1656 | { |
1657 | if (!zoned) |
1658 | return 0; |
1659 | |
1660 | if (!dm_table_supports_zoned(t, zoned)) { |
1661 | DMERR("%s: zoned model is not consistent across all devices" , |
1662 | dm_device_name(t->md)); |
1663 | return -EINVAL; |
1664 | } |
1665 | |
1666 | /* Check zone size validity and compatibility */ |
1667 | if (!zone_sectors || !is_power_of_2(zone_sectors))
1668 | return -EINVAL; |
1669 | |
1670 | if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) {
1671 | DMERR("%s: zone sectors is not consistent across all zoned devices",
1672 | dm_device_name(t->md)); |
1673 | return -EINVAL; |
1674 | } |
1675 | |
1676 | return 0; |
1677 | } |
1678 | |
1679 | /* |
1680 | * Establish the new table's queue_limits and validate them. |
1681 | */ |
1682 | int dm_calculate_queue_limits(struct dm_table *t, |
1683 | struct queue_limits *limits) |
1684 | { |
1685 | struct queue_limits ti_limits; |
1686 | unsigned int zone_sectors = 0; |
1687 | bool zoned = false; |
1688 | |
1689 | blk_set_stacking_limits(limits);
1690 | |
1691 | for (unsigned int i = 0; i < t->num_targets; i++) { |
1692 | struct dm_target *ti = dm_table_get_target(t, i);
1693 | 
1694 | blk_set_stacking_limits(&ti_limits);
1695 | |
1696 | if (!ti->type->iterate_devices) { |
1697 | /* Set I/O hints portion of queue limits */ |
1698 | if (ti->type->io_hints) |
1699 | ti->type->io_hints(ti, &ti_limits); |
1700 | goto combine_limits; |
1701 | } |
1702 | |
1703 | /* |
1704 | * Combine queue limits of all the devices this target uses. |
1705 | */ |
1706 | ti->type->iterate_devices(ti, dm_set_device_limits, |
1707 | &ti_limits); |
1708 | |
1709 | if (!zoned && ti_limits.zoned) { |
1710 | /* |
1711 | * After stacking all limits, validate all devices |
1712 | * in table support this zoned model and zone sectors. |
1713 | */ |
1714 | zoned = ti_limits.zoned; |
1715 | zone_sectors = ti_limits.chunk_sectors; |
1716 | } |
1717 | |
1718 | /* Set I/O hints portion of queue limits */ |
1719 | if (ti->type->io_hints) |
1720 | ti->type->io_hints(ti, &ti_limits); |
1721 | |
1722 | /* |
1723 | * Check each device area is consistent with the target's |
1724 | * overall queue limits. |
1725 | */ |
1726 | if (ti->type->iterate_devices(ti, device_area_is_invalid, |
1727 | &ti_limits)) |
1728 | return -EINVAL; |
1729 | |
1730 | combine_limits: |
1731 | /* |
1732 | * Merge this target's queue limits into the overall limits |
1733 | * for the table. |
1734 | */ |
1735 | if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1736 | DMWARN("%s: adding target device (start sect %llu len %llu) "
1737 | "caused an alignment inconsistency",
1738 | dm_device_name(t->md), |
1739 | (unsigned long long) ti->begin, |
1740 | (unsigned long long) ti->len); |
1741 | } |
1742 | |
1743 | /* |
1744 | * Verify that the zoned model and zone sectors, as determined before |
1745 | * any .io_hints override, are the same across all devices in the table. |
1746 | * - this is especially relevant if .io_hints is emulating a disk-managed |
1747 | * zoned model on host-managed zoned block devices. |
1748 | * BUT... |
1749 | */ |
1750 | if (limits->zoned) { |
1751 | /* |
1752 | * ...IF the above limits stacking determined a zoned model |
1753 | * validate that all of the table's devices conform to it. |
1754 | */ |
1755 | zoned = limits->zoned; |
1756 | zone_sectors = limits->chunk_sectors; |
1757 | } |
1758 | if (validate_hardware_zoned(t, zoned, zone_sectors)) |
1759 | return -EINVAL; |
1760 | |
1761 | return validate_hardware_logical_block_alignment(t, limits); |
1762 | } |
1763 | |
/*
 * Verify that all devices have an integrity profile that matches the
 * DM device's registered integrity profile. If the profiles don't
 * match then unregister the DM device's integrity profile.
 */
static void dm_table_verify_integrity(struct dm_table *t)
{
	struct gendisk *template_disk = NULL;

	if (t->integrity_added)
		return;

	if (t->integrity_supported) {
		/*
		 * Verify that the original integrity profile
		 * matches all the devices in this table.
		 */
		template_disk = dm_table_get_integrity_disk(t);
		if (template_disk &&
		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
			return;
	}

	if (integrity_profile_exists(dm_disk(t->md))) {
		DMWARN("%s: unable to establish an integrity profile",
		       dm_device_name(t->md));
		blk_integrity_unregister(dm_disk(t->md));
	}
}

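/*
 * iterate_devices callback: return true if @dev's request queue has the
 * write-cache/FUA queue flag bit passed in via @data set.
 */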
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	unsigned long flush = (unsigned long) data;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return (q->queue_flags & flush);
}

static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
	/*
	 * Require at least one underlying device to support flushes.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting flushes must provide.
	 */
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_flush_bios)
			continue;

		if (ti->flush_supported)
			return true;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
			return true;
	}

	return false;
}

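/*
 * iterate_devices callback: return true if the device has a DAX device and
 * its write cache is enabled.
 */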
static int device_dax_write_cache_enabled(struct dm_target *ti,
					  struct dm_dev *dev, sector_t start,
					  sector_t len, void *data)
{
	struct dax_device *dax_dev = dev->dax_dev;

	if (!dax_dev)
		return false;

	if (dax_write_cache_enabled(dax_dev))
		return true;
	return false;
}

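/* iterate_devices callback: return true if the device is rotational. */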
static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	return !bdev_nonrot(dev->bdev);
}

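/*
 * iterate_devices callback: return true if the device's queue does not
 * feed its I/O timings into the entropy pool (QUEUE_FLAG_ADD_RANDOM clear).
 */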
static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return !blk_queue_add_random(q);
}

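/*
 * iterate_devices callback: return true if the device cannot handle
 * REQ_OP_WRITE_ZEROES.
 */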
static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return !q->limits.max_write_zeroes_sectors;
}

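/*
 * Return true only if every target issues write-zeroes bios and none of its
 * underlying devices lacks REQ_OP_WRITE_ZEROES support.
 */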
static bool dm_table_supports_write_zeroes(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_write_zeroes_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
			return false;
	}

	return true;
}

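/*
 * iterate_devices callback: return true if the device cannot handle
 * REQ_NOWAIT bios.
 */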
static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{
	return !bdev_nowait(dev->bdev);
}

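/*
 * Return true only if every target type supports nowait submission and all
 * underlying devices can handle REQ_NOWAIT bios.
 */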
static bool dm_table_supports_nowait(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!dm_target_supports_nowait(ti->type))
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
			return false;
	}

	return true;
}

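/*
 * iterate_devices callback: return true if the device does not support
 * discards.
 */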
static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				      sector_t start, sector_t len, void *data)
{
	return !bdev_max_discard_sectors(dev->bdev);
}

static bool dm_table_supports_discards(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_discard_bios)
			return false;

		/*
		 * Either the target provides discard support (as implied by setting
		 * 'discards_supported') or it relies on _all_ data devices having
		 * discard support.
		 */
		if (!ti->discards_supported &&
		    (!ti->type->iterate_devices ||
		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
			return false;
	}

	return true;
}

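/*
 * iterate_devices callback: return true if the device does not support
 * secure erase.
 */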
static int device_not_secure_erase_capable(struct dm_target *ti,
					   struct dm_dev *dev, sector_t start,
					   sector_t len, void *data)
{
	return !bdev_max_secure_erase_sectors(dev->bdev);
}

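/*
 * Return true only if every target issues secure-erase bios and all of its
 * underlying devices support secure erase.
 */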
static bool dm_table_supports_secure_erase(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_secure_erase_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
			return false;
	}

	return true;
}

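/*
 * iterate_devices callback: return true if the device requires pages to
 * stay stable while they are under writeback.
 */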
static int device_requires_stable_pages(struct dm_target *ti,
					struct dm_dev *dev, sector_t start,
					sector_t len, void *data)
{
	return bdev_stable_writes(dev->bdev);
}

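/*
 * Apply the table's validated queue_limits to the mapped device's
 * request_queue and set or clear the feature-related queue flags (nowait,
 * discard, secure erase, write cache, DAX, rotational, stable writes,
 * entropy contribution, zoned, polling) according to what the targets and
 * their underlying devices support.
 */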
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			      struct queue_limits *limits)
{
	bool wc = false, fua = false;
	int r;

	/*
	 * Copy table's limits to the DM device's request_queue
	 */
	q->limits = *limits;

	if (dm_table_supports_nowait(t))
		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);

	if (!dm_table_supports_discards(t)) {
		q->limits.max_discard_sectors = 0;
		q->limits.max_hw_discard_sectors = 0;
		q->limits.discard_granularity = 0;
		q->limits.discard_alignment = 0;
		q->limits.discard_misaligned = 0;
	}

	if (!dm_table_supports_secure_erase(t))
		q->limits.max_secure_erase_sectors = 0;

	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
		wc = true;
		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
			fua = true;
	}
	blk_queue_write_cache(q, wc, fua);

	if (dm_table_supports_dax(t, device_not_dax_capable)) {
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
			set_dax_synchronous(t->md->dax_dev);
	} else
		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);

	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
		dax_write_cache(t->md->dax_dev, true);

	/* Ensure that all underlying devices are non-rotational. */
	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

	if (!dm_table_supports_write_zeroes(t))
		q->limits.max_write_zeroes_sectors = 0;

	dm_table_verify_integrity(t);

	/*
	 * Some devices don't use blk_integrity but still want stable pages
	 * because they do their own checksumming.
	 * If any underlying device requires stable pages, the table must require
	 * them as well. Only targets that support iterate_devices are considered:
	 * we don't want targets such as error or zero to force stable pages.
	 */
	if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);

	/*
	 * Determine whether or not this queue's I/O timings contribute
	 * to the entropy pool. Only request-based targets use this.
	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
	 * have it set.
	 */
	if (blk_queue_add_random(q) &&
	    dm_table_any_dev_attr(t, device_is_not_random, NULL))
		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);

	/*
	 * For a zoned target, set up the zone-related queue attributes and,
	 * if needed, the resources necessary for zone append emulation.
	 */
	if (blk_queue_is_zoned(q)) {
		r = dm_set_zones_restrictions(t, q);
		if (r)
			return r;
		if (!static_key_enabled(&zoned_enabled.key))
			static_branch_enable(&zoned_enabled);
	}

	dm_update_crypto_profile(q, t);
	disk_update_readahead(t->md->disk);

	/*
	 * The check for a request-based device is left to
	 * dm_mq_init_request_queue()->blk_mq_init_allocated_queue().
	 *
	 * For a bio-based device, only set QUEUE_FLAG_POLL when all
	 * underlying devices support polling.
	 */
	if (__table_type_bio_based(t->type)) {
		if (dm_table_supports_poll(t))
			blk_queue_flag_set(QUEUE_FLAG_POLL, q);
		else
			blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
	}

	return 0;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

blk_mode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}
EXPORT_SYMBOL(dm_table_get_mode);

enum suspend_mode {
	PRESUSPEND,
	PRESUSPEND_UNDO,
	POSTSUSPEND,
};

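/*
 * Invoke the presuspend, presuspend_undo or postsuspend hook, as selected
 * by @mode, on every target in the table.
 */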
static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{
	lockdep_assert_held(&t->md->suspend_lock);

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		switch (mode) {
		case PRESUSPEND:
			if (ti->type->presuspend)
				ti->type->presuspend(ti);
			break;
		case PRESUSPEND_UNDO:
			if (ti->type->presuspend_undo)
				ti->type->presuspend_undo(ti);
			break;
		case POSTSUSPEND:
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
			break;
		}
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND);
}

void dm_table_presuspend_undo_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND_UNDO);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, POSTSUSPEND);
}

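/*
 * Call every target's preresume hook, failing the resume on the first error,
 * and then invoke every target's resume hook.
 */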
int dm_table_resume_targets(struct dm_table *t)
{
	unsigned int i;
	int r = 0;

	lockdep_assert_held(&t->md->suspend_lock);

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r) {
			DMERR("%s: %s: preresume failed, error = %d",
			      dm_device_name(t->md), ti->type->name, r);
			return r;
		}
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}
EXPORT_SYMBOL(dm_table_get_md);

const char *dm_table_device_name(struct dm_table *t)
{
	return dm_device_name(t->md);
}
EXPORT_SYMBOL_GPL(dm_table_device_name);

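/*
 * For a request-based table, asynchronously kick the mapped device's
 * blk-mq hardware queues.
 */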
void dm_table_run_md_queue_async(struct dm_table *t)
{
	if (!dm_table_request_based(t))
		return;

	if (t->md->queue)
		blk_mq_run_hw_queues(t->md->queue, true);
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);