1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * Copyright (C) 2001 Sistina Software (UK) Limited. |
4 | * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. |
5 | * |
6 | * This file is released under the LGPL. |
7 | */ |
8 | |
9 | #ifndef _LINUX_DEVICE_MAPPER_H |
10 | #define _LINUX_DEVICE_MAPPER_H |
11 | |
12 | #include <linux/bio.h> |
13 | #include <linux/blkdev.h> |
14 | #include <linux/dm-ioctl.h> |
15 | #include <linux/math64.h> |
16 | #include <linux/ratelimit.h> |
17 | |
18 | struct dm_dev; |
19 | struct dm_target; |
20 | struct dm_table; |
21 | struct dm_report_zones_args; |
22 | struct mapped_device; |
23 | struct bio_vec; |
24 | enum dax_access_mode; |
25 | |
26 | /* |
27 | * Type of table, mapped_device's mempool and request_queue |
28 | */ |
29 | enum dm_queue_mode { |
30 | DM_TYPE_NONE = 0, |
31 | DM_TYPE_BIO_BASED = 1, |
32 | DM_TYPE_REQUEST_BASED = 2, |
33 | DM_TYPE_DAX_BIO_BASED = 3, |
34 | }; |
35 | |
36 | typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t; |
37 | |
38 | union map_info { |
39 | void *ptr; |
40 | }; |
41 | |
42 | /* |
43 | * In the constructor the target parameter will already have the |
44 | * table, type, begin and len fields filled in. |
45 | */ |
46 | typedef int (*dm_ctr_fn) (struct dm_target *target, |
47 | unsigned int argc, char **argv); |
48 | |
49 | /* |
50 | * The destructor doesn't need to free the dm_target, just |
 * anything hidden in ti->private.
52 | */ |
53 | typedef void (*dm_dtr_fn) (struct dm_target *ti); |
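
/*
 * Example: a minimal, hypothetical constructor/destructor pair.  The
 * "foo" names and the layout of struct foo_ctx are illustrative only:
 *
 *	struct foo_ctx {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int foo_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 *	{
 *		struct foo_ctx *fc;
 *
 *		if (argc != 2) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		fc = kzalloc(sizeof(*fc), GFP_KERNEL);
 *		if (!fc) {
 *			ti->error = "Cannot allocate foo context";
 *			return -ENOMEM;
 *		}
 *		... parse argv into fc->dev and fc->start ...
 *		ti->private = fc;
 *		return 0;
 *	}
 *
 *	static void foo_dtr(struct dm_target *ti)
 *	{
 *		kfree(ti->private);
 *	}
 */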
54 | |
55 | /* |
56 | * The map function must return: |
57 | * < 0: error |
58 | * = 0: The target will handle the io by resubmitting it later |
59 | * = 1: simple remap complete |
60 | * = 2: The target wants to push back the io |
61 | */ |
62 | typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio); |
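
/*
 * Example: a simple remap in the style of dm-linear, reusing the
 * hypothetical struct foo_ctx above.  DM_MAPIO_REMAPPED and
 * dm_target_offset() are defined later in this header:
 *
 *	static int foo_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct foo_ctx *fc = ti->private;
 *
 *		bio_set_dev(bio, fc->dev->bdev);
 *		if (bio_sectors(bio))
 *			bio->bi_iter.bi_sector = fc->start +
 *				dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */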
63 | typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti, |
64 | struct request *rq, |
65 | union map_info *map_context, |
66 | struct request **clone); |
67 | typedef void (*dm_release_clone_request_fn) (struct request *clone, |
68 | union map_info *map_context); |
69 | |
70 | /* |
71 | * Returns: |
72 | * < 0 : error (currently ignored) |
73 | * 0 : ended successfully |
74 | * 1 : for some reason the io has still not completed (eg, |
75 | * multipath target might want to requeue a failed io). |
76 | * 2 : The target wants to push back the io |
77 | */ |
78 | typedef int (*dm_endio_fn) (struct dm_target *ti, |
79 | struct bio *bio, blk_status_t *error); |
80 | typedef int (*dm_request_endio_fn) (struct dm_target *ti, |
81 | struct request *clone, blk_status_t error, |
82 | union map_info *map_context); |
83 | |
84 | typedef void (*dm_presuspend_fn) (struct dm_target *ti); |
85 | typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti); |
86 | typedef void (*dm_postsuspend_fn) (struct dm_target *ti); |
87 | typedef int (*dm_preresume_fn) (struct dm_target *ti); |
88 | typedef void (*dm_resume_fn) (struct dm_target *ti); |
89 | |
90 | typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type, |
91 | unsigned int status_flags, char *result, unsigned int maxlen); |
92 | |
93 | typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv, |
94 | char *result, unsigned int maxlen); |
95 | |
96 | typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev); |
97 | |
98 | #ifdef CONFIG_BLK_DEV_ZONED |
99 | typedef int (*dm_report_zones_fn) (struct dm_target *ti, |
100 | struct dm_report_zones_args *args, |
101 | unsigned int nr_zones); |
102 | #else |
103 | /* |
104 | * Define dm_report_zones_fn so that targets can assign to NULL if |
105 | * CONFIG_BLK_DEV_ZONED disabled. Otherwise each target needs to do |
106 | * awkward #ifdefs in their target_type, etc. |
107 | */ |
108 | typedef int (*dm_report_zones_fn) (struct dm_target *dummy); |
109 | #endif |
110 | |
111 | /* |
112 | * These iteration functions are typically used to check (and combine) |
113 | * properties of underlying devices. |
114 | * E.g. Does at least one underlying device support flush? |
115 | * Does any underlying device not support WRITE_SAME? |
116 | * |
117 | * The callout function is called once for each contiguous section of |
118 | * an underlying device. State can be maintained in *data. |
119 | * Return non-zero to stop iterating through any further devices. |
120 | */ |
121 | typedef int (*iterate_devices_callout_fn) (struct dm_target *ti, |
122 | struct dm_dev *dev, |
123 | sector_t start, sector_t len, |
124 | void *data); |
125 | |
126 | /* |
127 | * This function must iterate through each section of device used by the |
128 | * target until it encounters a non-zero return code, which it then returns. |
129 | * Returns zero if no callout returned non-zero. |
130 | */ |
131 | typedef int (*dm_iterate_devices_fn) (struct dm_target *ti, |
132 | iterate_devices_callout_fn fn, |
133 | void *data); |
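
/*
 * Example: a sketch of a callout answering "does every underlying device
 * have a volatile write cache?".  bdev_write_cache() is assumed to be
 * available from linux/blkdev.h:
 *
 *	static int device_not_write_cache_capable(struct dm_target *ti,
 *						  struct dm_dev *dev,
 *						  sector_t start, sector_t len,
 *						  void *data)
 *	{
 *		return !bdev_write_cache(dev->bdev);
 *	}
 *
 * Passing this to ti->type->iterate_devices() yields non-zero iff at
 * least one device lacks a write cache.
 */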
134 | |
135 | typedef void (*dm_io_hints_fn) (struct dm_target *ti, |
136 | struct queue_limits *limits); |
137 | |
138 | /* |
139 | * Returns: |
140 | * 0: The target can handle the next I/O immediately. |
141 | * 1: The target can't handle the next I/O immediately. |
142 | */ |
143 | typedef int (*dm_busy_fn) (struct dm_target *ti); |
144 | |
145 | /* |
146 | * Returns: |
147 | * < 0 : error |
148 | * >= 0 : the number of bytes accessible at the address |
149 | */ |
150 | typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff, |
		long nr_pages, enum dax_access_mode mode, void **kaddr,
152 | pfn_t *pfn); |
153 | typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff, |
154 | size_t nr_pages); |
155 | |
156 | /* |
157 | * Returns: |
158 | * != 0 : number of bytes transferred |
159 | * 0 : recovery write failed |
160 | */ |
161 | typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff, |
162 | void *addr, size_t bytes, struct iov_iter *i); |
163 | |
164 | void dm_error(const char *message); |
165 | |
166 | struct dm_dev { |
167 | struct block_device *bdev; |
168 | struct bdev_handle *bdev_handle; |
169 | struct dax_device *dax_dev; |
170 | blk_mode_t mode; |
171 | char name[16]; |
172 | }; |
173 | |
174 | /* |
175 | * Constructors should call these functions to ensure destination devices |
176 | * are opened/closed correctly. |
177 | */ |
178 | int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode, |
179 | struct dm_dev **result); |
180 | void dm_put_device(struct dm_target *ti, struct dm_dev *d); |
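
/*
 * Example: a hypothetical ctr fragment opening one destination device with
 * the table's mode (dm_table_get_mode() is declared later in this header):
 *
 *	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *			  &fc->dev)) {
 *		ti->error = "Device lookup failed";
 *		return -EINVAL;
 *	}
 *
 * The matching dtr must release it with dm_put_device(ti, fc->dev).
 */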
181 | |
182 | /* |
183 | * Information about a target type |
184 | */ |
185 | |
186 | struct target_type { |
187 | uint64_t features; |
188 | const char *name; |
189 | struct module *module; |
190 | unsigned int version[3]; |
191 | dm_ctr_fn ctr; |
192 | dm_dtr_fn dtr; |
193 | dm_map_fn map; |
194 | dm_clone_and_map_request_fn clone_and_map_rq; |
195 | dm_release_clone_request_fn release_clone_rq; |
196 | dm_endio_fn end_io; |
197 | dm_request_endio_fn rq_end_io; |
198 | dm_presuspend_fn presuspend; |
199 | dm_presuspend_undo_fn presuspend_undo; |
200 | dm_postsuspend_fn postsuspend; |
201 | dm_preresume_fn preresume; |
202 | dm_resume_fn resume; |
203 | dm_status_fn status; |
204 | dm_message_fn message; |
205 | dm_prepare_ioctl_fn prepare_ioctl; |
206 | dm_report_zones_fn report_zones; |
207 | dm_busy_fn busy; |
208 | dm_iterate_devices_fn iterate_devices; |
209 | dm_io_hints_fn io_hints; |
210 | dm_dax_direct_access_fn direct_access; |
211 | dm_dax_zero_page_range_fn dax_zero_page_range; |
212 | dm_dax_recovery_write_fn dax_recovery_write; |
213 | |
214 | /* For internal device-mapper use. */ |
215 | struct list_head list; |
216 | }; |
217 | |
218 | /* |
219 | * Target features |
220 | */ |
221 | |
222 | /* |
223 | * Any table that contains an instance of this target must have only one. |
224 | */ |
225 | #define DM_TARGET_SINGLETON 0x00000001 |
226 | #define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON) |
227 | |
228 | /* |
229 | * Indicates that a target does not support read-only devices. |
230 | */ |
231 | #define DM_TARGET_ALWAYS_WRITEABLE 0x00000002 |
232 | #define dm_target_always_writeable(type) \ |
233 | ((type)->features & DM_TARGET_ALWAYS_WRITEABLE) |
234 | |
235 | /* |
236 | * Any device that contains a table with an instance of this target may never |
237 | * have tables containing any different target type. |
238 | */ |
239 | #define DM_TARGET_IMMUTABLE 0x00000004 |
240 | #define dm_target_is_immutable(type) ((type)->features & DM_TARGET_IMMUTABLE) |
241 | |
242 | /* |
 * Indicates that a target may replace any target, even immutable targets.
 * .map, .clone_and_map_rq and .release_clone_rq are all defined.
245 | */ |
246 | #define DM_TARGET_WILDCARD 0x00000008 |
247 | #define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD) |
248 | |
249 | /* |
 * A target implements its own bio data integrity.
251 | */ |
252 | #define DM_TARGET_INTEGRITY 0x00000010 |
253 | #define dm_target_has_integrity(type) ((type)->features & DM_TARGET_INTEGRITY) |
254 | |
255 | /* |
256 | * A target passes integrity data to the lower device. |
257 | */ |
258 | #define DM_TARGET_PASSES_INTEGRITY 0x00000020 |
259 | #define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY) |
260 | |
261 | /* |
262 | * Indicates support for zoned block devices: |
263 | * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned |
264 | * block devices but does not support combining different zoned models. |
265 | * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple |
266 | * devices with different zoned models. |
267 | */ |
268 | #ifdef CONFIG_BLK_DEV_ZONED |
269 | #define DM_TARGET_ZONED_HM 0x00000040 |
270 | #define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM) |
271 | #else |
272 | #define DM_TARGET_ZONED_HM 0x00000000 |
273 | #define dm_target_supports_zoned_hm(type) (false) |
274 | #endif |
275 | |
276 | /* |
277 | * A target handles REQ_NOWAIT |
278 | */ |
279 | #define DM_TARGET_NOWAIT 0x00000080 |
280 | #define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT) |
281 | |
282 | /* |
283 | * A target supports passing through inline crypto support. |
284 | */ |
285 | #define DM_TARGET_PASSES_CRYPTO 0x00000100 |
286 | #define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO) |
287 | |
288 | #ifdef CONFIG_BLK_DEV_ZONED |
289 | #define DM_TARGET_MIXED_ZONED_MODEL 0x00000200 |
290 | #define dm_target_supports_mixed_zoned_model(type) \ |
291 | ((type)->features & DM_TARGET_MIXED_ZONED_MODEL) |
292 | #else |
293 | #define DM_TARGET_MIXED_ZONED_MODEL 0x00000000 |
294 | #define dm_target_supports_mixed_zoned_model(type) (false) |
295 | #endif |
296 | |
297 | struct dm_target { |
298 | struct dm_table *table; |
299 | struct target_type *type; |
300 | |
301 | /* target limits */ |
302 | sector_t begin; |
303 | sector_t len; |
304 | |
305 | /* If non-zero, maximum size of I/O submitted to a target. */ |
306 | uint32_t max_io_len; |
307 | |
308 | /* |
	 * The number of zero-length flush bios that will be submitted
	 * to the target for the purpose of flushing its cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
315 | */ |
316 | unsigned int num_flush_bios; |
317 | |
318 | /* |
319 | * The number of discard bios that will be submitted to the target. |
320 | * The bio number can be accessed with dm_bio_get_target_bio_nr. |
321 | */ |
322 | unsigned int num_discard_bios; |
323 | |
324 | /* |
325 | * The number of secure erase bios that will be submitted to the target. |
326 | * The bio number can be accessed with dm_bio_get_target_bio_nr. |
327 | */ |
328 | unsigned int num_secure_erase_bios; |
329 | |
330 | /* |
331 | * The number of WRITE ZEROES bios that will be submitted to the target. |
332 | * The bio number can be accessed with dm_bio_get_target_bio_nr. |
333 | */ |
334 | unsigned int num_write_zeroes_bios; |
335 | |
336 | /* |
337 | * The minimum number of extra bytes allocated in each io for the |
338 | * target to use. |
339 | */ |
340 | unsigned int per_io_data_size; |
341 | |
342 | /* target specific data */ |
343 | void *private; |
344 | |
345 | /* Used to provide an error string from the ctr */ |
346 | char *error; |
347 | |
348 | /* |
349 | * Set if this target needs to receive flushes regardless of |
350 | * whether or not its underlying devices have support. |
351 | */ |
352 | bool flush_supported:1; |
353 | |
354 | /* |
355 | * Set if this target needs to receive discards regardless of |
356 | * whether or not its underlying devices have support. |
357 | */ |
358 | bool discards_supported:1; |
359 | |
360 | /* |
361 | * Set if this target requires that discards be split on |
362 | * 'max_discard_sectors' boundaries. |
363 | */ |
364 | bool max_discard_granularity:1; |
365 | |
366 | /* |
367 | * Set if this target requires that secure_erases be split on |
368 | * 'max_secure_erase_sectors' boundaries. |
369 | */ |
370 | bool max_secure_erase_granularity:1; |
371 | |
372 | /* |
373 | * Set if this target requires that write_zeroes be split on |
374 | * 'max_write_zeroes_sectors' boundaries. |
375 | */ |
376 | bool max_write_zeroes_granularity:1; |
377 | |
378 | /* |
379 | * Set if we need to limit the number of in-flight bios when swapping. |
380 | */ |
381 | bool limit_swap_bios:1; |
382 | |
383 | /* |
384 | * Set if this target implements a zoned device and needs emulation of |
385 | * zone append operations using regular writes. |
386 | */ |
387 | bool emulate_zone_append:1; |
388 | |
389 | /* |
390 | * Set if the target will submit IO using dm_submit_bio_remap() |
391 | * after returning DM_MAPIO_SUBMITTED from its map function. |
392 | */ |
393 | bool accounts_remapped_io:1; |
394 | |
395 | /* |
396 | * Set if the target will submit the DM bio without first calling |
397 | * bio_set_dev(). NOTE: ideally a target should _not_ need this. |
398 | */ |
399 | bool needs_bio_set_dev:1; |
400 | }; |
401 | |
402 | void *dm_per_bio_data(struct bio *bio, size_t data_size); |
403 | struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size); |
404 | unsigned int dm_bio_get_target_bio_nr(const struct bio *bio); |
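
/*
 * Example: per-bio data is reserved by setting ti->per_io_data_size in the
 * ctr and retrieved in the map or end_io hook; struct foo_io is hypothetical:
 *
 *	ti->per_io_data_size = sizeof(struct foo_io);	(in the ctr)
 *
 *	struct foo_io *io = dm_per_bio_data(bio, sizeof(struct foo_io));
 */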
405 | |
406 | u64 dm_start_time_ns_from_clone(struct bio *bio); |
407 | |
408 | int dm_register_target(struct target_type *t); |
409 | void dm_unregister_target(struct target_type *t); |
410 | |
411 | /* |
412 | * Target argument parsing. |
413 | */ |
414 | struct dm_arg_set { |
415 | unsigned int argc; |
416 | char **argv; |
417 | }; |
418 | |
419 | /* |
420 | * The minimum and maximum value of a numeric argument, together with |
421 | * the error message to use if the number is found to be outside that range. |
422 | */ |
423 | struct dm_arg { |
424 | unsigned int min; |
425 | unsigned int max; |
426 | char *error; |
427 | }; |
428 | |
429 | /* |
430 | * Validate the next argument, either returning it as *value or, if invalid, |
431 | * returning -EINVAL and setting *error. |
432 | */ |
433 | int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set, |
434 | unsigned int *value, char **error); |
435 | |
436 | /* |
437 | * Process the next argument as the start of a group containing between |
438 | * arg->min and arg->max further arguments. Either return the size as |
439 | * *num_args or, if invalid, return -EINVAL and set *error. |
440 | */ |
441 | int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set, |
442 | unsigned int *num_args, char **error); |
443 | |
444 | /* |
445 | * Return the current argument and shift to the next. |
446 | */ |
447 | const char *dm_shift_arg(struct dm_arg_set *as); |
448 | |
449 | /* |
450 | * Move through num_args arguments. |
451 | */ |
452 | void dm_consume_args(struct dm_arg_set *as, unsigned int num_args); |
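
/*
 * Example: a sketch of parsing "<dev> <#opt_args> [<opt_args>...]" with the
 * helpers above (the limits and messages are illustrative):
 *
 *	static const struct dm_arg _args[] = {
 *		{0, 16, "Invalid number of optional arguments"},
 *	};
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	const char *dev_path = dm_shift_arg(&as);
 *	unsigned int num_opt;
 *	int r;
 *
 *	r = dm_read_arg_group(_args, &as, &num_opt, &ti->error);
 *	if (r)
 *		return r;
 *	while (num_opt--) {
 *		const char *opt = dm_shift_arg(&as);
 *		...
 *	}
 */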
453 | |
454 | /* |
455 | *---------------------------------------------------------------- |
456 | * Functions for creating and manipulating mapped devices. |
457 | * Drop the reference with dm_put when you finish with the object. |
458 | *---------------------------------------------------------------- |
459 | */ |
460 | |
461 | /* |
462 | * DM_ANY_MINOR chooses the next available minor number. |
463 | */ |
464 | #define DM_ANY_MINOR (-1) |
465 | int dm_create(int minor, struct mapped_device **md); |
466 | |
467 | /* |
468 | * Reference counting for md. |
469 | */ |
470 | struct mapped_device *dm_get_md(dev_t dev); |
471 | void dm_get(struct mapped_device *md); |
472 | int dm_hold(struct mapped_device *md); |
473 | void dm_put(struct mapped_device *md); |
474 | |
475 | /* |
476 | * An arbitrary pointer may be stored alongside a mapped device. |
477 | */ |
478 | void dm_set_mdptr(struct mapped_device *md, void *ptr); |
479 | void *dm_get_mdptr(struct mapped_device *md); |
480 | |
481 | /* |
482 | * A device can still be used while suspended, but I/O is deferred. |
483 | */ |
484 | int dm_suspend(struct mapped_device *md, unsigned int suspend_flags); |
485 | int dm_resume(struct mapped_device *md); |
486 | |
487 | /* |
488 | * Event functions. |
489 | */ |
490 | uint32_t dm_get_event_nr(struct mapped_device *md); |
491 | int dm_wait_event(struct mapped_device *md, int event_nr); |
492 | uint32_t dm_next_uevent_seq(struct mapped_device *md); |
493 | void dm_uevent_add(struct mapped_device *md, struct list_head *elist); |
494 | |
495 | /* |
496 | * Info functions. |
497 | */ |
498 | const char *dm_device_name(struct mapped_device *md); |
499 | int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); |
500 | struct gendisk *dm_disk(struct mapped_device *md); |
501 | int dm_suspended(struct dm_target *ti); |
502 | int dm_post_suspending(struct dm_target *ti); |
503 | int dm_noflush_suspending(struct dm_target *ti); |
504 | void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors); |
505 | void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone); |
506 | union map_info *dm_get_rq_mapinfo(struct request *rq); |
507 | |
508 | #ifdef CONFIG_BLK_DEV_ZONED |
509 | struct dm_report_zones_args { |
510 | struct dm_target *tgt; |
511 | sector_t next_sector; |
512 | |
513 | void *orig_data; |
514 | report_zones_cb orig_cb; |
515 | unsigned int zone_idx; |
516 | |
517 | /* must be filled by ->report_zones before calling dm_report_zones_cb */ |
518 | sector_t start; |
519 | }; |
520 | int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector, |
521 | struct dm_report_zones_args *args, unsigned int nr_zones); |
522 | #endif /* CONFIG_BLK_DEV_ZONED */ |
523 | |
524 | /* |
525 | * Device mapper functions to parse and create devices specified by the |
526 | * parameter "dm-mod.create=" |
527 | */ |
528 | int __init dm_early_create(struct dm_ioctl *dmi, |
529 | struct dm_target_spec **spec_array, |
530 | char **target_params_array); |
531 | |
532 | /* |
533 | * Geometry functions. |
534 | */ |
535 | int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo); |
536 | int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo); |
537 | |
538 | /* |
539 | *--------------------------------------------------------------- |
540 | * Functions for manipulating device-mapper tables. |
541 | *--------------------------------------------------------------- |
542 | */ |
543 | |
544 | /* |
545 | * First create an empty table. |
546 | */ |
547 | int dm_table_create(struct dm_table **result, blk_mode_t mode, |
548 | unsigned int num_targets, struct mapped_device *md); |
549 | |
550 | /* |
551 | * Then call this once for each target. |
552 | */ |
553 | int dm_table_add_target(struct dm_table *t, const char *type, |
554 | sector_t start, sector_t len, char *params); |
555 | |
556 | /* |
 * A target can use this to set the table's type.
 * It may only be called from a target's ctr.
 * Useful for a "hybrid" target (one that supports both bio-based
 * and request-based operation).
561 | */ |
562 | void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type); |
563 | |
564 | /* |
565 | * Finally call this to make the table ready for use. |
566 | */ |
567 | int dm_table_complete(struct dm_table *t); |
568 | |
569 | /* |
570 | * Destroy the table when finished. |
571 | */ |
572 | void dm_table_destroy(struct dm_table *t); |
573 | |
574 | /* |
575 | * Target may require that it is never sent I/O larger than len. |
576 | */ |
577 | int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len); |
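
/*
 * Example: a ctr fragment capping I/O at a hypothetical chunk_size (in
 * sectors); the result must be checked because of __must_check:
 *
 *	int r = dm_set_target_max_io_len(ti, chunk_size);
 *
 *	if (r)
 *		return r;
 */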
578 | |
579 | /* |
580 | * Table reference counting. |
581 | */ |
582 | struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx); |
583 | void dm_put_live_table(struct mapped_device *md, int srcu_idx); |
584 | void dm_sync_table(struct mapped_device *md); |
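
/*
 * Example: the usual pattern; the SRCU index filled in by
 * dm_get_live_table() must be handed back to dm_put_live_table():
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... inspect or use the table ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */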
585 | |
586 | /* |
587 | * Queries |
588 | */ |
589 | sector_t dm_table_get_size(struct dm_table *t); |
590 | blk_mode_t dm_table_get_mode(struct dm_table *t); |
591 | struct mapped_device *dm_table_get_md(struct dm_table *t); |
592 | const char *dm_table_device_name(struct dm_table *t); |
593 | |
594 | /* |
595 | * Trigger an event. |
596 | */ |
597 | void dm_table_event(struct dm_table *t); |
598 | |
599 | /* |
600 | * Run the queue for request-based targets. |
601 | */ |
602 | void dm_table_run_md_queue_async(struct dm_table *t); |
603 | |
604 | /* |
605 | * The device must be suspended before calling this method. |
606 | * Returns the previous table, which the caller must destroy. |
607 | */ |
608 | struct dm_table *dm_swap_table(struct mapped_device *md, |
609 | struct dm_table *t); |
610 | |
611 | /* |
612 | * Table blk_crypto_profile functions |
613 | */ |
614 | void dm_destroy_crypto_profile(struct blk_crypto_profile *profile); |
615 | |
616 | /* |
617 | *--------------------------------------------------------------- |
618 | * Macros. |
619 | *--------------------------------------------------------------- |
620 | */ |
621 | #define DM_NAME "device-mapper" |
622 | |
623 | #define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n" |
624 | |
625 | #define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__) |
626 | |
627 | #define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__) |
628 | #define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) |
629 | #define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__) |
630 | #define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) |
631 | #define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__) |
632 | #define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) |
633 | |
634 | #define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__) |
635 | #define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) |
636 | |
637 | #define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x))) |
638 | |
639 | #define DMEMIT_TARGET_NAME_VERSION(y) \ |
640 | DMEMIT("target_name=%s,target_version=%u.%u.%u", \ |
641 | (y)->name, (y)->version[0], (y)->version[1], (y)->version[2]) |
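
/*
 * Example: DMEMIT() assumes local variables named sz, result and maxlen,
 * matching the dm_status_fn arguments; a minimal hypothetical status hook:
 *
 *	static void foo_status(struct dm_target *ti, status_type_t type,
 *			       unsigned int status_flags, char *result,
 *			       unsigned int maxlen)
 *	{
 *		struct foo_ctx *fc = ti->private;
 *		unsigned int sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *		case STATUSTYPE_IMA:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", fc->dev->name,
 *			       (unsigned long long)fc->start);
 *			break;
 *		}
 *	}
 */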
642 | |
643 | /** |
644 | * module_dm() - Helper macro for DM targets that don't do anything |
645 | * special in their module_init and module_exit. |
646 | * Each module may only use this macro once, and calling it replaces |
647 | * module_init() and module_exit(). |
648 | * |
649 | * @name: DM target's name |
650 | */ |
651 | #define module_dm(name) \ |
652 | static int __init dm_##name##_init(void) \ |
653 | { \ |
654 | return dm_register_target(&(name##_target)); \ |
655 | } \ |
656 | module_init(dm_##name##_init) \ |
657 | static void __exit dm_##name##_exit(void) \ |
658 | { \ |
659 | dm_unregister_target(&(name##_target)); \ |
660 | } \ |
661 | module_exit(dm_##name##_exit) |
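
/*
 * Example: with a target_type named foo_target (as sketched earlier), the
 * whole module init/exit boilerplate reduces to:
 *
 *	module_dm(foo);
 */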
662 | |
663 | /* |
664 | * Definitions of return values from target end_io function. |
665 | */ |
666 | #define DM_ENDIO_DONE 0 |
667 | #define DM_ENDIO_INCOMPLETE 1 |
668 | #define DM_ENDIO_REQUEUE 2 |
669 | #define DM_ENDIO_DELAY_REQUEUE 3 |
670 | |
671 | /* |
672 | * Definitions of return values from target map function. |
673 | */ |
674 | #define DM_MAPIO_SUBMITTED 0 |
675 | #define DM_MAPIO_REMAPPED 1 |
676 | #define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE |
677 | #define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE |
678 | #define DM_MAPIO_KILL 4 |
679 | |
680 | #define dm_sector_div64(x, y)( \ |
681 | { \ |
682 | u64 _res; \ |
683 | (x) = div64_u64_rem(x, y, &_res); \ |
684 | _res; \ |
685 | } \ |
686 | ) |
687 | |
688 | /* |
689 | * Ceiling(n / sz) |
690 | */ |
691 | #define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz)) |
692 | |
693 | #define dm_sector_div_up(n, sz) ( \ |
694 | { \ |
695 | sector_t _r = ((n) + (sz) - 1); \ |
696 | sector_div(_r, (sz)); \ |
697 | _r; \ |
698 | } \ |
699 | ) |
700 | |
701 | /* |
702 | * ceiling(n / size) * size |
703 | */ |
704 | #define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz)) |
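
/*
 * For example, dm_div_up(1000, 512) == 2 and dm_round_up(1000, 512) == 1024.
 */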
705 | |
706 | /* |
707 | * Sector offset taken relative to the start of the target instead of |
708 | * relative to the start of the device. |
709 | */ |
710 | #define dm_target_offset(ti, sector) ((sector) - (ti)->begin) |
711 | |
712 | static inline sector_t to_sector(unsigned long long n) |
713 | { |
714 | return (n >> SECTOR_SHIFT); |
715 | } |
716 | |
717 | static inline unsigned long to_bytes(sector_t n) |
718 | { |
719 | return (n << SECTOR_SHIFT); |
720 | } |
721 | |
722 | #endif /* _LINUX_DEVICE_MAPPER_H */ |
723 | |