/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_DAX_BIO_BASED	 = 3,
	DM_TYPE_NVME_BIO_BASED	 = 4,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone);
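
/*
 * Example (illustrative sketch, not part of this header): a map
 * function for a hypothetical pass-through target that keeps its
 * destination device in ti->private and simply remaps each bio.
 * DM_MAPIO_REMAPPED and dm_target_offset() are defined later in this
 * file.
 *
 *	static int passthrough_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct dm_dev *dev = ti->private;
 *
 *		bio_set_dev(bio, dev->bdev);
 *		bio->bi_iter.bi_sector =
 *			dm_target_offset(ti, bio->bi_iter.bi_sector);
 *
 *		return DM_MAPIO_REMAPPED;
 *	}
 */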

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (e.g.,
 *       a multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
				   struct blk_zone *zones,
				   unsigned int *nr_zones,
				   gfp_t gfp_mask);

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);
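
/*
 * Example (sketch): a callout, typically passed to ti->type->iterate_devices,
 * that reports whether a device range sits on a rotational disk:
 *
 *	static int device_is_rotational(struct dm_target *ti,
 *					struct dm_dev *dev,
 *					sector_t start, sector_t len,
 *					void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return q && !blk_queue_nonrot(q);
 *	}
 *
 * ti->type->iterate_devices(ti, device_is_rotational, NULL) then returns
 * non-zero if at least one underlying device range is rotational.
 * The function name is illustrative only.
 */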

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
#define PAGE_SECTORS (PAGE_SIZE / 512)

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	fmode_t mode;
	char name[16];
};

dev_t dm_get_dev_t(const char *path);

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
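
/*
 * Example (sketch, mirroring the pattern used by simple targets such as
 * dm-linear): a constructor that opens the single device named by
 * argv[0] and the matching destructor that releases it.  All names are
 * hypothetical.
 *
 *	static int passthrough_ctr(struct dm_target *ti,
 *				   unsigned int argc, char **argv)
 *	{
 *		struct dm_dev *dev;
 *		int r;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		r = dm_get_device(ti, argv[0],
 *				  dm_table_get_mode(ti->table), &dev);
 *		if (r) {
 *			ti->error = "Device lookup failed";
 *			return r;
 *		}
 *
 *		ti->private = dev;
 *		return 0;
 *	}
 *
 *	static void passthrough_dtr(struct dm_target *ti)
 *	{
 *		dm_put_device(ti, ti->private);
 *	}
 */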

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
#ifdef CONFIG_BLK_DEV_ZONED
	dm_report_zones_fn report_zones;
#endif
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_copy_iter_fn dax_copy_from_iter;
	dm_dax_copy_iter_fn dax_copy_to_iter;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target, even immutable ones.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates that a target supports host-managed zoned block devices.
 */
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * The number of zero-length flush bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_secure_erase_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;
};

/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
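
/*
 * Example (sketch): a target that sets, in its ctr,
 *
 *	ti->per_io_data_size = sizeof(struct my_io);
 *
 * gets that many private bytes with every bio it is handed, retrievable
 * from .map or .end_io as
 *
 *	struct my_io *io = dm_per_bio_data(bio, sizeof(struct my_io));
 *
 * and mapped back to the owning bio with
 * dm_bio_from_per_bio_data(io, sizeof(struct my_io)).  "struct my_io"
 * stands for whatever per-I/O bookkeeping the target needs.
 */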

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
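
/*
 * Example (sketch): registering a target type from module init/exit,
 * reusing the hypothetical passthrough functions sketched above.
 *
 *	static struct target_type passthrough_target = {
 *		.name    = "passthrough",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = passthrough_ctr,
 *		.dtr     = passthrough_dtr,
 *		.map     = passthrough_map,
 *	};
 *
 *	static int __init dm_passthrough_init(void)
 *	{
 *		return dm_register_target(&passthrough_target);
 *	}
 *
 *	static void __exit dm_passthrough_exit(void)
 *	{
 *		dm_unregister_target(&passthrough_target);
 *	}
 *
 *	module_init(dm_passthrough_init);
 *	module_exit(dm_passthrough_exit);
 */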

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
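
/*
 * Example (sketch): parsing "<dev> <count>" style target arguments
 * inside a ctr.  The bounds and messages are illustrative.
 *
 *	static const struct dm_arg count_arg = {
 *		0, 1024, "Invalid count; must be between 0 and 1024"
 *	};
 *
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	const char *dev_path;
 *	unsigned count;
 *	int r;
 *
 *	dev_path = dm_shift_arg(&as);
 *	r = dm_read_arg(&count_arg, &as, &count, &ti->error);
 *	if (r)
 *		return r;
 */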

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
void dm_remap_zone_report(struct dm_target *ti, sector_t start,
			  struct blk_zone *zones, unsigned int *nr_zones);
union map_info *dm_get_rq_mapinfo(struct request *rq);

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target_ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" targets that support both bio-based
 * and request-based operation.
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);
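
/*
 * Example (sketch, error handling elided): the table lifecycle as
 * driven internally for the DM_TABLE_LOAD ioctl.  "md" is an existing
 * mapped_device and the "linear" parameters are illustrative.
 *
 *	struct dm_table *t;
 *
 *	dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	dm_table_add_target(t, "linear", 0, len_in_sectors, "/dev/sdb 0");
 *	dm_table_complete(t);
 *	...
 *	dm_table_destroy(t);
 */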

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DM_RATELIMIT(pr_func, fmt, ...)					\
do {									\
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
									\
	if (__ratelimit(&rs))						\
		pr_func(DM_FMT(fmt), ##__VA_ARGS__);			\
} while (0)

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__)

#ifdef CONFIG_DM_DEBUG
#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__)
#else
#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
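
/*
 * Example: a target defines DM_MSG_PREFIX before including this header
 * so the macros above tag its messages.  With
 *
 *	#define DM_MSG_PREFIX "passthrough"
 *
 * a call such as DMERR("failed to open %s", path) prints
 * "device-mapper: passthrough: failed to open <path>" at KERN_ERR;
 * DMERR_LIMIT() does the same but ratelimited.
 */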

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))
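
/*
 * Example (sketch): DMEMIT() expects locals named sz, result and
 * maxlen in scope, so a status function conventionally looks like:
 *
 *	static void passthrough_status(struct dm_target *ti,
 *				       status_type_t type,
 *				       unsigned status_flags,
 *				       char *result, unsigned maxlen)
 *	{
 *		unsigned sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s", dev_name);
 *			break;
 *		}
 *	}
 *
 * where "dev_name" stands for whatever the target stored in its ctr.
 */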

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
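
/*
 * Example: dm_div_up(10, 4) == 3 and dm_round_up(10, 4) == 12, i.e.
 * the smallest multiple of 4 covering 10.  dm_sector_div_up() computes
 * the same ceiling for sector_t values via sector_div(), which avoids
 * a full 64-bit division on 32-bit machines.
 */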

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}
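
/*
 * Example: with SECTOR_SHIFT == 9, to_sector(4096) == 8 and
 * to_bytes(8) == 4096.  Note that to_bytes() returns unsigned long, so
 * it is only safe for byte counts that fit the native word size.
 */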

#endif /* _LINUX_DEVICE_MAPPER_H */