/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
#include <linux/jump_label.h>

#include <trace/events/block.h>

#include "dm.h"
#include "dm-ima.h"

#define DM_RESERVED_MAX_IOS 1024

struct dm_io;

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};
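
/*
 * Sketch of how these pools are intended to be used (illustrative, not
 * the exact dm.c code; the pool_size and front-pad variable names are
 * assumptions): both bio_sets are created with front padding so the
 * per-io state is carved out of the same allocation as the bio itself,
 * e.g.
 *
 *	bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
 *	bioset_init(&pools->bs, pool_size, front_pad, 0);
 *
 * io_bs fronts the first clone of each original bio with a struct dm_io;
 * bs fronts any additional clones with a struct dm_target_io (see the
 * DM_IO_BIO_OFFSET and DM_TARGET_IO_BIO_OFFSET macros below).
 */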

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	wait_queue_head_t wait;
	unsigned long __percpu *pending_io;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	/*
	 * The requeue work context is needed to clone a new bio that
	 * represents the dm_io to be requeued, since each dm_io may
	 * point to the original bio from the FS.
	 */
	struct work_struct requeue_work;
	struct dm_io *requeue_list;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* for blk-mq request-based DM support */
	bool init_tio_pdu:1;
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* the number of internal suspends */
	unsigned int internal_suspend_count;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/*
	 * io objects are allocated from here.
	 */
	struct dm_md_mempools *mempools;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};
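
/*
 * Illustrative reader pattern for md->map (a sketch; it assumes the
 * dm_get_live_table()/dm_put_live_table() helpers declared in
 * <linux/device-mapper.h>): the live table is dereferenced under an
 * SRCU read lock on md->io_barrier rather than through the __rcu
 * pointer directly:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map)
 *		... use the table ...
 *	dm_put_live_table(md, srcu_idx);
 */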

/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9

void disable_discard(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

DECLARE_STATIC_KEY_FALSE(stats_enabled);
DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
DECLARE_STATIC_KEY_FALSE(zoned_enabled);
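
/*
 * These static keys are off by default and flipped on elsewhere in dm
 * core the first time the corresponding feature is used, so hot paths
 * can skip the optional work at near-zero cost. A sketch of the
 * consumer side:
 *
 *	if (static_branch_unlikely(&stats_enabled))
 *		... update dm_stats ...
 */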

static inline bool dm_emulate_zone_append(struct mapped_device *md)
{
	if (blk_queue_is_zoned(md->queue))
		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	return false;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	bool integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical device. This
	 * should be a combination of BLK_OPEN_READ and BLK_OPEN_WRITE.
	 */
	blk_mode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;
	struct rw_semaphore devices_lock;

	/* events get handed up using this callback */
	void (*event_fn)(void *data);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};

static inline struct dm_target *dm_table_get_target(struct dm_table *t,
						    unsigned int index)
{
	BUG_ON(index >= t->num_targets);
	return t->targets + index;
}
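
/*
 * Typical iteration over a table's targets (a sketch of the pattern
 * used throughout dm core):
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < t->num_targets; i++) {
 *		struct dm_target *ti = dm_table_get_target(t, i);
 *		...
 *	}
 */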

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;
	blk_short_t flags;
	unsigned int target_bio_nr;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned int *len_ptr;
	sector_t old_sector;
	struct bio clone;
};
#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
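
/*
 * Because 'clone' is the last member of struct dm_target_io, and the
 * tio is in turn the last member of struct dm_io, these offsets give
 * the front padding the bio_sets need so the private state can later
 * be recovered from a clone bio, e.g. (a sketch of the container_of()
 * pattern dm.c relies on):
 *
 *	struct dm_target_io *tio =
 *		container_of(bio, struct dm_target_io, clone);
 */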

/*
 * dm_target_io flags
 */
enum {
	DM_TIO_INSIDE_DM_IO,
	DM_TIO_IS_DUPLICATE_BIO
};

static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
{
	return (tio->flags & (1U << bit)) != 0;
}

static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
{
	tio->flags |= (1U << bit);
}

static inline bool dm_tio_is_normal(struct dm_target_io *tio)
{
	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
}
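
/*
 * Example (illustrative): a clone created as one of several duplicate
 * bios is tagged so completion handling can tell it apart from the tio
 * embedded in its dm_io:
 *
 *	dm_tio_set_flag(tio, DM_TIO_IS_DUPLICATE_BIO);
 *	...
 *	if (!dm_tio_is_normal(tio))
 *		...
 */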

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;
	blk_short_t flags;
	spinlock_t lock;
	unsigned long start_time;
	void *data;
	struct dm_io *next;
	struct dm_stats_aux stats_aux;
	blk_status_t status;
	atomic_t io_count;
	struct mapped_device *md;

	/* These three fields represent the mapped part of the original bio */
	struct bio *orig_bio;
	unsigned int sector_offset; /* offset to end of orig_bio */
	unsigned int sectors;

	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

/*
 * dm_io flags
 */
enum {
	DM_IO_ACCOUNTED,
	DM_IO_WAS_SPLIT,
	DM_IO_BLK_STAT
};

static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
{
	return (io->flags & (1U << bit)) != 0;
}

static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
{
	io->flags |= (1U << bit);
}

void dm_io_rewind(struct dm_io *io, struct bio_set *bs);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def,
				   unsigned int max);

static inline bool dm_message_test_buffer_overflow(char *result, unsigned int maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
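
/*
 * Intended use (a sketch; DMEMIT() from <linux/device-mapper.h> assumes
 * 'sz', 'result' and 'maxlen' are in scope): status/message handlers
 * append into 'result' and then check whether the buffer filled up:
 *
 *	DMEMIT("%u", value);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		return 1;
 */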

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif