// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-rq.h"
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

static struct workqueue_struct *dm_mpath_wq;

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned int) -1)
#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0

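/* A value of 0 (the default) disables the queue_if_no_path timeout; see enable_nopath_timeout(). */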
static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned int fail_count;	/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned int pg_num;		/* Reference number */
	unsigned int nr_pgpaths;	/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};

/* Multipath context */
struct multipath {
	unsigned long flags;		/* Multipath state flags */

	spinlock_t lock;
	enum dm_queue_mode queue_mode;

	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	atomic_t nr_valid_paths;	/* Total number of usable paths */
	unsigned int nr_priority_groups;
	struct list_head priority_groups;

	const char *hw_handler_name;
	char *hw_handler_params;
	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
	unsigned int pg_init_retries;	/* Number of times to retry pg_init */
	unsigned int pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	struct mutex work_mutex;
	struct work_struct trigger_event;
	struct dm_target *ti;

	struct work_struct process_queued_bios;
	struct bio_list queued_bios;

	struct timer_list nopath_timer;	/* Timeout for queue_if_no_path */
};

/*
 * Context information attached to each io we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
	u64 start_time_ns;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
static void queue_if_no_path_timeout_work(struct timer_list *t);

/*
 *-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------
 */
#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */

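/*
 * Test a flag locklessly first; only if it reads as set, re-check under
 * m->lock so the caller doesn't act on a transiently-set bit.
 */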
static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
{
	bool r = test_bit(MPATHF_bit, &m->flags);

	if (r) {
		unsigned long flags;

		spin_lock_irqsave(&m->lock, flags);
		r = test_bit(MPATHF_bit, &m->flags);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return r;
}

/*
 *-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------
 */
static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (!pgpath)
		return NULL;

	pgpath->is_active = true;

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		atomic_set(&m->nr_valid_paths, 0);
		INIT_WORK(&m->trigger_event, trigger_event);
		mutex_init(&m->work_mutex);

		m->queue_mode = DM_TYPE_NONE;

		m->ti = ti;
		ti->private = m;

		timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
	}

	return m;
}

static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_NONE) {
		m->queue_mode = DM_TYPE_REQUEST_BASED;
	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
		INIT_WORK(&m->process_queued_bios, process_queued_bios);
		/*
		 * bio-based doesn't support any direct scsi_dh management;
		 * it just discovers if a scsi_dh is attached.
		 */
		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
	}

	dm_table_set_type(ti->table, m->queue_mode);

	/*
	 * Init fields that are only used when a scsi_dh is attached
	 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
	 */
	set_bit(MPATHF_QUEUE_IO, &m->flags);
	atomic_set(&m->pg_init_in_progress, 0);
	atomic_set(&m->pg_init_count, 0);
	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
	init_waitqueue_head(&m->pg_init_wait);

	return 0;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mutex_destroy(&m->work_mutex);
	kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static size_t multipath_per_bio_data_size(void)
{
	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
}

static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
	return dm_per_bio_data(bio, multipath_per_bio_data_size());
}

static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
{
	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
	void *bio_details = mpio + 1;

	return bio_details;
}

static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
{
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);

	mpio->nr_bytes = bio->bi_iter.bi_size;
	mpio->pgpath = NULL;
	mpio->start_time_ns = 0;
	*mpio_p = mpio;

	dm_bio_record(bio_details, bio);
}

/*
 *-----------------------------------------------
 * Path selection
 *-----------------------------------------------
 */
static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	lockdep_assert_held(&m->lock);

	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		return 0;

	atomic_inc(&m->pg_init_count);
	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			atomic_inc(&m->pg_init_in_progress);
	}
	return atomic_read(&m->pg_init_in_progress);
}

static int pg_init_all_paths(struct multipath *m)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	ret = __pg_init_all_paths(m);
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}

static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
	lockdep_assert_held(&m->lock);

	m->current_pg = pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
	} else {
		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
	}

	atomic_set(&m->pg_init_count, 0);
}

static struct pgpath *choose_path_in_pg(struct multipath *m,
					struct priority_group *pg,
					size_t nr_bytes)
{
	unsigned long flags;
	struct dm_path *path;
	struct pgpath *pgpath;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return ERR_PTR(-ENXIO);

	pgpath = path_to_pgpath(path);

	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
		/* Only update current_pgpath if pg changed */
		spin_lock_irqsave(&m->lock, flags);
		m->current_pgpath = pgpath;
		__switch_pg(m, pg);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return pgpath;
}

static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	unsigned long flags;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned int bypassed = 1;

	if (!atomic_read(&m->nr_valid_paths)) {
		spin_lock_irqsave(&m->lock, flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
		spin_unlock_irqrestore(&m->lock, flags);
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (READ_ONCE(m->next_pg)) {
		spin_lock_irqsave(&m->lock, flags);
		pg = m->next_pg;
		if (!pg) {
			spin_unlock_irqrestore(&m->lock, flags);
			goto check_current_pg;
		}
		m->next_pg = NULL;
		spin_unlock_irqrestore(&m->lock, flags);
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* Don't change PG until it has no remaining paths */
check_current_pg:
	pg = READ_ONCE(m->current_pg);
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == !!bypassed)
				continue;
			pgpath = choose_path_in_pg(m, pg, nr_bytes);
			if (!IS_ERR_OR_NULL(pgpath)) {
				if (!bypassed) {
					spin_lock_irqsave(&m->lock, flags);
					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
					spin_unlock_irqrestore(&m->lock, flags);
				}
				return pgpath;
			}
		}
	} while (bypassed--);

failed:
	spin_lock_irqsave(&m->lock, flags);
	m->current_pgpath = NULL;
	m->current_pg = NULL;
	spin_unlock_irqrestore(&m->lock, flags);

	return NULL;
}

/*
 * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
 * report the function name and line number of the function from which
 * it has been invoked.
 */
#define dm_report_EIO(m)						\
	DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
		      dm_table_device_name((m)->ti->table),		\
		      test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
		      test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
		      dm_noflush_suspending((m)->ti))

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 */
static bool __must_push_back(struct multipath *m)
{
	return dm_noflush_suspending(m->ti);
}

static bool must_push_back_rq(struct multipath *m)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&m->lock, flags);
	ret = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m));
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}

/*
 * Map cloned requests (request-based multipath)
 */
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **__clone)
{
	struct multipath *m = ti->private;
	size_t nr_bytes = blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct request_queue *q;
	struct request *clone;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, nr_bytes);

	if (!pgpath) {
		if (must_push_back_rq(m))
			return DM_MAPIO_DELAY_REQUEUE;
		dm_report_EIO(m);	/* Failed */
		return DM_MAPIO_KILL;
	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
		pg_init_all_paths(m);
		return DM_MAPIO_DELAY_REQUEUE;
	}

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;
	q = bdev_get_queue(bdev);
	clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
				     BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(clone)) {
		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
		if (blk_queue_dying(q)) {
			atomic_inc(&m->pg_init_in_progress);
			activate_or_offline_path(pgpath);
			return DM_MAPIO_DELAY_REQUEUE;
		}

		/*
		 * blk-mq's SCHED_RESTART can cover this requeue, so we
		 * needn't deal with it by DELAY_REQUEUE. More importantly,
		 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
		 * get the queue busy feedback (via BLK_STS_RESOURCE),
		 * otherwise I/O merging can suffer.
		 */
		return DM_MAPIO_REQUEUE;
	}
	clone->bio = clone->biotail = NULL;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	*__clone = clone;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static void multipath_release_clone(struct request *clone,
				    union map_info *map_context)
{
	if (unlikely(map_context)) {
		/*
		 * non-NULL map_context means caller is still map
		 * method; must undo multipath_clone_and_map()
		 */
		struct dm_mpath_io *mpio = get_mpio(map_context);
		struct pgpath *pgpath = mpio->pgpath;

		if (pgpath && pgpath->pg->ps.type->end_io)
			pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
						    &pgpath->path,
						    mpio->nr_bytes,
						    clone->io_start_time_ns);
	}

	blk_mq_free_request(clone);
}

/*
 * Map cloned bios (bio-based multipath)
 */

static void __multipath_queue_bio(struct multipath *m, struct bio *bio)
{
	/* Queue for the daemon to resubmit */
	bio_list_add(&m->queued_bios, bio);
	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
		queue_work(kmultipathd, &m->process_queued_bios);
}

static void multipath_queue_bio(struct multipath *m, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	__multipath_queue_bio(m, bio);
	spin_unlock_irqrestore(&m->lock, flags);
}

static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{
	struct pgpath *pgpath;
	unsigned long flags;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);

	if (!pgpath) {
		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			__multipath_queue_bio(m, bio);
			pgpath = ERR_PTR(-EAGAIN);
		}
		spin_unlock_irqrestore(&m->lock, flags);

	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
		multipath_queue_bio(m, bio);
		pg_init_all_paths(m);
		return ERR_PTR(-EAGAIN);
	}

	return pgpath;
}

static int __multipath_map_bio(struct multipath *m, struct bio *bio,
			       struct dm_mpath_io *mpio)
{
	struct pgpath *pgpath = __map_bio(m, bio);

	if (IS_ERR(pgpath))
		return DM_MAPIO_SUBMITTED;

	if (!pgpath) {
		if (__must_push_back(m))
			return DM_MAPIO_REQUEUE;
		dm_report_EIO(m);
		return DM_MAPIO_KILL;
	}

	mpio->pgpath = pgpath;

	if (dm_ps_use_hr_timer(pgpath->pg->ps.type))
		mpio->start_time_ns = ktime_get_ns();

	bio->bi_status = 0;
	bio_set_dev(bio, pgpath->path.dev->bdev);
	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      mpio->nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = NULL;

	multipath_init_per_bio_data(bio, &mpio);
	return __multipath_map_bio(m, bio, mpio);
}

static void process_queued_io_list(struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_REQUEST_BASED)
		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
	else if (m->queue_mode == DM_TYPE_BIO_BASED)
		queue_work(kmultipathd, &m->process_queued_bios);
}

static void process_queued_bios(struct work_struct *work)
{
	int r;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	struct multipath *m =
		container_of(work, struct multipath, process_queued_bios);

	bio_list_init(&bios);

	spin_lock_irqsave(&m->lock, flags);

	if (bio_list_empty(&m->queued_bios)) {
		spin_unlock_irqrestore(&m->lock, flags);
		return;
	}

	bio_list_merge(&bios, &m->queued_bios);
	bio_list_init(&m->queued_bios);

	spin_unlock_irqrestore(&m->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		struct dm_mpath_io *mpio = get_mpio_from_bio(bio);

		dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
		r = __multipath_map_bio(m, bio, mpio);
		switch (r) {
		case DM_MAPIO_KILL:
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		case DM_MAPIO_REQUEUE:
			bio->bi_status = BLK_STS_DM_REQUEUE;
			bio_endio(bio);
			break;
		case DM_MAPIO_REMAPPED:
			submit_bio_noacct(bio);
			break;
		case DM_MAPIO_SUBMITTED:
			break;
		default:
			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
		}
	}
	blk_finish_plug(&plug);
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
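 * This can also be toggled at runtime via the "queue_if_no_path" /
 * "fail_if_no_path" target messages.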
 */
static int queue_if_no_path(struct multipath *m, bool f_queue_if_no_path,
			    bool save_old_value, const char *caller)
{
	unsigned long flags;
	bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
	const char *dm_dev_name = dm_table_device_name(m->ti->table);

	DMDEBUG("%s: %s caller=%s f_queue_if_no_path=%d save_old_value=%d",
		dm_dev_name, __func__, caller, f_queue_if_no_path, save_old_value);

	spin_lock_irqsave(&m->lock, flags);

	queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);

	if (save_old_value) {
		if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
			DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
			      dm_dev_name);
		} else
			assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
	} else if (!f_queue_if_no_path && saved_queue_if_no_path_bit) {
		/* due to "fail_if_no_path" message, need to honor it. */
		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	}
	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, f_queue_if_no_path);

	DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
		dm_dev_name, __func__,
		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
		dm_noflush_suspending(m->ti));

	spin_unlock_irqrestore(&m->lock, flags);

	if (!f_queue_if_no_path) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	return 0;
}

/*
 * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
 * process any queued I/O.
 */
static void queue_if_no_path_timeout_work(struct timer_list *t)
{
	struct multipath *m = from_timer(m, t, nopath_timer);

	DMWARN("queue_if_no_path timeout on %s, failing queued IO",
	       dm_table_device_name(m->ti->table));
	queue_if_no_path(m, false, false, __func__);
}

/*
 * Enable the queue_if_no_path timeout if necessary.
 * Called with m->lock held.
 */
static void enable_nopath_timeout(struct multipath *m)
{
	unsigned long queue_if_no_path_timeout =
		READ_ONCE(queue_if_no_path_timeout_secs) * HZ;

	lockdep_assert_held(&m->lock);

	if (queue_if_no_path_timeout > 0 &&
	    atomic_read(&m->nr_valid_paths) == 0 &&
	    test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
		mod_timer(&m->nopath_timer,
			  jiffies + queue_if_no_path_timeout);
	}
}

static void disable_nopath_timeout(struct multipath *m)
{
	del_timer_sync(&m->nopath_timer);
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*
 *---------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
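 *
 * For example, a hypothetical two-path, single-PG table using the
 * round-robin selector might be:
 *     0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000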
 *---------------------------------------------------------------
 */
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned int ps_argc;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
			 const char **attached_handler_name, char **error)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int r;

	if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) {
retain:
		if (*attached_handler_name) {
			/*
			 * Clear any hw_handler_params associated with a
			 * handler that isn't already attached.
			 */
			if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
				kfree(m->hw_handler_params);
				m->hw_handler_params = NULL;
			}

			/*
			 * Reset hw_handler_name to match the attached handler
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = *attached_handler_name;
			*attached_handler_name = NULL;
		}
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			DMINFO("retaining handler on device %pg", bdev);
			goto retain;
		}
		if (r < 0) {
			*error = "error attaching hardware handler";
			return r;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				*error = "unable to set hardware handler parameters";
				return r;
			}
		}
	}

	return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q;
	const char *attached_handler_name = NULL;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	q = bdev_get_queue(p->path.dev->bdev);
	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
	if (attached_handler_name || m->hw_handler_name) {
		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
		kfree(attached_handler_name);
		if (r) {
			dm_put_device(ti, p->path.dev);
			goto bad;
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;
bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static const struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned int i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned int hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	if (m->queue_mode == DM_TYPE_BIO_BASED) {
		dm_consume_args(as, hw_argc);
		DMERR("bio-based multipath doesn't allow hardware handler args");
		return 0;
	}

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!m->hw_handler_name)
		return -EINVAL;

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

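		/*
		 * hw_handler_params holds the arg count followed by the args
		 * themselves, NUL-separated: e.g. "2\0argA\0argB" for two
		 * (illustrative) args argA and argB.
		 */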
		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned int argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 8, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false, __func__);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "queue_mode") &&
		    (argc >= 1)) {
			const char *queue_mode_name = dm_shift_arg(as);

			if (!strcasecmp(queue_mode_name, "bio"))
				m->queue_mode = DM_TYPE_BIO_BASED;
			else if (!strcasecmp(queue_mode_name, "rq") ||
				 !strcasecmp(queue_mode_name, "mq"))
				m->queue_mode = DM_TYPE_REQUEST_BASED;
			else {
				ti->error = "Unknown 'queue_mode' requested";
				r = -EINVAL;
			}
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* target arguments */
	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned int pg_count = 0;
	unsigned int next_pg_num;
	unsigned long flags;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = alloc_multipath_stage2(ti, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;
		unsigned int nr_valid_paths = atomic_read(&m->nr_valid_paths);

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		nr_valid_paths += pg->nr_pgpaths;
		atomic_set(&m->nr_valid_paths, nr_valid_paths);

		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	spin_lock_irqsave(&m->lock, flags);
	enable_nopath_timeout(m);
	spin_unlock_irqrestore(&m->lock, flags);

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_zeroes_bios = 1;
	if (m->queue_mode == DM_TYPE_BIO_BASED)
		ti->per_io_data_size = multipath_per_bio_data_size();
	else
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

bad:
	free_multipath(m);
	return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&m->pg_init_in_progress))
			break;

		io_schedule();
	}
	finish_wait(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
	if (m->hw_handler_name) {
		unsigned long flags;

		if (!atomic_read(&m->pg_init_in_progress))
			goto skip;

		spin_lock_irqsave(&m->lock, flags);
		if (atomic_read(&m->pg_init_in_progress) &&
		    !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
			spin_unlock_irqrestore(&m->lock, flags);

			flush_workqueue(kmpath_handlerd);
			multipath_wait_for_pg_init_completion(m);

			spin_lock_irqsave(&m->lock, flags);
			clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
		}
		spin_unlock_irqrestore(&m->lock, flags);
	}
skip:
	if (m->queue_mode == DM_TYPE_BIO_BASED)
		flush_work(&m->process_queued_bios);
	flush_work(&m->trigger_event);
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	disable_nopath_timeout(m);
	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("%s: Failing path %s.",
	       dm_table_device_name(m->ti->table),
	       pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = false;
	pgpath->fail_count++;

	atomic_dec(&m->nr_valid_paths);

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));

	queue_work(dm_mpath_wq, &m->trigger_event);

	enable_nopath_timeout(m);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0, run_queue = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;
	unsigned int nr_valid_paths;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	DMWARN("%s: Reinstating path %s.",
	       dm_table_device_name(m->ti->table),
	       pgpath->path.dev->name);

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = true;

	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
	if (nr_valid_paths == 1) {
		m->current_pgpath = NULL;
		run_queue = 1;
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			atomic_inc(&m->pg_init_in_progress);
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (run_queue) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	if (pgpath->is_active)
		disable_nopath_timeout(m);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      bool bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
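 * (typically requested via the "switch_group" target message).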
1461 | */ |
1462 | static int switch_pg_num(struct multipath *m, const char *pgstr) |
1463 | { |
1464 | struct priority_group *pg; |
1465 | unsigned int pgnum; |
1466 | unsigned long flags; |
1467 | char dummy; |
1468 | |
1469 | if (!pgstr || (sscanf(pgstr, "%u%c" , &pgnum, &dummy) != 1) || !pgnum || |
1470 | !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) { |
1471 | DMWARN("invalid PG number supplied to %s" , __func__); |
1472 | return -EINVAL; |
1473 | } |
1474 | |
1475 | spin_lock_irqsave(&m->lock, flags); |
1476 | list_for_each_entry(pg, &m->priority_groups, list) { |
1477 | pg->bypassed = false; |
1478 | if (--pgnum) |
1479 | continue; |
1480 | |
1481 | m->current_pgpath = NULL; |
1482 | m->current_pg = NULL; |
1483 | m->next_pg = pg; |
1484 | } |
1485 | spin_unlock_irqrestore(lock: &m->lock, flags); |
1486 | |
1487 | schedule_work(work: &m->trigger_event); |
1488 | return 0; |
1489 | } |
1490 | |
1491 | /* |
1492 | * Set/clear bypassed status of a PG. |
1493 | * PGs are numbered upwards from 1 in the order they were declared. |
1494 | */ |
1495 | static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed) |
1496 | { |
1497 | struct priority_group *pg; |
1498 | unsigned int pgnum; |
1499 | char dummy; |
1500 | |
1501 | if (!pgstr || (sscanf(pgstr, "%u%c" , &pgnum, &dummy) != 1) || !pgnum || |
1502 | !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) { |
1503 | DMWARN("invalid PG number supplied to bypass_pg" ); |
1504 | return -EINVAL; |
1505 | } |
1506 | |
1507 | list_for_each_entry(pg, &m->priority_groups, list) { |
1508 | if (!--pgnum) |
1509 | break; |
1510 | } |
1511 | |
1512 | bypass_pg(m, pg, bypassed); |
1513 | return 0; |
1514 | } |
1515 | |
1516 | /* |
1517 | * Should we retry pg_init immediately? |
1518 | */ |
1519 | static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath) |
1520 | { |
1521 | unsigned long flags; |
1522 | bool limit_reached = false; |
1523 | |
1524 | spin_lock_irqsave(&m->lock, flags); |
1525 | |
1526 | if (atomic_read(v: &m->pg_init_count) <= m->pg_init_retries && |
1527 | !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) |
1528 | set_bit(MPATHF_PG_INIT_REQUIRED, addr: &m->flags); |
1529 | else |
1530 | limit_reached = true; |
1531 | |
1532 | spin_unlock_irqrestore(lock: &m->lock, flags); |
1533 | |
1534 | return limit_reached; |
1535 | } |
1536 | |
1537 | static void pg_init_done(void *data, int errors) |
1538 | { |
1539 | struct pgpath *pgpath = data; |
1540 | struct priority_group *pg = pgpath->pg; |
1541 | struct multipath *m = pg->m; |
1542 | unsigned long flags; |
1543 | bool delay_retry = false; |
1544 | |
1545 | /* device or driver problems */ |
1546 | switch (errors) { |
1547 | case SCSI_DH_OK: |
1548 | break; |
1549 | case SCSI_DH_NOSYS: |
1550 | if (!m->hw_handler_name) { |
1551 | errors = 0; |
1552 | break; |
1553 | } |
1554 | DMERR("Could not failover the device: Handler scsi_dh_%s " |
1555 | "Error %d." , m->hw_handler_name, errors); |
1556 | /* |
1557 | * Fail path for now, so we do not ping pong |
1558 | */ |
1559 | fail_path(pgpath); |
1560 | break; |
1561 | case SCSI_DH_DEV_TEMP_BUSY: |
1562 | /* |
1563 | * Probably doing something like FW upgrade on the |
1564 | * controller so try the other pg. |
1565 | */ |
1566 | bypass_pg(m, pg, bypassed: true); |
1567 | break; |
1568 | case SCSI_DH_RETRY: |
1569 | /* Wait before retrying. */ |
1570 | delay_retry = true; |
1571 | fallthrough; |
1572 | case SCSI_DH_IMM_RETRY: |
1573 | case SCSI_DH_RES_TEMP_UNAVAIL: |
1574 | if (pg_init_limit_reached(m, pgpath)) |
1575 | fail_path(pgpath); |
1576 | errors = 0; |
1577 | break; |
1578 | case SCSI_DH_DEV_OFFLINED: |
1579 | default: |
1580 | /* |
1581 | * We probably do not want to fail the path for a device |
1582 | * error, but this is what the old dm did. In future |
1583 | * patches we can do more advanced handling. |
1584 | */ |
1585 | fail_path(pgpath); |
1586 | } |
1587 | |
1588 | spin_lock_irqsave(&m->lock, flags); |
1589 | if (errors) { |
1590 | if (pgpath == m->current_pgpath) { |
1591 | DMERR("Could not failover device. Error %d." , errors); |
1592 | m->current_pgpath = NULL; |
1593 | m->current_pg = NULL; |
1594 | } |
1595 | } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) |
1596 | pg->bypassed = false; |
1597 | |
1598 | if (atomic_dec_return(v: &m->pg_init_in_progress) > 0) |
1599 | /* Activations of other paths are still on going */ |
1600 | goto out; |
1601 | |
1602 | if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) { |
1603 | if (delay_retry) |
1604 | set_bit(MPATHF_PG_INIT_DELAY_RETRY, addr: &m->flags); |
1605 | else |
1606 | clear_bit(MPATHF_PG_INIT_DELAY_RETRY, addr: &m->flags); |
1607 | |
1608 | if (__pg_init_all_paths(m)) |
1609 | goto out; |
1610 | } |
1611 | clear_bit(MPATHF_QUEUE_IO, addr: &m->flags); |
1612 | |
1613 | process_queued_io_list(m); |
1614 | |
1615 | /* |
1616 | * Wake up any thread waiting to suspend. |
1617 | */ |
1618 | wake_up(&m->pg_init_wait); |
1619 | |
1620 | out: |
1621 | spin_unlock_irqrestore(lock: &m->lock, flags); |
1622 | } |
1623 | |
1624 | static void activate_or_offline_path(struct pgpath *pgpath) |
1625 | { |
1626 | struct request_queue *q = bdev_get_queue(bdev: pgpath->path.dev->bdev); |
1627 | |
1628 | if (pgpath->is_active && !blk_queue_dying(q)) |
1629 | scsi_dh_activate(q, pg_init_done, pgpath); |
1630 | else |
1631 | pg_init_done(data: pgpath, errors: SCSI_DH_DEV_OFFLINED); |
1632 | } |
1633 | |
1634 | static void activate_path_work(struct work_struct *work) |
1635 | { |
1636 | struct pgpath *pgpath = |
1637 | container_of(work, struct pgpath, activate_path.work); |
1638 | |
1639 | activate_or_offline_path(pgpath); |
1640 | } |
1641 | |
1642 | static int multipath_end_io(struct dm_target *ti, struct request *clone, |
1643 | blk_status_t error, union map_info *map_context) |
1644 | { |
1645 | struct dm_mpath_io *mpio = get_mpio(info: map_context); |
1646 | struct pgpath *pgpath = mpio->pgpath; |
1647 | int r = DM_ENDIO_DONE; |
1648 | |
1649 | /* |
 * We don't queue any clone request inside the multipath target
 * during end I/O handling, since those clone requests don't have
 * bio clones. If we queued them inside the multipath target,
 * we would need to make bio clones, which requires memory allocation.
 * (See drivers/md/dm-rq.c:end_clone_bio() for why the clone requests
 * don't have bio clones.)
 * Instead of queueing the clone request here, we queue the original
 * request into dm core, which will remake a clone request and
 * clone bios for it and resubmit it later.
 */
	if (error && blk_path_error(error)) {
		struct multipath *m = ti->private;

		if (error == BLK_STS_RESOURCE)
			r = DM_ENDIO_DELAY_REQUEUE;
		else
			r = DM_ENDIO_REQUEUE;

		if (pgpath)
			fail_path(pgpath);

		if (!atomic_read(&m->nr_valid_paths) &&
		    !must_push_back_rq(m)) {
			if (error == BLK_STS_IOERR)
				dm_report_EIO(m);
			/* complete with the original error */
			r = DM_ENDIO_DONE;
		}
	}

	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
					 clone->io_start_time_ns);
	}

	return r;
}

static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
				blk_status_t *error)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
	struct pgpath *pgpath = mpio->pgpath;
	unsigned long flags;
	int r = DM_ENDIO_DONE;

	if (!*error || !blk_path_error(*error))
		goto done;

	if (pgpath)
		fail_path(pgpath);

	if (!atomic_read(&m->nr_valid_paths)) {
		spin_lock_irqsave(&m->lock, flags);
		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			if (__must_push_back(m)) {
				r = DM_ENDIO_REQUEUE;
			} else {
				dm_report_EIO(m);
				*error = BLK_STS_IOERR;
			}
			spin_unlock_irqrestore(&m->lock, flags);
			goto done;
		}
		spin_unlock_irqrestore(&m->lock, flags);
	}

	multipath_queue_bio(m, clone);
	r = DM_ENDIO_INCOMPLETE;
done:
	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
					 (mpio->start_time_ns ?:
					  dm_start_time_ns_from_clone(clone)));
	}

	return r;
}

/*
 * Suspend with flush can't complete until all the I/O is processed,
 * so if the last path fails we must error any remaining I/O.
 * - Note that if freeze_bdev fails while suspending, the
 *   queue_if_no_path state is lost - userspace should reset it.
 * Otherwise, during noflush suspend, queue_if_no_path will not change.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	/* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
	if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
		queue_if_no_path(m, false, true, __func__);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	}

	DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
		dm_table_device_name(m->ti->table), __func__,
		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));

	spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 * [A|D|E num_ps_status_args [ps_status_args]*
 *  num_paths num_selector_args
 *  [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 * [priority selector-name num_ps_args [ps_args]*
 *  num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
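 *
 * As an illustration only (device numbers and values are made up, not
 * taken from a real table), a table line for one round-robin group with
 * two paths could look like:
 *
 *   0 0 1 1 round-robin 0 2 1 8:16 1 8:32 1
 *
 * i.e. no feature args, no hardware handler, one group with initial
 * group 1, the round-robin selector with no selector args, and two
 * paths carrying one selector arg (the repeat count) each.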
 */
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned int status_flags, char *result, unsigned int maxlen)
{
	int sz = 0, pg_counter, pgpath_counter;
	unsigned long flags;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned int pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
		       atomic_read(&m->pg_init_count));
	else {
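		/*
		 * The leading count is the number of words the selected
		 * features emit below: queue_if_no_path and
		 * retain_attached_hw_handler are one word each, while
		 * pg_init_retries, pg_init_delay_msecs and queue_mode
		 * each take a keyword plus a value, hence the "* 2".
		 */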
		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
		       (m->pg_init_retries > 0) * 2 +
		       (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
		       test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
		       (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);

		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
			DMEMIT("retain_attached_hw_handler ");
		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
			switch (m->queue_mode) {
			case DM_TYPE_BIO_BASED:
				DMEMIT("queue_mode bio ");
				break;
			default:
				WARN_ON_ONCE(true);
				break;
			}
		}
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_IMA:
		sz = 0; /* reset the result pointer */

		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",nr_priority_groups=%u", m->nr_priority_groups);

		pg_counter = 0;
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */
			DMEMIT(",pg_state_%d=%c", pg_counter, state);
			DMEMIT(",nr_pgpaths_%d=%u", pg_counter, pg->nr_pgpaths);
			DMEMIT(",path_selector_name_%d=%s", pg_counter, pg->ps.type->name);

			pgpath_counter = 0;
			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT(",path_name_%d_%d=%s,is_active_%d_%d=%c,fail_count_%d_%d=%u",
				       pg_counter, pgpath_counter, p->path.dev->name,
				       pg_counter, pgpath_counter, p->is_active ? 'A' : 'F',
				       pg_counter, pgpath_counter, p->fail_count);
				if (pg->ps.type->status) {
					DMEMIT(",path_selector_status_%d_%d=",
					       pg_counter, pgpath_counter);
					sz += pg->ps.type->status(&pg->ps, &p->path,
								  type, result + sz,
								  maxlen - sz);
				}
				pgpath_counter++;
			}
			pg_counter++;
		}
		DMEMIT(";");
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}

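/*
 * Handle messages sent through the DM message interface, e.g.
 * (illustrative map name and path device only):
 *
 *   dmsetup message mpath0 0 "queue_if_no_path"
 *   dmsetup message mpath0 0 "fail_path 8:32"
 */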
static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
			     char *result, unsigned int maxlen)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;
	unsigned long flags;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false, __func__);
			spin_lock_irqsave(&m->lock, flags);
			enable_nopath_timeout(m);
			spin_unlock_irqrestore(&m->lock, flags);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, false, false, __func__);
			disable_nopath_timeout(m);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], true);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], false);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}

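/*
 * Note on return values as consumed by DM core's ioctl path: -ENOTCONN
 * asks the caller to retry later (e.g. pg_init still pending), while a
 * positive return indicates the target does not map the whole underlying
 * device, so DM core applies extra checks before passing the ioctl through.
 */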
static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev)
{
	struct multipath *m = ti->private;
	struct pgpath *pgpath;
	unsigned long flags;
	int r;

	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, 0);

	if (pgpath) {
		if (!mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) {
			*bdev = pgpath->path.dev->bdev;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		r = -EIO;
		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		spin_unlock_irqrestore(&m->lock, flags);
	}

	if (r == -ENOTCONN) {
		if (!READ_ONCE(m->current_pg)) {
			/* Path status changed, redo selection */
			(void) choose_pgpath(m, 0);
		}
		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			(void) __pg_init_all_paths(m);
		spin_unlock_irqrestore(&m->lock, flags);
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != bdev_nr_sectors((*bdev)))
		return 1;
	return r;
}

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying devices
 * are busy (so even if we mapped I/Os now, they would wait on the
 * underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy". Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	bool busy = false, has_active = false;
	struct multipath *m = ti->private;
	struct priority_group *pg, *next_pg;
	struct pgpath *pgpath;

	/* pg_init in progress */
	if (atomic_read(&m->pg_init_in_progress))
		return true;

	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
	if (!atomic_read(&m->nr_valid_paths)) {
		unsigned long flags;

		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			spin_unlock_irqrestore(&m->lock, flags);
			return (m->queue_mode != DM_TYPE_REQUEST_BASED);
		}
		spin_unlock_irqrestore(&m->lock, flags);
	}

	/* Guess which priority_group will be used at next mapping time */
	pg = READ_ONCE(m->current_pg);
	next_pg = READ_ONCE(m->next_pg);
	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
		pg = next_pg;

	if (!pg) {
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call choose_pgpath() here, to avoid triggering
		 * pg_init just by checking for busyness.
		 * So we don't know whether the underlying devices we will be
		 * using at next mapping time are busy or not. Just try mapping.
		 */
		return busy;
	}

	/*
	 * If there is at least one non-busy active path, the path selector
	 * will be able to select it, so we consider such a pg not busy.
	 */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}
	}

	if (!has_active) {
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = false;
	}

	return busy;
}

/*
 *---------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------
 */
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 14, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
		    DM_TARGET_PASSES_INTEGRITY,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.map = multipath_map_bio,
	.end_io = multipath_end_io_bio,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{
	int r = -ENOMEM;

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers,
	 * to avoid overloading the existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		goto bad_alloc_kmpath_handlerd;
	}

	dm_mpath_wq = alloc_workqueue("dm_mpath_wq", 0, 0);
	if (!dm_mpath_wq) {
		DMERR("failed to create workqueue dm_mpath_wq");
		goto bad_alloc_dm_mpath_wq;
	}

	r = dm_register_target(&multipath_target);
	if (r < 0)
		goto bad_register_target;

	return 0;

bad_register_target:
	destroy_workqueue(dm_mpath_wq);
bad_alloc_dm_mpath_wq:
	destroy_workqueue(kmpath_handlerd);
bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(dm_mpath_wq);
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

module_param_named(queue_if_no_path_timeout_secs, queue_if_no_path_timeout_secs, ulong, 0644);
MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
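/*
 * The timeout is also adjustable at runtime via sysfs, e.g. (assuming the
 * module loads as dm_multipath):
 *
 *   echo 120 > /sys/module/dm_multipath/parameters/queue_if_no_path_timeout_secs
 */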

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");