1/*
2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include <linux/device-mapper.h>
9
10#include "dm-rq.h"
11#include "dm-bio-record.h"
12#include "dm-path-selector.h"
13#include "dm-uevent.h"
14
15#include <linux/blkdev.h>
16#include <linux/ctype.h>
17#include <linux/init.h>
18#include <linux/mempool.h>
19#include <linux/module.h>
20#include <linux/pagemap.h>
21#include <linux/slab.h>
22#include <linux/time.h>
23#include <linux/workqueue.h>
24#include <linux/delay.h>
25#include <scsi/scsi_dh.h>
26#include <linux/atomic.h>
27#include <linux/blk-mq.h>
28
29#define DM_MSG_PREFIX "multipath"
30#define DM_PG_INIT_DELAY_MSECS 2000
31#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
32
33/* Path properties */
34struct pgpath {
35 struct list_head list;
36
37 struct priority_group *pg; /* Owning PG */
38 unsigned fail_count; /* Cumulative failure count */
39
40 struct dm_path path;
41 struct delayed_work activate_path;
42
43 bool is_active:1; /* Path status */
44};
45
46#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
47
48/*
49 * Paths are grouped into Priority Groups and numbered from 1 upwards.
50 * Each has a path selector which controls which path gets used.
51 */
52struct priority_group {
53 struct list_head list;
54
55 struct multipath *m; /* Owning multipath instance */
56 struct path_selector ps;
57
58 unsigned pg_num; /* Reference number */
59 unsigned nr_pgpaths; /* Number of paths in PG */
60 struct list_head pgpaths;
61
62 bool bypassed:1; /* Temporarily bypass this PG? */
63};
64
65/* Multipath context */
66struct multipath {
67 unsigned long flags; /* Multipath state flags */
68
69 spinlock_t lock;
70 enum dm_queue_mode queue_mode;
71
72 struct pgpath *current_pgpath;
73 struct priority_group *current_pg;
74 struct priority_group *next_pg; /* Switch to this PG if set */
75
76 atomic_t nr_valid_paths; /* Total number of usable paths */
77 unsigned nr_priority_groups;
78 struct list_head priority_groups;
79
80 const char *hw_handler_name;
81 char *hw_handler_params;
82 wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
83 unsigned pg_init_retries; /* Number of times to retry pg_init */
84 unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
85 atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
86 atomic_t pg_init_count; /* Number of times pg_init called */
87
88 struct mutex work_mutex;
89 struct work_struct trigger_event;
90 struct dm_target *ti;
91
92 struct work_struct process_queued_bios;
93 struct bio_list queued_bios;
94};
95
96/*
97 * Context information attached to each io we process.
98 */
99struct dm_mpath_io {
100 struct pgpath *pgpath;
101 size_t nr_bytes;
102};
103
104typedef int (*action_fn) (struct pgpath *pgpath);
105
106static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
107static void trigger_event(struct work_struct *work);
108static void activate_or_offline_path(struct pgpath *pgpath);
109static void activate_path_work(struct work_struct *work);
110static void process_queued_bios(struct work_struct *work);
111
112/*-----------------------------------------------
113 * Multipath state flags.
114 *-----------------------------------------------*/
115
116#define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */
117#define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */
118#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */
119#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3 /* If there's already a hw_handler present, don't change it. */
120#define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */
121#define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */
122#define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */
123
124/*-----------------------------------------------
125 * Allocation routines
126 *-----------------------------------------------*/
127
128static struct pgpath *alloc_pgpath(void)
129{
130 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
131
132 if (!pgpath)
133 return NULL;
134
135 pgpath->is_active = true;
136
137 return pgpath;
138}
139
140static void free_pgpath(struct pgpath *pgpath)
141{
142 kfree(pgpath);
143}
144
145static struct priority_group *alloc_priority_group(void)
146{
147 struct priority_group *pg;
148
149 pg = kzalloc(sizeof(*pg), GFP_KERNEL);
150
151 if (pg)
152 INIT_LIST_HEAD(&pg->pgpaths);
153
154 return pg;
155}
156
157static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
158{
159 struct pgpath *pgpath, *tmp;
160
161 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
162 list_del(&pgpath->list);
163 dm_put_device(ti, pgpath->path.dev);
164 free_pgpath(pgpath);
165 }
166}
167
168static void free_priority_group(struct priority_group *pg,
169 struct dm_target *ti)
170{
171 struct path_selector *ps = &pg->ps;
172
173 if (ps->type) {
174 ps->type->destroy(ps);
175 dm_put_path_selector(ps->type);
176 }
177
178 free_pgpaths(&pg->pgpaths, ti);
179 kfree(pg);
180}
181
182static struct multipath *alloc_multipath(struct dm_target *ti)
183{
184 struct multipath *m;
185
186 m = kzalloc(sizeof(*m), GFP_KERNEL);
187 if (m) {
188 INIT_LIST_HEAD(&m->priority_groups);
189 spin_lock_init(&m->lock);
190 atomic_set(&m->nr_valid_paths, 0);
191 INIT_WORK(&m->trigger_event, trigger_event);
192 mutex_init(&m->work_mutex);
193
194 m->queue_mode = DM_TYPE_NONE;
195
196 m->ti = ti;
197 ti->private = m;
198 }
199
200 return m;
201}
202
203static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
204{
205 if (m->queue_mode == DM_TYPE_NONE) {
206		m->queue_mode = DM_TYPE_REQUEST_BASED;
207	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
208 INIT_WORK(&m->process_queued_bios, process_queued_bios);
209 /*
210 * bio-based doesn't support any direct scsi_dh management;
211 * it just discovers if a scsi_dh is attached.
212 */
213 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
214 }
215
216 dm_table_set_type(ti->table, m->queue_mode);
217
218 /*
219 * Init fields that are only used when a scsi_dh is attached
220 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
221 */
222 set_bit(MPATHF_QUEUE_IO, &m->flags);
223 atomic_set(&m->pg_init_in_progress, 0);
224 atomic_set(&m->pg_init_count, 0);
225 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
226 init_waitqueue_head(&m->pg_init_wait);
227
228 return 0;
229}
230
231static void free_multipath(struct multipath *m)
232{
233 struct priority_group *pg, *tmp;
234
235 list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
236 list_del(&pg->list);
237 free_priority_group(pg, m->ti);
238 }
239
240 kfree(m->hw_handler_name);
241 kfree(m->hw_handler_params);
242 mutex_destroy(&m->work_mutex);
243 kfree(m);
244}
245
246static struct dm_mpath_io *get_mpio(union map_info *info)
247{
248 return info->ptr;
249}
250
251static size_t multipath_per_bio_data_size(void)
252{
253 return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
254}
255
256static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
257{
258 return dm_per_bio_data(bio, multipath_per_bio_data_size());
259}
260
261static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
262{
263 /* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
264 void *bio_details = mpio + 1;
265 return bio_details;
266}
267
268static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
269{
270 struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
271 struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
272
273 mpio->nr_bytes = bio->bi_iter.bi_size;
274 mpio->pgpath = NULL;
275 *mpio_p = mpio;
276
277 dm_bio_record(bio_details, bio);
278}
279
280/*-----------------------------------------------
281 * Path selection
282 *-----------------------------------------------*/
283
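/*
 * Kick off pg_init on every active path in the current priority group.
 * Caller must hold m->lock; returns the number of activations in flight.
 */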
284static int __pg_init_all_paths(struct multipath *m)
285{
286 struct pgpath *pgpath;
287 unsigned long pg_init_delay = 0;
288
289 lockdep_assert_held(&m->lock);
290
291 if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
292 return 0;
293
294 atomic_inc(&m->pg_init_count);
295 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
296
297 /* Check here to reset pg_init_required */
298 if (!m->current_pg)
299 return 0;
300
301 if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
302 pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
303 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
304 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
305 /* Skip failed paths */
306 if (!pgpath->is_active)
307 continue;
308 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
309 pg_init_delay))
310 atomic_inc(&m->pg_init_in_progress);
311 }
312 return atomic_read(&m->pg_init_in_progress);
313}
314
315static int pg_init_all_paths(struct multipath *m)
316{
317 int ret;
318 unsigned long flags;
319
320 spin_lock_irqsave(&m->lock, flags);
321 ret = __pg_init_all_paths(m);
322 spin_unlock_irqrestore(&m->lock, flags);
323
324 return ret;
325}
326
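/*
 * Make @pg the current priority group.  When a hardware handler is in use,
 * mark that pg_init is required and that I/O must be queued until it completes.
 */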
327static void __switch_pg(struct multipath *m, struct priority_group *pg)
328{
329 m->current_pg = pg;
330
331 /* Must we initialise the PG first, and queue I/O till it's ready? */
332 if (m->hw_handler_name) {
333 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
334 set_bit(MPATHF_QUEUE_IO, &m->flags);
335 } else {
336 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
337 clear_bit(MPATHF_QUEUE_IO, &m->flags);
338 }
339
340 atomic_set(&m->pg_init_count, 0);
341}
342
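/*
 * Ask the PG's path selector for a path; if this moves us to a different PG,
 * update current_pgpath and switch groups under the lock.
 */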
343static struct pgpath *choose_path_in_pg(struct multipath *m,
344 struct priority_group *pg,
345 size_t nr_bytes)
346{
347 unsigned long flags;
348 struct dm_path *path;
349 struct pgpath *pgpath;
350
351 path = pg->ps.type->select_path(&pg->ps, nr_bytes);
352 if (!path)
353 return ERR_PTR(-ENXIO);
354
355 pgpath = path_to_pgpath(path);
356
357 if (unlikely(READ_ONCE(m->current_pg) != pg)) {
358 /* Only update current_pgpath if pg changed */
359 spin_lock_irqsave(&m->lock, flags);
360 m->current_pgpath = pgpath;
361 __switch_pg(m, pg);
362 spin_unlock_irqrestore(&m->lock, flags);
363 }
364
365 return pgpath;
366}
367
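/*
 * Select the path for the next nr_bytes of I/O: honour a pending PG switch,
 * stick with the current PG while it has paths, then scan the remaining
 * groups (bypassed ones only as a last resort).
 */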
368static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
369{
370 unsigned long flags;
371 struct priority_group *pg;
372 struct pgpath *pgpath;
373 unsigned bypassed = 1;
374
375 if (!atomic_read(&m->nr_valid_paths)) {
376 clear_bit(MPATHF_QUEUE_IO, &m->flags);
377 goto failed;
378 }
379
380 /* Were we instructed to switch PG? */
381 if (READ_ONCE(m->next_pg)) {
382 spin_lock_irqsave(&m->lock, flags);
383 pg = m->next_pg;
384 if (!pg) {
385 spin_unlock_irqrestore(&m->lock, flags);
386 goto check_current_pg;
387 }
388 m->next_pg = NULL;
389 spin_unlock_irqrestore(&m->lock, flags);
390 pgpath = choose_path_in_pg(m, pg, nr_bytes);
391 if (!IS_ERR_OR_NULL(pgpath))
392 return pgpath;
393 }
394
395 /* Don't change PG until it has no remaining paths */
396check_current_pg:
397 pg = READ_ONCE(m->current_pg);
398 if (pg) {
399 pgpath = choose_path_in_pg(m, pg, nr_bytes);
400 if (!IS_ERR_OR_NULL(pgpath))
401 return pgpath;
402 }
403
404 /*
405 * Loop through priority groups until we find a valid path.
406 * First time we skip PGs marked 'bypassed'.
407 * Second time we only try the ones we skipped, but set
408 * pg_init_delay_retry so we do not hammer controllers.
409 */
410 do {
411 list_for_each_entry(pg, &m->priority_groups, list) {
412 if (pg->bypassed == !!bypassed)
413 continue;
414 pgpath = choose_path_in_pg(m, pg, nr_bytes);
415 if (!IS_ERR_OR_NULL(pgpath)) {
416 if (!bypassed)
417 set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
418 return pgpath;
419 }
420 }
421 } while (bypassed--);
422
423failed:
424 spin_lock_irqsave(&m->lock, flags);
425 m->current_pgpath = NULL;
426 m->current_pg = NULL;
427 spin_unlock_irqrestore(&m->lock, flags);
428
429 return NULL;
430}
431
432/*
433 * dm_report_EIO() is a macro instead of a function to make pr_debug()
434 * report the function name and line number of the function from which
435 * it has been invoked.
436 */
437#define dm_report_EIO(m) \
438do { \
439 struct mapped_device *md = dm_table_get_md((m)->ti->table); \
440 \
441 pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
442 dm_device_name(md), \
443 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
444 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
445 dm_noflush_suspending((m)->ti)); \
446} while (0)
447
448/*
449 * Check whether bios must be queued in the device-mapper core rather
450 * than here in the target.
451 *
452 * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
453 * the same value then we are not between multipath_presuspend()
454 * and multipath_resume() calls and we have no need to check
455 * for the DMF_NOFLUSH_SUSPENDING flag.
456 */
457static bool __must_push_back(struct multipath *m, unsigned long flags)
458{
459 return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
460 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
461 dm_noflush_suspending(m->ti));
462}
463
464/*
465 * Following functions use READ_ONCE to get atomic access to
466 * all m->flags to avoid taking spinlock
467 */
468static bool must_push_back_rq(struct multipath *m)
469{
470 unsigned long flags = READ_ONCE(m->flags);
471 return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
472}
473
474static bool must_push_back_bio(struct multipath *m)
475{
476 unsigned long flags = READ_ONCE(m->flags);
477 return __must_push_back(m, flags);
478}
479
480/*
481 * Map cloned requests (request-based multipath)
482 */
483static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
484 union map_info *map_context,
485 struct request **__clone)
486{
487 struct multipath *m = ti->private;
488 size_t nr_bytes = blk_rq_bytes(rq);
489 struct pgpath *pgpath;
490 struct block_device *bdev;
491 struct dm_mpath_io *mpio = get_mpio(map_context);
492 struct request_queue *q;
493 struct request *clone;
494
495 /* Do we need to select a new pgpath? */
496 pgpath = READ_ONCE(m->current_pgpath);
497 if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
498 pgpath = choose_pgpath(m, nr_bytes);
499
500 if (!pgpath) {
501 if (must_push_back_rq(m))
502 return DM_MAPIO_DELAY_REQUEUE;
503 dm_report_EIO(m); /* Failed */
504 return DM_MAPIO_KILL;
505 } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
506 test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
507 pg_init_all_paths(m);
508 return DM_MAPIO_DELAY_REQUEUE;
509 }
510
511 mpio->pgpath = pgpath;
512 mpio->nr_bytes = nr_bytes;
513
514 bdev = pgpath->path.dev->bdev;
515 q = bdev_get_queue(bdev);
516 clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
517 BLK_MQ_REQ_NOWAIT);
518 if (IS_ERR(clone)) {
519 /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
520 if (blk_queue_dying(q)) {
521 atomic_inc(&m->pg_init_in_progress);
522 activate_or_offline_path(pgpath);
523 return DM_MAPIO_DELAY_REQUEUE;
524 }
525
526 /*
527 * blk-mq's SCHED_RESTART can cover this requeue, so we
528 * needn't deal with it by DELAY_REQUEUE. More importantly,
529 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
530 * get the queue busy feedback (via BLK_STS_RESOURCE),
531 * otherwise I/O merging can suffer.
532 */
533		return DM_MAPIO_REQUEUE;
534	}
535 clone->bio = clone->biotail = NULL;
536 clone->rq_disk = bdev->bd_disk;
537 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
538 *__clone = clone;
539
540 if (pgpath->pg->ps.type->start_io)
541 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
542 &pgpath->path,
543 nr_bytes);
544 return DM_MAPIO_REMAPPED;
545}
546
547static void multipath_release_clone(struct request *clone,
548		union map_info *map_context)
549{
550	if (unlikely(map_context)) {
551 /*
552 * non-NULL map_context means caller is still map
553 * method; must undo multipath_clone_and_map()
554 */
555 struct dm_mpath_io *mpio = get_mpio(map_context);
556 struct pgpath *pgpath = mpio->pgpath;
557
558 if (pgpath && pgpath->pg->ps.type->end_io)
559 pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
560 &pgpath->path,
561 mpio->nr_bytes);
562 }
563
564	blk_put_request(clone);
565}
566
567/*
568 * Map cloned bios (bio-based multipath)
569 */
570
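/*
 * Pick a path for a bio, or queue the bio for the daemon to resubmit if no
 * path is ready yet (returns ERR_PTR(-EAGAIN) in that case).
 */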
571static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
572{
573 struct pgpath *pgpath;
574 unsigned long flags;
575 bool queue_io;
576
577 /* Do we need to select a new pgpath? */
578 pgpath = READ_ONCE(m->current_pgpath);
579	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
580		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
581
582	/* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */
583	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
584
585	if ((pgpath && queue_io) ||
586 (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
587 /* Queue for the daemon to resubmit */
588 spin_lock_irqsave(&m->lock, flags);
589 bio_list_add(&m->queued_bios, bio);
590 spin_unlock_irqrestore(&m->lock, flags);
591
592 /* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
593 if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
594 pg_init_all_paths(m);
595 else if (!queue_io)
596 queue_work(kmultipathd, &m->process_queued_bios);
597
598 return ERR_PTR(-EAGAIN);
599 }
600
601 return pgpath;
602}
603
604static int __multipath_map_bio(struct multipath *m, struct bio *bio,
605			       struct dm_mpath_io *mpio)
606{
607	struct pgpath *pgpath = __map_bio(m, bio);
608
609 if (IS_ERR(pgpath))
610 return DM_MAPIO_SUBMITTED;
611
612 if (!pgpath) {
613 if (must_push_back_bio(m))
614 return DM_MAPIO_REQUEUE;
615 dm_report_EIO(m);
616 return DM_MAPIO_KILL;
617 }
618
619 mpio->pgpath = pgpath;
620
621 bio->bi_status = 0;
622 bio_set_dev(bio, pgpath->path.dev->bdev);
623 bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
624
625 if (pgpath->pg->ps.type->start_io)
626 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
627 &pgpath->path,
628 mpio->nr_bytes);
629 return DM_MAPIO_REMAPPED;
630}
631
632static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
633{
634 struct multipath *m = ti->private;
635 struct dm_mpath_io *mpio = NULL;
636
637 multipath_init_per_bio_data(bio, &mpio);
638 return __multipath_map_bio(m, bio, mpio);
639}
640
641static void process_queued_io_list(struct multipath *m)
642{
643	if (m->queue_mode == DM_TYPE_REQUEST_BASED)
644		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
645 else if (m->queue_mode == DM_TYPE_BIO_BASED)
646 queue_work(kmultipathd, &m->process_queued_bios);
647}
648
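/*
 * Work function: drain m->queued_bios and remap each bio now that the path
 * state may have changed, completing those that can no longer be mapped.
 */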
649static void process_queued_bios(struct work_struct *work)
650{
651 int r;
652 unsigned long flags;
653 struct bio *bio;
654 struct bio_list bios;
655 struct blk_plug plug;
656 struct multipath *m =
657 container_of(work, struct multipath, process_queued_bios);
658
659 bio_list_init(&bios);
660
661 spin_lock_irqsave(&m->lock, flags);
662
663 if (bio_list_empty(&m->queued_bios)) {
664 spin_unlock_irqrestore(&m->lock, flags);
665 return;
666 }
667
668 bio_list_merge(&bios, &m->queued_bios);
669 bio_list_init(&m->queued_bios);
670
671 spin_unlock_irqrestore(&m->lock, flags);
672
673 blk_start_plug(&plug);
674 while ((bio = bio_list_pop(&bios))) {
675 struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
676 dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
677 r = __multipath_map_bio(m, bio, mpio);
678 switch (r) {
679 case DM_MAPIO_KILL:
680 bio->bi_status = BLK_STS_IOERR;
681 bio_endio(bio);
682 break;
683 case DM_MAPIO_REQUEUE:
684 bio->bi_status = BLK_STS_DM_REQUEUE;
685 bio_endio(bio);
686 break;
687 case DM_MAPIO_REMAPPED:
688 generic_make_request(bio);
689 break;
690 case DM_MAPIO_SUBMITTED:
691 break;
692 default:
693 WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
694 }
695 }
696 blk_finish_plug(&plug);
697}
698
699/*
700 * If we run out of usable paths, should we queue I/O or error it?
701 */
702static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
703 bool save_old_value)
704{
705 unsigned long flags;
706
707 spin_lock_irqsave(&m->lock, flags);
708 assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
709 (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
710 (!save_old_value && queue_if_no_path));
711 assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
712 spin_unlock_irqrestore(&m->lock, flags);
713
714 if (!queue_if_no_path) {
715 dm_table_run_md_queue_async(m->ti->table);
716 process_queued_io_list(m);
717 }
718
719 return 0;
720}
721
722/*
723 * An event is triggered whenever a path is taken out of use.
724 * Includes path failure and PG bypass.
725 */
726static void trigger_event(struct work_struct *work)
727{
728 struct multipath *m =
729 container_of(work, struct multipath, trigger_event);
730
731 dm_table_event(m->ti->table);
732}
733
734/*-----------------------------------------------------------------
735 * Constructor/argument parsing:
736 * <#multipath feature args> [<arg>]*
737 * <#hw_handler args> [hw_handler [<arg>]*]
738 * <#priority groups>
739 * <initial priority group>
740 * [<selector> <#selector args> [<arg>]*
741 * <#paths> <#per-path selector args>
742 * [<path> [<arg>]* ]+ ]+
743 *---------------------------------------------------------------*/
744static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
745 struct dm_target *ti)
746{
747 int r;
748 struct path_selector_type *pst;
749 unsigned ps_argc;
750
751 static const struct dm_arg _args[] = {
752 {0, 1024, "invalid number of path selector args"},
753 };
754
755 pst = dm_get_path_selector(dm_shift_arg(as));
756 if (!pst) {
757 ti->error = "unknown path selector type";
758 return -EINVAL;
759 }
760
761 r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
762 if (r) {
763 dm_put_path_selector(pst);
764 return -EINVAL;
765 }
766
767 r = pst->create(&pg->ps, ps_argc, as->argv);
768 if (r) {
769 dm_put_path_selector(pst);
770 ti->error = "path selector constructor failed";
771 return r;
772 }
773
774 pg->ps.type = pst;
775 dm_consume_args(as, ps_argc);
776
777 return 0;
778}
779
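/*
 * Attach (or retain) the SCSI device handler for one path and apply any
 * hardware handler parameters given on the table line.
 */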
780static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
781 const char **attached_handler_name, char **error)
782{
783 struct request_queue *q = bdev_get_queue(bdev);
784 int r;
785
786 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
787retain:
788 if (*attached_handler_name) {
789 /*
790 * Clear any hw_handler_params associated with a
791 * handler that isn't already attached.
792 */
793 if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
794 kfree(m->hw_handler_params);
795 m->hw_handler_params = NULL;
796 }
797
798 /*
799 * Reset hw_handler_name to match the attached handler
800 *
801 * NB. This modifies the table line to show the actual
802 * handler instead of the original table passed in.
803 */
804 kfree(m->hw_handler_name);
805 m->hw_handler_name = *attached_handler_name;
806 *attached_handler_name = NULL;
807 }
808 }
809
810 if (m->hw_handler_name) {
811 r = scsi_dh_attach(q, m->hw_handler_name);
812 if (r == -EBUSY) {
813 char b[BDEVNAME_SIZE];
814
815 printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
816 bdevname(bdev, b));
817 goto retain;
818 }
819 if (r < 0) {
820 *error = "error attaching hardware handler";
821 return r;
822 }
823
824 if (m->hw_handler_params) {
825 r = scsi_dh_set_params(q, m->hw_handler_params);
826 if (r < 0) {
827 *error = "unable to set hardware handler parameters";
828 return r;
829 }
830 }
831 }
832
833 return 0;
834}
835
836static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
837 struct dm_target *ti)
838{
839 int r;
840 struct pgpath *p;
841 struct multipath *m = ti->private;
842 struct request_queue *q;
843 const char *attached_handler_name = NULL;
844
845 /* we need at least a path arg */
846 if (as->argc < 1) {
847 ti->error = "no device given";
848 return ERR_PTR(-EINVAL);
849 }
850
851 p = alloc_pgpath();
852 if (!p)
853 return ERR_PTR(-ENOMEM);
854
855 r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
856 &p->path.dev);
857 if (r) {
858 ti->error = "error getting device";
859 goto bad;
860 }
861
862 q = bdev_get_queue(p->path.dev->bdev);
863 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
864 if (attached_handler_name || m->hw_handler_name) {
865 INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
866 r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
867		kfree(attached_handler_name);
868		if (r) {
869 dm_put_device(ti, p->path.dev);
870 goto bad;
871 }
872 }
873
874 r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
875 if (r) {
876 dm_put_device(ti, p->path.dev);
877 goto bad;
878 }
879
880 return p;
881 bad:
882	free_pgpath(p);
883 return ERR_PTR(r);
884}
885
886static struct priority_group *parse_priority_group(struct dm_arg_set *as,
887 struct multipath *m)
888{
889 static const struct dm_arg _args[] = {
890 {1, 1024, "invalid number of paths"},
891 {0, 1024, "invalid number of selector args"}
892 };
893
894 int r;
895 unsigned i, nr_selector_args, nr_args;
896 struct priority_group *pg;
897 struct dm_target *ti = m->ti;
898
899 if (as->argc < 2) {
900 as->argc = 0;
901 ti->error = "not enough priority group arguments";
902 return ERR_PTR(-EINVAL);
903 }
904
905 pg = alloc_priority_group();
906 if (!pg) {
907 ti->error = "couldn't allocate priority group";
908 return ERR_PTR(-ENOMEM);
909 }
910 pg->m = m;
911
912 r = parse_path_selector(as, pg, ti);
913 if (r)
914 goto bad;
915
916 /*
917 * read the paths
918 */
919 r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
920 if (r)
921 goto bad;
922
923 r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
924 if (r)
925 goto bad;
926
927 nr_args = 1 + nr_selector_args;
928 for (i = 0; i < pg->nr_pgpaths; i++) {
929 struct pgpath *pgpath;
930 struct dm_arg_set path_args;
931
932 if (as->argc < nr_args) {
933 ti->error = "not enough path parameters";
934 r = -EINVAL;
935 goto bad;
936 }
937
938 path_args.argc = nr_args;
939 path_args.argv = as->argv;
940
941 pgpath = parse_path(&path_args, &pg->ps, ti);
942 if (IS_ERR(pgpath)) {
943 r = PTR_ERR(pgpath);
944 goto bad;
945 }
946
947 pgpath->pg = pg;
948 list_add_tail(&pgpath->list, &pg->pgpaths);
949 dm_consume_args(as, nr_args);
950 }
951
952 return pg;
953
954 bad:
955 free_priority_group(pg, ti);
956 return ERR_PTR(r);
957}
958
959static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
960{
961 unsigned hw_argc;
962 int ret;
963 struct dm_target *ti = m->ti;
964
965 static const struct dm_arg _args[] = {
966 {0, 1024, "invalid number of hardware handler args"},
967 };
968
969 if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
970 return -EINVAL;
971
972 if (!hw_argc)
973 return 0;
974
975 if (m->queue_mode == DM_TYPE_BIO_BASED) {
976 dm_consume_args(as, hw_argc);
977 DMERR("bio-based multipath doesn't allow hardware handler args");
978 return 0;
979 }
980
981 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
982 if (!m->hw_handler_name)
983 return -EINVAL;
984
985 if (hw_argc > 1) {
986 char *p;
987 int i, j, len = 4;
988
989 for (i = 0; i <= hw_argc - 2; i++)
990 len += strlen(as->argv[i]) + 1;
991 p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
992 if (!p) {
993 ti->error = "memory allocation failed";
994 ret = -ENOMEM;
995 goto fail;
996 }
997 j = sprintf(p, "%d", hw_argc - 1);
998 for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
999 j = sprintf(p, "%s", as->argv[i]);
1000 }
1001 dm_consume_args(as, hw_argc - 1);
1002
1003 return 0;
1004fail:
1005 kfree(m->hw_handler_name);
1006 m->hw_handler_name = NULL;
1007 return ret;
1008}
1009
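/*
 * Parse the optional feature arguments: queue_if_no_path,
 * retain_attached_hw_handler, pg_init_retries, pg_init_delay_msecs and
 * queue_mode.
 */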
1010static int parse_features(struct dm_arg_set *as, struct multipath *m)
1011{
1012 int r;
1013 unsigned argc;
1014 struct dm_target *ti = m->ti;
1015 const char *arg_name;
1016
1017 static const struct dm_arg _args[] = {
1018 {0, 8, "invalid number of feature args"},
1019 {1, 50, "pg_init_retries must be between 1 and 50"},
1020 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
1021 };
1022
1023 r = dm_read_arg_group(_args, as, &argc, &ti->error);
1024 if (r)
1025 return -EINVAL;
1026
1027 if (!argc)
1028 return 0;
1029
1030 do {
1031 arg_name = dm_shift_arg(as);
1032 argc--;
1033
1034 if (!strcasecmp(arg_name, "queue_if_no_path")) {
1035 r = queue_if_no_path(m, true, false);
1036 continue;
1037 }
1038
1039 if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
1040 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
1041 continue;
1042 }
1043
1044 if (!strcasecmp(arg_name, "pg_init_retries") &&
1045 (argc >= 1)) {
1046 r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
1047 argc--;
1048 continue;
1049 }
1050
1051 if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
1052 (argc >= 1)) {
1053 r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
1054 argc--;
1055 continue;
1056 }
1057
1058 if (!strcasecmp(arg_name, "queue_mode") &&
1059 (argc >= 1)) {
1060 const char *queue_mode_name = dm_shift_arg(as);
1061
1062 if (!strcasecmp(queue_mode_name, "bio"))
1063 m->queue_mode = DM_TYPE_BIO_BASED;
1064			else if (!strcasecmp(queue_mode_name, "rq") ||
1065				 !strcasecmp(queue_mode_name, "mq"))
1066				m->queue_mode = DM_TYPE_REQUEST_BASED;
1067			else {
1068 ti->error = "Unknown 'queue_mode' requested";
1069 r = -EINVAL;
1070 }
1071 argc--;
1072 continue;
1073 }
1074
1075 ti->error = "Unrecognised multipath feature request";
1076 r = -EINVAL;
1077 } while (argc && !r);
1078
1079 return r;
1080}
1081
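/* Construct a multipath target from the table line described above. */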
1082static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
1083{
1084 /* target arguments */
1085 static const struct dm_arg _args[] = {
1086 {0, 1024, "invalid number of priority groups"},
1087 {0, 1024, "invalid initial priority group number"},
1088 };
1089
1090 int r;
1091 struct multipath *m;
1092 struct dm_arg_set as;
1093 unsigned pg_count = 0;
1094 unsigned next_pg_num;
1095
1096 as.argc = argc;
1097 as.argv = argv;
1098
1099 m = alloc_multipath(ti);
1100 if (!m) {
1101 ti->error = "can't allocate multipath";
1102 return -EINVAL;
1103 }
1104
1105 r = parse_features(&as, m);
1106 if (r)
1107 goto bad;
1108
1109 r = alloc_multipath_stage2(ti, m);
1110 if (r)
1111 goto bad;
1112
1113 r = parse_hw_handler(&as, m);
1114 if (r)
1115 goto bad;
1116
1117 r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
1118 if (r)
1119 goto bad;
1120
1121 r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
1122 if (r)
1123 goto bad;
1124
1125 if ((!m->nr_priority_groups && next_pg_num) ||
1126 (m->nr_priority_groups && !next_pg_num)) {
1127 ti->error = "invalid initial priority group";
1128 r = -EINVAL;
1129 goto bad;
1130 }
1131
1132 /* parse the priority groups */
1133 while (as.argc) {
1134 struct priority_group *pg;
1135 unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
1136
1137 pg = parse_priority_group(&as, m);
1138 if (IS_ERR(pg)) {
1139 r = PTR_ERR(pg);
1140 goto bad;
1141 }
1142
1143 nr_valid_paths += pg->nr_pgpaths;
1144 atomic_set(&m->nr_valid_paths, nr_valid_paths);
1145
1146 list_add_tail(&pg->list, &m->priority_groups);
1147 pg_count++;
1148 pg->pg_num = pg_count;
1149 if (!--next_pg_num)
1150 m->next_pg = pg;
1151 }
1152
1153 if (pg_count != m->nr_priority_groups) {
1154 ti->error = "priority group count mismatch";
1155 r = -EINVAL;
1156 goto bad;
1157 }
1158
1159 ti->num_flush_bios = 1;
1160 ti->num_discard_bios = 1;
1161 ti->num_write_same_bios = 1;
1162 ti->num_write_zeroes_bios = 1;
1163 if (m->queue_mode == DM_TYPE_BIO_BASED)
1164 ti->per_io_data_size = multipath_per_bio_data_size();
1165 else
1166 ti->per_io_data_size = sizeof(struct dm_mpath_io);
1167
1168 return 0;
1169
1170 bad:
1171 free_multipath(m);
1172 return r;
1173}
1174
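/* Sleep until all outstanding pg_init work for this map has completed. */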
1175static void multipath_wait_for_pg_init_completion(struct multipath *m)
1176{
1177 DEFINE_WAIT(wait);
1178
1179 while (1) {
1180 prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
1181
1182 if (!atomic_read(&m->pg_init_in_progress))
1183 break;
1184
1185 io_schedule();
1186 }
1187 finish_wait(&m->pg_init_wait, &wait);
1188}
1189
1190static void flush_multipath_work(struct multipath *m)
1191{
1192 if (m->hw_handler_name) {
1193		unsigned long flags;
1194
1195		if (!atomic_read(&m->pg_init_in_progress))
1196 goto skip;
1197
1198 spin_lock_irqsave(&m->lock, flags);
1199 if (atomic_read(&m->pg_init_in_progress) &&
1200 !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
1201 spin_unlock_irqrestore(&m->lock, flags);
1202
1203			flush_workqueue(kmpath_handlerd);
1204			multipath_wait_for_pg_init_completion(m);
1205
1206			spin_lock_irqsave(&m->lock, flags);
1207 clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1208 }
1209 spin_unlock_irqrestore(&m->lock, flags);
1210	}
1211skip:
1212	if (m->queue_mode == DM_TYPE_BIO_BASED)
1213 flush_work(&m->process_queued_bios);
1214	flush_work(&m->trigger_event);
1215}
1216
1217static void multipath_dtr(struct dm_target *ti)
1218{
1219 struct multipath *m = ti->private;
1220
1221 flush_multipath_work(m);
1222 free_multipath(m);
1223}
1224
1225/*
1226 * Take a path out of use.
1227 */
1228static int fail_path(struct pgpath *pgpath)
1229{
1230 unsigned long flags;
1231 struct multipath *m = pgpath->pg->m;
1232
1233 spin_lock_irqsave(&m->lock, flags);
1234
1235 if (!pgpath->is_active)
1236 goto out;
1237
1238 DMWARN("Failing path %s.", pgpath->path.dev->name);
1239
1240 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1241 pgpath->is_active = false;
1242 pgpath->fail_count++;
1243
1244 atomic_dec(&m->nr_valid_paths);
1245
1246 if (pgpath == m->current_pgpath)
1247 m->current_pgpath = NULL;
1248
1249 dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1250 pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
1251
1252 schedule_work(&m->trigger_event);
1253
1254out:
1255 spin_unlock_irqrestore(&m->lock, flags);
1256
1257 return 0;
1258}
1259
1260/*
1261 * Reinstate a previously-failed path
1262 */
1263static int reinstate_path(struct pgpath *pgpath)
1264{
1265 int r = 0, run_queue = 0;
1266 unsigned long flags;
1267 struct multipath *m = pgpath->pg->m;
1268 unsigned nr_valid_paths;
1269
1270 spin_lock_irqsave(&m->lock, flags);
1271
1272 if (pgpath->is_active)
1273 goto out;
1274
1275 DMWARN("Reinstating path %s.", pgpath->path.dev->name);
1276
1277 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1278 if (r)
1279 goto out;
1280
1281 pgpath->is_active = true;
1282
1283 nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1284 if (nr_valid_paths == 1) {
1285 m->current_pgpath = NULL;
1286 run_queue = 1;
1287 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1288 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1289 atomic_inc(&m->pg_init_in_progress);
1290 }
1291
1292 dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1293 pgpath->path.dev->name, nr_valid_paths);
1294
1295 schedule_work(&m->trigger_event);
1296
1297out:
1298 spin_unlock_irqrestore(&m->lock, flags);
1299 if (run_queue) {
1300 dm_table_run_md_queue_async(m->ti->table);
1301 process_queued_io_list(m);
1302 }
1303
1304 return r;
1305}
1306
1307/*
1308 * Fail or reinstate all paths that match the provided struct dm_dev.
1309 */
1310static int action_dev(struct multipath *m, struct dm_dev *dev,
1311 action_fn action)
1312{
1313 int r = -EINVAL;
1314 struct pgpath *pgpath;
1315 struct priority_group *pg;
1316
1317 list_for_each_entry(pg, &m->priority_groups, list) {
1318 list_for_each_entry(pgpath, &pg->pgpaths, list) {
1319 if (pgpath->path.dev == dev)
1320 r = action(pgpath);
1321 }
1322 }
1323
1324 return r;
1325}
1326
1327/*
1328 * Temporarily try to avoid having to use the specified PG
1329 */
1330static void bypass_pg(struct multipath *m, struct priority_group *pg,
1331 bool bypassed)
1332{
1333 unsigned long flags;
1334
1335 spin_lock_irqsave(&m->lock, flags);
1336
1337 pg->bypassed = bypassed;
1338 m->current_pgpath = NULL;
1339 m->current_pg = NULL;
1340
1341 spin_unlock_irqrestore(&m->lock, flags);
1342
1343 schedule_work(&m->trigger_event);
1344}
1345
1346/*
1347 * Switch to using the specified PG from the next I/O that gets mapped
1348 */
1349static int switch_pg_num(struct multipath *m, const char *pgstr)
1350{
1351 struct priority_group *pg;
1352 unsigned pgnum;
1353 unsigned long flags;
1354 char dummy;
1355
1356 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1357 !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1358 DMWARN("invalid PG number supplied to switch_pg_num");
1359 return -EINVAL;
1360 }
1361
1362 spin_lock_irqsave(&m->lock, flags);
1363 list_for_each_entry(pg, &m->priority_groups, list) {
1364 pg->bypassed = false;
1365 if (--pgnum)
1366 continue;
1367
1368 m->current_pgpath = NULL;
1369 m->current_pg = NULL;
1370 m->next_pg = pg;
1371 }
1372 spin_unlock_irqrestore(&m->lock, flags);
1373
1374 schedule_work(&m->trigger_event);
1375 return 0;
1376}
1377
1378/*
1379 * Set/clear bypassed status of a PG.
1380 * PGs are numbered upwards from 1 in the order they were declared.
1381 */
1382static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1383{
1384 struct priority_group *pg;
1385 unsigned pgnum;
1386 char dummy;
1387
1388 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1389 !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1390 DMWARN("invalid PG number supplied to bypass_pg");
1391 return -EINVAL;
1392 }
1393
1394 list_for_each_entry(pg, &m->priority_groups, list) {
1395 if (!--pgnum)
1396 break;
1397 }
1398
1399 bypass_pg(m, pg, bypassed);
1400 return 0;
1401}
1402
1403/*
1404 * Should we retry pg_init immediately?
1405 */
1406static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1407{
1408 unsigned long flags;
1409 bool limit_reached = false;
1410
1411 spin_lock_irqsave(&m->lock, flags);
1412
1413 if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1414 !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
1415 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
1416 else
1417 limit_reached = true;
1418
1419 spin_unlock_irqrestore(&m->lock, flags);
1420
1421 return limit_reached;
1422}
1423
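/*
 * Completion callback for scsi_dh_activate(): interpret the handler status,
 * fail or bypass paths as needed, and restart queued I/O once all
 * outstanding activations have finished.
 */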
1424static void pg_init_done(void *data, int errors)
1425{
1426 struct pgpath *pgpath = data;
1427 struct priority_group *pg = pgpath->pg;
1428 struct multipath *m = pg->m;
1429 unsigned long flags;
1430 bool delay_retry = false;
1431
1432 /* device or driver problems */
1433 switch (errors) {
1434 case SCSI_DH_OK:
1435 break;
1436 case SCSI_DH_NOSYS:
1437 if (!m->hw_handler_name) {
1438 errors = 0;
1439 break;
1440 }
1441 DMERR("Could not failover the device: Handler scsi_dh_%s "
1442 "Error %d.", m->hw_handler_name, errors);
1443 /*
1444 * Fail path for now, so we do not ping pong
1445 */
1446 fail_path(pgpath);
1447 break;
1448 case SCSI_DH_DEV_TEMP_BUSY:
1449 /*
1450 * Probably doing something like FW upgrade on the
1451 * controller so try the other pg.
1452 */
1453 bypass_pg(m, pg, true);
1454 break;
1455 case SCSI_DH_RETRY:
1456 /* Wait before retrying. */
1457 delay_retry = 1;
1458 /* fall through */
1459 case SCSI_DH_IMM_RETRY:
1460 case SCSI_DH_RES_TEMP_UNAVAIL:
1461 if (pg_init_limit_reached(m, pgpath))
1462 fail_path(pgpath);
1463 errors = 0;
1464 break;
1465 case SCSI_DH_DEV_OFFLINED:
1466 default:
1467 /*
1468 * We probably do not want to fail the path for a device
1469 * error, but this is what the old dm did. In future
1470 * patches we can do more advanced handling.
1471 */
1472 fail_path(pgpath);
1473 }
1474
1475 spin_lock_irqsave(&m->lock, flags);
1476 if (errors) {
1477 if (pgpath == m->current_pgpath) {
1478 DMERR("Could not failover device. Error %d.", errors);
1479 m->current_pgpath = NULL;
1480 m->current_pg = NULL;
1481 }
1482 } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1483 pg->bypassed = false;
1484
1485 if (atomic_dec_return(&m->pg_init_in_progress) > 0)
1486		/* Activations of other paths are still ongoing */
1487 goto out;
1488
1489 if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1490 if (delay_retry)
1491 set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1492 else
1493 clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1494
1495 if (__pg_init_all_paths(m))
1496 goto out;
1497 }
1498 clear_bit(MPATHF_QUEUE_IO, &m->flags);
1499
1500 process_queued_io_list(m);
1501
1502 /*
1503 * Wake up any thread waiting to suspend.
1504 */
1505 wake_up(&m->pg_init_wait);
1506
1507out:
1508 spin_unlock_irqrestore(&m->lock, flags);
1509}
1510
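/* Activate a usable path, or complete pg_init as "offlined" if it is not. */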
1511static void activate_or_offline_path(struct pgpath *pgpath)
1512{
1513 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1514
1515 if (pgpath->is_active && !blk_queue_dying(q))
1516 scsi_dh_activate(q, pg_init_done, pgpath);
1517 else
1518 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1519}
1520
1521static void activate_path_work(struct work_struct *work)
1522{
1523 struct pgpath *pgpath =
1524 container_of(work, struct pgpath, activate_path.work);
1525
1526 activate_or_offline_path(pgpath);
1527}
1528
1529static int multipath_end_io(struct dm_target *ti, struct request *clone,
1530 blk_status_t error, union map_info *map_context)
1531{
1532 struct dm_mpath_io *mpio = get_mpio(map_context);
1533 struct pgpath *pgpath = mpio->pgpath;
1534 int r = DM_ENDIO_DONE;
1535
1536 /*
1537 * We don't queue any clone request inside the multipath target
1538 * during end I/O handling, since those clone requests don't have
1539 * bio clones. If we queue them inside the multipath target,
1540 * we need to make bio clones, that requires memory allocation.
1541 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
1542 * don't have bio clones.)
1543 * Instead of queueing the clone request here, we queue the original
1544 * request into dm core, which will remake a clone request and
1545 * clone bios for it and resubmit it later.
1546 */
1547 if (error && blk_path_error(error)) {
1548 struct multipath *m = ti->private;
1549
1550 if (error == BLK_STS_RESOURCE)
1551 r = DM_ENDIO_DELAY_REQUEUE;
1552 else
1553 r = DM_ENDIO_REQUEUE;
1554
1555 if (pgpath)
1556 fail_path(pgpath);
1557
1558 if (atomic_read(&m->nr_valid_paths) == 0 &&
1559 !must_push_back_rq(m)) {
1560 if (error == BLK_STS_IOERR)
1561 dm_report_EIO(m);
1562 /* complete with the original error */
1563 r = DM_ENDIO_DONE;
1564 }
1565 }
1566
1567 if (pgpath) {
1568 struct path_selector *ps = &pgpath->pg->ps;
1569
1570 if (ps->type->end_io)
1571 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1572 }
1573
1574 return r;
1575}
1576
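/*
 * Bio-based end_io: on a path error, fail the path and either requeue the bio
 * via the daemon or, with no paths left and queueing disabled, return the error.
 */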
1577static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
1578 blk_status_t *error)
1579{
1580 struct multipath *m = ti->private;
1581 struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
1582 struct pgpath *pgpath = mpio->pgpath;
1583 unsigned long flags;
1584 int r = DM_ENDIO_DONE;
1585
1586 if (!*error || !blk_path_error(*error))
1587 goto done;
1588
1589 if (pgpath)
1590 fail_path(pgpath);
1591
1592 if (atomic_read(&m->nr_valid_paths) == 0 &&
1593 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1594 if (must_push_back_bio(m)) {
1595 r = DM_ENDIO_REQUEUE;
1596 } else {
1597 dm_report_EIO(m);
1598 *error = BLK_STS_IOERR;
1599 }
1600 goto done;
1601 }
1602
1603 spin_lock_irqsave(&m->lock, flags);
1604 bio_list_add(&m->queued_bios, clone);
1605 spin_unlock_irqrestore(&m->lock, flags);
1606 if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
1607 queue_work(kmultipathd, &m->process_queued_bios);
1608
1609 r = DM_ENDIO_INCOMPLETE;
1610done:
1611 if (pgpath) {
1612 struct path_selector *ps = &pgpath->pg->ps;
1613
1614 if (ps->type->end_io)
1615 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1616 }
1617
1618 return r;
1619}
1620
1621/*
1622 * Suspend can't complete until all the I/O is processed so if
1623 * the last path fails we must error any remaining I/O.
1624 * Note that if the freeze_bdev fails while suspending, the
1625 * queue_if_no_path state is lost - userspace should reset it.
1626 */
1627static void multipath_presuspend(struct dm_target *ti)
1628{
1629 struct multipath *m = ti->private;
1630
1631 queue_if_no_path(m, false, true);
1632}
1633
1634static void multipath_postsuspend(struct dm_target *ti)
1635{
1636 struct multipath *m = ti->private;
1637
1638 mutex_lock(&m->work_mutex);
1639 flush_multipath_work(m);
1640 mutex_unlock(&m->work_mutex);
1641}
1642
1643/*
1644 * Restore the queue_if_no_path setting.
1645 */
1646static void multipath_resume(struct dm_target *ti)
1647{
1648 struct multipath *m = ti->private;
1649 unsigned long flags;
1650
1651 spin_lock_irqsave(&m->lock, flags);
1652 assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
1653 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
1654 spin_unlock_irqrestore(&m->lock, flags);
1655}
1656
1657/*
1658 * Info output has the following format:
1659 * num_multipath_feature_args [multipath_feature_args]*
1660 * num_handler_status_args [handler_status_args]*
1661 * num_groups init_group_number
1662 * [A|D|E num_ps_status_args [ps_status_args]*
1663 * num_paths num_selector_args
1664 * [path_dev A|F fail_count [selector_args]* ]+ ]+
1665 *
1666 * Table output has the following format (identical to the constructor string):
1667 * num_feature_args [features_args]*
1668 * num_handler_args hw_handler [hw_handler_args]*
1669 * num_groups init_group_number
1670 * [priority selector-name num_ps_args [ps_args]*
1671 * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1672 */
1673static void multipath_status(struct dm_target *ti, status_type_t type,
1674 unsigned status_flags, char *result, unsigned maxlen)
1675{
1676 int sz = 0;
1677 unsigned long flags;
1678 struct multipath *m = ti->private;
1679 struct priority_group *pg;
1680 struct pgpath *p;
1681 unsigned pg_num;
1682 char state;
1683
1684 spin_lock_irqsave(&m->lock, flags);
1685
1686 /* Features */
1687 if (type == STATUSTYPE_INFO)
1688 DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
1689 atomic_read(&m->pg_init_count));
1690 else {
1691 DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
1692 (m->pg_init_retries > 0) * 2 +
1693 (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1694 test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
1695 (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
1696
1697 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1698 DMEMIT("queue_if_no_path ");
1699 if (m->pg_init_retries)
1700 DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1701 if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1702 DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1703 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
1704 DMEMIT("retain_attached_hw_handler ");
1705 if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
1706 switch(m->queue_mode) {
1707 case DM_TYPE_BIO_BASED:
1708 DMEMIT("queue_mode bio ");
1709 break;
1710			default:
1711 WARN_ON_ONCE(true);
1712 break;
1713 }
1714 }
1715 }
1716
1717 if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1718 DMEMIT("0 ");
1719 else
1720 DMEMIT("1 %s ", m->hw_handler_name);
1721
1722 DMEMIT("%u ", m->nr_priority_groups);
1723
1724 if (m->next_pg)
1725 pg_num = m->next_pg->pg_num;
1726 else if (m->current_pg)
1727 pg_num = m->current_pg->pg_num;
1728 else
1729 pg_num = (m->nr_priority_groups ? 1 : 0);
1730
1731 DMEMIT("%u ", pg_num);
1732
1733 switch (type) {
1734 case STATUSTYPE_INFO:
1735 list_for_each_entry(pg, &m->priority_groups, list) {
1736 if (pg->bypassed)
1737 state = 'D'; /* Disabled */
1738 else if (pg == m->current_pg)
1739 state = 'A'; /* Currently Active */
1740 else
1741 state = 'E'; /* Enabled */
1742
1743 DMEMIT("%c ", state);
1744
1745 if (pg->ps.type->status)
1746 sz += pg->ps.type->status(&pg->ps, NULL, type,
1747 result + sz,
1748 maxlen - sz);
1749 else
1750 DMEMIT("0 ");
1751
1752 DMEMIT("%u %u ", pg->nr_pgpaths,
1753 pg->ps.type->info_args);
1754
1755 list_for_each_entry(p, &pg->pgpaths, list) {
1756 DMEMIT("%s %s %u ", p->path.dev->name,
1757 p->is_active ? "A" : "F",
1758 p->fail_count);
1759 if (pg->ps.type->status)
1760 sz += pg->ps.type->status(&pg->ps,
1761 &p->path, type, result + sz,
1762 maxlen - sz);
1763 }
1764 }
1765 break;
1766
1767 case STATUSTYPE_TABLE:
1768 list_for_each_entry(pg, &m->priority_groups, list) {
1769 DMEMIT("%s ", pg->ps.type->name);
1770
1771 if (pg->ps.type->status)
1772 sz += pg->ps.type->status(&pg->ps, NULL, type,
1773 result + sz,
1774 maxlen - sz);
1775 else
1776 DMEMIT("0 ");
1777
1778 DMEMIT("%u %u ", pg->nr_pgpaths,
1779 pg->ps.type->table_args);
1780
1781 list_for_each_entry(p, &pg->pgpaths, list) {
1782 DMEMIT("%s ", p->path.dev->name);
1783 if (pg->ps.type->status)
1784 sz += pg->ps.type->status(&pg->ps,
1785 &p->path, type, result + sz,
1786 maxlen - sz);
1787 }
1788 }
1789 break;
1790 }
1791
1792 spin_unlock_irqrestore(&m->lock, flags);
1793}
1794
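/*
 * Handle dmsetup messages: queue_if_no_path, fail_if_no_path, disable_group,
 * enable_group, switch_group, reinstate_path and fail_path.
 */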
1795static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
1796 char *result, unsigned maxlen)
1797{
1798 int r = -EINVAL;
1799 struct dm_dev *dev;
1800 struct multipath *m = ti->private;
1801 action_fn action;
1802
1803 mutex_lock(&m->work_mutex);
1804
1805 if (dm_suspended(ti)) {
1806 r = -EBUSY;
1807 goto out;
1808 }
1809
1810 if (argc == 1) {
1811 if (!strcasecmp(argv[0], "queue_if_no_path")) {
1812 r = queue_if_no_path(m, true, false);
1813 goto out;
1814 } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1815 r = queue_if_no_path(m, false, false);
1816 goto out;
1817 }
1818 }
1819
1820 if (argc != 2) {
1821 DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1822 goto out;
1823 }
1824
1825 if (!strcasecmp(argv[0], "disable_group")) {
1826 r = bypass_pg_num(m, argv[1], true);
1827 goto out;
1828 } else if (!strcasecmp(argv[0], "enable_group")) {
1829 r = bypass_pg_num(m, argv[1], false);
1830 goto out;
1831 } else if (!strcasecmp(argv[0], "switch_group")) {
1832 r = switch_pg_num(m, argv[1]);
1833 goto out;
1834 } else if (!strcasecmp(argv[0], "reinstate_path"))
1835 action = reinstate_path;
1836 else if (!strcasecmp(argv[0], "fail_path"))
1837 action = fail_path;
1838 else {
1839 DMWARN("Unrecognised multipath message received: %s", argv[0]);
1840 goto out;
1841 }
1842
1843 r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1844 if (r) {
1845 DMWARN("message: error getting device %s",
1846 argv[1]);
1847 goto out;
1848 }
1849
1850 r = action_dev(m, dev, action);
1851
1852 dm_put_device(ti, dev);
1853
1854out:
1855 mutex_unlock(&m->work_mutex);
1856 return r;
1857}
1858
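/*
 * Select the block device that should receive an ioctl: the current path if
 * one is usable, otherwise -ENOTCONN or -EIO while path selection is retried.
 */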
1859static int multipath_prepare_ioctl(struct dm_target *ti,
1860 struct block_device **bdev)
1861{
1862 struct multipath *m = ti->private;
1863 struct pgpath *current_pgpath;
1864 int r;
1865
1866 current_pgpath = READ_ONCE(m->current_pgpath);
1867	if (!current_pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
1868		current_pgpath = choose_pgpath(m, 0);
1869
1870 if (current_pgpath) {
1871 if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
1872 *bdev = current_pgpath->path.dev->bdev;
1873 r = 0;
1874 } else {
1875 /* pg_init has not started or completed */
1876 r = -ENOTCONN;
1877 }
1878 } else {
1879 /* No path is available */
1880 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1881 r = -ENOTCONN;
1882 else
1883 r = -EIO;
1884 }
1885
1886 if (r == -ENOTCONN) {
1887 if (!READ_ONCE(m->current_pg)) {
1888 /* Path status changed, redo selection */
1889 (void) choose_pgpath(m, 0);
1890 }
1891 if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1892 pg_init_all_paths(m);
1893 dm_table_run_md_queue_async(m->ti->table);
1894 process_queued_io_list(m);
1895 }
1896
1897 /*
1898 * Only pass ioctls through if the device sizes match exactly.
1899 */
1900 if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
1901 return 1;
1902 return r;
1903}
1904
1905static int multipath_iterate_devices(struct dm_target *ti,
1906 iterate_devices_callout_fn fn, void *data)
1907{
1908 struct multipath *m = ti->private;
1909 struct priority_group *pg;
1910 struct pgpath *p;
1911 int ret = 0;
1912
1913 list_for_each_entry(pg, &m->priority_groups, list) {
1914 list_for_each_entry(p, &pg->pgpaths, list) {
1915 ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
1916 if (ret)
1917 goto out;
1918 }
1919 }
1920
1921out:
1922 return ret;
1923}
1924
1925static int pgpath_busy(struct pgpath *pgpath)
1926{
1927 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1928
1929 return blk_lld_busy(q);
1930}
1931
1932/*
1933 * We return "busy", only when we can map I/Os but underlying devices
1934 * are busy (so even if we map I/Os now, the I/Os will wait on
1935 * the underlying queue).
1936 * In other words, if we want to kill I/Os or queue them inside us
1937 * due to map unavailability, we don't return "busy". Otherwise,
1938 * dm core won't give us the I/Os and we can't do what we want.
1939 */
1940static int multipath_busy(struct dm_target *ti)
1941{
1942 bool busy = false, has_active = false;
1943 struct multipath *m = ti->private;
1944 struct priority_group *pg, *next_pg;
1945 struct pgpath *pgpath;
1946
1947 /* pg_init in progress */
1948 if (atomic_read(&m->pg_init_in_progress))
1949 return true;
1950
1951 /* no paths available, for blk-mq: rely on IO mapping to delay requeue */
1952 if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1953		return (m->queue_mode != DM_TYPE_REQUEST_BASED);
1954
1955 /* Guess which priority_group will be used at next mapping time */
1956 pg = READ_ONCE(m->current_pg);
1957 next_pg = READ_ONCE(m->next_pg);
1958 if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
1959 pg = next_pg;
1960
1961 if (!pg) {
1962 /*
1963 * We don't know which pg will be used at next mapping time.
1964 * We don't call choose_pgpath() here to avoid to trigger
1965 * pg_init just by busy checking.
1966 * So we don't know whether underlying devices we will be using
1967 * at next mapping time are busy or not. Just try mapping.
1968 */
1969 return busy;
1970 }
1971
1972 /*
1973 * If there is one non-busy active path at least, the path selector
1974 * will be able to select it. So we consider such a pg as not busy.
1975 */
1976 busy = true;
1977 list_for_each_entry(pgpath, &pg->pgpaths, list) {
1978 if (pgpath->is_active) {
1979 has_active = true;
1980 if (!pgpath_busy(pgpath)) {
1981 busy = false;
1982 break;
1983 }
1984 }
1985 }
1986
1987 if (!has_active) {
1988 /*
1989 * No active path in this pg, so this pg won't be used and
1990 * the current_pg will be changed at next mapping time.
1991 * We need to try mapping to determine it.
1992 */
1993 busy = false;
1994 }
1995
1996 return busy;
1997}
1998
1999/*-----------------------------------------------------------------
2000 * Module setup
2001 *---------------------------------------------------------------*/
2002static struct target_type multipath_target = {
2003 .name = "multipath",
2004 .version = {1, 13, 0},
2005 .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
2006 DM_TARGET_PASSES_INTEGRITY,
2007 .module = THIS_MODULE,
2008 .ctr = multipath_ctr,
2009 .dtr = multipath_dtr,
2010 .clone_and_map_rq = multipath_clone_and_map,
2011 .release_clone_rq = multipath_release_clone,
2012 .rq_end_io = multipath_end_io,
2013 .map = multipath_map_bio,
2014 .end_io = multipath_end_io_bio,
2015 .presuspend = multipath_presuspend,
2016 .postsuspend = multipath_postsuspend,
2017 .resume = multipath_resume,
2018 .status = multipath_status,
2019 .message = multipath_message,
2020 .prepare_ioctl = multipath_prepare_ioctl,
2021 .iterate_devices = multipath_iterate_devices,
2022 .busy = multipath_busy,
2023};
2024
2025static int __init dm_multipath_init(void)
2026{
2027 int r;
2028
2029 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
2030 if (!kmultipathd) {
2031 DMERR("failed to create workqueue kmpathd");
2032 r = -ENOMEM;
2033 goto bad_alloc_kmultipathd;
2034 }
2035
2036 /*
2037 * A separate workqueue is used to handle the device handlers
2038 * to avoid overloading existing workqueue. Overloading the
2039 * old workqueue would also create a bottleneck in the
2040 * path of the storage hardware device activation.
2041 */
2042 kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
2043 WQ_MEM_RECLAIM);
2044 if (!kmpath_handlerd) {
2045 DMERR("failed to create workqueue kmpath_handlerd");
2046 r = -ENOMEM;
2047 goto bad_alloc_kmpath_handlerd;
2048 }
2049
2050 r = dm_register_target(&multipath_target);
2051 if (r < 0) {
2052 DMERR("request-based register failed %d", r);
2053 r = -EINVAL;
2054 goto bad_register_target;
2055 }
2056
2057 return 0;
2058
2059bad_register_target:
2060 destroy_workqueue(kmpath_handlerd);
2061bad_alloc_kmpath_handlerd:
2062 destroy_workqueue(kmultipathd);
2063bad_alloc_kmultipathd:
2064 return r;
2065}
2066
2067static void __exit dm_multipath_exit(void)
2068{
2069 destroy_workqueue(kmpath_handlerd);
2070 destroy_workqueue(kmultipathd);
2071
2072 dm_unregister_target(&multipath_target);
2073}
2074
2075module_init(dm_multipath_init);
2076module_exit(dm_multipath_exit);
2077
2078MODULE_DESCRIPTION(DM_NAME " multipath target");
2079MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
2080MODULE_LICENSE("GPL");