/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-rq.h"
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	const char *hw_handler_name;
	char *hw_handler_params;

	spinlock_t lock;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	unsigned long flags;		/* Multipath state flags */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	atomic_t nr_valid_paths;	/* Total number of usable paths */
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	enum dm_queue_mode queue_mode;

	struct mutex work_mutex;
	struct work_struct trigger_event;

	struct work_struct process_queued_bios;
	struct bio_list queued_bios;
};

/*
 * Context information attached to each io we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);

/*-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------*/

#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */

/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/
static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = true;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
		atomic_set(&m->nr_valid_paths, 0);
		atomic_set(&m->pg_init_in_progress, 0);
		atomic_set(&m->pg_init_count, 0);
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);

		m->queue_mode = DM_TYPE_NONE;

		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_NONE) {
		/*
		 * Default to request-based.
		 */
		if (dm_use_blk_mq(dm_table_get_md(ti->table)))
			m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
		else
			m->queue_mode = DM_TYPE_REQUEST_BASED;
	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
		INIT_WORK(&m->process_queued_bios, process_queued_bios);
		/*
		 * bio-based doesn't support any direct scsi_dh management;
		 * it just discovers if a scsi_dh is attached.
		 */
		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
	}

	dm_table_set_type(ti->table, m->queue_mode);

	return 0;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static size_t multipath_per_bio_data_size(void)
{
	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
}

static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
	return dm_per_bio_data(bio, multipath_per_bio_data_size());
}

static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
{
	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	void *bio_details = mpio + 1;

	return bio_details;
}
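
/*
 * Layout of the per-bio data reserved by multipath_per_bio_data_size()
 * (a sketch of what the three helpers above rely on):
 *
 *   dm_per_bio_data(bio) -> +------------------------+
 *                           | struct dm_mpath_io     |  mapping context
 *                           +------------------------+
 *                           | struct dm_bio_details  |  saved for requeue
 *                           +------------------------+
 *
 * get_bio_details_from_bio() depends on the dm_bio_details sitting
 * directly behind the dm_mpath_io, hence the "mpio + 1" above.
 */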

static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
					struct dm_bio_details **bio_details_p)
{
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);

	memset(mpio, 0, sizeof(*mpio));
	memset(bio_details, 0, sizeof(*bio_details));
	dm_bio_record(bio_details, bio);

	if (mpio_p)
		*mpio_p = mpio;
	if (bio_details_p)
		*bio_details_p = bio_details;
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	lockdep_assert_held(&m->lock);

	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		return 0;

	atomic_inc(&m->pg_init_count);
	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			atomic_inc(&m->pg_init_in_progress);
	}
	return atomic_read(&m->pg_init_in_progress);
}

static int pg_init_all_paths(struct multipath *m)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	ret = __pg_init_all_paths(m);
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}

static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
	m->current_pg = pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
	} else {
		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
	}

	atomic_set(&m->pg_init_count, 0);
}

static struct pgpath *choose_path_in_pg(struct multipath *m,
					struct priority_group *pg,
					size_t nr_bytes)
{
	unsigned long flags;
	struct dm_path *path;
	struct pgpath *pgpath;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return ERR_PTR(-ENXIO);

	pgpath = path_to_pgpath(path);

	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
		/* Only update current_pgpath if pg changed */
		spin_lock_irqsave(&m->lock, flags);
		m->current_pgpath = pgpath;
		__switch_pg(m, pg);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return pgpath;
}

static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	unsigned long flags;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned bypassed = 1;

	if (!atomic_read(&m->nr_valid_paths)) {
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (READ_ONCE(m->next_pg)) {
		spin_lock_irqsave(&m->lock, flags);
		pg = m->next_pg;
		if (!pg) {
			spin_unlock_irqrestore(&m->lock, flags);
			goto check_current_pg;
		}
		m->next_pg = NULL;
		spin_unlock_irqrestore(&m->lock, flags);
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* Don't change PG until it has no remaining paths */
check_current_pg:
	pg = READ_ONCE(m->current_pg);
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == !!bypassed)
				continue;
			pgpath = choose_path_in_pg(m, pg, nr_bytes);
			if (!IS_ERR_OR_NULL(pgpath)) {
				if (!bypassed)
					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
				return pgpath;
			}
		}
	} while (bypassed--);

failed:
	spin_lock_irqsave(&m->lock, flags);
	m->current_pgpath = NULL;
	m->current_pg = NULL;
	spin_unlock_irqrestore(&m->lock, flags);

	return NULL;
}
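
/*
 * To summarize choose_pgpath() above: an explicitly requested group
 * (m->next_pg, set by switch_pg_num()) is tried first, then the group
 * already in use, and finally all groups are scanned twice - bypassed
 * groups are skipped on the first pass and used as a last resort on
 * the second, with MPATHF_PG_INIT_DELAY_RETRY set so that their
 * controllers are not hammered with back-to-back pg_init requests.
 */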

/*
 * dm_report_EIO() is a macro instead of a function to make pr_debug()
 * report the function name and line number of the function calling it.
 */
#define dm_report_EIO(m)						\
do {									\
	struct mapped_device *md = dm_table_get_md((m)->ti->table);	\
									\
	pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
		 dm_device_name(md),					\
		 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags),	\
		 dm_noflush_suspending((m)->ti));			\
} while (0)

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the multipath target.
 *
 * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
 * the same value then we are not between multipath_presuspend() and
 * multipath_resume() calls and we have no need to check
 * MPATHF_SAVED_QUEUE_IF_NO_PATH.
 */
static bool __must_push_back(struct multipath *m, unsigned long flags)
{
	return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
		dm_noflush_suspending(m->ti));
}

/*
 * Following functions use READ_ONCE to get atomic access to
 * all m->flags, to avoid taking the m->lock spinlock.
 */
static bool must_push_back_rq(struct multipath *m)
{
	unsigned long flags = READ_ONCE(m->flags);

	return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
}

static bool must_push_back_bio(struct multipath *m)
{
	unsigned long flags = READ_ONCE(m->flags);

	return __must_push_back(m, flags);
}

/*
 * Map cloned requests (request-based multipath)
 */
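/*
 * Return-value contract of the map callbacks below, as interpreted by
 * the dm core: DM_MAPIO_REMAPPED dispatches the clone down the chosen
 * path, DM_MAPIO_SUBMITTED means the target took ownership (the bio was
 * queued for process_queued_bios()), DM_MAPIO_REQUEUE and
 * DM_MAPIO_DELAY_REQUEUE hand the original I/O back to dm core for an
 * immediate or delayed retry, and DM_MAPIO_KILL fails it with an error.
 */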
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **__clone)
{
	struct multipath *m = ti->private;
	size_t nr_bytes = blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct request_queue *q;
	struct request *clone;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
		pgpath = choose_pgpath(m, nr_bytes);

	if (!pgpath) {
		if (must_push_back_rq(m))
			return DM_MAPIO_DELAY_REQUEUE;
		dm_report_EIO(m);	/* Failed */
		return DM_MAPIO_KILL;
	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		if (pg_init_all_paths(m))
			return DM_MAPIO_DELAY_REQUEUE;
		return DM_MAPIO_REQUEUE;
	}

	memset(mpio, 0, sizeof(*mpio));
	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;
	q = bdev_get_queue(bdev);
	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
	if (IS_ERR(clone)) {
		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
		bool queue_dying = blk_queue_dying(q);

		if (queue_dying) {
			atomic_inc(&m->pg_init_in_progress);
			activate_or_offline_path(pgpath);
		}
		return DM_MAPIO_DELAY_REQUEUE;
	}
	clone->bio = clone->biotail = NULL;
	clone->rq_disk = bdev->bd_disk;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	*__clone = clone;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static void multipath_release_clone(struct request *clone)
{
	blk_put_request(clone);
}

/*
 * Map cloned bios (bio-based multipath)
 */
static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
{
	size_t nr_bytes = bio->bi_iter.bi_size;
	struct pgpath *pgpath;
	unsigned long flags;
	bool queue_io;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
	if (!pgpath || !queue_io)
		pgpath = choose_pgpath(m, nr_bytes);

	if ((pgpath && queue_io) ||
	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
		/* Queue for the daemon to resubmit */
		spin_lock_irqsave(&m->lock, flags);
		bio_list_add(&m->queued_bios, bio);
		spin_unlock_irqrestore(&m->lock, flags);

		/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
		if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		else if (!queue_io)
			queue_work(kmultipathd, &m->process_queued_bios);
		return DM_MAPIO_SUBMITTED;
	}

	if (!pgpath) {
		if (must_push_back_bio(m))
			return DM_MAPIO_REQUEUE;
		dm_report_EIO(m);
		return DM_MAPIO_KILL;
	}

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bio->bi_status = 0;
	bio_set_dev(bio, pgpath->path.dev->bdev);
	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = NULL;

	multipath_init_per_bio_data(bio, &mpio, NULL);

	return __multipath_map_bio(m, bio, mpio);
}

static void process_queued_io_list(struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
	else if (m->queue_mode == DM_TYPE_BIO_BASED)
		queue_work(kmultipathd, &m->process_queued_bios);
}

static void process_queued_bios(struct work_struct *work)
{
	int r;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	struct multipath *m =
		container_of(work, struct multipath, process_queued_bios);

	bio_list_init(&bios);

	spin_lock_irqsave(&m->lock, flags);

	if (bio_list_empty(&m->queued_bios)) {
		spin_unlock_irqrestore(&m->lock, flags);
		return;
	}

	bio_list_merge(&bios, &m->queued_bios);
	bio_list_init(&m->queued_bios);

	spin_unlock_irqrestore(&m->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
		switch (r) {
		case DM_MAPIO_KILL:
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		case DM_MAPIO_REQUEUE:
			bio->bi_status = BLK_STS_DM_REQUEUE;
			bio_endio(bio);
			break;
		case DM_MAPIO_REMAPPED:
			generic_make_request(bio);
			break;
		case 0:
			break;
		default:
			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
		}
	}
	blk_finish_plug(&plug);
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
			    bool save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
		   (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
		   (!save_old_value && queue_if_no_path));
	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
	spin_unlock_irqrestore(&m->lock, flags);

	if (!queue_if_no_path) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	return 0;
}
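
/*
 * How the saved flag above interacts with suspend/resume:
 *
 *   multipath_presuspend() calls queue_if_no_path(m, false, true), so
 *   queueing is switched off while I/O drains and the old value of
 *   MPATHF_QUEUE_IF_NO_PATH is parked in MPATHF_SAVED_QUEUE_IF_NO_PATH;
 *   multipath_resume() then restores it from the saved bit.  For the
 *   "queue_if_no_path"/"fail_if_no_path" messages save_old_value is
 *   false and the saved bit simply follows the newly requested value.
 */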

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
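/*
 * An illustrative table line matching the grammar above (the device
 * numbers are made up): no feature or hardware handler args, two
 * priority groups of two paths each, round-robin within a group:
 *
 *   0 409600 multipath 0 0 2 1 \
 *           round-robin 0 2 1 8:16 1000 8:32 1000 \
 *           round-robin 0 2 1 8:48 1000 8:64 1000
 */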
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q = NULL;
	const char *attached_handler_name;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
		q = bdev_get_queue(p->path.dev->bdev);

	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
		if (attached_handler_name) {
			/*
			 * Clear any hw_handler_params associated with a
			 * handler that isn't already attached.
			 */
			if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
				kfree(m->hw_handler_params);
				m->hw_handler_params = NULL;
			}

			/*
			 * Reset hw_handler_name to match the attached handler
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = attached_handler_name;
		}
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			char b[BDEVNAME_SIZE];

			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
			       bdevname(p->path.dev->bdev, b));
			goto retain;
		}
		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware handler parameters";
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

 bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static const struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

 bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	if (m->queue_mode == DM_TYPE_BIO_BASED) {
		dm_consume_args(as, hw_argc);
		DMERR("bio-based multipath doesn't allow hardware handler args");
		return 0;
	}

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!m->hw_handler_name)
		return -EINVAL;

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 8, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "queue_mode") &&
		    (argc >= 1)) {
			const char *queue_mode_name = dm_shift_arg(as);

			if (!strcasecmp(queue_mode_name, "bio"))
				m->queue_mode = DM_TYPE_BIO_BASED;
			else if (!strcasecmp(queue_mode_name, "rq"))
				m->queue_mode = DM_TYPE_REQUEST_BASED;
			else if (!strcasecmp(queue_mode_name, "mq"))
				m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
			else {
				ti->error = "Unknown 'queue_mode' requested";
				r = -EINVAL;
			}
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* target arguments */
	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = alloc_multipath_stage2(ti, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;
		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		nr_valid_paths += pg->nr_pgpaths;
		atomic_set(&m->nr_valid_paths, nr_valid_paths);

		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	ti->num_write_zeroes_bios = 1;
	if (m->queue_mode == DM_TYPE_BIO_BASED)
		ti->per_io_data_size = multipath_per_bio_data_size();
	else
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

 bad:
	free_multipath(m);
	return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&m->pg_init_in_progress))
			break;

		io_schedule();
	}
	finish_wait(&m->pg_init_wait, &wait);
}

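/*
 * Quiesce all asynchronous multipath work.  The ordering below matters:
 * MPATHF_PG_INIT_DISABLED stops new pg_init activations from being
 * scheduled, the kmpath_handlerd flush and the pg_init wait drain the
 * activations already in flight, and only then are the generic
 * kmultipathd and trigger_event works flushed.
 */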
static void flush_multipath_work(struct multipath *m)
{
	set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
	smp_mb__after_atomic();

	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work(&m->trigger_event);

	clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
	smp_mb__after_atomic();
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = false;
	pgpath->fail_count++;

	atomic_dec(&m->nr_valid_paths);

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0, run_queue = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;
	unsigned nr_valid_paths;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	DMWARN("Reinstating path %s.", pgpath->path.dev->name);

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = true;

	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
	if (nr_valid_paths == 1) {
		m->current_pgpath = NULL;
		run_queue = 1;
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			atomic_inc(&m->pg_init_in_progress);
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (run_queue) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      bool bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = false;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	bool limit_reached = false;

	spin_lock_irqsave(&m->lock, flags);

	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
	else
		limit_reached = true;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}
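
/*
 * Note that pg_init_count is incremented once per __pg_init_all_paths()
 * round, so with pg_init_retries left at its default of 0 the first
 * retryable pg_init failure already reaches the limit and pg_init_done()
 * fails the path instead of retrying.
 */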

static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	bool delay_retry = false;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, true);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = true;
		/* fall through */
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	case SCSI_DH_DEV_OFFLINED:
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future we
		 * should add a hint to the io error handling layer.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
		pg->bypassed = false;

	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
		/* Activations of other paths are still on going */
		goto out;

	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		if (delay_retry)
			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
		else
			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);

		if (__pg_init_all_paths(m))
			goto out;
	}
	clear_bit(MPATHF_QUEUE_IO, &m->flags);

	process_queued_io_list(m);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}

static void activate_or_offline_path(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	if (pgpath->is_active && !blk_queue_dying(q))
		scsi_dh_activate(q, pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

static void activate_path_work(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	activate_or_offline_path(pgpath);
}

static int noretry_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
		return 1;
	}

	/* Anything else could be a path failure, so should be retried */
	return 0;
}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    blk_status_t error, union map_info *map_context)
{
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct pgpath *pgpath = mpio->pgpath;
	int r = DM_ENDIO_DONE;

	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * bio clones.  If we queue the original request here,
	 * we have possibility to re-wind/re-issue the same original request,
	 * which leads to a dangerous case.
	 */
	if (error && !noretry_error(error)) {
		struct multipath *m = ti->private;

		r = DM_ENDIO_REQUEUE;

		if (pgpath)
			fail_path(pgpath);

		if (atomic_read(&m->nr_valid_paths) == 0 &&
		    !must_push_back_rq(m)) {
			if (error == BLK_STS_IOERR)
				dm_report_EIO(m);
			/* complete with the original error */
			r = DM_ENDIO_DONE;
		}
	}

	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}

	return r;
}

static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
				blk_status_t *error)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
	struct pgpath *pgpath = mpio->pgpath;
	unsigned long flags;
	int r = DM_ENDIO_DONE;

	if (!*error || noretry_error(*error))
		goto done;

	if (pgpath)
		fail_path(pgpath);

	if (atomic_read(&m->nr_valid_paths) == 0 &&
	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
		if (must_push_back_bio(m)) {
			r = DM_ENDIO_REQUEUE;
		} else {
			dm_report_EIO(m);
			*error = BLK_STS_IOERR;
		}
		goto done;
	}

	/* Queue for the daemon to resubmit */
	dm_bio_restore(get_bio_details_from_bio(clone), clone);

	spin_lock_irqsave(&m->lock, flags);
	bio_list_add(&m->queued_bios, clone);
	spin_unlock_irqrestore(&m->lock, flags);
	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
		queue_work(kmultipathd, &m->process_queued_bios);

	r = DM_ENDIO_INCOMPLETE;
done:
	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}

	return r;
}

/*
 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	queue_if_no_path(m, false, true);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
		   test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
	spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
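/*
 * Group state codes emitted below: A = currently active, D = disabled
 * (bypassed), E = enabled but not active.  Per-path state codes:
 * A = active, F = failed.
 */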
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned status_flags, char *result, unsigned maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
		       atomic_read(&m->pg_init_count));
	else {
		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);

		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
			DMEMIT("retain_attached_hw_handler ");
		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
			switch (m->queue_mode) {
			case DM_TYPE_BIO_BASED:
				DMEMIT("queue_mode bio ");
				break;
			case DM_TYPE_MQ_REQUEST_BASED:
				DMEMIT("queue_mode mq ");
				break;
			default:
				WARN_ON_ONCE(true);
				break;
			}
		}
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}

static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, false, false);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], true);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], false);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}
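
/*
 * The messages handled above are sent from userspace, e.g. (with a
 * hypothetical map name and path device):
 *
 *   dmsetup message mpath0 0 queue_if_no_path
 *   dmsetup message mpath0 0 fail_path 8:32
 *   dmsetup message mpath0 0 switch_group 2
 */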

static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev, fmode_t *mode)
{
	struct multipath *m = ti->private;
	struct pgpath *current_pgpath;
	int r;

	current_pgpath = READ_ONCE(m->current_pgpath);
	if (!current_pgpath)
		current_pgpath = choose_pgpath(m, 0);

	if (current_pgpath) {
		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
			*bdev = current_pgpath->path.dev->bdev;
			*mode = current_pgpath->path.dev->mode;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		else
			r = -EIO;
	}

	if (r == -ENOTCONN) {
		if (!READ_ONCE(m->current_pg)) {
			/* Path status changed, redo selection */
			(void) choose_pgpath(m, 0);
		}
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return r;
}

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy", only when we can map I/Os but underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise,
 * dm core won't give us I/Os and we can't do proper handling.
 */
static int multipath_busy(struct dm_target *ti)
{
	bool busy = false, has_active = false;
	struct multipath *m = ti->private;
	struct priority_group *pg, *next_pg;
	struct pgpath *pgpath;

	/* pg_init in progress */
	if (atomic_read(&m->pg_init_in_progress))
		return true;

	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
	if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);

	/* Guess which priority_group will be used at next mapping time */
	pg = READ_ONCE(m->current_pg);
	next_pg = READ_ONCE(m->next_pg);
	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
		pg = next_pg;

	if (!pg) {
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call choose_pgpath() here to avoid to trigger
		 * pg_init just by busy checking.
		 * So we don't know whether underlying devices we will be using
		 * at next mapping time are busy or not. Just try mapping.
		 */
		return busy;
	}

	/*
	 * If there is one non-busy active path at least, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	 */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}
	}

	if (!has_active) {
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = false;
	}

	return busy;
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 12, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.map = multipath_map_bio,
	.end_io = multipath_end_io_bio,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{
	int r;

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("request-based register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	return 0;

bad_register_target:
	destroy_workqueue(kmpath_handlerd);
bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");