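/*
 * scsi_scan.c
 *
 * SCSI mid-layer device scanning: probe targets with INQUIRY and
 * REPORT LUNS, allocate scsi_target and scsi_device structures for
 * whatever responds, and register the resulting devices with sysfs.
 * Both synchronous and asynchronous host scans are handled here.
 */
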
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_dh.h>
#include <scsi/scsi_eh.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define ALLOC_FAILURE_MSG	KERN_ERR "%s: Allocation failure during" \
	" SCSI scanning, some SCSI devices might not be configured\n"

#define SCSI_TIMEOUT (2*HZ)
#define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)

#define SCSI_UID_SER_NUM 'S'
#define SCSI_UID_UNKNOWN 'Z'
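/*
 * Return values of some of the scanning functions.
 *
 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target,
 * this includes allocation failures preventing IO from being sent.
 *
 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
 * available on the given LUN.
 *
 * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available
 * on the given LUN.
 */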
#define SCSI_SCAN_NO_RESPONSE		0
#define SCSI_SCAN_TARGET_PRESENT	1
#define SCSI_SCAN_LUN_PRESENT		2

static const char *scsi_null_device_strs = "nullnullnullnull";

#define MAX_SCSI_LUNS	512

#ifdef CONFIG_SCSI_MULTI_LUN
static unsigned int max_scsi_luns = MAX_SCSI_LUNS;
#else
static unsigned int max_scsi_luns = 1;
#endif

module_param_named(max_luns, max_scsi_luns, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(max_luns,
		 "last scsi LUN (should be between 1 and 2^32-1)");

#ifdef CONFIG_SCSI_SCAN_ASYNC
#define SCSI_SCAN_TYPE_DEFAULT "async"
#else
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif

char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;

#define MAX_INIT_REPORT_LUNS 511

module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
		    S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
		 "Setting to 'manual' disables automatic scanning, but allows "
		 "for manual device scan via the 'scan' sysfs attribute.");

static unsigned int max_scsi_report_luns = 16383;

module_param_named(max_report_luns, max_scsi_report_luns, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(max_report_luns,
		 "REPORT LUNS maximum number of LUNS received (should be"
		 " between 1 and 16383)");

static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;

module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(inq_timeout,
		 "Timeout (in seconds) waiting for devices to answer INQUIRY."
		 " Default is 20. Some devices may need more; most need less.");

static DEFINE_SPINLOCK(async_scan_lock);
static LIST_HEAD(scanning_hosts);

struct async_scan_data {
	struct list_head list;
	struct Scsi_Host *shost;
	struct completion prev_finished;
};
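/**
 * scsi_complete_async_scans - Wait for asynchronous scans to complete
 *
 * When this function returns, any host which started scanning before
 * this function was called will have finished its scan.  Hosts that
 * started scanning after this function was called may or may not have
 * finished.
 */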
int scsi_complete_async_scans(void)
{
	struct async_scan_data *data;

	do {
		if (list_empty(&scanning_hosts))
			return 0;
		/*
		 * If we can't get memory immediately, that's OK.  Just
		 * sleep a little.  Even if we never get memory, the async
		 * scans will finish eventually.
		 */
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			msleep(1);
	} while (!data);

	data->shost = NULL;
	init_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	/* Check that there's still somebody else on the list */
	if (list_empty(&scanning_hosts))
		goto done;
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
	wait_for_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	list_del(&data->list);
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
				struct async_scan_data, list);
		complete(&next->prev_finished);
	}
 done:
	spin_unlock(&async_scan_lock);

	kfree(data);
	return 0;
}
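/**
 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
 * @sdev:	scsi device to send command to
 * @result:	area to store the result of the MODE SENSE
 *
 * Description:
 *     Send a vendor specific MODE SENSE (not a MODE SELECT) command.
 *     Called for BLIST_KEY devices.
 **/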
static void scsi_unlock_floptical(struct scsi_device *sdev,
				  unsigned char *result)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];

	sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
	scsi_cmd[0] = MODE_SENSE;
	scsi_cmd[1] = 0;
	scsi_cmd[2] = 0x2e;
	scsi_cmd[3] = 0;
	scsi_cmd[4] = 0x2a;
	scsi_cmd[5] = 0;
	scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
			 SCSI_TIMEOUT, 3, NULL);
}
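/**
 * scsi_alloc_sdev - allocate and setup a scsi_Device
 * @starget: which target to allocate a &scsi_device for
 * @lun: which lun
 * @hostdata: usually NULL and set by ->slave_alloc instead
 *
 * Description:
 *     Allocate, initialize for io, and return a pointer to a scsi_Device.
 *     Stores the @shost, @channel, @id, and @lun in the scsi_Device, and
 *     adds scsi_Device to the appropriate list.
 *
 * Return value:
 *     scsi_Device pointer, or NULL on failure.
 **/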
static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
					   unsigned int lun, void *hostdata)
{
	struct scsi_device *sdev;
	int display_failure_msg = 1, ret;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	extern void scsi_evt_thread(struct work_struct *work);
	extern void scsi_requeue_run_queue(struct work_struct *work);

	sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
		       GFP_ATOMIC);
	if (!sdev)
		goto out;

	sdev->vendor = scsi_null_device_strs;
	sdev->model = scsi_null_device_strs;
	sdev->rev = scsi_null_device_strs;
	sdev->host = shost;
	sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
	sdev->id = starget->id;
	sdev->lun = lun;
	sdev->channel = starget->channel;
	sdev->sdev_state = SDEV_CREATED;
	INIT_LIST_HEAD(&sdev->siblings);
	INIT_LIST_HEAD(&sdev->same_target_siblings);
	INIT_LIST_HEAD(&sdev->cmd_list);
	INIT_LIST_HEAD(&sdev->starved_entry);
	INIT_LIST_HEAD(&sdev->event_list);
	spin_lock_init(&sdev->list_lock);
	spin_lock_init(&sdev->inquiry_lock);
	INIT_WORK(&sdev->event_work, scsi_evt_thread);
	INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);

	sdev->sdev_gendev.parent = get_device(&starget->dev);
	sdev->sdev_target = starget;

	/* usually NULL and set by ->slave_alloc instead */
	sdev->hostdata = hostdata;

	/* if the device needs this changing, it may do so in the
	 * slave_configure function */
	sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;

	/*
	 * Some low level driver could use device->type
	 */
	sdev->type = -1;

	/*
	 * Assume that the device will have handshaking problems,
	 * and then fix this field later if it turns out it
	 * doesn't
	 */
	sdev->borken = 1;

	if (shost_use_blk_mq(shost))
		sdev->request_queue = scsi_mq_alloc_queue(sdev);
	else
		sdev->request_queue = scsi_alloc_queue(sdev);
	if (!sdev->request_queue) {
		/* release fn is set up in scsi_sysfs_device_initialize, so
		 * have to free and put manually here */
		put_device(&starget->dev);
		kfree(sdev);
		goto out;
	}
	WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
	sdev->request_queue->queuedata = sdev;
	if (!shost_use_blk_mq(sdev->host) && shost->hostt->use_host_wide_tags) {
		blk_queue_init_tags(sdev->request_queue,
				    sdev->host->cmd_per_lun, shost->bqt,
				    shost->hostt->tag_alloc_policy);
	}
	scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun ?
					sdev->host->cmd_per_lun : 1);

	scsi_sysfs_device_initialize(sdev);

	if (shost->hostt->slave_alloc) {
		ret = shost->hostt->slave_alloc(sdev);
		if (ret) {
			/*
			 * if LLDD reports slave not present, don't clutter
			 * the console with alloc failure messages
			 */
			if (ret == -ENXIO)
				display_failure_msg = 0;
			goto out_device_destroy;
		}
	}

	return sdev;

out_device_destroy:
	__scsi_remove_device(sdev);
out:
	if (display_failure_msg)
		printk(ALLOC_FAILURE_MSG, __func__);
	return NULL;
}

static void scsi_target_destroy(struct scsi_target *starget)
{
	struct device *dev = &starget->dev;
	struct Scsi_Host *shost = dev_to_shost(dev->parent);
	unsigned long flags;

	BUG_ON(starget->state == STARGET_DEL);
	starget->state = STARGET_DEL;
	transport_destroy_device(dev);
	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->hostt->target_destroy)
		shost->hostt->target_destroy(starget);
	list_del_init(&starget->siblings);
	spin_unlock_irqrestore(shost->host_lock, flags);
	put_device(dev);
}

static void scsi_target_dev_release(struct device *dev)
{
	struct device *parent = dev->parent;
	struct scsi_target *starget = to_scsi_target(dev);

	kfree(starget);
	put_device(parent);
}

static struct device_type scsi_target_type = {
	.name =		"scsi_target",
	.release =	scsi_target_dev_release,
};

int scsi_is_target_device(const struct device *dev)
{
	return dev->type == &scsi_target_type;
}
EXPORT_SYMBOL(scsi_is_target_device);

static struct scsi_target *__scsi_find_target(struct device *parent,
					      int channel, uint id)
{
	struct scsi_target *starget, *found_starget = NULL;
	struct Scsi_Host *shost = dev_to_shost(parent);

	list_for_each_entry(starget, &shost->__targets, siblings) {
		if (starget->id == id &&
		    starget->channel == channel) {
			found_starget = starget;
			break;
		}
	}
	if (found_starget)
		get_device(&found_starget->dev);

	return found_starget;
}
/**
 * scsi_target_reap_ref_release - remove target from visibility
 * @kref: the reap_ref in the target being released
 *
 * Called on last put of reap_ref, which is the indication that no device
 * under this target is visible anymore, so render the target invisible in
 * sysfs.  Note: we have to be in user context here because the target reaps
 * should be done in places where the scsi device visibility is being removed.
 */
static void scsi_target_reap_ref_release(struct kref *kref)
{
	struct scsi_target *starget
		= container_of(kref, struct scsi_target, reap_ref);

	/*
	 * if we get here and the target is still in a CREATED state that
	 * means it was allocated but never made visible (because a scan
	 * turned up no LUNs), so don't call device_del() on it.
	 */
	if ((starget->state != STARGET_CREATED) &&
	    (starget->state != STARGET_CREATED_REMOVE)) {
		transport_remove_device(&starget->dev);
		device_del(&starget->dev);
	}
	scsi_target_destroy(starget);
}

static void scsi_target_reap_ref_put(struct scsi_target *starget)
{
	kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
}
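/**
 * scsi_alloc_target - allocate a new or find an existing target
 * @parent:	parent of the target (need not be a scsi host)
 * @channel:	target channel number (zero if no channels)
 * @id:		target id number
 *
 * Return an existing target if one is found, otherwise allocate a new
 * target, register it with the transport class and return it.
 */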
static struct scsi_target *scsi_alloc_target(struct device *parent,
					     int channel, uint id)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	struct device *dev = NULL;
	unsigned long flags;
	const int size = sizeof(struct scsi_target)
		+ shost->transportt->target_size;
	struct scsi_target *starget;
	struct scsi_target *found_target;
	int error, ref_got;

	starget = kzalloc(size, GFP_KERNEL);
	if (!starget) {
		printk(KERN_ERR "%s: allocation failure\n", __func__);
		return NULL;
	}
	dev = &starget->dev;
	device_initialize(dev);
	kref_init(&starget->reap_ref);
	dev->parent = get_device(parent);
	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
	dev->bus = &scsi_bus_type;
	dev->type = &scsi_target_type;
	starget->id = id;
	starget->channel = channel;
	starget->can_queue = 0;
	INIT_LIST_HEAD(&starget->siblings);
	INIT_LIST_HEAD(&starget->devices);
	starget->state = STARGET_CREATED;
	starget->scsi_level = SCSI_2;
	starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
 retry:
	spin_lock_irqsave(shost->host_lock, flags);

	found_target = __scsi_find_target(parent, channel, id);
	if (found_target)
		goto found;
	/* allocate and add */
	list_add_tail(&starget->siblings, &shost->__targets);
	spin_unlock_irqrestore(shost->host_lock, flags);

	transport_setup_device(dev);
	if (shost->hostt->target_alloc) {
		error = shost->hostt->target_alloc(starget);

		if (error) {
			dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
			/* tear the target down directly; it was never
			 * made visible, so scsi_target_reap() must not
			 * be used here */
			scsi_target_destroy(starget);
			return NULL;
		}
	}
	get_device(dev);

	return starget;

 found:
	/*
	 * An existing target was found while we held the host lock; try to
	 * take a reference on it.  This can fail if the target is already
	 * on its way out (its reap_ref has dropped to zero).
	 */
	ref_got = kref_get_unless_zero(&found_target->reap_ref);

	spin_unlock_irqrestore(shost->host_lock, flags);
	if (ref_got) {
		put_device(dev);
		return found_target;
	}
	/*
	 * The found target is dying.  Drop the device reference taken by
	 * __scsi_find_target() and wait for it to go away completely
	 * before retrying the whole allocation.
	 */
	put_device(&found_target->dev);
	/*
	 * Sleep briefly rather than busy-waiting; the dying target should
	 * disappear as soon as its final references are dropped.
	 */
	msleep(1);
	goto retry;
}
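/**
 * scsi_target_reap - check to see if target is in use and destroy if not
 * @starget: target to be checked
 *
 * This is used after removing a LUN or doing a last put of the target
 * it checks atomically that nothing is using the target and removes
 * it if so.
 */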
void scsi_target_reap(struct scsi_target *starget)
{
	/*
	 * serious problem if this triggers: STARGET_DEL is only set when
	 * the reap_ref drops to zero, so we're trying to do another final
	 * put on an already deleted target.
	 */
	BUG_ON(starget->state == STARGET_DEL);
	scsi_target_reap_ref_put(starget);
}
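/**
 * scsi_sanitize_inquiry_string - remove non-graphic chars from an
 *                                INQUIRY result string
 * @s: INQUIRY result string to sanitize
 * @len: length of the string
 *
 * Description:
 *	The SCSI spec says that INQUIRY vendor, product, and revision
 *	strings must consist entirely of graphic ASCII characters,
 *	padded on the right with spaces.  Since not all devices obey
 *	this rule, we will replace non-graphic or non-ASCII characters
 *	with spaces.  Exception: a NUL character is interpreted as a
 *	string terminator, so all the following characters are set to
 *	spaces.
 **/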
void scsi_sanitize_inquiry_string(unsigned char *s, int len)
{
	int terminated = 0;

	for (; len > 0; (--len, ++s)) {
		if (*s == 0)
			terminated = 1;
		if (terminated || *s < 0x20 || *s > 0x7e)
			*s = ' ';
	}
}
EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
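/**
 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
 * @sdev:	scsi_device to probe
 * @inq_result:	area to store the INQUIRY result
 * @result_len: len of inq_result
 * @bflags:	store any bflags found here
 *
 * Description:
 *     Probe the lun associated with @sdev using a standard SCSI INQUIRY;
 *
 *     If the INQUIRY is successful, zero is returned and the
 *     INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
 *     are copied to the scsi_device and any flags value is stored in
 *     *@bflags.
 **/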
static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
			  int result_len, int *bflags)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int first_inquiry_len, try_inquiry_len, next_inquiry_len;
	int response_len = 0;
	int pass, count, result;
	struct scsi_sense_hdr sshdr;

	*bflags = 0;

	/* Perform up to 3 passes.  The first pass uses a conservative
	 * transfer length of 36 unless sdev->inquiry_len specifies a
	 * different value. */
	first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
	try_inquiry_len = first_inquiry_len;
	pass = 1;

 next_pass:
	SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY pass %d length %d\n",
				pass, try_inquiry_len));

	/* Each pass gets up to three chances to ignore Unit Attention */
	for (count = 0; count < 3; ++count) {
		int resid;

		memset(scsi_cmd, 0, 6);
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) try_inquiry_len;

		memset(inq_result, 0, try_inquiry_len);

		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
					  inq_result, try_inquiry_len, &sshdr,
					  HZ / 2 + HZ * scsi_inq_timeout, 3,
					  &resid);

		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY %s with code 0x%x\n",
				result ? "failed" : "successful", result));

		if (result) {
			/*
			 * not-ready to ready transition [asc/ascq=0x28/0x0]
			 * or power-on, reset [asc/ascq=0x29/0x0], continue.
			 * INQUIRY should not yield UNIT_ATTENTION
			 * but many buggy devices do so anyway.
			 */
			if ((driver_byte(result) & DRIVER_SENSE) &&
			    scsi_sense_valid(&sshdr)) {
				if ((sshdr.sense_key == UNIT_ATTENTION) &&
				    ((sshdr.asc == 0x28) ||
				     (sshdr.asc == 0x29)) &&
				    (sshdr.ascq == 0))
					continue;
			}
		} else {
			/*
			 * if nothing was transferred, we retry. It's useful
			 * to milk the first pass here, since some devices do
			 * not respond correctly to the first INQUIRY.
			 */
			if (resid == try_inquiry_len)
				continue;
		}
		break;
	}

	if (result == 0) {
		scsi_sanitize_inquiry_string(&inq_result[8], 8);
		scsi_sanitize_inquiry_string(&inq_result[16], 16);
		scsi_sanitize_inquiry_string(&inq_result[32], 4);

		response_len = inq_result[4] + 5;
		if (response_len > 255)
			response_len = first_inquiry_len;	/* sanity */

		/*
		 * Get any flags for this device.
		 */
		*bflags = scsi_get_device_flags(sdev, &inq_result[8],
						&inq_result[16]);

		/* After the first pass, work out how much INQUIRY data the
		 * device can actually provide and whether a second, longer
		 * pass is worthwhile. */
		if (pass == 1) {
			if (BLIST_INQUIRY_36 & *bflags)
				next_inquiry_len = 36;
			else if (BLIST_INQUIRY_58 & *bflags)
				next_inquiry_len = 58;
			else if (sdev->inquiry_len)
				next_inquiry_len = sdev->inquiry_len;
			else
				next_inquiry_len = response_len;

			/* If more data is available perform the second pass */
			if (next_inquiry_len > try_inquiry_len) {
				try_inquiry_len = next_inquiry_len;
				pass = 2;
				goto next_pass;
			}
		}

	} else if (pass == 2) {
		sdev_printk(KERN_INFO, sdev,
			    "scsi scan: %d byte inquiry failed. "
			    "Consider BLIST_INQUIRY_36 for this device\n",
			    try_inquiry_len);

		/* If this pass failed, the third pass goes back and repeats
		 * the first pass, which should hopefully work. */
		try_inquiry_len = first_inquiry_len;
		pass = 3;
		goto next_pass;
	}

	/* If the last transfer attempt got an error, assume the
	 * peripheral doesn't exist or is dead. */
	if (result)
		return -EIO;

	/* Don't report any more data than the device says is valid */
	sdev->inquiry_len = min(try_inquiry_len, response_len);

	/*
	 * The scanning code needs at least 36 bytes of INQUIRY data to
	 * locate the vendor, model and revision strings; devices that
	 * return less are forced up to 36 and a warning is printed once
	 * per host.
	 */
	if (sdev->inquiry_len < 36) {
		if (!sdev->host->short_inquiry) {
			shost_printk(KERN_INFO, sdev->host,
				    "scsi scan: INQUIRY result too short (%d),"
				    " using 36\n", sdev->inquiry_len);
			sdev->host->short_inquiry = 1;
		}
		sdev->inquiry_len = 36;
	}

	/*
	 * Derive the SCSI level from the ANSI version field (byte 2).
	 * The kernel's SCSI_* constants are offset by one from the
	 * INQUIRY encoding, and a SCSI-1 device with the CCS bit set is
	 * promoted to SCSI_1_CCS.
	 */
	sdev->scsi_level = inq_result[2] & 0x07;
	if (sdev->scsi_level >= 2 ||
	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
		sdev->scsi_level++;
	sdev->sdev_target->scsi_level = sdev->scsi_level;

	/*
	 * If SCSI-2 or lower, and if the transport requires it,
	 * store the LUN value in CDB[1].
	 */
	sdev->lun_in_cdb = 0;
	if (sdev->scsi_level <= SCSI_2 &&
	    sdev->scsi_level != SCSI_UNKNOWN &&
	    !sdev->host->no_scsi2_lun_in_cdb)
		sdev->lun_in_cdb = 1;

	return 0;
}
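/**
 * scsi_add_lun - allocate and fully initialize a scsi_device
 * @sdev:	holds information to be stored in the new scsi_device
 * @inq_result:	holds the result of a previous INQUIRY to the LUN
 * @bflags:	black/white list flag
 * @async:	1 if this device is being scanned asynchronously
 *
 * Description:
 *     Initialize the scsi_device @sdev.  Optionally set fields based
 *     on values in *@bflags.
 *
 * Return:
 *     SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
 *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
 **/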
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
		int *bflags, int async)
{
	int ret;

	/*
	 * The INQUIRY data is saved mainly so the vendor, model and
	 * revision strings can be exported via sysfs; at least 36 bytes
	 * are kept so that those offsets are always valid.
	 */
	sdev->inquiry = kmemdup(inq_result,
				max_t(size_t, sdev->inquiry_len, 36),
				GFP_ATOMIC);
	if (sdev->inquiry == NULL)
		return SCSI_SCAN_NO_RESPONSE;

	sdev->vendor = (char *) (sdev->inquiry + 8);
	sdev->model = (char *) (sdev->inquiry + 16);
	sdev->rev = (char *) (sdev->inquiry + 32);

	if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
		/*
		 * sata emulation layer device.  This is a hack to work around
		 * the SATL power management specifications which state that
		 * when the SATL detects the device has gone into standby
		 * mode, it shall respond with NOT READY.
		 */
		sdev->allow_restart = 1;
	}

	if (*bflags & BLIST_ISROM) {
		sdev->type = TYPE_ROM;
		sdev->removable = 1;
	} else {
		sdev->type = (inq_result[0] & 0x1f);
		sdev->removable = (inq_result[1] & 0x80) >> 7;

		/*
		 * Well-known LUNs must report the corresponding W-LUN
		 * device type; override a bogus type so the right driver
		 * binds to the device.
		 */
		if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
			sdev_printk(KERN_WARNING, sdev,
				"%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
				__func__, sdev->type, (unsigned int)sdev->lun);
			sdev->type = TYPE_WLUN;
		}

	}

	switch (sdev->type) {
	case TYPE_RBC:
	case TYPE_TAPE:
	case TYPE_DISK:
	case TYPE_PRINTER:
	case TYPE_MOD:
	case TYPE_PROCESSOR:
	case TYPE_SCANNER:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_ENCLOSURE:
	case TYPE_COMM:
	case TYPE_RAID:
	case TYPE_OSD:
		sdev->writeable = 1;
		break;
	case TYPE_ROM:
	case TYPE_WORM:
		sdev->writeable = 0;
		break;
	default:
		sdev_printk(KERN_INFO, sdev, "unknown device type %d\n",
			    sdev->type);
	}

	if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
		/* RBC and MMC devices can return SCSI-3 compliance and yet
		 * still not support REPORT LUNS, so make them act as
		 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
		 * specifically set */
		if ((*bflags & BLIST_REPORTLUN2) == 0)
			*bflags |= BLIST_NOREPORTLUN;
	}

	sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
	sdev->lockable = sdev->removable;
	sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);

	if (sdev->scsi_level >= SCSI_3 ||
	    (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
		sdev->ppr = 1;
	if (inq_result[7] & 0x60)
		sdev->wdtr = 1;
	if (inq_result[7] & 0x10)
		sdev->sdtr = 1;

	sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
			"ANSI: %d%s\n", scsi_device_type(sdev->type),
			sdev->vendor, sdev->model, sdev->rev,
			sdev->inq_periph_qual, inq_result[2] & 0x07,
			(inq_result[3] & 0x0f) == 1 ? " CCS" : "");

	if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
	    !(*bflags & BLIST_NOTQ))
		sdev->tagged_supported = 1;

	/*
	 * Some devices (Texel CD ROM drives) have handshaking problems
	 * when used with the Seagate controllers. borken is initialized
	 * to 1, and then set it to 0 here.
	 */
	if ((*bflags & BLIST_BORKEN) == 0)
		sdev->borken = 0;

	if (*bflags & BLIST_NO_ULD_ATTACH)
		sdev->no_uld_attach = 1;

	/*
	 * Apparently some really broken devices (contrary to the SCSI
	 * standards) need to be selected without asserting ATN
	 */
	if (*bflags & BLIST_SELECT_NO_ATN)
		sdev->select_no_atn = 1;

	/*
	 * Maximum 512 sector transfer length
	 * broken RA4x00 Compaq Disk Array
	 */
	if (*bflags & BLIST_MAX_512)
		blk_queue_max_hw_sectors(sdev->request_queue, 512);
	/*
	 * Max 1024 sector transfer length for targets that report incorrect
	 * max/optimal lengths and relied on the old block layer safe default
	 */
	else if (*bflags & BLIST_MAX_1024)
		blk_queue_max_hw_sectors(sdev->request_queue, 1024);

	/*
	 * Some devices may not want to have a start command automatically
	 * issued when a device is added.
	 */
	if (*bflags & BLIST_NOSTARTONADD)
		sdev->no_start_on_add = 1;

	if (*bflags & BLIST_SINGLELUN)
		scsi_target(sdev)->single_lun = 1;

	sdev->use_10_for_rw = 1;

	if (*bflags & BLIST_MS_SKIP_PAGE_08)
		sdev->skip_ms_page_8 = 1;

	if (*bflags & BLIST_MS_SKIP_PAGE_3F)
		sdev->skip_ms_page_3f = 1;

	if (*bflags & BLIST_USE_10_BYTE_MS)
		sdev->use_10_for_ms = 1;

	/* some devices don't like REPORT SUPPORTED OPERATION CODES
	 * and will simply timeout causing sd_mod init to take a very
	 * very long time */
	if (*bflags & BLIST_NO_RSOC)
		sdev->no_report_opcodes = 1;

	/* set the device running here so that slave configure
	 * may do I/O */
	ret = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (ret) {
		ret = scsi_device_set_state(sdev, SDEV_BLOCK);

		if (ret) {
			sdev_printk(KERN_ERR, sdev,
				    "in wrong state %s to complete scan\n",
				    scsi_device_state_name(sdev->sdev_state));
			return SCSI_SCAN_NO_RESPONSE;
		}
	}

	if (*bflags & BLIST_MS_192_BYTES_FOR_3F)
		sdev->use_192_bytes_for_3f = 1;

	if (*bflags & BLIST_NOT_LOCKABLE)
		sdev->lockable = 0;

	if (*bflags & BLIST_RETRY_HWERROR)
		sdev->retry_hwerror = 1;

	if (*bflags & BLIST_NO_DIF)
		sdev->no_dif = 1;

	if (*bflags & BLIST_UNMAP_LIMIT_WS)
		sdev->unmap_limit_for_ws = 1;

	sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;

	if (*bflags & BLIST_TRY_VPD_PAGES)
		sdev->try_vpd_pages = 1;
	else if (*bflags & BLIST_SKIP_VPD_PAGES)
		sdev->skip_vpd_pages = 1;

	transport_configure_device(&sdev->sdev_gendev);

	if (sdev->host->hostt->slave_configure) {
		ret = sdev->host->hostt->slave_configure(sdev);
		if (ret) {
			/*
			 * if LLDD reports slave not present, don't clutter
			 * the console with failure messages
			 */
			if (ret != -ENXIO) {
				sdev_printk(KERN_ERR, sdev,
					"failed to configure device\n");
			}
			return SCSI_SCAN_NO_RESPONSE;
		}
	}

	if (sdev->scsi_level >= SCSI_3)
		scsi_attach_vpd(sdev);

	sdev->max_queue_depth = sdev->queue_depth;

	/*
	 * Ok, the device is now all set up, we can
	 * register it and tell the rest of the kernel
	 * about it.
	 */
	if (!async && scsi_sysfs_add_sdev(sdev) != 0)
		return SCSI_SCAN_NO_RESPONSE;

	return SCSI_SCAN_LUN_PRESENT;
}

#ifdef CONFIG_SCSI_LOGGING
/**
 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
 * @buf:   Output buffer with at least end-first+1 bytes of space
 * @inq:   Inquiry buffer (input)
 * @first: Offset of string into inq
 * @end:   Index after last character in inq
 */
static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
				   unsigned first, unsigned end)
{
	unsigned term = 0, idx;

	for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) {
		if (inq[idx+first] > ' ') {
			buf[idx] = inq[idx+first];
			term = idx+1;
		} else {
			buf[idx] = ' ';
		}
	}
	buf[term] = 0;
	return buf;
}
#endif
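/**
 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
 * @starget:	pointer to target device structure
 * @lun:	LUN of target device
 * @bflagsp:	store bflags here if not NULL
 * @sdevp:	probe the LUN corresponding to this scsi_device
 * @rescan:	if not equal to SCSI_SCAN_INITIAL skip some code only
 *		needed on first scan
 * @hostdata:	passed to scsi_alloc_sdev()
 *
 * Description:
 *     Call scsi_probe_lun, if a LUN with an attached device is found,
 *     allocate and set it up by calling scsi_add_lun.
 *
 * Return:
 *     - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
 *     - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
 *         attached at the LUN
 *     - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
 **/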
static int scsi_probe_and_add_lun(struct scsi_target *starget,
				  uint lun, int *bflagsp,
				  struct scsi_device **sdevp,
				  enum scsi_scan_mode rescan,
				  void *hostdata)
{
	struct scsi_device *sdev;
	unsigned char *result;
	int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	/*
	 * If a device already exists at this LUN, reuse it unless this is
	 * the initial scan and the existing device was never fully probed
	 * (still in the "created" state).
	 */
	sdev = scsi_device_lookup_by_target(starget, lun);
	if (sdev) {
		if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
			SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: device exists on %s\n",
				dev_name(&sdev->sdev_gendev)));
			if (sdevp)
				*sdevp = sdev;
			else
				scsi_device_put(sdev);

			if (bflagsp)
				*bflagsp = scsi_get_device_flags(sdev,
								 sdev->vendor,
								 sdev->model);
			return SCSI_SCAN_LUN_PRESENT;
		}
		scsi_device_put(sdev);
	} else
		sdev = scsi_alloc_sdev(starget, lun, hostdata);
	if (!sdev)
		goto out;

	result = kmalloc(result_len, GFP_ATOMIC |
			((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
	if (!result)
		goto out_free_sdev;

	if (scsi_probe_lun(sdev, result, result_len, &bflags))
		goto out_free_result;

	if (bflagsp)
		*bflagsp = bflags;
	/*
	 * result contains valid SCSI INQUIRY data.
	 */
	if (((result[0] >> 5) == 3) && !(bflags & BLIST_ATTACH_PQ3)) {
		/*
		 * For a Peripheral qualifier 3 (011b), the SCSI
		 * spec says: The device server is not capable of
		 * supporting a physical device on this logical
		 * unit.
		 *
		 * For disks, this implies that there is no
		 * logical disk configured at sdev->lun, but there
		 * is a target id responding.
		 */
		SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
				   " peripheral qualifier of 3, device not"
				   " added\n"));
		if (lun == 0) {
			SCSI_LOG_SCAN_BUS(1, {
				unsigned char vend[9];
				unsigned char mod[17];

				sdev_printk(KERN_INFO, sdev,
					"scsi scan: consider passing scsi_mod."
					"dev_flags=%s:%s:0x240 or 0x1000240\n",
					scsi_inq_str(vend, result, 8, 16),
					scsi_inq_str(mod, result, 16, 32));
			});

		}

		res = SCSI_SCAN_TARGET_PRESENT;
		goto out_free_result;
	}

	/*
	 * Some targets signal "no LUN here" with a peripheral qualifier of
	 * 1 (or, for devices flagged with pdt_1f_for_no_lun, any qualifier)
	 * combined with a peripheral device type of 0x1f ("unknown or no
	 * device type").  Don't add an sdev in that case, but do note that
	 * the target is present.  Well-known LUNs are exempt from this
	 * check.
	 */
	if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
	    (result[0] & 0x1f) == 0x1f &&
	    !scsi_is_wlun(lun)) {
		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
					"scsi scan: peripheral device type"
					" of 31, no device added\n"));
		res = SCSI_SCAN_TARGET_PRESENT;
		goto out_free_result;
	}

	res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (bflags & BLIST_KEY) {
			sdev->lockable = 0;
			scsi_unlock_floptical(sdev, result);
		}
	}

 out_free_result:
	kfree(result);
 out_free_sdev:
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (sdevp) {
			if (scsi_device_get(sdev) == 0) {
				*sdevp = sdev;
			} else {
				__scsi_remove_device(sdev);
				res = SCSI_SCAN_NO_RESPONSE;
			}
		}
	} else
		__scsi_remove_device(sdev);
 out:
	return res;
}
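/**
 * scsi_sequential_lun_scan - sequentially scan a SCSI target
 * @starget:	pointer to target structure to scan
 * @bflags:	black/white list flag for LUN 0
 * @scsi_level: Which version of the standard does this device adhere to
 * @rescan:     passed to scsi_probe_and_add_lun()
 *
 * Description:
 *     Generally, scan from LUN 1 (LUN 0 is assumed to already have been
 *     scanned) up to some maximum LUN, stopping at the first LUN with no
 *     device attached unless sparse LUN support is enabled.
 **/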
static void scsi_sequential_lun_scan(struct scsi_target *starget,
				     int bflags, int scsi_level,
				     enum scsi_scan_mode rescan)
{
	unsigned int sparse_lun, lun, max_dev_lun;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
		"scsi scan: Sequential scan\n"));

	max_dev_lun = min(max_scsi_luns, shost->max_lun);
	/*
	 * If this device is known to support sparse multiple units,
	 * override the other settings, and scan all of them. Normally,
	 * SCSI-3 devices should be scanned via the REPORT LUNS.
	 */
	if (bflags & BLIST_SPARSELUN) {
		max_dev_lun = shost->max_lun;
		sparse_lun = 1;
	} else
		sparse_lun = 0;

	/*
	 * If this device is known to support multiple units, override
	 * the other settings, and scan all of them.
	 */
	if (bflags & BLIST_FORCELUN)
		max_dev_lun = shost->max_lun;
	/*
	 * REGAL CDC-4X: avoid hang after LUN 4
	 */
	if (bflags & BLIST_MAX5LUN)
		max_dev_lun = min(5U, max_dev_lun);
	/*
	 * Do not scan SCSI-2 or lower device past LUN 7, unless
	 * BLIST_LARGELUN.
	 */
	if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
		max_dev_lun = min(8U, max_dev_lun);

	/*
	 * We have already scanned LUN 0, so start at LUN 1. Keep scanning
	 * until we reach the max, or no LUN is found and we are not
	 * sparse_lun.
	 */
	for (lun = 1; lun < max_dev_lun; ++lun)
		if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
					    NULL) != SCSI_SCAN_LUN_PRESENT) &&
		    !sparse_lun)
			return;
}
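/**
 * scsilun_to_int - convert a scsi_lun to an int
 * @scsilun:	struct scsi_lun to be converted.
 *
 * Description:
 *     Convert @scsilun from a struct scsi_lun to a four byte host
 *     byte-ordered integer, and return the result. The caller must check
 *     for truncation before using this function.
 *
 * Notes:
 *     The struct scsi_lun is assumed to be four levels, with each level
 *     effectively containing a SCSI byte-ordered (big endian) two-byte
 *     integer.  That is, each level is taken in turn, shifted left by the
 *     level number times 16 bits, and OR-ed into the result.
 *
 *     Given a struct scsi_lun of: 0a 04 0b 03 00 00 00 00, this function
 *     returns the integer: 0x0b030a04
 **/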
int scsilun_to_int(struct scsi_lun *scsilun)
{
	int i;
	unsigned int lun;

	lun = 0;
	for (i = 0; i < sizeof(lun); i += 2)
		lun = lun | (((scsilun->scsi_lun[i] << 8) |
			      scsilun->scsi_lun[i + 1]) << (i * 8));
	return lun;
}
EXPORT_SYMBOL(scsilun_to_int);
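/**
 * int_to_scsilun - reverts an int into a scsi_lun
 * @lun:	integer to be reverted
 * @scsilun:	struct scsi_lun to be set.
 *
 * Description:
 *     Reverts the functionality of the scsilun_to_int, which packed
 *     an 8-byte lun value into an int. This routine unpacks the int
 *     back into the lun value.
 *
 *     Given an integer : 0x0b030a04,  this function returns a
 *     struct scsi_lun of: 0a 04 0b 03 00 00 00 00
 **/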
void int_to_scsilun(unsigned int lun, struct scsi_lun *scsilun)
{
	int i;

	memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun));

	for (i = 0; i < sizeof(lun); i += 2) {
		scsilun->scsi_lun[i] = (lun >> 8) & 0xFF;
		scsilun->scsi_lun[i+1] = lun & 0xFF;
		lun = lun >> 16;
	}
}
EXPORT_SYMBOL(int_to_scsilun);
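/**
 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
 * @starget: which target
 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
 * @rescan: nonzero if we can skip code only needed on first scan
 *
 * Description:
 *   Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
 *   Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
 *
 *   If BLIST_REPORTLUN2 is set, scan a target that supports more than 8
 *   LUNs even if it's older than SCSI-3.
 *   If BLIST_NOREPORTLUN is set, return 1 always.
 *   If BLIST_NOLUN is set, return 0 always.
 *   If starget->no_report_luns is set, return 1 always.
 *
 * Return:
 *     0: scan completed (or no memory, so further scanning is futile)
 *     1: could not scan with REPORT LUN
 **/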
static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
				enum scsi_scan_mode rescan)
{
	char devname[64];
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	unsigned int length, new_length;
	unsigned int lun;
	unsigned int num_luns;
	unsigned int retries;
	int result;
	struct scsi_lun *lunp, *lun_data;
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int ret = 0;

	/*
	 * Only support SCSI-3 and up devices if BLIST_NOLUN is not set.
	 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
	 * support more than 8 LUNs.
	 * Don't attempt if the target doesn't support REPORT LUNS.
	 */
	if (bflags & BLIST_NOREPORTLUN)
		return 1;
	if (starget->scsi_level < SCSI_2 &&
	    starget->scsi_level != SCSI_UNKNOWN)
		return 1;
	if (starget->scsi_level < SCSI_3 &&
	    (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
		return 1;
	if (bflags & BLIST_NOLUN)
		return 0;
	if (starget->no_report_luns)
		return 1;

	if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
		sdev = scsi_alloc_sdev(starget, 0, NULL);
		if (!sdev)
			return 0;
		if (scsi_device_get(sdev)) {
			__scsi_remove_device(sdev);
			return 0;
		}
	}

	sprintf(devname, "host %d channel %d id %d",
		shost->host_no, sdev->channel, sdev->id);

	/*
	 * Allocate enough to hold the header (the same size as one scsi_lun)
	 * plus the number of luns we are requesting.  The initial allocation
	 * is capped at MAX_INIT_REPORT_LUNS entries; if the target reports
	 * more LUNs than that, we retry below with a larger buffer.
	 */
	length = (max_scsi_report_luns < MAX_INIT_REPORT_LUNS) ?
		 (max_scsi_report_luns + 1) * sizeof(struct scsi_lun) :
		 (MAX_INIT_REPORT_LUNS + 1) * sizeof(struct scsi_lun);
retry:
	lun_data = kmalloc(length, GFP_KERNEL |
			   (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
	if (!lun_data) {
		printk(ALLOC_FAILURE_MSG, __func__);
		goto out;
	}

	scsi_cmd[0] = REPORT_LUNS;

	/*
	 * bytes 1 - 5: reserved, set to zero.
	 */
	memset(&scsi_cmd[1], 0, 5);

	/*
	 * bytes 6 - 9: length of the command.
	 */
	put_unaligned_be32(length, &scsi_cmd[6]);

	scsi_cmd[10] = 0;	/* reserved */
	scsi_cmd[11] = 0;	/* control */

	/*
	 * We can get a UNIT ATTENTION, for example a power on/reset, so
	 * retry a few times (like sd.c does for TEST UNIT READY).
	 * Experience shows some combinations of adapter/devices get at
	 * least two power on/resets.
	 *
	 * Illegal requests (for devices that do not support REPORT LUNS)
	 * should come through as a check condition, and will not generate
	 * a retry.
	 */
	for (retries = 0; retries < 3; retries++) {
		SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
				"scsi scan: Sending REPORT LUNS to (try %d)\n",
				retries));

		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
					  lun_data, length, &sshdr,
					  SCSI_REPORT_LUNS_TIMEOUT, 3, NULL);

		SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
				"scsi scan: REPORT LUNS"
				" %s (try %d) result 0x%x\n",
				result ? "failed" : "successful",
				retries, result));
		if (result == 0)
			break;
		else if (scsi_sense_valid(&sshdr)) {
			if (sshdr.sense_key != UNIT_ATTENTION)
				break;
		}
	}

	if (result) {
		/*
		 * The device probably does not support a REPORT LUN command
		 */
		ret = 1;
		goto out_err;
	}

	/*
	 * Get the length from the first four bytes of lun_data.
	 */
	new_length =
		get_unaligned_be32(lun_data->scsi_lun) +
		sizeof(struct scsi_lun);
	num_luns =
		get_unaligned_be32(lun_data->scsi_lun) /
		sizeof(struct scsi_lun);

	if (new_length > length && num_luns <= max_scsi_report_luns) {
		length = new_length;
		kfree(lun_data);
		goto retry;
	}

	if (num_luns > max_scsi_report_luns) {
		sdev_printk(KERN_WARNING, sdev,
			    "Only %d (max_scsi_report_luns)"
			    " of %d luns reported, try increasing"
			    " max_report_luns.\n",
			    max_scsi_report_luns, num_luns);
		num_luns = max_scsi_report_luns;
	}

	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
		"scsi scan: REPORT LUN scan\n"));

	/*
	 * Scan the luns in lun_data. The entry at offset 0 is really
	 * the header, so start at 1 and go up to and including num_luns.
	 */
	for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
		lun = scsilun_to_int(lunp);

		/*
		 * Check if the unused part of lunp is non-zero, and so
		 * does not fit in lun.
		 */
		if (memcmp(&lunp->scsi_lun[sizeof(lun)], "\0\0\0\0", 4)) {
			int i;
			u8 *data;

			/*
			 * Output an error displaying the LUN in binary -
			 * we don't print the integer LUN for now as it
			 * is meaningless.
			 */
			printk(KERN_WARNING "scsi: %s lun 0x", devname);
			data = (char *)lunp->scsi_lun;
			for (i = 0; i < sizeof(struct scsi_lun); i++)
				printk("%02x", data[i]);
			printk(" has a LUN larger than currently supported.\n");
		} else if (lun > sdev->host->max_lun) {
			sdev_printk(KERN_WARNING, sdev,
				    "lun%d has a LUN larger than"
				    " allowed by the host adapter\n", lun);
		} else {
			int res;

			res = scsi_probe_and_add_lun(starget,
				lun, NULL, NULL, rescan, NULL);
			if (res == SCSI_SCAN_NO_RESPONSE) {
				/*
				 * Got some results, but now none, abort.
				 */
				sdev_printk(KERN_ERR, sdev,
					"Unexpected response"
					" from lun %d while scanning, scan"
					" aborted\n", lun);
				break;
			}
		}
	}

 out_err:
	kfree(lun_data);
 out:
	scsi_device_put(sdev);
	if (scsi_device_created(sdev))
		/*
		 * the sdev we used didn't appear in the report luns scan
		 */
		__scsi_remove_device(sdev);
	return ret;
}

struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
				      uint id, uint lun, void *hostdata)
{
	struct scsi_device *sdev = ERR_PTR(-ENODEV);
	struct device *parent = &shost->shost_gendev;
	struct scsi_target *starget;

	if (strncmp(scsi_scan_type, "none", 4) == 0)
		return ERR_PTR(-ENODEV);

	starget = scsi_alloc_target(parent, channel, id);
	if (!starget)
		return ERR_PTR(-ENOMEM);
	scsi_autopm_get_target(starget);

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		scsi_probe_and_add_lun(starget, lun, NULL, &sdev,
				       SCSI_SCAN_RESCAN, hostdata);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);
	scsi_autopm_put_target(starget);
	/*
	 * paired with scsi_alloc_target(). Target will be destroyed unless
	 * scsi_probe_and_add_lun made an underlying device visible
	 */
	scsi_target_reap(starget);
	put_device(&starget->dev);

	return sdev;
}
EXPORT_SYMBOL(__scsi_add_device);

int scsi_add_device(struct Scsi_Host *host, uint channel,
		    uint target, uint lun)
{
	struct scsi_device *sdev =
		__scsi_add_device(host, channel, target, lun, NULL);
	if (IS_ERR(sdev))
		return PTR_ERR(sdev);

	scsi_device_put(sdev);
	return 0;
}
EXPORT_SYMBOL(scsi_add_device);

void scsi_rescan_device(struct device *dev)
{
	struct scsi_driver *drv;
	struct scsi_device *sdev = to_scsi_device(dev);

	device_lock(dev);

	scsi_attach_vpd(sdev);

	if (sdev->handler && sdev->handler->rescan)
		sdev->handler->rescan(sdev);

	if (dev->driver) {
		drv = to_scsi_driver(dev->driver);
		if (try_module_get(drv->owner)) {
			if (drv->rescan)
				drv->rescan(dev);
			module_put(drv->owner);
		}
	}
	device_unlock(dev);
}
EXPORT_SYMBOL(scsi_rescan_device);

static void __scsi_scan_target(struct device *parent, unsigned int channel,
		unsigned int id, unsigned int lun, enum scsi_scan_mode rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	int bflags = 0;
	int res;
	struct scsi_target *starget;

	if (shost->this_id == id)
		/*
		 * Don't scan the host adapter
		 */
		return;

	starget = scsi_alloc_target(parent, channel, id);
	if (!starget)
		return;
	scsi_autopm_get_target(starget);

	if (lun != SCAN_WILD_CARD) {
		/*
		 * Scan for a specific host/chan/id/lun.
		 */
		scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
		goto out_reap;
	}

	/*
	 * Scan LUN 0, if there is some response, scan further.  Ideally,
	 * we would only scan further on a REPORT LUN failure, but support
	 * for it does not scale.
	 */
	res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
	if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
		if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
			/*
			 * The REPORT LUN did not scan the target,
			 * do a sequential scan.
			 */
			scsi_sequential_lun_scan(starget, bflags,
						 starget->scsi_level, rescan);
	}

 out_reap:
	scsi_autopm_put_target(starget);
	/*
	 * paired with scsi_alloc_target(): determine if the target has
	 * any children at all, if not, nuke it
	 */
	scsi_target_reap(starget);

	put_device(&starget->dev);
}
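/**
 * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
 * @parent:	host to scan
 * @channel:	channel to scan
 * @id:		target id to scan
 * @lun:	Specific LUN to scan or SCAN_WILD_CARD
 * @rescan:	passed to LUN scanning routines; SCSI_SCAN_INITIAL for
 *              no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
 *              and SCSI_SCAN_MANUAL to force scanning even if
 *              'scan=manual' is set.
 *
 * Description:
 *     Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
 *     and possibly all LUNs on the target id.
 *
 *     First try a REPORT LUN scan, if that does not scan the target, do a
 *     sequential scan of LUNs on the target id.
 **/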
void scsi_scan_target(struct device *parent, unsigned int channel,
		      unsigned int id, unsigned int lun,
		      enum scsi_scan_mode rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);

	if (strncmp(scsi_scan_type, "none", 4) == 0)
		return;

	if (rescan != SCSI_SCAN_MANUAL &&
	    strncmp(scsi_scan_type, "manual", 6) == 0)
		return;

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		__scsi_scan_target(parent, channel, id, lun, rescan);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);
}
EXPORT_SYMBOL(scsi_scan_target);

static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
			      unsigned int id, unsigned int lun,
			      enum scsi_scan_mode rescan)
{
	uint order_id;

	if (id == SCAN_WILD_CARD)
		for (id = 0; id < shost->max_id; ++id) {
			/*
			 * XXX adapter drivers when possible (FCP, iSCSI)
			 * could modify max_id to match the current max,
			 * not the absolute max.
			 *
			 * XXX add a shost id iterator, so for example,
			 * the FC ID can be the same as a target id
			 * without a huge overhead of sparse id's.
			 */
			if (shost->reverse_ordering)
				/*
				 * Scan from high to low id.
				 */
				order_id = shost->max_id - id - 1;
			else
				order_id = id;
			__scsi_scan_target(&shost->shost_gendev, channel,
					order_id, lun, rescan);
		}
	else
		__scsi_scan_target(&shost->shost_gendev, channel,
				id, lun, rescan);
}

int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
			    unsigned int id, unsigned int lun,
			    enum scsi_scan_mode rescan)
{
	SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
		"%s: <%u:%u:%u>\n",
		__func__, channel, id, lun));

	if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
	    ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
	    ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
		return -EINVAL;

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		if (channel == SCAN_WILD_CARD)
			for (channel = 0; channel <= shost->max_channel;
			     channel++)
				scsi_scan_channel(shost, channel, id, lun,
						  rescan);
		else
			scsi_scan_channel(shost, channel, id, lun, rescan);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);

	return 0;
}

static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	shost_for_each_device(sdev, shost) {
		/* target removed before the device could be added */
		if (sdev->sdev_state == SDEV_DEL)
			continue;
		/* if the device is already visible, do not add it again */
		if (sdev->is_visible)
			continue;
		if (!scsi_host_scan_allowed(shost) ||
		    scsi_sysfs_add_sdev(sdev) != 0)
			__scsi_remove_device(sdev);
	}
}
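/**
 * scsi_prep_async_scan - prepare for an async scan
 * @shost: the host which will be scanned
 * Returns: a cookie to be passed to scsi_finish_async_scan()
 *
 * Tells the midlayer this host is going to do an asynchronous scan.
 * It reserves the host's position in the scanning list and ensures
 * that other asynchronous scans started after this one won't affect the
 * ordering of the discovered devices.
 */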
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
	struct async_scan_data *data;
	unsigned long flags;

	if (strncmp(scsi_scan_type, "sync", 4) == 0)
		return NULL;

	if (shost->async_scan) {
		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;
	data->shost = scsi_host_get(shost);
	if (!data->shost)
		goto err;
	init_completion(&data->prev_finished);

	mutex_lock(&shost->scan_mutex);
	spin_lock_irqsave(shost->host_lock, flags);
	shost->async_scan = 1;
	spin_unlock_irqrestore(shost->host_lock, flags);
	mutex_unlock(&shost->scan_mutex);

	spin_lock(&async_scan_lock);
	if (list_empty(&scanning_hosts))
		complete(&data->prev_finished);
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	return data;

 err:
	kfree(data);
	return NULL;
}
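/**
 * scsi_finish_async_scan - asynchronous scan has finished
 * @data: cookie returned from earlier call to scsi_prep_async_scan()
 *
 * All the devices currently attached to this host have been found.
 * This function announces all the devices it has found to the rest
 * of the system.
 */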
static void scsi_finish_async_scan(struct async_scan_data *data)
{
	struct Scsi_Host *shost;
	unsigned long flags;

	if (!data)
		return;

	shost = data->shost;

	mutex_lock(&shost->scan_mutex);

	if (!shost->async_scan) {
		shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
		dump_stack();
		mutex_unlock(&shost->scan_mutex);
		return;
	}

	wait_for_completion(&data->prev_finished);

	scsi_sysfs_add_devices(shost);

	spin_lock_irqsave(shost->host_lock, flags);
	shost->async_scan = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);

	mutex_unlock(&shost->scan_mutex);

	spin_lock(&async_scan_lock);
	list_del(&data->list);
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
				struct async_scan_data, list);
		complete(&next->prev_finished);
	}
	spin_unlock(&async_scan_lock);

	scsi_autopm_put_host(shost);
	scsi_host_put(shost);
	kfree(data);
}

static void do_scsi_scan_host(struct Scsi_Host *shost)
{
	if (shost->hostt->scan_finished) {
		unsigned long start = jiffies;
		if (shost->hostt->scan_start)
			shost->hostt->scan_start(shost);

		while (!shost->hostt->scan_finished(shost, jiffies - start))
			msleep(10);
	} else {
		scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
				SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
	}
}

static void do_scan_async(void *_data, async_cookie_t c)
{
	struct async_scan_data *data = _data;
	struct Scsi_Host *shost = data->shost;

	do_scsi_scan_host(shost);
	scsi_finish_async_scan(data);
}
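/**
 * scsi_scan_host - scan the given adapter
 * @shost:	adapter to scan
 **/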
void scsi_scan_host(struct Scsi_Host *shost)
{
	struct async_scan_data *data;

	if (strncmp(scsi_scan_type, "none", 4) == 0 ||
	    strncmp(scsi_scan_type, "manual", 6) == 0)
		return;
	if (scsi_autopm_get_host(shost) < 0)
		return;

	data = scsi_prep_async_scan(shost);
	if (!data) {
		do_scsi_scan_host(shost);
		scsi_autopm_put_host(shost);
		return;
	}

	/* register with the async subsystem so wait_for_device_probe()
	 * will flush this work
	 */
	async_schedule(do_scan_async, data);

	/* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
}
EXPORT_SYMBOL(scsi_scan_host);

void scsi_forget_host(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;

 restart:
	spin_lock_irqsave(shost->host_lock, flags);
	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (sdev->sdev_state == SDEV_DEL)
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);
		__scsi_remove_device(sdev);
		goto restart;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}
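/**
 * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself
 * @shost: Host that needs a scsi_device
 *
 * Lock status: None assumed.
 *
 * Returns:     The scsi_device or NULL
 *
 * Notes:
 *	Attach a single scsi_device to the Scsi_Host - this should
 *	be made to look like a "pseudo-device" that points to the
 *	HA itself.
 *
 *	Note - this device is not accessible from any high-level
 *	drivers (including generics), which is probably not
 *	optimal.  We can add hooks later to attach.
 */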
struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
{
	struct scsi_device *sdev = NULL;
	struct scsi_target *starget;

	mutex_lock(&shost->scan_mutex);
	if (!scsi_host_scan_allowed(shost))
		goto out;
	starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
	if (!starget)
		goto out;

	sdev = scsi_alloc_sdev(starget, 0, NULL);
	if (sdev)
		sdev->borken = 0;
	else
		scsi_target_reap(starget);
	put_device(&starget->dev);
 out:
	mutex_unlock(&shost->scan_mutex);
	return sdev;
}
EXPORT_SYMBOL(scsi_get_host_dev);
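/**
 * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself
 * @sdev: Host device to be freed
 *
 * Lock status: None assumed.
 *
 * Returns:     Nothing
 */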
void scsi_free_host_dev(struct scsi_device *sdev)
{
	BUG_ON(sdev->id != sdev->host->this_id);

	__scsi_remove_device(sdev);
}
EXPORT_SYMBOL(scsi_free_host_dev);