1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/module.h>
29#include <linux/moduleparam.h>
30#include <linux/init.h>
31#include <linux/blkdev.h>
32#include <linux/delay.h>
33#include <linux/kthread.h>
34#include <linux/spinlock.h>
35#include <linux/async.h>
36#include <linux/slab.h>
37#include <asm/unaligned.h>
38
39#include <scsi/scsi.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/scsi_device.h>
42#include <scsi/scsi_driver.h>
43#include <scsi/scsi_devinfo.h>
44#include <scsi/scsi_host.h>
45#include <scsi/scsi_transport.h>
46#include <scsi/scsi_dh.h>
47#include <scsi/scsi_eh.h>
48
49#include "scsi_priv.h"
50#include "scsi_logging.h"
51
52#define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \
53 " SCSI scanning, some SCSI devices might not be configured\n"
54
55
56
57
58#define SCSI_TIMEOUT (2*HZ)
59#define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)
60
61
62
63
64#define SCSI_UID_SER_NUM 'S'
65#define SCSI_UID_UNKNOWN 'Z'
66
67
68
69
70
71
72
73
74
75
76
77
78
79#define SCSI_SCAN_NO_RESPONSE 0
80#define SCSI_SCAN_TARGET_PRESENT 1
81#define SCSI_SCAN_LUN_PRESENT 2
82
83static const char *scsi_null_device_strs = "nullnullnullnull";
84
85#define MAX_SCSI_LUNS 512
86
87static u64 max_scsi_luns = MAX_SCSI_LUNS;
88
89module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
90MODULE_PARM_DESC(max_luns,
91 "last scsi LUN (should be between 1 and 2^64-1)");
92
93#ifdef CONFIG_SCSI_SCAN_ASYNC
94#define SCSI_SCAN_TYPE_DEFAULT "async"
95#else
96#define SCSI_SCAN_TYPE_DEFAULT "sync"
97#endif
98
99char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
100
101module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
102 S_IRUGO|S_IWUSR);
103MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
104 "Setting to 'manual' disables automatic scanning, but allows "
105 "for manual device scan via the 'scan' sysfs attribute.");
106
107static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
108
109module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
110MODULE_PARM_DESC(inq_timeout,
111 "Timeout (in seconds) waiting for devices to answer INQUIRY."
112 " Default is 20. Some devices may need more; most need less.");
113
114
115static DEFINE_SPINLOCK(async_scan_lock);
116static LIST_HEAD(scanning_hosts);
117
118struct async_scan_data {
119 struct list_head list;
120 struct Scsi_Host *shost;
121 struct completion prev_finished;
122};
123
124
125
126
127
128
129
130
131
/**
 * scsi_complete_async_scans - Wait for asynchronous scans to complete
 *
 * When this function returns, any host which started scanning before
 * this function was called will have finished its scan.  Hosts which
 * started scanning after this function was called may or may not have
 * finished.
 *
 * Works by adding a sentinel entry to the tail of @scanning_hosts and
 * sleeping until the entry ahead of it completes us (see the matching
 * complete() below and in the async-scan done path).
 */
int scsi_complete_async_scans(void)
{
	struct async_scan_data *data;

	do {
		if (list_empty(&scanning_hosts))
			return 0;
		/*
		 * We could use kzalloc() here, but the allocation can fail
		 * and we have nothing sensible to do but retry; a brief
		 * sleep avoids spinning the CPU while we wait for memory.
		 */
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			msleep(1);
	} while (!data);

	/* shost == NULL marks this entry as a waiter, not a real scan */
	data->shost = NULL;
	init_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	/* Scans may have completed while we slept in kmalloc() above */
	if (list_empty(&scanning_hosts))
		goto done;
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
	wait_for_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	list_del(&data->list);
	/* Pass the baton on to the next waiter, if any */
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
				struct async_scan_data, list);
		complete(&next->prev_finished);
	}
 done:
	spin_unlock(&async_scan_lock);

	kfree(data);
	return 0;
}
174
175
176
177
178
179
180
181
182
183
184static void scsi_unlock_floptical(struct scsi_device *sdev,
185 unsigned char *result)
186{
187 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
188
189 sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
190 scsi_cmd[0] = MODE_SENSE;
191 scsi_cmd[1] = 0;
192 scsi_cmd[2] = 0x2e;
193 scsi_cmd[3] = 0;
194 scsi_cmd[4] = 0x2a;
195 scsi_cmd[5] = 0;
196 scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
197 SCSI_TIMEOUT, 3, NULL);
198}
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
/**
 * scsi_alloc_sdev - allocate and set up a scsi_device
 * @starget:	which target to allocate a &scsi_device for
 * @lun:	which lun
 * @hostdata:	usually NULL and set by ->slave_alloc instead
 *
 * Description:
 *     Allocate, initialize for io, and return a pointer to a scsi_device.
 *     Stores the @shost, @channel, @id, and @lun in the scsi_device, and
 *     adds the scsi_device to the appropriate list.
 *
 * Return value:
 *     scsi_device on success, NULL on failure.
 **/
static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
					   u64 lun, void *hostdata)
{
	struct scsi_device *sdev;
	int display_failure_msg = 1, ret;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	/* extra transportt->device_size bytes hold transport private data */
	sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
		       GFP_ATOMIC);
	if (!sdev)
		goto out;

	sdev->vendor = scsi_null_device_strs;
	sdev->model = scsi_null_device_strs;
	sdev->rev = scsi_null_device_strs;
	sdev->host = shost;
	sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
	sdev->id = starget->id;
	sdev->lun = lun;
	sdev->channel = starget->channel;
	sdev->sdev_state = SDEV_CREATED;
	INIT_LIST_HEAD(&sdev->siblings);
	INIT_LIST_HEAD(&sdev->same_target_siblings);
	INIT_LIST_HEAD(&sdev->cmd_list);
	INIT_LIST_HEAD(&sdev->starved_entry);
	INIT_LIST_HEAD(&sdev->event_list);
	spin_lock_init(&sdev->list_lock);
	mutex_init(&sdev->inquiry_mutex);
	INIT_WORK(&sdev->event_work, scsi_evt_thread);
	INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);

	/* the sdev holds a reference on its parent target */
	sdev->sdev_gendev.parent = get_device(&starget->dev);
	sdev->sdev_target = starget;

	/* usually NULL and set by ->slave_alloc instead */
	sdev->hostdata = hostdata;

	/* if the device needs this changing, it may do so in the
	 * slave_configure function */
	sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;

	/*
	 * Some low level driver could use device->type
	 */
	sdev->type = -1;

	/*
	 * Assume that the device will have handshaking problems,
	 * and then fix this field later if it turns out it
	 * doesn't (the blacklist check in scsi_add_lun clears it).
	 */
	sdev->borken = 1;

	if (shost_use_blk_mq(shost))
		sdev->request_queue = scsi_mq_alloc_queue(sdev);
	else
		sdev->request_queue = scsi_alloc_queue(sdev);
	if (!sdev->request_queue) {
		/* release fn is set up in scsi_sysfs_device_initialise, so
		 * have to free and put manually here */
		put_device(&starget->dev);
		kfree(sdev);
		goto out;
	}
	WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
	sdev->request_queue->queuedata = sdev;

	if (!shost_use_blk_mq(sdev->host)) {
		blk_queue_init_tags(sdev->request_queue,
				    sdev->host->cmd_per_lun, shost->bqt,
				    shost->hostt->tag_alloc_policy);
	}
	scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ?
					sdev->host->cmd_per_lun : 1);

	scsi_sysfs_device_initialize(sdev);

	if (shost->hostt->slave_alloc) {
		ret = shost->hostt->slave_alloc(sdev);
		if (ret) {
			/*
			 * if LLDD reports slave not present, don't clutter
			 * console with alloc failure messages
			 */
			if (ret == -ENXIO)
				display_failure_msg = 0;
			goto out_device_destroy;
		}
	}

	return sdev;

out_device_destroy:
	__scsi_remove_device(sdev);
out:
	if (display_failure_msg)
		printk(ALLOC_FAILURE_MSG, __func__);
	return NULL;
}
313
/*
 * Final teardown of a scsi_target: mark it STARGET_DEL, give the
 * transport class and the LLDD their destroy callbacks (the latter
 * under host_lock, together with removal from shost->__targets), and
 * drop the reference taken when the target was allocated.
 */
static void scsi_target_destroy(struct scsi_target *starget)
{
	struct device *dev = &starget->dev;
	struct Scsi_Host *shost = dev_to_shost(dev->parent);
	unsigned long flags;

	/* destroying a target twice indicates a refcounting bug */
	BUG_ON(starget->state == STARGET_DEL);
	starget->state = STARGET_DEL;
	transport_destroy_device(dev);
	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->hostt->target_destroy)
		shost->hostt->target_destroy(starget);
	list_del_init(&starget->siblings);
	spin_unlock_irqrestore(shost->host_lock, flags);
	put_device(dev);
}
330
331static void scsi_target_dev_release(struct device *dev)
332{
333 struct device *parent = dev->parent;
334 struct scsi_target *starget = to_scsi_target(dev);
335
336 kfree(starget);
337 put_device(parent);
338}
339
/* device_type shared by all scsi_target devices; used by
 * scsi_is_target_device() below to identify them */
static struct device_type scsi_target_type = {
	.name =		"scsi_target",
	.release =	scsi_target_dev_release,
};
344
/* Returns non-zero iff @dev is a scsi_target (by device_type identity) */
int scsi_is_target_device(const struct device *dev)
{
	return dev->type == &scsi_target_type;
}
349EXPORT_SYMBOL(scsi_is_target_device);
350
351static struct scsi_target *__scsi_find_target(struct device *parent,
352 int channel, uint id)
353{
354 struct scsi_target *starget, *found_starget = NULL;
355 struct Scsi_Host *shost = dev_to_shost(parent);
356
357
358
359 list_for_each_entry(starget, &shost->__targets, siblings) {
360 if (starget->id == id &&
361 starget->channel == channel) {
362 found_starget = starget;
363 break;
364 }
365 }
366 if (found_starget)
367 get_device(&found_starget->dev);
368
369 return found_starget;
370}
371
372
373
374
375
376
377
378
379
380
/**
 * scsi_target_reap_ref_release - remove target from visibility
 * @kref: the reap_ref in the target being released
 *
 * Called on last put of reap_ref, which is the target's visibility
 * counter.  Removes the target from the device model (only if it made
 * it past STARGET_CREATED, i.e. was actually device_add()ed) and then
 * destroys it.
 */
static void scsi_target_reap_ref_release(struct kref *kref)
{
	struct scsi_target *starget
		= container_of(kref, struct scsi_target, reap_ref);

	/*
	 * if we get here and the target is still in a CREATED state that
	 * means it was allocated but never made visible (because a scan
	 * turned up no LUNs), so don't try to device_del() it.
	 */
	if (starget->state != STARGET_CREATED) {
		transport_remove_device(&starget->dev);
		device_del(&starget->dev);
	}
	scsi_target_destroy(starget);
}
397
/* Drop one visibility reference; last put triggers target removal */
static void scsi_target_reap_ref_put(struct scsi_target *starget)
{
	kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
}
402
403
404
405
406
407
408
409
410
411
412
413
414
/**
 * scsi_alloc_target - allocate a new or find an existing target
 * @parent:	parent of the target (need not be a scsi host)
 * @channel:	target channel number (zero if no channels)
 * @id:		target id number
 *
 * Return an existing target if one is found.  The target returned has
 * an extra device reference (and, on the found path, an extra reap_ref)
 * which the caller releases via scsi_target_reap() + put_device().
 * Returns NULL on allocation failure or if the LLDD's ->target_alloc
 * rejects the target.
 */
static struct scsi_target *scsi_alloc_target(struct device *parent,
					     int channel, uint id)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	struct device *dev = NULL;
	unsigned long flags;
	/* transport private data lives directly after the target */
	const int size = sizeof(struct scsi_target)
		+ shost->transportt->target_size;
	struct scsi_target *starget;
	struct scsi_target *found_target;
	int error, ref_got;

	starget = kzalloc(size, GFP_KERNEL);
	if (!starget) {
		printk(KERN_ERR "%s: allocation failure\n", __func__);
		return NULL;
	}
	dev = &starget->dev;
	device_initialize(dev);
	kref_init(&starget->reap_ref);
	dev->parent = get_device(parent);
	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
	dev->bus = &scsi_bus_type;
	dev->type = &scsi_target_type;
	starget->id = id;
	starget->channel = channel;
	starget->can_queue = 0;
	INIT_LIST_HEAD(&starget->siblings);
	INIT_LIST_HEAD(&starget->devices);
	starget->state = STARGET_CREATED;
	starget->scsi_level = SCSI_2;
	starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
 retry:
	spin_lock_irqsave(shost->host_lock, flags);

	found_target = __scsi_find_target(parent, channel, id);
	if (found_target)
		goto found;

	list_add_tail(&starget->siblings, &shost->__targets);
	spin_unlock_irqrestore(shost->host_lock, flags);
	/* allocate and add */
	transport_setup_device(dev);
	if (shost->hostt->target_alloc) {
		error = shost->hostt->target_alloc(starget);

		if(error) {
			dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
			/* don't want scsi_target_reap to do the final
			 * put because it will be under the host lock */
			scsi_target_destroy(starget);
			return NULL;
		}
	}
	get_device(dev);

	return starget;

 found:
	/*
	 * release routine already fired if kref is zero, so if we can still
	 * take the reference, the target must be alive.  If we can't, it must
	 * be dying and we need to wait for a new target
	 */
	ref_got = kref_get_unless_zero(&found_target->reap_ref);

	spin_unlock_irqrestore(shost->host_lock, flags);
	if (ref_got) {
		put_device(dev);	/* free our unused allocation */
		return found_target;
	}
	/*
	 * Unfortunately, we found a dying target; need to wait until it's
	 * dead before we can get a new one.  There is an anomaly here.  We
	 * *should* call scsi_target_reap() to balance the kref_get() of the
	 * reap_ref above.  However, since the target being released, it's
	 * already invisible and the reap_ref is irrelevant.  If we call
	 * scsi_target_reap() we might spuriously do another device_del() on
	 * an already invisible target.
	 */
	put_device(&found_target->dev);
	/*
	 * length of time is irrelevant here, we just want to yield the CPU
	 * for a tick to avoid busy waiting for the target to die.
	 */
	msleep(1);
	goto retry;
}
503
504
505
506
507
508
509
510
511
/**
 * scsi_target_reap - check to see if target is in use and destroy if not
 * @starget: target to be checked
 *
 * This is used after removing a LUN or doing a last put of the target
 * it checks atomically that nothing is using the target and removes
 * it if so.
 */
void scsi_target_reap(struct scsi_target *starget)
{
	/*
	 * serious problem if this triggers: STARGET_DEL is only set in the if
	 * the reap_ref drops to zero, so we're trying to do another final put
	 * on an already released kref
	 */
	BUG_ON(starget->state == STARGET_DEL);
	scsi_target_reap_ref_put(starget);
}
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
/**
 * scsi_sanitize_inquiry_string - remove non-graphic chars from an
 *                                INQUIRY result string
 * @s: INQUIRY result string to sanitize
 * @len: length of the string
 *
 * Description:
 *	The SCSI spec says that INQUIRY vendor, product, and revision
 *	strings must consist entirely of graphic ASCII characters,
 *	padded on the right with spaces.  Since not all devices obey
 *	this rule, we will replace non-graphic or non-ASCII characters
 *	with spaces.  The first NUL and everything after it is also
 *	blanked out.
 */
void scsi_sanitize_inquiry_string(unsigned char *s, int len)
{
	int seen_nul = 0;

	while (len-- > 0) {
		if (*s == '\0')
			seen_nul = 1;
		if (seen_nul || *s < 0x20 || *s > 0x7e)
			*s = ' ';
		++s;
	}
}
549EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
/**
 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
 * @sdev:	scsi_device to probe
 * @inq_result:	area to store the INQUIRY result
 * @result_len:	len of inq_result
 * @bflags:	store any bflags found here
 *
 * Description:
 *     Probe the lun associated with @sdev using a standard SCSI INQUIRY;
 *
 *     If the INQUIRY is successful, zero is returned and the
 *     INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
 *     are copied to the scsi_device any flags value is stored in *@bflags.
 *     A short first pass (usually 36 bytes) may be followed by a second,
 *     longer pass if the blacklist or the reported additional length
 *     calls for it; a failing long pass falls back to a third short one.
 **/
static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
			  int result_len, int *bflags)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int first_inquiry_len, try_inquiry_len, next_inquiry_len;
	int response_len = 0;
	int pass, count, result;
	struct scsi_sense_hdr sshdr;

	*bflags = 0;

	/* Perform up to 3 passes.  The first pass uses a conservative
	 * transfer length of 36 unless sdev->inquiry_len specifies a
	 * different value. */
	first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
	try_inquiry_len = first_inquiry_len;
	pass = 1;

 next_pass:
	SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY pass %d length %d\n",
				pass, try_inquiry_len));

	/* Each pass gets up to three chances to ignore Unit Attention */
	for (count = 0; count < 3; ++count) {
		int resid;

		memset(scsi_cmd, 0, 6);
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) try_inquiry_len;

		memset(inq_result, 0, try_inquiry_len);

		result = scsi_execute_req(sdev,  scsi_cmd, DMA_FROM_DEVICE,
					  inq_result, try_inquiry_len, &sshdr,
					  HZ / 2 + HZ * scsi_inq_timeout, 3,
					  &resid);

		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY %s with code 0x%x\n",
				result ? "failed" : "successful", result));

		if (result) {
			/*
			 * not-ready to ready transition [asc/ascq=0x28/0x0]
			 * or power-on, reset [asc/ascq=0x29/0x0], continue.
			 * INQUIRY should not yield UNIT_ATTENTION
			 * but many buggy devices do so anyway.
			 */
			if ((driver_byte(result) & DRIVER_SENSE) &&
			    scsi_sense_valid(&sshdr)) {
				if ((sshdr.sense_key == UNIT_ATTENTION) &&
				    ((sshdr.asc == 0x28) ||
				     (sshdr.asc == 0x29)) &&
				    (sshdr.ascq == 0))
					continue;
			}
		} else {
			/*
			 * if nothing was transferred, we try
			 * again. It's a workaround for some USB
			 * devices.
			 */
			if (resid == try_inquiry_len)
				continue;
		}
		break;
	}

	if (result == 0) {
		scsi_sanitize_inquiry_string(&inq_result[8], 8);
		scsi_sanitize_inquiry_string(&inq_result[16], 16);
		scsi_sanitize_inquiry_string(&inq_result[32], 4);

		response_len = inq_result[4] + 5;
		if (response_len > 255)
			response_len = first_inquiry_len;	/* sanity */

		/*
		 * Get any flags for this device.
		 *
		 * XXX add a bflags to scsi_device, and replace the
		 * corresponding bit fields in scsi_device, so bflags
		 * need not be passed as an argument.
		 */
		*bflags = scsi_get_device_flags(sdev, &inq_result[8],
				&inq_result[16]);

		/* When the first pass succeeds we gain information about
		 * what larger transfer lengths might work. */
		if (pass == 1) {
			if (BLIST_INQUIRY_36 & *bflags)
				next_inquiry_len = 36;
			else if (BLIST_INQUIRY_58 & *bflags)
				next_inquiry_len = 58;
			else if (sdev->inquiry_len)
				next_inquiry_len = sdev->inquiry_len;
			else
				next_inquiry_len = response_len;

			/* If more data is available perform the second pass */
			if (next_inquiry_len > try_inquiry_len) {
				try_inquiry_len = next_inquiry_len;
				pass = 2;
				goto next_pass;
			}
		}

	} else if (pass == 2) {
		sdev_printk(KERN_INFO, sdev,
			    "scsi scan: %d byte inquiry failed.  "
			    "Consider BLIST_INQUIRY_36 for this device\n",
			    try_inquiry_len);

		/* If this pass failed, the third pass goes back and transfers
		 * the same amount as we successfully got in the first pass. */
		try_inquiry_len = first_inquiry_len;
		pass = 3;
		goto next_pass;
	}

	/* If the last transfer attempt got an error, assume the
	 * peripheral doesn't exist or is dead. */
	if (result)
		return -EIO;

	/* Don't report any more data than the device says is valid */
	sdev->inquiry_len = min(try_inquiry_len, response_len);

	/*
	 * XXX Abort if the response length is less than 36? If less than
	 * 32, the lookup of the device flags (above) could be invalid,
	 * and it would be possible to take an incorrect action - we do
	 * not want to hang because of a short INQUIRY. On the flip side,
	 * if the device is spun down or becoming ready (and so it gives a
	 * short INQUIRY), an abort here prevents any further use of the
	 * device, including spin up.
	 *
	 * On the whole, the best approach seems to be to assume the first
	 * 36 bytes are valid no matter what the device says.  That's
	 * better than copying < 36 bytes to the inquiry-result buffer
	 * and displaying garbage for the Vendor, Product, or Revision
	 * strings.
	 */
	if (sdev->inquiry_len < 36) {
		if (!sdev->host->short_inquiry) {
			shost_printk(KERN_INFO, sdev->host,
				    "scsi scan: INQUIRY result too short (%d),"
				    " using 36\n", sdev->inquiry_len);
			sdev->host->short_inquiry = 1;
		}
		sdev->inquiry_len = 36;
	}

	/*
	 * Related to the above issue:
	 *
	 * XXX Devices (disk or all?) should be sent a TEST UNIT READY,
	 * and if not ready, sent a START_STOP to start (maybe spin up) and
	 * then send the INQUIRY again, since the INQUIRY can change after
	 * a device is initialized.
	 *
	 * Ideally, start a device if explicitly asked to do so.  This
	 * assumes that a device is spun up on power on, spun down on
	 * request, and then spun up on request.
	 */

	/*
	 * The scanning code needs to know the scsi_level, even if no
	 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
	 * non-zero LUNs can be scanned.
	 */
	sdev->scsi_level = inq_result[2] & 0x07;
	if (sdev->scsi_level >= 2 ||
	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
		sdev->scsi_level++;
	sdev->sdev_target->scsi_level = sdev->scsi_level;

	/*
	 * If SCSI-2 or lower, and if the transport requires it,
	 * store the LUN value in CDB[1].
	 */
	sdev->lun_in_cdb = 0;
	if (sdev->scsi_level <= SCSI_2 &&
	    sdev->scsi_level != SCSI_UNKNOWN &&
	    !sdev->host->no_scsi2_lun_in_cdb)
		sdev->lun_in_cdb = 1;

	return 0;
}
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
772 int *bflags, int async)
773{
774 int ret;
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796 sdev->inquiry = kmemdup(inq_result,
797 max_t(size_t, sdev->inquiry_len, 36),
798 GFP_ATOMIC);
799 if (sdev->inquiry == NULL)
800 return SCSI_SCAN_NO_RESPONSE;
801
802 sdev->vendor = (char *) (sdev->inquiry + 8);
803 sdev->model = (char *) (sdev->inquiry + 16);
804 sdev->rev = (char *) (sdev->inquiry + 32);
805
806 if (strncmp(sdev->vendor, "ATA ", 8) == 0) {
807
808
809
810
811
812
813 sdev->allow_restart = 1;
814 }
815
816 if (*bflags & BLIST_ISROM) {
817 sdev->type = TYPE_ROM;
818 sdev->removable = 1;
819 } else {
820 sdev->type = (inq_result[0] & 0x1f);
821 sdev->removable = (inq_result[1] & 0x80) >> 7;
822
823
824
825
826
827
828 if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
829 sdev_printk(KERN_WARNING, sdev,
830 "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
831 __func__, sdev->type, (unsigned int)sdev->lun);
832 sdev->type = TYPE_WLUN;
833 }
834
835 }
836
837 if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
838
839
840
841
842 if ((*bflags & BLIST_REPORTLUN2) == 0)
843 *bflags |= BLIST_NOREPORTLUN;
844 }
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862 sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
863 sdev->lockable = sdev->removable;
864 sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
865
866 if (sdev->scsi_level >= SCSI_3 ||
867 (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
868 sdev->ppr = 1;
869 if (inq_result[7] & 0x60)
870 sdev->wdtr = 1;
871 if (inq_result[7] & 0x10)
872 sdev->sdtr = 1;
873
874 sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
875 "ANSI: %d%s\n", scsi_device_type(sdev->type),
876 sdev->vendor, sdev->model, sdev->rev,
877 sdev->inq_periph_qual, inq_result[2] & 0x07,
878 (inq_result[3] & 0x0f) == 1 ? " CCS" : "");
879
880 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
881 !(*bflags & BLIST_NOTQ)) {
882 sdev->tagged_supported = 1;
883 sdev->simple_tags = 1;
884 }
885
886
887
888
889
890
891 if ((*bflags & BLIST_BORKEN) == 0)
892 sdev->borken = 0;
893
894 if (*bflags & BLIST_NO_ULD_ATTACH)
895 sdev->no_uld_attach = 1;
896
897
898
899
900
901 if (*bflags & BLIST_SELECT_NO_ATN)
902 sdev->select_no_atn = 1;
903
904
905
906
907
908 if (*bflags & BLIST_MAX_512)
909 blk_queue_max_hw_sectors(sdev->request_queue, 512);
910
911
912
913
914 else if (*bflags & BLIST_MAX_1024)
915 blk_queue_max_hw_sectors(sdev->request_queue, 1024);
916
917
918
919
920
921 if (*bflags & BLIST_NOSTARTONADD)
922 sdev->no_start_on_add = 1;
923
924 if (*bflags & BLIST_SINGLELUN)
925 scsi_target(sdev)->single_lun = 1;
926
927 sdev->use_10_for_rw = 1;
928
929 if (*bflags & BLIST_MS_SKIP_PAGE_08)
930 sdev->skip_ms_page_8 = 1;
931
932 if (*bflags & BLIST_MS_SKIP_PAGE_3F)
933 sdev->skip_ms_page_3f = 1;
934
935 if (*bflags & BLIST_USE_10_BYTE_MS)
936 sdev->use_10_for_ms = 1;
937
938
939
940
941 if (*bflags & BLIST_NO_RSOC)
942 sdev->no_report_opcodes = 1;
943
944
945
946 ret = scsi_device_set_state(sdev, SDEV_RUNNING);
947 if (ret) {
948 ret = scsi_device_set_state(sdev, SDEV_BLOCK);
949
950 if (ret) {
951 sdev_printk(KERN_ERR, sdev,
952 "in wrong state %s to complete scan\n",
953 scsi_device_state_name(sdev->sdev_state));
954 return SCSI_SCAN_NO_RESPONSE;
955 }
956 }
957
958 if (*bflags & BLIST_MS_192_BYTES_FOR_3F)
959 sdev->use_192_bytes_for_3f = 1;
960
961 if (*bflags & BLIST_NOT_LOCKABLE)
962 sdev->lockable = 0;
963
964 if (*bflags & BLIST_RETRY_HWERROR)
965 sdev->retry_hwerror = 1;
966
967 if (*bflags & BLIST_NO_DIF)
968 sdev->no_dif = 1;
969
970 if (*bflags & BLIST_SYNC_ALUA)
971 sdev->synchronous_alua = 1;
972
973 sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
974
975 if (*bflags & BLIST_TRY_VPD_PAGES)
976 sdev->try_vpd_pages = 1;
977 else if (*bflags & BLIST_SKIP_VPD_PAGES)
978 sdev->skip_vpd_pages = 1;
979
980 transport_configure_device(&sdev->sdev_gendev);
981
982 if (sdev->host->hostt->slave_configure) {
983 ret = sdev->host->hostt->slave_configure(sdev);
984 if (ret) {
985
986
987
988
989 if (ret != -ENXIO) {
990 sdev_printk(KERN_ERR, sdev,
991 "failed to configure device\n");
992 }
993 return SCSI_SCAN_NO_RESPONSE;
994 }
995 }
996
997 if (sdev->scsi_level >= SCSI_3)
998 scsi_attach_vpd(sdev);
999
1000 sdev->max_queue_depth = sdev->queue_depth;
1001
1002
1003
1004
1005
1006
1007 if (!async && scsi_sysfs_add_sdev(sdev) != 0)
1008 return SCSI_SCAN_NO_RESPONSE;
1009
1010 return SCSI_SCAN_LUN_PRESENT;
1011}
1012
1013#ifdef CONFIG_SCSI_LOGGING
1014
1015
1016
1017
1018
1019
1020
/**
 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing
 *                whitespace
 * @buf:   Output buffer with at least end-first+1 bytes of space
 * @inq:   Inquiry buffer (input)
 * @first: Offset of string into inq
 * @end:   Index after last character in inq
 *
 * Copies bytes inq[first..end), stopping early at the additional-length
 * limit (inq[4] + 5); control and space characters become ' ' and the
 * result is NUL-terminated after the last non-blank character.
 */
static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
				   unsigned first, unsigned end)
{
	unsigned last_nonblank = 0;
	unsigned i;

	for (i = 0; i + first < end && i + first < inq[4] + 5; i++) {
		unsigned char c = inq[i + first];

		if (c > ' ') {
			buf[i] = c;
			last_nonblank = i + 1;
		} else {
			buf[i] = ' ';
		}
	}
	buf[last_nonblank] = 0;
	return buf;
}
1037#endif
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
/**
 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
 * @starget:	pointer to target device structure
 * @lun:	LUN of target device
 * @bflagsp:	store bflags here if not NULL
 * @sdevp:	probe the LUN corresponding to this scsi_device
 * @rescan:     if not equal to SCSI_SCAN_INITIAL skip some code only
 *              needed on first scan
 * @hostdata:	passed to scsi_alloc_sdev()
 *
 * Description:
 *     Call scsi_probe_lun, if a LUN with an attached device is found,
 *     allocate and set it up by calling scsi_add_lun.
 *
 * Return:
 *     SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
 *     SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
 *         attached at the LUN
 *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
 **/
static int scsi_probe_and_add_lun(struct scsi_target *starget,
				  u64 lun, int *bflagsp,
				  struct scsi_device **sdevp,
				  enum scsi_scan_mode rescan,
				  void *hostdata)
{
	struct scsi_device *sdev;
	unsigned char *result;
	int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	/*
	 * The rescan flag is used as an optimization, the first scan of a
	 * host adapter calls into here with rescan == 0.
	 */
	sdev = scsi_device_lookup_by_target(starget, lun);
	if (sdev) {
		if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
			SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: device exists on %s\n",
				dev_name(&sdev->sdev_gendev)));
			if (sdevp)
				*sdevp = sdev;
			else
				scsi_device_put(sdev);

			if (bflagsp)
				*bflagsp = scsi_get_device_flags(sdev,
								 sdev->vendor,
								 sdev->model);
			return SCSI_SCAN_LUN_PRESENT;
		}
		scsi_device_put(sdev);
	} else
		sdev = scsi_alloc_sdev(starget, lun, hostdata);
	if (!sdev)
		goto out;

	result = kmalloc(result_len, GFP_ATOMIC |
			((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
	if (!result)
		goto out_free_sdev;

	if (scsi_probe_lun(sdev, result, result_len, &bflags))
		goto out_free_result;

	if (bflagsp)
		*bflagsp = bflags;
	/*
	 * result contains valid SCSI INQUIRY data.
	 */
	if (((result[0] >> 5) == 3) && !(bflags & BLIST_ATTACH_PQ3)) {
		/*
		 * For a Peripheral qualifier 3 (011b), the SCSI
		 * spec says: The device server is not capable of
		 * supporting a physical device on this logical
		 * unit.
		 *
		 * For disks, this implies that there is no
		 * logical disk configured at sdev->lun, but there
		 * is a target id responding.
		 */
		SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
				   " peripheral qualifier of 3, device not"
				   " added\n"))
		if (lun == 0) {
			SCSI_LOG_SCAN_BUS(1, {
				unsigned char vend[9];
				unsigned char mod[17];

				sdev_printk(KERN_INFO, sdev,
					"scsi scan: consider passing scsi_mod."
					"dev_flags=%s:%s:0x240 or 0x1000240\n",
					scsi_inq_str(vend, result, 8, 16),
					scsi_inq_str(mod, result, 16, 32));
			});

		}

		res = SCSI_SCAN_TARGET_PRESENT;
		goto out_free_result;
	}

	/*
	 * Some targets may set slight variations of PQ and PDT to signal
	 * that no LUN is present, so don't add sdev in these cases.
	 * Two specific examples are:
	 * 1) NetApp targets: return PQ=1, PDT=0x1f
	 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
	 *    in the UFI 1.0 spec (we cannot rely on reserved bits).
	 *
	 * References:
	 * 1) SCSI SPC-3, pp. 145-146
	 * PQ=1: "A peripheral device having the specified peripheral
	 * device type is not connected to this logical unit. However, the
	 * device server is capable of supporting the specified peripheral
	 * device type on this logical unit."
	 * PDT=0x1f: "Unknown or no device type"
	 * 2) USB UFI 1.0, p. 20
	 * PDT=00h Direct-access device (floppy)
	 * PDT=1Fh none (no FDD connected to the requested logical unit)
	 */
	if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
	    (result[0] & 0x1f) == 0x1f &&
	    !scsi_is_wlun(lun)) {
		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
					"scsi scan: peripheral device type"
					" of 31, no device added\n"));
		res = SCSI_SCAN_TARGET_PRESENT;
		goto out_free_result;
	}

	res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (bflags & BLIST_KEY) {
			sdev->lockable = 0;
			scsi_unlock_floptical(sdev, result);
		}
	}

 out_free_result:
	kfree(result);
 out_free_sdev:
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (sdevp) {
			if (scsi_device_get(sdev) == 0) {
				*sdevp = sdev;
			} else {
				__scsi_remove_device(sdev);
				res = SCSI_SCAN_NO_RESPONSE;
			}
		}
	} else
		__scsi_remove_device(sdev);
 out:
	return res;
}
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
/**
 * scsi_sequential_lun_scan - sequentially scan a SCSI target
 * @starget:	pointer to target structure to scan
 * @bflags:	black/white list flag for LUN 0
 * @scsi_level: Which version of the standard does this device adhere to
 * @rescan:     passed to scsi_probe_add_lun()
 *
 * Description:
 *     Generally, scan from LUN 1 (LUN 0 is assumed to already have been
 *     scanned) to some maximum lun until a LUN is found with no device
 *     attached. Use the bflags to figure out any oddities.
 *
 *     Modifies sdevscan->lun.
 **/
static void scsi_sequential_lun_scan(struct scsi_target *starget,
				     int bflags, int scsi_level,
				     enum scsi_scan_mode rescan)
{
	uint max_dev_lun;
	u64 sparse_lun, lun;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
		"scsi scan: Sequential scan\n"));

	max_dev_lun = min(max_scsi_luns, shost->max_lun);
	/*
	 * If this device is known to support sparse multiple units,
	 * override the other settings, and scan all of them. Normally,
	 * SCSI-3 devices should be scanned via the REPORT LUNS.
	 */
	if (bflags & BLIST_SPARSELUN) {
		max_dev_lun = shost->max_lun;
		sparse_lun = 1;
	} else
		sparse_lun = 0;

	/*
	 * If less than SCSI_1_CCS, and no special lun scanning, stop
	 * scanning; this matches 2.4 behaviour, but could just be a bug
	 * (to continue scanning a SCSI_1_CCS device).
	 *
	 * This test is broken.  We might not have any device on lun0 for
	 * a sparselun device, and if that's the case then how would we
	 * know the real scsi_level, eh?  It might make sense to just not
	 * scan any SCSI_1 device for non-0 luns, but that check would best
	 * go into scsi_alloc_sdev() and just have it return null when asked
	 * to alloc an sdev for lun > 0 of an already found SCSI_1 device.
	 *
	if ((sdevscan->scsi_level < SCSI_1_CCS) &&
	    ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN))
	     == 0))
		return;
	 */
	/*
	 * If this device is known to support multiple units, override
	 * the other settings, and scan all of them.
	 */
	if (bflags & BLIST_FORCELUN)
		max_dev_lun = shost->max_lun;
	/*
	 * REGAL CDC-4X: avoid hang after LUN 4
	 */
	if (bflags & BLIST_MAX5LUN)
		max_dev_lun = min(5U, max_dev_lun);
	/*
	 * Do not scan SCSI-2 or lower device past LUN 7, unless
	 * BLIST_LARGELUN.
	 */
	if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
		max_dev_lun = min(8U, max_dev_lun);
	/*
	 * Do not scan past LUN 255 unless the device claims SCSI-3
	 * compliance or is white-listed via BLIST_SCSI3LUN.
	 */
	if (!(bflags & BLIST_SCSI3LUN))
		max_dev_lun = min(256U, max_dev_lun);

	/*
	 * We have already scanned LUN 0, so start at LUN 1. Keep scanning
	 * until we reach the max, or no LUN is found and we are not
	 * sparse_lun.
	 */
	for (lun = 1; lun < max_dev_lun; ++lun)
		if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
					    NULL) != SCSI_SCAN_LUN_PRESENT) &&
		    !sparse_lun)
			return;
}
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
/**
 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
 * @starget: which target
 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
 * @rescan: nonzero if we can skip code only needed on first scan
 *
 * Description:
 *   Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
 *   Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
 *
 *   If BLINK_REPORTLUN2 is set, scan a target that supports more than 8
 *   LUNs even if it's older than SCSI-3.
 *   If BLIST_NOREPORTLUN is set, return 1 always.
 *   If BLIST_NOLUN is set, return 0 always.
 *   If starget->no_report_luns is set, return 1 always.
 *
 * Return:
 *     0: scan completed (or no memory, so further scanning is futile)
 *     1: could not scan with REPORT LUN
 **/
static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
				enum scsi_scan_mode rescan)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	unsigned int length;
	u64 lun;
	unsigned int num_luns;
	unsigned int retries;
	int result;
	struct scsi_lun *lunp, *lun_data;
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int ret = 0;

	/*
	 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
	 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
	 * support more than 8 LUNs.
	 * Don't attempt if the target doesn't support REPORT LUNS.
	 */
	if (bflags & BLIST_NOREPORTLUN)
		return 1;
	if (starget->scsi_level < SCSI_2 &&
	    starget->scsi_level != SCSI_UNKNOWN)
		return 1;
	if (starget->scsi_level < SCSI_3 &&
	    (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
		return 1;
	if (bflags & BLIST_NOLUN)
		return 0;
	if (starget->no_report_luns)
		return 1;

	if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
		sdev = scsi_alloc_sdev(starget, 0, NULL);
		if (!sdev)
			return 0;
		if (scsi_device_get(sdev)) {
			__scsi_remove_device(sdev);
			return 0;
		}
	}

	/*
	 * Allocate enough to hold the header (the same size as one scsi_lun)
	 * plus the number of luns we are requesting.  511 was the default
	 * value of the now removed max_report_luns parameter.
	 */
	length = (511 + 1) * sizeof(struct scsi_lun);
retry:
	lun_data = kmalloc(length, GFP_KERNEL |
			   (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
	if (!lun_data) {
		printk(ALLOC_FAILURE_MSG, __func__);
		goto out;
	}

	scsi_cmd[0] = REPORT_LUNS;

	/*
	 * bytes 1 - 5: reserved, set to zero.
	 */
	memset(&scsi_cmd[1], 0, 5);

	/*
	 * bytes 6 - 9: length of the command.
	 */
	put_unaligned_be32(length, &scsi_cmd[6]);

	scsi_cmd[10] = 0;	/* reserved */
	scsi_cmd[11] = 0;	/* control */

	/*
	 * We can get a UNIT ATTENTION, for example a power on/reset, so
	 * retry sending the REPORT LUNS command and update the
	 * bus scan sequence number.
	 *
	 * Some devices (USB key devices, in particular), may not
	 * correctly fill out their LUN fields, so retry up to 3 times if
	 * a UNIT ATTENTION is received; other sense keys terminate the
	 * retry loop.
	 */
	for (retries = 0; retries < 3; retries++) {
		SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
				"scsi scan: Sending REPORT LUNS to (try %d)\n",
				retries));

		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
					  lun_data, length, &sshdr,
					  SCSI_REPORT_LUNS_TIMEOUT, 3, NULL);

		SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
				"scsi scan: REPORT LUNS"
				" %s (try %d) result 0x%x\n",
				result ?  "failed" : "successful",
				retries, result));
		if (result == 0)
			break;
		else if (scsi_sense_valid(&sshdr)) {
			if (sshdr.sense_key != UNIT_ATTENTION)
				break;
		}
	}

	if (result) {
		/*
		 * The device probably does not support a REPORT LUN command
		 */
		ret = 1;
		goto out_err;
	}

	/*
	 * If the returned LUN list doesn't fit, reallocate with the size
	 * the target reports in the header and retry the command.
	 */
	if (get_unaligned_be32(lun_data->scsi_lun) +
	    sizeof(struct scsi_lun) > length) {
		length = get_unaligned_be32(lun_data->scsi_lun) +
			 sizeof(struct scsi_lun);
		kfree(lun_data);
		goto retry;
	}
	length = get_unaligned_be32(lun_data->scsi_lun);

	num_luns = (length / sizeof(struct scsi_lun));

	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
		"scsi scan: REPORT LUN scan\n"));

	/*
	 * Scan the luns in lun_data. The entry at offset 0 is really
	 * the header, so start at 1 and go up to and including num_luns.
	 */
	for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
		lun = scsilun_to_int(lunp);

		if (lun > sdev->host->max_lun) {
			sdev_printk(KERN_WARNING, sdev,
				    "lun%llu has a LUN larger than"
				    " allowed by the host adapter\n", lun);
		} else {
			int res;

			res = scsi_probe_and_add_lun(starget,
				lun, NULL, NULL, rescan, NULL);
			if (res == SCSI_SCAN_NO_RESPONSE) {
				/*
				 * Got some results, but now none, abort.
				 */
				sdev_printk(KERN_ERR, sdev,
					"Unexpected response"
					" from lun %llu while scanning, scan"
					" aborted\n", (unsigned long long)lun);
				break;
			}
		}
	}

 out_err:
	kfree(lun_data);
 out:
	if (scsi_device_created(sdev))
		/*
		 * the sdev we used didn't appear in the report luns scan
		 */
		__scsi_remove_device(sdev);
	scsi_device_put(sdev);
	return ret;
}
1477
1478struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1479 uint id, u64 lun, void *hostdata)
1480{
1481 struct scsi_device *sdev = ERR_PTR(-ENODEV);
1482 struct device *parent = &shost->shost_gendev;
1483 struct scsi_target *starget;
1484
1485 if (strncmp(scsi_scan_type, "none", 4) == 0)
1486 return ERR_PTR(-ENODEV);
1487
1488 starget = scsi_alloc_target(parent, channel, id);
1489 if (!starget)
1490 return ERR_PTR(-ENOMEM);
1491 scsi_autopm_get_target(starget);
1492
1493 mutex_lock(&shost->scan_mutex);
1494 if (!shost->async_scan)
1495 scsi_complete_async_scans();
1496
1497 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1498 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
1499 scsi_autopm_put_host(shost);
1500 }
1501 mutex_unlock(&shost->scan_mutex);
1502 scsi_autopm_put_target(starget);
1503
1504
1505
1506
1507 scsi_target_reap(starget);
1508 put_device(&starget->dev);
1509
1510 return sdev;
1511}
1512EXPORT_SYMBOL(__scsi_add_device);
1513
1514int scsi_add_device(struct Scsi_Host *host, uint channel,
1515 uint target, u64 lun)
1516{
1517 struct scsi_device *sdev =
1518 __scsi_add_device(host, channel, target, lun, NULL);
1519 if (IS_ERR(sdev))
1520 return PTR_ERR(sdev);
1521
1522 scsi_device_put(sdev);
1523 return 0;
1524}
1525EXPORT_SYMBOL(scsi_add_device);
1526
/*
 * scsi_rescan_device - re-read capacity/VPD and notify interested parties
 * @dev: the gendev of the scsi_device to rescan
 *
 * Refreshes the cached VPD pages, then gives first the device handler
 * and then the attached upper-level driver (sd, sr, ...) a chance to
 * revalidate, all under the device lock.
 */
void scsi_rescan_device(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	device_lock(dev);

	scsi_attach_vpd(sdev);

	if (sdev->handler && sdev->handler->rescan)
		sdev->handler->rescan(sdev);

	/* hold a module ref so the driver can't unload mid-rescan */
	if (dev->driver && try_module_get(dev->driver->owner)) {
		struct scsi_driver *drv = to_scsi_driver(dev->driver);

		if (drv->rescan)
			drv->rescan(dev);
		module_put(dev->driver->owner);
	}
	device_unlock(dev);
}
1547EXPORT_SYMBOL(scsi_rescan_device);
1548
/*
 * Scan one target: either a single specified LUN, or LUN 0 followed by
 * a REPORT LUNS scan (falling back to a sequential scan if REPORT LUNS
 * is not usable).  Caller must hold shost->scan_mutex.
 */
static void __scsi_scan_target(struct device *parent, unsigned int channel,
		unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	int bflags = 0;
	int res;
	struct scsi_target *starget;

	if (shost->this_id == id)
		/*
		 * Don't scan the host adapter
		 */
		return;

	starget = scsi_alloc_target(parent, channel, id);
	if (!starget)
		return;
	scsi_autopm_get_target(starget);

	if (lun != SCAN_WILD_CARD) {
		/*
		 * Scan for a specific host/chan/id/lun.
		 */
		scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
		goto out_reap;
	}

	/*
	 * Scan LUN 0, if there is some response, scan further. Ideally, we
	 * would not configure LUN 0 until all LUNs are scanned.
	 */
	res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
	if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
		if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
			/*
			 * The REPORT LUN did not scan the target,
			 * do a sequential scan.
			 */
			scsi_sequential_lun_scan(starget, bflags,
						 starget->scsi_level, rescan);
	}

 out_reap:
	scsi_autopm_put_target(starget);
	/*
	 * paired with scsi_alloc_target(): determine if the target has
	 * any children at all, if not, nuke it
	 */
	scsi_target_reap(starget);

	put_device(&starget->dev);
}
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
/**
 * scsi_scan_target - scan a target id, possibly including all LUNs on the
 *     target.
 * @parent:	host to scan
 * @channel:	channel to scan
 * @id:		target id to scan
 * @lun:	Specific LUN to scan or SCAN_WILD_CARD
 * @rescan:	passed to LUN scanning routines; SCSI_SCAN_INITIAL for
 *              no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
 *              and SCSI_SCAN_MANUAL to force scanning even if
 *              'scan=manual' is set.
 *
 * Description:
 *     Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
 *     and possibly all LUNs on the target id.
 *
 *     First try a REPORT LUN scan, if that does not scan the target, do a
 *     sequential scan of LUNs on the target id.
 **/
void scsi_scan_target(struct device *parent, unsigned int channel,
		      unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);

	if (strncmp(scsi_scan_type, "none", 4) == 0)
		return;

	if (rescan != SCSI_SCAN_MANUAL &&
	    strncmp(scsi_scan_type, "manual", 6) == 0)
		return;

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		__scsi_scan_target(parent, channel, id, lun, rescan);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);
}
1642EXPORT_SYMBOL(scsi_scan_target);
1643
1644static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
1645 unsigned int id, u64 lun,
1646 enum scsi_scan_mode rescan)
1647{
1648 uint order_id;
1649
1650 if (id == SCAN_WILD_CARD)
1651 for (id = 0; id < shost->max_id; ++id) {
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661 if (shost->reverse_ordering)
1662
1663
1664
1665 order_id = shost->max_id - id - 1;
1666 else
1667 order_id = id;
1668 __scsi_scan_target(&shost->shost_gendev, channel,
1669 order_id, lun, rescan);
1670 }
1671 else
1672 __scsi_scan_target(&shost->shost_gendev, channel,
1673 id, lun, rescan);
1674}
1675
1676int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1677 unsigned int id, u64 lun,
1678 enum scsi_scan_mode rescan)
1679{
1680 SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
1681 "%s: <%u:%u:%llu>\n",
1682 __func__, channel, id, lun));
1683
1684 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1685 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1686 ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
1687 return -EINVAL;
1688
1689 mutex_lock(&shost->scan_mutex);
1690 if (!shost->async_scan)
1691 scsi_complete_async_scans();
1692
1693 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1694 if (channel == SCAN_WILD_CARD)
1695 for (channel = 0; channel <= shost->max_channel;
1696 channel++)
1697 scsi_scan_channel(shost, channel, id, lun,
1698 rescan);
1699 else
1700 scsi_scan_channel(shost, channel, id, lun, rescan);
1701 scsi_autopm_put_host(shost);
1702 }
1703 mutex_unlock(&shost->scan_mutex);
1704
1705 return 0;
1706}
1707
1708static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1709{
1710 struct scsi_device *sdev;
1711 shost_for_each_device(sdev, shost) {
1712
1713 if (sdev->sdev_state == SDEV_DEL)
1714 continue;
1715
1716 if (sdev->is_visible)
1717 continue;
1718 if (!scsi_host_scan_allowed(shost) ||
1719 scsi_sysfs_add_sdev(sdev) != 0)
1720 __scsi_remove_device(sdev);
1721 }
1722}
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
1735{
1736 struct async_scan_data *data;
1737 unsigned long flags;
1738
1739 if (strncmp(scsi_scan_type, "sync", 4) == 0)
1740 return NULL;
1741
1742 if (shost->async_scan) {
1743 shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
1744 return NULL;
1745 }
1746
1747 data = kmalloc(sizeof(*data), GFP_KERNEL);
1748 if (!data)
1749 goto err;
1750 data->shost = scsi_host_get(shost);
1751 if (!data->shost)
1752 goto err;
1753 init_completion(&data->prev_finished);
1754
1755 mutex_lock(&shost->scan_mutex);
1756 spin_lock_irqsave(shost->host_lock, flags);
1757 shost->async_scan = 1;
1758 spin_unlock_irqrestore(shost->host_lock, flags);
1759 mutex_unlock(&shost->scan_mutex);
1760
1761 spin_lock(&async_scan_lock);
1762 if (list_empty(&scanning_hosts))
1763 complete(&data->prev_finished);
1764 list_add_tail(&data->list, &scanning_hosts);
1765 spin_unlock(&async_scan_lock);
1766
1767 return data;
1768
1769 err:
1770 kfree(data);
1771 return NULL;
1772}
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782static void scsi_finish_async_scan(struct async_scan_data *data)
1783{
1784 struct Scsi_Host *shost;
1785 unsigned long flags;
1786
1787 if (!data)
1788 return;
1789
1790 shost = data->shost;
1791
1792 mutex_lock(&shost->scan_mutex);
1793
1794 if (!shost->async_scan) {
1795 shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
1796 dump_stack();
1797 mutex_unlock(&shost->scan_mutex);
1798 return;
1799 }
1800
1801 wait_for_completion(&data->prev_finished);
1802
1803 scsi_sysfs_add_devices(shost);
1804
1805 spin_lock_irqsave(shost->host_lock, flags);
1806 shost->async_scan = 0;
1807 spin_unlock_irqrestore(shost->host_lock, flags);
1808
1809 mutex_unlock(&shost->scan_mutex);
1810
1811 spin_lock(&async_scan_lock);
1812 list_del(&data->list);
1813 if (!list_empty(&scanning_hosts)) {
1814 struct async_scan_data *next = list_entry(scanning_hosts.next,
1815 struct async_scan_data, list);
1816 complete(&next->prev_finished);
1817 }
1818 spin_unlock(&async_scan_lock);
1819
1820 scsi_autopm_put_host(shost);
1821 scsi_host_put(shost);
1822 kfree(data);
1823}
1824
1825static void do_scsi_scan_host(struct Scsi_Host *shost)
1826{
1827 if (shost->hostt->scan_finished) {
1828 unsigned long start = jiffies;
1829 if (shost->hostt->scan_start)
1830 shost->hostt->scan_start(shost);
1831
1832 while (!shost->hostt->scan_finished(shost, jiffies - start))
1833 msleep(10);
1834 } else {
1835 scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
1836 SCAN_WILD_CARD, 0);
1837 }
1838}
1839
1840static void do_scan_async(void *_data, async_cookie_t c)
1841{
1842 struct async_scan_data *data = _data;
1843 struct Scsi_Host *shost = data->shost;
1844
1845 do_scsi_scan_host(shost);
1846 scsi_finish_async_scan(data);
1847}
1848
1849
1850
1851
1852
1853void scsi_scan_host(struct Scsi_Host *shost)
1854{
1855 struct async_scan_data *data;
1856
1857 if (strncmp(scsi_scan_type, "none", 4) == 0 ||
1858 strncmp(scsi_scan_type, "manual", 6) == 0)
1859 return;
1860 if (scsi_autopm_get_host(shost) < 0)
1861 return;
1862
1863 data = scsi_prep_async_scan(shost);
1864 if (!data) {
1865 do_scsi_scan_host(shost);
1866 scsi_autopm_put_host(shost);
1867 return;
1868 }
1869
1870
1871
1872
1873 async_schedule(do_scan_async, data);
1874
1875
1876}
1877EXPORT_SYMBOL(scsi_scan_host);
1878
/*
 * scsi_forget_host - remove every device attached to @shost.
 *
 * The host lock is dropped before each removal — presumably because
 * __scsi_remove_device() may sleep — and the list walk is restarted
 * from scratch afterwards, since the device list may have changed while
 * the lock was released.  Devices already in SDEV_DEL are skipped.
 */
void scsi_forget_host(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;

 restart:
	spin_lock_irqsave(shost->host_lock, flags);
	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (sdev->sdev_state == SDEV_DEL)
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);
		__scsi_remove_device(sdev);
		goto restart;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
1914{
1915 struct scsi_device *sdev = NULL;
1916 struct scsi_target *starget;
1917
1918 mutex_lock(&shost->scan_mutex);
1919 if (!scsi_host_scan_allowed(shost))
1920 goto out;
1921 starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
1922 if (!starget)
1923 goto out;
1924
1925 sdev = scsi_alloc_sdev(starget, 0, NULL);
1926 if (sdev)
1927 sdev->borken = 0;
1928 else
1929 scsi_target_reap(starget);
1930 put_device(&starget->dev);
1931 out:
1932 mutex_unlock(&shost->scan_mutex);
1933 return sdev;
1934}
1935EXPORT_SYMBOL(scsi_get_host_dev);
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945void scsi_free_host_dev(struct scsi_device *sdev)
1946{
1947 BUG_ON(sdev->id != sdev->host->this_id);
1948
1949 __scsi_remove_device(sdev);
1950}
1951EXPORT_SYMBOL(scsi_free_host_dev);
1952
1953