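/*
 * scsi_scan.c
 *
 * Scan a SCSI bus: probe each possible target/LUN with INQUIRY and
 * REPORT LUNS, allocate scsi_target and scsi_device structures for the
 * LUNs that respond, and register them with the midlayer and sysfs.
 */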
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_dh.h>
#include <scsi/scsi_eh.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define ALLOC_FAILURE_MSG	KERN_ERR "%s: Allocation failure during" \
	" SCSI scanning, some SCSI devices might not be configured\n"

#define SCSI_TIMEOUT (2*HZ)
#define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)

/*
 * Prefix values for the SCSI id's (stored in sysfs name field)
 */
#define SCSI_UID_SER_NUM 'S'
#define SCSI_UID_UNKNOWN 'Z'

/*
 * Return values of some of the scanning functions.
 *
 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target.
 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available
 * at the given LUN.
 * SCSI_SCAN_LUN_PRESENT: a device is available at the given LUN.
 */
#define SCSI_SCAN_NO_RESPONSE		0
#define SCSI_SCAN_TARGET_PRESENT	1
#define SCSI_SCAN_LUN_PRESENT		2

static const char *scsi_null_device_strs = "nullnullnullnull";

#define MAX_SCSI_LUNS	512

static u64 max_scsi_luns = MAX_SCSI_LUNS;

module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(max_luns,
		 "last scsi LUN (should be between 1 and 2^64-1)");

#ifdef CONFIG_SCSI_SCAN_ASYNC
#define SCSI_SCAN_TYPE_DEFAULT "async"
#else
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif

char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;

module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
		    S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
		 "Setting to 'manual' disables automatic scanning, but allows "
		 "for manual device scan via the 'scan' sysfs attribute.");

static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;

module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(inq_timeout,
		 "Timeout (in seconds) waiting for devices to answer INQUIRY."
		 " Default is 20. Some devices may need more; most need less.");

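/*
 * Illustrative (non-exhaustive) examples of how the parameters above can
 * be set on the kernel command line when the SCSI core is built in:
 *
 *	scsi_mod.max_luns=8	 limit sequential LUN scans to LUNs 0-7
 *	scsi_mod.scan=manual	 no automatic scanning; scans must be
 *				 requested via the 'scan' sysfs attribute
 *	scsi_mod.inq_timeout=30	 give slow devices 30s to answer INQUIRY
 */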

static DEFINE_SPINLOCK(async_scan_lock);
static LIST_HEAD(scanning_hosts);

struct async_scan_data {
	struct list_head list;
	struct Scsi_Host *shost;
	struct completion prev_finished;
};

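/**
 * scsi_complete_async_scans - Wait for asynchronous scans to complete
 *
 * When this function returns, any host which started scanning before
 * this function was called will have finished its scan.  Hosts which
 * started scanning after this function was called may or may not have
 * finished.
 */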
int scsi_complete_async_scans(void)
{
	struct async_scan_data *data;

	do {
		if (list_empty(&scanning_hosts))
			return 0;
		/*
		 * If we can't get memory immediately, that's OK.  Just
		 * sleep a little.  Even if we never get memory, the async
		 * scans will finish eventually.
		 */
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			msleep(1);
	} while (!data);

	data->shost = NULL;
	init_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	/* Check that there's still somebody else on the list */
	if (list_empty(&scanning_hosts))
		goto done;
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
	wait_for_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	list_del(&data->list);
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
				struct async_scan_data, list);
		complete(&next->prev_finished);
	}
 done:
	spin_unlock(&async_scan_lock);

	kfree(data);
	return 0;
}

/**
 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
 * @sdev:	scsi device to send command to
 * @result:	area to store the result of the MODE SENSE
 *
 * Description:
 *     Send a vendor specific MODE SENSE (not a MODE SELECT) command.
 *     Called for BLIST_KEY devices.
 **/
static void scsi_unlock_floptical(struct scsi_device *sdev,
				  unsigned char *result)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];

	sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
	scsi_cmd[0] = MODE_SENSE;
	scsi_cmd[1] = 0;
	scsi_cmd[2] = 0x2e;
	scsi_cmd[3] = 0;
	scsi_cmd[4] = 0x2a;	/* size */
	scsi_cmd[5] = 0;
	scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
			 SCSI_TIMEOUT, 3, NULL);
}

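/**
 * scsi_alloc_sdev - allocate and set up a scsi_device
 * @starget: which target to allocate a &scsi_device for
 * @lun: which lun
 * @hostdata: usually NULL and set by ->slave_alloc instead
 *
 * Description:
 *     Allocate, initialize for io, and return a pointer to a scsi_device.
 *     Stores the @starget, @lun, and host in the scsi_device, and adds the
 *     scsi_device to the appropriate lists.
 *
 * Return value:
 *     scsi_device pointer, or NULL on failure.
 **/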
static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
					   u64 lun, void *hostdata)
{
	unsigned int depth;
	struct scsi_device *sdev;
	int display_failure_msg = 1, ret;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
		       GFP_KERNEL);
	if (!sdev)
		goto out;

	sdev->vendor = scsi_null_device_strs;
	sdev->model = scsi_null_device_strs;
	sdev->rev = scsi_null_device_strs;
	sdev->host = shost;
	sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
	sdev->id = starget->id;
	sdev->lun = lun;
	sdev->channel = starget->channel;
	mutex_init(&sdev->state_mutex);
	sdev->sdev_state = SDEV_CREATED;
	INIT_LIST_HEAD(&sdev->siblings);
	INIT_LIST_HEAD(&sdev->same_target_siblings);
	INIT_LIST_HEAD(&sdev->starved_entry);
	INIT_LIST_HEAD(&sdev->event_list);
	spin_lock_init(&sdev->list_lock);
	mutex_init(&sdev->inquiry_mutex);
	INIT_WORK(&sdev->event_work, scsi_evt_thread);
	INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);

	sdev->sdev_gendev.parent = get_device(&starget->dev);
	sdev->sdev_target = starget;

	/* usually NULL and set by ->slave_alloc instead */
	sdev->hostdata = hostdata;

	/* if the device needs this changing, it may do so in the
	 * slave_configure function */
	sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;

	/*
	 * Some low level driver could use device->type
	 */
	sdev->type = -1;

	/*
	 * Assume that the device will have handshaking problems,
	 * and then fix this field later if it turns out it
	 * doesn't.
	 */
	sdev->borken = 1;

	sdev->request_queue = scsi_mq_alloc_queue(sdev);
	if (!sdev->request_queue) {
		/* release fn is set up in scsi_sysfs_device_initialise, so
		 * have to free and put manually here */
		put_device(&starget->dev);
		kfree(sdev);
		goto out;
	}
	WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
	sdev->request_queue->queuedata = sdev;

	depth = sdev->host->cmd_per_lun ?: 1;

	/*
	 * Use .can_queue as budget map's depth because we have to
	 * support adjusting queue depth from sysfs.  Meantime use the
	 * default device queue depth to figure out the sbitmap shift,
	 * since that is the queue depth used most of the time.
	 */
	if (sbitmap_init_node(&sdev->budget_map,
			      scsi_device_max_queue_depth(sdev),
			      sbitmap_calculate_shift(depth),
			      GFP_KERNEL, sdev->request_queue->node,
			      false, true)) {
		put_device(&starget->dev);
		kfree(sdev);
		goto out;
	}

	scsi_change_queue_depth(sdev, depth);

	scsi_sysfs_device_initialize(sdev);

	if (shost->hostt->slave_alloc) {
		ret = shost->hostt->slave_alloc(sdev);
		if (ret) {
			/*
			 * if LLDD reports slave not present, don't clutter
			 * console with alloc failure messages
			 */
			if (ret == -ENXIO)
				display_failure_msg = 0;
			goto out_device_destroy;
		}
	}

	return sdev;

out_device_destroy:
	__scsi_remove_device(sdev);
out:
	if (display_failure_msg)
		printk(ALLOC_FAILURE_MSG, __func__);
	return NULL;
}

static void scsi_target_destroy(struct scsi_target *starget)
{
	struct device *dev = &starget->dev;
	struct Scsi_Host *shost = dev_to_shost(dev->parent);
	unsigned long flags;

	BUG_ON(starget->state == STARGET_DEL);
	starget->state = STARGET_DEL;
	transport_destroy_device(dev);
	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->hostt->target_destroy)
		shost->hostt->target_destroy(starget);
	list_del_init(&starget->siblings);
	spin_unlock_irqrestore(shost->host_lock, flags);
	put_device(dev);
}

static void scsi_target_dev_release(struct device *dev)
{
	struct device *parent = dev->parent;
	struct scsi_target *starget = to_scsi_target(dev);

	kfree(starget);
	put_device(parent);
}

static struct device_type scsi_target_type = {
	.name =		"scsi_target",
	.release =	scsi_target_dev_release,
};

int scsi_is_target_device(const struct device *dev)
{
	return dev->type == &scsi_target_type;
}
EXPORT_SYMBOL(scsi_is_target_device);

static struct scsi_target *__scsi_find_target(struct device *parent,
					      int channel, uint id)
{
	struct scsi_target *starget, *found_starget = NULL;
	struct Scsi_Host *shost = dev_to_shost(parent);
	/*
	 * Search for an existing target for this channel/id pair.
	 */
	list_for_each_entry(starget, &shost->__targets, siblings) {
		if (starget->id == id &&
		    starget->channel == channel) {
			found_starget = starget;
			break;
		}
	}
	if (found_starget)
		get_device(&found_starget->dev);

	return found_starget;
}

/**
 * scsi_target_reap_ref_release - remove target from visibility
 * @kref: the reap_ref in the target being released
 *
 * Called on last put of reap_ref, which is the indication that no device
 * under this target is visible anymore, so render the target invisible in
 * sysfs.  Note: we have to be in user context here because the target reaps
 * should be done in places where the scsi device visibility is being removed.
 */
static void scsi_target_reap_ref_release(struct kref *kref)
{
	struct scsi_target *starget
		= container_of(kref, struct scsi_target, reap_ref);

	/*
	 * if we get here and the target is still in a CREATED state that
	 * means it was allocated but never made visible (because a scan
	 * turned up no LUNs), so don't call device_del() on it.
	 */
	if ((starget->state != STARGET_CREATED) &&
	    (starget->state != STARGET_CREATED_REMOVE)) {
		transport_remove_device(&starget->dev);
		device_del(&starget->dev);
	}
	scsi_target_destroy(starget);
}

static void scsi_target_reap_ref_put(struct scsi_target *starget)
{
	kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
}

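/**
 * scsi_alloc_target - allocate a new or find an existing target
 * @parent:	parent of the target (need not be a scsi host)
 * @channel:	target channel number (zero if no channels)
 * @id:		target id number
 *
 * Return an existing target if one exists, provided it hasn't already
 * gone into STARGET_DEL state, otherwise allocate a new target.
 *
 * The target is returned with an incremented reference, so the caller
 * is responsible for both reaping and doing a last put.
 */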
static struct scsi_target *scsi_alloc_target(struct device *parent,
					     int channel, uint id)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	struct device *dev = NULL;
	unsigned long flags;
	const int size = sizeof(struct scsi_target)
		+ shost->transportt->target_size;
	struct scsi_target *starget;
	struct scsi_target *found_target;
	int error, ref_got;

	starget = kzalloc(size, GFP_KERNEL);
	if (!starget) {
		printk(KERN_ERR "%s: allocation failure\n", __func__);
		return NULL;
	}
	dev = &starget->dev;
	device_initialize(dev);
	kref_init(&starget->reap_ref);
	dev->parent = get_device(parent);
	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
	dev->bus = &scsi_bus_type;
	dev->type = &scsi_target_type;
	starget->id = id;
	starget->channel = channel;
	starget->can_queue = 0;
	INIT_LIST_HEAD(&starget->siblings);
	INIT_LIST_HEAD(&starget->devices);
	starget->state = STARGET_CREATED;
	starget->scsi_level = SCSI_2;
	starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
 retry:
	spin_lock_irqsave(shost->host_lock, flags);

	found_target = __scsi_find_target(parent, channel, id);
	if (found_target)
		goto found;

	list_add_tail(&starget->siblings, &shost->__targets);
	spin_unlock_irqrestore(shost->host_lock, flags);
	/* allocate and add */
	transport_setup_device(dev);
	if (shost->hostt->target_alloc) {
		error = shost->hostt->target_alloc(starget);

		if (error) {
			dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
			/* don't want scsi_target_reap to do the final
			 * put because it will be under the host lock */
			scsi_target_destroy(starget);
			return NULL;
		}
	}
	get_device(dev);

	return starget;

 found:
	/*
	 * The release routine has already fired if the kref is zero, so if
	 * we can still take the reference, the target must be alive.  If
	 * we can't, it must be dying and we need to wait for a new target.
	 */
	ref_got = kref_get_unless_zero(&found_target->reap_ref);

	spin_unlock_irqrestore(shost->host_lock, flags);
	if (ref_got) {
		put_device(dev);
		return found_target;
	}
	/*
	 * We found a dying target; it is already invisible, so don't call
	 * scsi_target_reap() on it (that could do a spurious device_del()).
	 * Just drop the device reference and wait for it to go away.
	 */
	put_device(&found_target->dev);
	/*
	 * The length of time is irrelevant here, we just want to yield the
	 * CPU for a tick to avoid busy waiting for the target to die.
	 */
	msleep(1);
	goto retry;
}

/**
 * scsi_target_reap - check to see if target is in use and destroy if not
 * @starget: target to be checked
 *
 * This is used after removing a LUN or doing a last put of the target
 * it checks atomically that nothing is using the target and removes
 * it if so.
 */
void scsi_target_reap(struct scsi_target *starget)
{
	/*
	 * serious problem if this triggers: STARGET_DEL is only set when
	 * the reap_ref drops to zero, so we're trying to do another final
	 * put on an already released kref
	 */
	BUG_ON(starget->state == STARGET_DEL);
	scsi_target_reap_ref_put(starget);
}

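/**
 * scsi_sanitize_inquiry_string - remove non-graphic chars from an
 *                                INQUIRY result string
 * @s: INQUIRY result string to sanitize
 * @len: length of the string
 *
 * Description:
 *	The SCSI spec says that INQUIRY vendor, product, and revision
 *	strings must consist entirely of graphic ASCII characters,
 *	padded on the right with spaces.  Since not all devices obey
 *	this rule, we will replace non-graphic or non-ASCII characters
 *	with spaces.  Exception: a NUL character is interpreted as a
 *	string terminator, so all the following characters are set to
 *	spaces.
 **/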
void scsi_sanitize_inquiry_string(unsigned char *s, int len)
{
	int terminated = 0;

	for (; len > 0; (--len, ++s)) {
		if (*s == 0)
			terminated = 1;
		if (terminated || *s < 0x20 || *s > 0x7e)
			*s = ' ';
	}
}
EXPORT_SYMBOL(scsi_sanitize_inquiry_string);

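/**
 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
 * @sdev:	scsi_device to probe
 * @inq_result:	area to store the INQUIRY result
 * @result_len:	len of inq_result
 * @bflags:	store any bflags found here
 *
 * Description:
 *     Probe the lun associated with @sdev using a standard SCSI INQUIRY.
 *
 *     If the INQUIRY is successful, zero is returned, the INQUIRY data
 *     is in @inq_result, the scsi_level and INQUIRY length are copied
 *     to the scsi_device, and any device flags are stored in *@bflags.
 **/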
static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
			  int result_len, blist_flags_t *bflags)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int first_inquiry_len, try_inquiry_len, next_inquiry_len;
	int response_len = 0;
	int pass, count, result;
	struct scsi_sense_hdr sshdr;

	*bflags = 0;

	/* Perform up to 3 passes.  The first pass uses a conservative
	 * transfer length of 36 unless sdev->inquiry_len specifies a
	 * different value. */
	first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
	try_inquiry_len = first_inquiry_len;
	pass = 1;

 next_pass:
	SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY pass %d length %d\n",
				pass, try_inquiry_len));

	/* Each pass gets up to three chances to ignore Unit Attention */
	for (count = 0; count < 3; ++count) {
		int resid;

		memset(scsi_cmd, 0, 6);
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) try_inquiry_len;

		memset(inq_result, 0, try_inquiry_len);

		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
					  inq_result, try_inquiry_len, &sshdr,
					  HZ / 2 + HZ * scsi_inq_timeout, 3,
					  &resid);

		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY %s with code 0x%x\n",
				result ? "failed" : "successful", result));

		if (result) {
			/*
			 * not-ready to ready transition [asc/ascq=0x28/0x0]
			 * or power-on, reset [asc/ascq=0x29/0x0], continue.
			 * INQUIRY should not yield UNIT_ATTENTION
			 * but many buggy devices do so anyway.
			 */
			if (driver_byte(result) == DRIVER_SENSE &&
			    scsi_sense_valid(&sshdr)) {
				if ((sshdr.sense_key == UNIT_ATTENTION) &&
				    ((sshdr.asc == 0x28) ||
				     (sshdr.asc == 0x29)) &&
				    (sshdr.ascq == 0))
					continue;
			}
		} else {
			/*
			 * if nothing was transferred, we try
			 * again. It's a workaround for some USB
			 * devices.
			 */
			if (resid == try_inquiry_len)
				continue;
		}
		break;
	}

	if (result == 0) {
		scsi_sanitize_inquiry_string(&inq_result[8], 8);
		scsi_sanitize_inquiry_string(&inq_result[16], 16);
		scsi_sanitize_inquiry_string(&inq_result[32], 4);

		response_len = inq_result[4] + 5;
		if (response_len > 255)
			response_len = first_inquiry_len;	/* sanity */

		/*
		 * Get any flags for this device.
		 */
		*bflags = scsi_get_device_flags(sdev, &inq_result[8],
						&inq_result[16]);

		/* When the first pass succeeds we gain information about
		 * what larger transfer lengths might work. */
		if (pass == 1) {
			if (BLIST_INQUIRY_36 & *bflags)
				next_inquiry_len = 36;
			else if (sdev->inquiry_len)
				next_inquiry_len = sdev->inquiry_len;
			else
				next_inquiry_len = response_len;

			/* If more data is available perform the second pass */
			if (next_inquiry_len > try_inquiry_len) {
				try_inquiry_len = next_inquiry_len;
				pass = 2;
				goto next_pass;
			}
		}

	} else if (pass == 2) {
		sdev_printk(KERN_INFO, sdev,
			    "scsi scan: %d byte inquiry failed.  "
			    "Consider BLIST_INQUIRY_36 for this device\n",
			    try_inquiry_len);

		/* If this pass failed, the third pass goes back and repeats
		 * the first pass with the original transfer length */
		try_inquiry_len = first_inquiry_len;
		pass = 3;
		goto next_pass;
	}

	/* If the last transfer attempt got an error, assume the
	 * peripheral doesn't exist or is dead. */
	if (result)
		return -EIO;

	/* Don't report any more data than the device says is valid */
	sdev->inquiry_len = min(try_inquiry_len, response_len);

	/*
	 * A short INQUIRY (less than 36 bytes) would leave the vendor,
	 * model and revision strings only partially filled in.  Rather
	 * than failing the scan, assume the first 36 bytes are valid no
	 * matter what the device claims and pad the length up to 36.
	 */
	if (sdev->inquiry_len < 36) {
		if (!sdev->host->short_inquiry) {
			shost_printk(KERN_INFO, sdev->host,
				    "scsi scan: INQUIRY result too short (%d),"
				    " using 36\n", sdev->inquiry_len);
			sdev->host->short_inquiry = 1;
		}
		sdev->inquiry_len = 36;
	}

	/*
	 * The scanning code needs to know the scsi_level, even if no
	 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
	 * non-zero LUNs can be scanned.
	 */
	sdev->scsi_level = inq_result[2] & 0x07;
	if (sdev->scsi_level >= 2 ||
	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
		sdev->scsi_level++;
	sdev->sdev_target->scsi_level = sdev->scsi_level;

	/*
	 * If SCSI-2 or lower, and if the host does not forbid it, store
	 * the LUN value in CDB[1].
	 */
	sdev->lun_in_cdb = 0;
	if (sdev->scsi_level <= SCSI_2 &&
	    sdev->scsi_level != SCSI_UNKNOWN &&
	    !sdev->host->no_scsi2_lun_in_cdb)
		sdev->lun_in_cdb = 1;

	return 0;
}

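/**
 * scsi_add_lun - allocate and fully initialize a scsi_device
 * @sdev:	holds information to be stored in the new scsi_device
 * @inq_result:	holds the result of a previous INQUIRY to the LUN
 * @bflags:	black/white list flags
 * @async:	1 if this device is being scanned asynchronously
 *
 * Description:
 *     Initialize the scsi_device @sdev.  Optionally set fields based
 *     on values in *@bflags.
 *
 * Return:
 *     SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
 *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
 **/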
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
			blist_flags_t *bflags, int async)
{
	int ret;

	/*
	 * Keep a copy of the INQUIRY data for the vendor, model and
	 * revision strings.  Copy at least 36 bytes so that the pointers
	 * set up below always point at valid data.
	 */
	sdev->inquiry = kmemdup(inq_result,
				max_t(size_t, sdev->inquiry_len, 36),
				GFP_KERNEL);
	if (sdev->inquiry == NULL)
		return SCSI_SCAN_NO_RESPONSE;

	sdev->vendor = (char *) (sdev->inquiry + 8);
	sdev->model = (char *) (sdev->inquiry + 16);
	sdev->rev = (char *) (sdev->inquiry + 32);

	if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
		/*
		 * sata emulation layer device.  This is a hack to work around
		 * the SATL power management specifications which state that
		 * when the SATL detects the device has gone into standby
		 * mode, it shall respond with NOT READY.
		 */
		sdev->allow_restart = 1;
	}

	if (*bflags & BLIST_ISROM) {
		sdev->type = TYPE_ROM;
		sdev->removable = 1;
	} else {
		sdev->type = (inq_result[0] & 0x1f);
		sdev->removable = (inq_result[1] & 0x80) >> 7;

		/*
		 * some devices may respond with wrong type for
		 * well-known logical units. Force well-known type
		 * to enumerate them correctly.
		 */
		if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
			sdev_printk(KERN_WARNING, sdev,
				"%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
				__func__, sdev->type, (unsigned int)sdev->lun);
			sdev->type = TYPE_WLUN;
		}

	}

	if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
		/*
		 * RBC and MMC devices can return SCSI-3 compliance and yet
		 * still not support REPORT LUNS, so make them act as
		 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
		 * specifically set.
		 */
		if ((*bflags & BLIST_REPORTLUN2) == 0)
			*bflags |= BLIST_NOREPORTLUN;
	}

	/*
	 * Set the remaining per-device capability bits from the standard
	 * INQUIRY data: peripheral qualifier, soft reset support and the
	 * supported transfer negotiation options.
	 */
	sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
	sdev->lockable = sdev->removable;
	sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);

	if (sdev->scsi_level >= SCSI_3 ||
	    (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
		sdev->ppr = 1;
	if (inq_result[7] & 0x60)
		sdev->wdtr = 1;
	if (inq_result[7] & 0x10)
		sdev->sdtr = 1;

	sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
			"ANSI: %d%s\n", scsi_device_type(sdev->type),
			sdev->vendor, sdev->model, sdev->rev,
			sdev->inq_periph_qual, inq_result[2] & 0x07,
			(inq_result[3] & 0x0f) == 1 ? " CCS" : "");

	if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
	    !(*bflags & BLIST_NOTQ)) {
		sdev->tagged_supported = 1;
		sdev->simple_tags = 1;
	}

	/*
	 * Some devices (Texel CD ROM drives) have handshaking problems
	 * when used with the Seagate controllers. borken is initialized
	 * to 1, and then set it to 0 here.
	 */
	if ((*bflags & BLIST_BORKEN) == 0)
		sdev->borken = 0;

	if (*bflags & BLIST_NO_ULD_ATTACH)
		sdev->no_uld_attach = 1;

	/*
	 * Apparently some really broken devices can't handle
	 * SELECT with ATN properly.
	 */
	if (*bflags & BLIST_SELECT_NO_ATN)
		sdev->select_no_atn = 1;

	/*
	 * Maximum 512 sector transfer length
	 * broken RA4x00 Compaq Disk Array
	 */
	if (*bflags & BLIST_MAX_512)
		blk_queue_max_hw_sectors(sdev->request_queue, 512);
	/*
	 * Max 1024 sector transfer length for targets that report incorrect
	 * max/optimal lengths and relied on the old block layer safe default
	 */
	else if (*bflags & BLIST_MAX_1024)
		blk_queue_max_hw_sectors(sdev->request_queue, 1024);

	/*
	 * Some devices may not want to have a start command automatically
	 * issued when a device is added.
	 */
	if (*bflags & BLIST_NOSTARTONADD)
		sdev->no_start_on_add = 1;

	if (*bflags & BLIST_SINGLELUN)
		scsi_target(sdev)->single_lun = 1;

	sdev->use_10_for_rw = 1;

	/* some devices don't like REPORT SUPPORTED OPERATION CODES
	 * and will simply timeout causing sd_mod init to take a very
	 * very long time */
	if (*bflags & BLIST_NO_RSOC)
		sdev->no_report_opcodes = 1;

	/* set the device running here so that slave_configure
	 * may do I/O */
	mutex_lock(&sdev->state_mutex);
	ret = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (ret)
		ret = scsi_device_set_state(sdev, SDEV_BLOCK);
	mutex_unlock(&sdev->state_mutex);

	if (ret) {
		sdev_printk(KERN_ERR, sdev,
			    "in wrong state %s to complete scan\n",
			    scsi_device_state_name(sdev->sdev_state));
		return SCSI_SCAN_NO_RESPONSE;
	}

	if (*bflags & BLIST_NOT_LOCKABLE)
		sdev->lockable = 0;

	if (*bflags & BLIST_RETRY_HWERROR)
		sdev->retry_hwerror = 1;

	if (*bflags & BLIST_NO_DIF)
		sdev->no_dif = 1;

	if (*bflags & BLIST_UNMAP_LIMIT_WS)
		sdev->unmap_limit_for_ws = 1;

	sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;

	if (*bflags & BLIST_TRY_VPD_PAGES)
		sdev->try_vpd_pages = 1;
	else if (*bflags & BLIST_SKIP_VPD_PAGES)
		sdev->skip_vpd_pages = 1;

	transport_configure_device(&sdev->sdev_gendev);

	if (sdev->host->hostt->slave_configure) {
		ret = sdev->host->hostt->slave_configure(sdev);
		if (ret) {
			/*
			 * if LLDD reports slave not present, don't clutter
			 * console with failure messages
			 */
			if (ret != -ENXIO) {
				sdev_printk(KERN_ERR, sdev,
					"failed to configure device\n");
			}
			return SCSI_SCAN_NO_RESPONSE;
		}
	}

	if (sdev->scsi_level >= SCSI_3)
		scsi_attach_vpd(sdev);

	sdev->max_queue_depth = sdev->queue_depth;
	WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
	sdev->sdev_bflags = *bflags;

	/*
	 * Ok, the device is now all set up, we can
	 * register it and tell the rest of the kernel
	 * about it.
	 */
	if (!async && scsi_sysfs_add_sdev(sdev) != 0)
		return SCSI_SCAN_NO_RESPONSE;

	return SCSI_SCAN_LUN_PRESENT;
}


#ifdef CONFIG_SCSI_LOGGING
/**
 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
 * @buf:   Output buffer with at least end-first+1 bytes of space
 * @inq:   Inquiry buffer (input)
 * @first: Offset of string into inq
 * @end:   Index after last character in inq
 */
static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
				   unsigned first, unsigned end)
{
	unsigned term = 0, idx;

	for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) {
		if (inq[idx+first] > ' ') {
			buf[idx] = inq[idx+first];
			term = idx+1;
		} else {
			buf[idx] = ' ';
		}
	}
	buf[term] = 0;
	return buf;
}
#endif

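/**
 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
 * @starget:	pointer to target device structure
 * @lun:	LUN of target device
 * @bflagsp:	store bflags here if not NULL
 * @sdevp:	probe the LUN corresponding to this scsi_device
 * @rescan:	if not equal to SCSI_SCAN_INITIAL, skip some code only
 *		needed on the first scan
 * @hostdata:	passed to scsi_alloc_sdev()
 *
 * Description:
 *     Call scsi_probe_lun, if a LUN with an attached device is found,
 *     allocate and set it up by calling scsi_add_lun.
 *
 * Return:
 *   - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
 *   - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
 *     attached at the LUN
 *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
 **/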
static int scsi_probe_and_add_lun(struct scsi_target *starget,
				  u64 lun, blist_flags_t *bflagsp,
				  struct scsi_device **sdevp,
				  enum scsi_scan_mode rescan,
				  void *hostdata)
{
	struct scsi_device *sdev;
	unsigned char *result;
	blist_flags_t bflags;
	int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	/*
	 * The rescan flag is used as an optimization, the first scan of a
	 * host adapter calls into here with rescan == SCSI_SCAN_INITIAL.
	 */
	sdev = scsi_device_lookup_by_target(starget, lun);
	if (sdev) {
		if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
			SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: device exists on %s\n",
				dev_name(&sdev->sdev_gendev)));
			if (sdevp)
				*sdevp = sdev;
			else
				scsi_device_put(sdev);

			if (bflagsp)
				*bflagsp = scsi_get_device_flags(sdev,
								 sdev->vendor,
								 sdev->model);
			return SCSI_SCAN_LUN_PRESENT;
		}
		scsi_device_put(sdev);
	} else
		sdev = scsi_alloc_sdev(starget, lun, hostdata);
	if (!sdev)
		goto out;

	result = kmalloc(result_len, GFP_KERNEL);
	if (!result)
		goto out_free_sdev;

	if (scsi_probe_lun(sdev, result, result_len, &bflags))
		goto out_free_result;

	if (bflagsp)
		*bflagsp = bflags;
	/*
	 * result contains valid SCSI INQUIRY data.
	 */
	if ((result[0] >> 5) == 3) {
		/*
		 * For a Peripheral qualifier 3 (011b), the SCSI
		 * spec says: The device server is not capable of
		 * supporting a physical device on this logical
		 * unit.
		 *
		 * For disks, this implies that there is no
		 * logical disk configured at sdev->lun, but there
		 * is a target id responding.
		 */
		SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
				   " peripheral qualifier of 3, device not"
				   " added\n"))
		if (lun == 0) {
			SCSI_LOG_SCAN_BUS(1, {
				unsigned char vend[9];
				unsigned char mod[17];

				sdev_printk(KERN_INFO, sdev,
					"scsi scan: consider passing scsi_mod."
					"dev_flags=%s:%s:0x240 or 0x1000240\n",
					scsi_inq_str(vend, result, 8, 16),
					scsi_inq_str(mod, result, 16, 32));
			});
		}

		res = SCSI_SCAN_TARGET_PRESENT;
		goto out_free_result;
	}

	/*
	 * Some targets report a peripheral qualifier of 1 (device not
	 * connected at this LUN), and some non-standard targets instead
	 * report a peripheral device type of 0x1f.  Either way there is
	 * no device to add here, unless this is a well-known LUN.
	 */
	if (((result[0] >> 5) == 1 ||
	    (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
	    !scsi_is_wlun(lun)) {
		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
					"scsi scan: peripheral device type"
					" of 31, no device added\n"));
		res = SCSI_SCAN_TARGET_PRESENT;
		goto out_free_result;
	}

	res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (bflags & BLIST_KEY) {
			sdev->lockable = 0;
			scsi_unlock_floptical(sdev, result);
		}
	}

 out_free_result:
	kfree(result);
 out_free_sdev:
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (sdevp) {
			if (scsi_device_get(sdev) == 0) {
				*sdevp = sdev;
			} else {
				__scsi_remove_device(sdev);
				res = SCSI_SCAN_NO_RESPONSE;
			}
		}
	} else
		__scsi_remove_device(sdev);
 out:
	return res;
}

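/**
 * scsi_sequential_lun_scan - sequentially scan a SCSI target
 * @starget:	pointer to target structure to scan
 * @bflags:	black/white list flags for LUN 0
 * @scsi_level:	which version of the standard this device adheres to
 * @rescan:	passed to scsi_probe_and_add_lun()
 *
 * Description:
 *     Generally, scan from LUN 1 (LUN 0 is assumed to already have been
 *     scanned) to some maximum number of LUNs on the target.
 **/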
static void scsi_sequential_lun_scan(struct scsi_target *starget,
				     blist_flags_t bflags, int scsi_level,
				     enum scsi_scan_mode rescan)
{
	uint max_dev_lun;
	u64 sparse_lun, lun;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
		"scsi scan: Sequential scan\n"));

	max_dev_lun = min(max_scsi_luns, shost->max_lun);
	/*
	 * If this device is known to support sparse multiple units,
	 * override the other settings, and scan all of them. Normally,
	 * SCSI-3 devices should be scanned via the REPORT LUNS.
	 */
	if (bflags & BLIST_SPARSELUN) {
		max_dev_lun = shost->max_lun;
		sparse_lun = 1;
	} else
		sparse_lun = 0;

	/*
	 * If this device is known to support multiple units, override
	 * the other settings, and scan all of them.
	 */
	if (bflags & BLIST_FORCELUN)
		max_dev_lun = shost->max_lun;
	/*
	 * REGAL CDC-4X: avoid hang after LUN 4
	 */
	if (bflags & BLIST_MAX5LUN)
		max_dev_lun = min(5U, max_dev_lun);
	/*
	 * Do not scan SCSI-2 or lower devices past LUN 7, unless
	 * BLIST_LARGELUN.
	 */
	if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
		max_dev_lun = min(8U, max_dev_lun);
	else
		max_dev_lun = min(256U, max_dev_lun);

	/*
	 * We have already scanned LUN 0, so start at LUN 1. Keep scanning
	 * until we reach the max, or no LUN is found and we are not
	 * sparse_lun.
	 */
	for (lun = 1; lun < max_dev_lun; ++lun)
		if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
					    NULL) != SCSI_SCAN_LUN_PRESENT) &&
		    !sparse_lun)
			return;
}

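/**
 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
 * @starget: which target
 * @bflags: zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
 * @rescan: nonzero if we can skip code only needed on first scan
 *
 * Description:
 *   Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
 *   Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
 *
 *   If BLIST_REPORTLUN2 is set, scan a target that supports more than 8
 *   LUNs even if it's older than SCSI-3.
 *   If BLIST_NOREPORTLUN is set, return 1 always.
 *   If BLIST_NOLUN is set, return 0 always.
 *   If starget->no_report_luns is set, return 1 always.
 *
 * Return:
 *     0: scan completed (or no memory, so further scanning is futile)
 *     1: could not scan with REPORT LUN
 **/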
static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
				enum scsi_scan_mode rescan)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	unsigned int length;
	u64 lun;
	unsigned int num_luns;
	unsigned int retries;
	int result;
	struct scsi_lun *lunp, *lun_data;
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int ret = 0;

	/*
	 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
	 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
	 * support more than 8 LUNs.
	 * Don't attempt if the target doesn't support REPORT LUNS.
	 */
	if (bflags & BLIST_NOREPORTLUN)
		return 1;
	if (starget->scsi_level < SCSI_2 &&
	    starget->scsi_level != SCSI_UNKNOWN)
		return 1;
	if (starget->scsi_level < SCSI_3 &&
	    (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
		return 1;
	if (bflags & BLIST_NOLUN)
		return 0;
	if (starget->no_report_luns)
		return 1;

	if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
		sdev = scsi_alloc_sdev(starget, 0, NULL);
		if (!sdev)
			return 0;
		if (scsi_device_get(sdev)) {
			__scsi_remove_device(sdev);
			return 0;
		}
	}

	/*
	 * Allocate enough to hold the header (the same size as one scsi_lun)
	 * plus the number of luns we are requesting.  511 was the default
	 * value of the now removed max_report_luns parameter.
	 */
	length = (511 + 1) * sizeof(struct scsi_lun);
retry:
	lun_data = kmalloc(length, GFP_KERNEL);
	if (!lun_data) {
		printk(ALLOC_FAILURE_MSG, __func__);
		goto out;
	}

	scsi_cmd[0] = REPORT_LUNS;

	/*
	 * bytes 1 - 5: reserved, set to zero.
	 */
	memset(&scsi_cmd[1], 0, 5);

	/*
	 * bytes 6 - 9: length of the command.
	 */
	put_unaligned_be32(length, &scsi_cmd[6]);

	scsi_cmd[10] = 0;	/* reserved */
	scsi_cmd[11] = 0;	/* control */

	/*
	 * We can get a UNIT ATTENTION, for example a power on/reset, so
	 * retry sending the REPORT LUNS command a few times.
	 */
	for (retries = 0; retries < 3; retries++) {
		SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
				"scsi scan: Sending REPORT LUNS to (try %d)\n",
				retries));

		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
					  lun_data, length, &sshdr,
					  SCSI_REPORT_LUNS_TIMEOUT, 3, NULL);

		SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
				"scsi scan: REPORT LUNS"
				" %s (try %d) result 0x%x\n",
				result ? "failed" : "successful",
				retries, result));
		if (result == 0)
			break;
		else if (scsi_sense_valid(&sshdr)) {
			if (sshdr.sense_key != UNIT_ATTENTION)
				break;
		}
	}

	if (result) {
		/*
		 * The device probably does not support a REPORT LUN command.
		 */
		ret = 1;
		goto out_err;
	}

	/*
	 * Get the length from the first four bytes of lun_data.  If more
	 * LUNs were reported than fit in the buffer we allocated, retry
	 * with a buffer large enough for all of them.
	 */
	if (get_unaligned_be32(lun_data->scsi_lun) +
	    sizeof(struct scsi_lun) > length) {
		length = get_unaligned_be32(lun_data->scsi_lun) +
			 sizeof(struct scsi_lun);
		kfree(lun_data);
		goto retry;
	}
	length = get_unaligned_be32(lun_data->scsi_lun);

	num_luns = (length / sizeof(struct scsi_lun));

	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
		"scsi scan: REPORT LUN scan\n"));

	/*
	 * Scan the luns in lun_data. The entry at offset 0 is really
	 * the header, so start at 1 and go up to and including num_luns.
	 */
	for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
		lun = scsilun_to_int(lunp);

		if (lun > sdev->host->max_lun) {
			sdev_printk(KERN_WARNING, sdev,
				    "lun%llu has a LUN larger than"
				    " allowed by the host adapter\n", lun);
		} else {
			int res;

			res = scsi_probe_and_add_lun(starget,
				lun, NULL, NULL, rescan, NULL);
			if (res == SCSI_SCAN_NO_RESPONSE) {
				/*
				 * Got some results, but now none, abort.
				 */
				sdev_printk(KERN_ERR, sdev,
					"Unexpected response"
					" from lun %llu while scanning, scan"
					" aborted\n", (unsigned long long)lun);
				break;
			}
		}
	}

 out_err:
	kfree(lun_data);
 out:
	if (scsi_device_created(sdev))
		/*
		 * the sdev we used didn't appear in the report luns scan
		 */
		__scsi_remove_device(sdev);
	scsi_device_put(sdev);
	return ret;
}

struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
				      uint id, u64 lun, void *hostdata)
{
	struct scsi_device *sdev = ERR_PTR(-ENODEV);
	struct device *parent = &shost->shost_gendev;
	struct scsi_target *starget;

	if (strncmp(scsi_scan_type, "none", 4) == 0)
		return ERR_PTR(-ENODEV);

	starget = scsi_alloc_target(parent, channel, id);
	if (!starget)
		return ERR_PTR(-ENOMEM);
	scsi_autopm_get_target(starget);

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		scsi_probe_and_add_lun(starget, lun, NULL, &sdev,
				       SCSI_SCAN_RESCAN, hostdata);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);
	scsi_autopm_put_target(starget);

	/*
	 * paired with scsi_alloc_target().  Target will be destroyed unless
	 * scsi_probe_and_add_lun made an underlying device visible
	 */
	scsi_target_reap(starget);
	put_device(&starget->dev);

	return sdev;
}
EXPORT_SYMBOL(__scsi_add_device);

int scsi_add_device(struct Scsi_Host *host, uint channel,
		    uint target, u64 lun)
{
	struct scsi_device *sdev =
		__scsi_add_device(host, channel, target, lun, NULL);
	if (IS_ERR(sdev))
		return PTR_ERR(sdev);

	scsi_device_put(sdev);
	return 0;
}
EXPORT_SYMBOL(scsi_add_device);
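
/*
 * Example (illustrative only): a host driver that learns about a new
 * device at channel 0, id 1, lun 0 outside of a full bus scan could
 * register it with:
 *
 *	scsi_add_device(shost, 0, 1, 0);
 *
 * User space can request a comparable scan through the Scsi_Host
 * 'scan' sysfs attribute.
 */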

void scsi_rescan_device(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	device_lock(dev);

	scsi_attach_vpd(sdev);

	if (sdev->handler && sdev->handler->rescan)
		sdev->handler->rescan(sdev);

	if (dev->driver && try_module_get(dev->driver->owner)) {
		struct scsi_driver *drv = to_scsi_driver(dev->driver);

		if (drv->rescan)
			drv->rescan(dev);
		module_put(dev->driver->owner);
	}
	device_unlock(dev);
}
EXPORT_SYMBOL(scsi_rescan_device);

static void __scsi_scan_target(struct device *parent, unsigned int channel,
		unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	blist_flags_t bflags = 0;
	int res;
	struct scsi_target *starget;

	if (shost->this_id == id)
		/*
		 * Don't scan the host adapter
		 */
		return;

	starget = scsi_alloc_target(parent, channel, id);
	if (!starget)
		return;
	scsi_autopm_get_target(starget);

	if (lun != SCAN_WILD_CARD) {
		/*
		 * Scan for a specific host/chan/id/lun.
		 */
		scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
		goto out_reap;
	}

	/*
	 * Scan LUN 0, if there is some response, scan further. Ideally, we
	 * would not configure LUN 0 until all LUNs are scanned.
	 */
	res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
	if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
		if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
			/*
			 * The REPORT LUN did not scan the target,
			 * do a sequential scan.
			 */
			scsi_sequential_lun_scan(starget, bflags,
						 starget->scsi_level, rescan);
	}

 out_reap:
	scsi_autopm_put_target(starget);
	/*
	 * paired with scsi_alloc_target(): determine if the target has
	 * any children at all, if not, nuke it
	 */
	scsi_target_reap(starget);

	put_device(&starget->dev);
}

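/**
 * scsi_scan_target - scan a target id, possibly including all LUNs on the target
 * @parent:	host to scan
 * @channel:	channel to scan
 * @id:		target id to scan
 * @lun:	Specific LUN to scan or SCAN_WILD_CARD
 * @rescan:	passed to LUN scanning routines; SCSI_SCAN_INITIAL for
 *		no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
 *		and SCSI_SCAN_MANUAL to force scanning even if
 *		'scan=manual' is set.
 *
 * Description:
 *     Scan the target id on @parent, @channel, and @id.  Scan at least LUN 0,
 *     and possibly all LUNs on the target id.
 *
 *     First try a REPORT LUN scan; if that does not scan the target, do a
 *     sequential scan of LUNs on the target id.
 **/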
void scsi_scan_target(struct device *parent, unsigned int channel,
		      unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);

	if (strncmp(scsi_scan_type, "none", 4) == 0)
		return;

	if (rescan != SCSI_SCAN_MANUAL &&
	    strncmp(scsi_scan_type, "manual", 6) == 0)
		return;

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		__scsi_scan_target(parent, channel, id, lun, rescan);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);
}
EXPORT_SYMBOL(scsi_scan_target);

static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
			      unsigned int id, u64 lun,
			      enum scsi_scan_mode rescan)
{
	uint order_id;

	if (id == SCAN_WILD_CARD)
		for (id = 0; id < shost->max_id; ++id) {
			if (shost->reverse_ordering)
				/*
				 * Scan from high to low id.
				 */
				order_id = shost->max_id - id - 1;
			else
				order_id = id;
			__scsi_scan_target(&shost->shost_gendev, channel,
					   order_id, lun, rescan);
		}
	else
		__scsi_scan_target(&shost->shost_gendev, channel,
				   id, lun, rescan);
}

int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
			    unsigned int id, u64 lun,
			    enum scsi_scan_mode rescan)
{
	SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
		"%s: <%u:%u:%llu>\n",
		__func__, channel, id, lun));

	if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
	    ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
	    ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
		return -EINVAL;

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		if (channel == SCAN_WILD_CARD)
			for (channel = 0; channel <= shost->max_channel;
			     channel++)
				scsi_scan_channel(shost, channel, id, lun,
						  rescan);
		else
			scsi_scan_channel(shost, channel, id, lun, rescan);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);

	return 0;
}

static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	shost_for_each_device(sdev, shost) {
		/* target removed before the device could be added */
		if (sdev->sdev_state == SDEV_DEL)
			continue;
		/* If device is already visible, skip adding it to sysfs */
		if (sdev->is_visible)
			continue;
		if (!scsi_host_scan_allowed(shost) ||
		    scsi_sysfs_add_sdev(sdev) != 0)
			__scsi_remove_device(sdev);
	}
}

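/**
 * scsi_prep_async_scan - prepare for an async scan
 * @shost: the host which will be scanned
 * Returns: a cookie to be passed to scsi_finish_async_scan(), or NULL
 *
 * Tells the midlayer this host is going to do an asynchronous scan.
 * It reserves the host's position in the scanning list and ensures
 * that other asynchronous scans started after this one won't affect the
 * ordering of the discovered devices.
 */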
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
	struct async_scan_data *data = NULL;
	unsigned long flags;

	if (strncmp(scsi_scan_type, "sync", 4) == 0)
		return NULL;

	mutex_lock(&shost->scan_mutex);
	if (shost->async_scan) {
		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
		goto err;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;
	data->shost = scsi_host_get(shost);
	if (!data->shost)
		goto err;
	init_completion(&data->prev_finished);

	spin_lock_irqsave(shost->host_lock, flags);
	shost->async_scan = 1;
	spin_unlock_irqrestore(shost->host_lock, flags);
	mutex_unlock(&shost->scan_mutex);

	spin_lock(&async_scan_lock);
	if (list_empty(&scanning_hosts))
		complete(&data->prev_finished);
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	return data;

 err:
	mutex_unlock(&shost->scan_mutex);
	kfree(data);
	return NULL;
}

/**
 * scsi_finish_async_scan - asynchronous scan has finished
 * @data: cookie returned from earlier call to scsi_prep_async_scan()
 *
 * All the devices currently attached to this host have been found.
 * This function announces all the devices it has found to the rest
 * of the system.
 */
static void scsi_finish_async_scan(struct async_scan_data *data)
{
	struct Scsi_Host *shost;
	unsigned long flags;

	if (!data)
		return;

	shost = data->shost;

	mutex_lock(&shost->scan_mutex);

	if (!shost->async_scan) {
		shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
		dump_stack();
		mutex_unlock(&shost->scan_mutex);
		return;
	}

	wait_for_completion(&data->prev_finished);

	scsi_sysfs_add_devices(shost);

	spin_lock_irqsave(shost->host_lock, flags);
	shost->async_scan = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);

	mutex_unlock(&shost->scan_mutex);

	spin_lock(&async_scan_lock);
	list_del(&data->list);
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
				struct async_scan_data, list);
		complete(&next->prev_finished);
	}
	spin_unlock(&async_scan_lock);

	scsi_autopm_put_host(shost);
	scsi_host_put(shost);
	kfree(data);
}

static void do_scsi_scan_host(struct Scsi_Host *shost)
{
	if (shost->hostt->scan_finished) {
		unsigned long start = jiffies;
		if (shost->hostt->scan_start)
			shost->hostt->scan_start(shost);

		while (!shost->hostt->scan_finished(shost, jiffies - start))
			msleep(10);
	} else {
		scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
				SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
	}
}

static void do_scan_async(void *_data, async_cookie_t c)
{
	struct async_scan_data *data = _data;
	struct Scsi_Host *shost = data->shost;

	do_scsi_scan_host(shost);
	scsi_finish_async_scan(data);
}

/**
 * scsi_scan_host - scan the given adapter
 * @shost:	adapter to scan
 **/
void scsi_scan_host(struct Scsi_Host *shost)
{
	struct async_scan_data *data;

	if (strncmp(scsi_scan_type, "none", 4) == 0 ||
	    strncmp(scsi_scan_type, "manual", 6) == 0)
		return;
	if (scsi_autopm_get_host(shost) < 0)
		return;

	data = scsi_prep_async_scan(shost);
	if (!data) {
		do_scsi_scan_host(shost);
		scsi_autopm_put_host(shost);
		return;
	}

	/* register with the async subsystem so wait_for_device_probe()
	 * will flush this work
	 */
	async_schedule(do_scan_async, data);

	/* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
}
EXPORT_SYMBOL(scsi_scan_host);

void scsi_forget_host(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;

 restart:
	spin_lock_irqsave(shost->host_lock, flags);
	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (sdev->sdev_state == SDEV_DEL)
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);
		__scsi_remove_device(sdev);
		goto restart;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}

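/**
 * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself
 * @shost: Host that needs a scsi_device
 *
 * Lock status: None assumed.
 *
 * Returns:     The scsi_device or NULL
 *
 * Notes:
 *	Attach a single scsi_device to the Scsi_Host - this should
 *	be made to look like a "pseudo-device" that points to the
 *	HA itself.
 */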
struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
{
	struct scsi_device *sdev = NULL;
	struct scsi_target *starget;

	mutex_lock(&shost->scan_mutex);
	if (!scsi_host_scan_allowed(shost))
		goto out;
	starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
	if (!starget)
		goto out;

	sdev = scsi_alloc_sdev(starget, 0, NULL);
	if (sdev)
		sdev->borken = 0;
	else
		scsi_target_reap(starget);
	put_device(&starget->dev);
 out:
	mutex_unlock(&shost->scan_mutex);
	return sdev;
}
EXPORT_SYMBOL(scsi_get_host_dev);

/**
 * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself
 * @sdev: Host device to be freed
 *
 * Lock status: None assumed.
 *
 * Returns:     Nothing
 */
void scsi_free_host_dev(struct scsi_device *sdev)
{
	BUG_ON(sdev->id != sdev->host->this_id);

	__scsi_remove_device(sdev);
}
EXPORT_SYMBOL(scsi_free_host_dev);