1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <linux/init.h>
32#include <linux/blkdev.h>
33#include <linux/delay.h>
34#include <linux/kthread.h>
35#include <linux/spinlock.h>
36#include <linux/async.h>
37#include <linux/slab.h>
38#include <asm/unaligned.h>
39
40#include <scsi/scsi.h>
41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_device.h>
43#include <scsi/scsi_driver.h>
44#include <scsi/scsi_devinfo.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_transport.h>
47#include <scsi/scsi_dh.h>
48#include <scsi/scsi_eh.h>
49
50#include "scsi_priv.h"
51#include "scsi_logging.h"
52
53#define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \
54 " SCSI scanning, some SCSI devices might not be configured\n"
55
56
57
58
59#define SCSI_TIMEOUT (2*HZ)
60#define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)
61
62
63
64
65#define SCSI_UID_SER_NUM 'S'
66#define SCSI_UID_UNKNOWN 'Z'
67
68
69
70
71
72
73
74
75
76
77
78
79
80#define SCSI_SCAN_NO_RESPONSE 0
81#define SCSI_SCAN_TARGET_PRESENT 1
82#define SCSI_SCAN_LUN_PRESENT 2
83
84static const char *scsi_null_device_strs = "nullnullnullnull";
85
86#define MAX_SCSI_LUNS 512
87
88static u64 max_scsi_luns = MAX_SCSI_LUNS;
89
90module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
91MODULE_PARM_DESC(max_luns,
92 "last scsi LUN (should be between 1 and 2^64-1)");
93
94#ifdef CONFIG_SCSI_SCAN_ASYNC
95#define SCSI_SCAN_TYPE_DEFAULT "async"
96#else
97#define SCSI_SCAN_TYPE_DEFAULT "sync"
98#endif
99
100char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
101
102module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
103 S_IRUGO|S_IWUSR);
104MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
105 "Setting to 'manual' disables automatic scanning, but allows "
106 "for manual device scan via the 'scan' sysfs attribute.");
107
108static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
109
110module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
111MODULE_PARM_DESC(inq_timeout,
112 "Timeout (in seconds) waiting for devices to answer INQUIRY."
113 " Default is 20. Some devices may need more; most need less.");
114
115
116static DEFINE_SPINLOCK(async_scan_lock);
117static LIST_HEAD(scanning_hosts);
118
/*
 * Per-waiter bookkeeping for serializing async host scans.  Entries live on
 * the scanning_hosts list (guarded by async_scan_lock); prev_finished is
 * completed by the previous list entry when its scan finishes (see
 * scsi_complete_async_scans()).
 */
struct async_scan_data {
	struct list_head list;
	struct Scsi_Host *shost;
	struct completion prev_finished;
};
124
125
126
127
128
129
130
131
132
/**
 * scsi_complete_async_scans - Wait for asynchronous scans to complete
 *
 * When this function returns, any host which started scanning before
 * this function was called will have finished its scan.  Hosts which
 * started scanning after this function was called may or may not have
 * finished.
 */
int scsi_complete_async_scans(void)
{
	struct async_scan_data *data;

	do {
		if (list_empty(&scanning_hosts))
			return 0;
		/*
		 * If we can't get memory immediately, that's OK.  Just
		 * sleep a little.  Even if we never get memory, the async
		 * scans will finish.
		 */
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			msleep(1);
	} while (!data);

	data->shost = NULL;
	init_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	/* Check that there's still somebody else on the list */
	if (list_empty(&scanning_hosts))
		goto done;
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
	wait_for_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	list_del(&data->list);
	/* Hand the baton to the next waiter on the list, if any. */
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
				struct async_scan_data, list);
		complete(&next->prev_finished);
	}
 done:
	spin_unlock(&async_scan_lock);

	kfree(data);
	return 0;
}
175
176
177
178
179
180
181
182
183
184
/**
 * scsi_unlock_floptical - send a special MODE SENSE to unlock the drive
 * @sdev:	scsi device to send the command to
 * @result:	area to store the (0x2a byte) MODE SENSE response
 *
 * Description:
 *     Send a vendor-specific MODE SENSE (not a MODE SELECT) command;
 *     called for BLIST_KEY devices (see scsi_probe_and_add_lun()).
 **/
static void scsi_unlock_floptical(struct scsi_device *sdev,
				  unsigned char *result)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];

	sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
	scsi_cmd[0] = MODE_SENSE;
	scsi_cmd[1] = 0;
	scsi_cmd[2] = 0x2e;	/* vendor-specific page */
	scsi_cmd[3] = 0;
	scsi_cmd[4] = 0x2a;	/* allocation length */
	scsi_cmd[5] = 0;
	scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
			 SCSI_TIMEOUT, 3, NULL);
}
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
/**
 * scsi_alloc_sdev - allocate and set up a scsi_device
 * @starget: which target to allocate a &scsi_device for
 * @lun: which lun
 * @hostdata: usually NULL and set by ->slave_alloc instead
 *
 * Description:
 *     Allocate, initialize for io, and return a pointer to a scsi_device.
 *     Stores the host, id, lun and channel in the scsi_device, initializes
 *     the request queue and budget map, and calls the host's slave_alloc
 *     hook if one is provided.
 *
 * Return value:
 *     scsi_device pointer, or NULL on failure.
 **/
static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
					   u64 lun, void *hostdata)
{
	unsigned int depth;
	struct scsi_device *sdev;
	struct request_queue *q;
	int display_failure_msg = 1, ret;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	/* extra transportt->device_size bytes hold transport private data */
	sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
		       GFP_KERNEL);
	if (!sdev)
		goto out;

	sdev->vendor = scsi_null_device_strs;
	sdev->model = scsi_null_device_strs;
	sdev->rev = scsi_null_device_strs;
	sdev->host = shost;
	sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
	sdev->id = starget->id;
	sdev->lun = lun;
	sdev->channel = starget->channel;
	mutex_init(&sdev->state_mutex);
	sdev->sdev_state = SDEV_CREATED;
	INIT_LIST_HEAD(&sdev->siblings);
	INIT_LIST_HEAD(&sdev->same_target_siblings);
	INIT_LIST_HEAD(&sdev->starved_entry);
	INIT_LIST_HEAD(&sdev->event_list);
	spin_lock_init(&sdev->list_lock);
	mutex_init(&sdev->inquiry_mutex);
	INIT_WORK(&sdev->event_work, scsi_evt_thread);
	INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);

	/* hold a reference on the target for the lifetime of the sdev */
	sdev->sdev_gendev.parent = get_device(&starget->dev);
	sdev->sdev_target = starget;

	/* usually NULL and set by ->slave_alloc instead */
	sdev->hostdata = hostdata;

	/* if the device needs this changing, it may do so in the
	 * slave_configure function */
	sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;

	/*
	 * Some low level driver could use device->type
	 */
	sdev->type = -1;

	/*
	 * Assume that the device will have handshaking problems,
	 * and then fix this field later if it turns out it
	 * doesn't.  scsi_add_lun() clears it unless BLIST_BORKEN is set.
	 */
	sdev->borken = 1;

	sdev->sg_reserved_size = INT_MAX;

	q = blk_mq_init_queue(&sdev->host->tag_set);
	if (IS_ERR(q)) {
		/* release fn is set up in scsi_sysfs_device_initialize(),
		 * which hasn't run yet, so free and put manually here */
		put_device(&starget->dev);
		kfree(sdev);
		goto out;
	}
	sdev->request_queue = q;
	q->queuedata = sdev;
	__scsi_init_queue(sdev->host, q);
	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
	WARN_ON_ONCE(!blk_get_queue(q));

	depth = sdev->host->cmd_per_lun ?: 1;

	/*
	 * Size the budget map by the maximum queue depth so that sysfs can
	 * later raise the queue depth, but use the default per-LUN depth to
	 * pick the sbitmap shift, since that is the depth used most often.
	 */
	if (sbitmap_init_node(&sdev->budget_map,
			      scsi_device_max_queue_depth(sdev),
			      sbitmap_calculate_shift(depth),
			      GFP_KERNEL, sdev->request_queue->node,
			      false, true)) {
		put_device(&starget->dev);
		kfree(sdev);
		goto out;
	}

	scsi_change_queue_depth(sdev, depth);

	scsi_sysfs_device_initialize(sdev);

	if (shost->hostt->slave_alloc) {
		ret = shost->hostt->slave_alloc(sdev);
		if (ret) {
			/*
			 * if LLDD reports slave not present, don't clutter
			 * the console with failure messages
			 */
			if (ret == -ENXIO)
				display_failure_msg = 0;
			goto out_device_destroy;
		}
	}

	return sdev;

out_device_destroy:
	__scsi_remove_device(sdev);
out:
	if (display_failure_msg)
		printk(ALLOC_FAILURE_MSG, __func__);
	return NULL;
}
330
/*
 * Final teardown of a target: mark it dead, run the transport and host
 * destroy hooks, unlink it from the host's target list (under host_lock)
 * and drop the device reference.  Hitting the BUG_ON means the target is
 * being destroyed twice.
 */
static void scsi_target_destroy(struct scsi_target *starget)
{
	struct device *dev = &starget->dev;
	struct Scsi_Host *shost = dev_to_shost(dev->parent);
	unsigned long flags;

	BUG_ON(starget->state == STARGET_DEL);
	starget->state = STARGET_DEL;
	transport_destroy_device(dev);
	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->hostt->target_destroy)
		shost->hostt->target_destroy(starget);
	list_del_init(&starget->siblings);
	spin_unlock_irqrestore(shost->host_lock, flags);
	put_device(dev);
}
347
348static void scsi_target_dev_release(struct device *dev)
349{
350 struct device *parent = dev->parent;
351 struct scsi_target *starget = to_scsi_target(dev);
352
353 kfree(starget);
354 put_device(parent);
355}
356
/* device_type shared by all scsi_target devices; identifies them in
 * scsi_is_target_device() and frees them via scsi_target_dev_release(). */
static struct device_type scsi_target_type = {
	.name = "scsi_target",
	.release = scsi_target_dev_release,
};
361
362int scsi_is_target_device(const struct device *dev)
363{
364 return dev->type == &scsi_target_type;
365}
366EXPORT_SYMBOL(scsi_is_target_device);
367
368static struct scsi_target *__scsi_find_target(struct device *parent,
369 int channel, uint id)
370{
371 struct scsi_target *starget, *found_starget = NULL;
372 struct Scsi_Host *shost = dev_to_shost(parent);
373
374
375
376 list_for_each_entry(starget, &shost->__targets, siblings) {
377 if (starget->id == id &&
378 starget->channel == channel) {
379 found_starget = starget;
380 break;
381 }
382 }
383 if (found_starget)
384 get_device(&found_starget->dev);
385
386 return found_starget;
387}
388
389
390
391
392
393
394
395
396
397
/* kref release callback for starget->reap_ref: tear the target down. */
static void scsi_target_reap_ref_release(struct kref *kref)
{
	struct scsi_target *starget
		= container_of(kref, struct scsi_target, reap_ref);

	/*
	 * If we get here and the target is still in a CREATED state, it
	 * was never made visible (no LUNs turned up), so it was never
	 * device_add()ed — skip transport_remove_device()/device_del()
	 * and go straight to destruction.
	 */
	if ((starget->state != STARGET_CREATED) &&
	    (starget->state != STARGET_CREATED_REMOVE)) {
		transport_remove_device(&starget->dev);
		device_del(&starget->dev);
	}
	scsi_target_destroy(starget);
}
415
/* Drop a reap reference; the final put runs scsi_target_reap_ref_release(). */
static void scsi_target_reap_ref_put(struct scsi_target *starget)
{
	kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
}
420
421
422
423
424
425
426
427
428
429
430
431
432
/**
 * scsi_alloc_target - allocate a new or find an existing target
 * @parent:	parent of the target (need not be a scsi host)
 * @channel:	target channel number (zero if no channels)
 * @id:		target id number
 *
 * Return an existing target if one exists, provided it hasn't already
 * gone into STARGET_DEL state, otherwise allocate a new target.
 *
 * The target is returned with an incremented reference, so the caller
 * is responsible for both reaping and doing a last put.
 */
static struct scsi_target *scsi_alloc_target(struct device *parent,
					     int channel, uint id)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	struct device *dev = NULL;
	unsigned long flags;
	const int size = sizeof(struct scsi_target)
		+ shost->transportt->target_size;
	struct scsi_target *starget;
	struct scsi_target *found_target;
	int error, ref_got;

	starget = kzalloc(size, GFP_KERNEL);
	if (!starget) {
		printk(KERN_ERR "%s: allocation failure\n", __func__);
		return NULL;
	}
	dev = &starget->dev;
	device_initialize(dev);
	kref_init(&starget->reap_ref);
	dev->parent = get_device(parent);
	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
	dev->bus = &scsi_bus_type;
	dev->type = &scsi_target_type;
	starget->id = id;
	starget->channel = channel;
	starget->can_queue = 0;
	INIT_LIST_HEAD(&starget->siblings);
	INIT_LIST_HEAD(&starget->devices);
	starget->state = STARGET_CREATED;
	starget->scsi_level = SCSI_2;
	starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
 retry:
	spin_lock_irqsave(shost->host_lock, flags);

	found_target = __scsi_find_target(parent, channel, id);
	if (found_target)
		goto found;
	/* no existing target: publish the new one on the host's list */
	list_add_tail(&starget->siblings, &shost->__targets);
	spin_unlock_irqrestore(shost->host_lock, flags);
	/* allocate and add */
	transport_setup_device(dev);
	if (shost->hostt->target_alloc) {
		error = shost->hostt->target_alloc(starget);

		if(error) {
			if (error != -ENXIO)
				dev_err(dev, "target allocation failed, error %d\n", error);
			/* don't want scsi_target_reap to do the final
			 * put because it will be done under the host lock */
			scsi_target_destroy(starget);
			return NULL;
		}
	}
	get_device(dev);

	return starget;

 found:
	/*
	 * The release routine has already fired if the kref is zero, so if
	 * we can still take the reference, the target must be alive.  If
	 * we can't, it must be dying and we need to wait for a new one.
	 */
	ref_got = kref_get_unless_zero(&found_target->reap_ref);

	spin_unlock_irqrestore(shost->host_lock, flags);
	if (ref_got) {
		/* drop our unused, newly allocated target */
		put_device(dev);
		return found_target;
	}
	/*
	 * We found a dying target; drop the reference __scsi_find_target()
	 * took on it (scsi_target_reap() is deliberately not used here —
	 * the target is already invisible) and wait for it to go away.
	 */
	put_device(&found_target->dev);
	/*
	 * The length of time is irrelevant here, we just want to yield the
	 * CPU for a tick to avoid busy waiting for the target to die.
	 */
	msleep(1);
	goto retry;
}
522
523
524
525
526
527
528
529
530
/**
 * scsi_target_reap - check to see if target is in use and destroy if not
 * @starget: target to be checked
 *
 * This is used after removing a LUN or doing a last put of the target;
 * it drops the reap reference and lets the final put tear the target down.
 */
void scsi_target_reap(struct scsi_target *starget)
{
	/*
	 * serious problem if this triggers: STARGET_DEL is only set in
	 * the if the reap_ref drops to zero, so we're trying to do
	 * another final put on an already deleted target.
	 */
	BUG_ON(starget->state == STARGET_DEL);
	scsi_target_reap_ref_put(starget);
}
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
/**
 * scsi_sanitize_inquiry_string - remove non-graphic chars from an
 *                                INQUIRY result string
 * @s: the string to be sanitized
 * @len: number of bytes in @s
 *
 * Description:
 *	Every byte that is not printable ASCII (outside 0x20..0x7e) is
 *	replaced by a space; once a NUL byte is seen, every remaining byte
 *	(including the NUL itself) is also replaced by a space.  The
 *	resulting buffer is NOT NUL-terminated.
 **/
void scsi_sanitize_inquiry_string(unsigned char *s, int len)
{
	int seen_nul = 0;

	while (len-- > 0) {
		if (*s == '\0')
			seen_nul = 1;
		if (seen_nul || *s < 0x20 || *s > 0x7e)
			*s = ' ';
		s++;
	}
}
568EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
/**
 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
 * @sdev:	scsi_device to probe
 * @inq_result:	area to store the INQUIRY result
 * @result_len:	len of inq_result
 * @bflags:	store any bflags found here
 *
 * Description:
 *     Probe the lun associated with @sdev using a standard SCSI INQUIRY.
 *
 *     If the INQUIRY is successful, zero is returned and the INQUIRY data
 *     is in @inq_result; the scsi_level and INQUIRY length are copied to
 *     the scsi_device and any bflags value is stored in *@bflags.
 **/
static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
			  int result_len, blist_flags_t *bflags)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int first_inquiry_len, try_inquiry_len, next_inquiry_len;
	int response_len = 0;
	int pass, count, result;
	struct scsi_sense_hdr sshdr;

	*bflags = 0;

	/* Perform up to 3 passes.  The first pass uses a conservative
	 * transfer length of 36 unless sdev->inquiry_len specifies a
	 * different value. */
	first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
	try_inquiry_len = first_inquiry_len;
	pass = 1;

 next_pass:
	SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY pass %d length %d\n",
				pass, try_inquiry_len));

	/* Each pass gets up to three chances to ignore Unit Attention */
	for (count = 0; count < 3; ++count) {
		int resid;

		memset(scsi_cmd, 0, 6);
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) try_inquiry_len;

		memset(inq_result, 0, try_inquiry_len);

		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
					  inq_result, try_inquiry_len, &sshdr,
					  HZ / 2 + HZ * scsi_inq_timeout, 3,
					  &resid);

		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY %s with code 0x%x\n",
				result ? "failed" : "successful", result));

		if (result > 0) {
			/*
			 * not-ready to ready transition [asc/ascq=0x28/0x0]
			 * or power-on, reset [asc/ascq=0x29/0x0], continue.
			 * INQUIRY should not yield UNIT_ATTENTION
			 * but many buggy devices do so anyway.
			 */
			if (scsi_status_is_check_condition(result) &&
			    scsi_sense_valid(&sshdr)) {
				if ((sshdr.sense_key == UNIT_ATTENTION) &&
				    ((sshdr.asc == 0x28) ||
				     (sshdr.asc == 0x29)) &&
				    (sshdr.ascq == 0))
					continue;
			}
		} else if (result == 0) {
			/*
			 * if nothing was transferred, we try
			 * again. It's a workaround for some USB
			 * devices.
			 */
			if (resid == try_inquiry_len)
				continue;
		}
		break;
	}

	if (result == 0) {
		scsi_sanitize_inquiry_string(&inq_result[8], 8);
		scsi_sanitize_inquiry_string(&inq_result[16], 16);
		scsi_sanitize_inquiry_string(&inq_result[32], 4);

		response_len = inq_result[4] + 5;
		if (response_len > 255)
			response_len = first_inquiry_len;	/* sanity */

		/*
		 * Get any flags for this device.
		 *
		 * XXX add a bflags to scsi_device, and replace the
		 * corresponding bit fields in scsi_device, so bflags
		 * need not be passed as an argument.
		 */
		*bflags = scsi_get_device_flags(sdev, &inq_result[8],
				&inq_result[16]);

		/* When the first pass succeeds we gain information about
		 * what larger transfer lengths might work. */
		if (pass == 1) {
			if (BLIST_INQUIRY_36 & *bflags)
				next_inquiry_len = 36;
			else if (sdev->inquiry_len)
				next_inquiry_len = sdev->inquiry_len;
			else
				next_inquiry_len = response_len;

			/* If more data is available perform the second pass */
			if (next_inquiry_len > try_inquiry_len) {
				try_inquiry_len = next_inquiry_len;
				pass = 2;
				goto next_pass;
			}
		}

	} else if (pass == 2) {
		sdev_printk(KERN_INFO, sdev,
			    "scsi scan: %d byte inquiry failed. "
			    "Consider BLIST_INQUIRY_36 for this device\n",
			    try_inquiry_len);

		/* If the second pass failed, the third pass goes back and
		 * transfers the same amount as we successfully got in the
		 * first pass. */
		try_inquiry_len = first_inquiry_len;
		pass = 3;
		goto next_pass;
	}

	/* If the last transfer attempt got an error, assume the
	 * peripheral doesn't exist or is dead. */
	if (result)
		return -EIO;

	/* Don't report any more data than the device says is valid */
	sdev->inquiry_len = min(try_inquiry_len, response_len);

	/*
	 * A short INQUIRY (< 36 bytes) means the vendor/model/rev strings
	 * looked up above may be invalid; warn once per host and clamp the
	 * length up to 36 so later users don't read past valid data.
	 */
	if (sdev->inquiry_len < 36) {
		if (!sdev->host->short_inquiry) {
			shost_printk(KERN_INFO, sdev->host,
				    "scsi scan: INQUIRY result too short (%d),"
				    " using 36\n", sdev->inquiry_len);
			sdev->host->short_inquiry = 1;
		}
		sdev->inquiry_len = 36;
	}

	/*
	 * Derive the ANSI SCSI level from INQUIRY byte 2 (bumped by one so
	 * that 0 can mean "unknown"); a SCSI-1 device claiming CCS
	 * (response data format 1) is treated as SCSI_1_CCS.  The target
	 * also caches the level so non-zero LUNs can be scanned even when
	 * no device is attached at LUN 0.
	 */
	sdev->scsi_level = inq_result[2] & 0x07;
	if (sdev->scsi_level >= 2 ||
	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
		sdev->scsi_level++;
	sdev->sdev_target->scsi_level = sdev->scsi_level;

	/*
	 * If SCSI-2 or lower, and the host doesn't forbid it, store the
	 * LUN value in CDB[1] as those old devices expect.
	 */
	sdev->lun_in_cdb = 0;
	if (sdev->scsi_level <= SCSI_2 &&
	    sdev->scsi_level != SCSI_UNKNOWN &&
	    !sdev->host->no_scsi2_lun_in_cdb)
		sdev->lun_in_cdb = 1;

	return 0;
}
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
/**
 * scsi_add_lun - allocate and fully initialize a scsi_device
 * @sdev:	holds information to be stored in the new scsi_device
 * @inq_result:	holds the result of a previous INQUIRY to the LUN
 * @bflags:	black/white list flag
 * @async:	nonzero if this device is being scanned asynchronously
 *
 * Description:
 *     Initialize the scsi_device @sdev from the INQUIRY data and the
 *     bflags.  When not async, also registers the device with sysfs.
 *
 * Return:
 *     SCSI_SCAN_NO_RESPONSE: could not allocate or set up the scsi_device
 *     SCSI_SCAN_LUN_PRESENT: the scsi_device was initialized
 **/
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
		blist_flags_t *bflags, int async)
{
	int ret;

	/*
	 * Copy at least 36 bytes of INQUIRY data, so that we don't
	 * dereference unallocated memory when accessing the Vendor,
	 * Product, and Revision strings.  Badly behaved devices may set
	 * the INQUIRY Additional Length byte to a small value, but
	 * scsi_probe_lun() zero-fills inq_result, so short responses are
	 * safe to copy.
	 */
	sdev->inquiry = kmemdup(inq_result,
				max_t(size_t, sdev->inquiry_len, 36),
				GFP_KERNEL);
	if (sdev->inquiry == NULL)
		return SCSI_SCAN_NO_RESPONSE;

	/* vendor/model/rev point into the saved INQUIRY copy */
	sdev->vendor = (char *) (sdev->inquiry + 8);
	sdev->model = (char *) (sdev->inquiry + 16);
	sdev->rev = (char *) (sdev->inquiry + 32);

	if (strncmp(sdev->vendor, "ATA ", 8) == 0) {
		/*
		 * SATA-translation-layer device: allow restart so that a
		 * device reporting NOT READY after going into standby can
		 * be started again.
		 */
		sdev->allow_restart = 1;
	}

	if (*bflags & BLIST_ISROM) {
		sdev->type = TYPE_ROM;
		sdev->removable = 1;
	} else {
		sdev->type = (inq_result[0] & 0x1f);
		sdev->removable = (inq_result[1] & 0x80) >> 7;

		/*
		 * some devices may respond with wrong type for
		 * well-known logical units. Force well-known type
		 * to enumerate them correctly.
		 */
		if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
			sdev_printk(KERN_WARNING, sdev,
				"%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
				__func__, sdev->type, (unsigned int)sdev->lun);
			sdev->type = TYPE_WLUN;
		}

	}

	if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
		/* RBC and MMC devices can return SCSI-3 compliance and yet
		 * still not support REPORT LUNS, so make them act as
		 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
		 * specifically set */
		if ((*bflags & BLIST_REPORTLUN2) == 0)
			*bflags |= BLIST_NOREPORTLUN;
	}

	/*
	 * Pull the remaining interesting bits out of the INQUIRY data:
	 * peripheral qualifier, soft-reset support, and the PPR/WDTR/SDTR
	 * transfer negotiation capability bits (bytes 7 and 56).
	 */
	sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
	sdev->lockable = sdev->removable;
	sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);

	if (sdev->scsi_level >= SCSI_3 ||
	    (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
		sdev->ppr = 1;
	if (inq_result[7] & 0x60)
		sdev->wdtr = 1;
	if (inq_result[7] & 0x10)
		sdev->sdtr = 1;

	sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
			"ANSI: %d%s\n", scsi_device_type(sdev->type),
			sdev->vendor, sdev->model, sdev->rev,
			sdev->inq_periph_qual, inq_result[2] & 0x07,
			(inq_result[3] & 0x0f) == 1 ? " CCS" : "");

	if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
	    !(*bflags & BLIST_NOTQ)) {
		sdev->tagged_supported = 1;
		sdev->simple_tags = 1;
	}

	/*
	 * borken was pessimistically initialized to 1 in scsi_alloc_sdev();
	 * clear it unless the blacklist says the device really is broken.
	 */
	if ((*bflags & BLIST_BORKEN) == 0)
		sdev->borken = 0;

	if (*bflags & BLIST_NO_ULD_ATTACH)
		sdev->no_uld_attach = 1;

	/*
	 * Some broken devices need to be selected without asserting ATN.
	 */
	if (*bflags & BLIST_SELECT_NO_ATN)
		sdev->select_no_atn = 1;

	/*
	 * Maximum 512 sector transfer length for devices that report
	 * incorrect max/optimal lengths.
	 */
	if (*bflags & BLIST_MAX_512)
		blk_queue_max_hw_sectors(sdev->request_queue, 512);
	/*
	 * Max 1024 sector transfer length for targets that report
	 * incorrect max/optimal lengths and relied on the old block
	 * layer safe default.
	 */
	else if (*bflags & BLIST_MAX_1024)
		blk_queue_max_hw_sectors(sdev->request_queue, 1024);

	/*
	 * Some devices may not want to have a start command automatically
	 * issued when a device is added.
	 */
	if (*bflags & BLIST_NOSTARTONADD)
		sdev->no_start_on_add = 1;

	if (*bflags & BLIST_SINGLELUN)
		scsi_target(sdev)->single_lun = 1;

	sdev->use_10_for_rw = 1;

	/* some devices don't like REPORT SUPPORTED OPERATION CODES
	 * and will simply time out, causing a long initialization delay */
	if (*bflags & BLIST_NO_RSOC)
		sdev->no_report_opcodes = 1;

	/* set the device running here so that slave configure
	 * may do I/O */
	mutex_lock(&sdev->state_mutex);
	ret = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (ret)
		ret = scsi_device_set_state(sdev, SDEV_BLOCK);
	mutex_unlock(&sdev->state_mutex);

	if (ret) {
		sdev_printk(KERN_ERR, sdev,
			    "in wrong state %s to complete scan\n",
			    scsi_device_state_name(sdev->sdev_state));
		return SCSI_SCAN_NO_RESPONSE;
	}

	if (*bflags & BLIST_NOT_LOCKABLE)
		sdev->lockable = 0;

	if (*bflags & BLIST_RETRY_HWERROR)
		sdev->retry_hwerror = 1;

	if (*bflags & BLIST_NO_DIF)
		sdev->no_dif = 1;

	if (*bflags & BLIST_UNMAP_LIMIT_WS)
		sdev->unmap_limit_for_ws = 1;

	if (*bflags & BLIST_IGN_MEDIA_CHANGE)
		sdev->ignore_media_change = 1;

	sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;

	if (*bflags & BLIST_TRY_VPD_PAGES)
		sdev->try_vpd_pages = 1;
	else if (*bflags & BLIST_SKIP_VPD_PAGES)
		sdev->skip_vpd_pages = 1;

	transport_configure_device(&sdev->sdev_gendev);

	if (sdev->host->hostt->slave_configure) {
		ret = sdev->host->hostt->slave_configure(sdev);
		if (ret) {
			/*
			 * if LLDD reports slave not present, don't clutter
			 * the console with failure messages
			 */
			if (ret != -ENXIO) {
				sdev_printk(KERN_ERR, sdev,
					"failed to configure device\n");
			}
			return SCSI_SCAN_NO_RESPONSE;
		}
	}

	if (sdev->scsi_level >= SCSI_3)
		scsi_attach_vpd(sdev);

	sdev->max_queue_depth = sdev->queue_depth;
	WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
	sdev->sdev_bflags = *bflags;

	/*
	 * Ok, the device is now all set up, we can
	 * register it and tell the rest of the kernel
	 * about it.
	 */
	if (!async && scsi_sysfs_add_sdev(sdev) != 0)
		return SCSI_SCAN_NO_RESPONSE;

	return SCSI_SCAN_LUN_PRESENT;
}
1023
1024#ifdef CONFIG_SCSI_LOGGING
1025
1026
1027
1028
1029
1030
1031
/**
 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing
 *                whitespace
 * @buf:   Output buffer with at least end-first+1 bytes of space
 * @inq:   Inquiry buffer (input)
 * @first: Offset of string into inq
 * @end:   Index after last character in inq
 *
 * Copies inq[first..end) into @buf, never reading past the response length
 * recorded in inq[4]+5; control and space characters become ' ' and the
 * result is NUL-terminated right after the last non-space character.
 */
static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
				   unsigned first, unsigned end)
{
	unsigned last_nonspace = 0, i;

	for (i = 0; i + first < end && i + first < inq[4] + 5; i++) {
		unsigned char c = inq[i + first];

		if (c > ' ') {
			buf[i] = c;
			last_nonspace = i + 1;
		} else {
			buf[i] = ' ';
		}
	}
	buf[last_nonspace] = 0;
	return buf;
}
1048#endif
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
/**
 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
 * @starget:	pointer to target device structure
 * @lun:	LUN of target device
 * @bflagsp:	store bflags here if not NULL
 * @sdevp:	if not NULL, store a reference to the found/created device here
 * @rescan:	if not equal to SCSI_SCAN_INITIAL, skip some code only
 *		needed on the first scan
 * @hostdata:	passed to scsi_alloc_sdev()
 *
 * Description:
 *     Call scsi_probe_lun; if a LUN with an attached device is found,
 *     allocate and set it up by calling scsi_add_lun.
 *
 * Return:
 *   - SCSI_SCAN_NO_RESPONSE: could not allocate or set up a scsi_device
 *   - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
 *     attached at the LUN
 *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
 **/
static int scsi_probe_and_add_lun(struct scsi_target *starget,
				  u64 lun, blist_flags_t *bflagsp,
				  struct scsi_device **sdevp,
				  enum scsi_scan_mode rescan,
				  void *hostdata)
{
	struct scsi_device *sdev;
	unsigned char *result;
	blist_flags_t bflags;
	int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	/*
	 * If an existing device already sits at this LUN (and this is not
	 * the initial scan hitting a freshly created device), reuse it
	 * instead of probing again.
	 */
	sdev = scsi_device_lookup_by_target(starget, lun);
	if (sdev) {
		if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
			SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: device exists on %s\n",
				dev_name(&sdev->sdev_gendev)));
			if (sdevp)
				*sdevp = sdev;
			else
				scsi_device_put(sdev);

			if (bflagsp)
				*bflagsp = scsi_get_device_flags(sdev,
								 sdev->vendor,
								 sdev->model);
			return SCSI_SCAN_LUN_PRESENT;
		}
		scsi_device_put(sdev);
	} else
		sdev = scsi_alloc_sdev(starget, lun, hostdata);
	if (!sdev)
		goto out;

	result = kmalloc(result_len, GFP_KERNEL);
	if (!result)
		goto out_free_sdev;

	if (scsi_probe_lun(sdev, result, result_len, &bflags))
		goto out_free_result;

	if (bflagsp)
		*bflagsp = bflags;
	/*
	 * result contains valid SCSI INQUIRY data.
	 */
	if ((result[0] >> 5) == 3) {
		/*
		 * For a Peripheral qualifier 3 (011b), the SCSI
		 * spec says: The device server is not capable of
		 * supporting a physical device on this logical
		 * unit.  So the target id responds but there is no
		 * device configured at this LUN; don't add one.
		 */
		SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
				" peripheral qualifier of 3, device not"
				" added\n"))
		if (lun == 0) {
			SCSI_LOG_SCAN_BUS(1, {
				unsigned char vend[9];
				unsigned char mod[17];

				sdev_printk(KERN_INFO, sdev,
					"scsi scan: consider passing scsi_mod."
					"dev_flags=%s:%s:0x240 or 0x1000240\n",
					scsi_inq_str(vend, result, 8, 16),
					scsi_inq_str(mod, result, 16, 32));
			});

		}

		res = SCSI_SCAN_TARGET_PRESENT;
		goto out_free_result;
	}

	/*
	 * Some targets set slight variations of PQ and PDT to signal that
	 * no LUN is present (e.g. PQ=1 or PDT=0x1f when the target opts in
	 * via pdt_1f_for_no_lun), so don't add an sdev in those cases.
	 * Well-known LUNs are exempt from this check.
	 */
	if (((result[0] >> 5) == 1 ||
	    (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
	    !scsi_is_wlun(lun)) {
		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
					"scsi scan: peripheral device type"
					" of 31, no device added\n"));
		res = SCSI_SCAN_TARGET_PRESENT;
		goto out_free_result;
	}

	res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (bflags & BLIST_KEY) {
			sdev->lockable = 0;
			scsi_unlock_floptical(sdev, result);
		}
	}

 out_free_result:
	kfree(result);
 out_free_sdev:
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (sdevp) {
			/* caller wants a reference; on failure undo the add */
			if (scsi_device_get(sdev) == 0) {
				*sdevp = sdev;
			} else {
				__scsi_remove_device(sdev);
				res = SCSI_SCAN_NO_RESPONSE;
			}
		}
	} else
		__scsi_remove_device(sdev);
 out:
	return res;
}
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
/**
 * scsi_sequential_lun_scan - sequentially scan a SCSI target
 * @starget:	pointer to target structure to scan
 * @bflags:	black/white list flag for LUN 0
 * @scsi_level:	which version of the standard this target adheres to
 * @rescan:	passed to scsi_probe_and_add_lun()
 *
 * Description:
 *     Generally, scan from LUN 1 (LUN 0 is assumed to already have been
 *     scanned) to some maximum lun until a LUN is found with no device
 *     attached. Use the bflags to figure out any oddities.
 **/
static void scsi_sequential_lun_scan(struct scsi_target *starget,
				     blist_flags_t bflags, int scsi_level,
				     enum scsi_scan_mode rescan)
{
	uint max_dev_lun;
	u64 sparse_lun, lun;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
		"scsi scan: Sequential scan\n"));

	max_dev_lun = min(max_scsi_luns, shost->max_lun);
	/*
	 * If this device is known to support sparse multiple units,
	 * override the other settings, and scan all of them. Normally,
	 * SCSI-3 devices should be scanned via the REPORT LUNS.
	 */
	if (bflags & BLIST_SPARSELUN) {
		max_dev_lun = shost->max_lun;
		sparse_lun = 1;
	} else
		sparse_lun = 0;

	/*
	 * If this device is known to support multiple units, override
	 * the other settings, and scan all of them.
	 */
	if (bflags & BLIST_FORCELUN)
		max_dev_lun = shost->max_lun;
	/*
	 * REGAL CDC-4X: avoid hang after LUN 4
	 */
	if (bflags & BLIST_MAX5LUN)
		max_dev_lun = min(5U, max_dev_lun);
	/*
	 * Do not scan SCSI-2 or lower device past LUN 7, unless
	 * BLIST_LARGELUN.
	 */
	if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
		max_dev_lun = min(8U, max_dev_lun);
	else
		max_dev_lun = min(256U, max_dev_lun);

	/*
	 * We have already scanned LUN 0, so start at LUN 1. Keep scanning
	 * until we reach the max, or no LUN is found and we are not
	 * sparse_lun.
	 */
	for (lun = 1; lun < max_dev_lun; ++lun)
		if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
					    NULL) != SCSI_SCAN_LUN_PRESENT) &&
		    !sparse_lun)
			return;
}
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
/**
 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
 * @starget: which target
 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or
 *     BLIST_NOREPORTLUN
 * @rescan: nonzero if we can skip code only needed on first scan
 *
 * Description:
 *   Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
 *   Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
 *
 *   If BLIST_REPORTLUN2 is set, scan a target that supports more than 8
 *   LUNs even if it's older than SCSI-3.
 *   If BLIST_NOREPORTLUN is set, return 1 always.
 *   If BLIST_NOLUN is set, return 0 always.
 *   If starget->no_report_luns is set, return 1 always.
 *
 * Return:
 *     0: scan completed (or no memory, so further scanning is futile)
 *     1: could not scan with REPORT LUN
 **/
static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
				enum scsi_scan_mode rescan)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	unsigned int length;
	u64 lun;
	unsigned int num_luns;
	unsigned int retries;
	int result;
	struct scsi_lun *lunp, *lun_data;
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int ret = 0;

	/*
	 * Only support SCSI-3 and up devices if BLIST_NOLUN is zero.
	 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and the host
	 * adapter supports more than 8 LUNs.
	 */
	if (bflags & BLIST_NOREPORTLUN)
		return 1;
	if (starget->scsi_level < SCSI_2 &&
	    starget->scsi_level != SCSI_UNKNOWN)
		return 1;
	if (starget->scsi_level < SCSI_3 &&
	    (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
		return 1;
	if (bflags & BLIST_NOLUN)
		return 0;
	if (starget->no_report_luns)
		return 1;

	/* need a (possibly temporary) LUN 0 sdev to issue the command on */
	if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
		sdev = scsi_alloc_sdev(starget, 0, NULL);
		if (!sdev)
			return 0;
		if (scsi_device_get(sdev)) {
			__scsi_remove_device(sdev);
			return 0;
		}
	}

	/*
	 * Allocate enough to hold the header (the same size as one
	 * scsi_lun) plus space for up to 511 LUN entries.
	 */
	length = (511 + 1) * sizeof(struct scsi_lun);
retry:
	lun_data = kmalloc(length, GFP_KERNEL);
	if (!lun_data) {
		printk(ALLOC_FAILURE_MSG, __func__);
		goto out;
	}

	scsi_cmd[0] = REPORT_LUNS;

	/*
	 * bytes 1 - 5: reserved, set to zero.
	 */
	memset(&scsi_cmd[1], 0, 5);

	/*
	 * bytes 6 - 9: allocation length.
	 */
	put_unaligned_be32(length, &scsi_cmd[6]);

	scsi_cmd[10] = 0;	/* reserved */
	scsi_cmd[11] = 0;	/* control */

	/*
	 * We can get a UNIT ATTENTION, for example a power on/reset, so
	 * retry a few times.  Illegal requests (for devices that do not
	 * support REPORT LUNS) come through as a check condition with a
	 * different sense key and will not be retried.
	 */
	for (retries = 0; retries < 3; retries++) {
		SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
				"scsi scan: Sending REPORT LUNS to (try %d)\n",
				retries));

		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
					  lun_data, length, &sshdr,
					  SCSI_REPORT_LUNS_TIMEOUT, 3, NULL);

		SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
				"scsi scan: REPORT LUNS"
				" %s (try %d) result 0x%x\n",
				result ? "failed" : "successful",
				retries, result));
		if (result == 0)
			break;
		else if (scsi_sense_valid(&sshdr)) {
			if (sshdr.sense_key != UNIT_ATTENTION)
				break;
		}
	}

	if (result) {
		/*
		 * The device probably does not support a REPORT LUN command
		 */
		ret = 1;
		goto out_err;
	}

	/*
	 * The LUN list length is in the first four bytes of lun_data.  If
	 * the device reports more LUNs than we allocated room for, grow
	 * the buffer and reissue the command.
	 */
	if (get_unaligned_be32(lun_data->scsi_lun) +
	    sizeof(struct scsi_lun) > length) {
		length = get_unaligned_be32(lun_data->scsi_lun) +
			 sizeof(struct scsi_lun);
		kfree(lun_data);
		goto retry;
	}
	length = get_unaligned_be32(lun_data->scsi_lun);

	num_luns = (length / sizeof(struct scsi_lun));

	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
		"scsi scan: REPORT LUN scan\n"));

	/*
	 * Scan the luns in lun_data. The entry at offset 0 is really
	 * the header, so start at 1 and go up to and including num_luns.
	 */
	for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
		lun = scsilun_to_int(lunp);

		if (lun > sdev->host->max_lun) {
			sdev_printk(KERN_WARNING, sdev,
				    "lun%llu has a LUN larger than"
				    " allowed by the host adapter\n", lun);
		} else {
			int res;

			res = scsi_probe_and_add_lun(starget,
				lun, NULL, NULL, rescan, NULL);
			if (res == SCSI_SCAN_NO_RESPONSE) {
				/*
				 * Got some results, but now none, abort.
				 */
				sdev_printk(KERN_ERR, sdev,
					"Unexpected response"
					" from lun %llu while scanning, scan"
					" aborted\n", (unsigned long long)lun);
				break;
			}
		}
	}

 out_err:
	kfree(lun_data);
 out:
	if (scsi_device_created(sdev))
		/*
		 * the sdev we used didn't appear in the report luns scan
		 */
		__scsi_remove_device(sdev);
	scsi_device_put(sdev);
	return ret;
}
1485
1486struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1487 uint id, u64 lun, void *hostdata)
1488{
1489 struct scsi_device *sdev = ERR_PTR(-ENODEV);
1490 struct device *parent = &shost->shost_gendev;
1491 struct scsi_target *starget;
1492
1493 if (strncmp(scsi_scan_type, "none", 4) == 0)
1494 return ERR_PTR(-ENODEV);
1495
1496 starget = scsi_alloc_target(parent, channel, id);
1497 if (!starget)
1498 return ERR_PTR(-ENOMEM);
1499 scsi_autopm_get_target(starget);
1500
1501 mutex_lock(&shost->scan_mutex);
1502 if (!shost->async_scan)
1503 scsi_complete_async_scans();
1504
1505 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1506 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
1507 scsi_autopm_put_host(shost);
1508 }
1509 mutex_unlock(&shost->scan_mutex);
1510 scsi_autopm_put_target(starget);
1511
1512
1513
1514
1515 scsi_target_reap(starget);
1516 put_device(&starget->dev);
1517
1518 return sdev;
1519}
1520EXPORT_SYMBOL(__scsi_add_device);
1521
1522int scsi_add_device(struct Scsi_Host *host, uint channel,
1523 uint target, u64 lun)
1524{
1525 struct scsi_device *sdev =
1526 __scsi_add_device(host, channel, target, lun, NULL);
1527 if (IS_ERR(sdev))
1528 return PTR_ERR(sdev);
1529
1530 scsi_device_put(sdev);
1531 return 0;
1532}
1533EXPORT_SYMBOL(scsi_add_device);
1534
/**
 * scsi_rescan_device - rescan an existing SCSI device
 * @dev: generic device embedded in the scsi_device to rescan
 *
 * Under the device lock: re-read the VPD pages, notify the device
 * handler, and let the attached upper-level driver revalidate the
 * device via its ->rescan hook.
 */
void scsi_rescan_device(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	device_lock(dev);

	scsi_attach_vpd(sdev);

	if (sdev->handler && sdev->handler->rescan)
		sdev->handler->rescan(sdev);

	/* pin the driver module while calling into it */
	if (dev->driver && try_module_get(dev->driver->owner)) {
		struct scsi_driver *drv = to_scsi_driver(dev->driver);

		if (drv->rescan)
			drv->rescan(dev);
		module_put(dev->driver->owner);
	}
	device_unlock(dev);
}
EXPORT_SYMBOL(scsi_rescan_device);
1556
/*
 * Scan one target id: either a single LUN, or (for SCAN_WILD_CARD) LUN 0
 * followed by a REPORT LUNS scan with a sequential scan as fallback.
 * Caller must hold shost->scan_mutex (all callers in this file do).
 */
static void __scsi_scan_target(struct device *parent, unsigned int channel,
		unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	blist_flags_t bflags = 0;
	int res;
	struct scsi_target *starget;

	if (shost->this_id == id)
		/*
		 * Don't scan the host adapter
		 */
		return;

	starget = scsi_alloc_target(parent, channel, id);
	if (!starget)
		return;
	scsi_autopm_get_target(starget);

	if (lun != SCAN_WILD_CARD) {
		/*
		 * Scan for a specific host/chan/id/lun.
		 */
		scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
		goto out_reap;
	}

	/*
	 * Scan LUN 0, if there is some response, scan further. Ideally, we
	 * would not configure LUN 0 until all LUNs are scanned.
	 */
	res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
	if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
		if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
			/*
			 * The REPORT LUN did not scan the target,
			 * do a sequential scan.
			 */
			scsi_sequential_lun_scan(starget, bflags,
						 starget->scsi_level, rescan);
	}

 out_reap:
	scsi_autopm_put_target(starget);
	/*
	 * paired with scsi_alloc_target(): determine if the target has
	 * any children at all, if not, nuke it
	 */
	scsi_target_reap(starget);

	put_device(&starget->dev);
}
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
/**
 * scsi_scan_target - scan a target id, possibly including all LUNs on the
 *     target.
 * @parent:	host to scan
 * @channel:	channel to scan
 * @id:		target id to scan
 * @lun:	Specific LUN to scan or SCAN_WILD_CARD
 * @rescan:	passed to LUN scanning routines; SCSI_SCAN_INITIAL for
 *              no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
 *              and SCSI_SCAN_MANUAL to force scanning even if
 *              'scan=manual' is set.
 *
 * Description:
 *     Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
 *     and possibly all LUNs on the target id.
 *
 *     First try a REPORT LUN scan, if that does not scan the target, do a
 *     sequential scan of LUNs on the target id.
 **/
void scsi_scan_target(struct device *parent, unsigned int channel,
		      unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);

	if (strncmp(scsi_scan_type, "none", 4) == 0)
		return;

	/* with scan=manual only explicitly requested scans are allowed */
	if (rescan != SCSI_SCAN_MANUAL &&
	    strncmp(scsi_scan_type, "manual", 6) == 0)
		return;

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		__scsi_scan_target(parent, channel, id, lun, rescan);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);
}
EXPORT_SYMBOL(scsi_scan_target);
1651
1652static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
1653 unsigned int id, u64 lun,
1654 enum scsi_scan_mode rescan)
1655{
1656 uint order_id;
1657
1658 if (id == SCAN_WILD_CARD)
1659 for (id = 0; id < shost->max_id; ++id) {
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669 if (shost->reverse_ordering)
1670
1671
1672
1673 order_id = shost->max_id - id - 1;
1674 else
1675 order_id = id;
1676 __scsi_scan_target(&shost->shost_gendev, channel,
1677 order_id, lun, rescan);
1678 }
1679 else
1680 __scsi_scan_target(&shost->shost_gendev, channel,
1681 id, lun, rescan);
1682}
1683
1684int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1685 unsigned int id, u64 lun,
1686 enum scsi_scan_mode rescan)
1687{
1688 SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
1689 "%s: <%u:%u:%llu>\n",
1690 __func__, channel, id, lun));
1691
1692 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1693 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1694 ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
1695 return -EINVAL;
1696
1697 mutex_lock(&shost->scan_mutex);
1698 if (!shost->async_scan)
1699 scsi_complete_async_scans();
1700
1701 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1702 if (channel == SCAN_WILD_CARD)
1703 for (channel = 0; channel <= shost->max_channel;
1704 channel++)
1705 scsi_scan_channel(shost, channel, id, lun,
1706 rescan);
1707 else
1708 scsi_scan_channel(shost, channel, id, lun, rescan);
1709 scsi_autopm_put_host(shost);
1710 }
1711 mutex_unlock(&shost->scan_mutex);
1712
1713 return 0;
1714}
1715
1716static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1717{
1718 struct scsi_device *sdev;
1719 shost_for_each_device(sdev, shost) {
1720
1721 if (sdev->sdev_state == SDEV_DEL)
1722 continue;
1723
1724 if (sdev->is_visible)
1725 continue;
1726 if (!scsi_host_scan_allowed(shost) ||
1727 scsi_sysfs_add_sdev(sdev) != 0)
1728 __scsi_remove_device(sdev);
1729 }
1730}
1731
/**
 * scsi_prep_async_scan - prepare for an async scan
 * @shost: the host which will be scanned
 * Returns: a cookie to be passed to scsi_finish_async_scan(), or NULL if
 *          async scanning is disabled ("sync" scan type) or setup failed
 *
 * Tells the midlayer this host is going to do an asynchronous scan.
 * It reserves the host's position in the scanning list and ensures
 * that other asynchronous scans started after this one won't affect the
 * ordering of the discovered devices.
 */
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
	struct async_scan_data *data = NULL;
	unsigned long flags;

	if (strncmp(scsi_scan_type, "sync", 4) == 0)
		return NULL;

	mutex_lock(&shost->scan_mutex);
	if (shost->async_scan) {
		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
		goto err;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;
	data->shost = scsi_host_get(shost);
	if (!data->shost)
		goto err;
	init_completion(&data->prev_finished);

	/* async_scan is read under host_lock elsewhere, so flip it there. */
	spin_lock_irqsave(shost->host_lock, flags);
	shost->async_scan = 1;
	spin_unlock_irqrestore(shost->host_lock, flags);
	mutex_unlock(&shost->scan_mutex);

	/*
	 * Queue ourselves on the global list of scanning hosts; if no
	 * other scan is in flight there is nothing to wait for, so
	 * complete our own prev_finished immediately.
	 */
	spin_lock(&async_scan_lock);
	if (list_empty(&scanning_hosts))
		complete(&data->prev_finished);
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	return data;

 err:
	/* kfree(NULL) is a no-op, so both failure paths land here. */
	mutex_unlock(&shost->scan_mutex);
	kfree(data);
	return NULL;
}
1782
/**
 * scsi_finish_async_scan - asynchronous scan has finished
 * @data: cookie returned from earlier call to scsi_prep_async_scan()
 *
 * All the devices currently attached to this host have been found.
 * This function announces all the devices it has found to the rest
 * of the system.
 */
static void scsi_finish_async_scan(struct async_scan_data *data)
{
	struct Scsi_Host *shost;
	unsigned long flags;

	if (!data)
		return;

	shost = data->shost;

	mutex_lock(&shost->scan_mutex);

	if (!shost->async_scan) {
		shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
		dump_stack();
		mutex_unlock(&shost->scan_mutex);
		return;
	}

	/*
	 * Wait for the scan that started before us to announce its
	 * devices, so discovery order is preserved across hosts.
	 */
	wait_for_completion(&data->prev_finished);

	scsi_sysfs_add_devices(shost);

	/* async_scan is read under host_lock elsewhere, so clear it there. */
	spin_lock_irqsave(shost->host_lock, flags);
	shost->async_scan = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);

	mutex_unlock(&shost->scan_mutex);

	/*
	 * Remove ourselves from the scanning chain and wake the next
	 * waiting scan, if any.
	 */
	spin_lock(&async_scan_lock);
	list_del(&data->list);
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
			struct async_scan_data, list);
		complete(&next->prev_finished);
	}
	spin_unlock(&async_scan_lock);

	/* Drop the autopm reference taken in scsi_scan_host() and the
	 * host reference taken in scsi_prep_async_scan(). */
	scsi_autopm_put_host(shost);
	scsi_host_put(shost);
	kfree(data);
}
1833
1834static void do_scsi_scan_host(struct Scsi_Host *shost)
1835{
1836 if (shost->hostt->scan_finished) {
1837 unsigned long start = jiffies;
1838 if (shost->hostt->scan_start)
1839 shost->hostt->scan_start(shost);
1840
1841 while (!shost->hostt->scan_finished(shost, jiffies - start))
1842 msleep(10);
1843 } else {
1844 scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
1845 SCAN_WILD_CARD, 0);
1846 }
1847}
1848
1849static void do_scan_async(void *_data, async_cookie_t c)
1850{
1851 struct async_scan_data *data = _data;
1852 struct Scsi_Host *shost = data->shost;
1853
1854 do_scsi_scan_host(shost);
1855 scsi_finish_async_scan(data);
1856}
1857
1858
1859
1860
1861
1862void scsi_scan_host(struct Scsi_Host *shost)
1863{
1864 struct async_scan_data *data;
1865
1866 if (strncmp(scsi_scan_type, "none", 4) == 0 ||
1867 strncmp(scsi_scan_type, "manual", 6) == 0)
1868 return;
1869 if (scsi_autopm_get_host(shost) < 0)
1870 return;
1871
1872 data = scsi_prep_async_scan(shost);
1873 if (!data) {
1874 do_scsi_scan_host(shost);
1875 scsi_autopm_put_host(shost);
1876 return;
1877 }
1878
1879
1880
1881
1882 async_schedule(do_scan_async, data);
1883
1884
1885}
1886EXPORT_SYMBOL(scsi_scan_host);
1887
/*
 * Remove every device still attached to @shost.  Because
 * __scsi_remove_device() cannot be called with host_lock held, the
 * lock is dropped for each removal and the list walk is restarted
 * from the top, skipping entries already in SDEV_DEL.
 */
void scsi_forget_host(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;

 restart:
	spin_lock_irqsave(shost->host_lock, flags);
	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (sdev->sdev_state == SDEV_DEL)
			continue;
		/* Drop the lock before removing; the list may change, so
		 * restart the walk afterwards. */
		spin_unlock_irqrestore(shost->host_lock, flags);
		__scsi_remove_device(sdev);
		goto restart;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
1923{
1924 struct scsi_device *sdev = NULL;
1925 struct scsi_target *starget;
1926
1927 mutex_lock(&shost->scan_mutex);
1928 if (!scsi_host_scan_allowed(shost))
1929 goto out;
1930 starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
1931 if (!starget)
1932 goto out;
1933
1934 sdev = scsi_alloc_sdev(starget, 0, NULL);
1935 if (sdev)
1936 sdev->borken = 0;
1937 else
1938 scsi_target_reap(starget);
1939 put_device(&starget->dev);
1940 out:
1941 mutex_unlock(&shost->scan_mutex);
1942 return sdev;
1943}
1944EXPORT_SYMBOL(scsi_get_host_dev);
1945
/**
 * scsi_free_host_dev - free a scsi_device that points to the host adapter itself
 * @sdev:	Host device to be freed
 *
 * Lock status: None assumed.
 *
 * Returns:     Nothing
 *
 * Notes:
 *	Only valid for devices obtained from scsi_get_host_dev(): the
 *	BUG_ON enforces that @sdev sits at the host's own id (this_id).
 **/
void scsi_free_host_dev(struct scsi_device *sdev)
{
	BUG_ON(sdev->id != sdev->host->this_id);

	__scsi_remove_device(sdev);
}
EXPORT_SYMBOL(scsi_free_host_dev);
1961
1962