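/*
 * scsi_scan.c - SCSI bus, target and LUN scanning.
 *
 * A target is probed by sending an INQUIRY to LUN 0 (scsi_probe_lun).  If
 * something answers, the remaining LUNs on that target are discovered either
 * with REPORT LUNS (scsi_report_lun_scan) or, for older devices, by probing
 * LUN numbers sequentially (scsi_sequential_lun_scan).  Every LUN that
 * responds gets a struct scsi_device allocated (scsi_alloc_sdev), configured
 * from its INQUIRY data (scsi_add_lun) and registered with sysfs.
 *
 * Hosts may scan synchronously or asynchronously; see the "scan" module
 * parameter and scsi_prep_async_scan()/scsi_finish_async_scan() below.
 */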
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_dh.h>
#include <scsi/scsi_eh.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define ALLOC_FAILURE_MSG	KERN_ERR "%s: Allocation failure during" \
	" SCSI scanning, some SCSI devices might not be configured\n"

/*
 * Default timeouts: one for ordinary commands used while scanning, and a
 * longer one for REPORT LUNS.
 */
#define SCSI_TIMEOUT (2*HZ)
#define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)

#define SCSI_UID_SER_NUM 'S'
#define SCSI_UID_UNKNOWN 'Z'

/*
 * Return values of scsi_probe_and_add_lun(): nothing answered at the
 * address, a target answered but no usable LUN is attached, or a LUN was
 * found and a scsi_device was attached to it.
 */
#define SCSI_SCAN_NO_RESPONSE		0
#define SCSI_SCAN_TARGET_PRESENT	1
#define SCSI_SCAN_LUN_PRESENT		2

static const char *scsi_null_device_strs = "nullnullnullnull";

#define MAX_SCSI_LUNS	512

static u64 max_scsi_luns = MAX_SCSI_LUNS;

module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(max_luns,
		 "last scsi LUN (should be between 1 and 2^64-1)");

#ifdef CONFIG_SCSI_SCAN_ASYNC
#define SCSI_SCAN_TYPE_DEFAULT "async"
#else
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif

/* Scan behaviour: "sync", "async", "manual" or "none". */
char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;

module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
		    S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
		 "Setting to 'manual' disables automatic scanning, but allows "
		 "for manual device scan via the 'scan' sysfs attribute.");

static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;

module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(inq_timeout,
		 "Timeout (in seconds) waiting for devices to answer INQUIRY."
		 " Default is 20. Some devices may need more; most need less.");

/* Serializes hosts that are scanning asynchronously. */
static DEFINE_SPINLOCK(async_scan_lock);
static LIST_HEAD(scanning_hosts);

struct async_scan_data {
	struct list_head list;
	struct Scsi_Host *shost;
	struct completion prev_finished;
};
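/**
 * scsi_complete_async_scans - Wait for asynchronous scans to complete
 *
 * When this function returns, any host which started scanning before
 * this function was called will have finished its scan.  Hosts that
 * started scanning after this function was called may still be scanning.
 */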
int scsi_complete_async_scans(void)
{
	struct async_scan_data *data;

	do {
		if (list_empty(&scanning_hosts))
			return 0;
		/*
		 * If we can't get memory immediately, that's OK.  Just
		 * sleep a little; even if we never get memory, the async
		 * scans will finish eventually.
		 */
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			msleep(1);
	} while (!data);

	data->shost = NULL;
	init_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	/* Check that there's still somebody else on the list */
	if (list_empty(&scanning_hosts))
		goto done;
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
	wait_for_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	list_del(&data->list);
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
				struct async_scan_data, list);
		complete(&next->prev_finished);
	}
 done:
	spin_unlock(&async_scan_lock);

	kfree(data);
	return 0;
}
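/**
 * scsi_unlock_floptical - send special MODE SENSE command to unlock a device
 * @sdev:	scsi device to send command to
 * @result:	area to store the result of the MODE SENSE
 *
 * Description:
 *     Send a vendor specific MODE SENSE (not a MODE SELECT) command to
 *     @sdev.  Called for BLIST_KEY devices.
 **/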
static void scsi_unlock_floptical(struct scsi_device *sdev,
				  unsigned char *result)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];

	sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
	scsi_cmd[0] = MODE_SENSE;
	scsi_cmd[1] = 0;
	scsi_cmd[2] = 0x2e;
	scsi_cmd[3] = 0;
	scsi_cmd[4] = 0x2a;
	scsi_cmd[5] = 0;
	scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
			 SCSI_TIMEOUT, 3, NULL);
}
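/**
 * scsi_alloc_sdev - allocate and set up a scsi_device
 * @starget: which target to allocate a &scsi_device for
 * @lun: which lun
 * @hostdata: usually NULL and set by ->slave_alloc instead
 *
 * Description:
 *     Allocate, initialize for io, and return a pointer to a scsi_device.
 *     Stores the host, channel, id, and lun in the scsi_device, and adds the
 *     scsi_device to the appropriate lists.
 *
 * Return value:
 *     scsi_device pointer, or NULL on failure.
 **/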
static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
					   u64 lun, void *hostdata)
{
	struct scsi_device *sdev;
	int display_failure_msg = 1, ret;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
		       GFP_KERNEL);
	if (!sdev)
		goto out;

	sdev->vendor = scsi_null_device_strs;
	sdev->model = scsi_null_device_strs;
	sdev->rev = scsi_null_device_strs;
	sdev->host = shost;
	sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
	sdev->id = starget->id;
	sdev->lun = lun;
	sdev->channel = starget->channel;
	mutex_init(&sdev->state_mutex);
	sdev->sdev_state = SDEV_CREATED;
	INIT_LIST_HEAD(&sdev->siblings);
	INIT_LIST_HEAD(&sdev->same_target_siblings);
	INIT_LIST_HEAD(&sdev->cmd_list);
	INIT_LIST_HEAD(&sdev->starved_entry);
	INIT_LIST_HEAD(&sdev->event_list);
	spin_lock_init(&sdev->list_lock);
	mutex_init(&sdev->inquiry_mutex);
	INIT_WORK(&sdev->event_work, scsi_evt_thread);
	INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);

	sdev->sdev_gendev.parent = get_device(&starget->dev);
	sdev->sdev_target = starget;

	/* usually NULL and set by ->slave_alloc instead */
	sdev->hostdata = hostdata;

	/* if the device needs this changing, it may do so in the
	 * slave_configure function */
	sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;

	/*
	 * Some low level drivers use device->type to pick a default.
	 */
	sdev->type = -1;

	/*
	 * Assume that the device will have handshaking problems,
	 * and then fix this field later if it turns out it doesn't.
	 */
	sdev->borken = 1;

	sdev->request_queue = scsi_mq_alloc_queue(sdev);
	if (!sdev->request_queue) {
		/* release fn is set up in scsi_sysfs_device_initialize, so
		 * have to free and put manually here */
		put_device(&starget->dev);
		kfree(sdev);
		goto out;
	}
	WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
	sdev->request_queue->queuedata = sdev;

	scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ?
					sdev->host->cmd_per_lun : 1);

	scsi_sysfs_device_initialize(sdev);

	if (shost->hostt->slave_alloc) {
		ret = shost->hostt->slave_alloc(sdev);
		if (ret) {
			/*
			 * if LLDD reports slave not present, don't clutter
			 * the console with failure messages
			 */
			if (ret == -ENXIO)
				display_failure_msg = 0;
			goto out_device_destroy;
		}
	}

	return sdev;

out_device_destroy:
	__scsi_remove_device(sdev);
out:
	if (display_failure_msg)
		printk(ALLOC_FAILURE_MSG, __func__);
	return NULL;
}

static void scsi_target_destroy(struct scsi_target *starget)
{
	struct device *dev = &starget->dev;
	struct Scsi_Host *shost = dev_to_shost(dev->parent);
	unsigned long flags;

	BUG_ON(starget->state == STARGET_DEL);
	starget->state = STARGET_DEL;
	transport_destroy_device(dev);
	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->hostt->target_destroy)
		shost->hostt->target_destroy(starget);
	list_del_init(&starget->siblings);
	spin_unlock_irqrestore(shost->host_lock, flags);
	put_device(dev);
}

static void scsi_target_dev_release(struct device *dev)
{
	struct device *parent = dev->parent;
	struct scsi_target *starget = to_scsi_target(dev);

	kfree(starget);
	put_device(parent);
}

static struct device_type scsi_target_type = {
	.name =		"scsi_target",
	.release =	scsi_target_dev_release,
};

int scsi_is_target_device(const struct device *dev)
{
	return dev->type == &scsi_target_type;
}
EXPORT_SYMBOL(scsi_is_target_device);

static struct scsi_target *__scsi_find_target(struct device *parent,
					      int channel, uint id)
{
	struct scsi_target *starget, *found_starget = NULL;
	struct Scsi_Host *shost = dev_to_shost(parent);

	list_for_each_entry(starget, &shost->__targets, siblings) {
		if (starget->id == id &&
		    starget->channel == channel) {
			found_starget = starget;
			break;
		}
	}
	if (found_starget)
		get_device(&found_starget->dev);

	return found_starget;
}
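/**
 * scsi_target_reap_ref_release - remove target from visibility
 * @kref: the reap_ref in the target being released
 *
 * Called on last put of reap_ref, which is the indication that no device
 * under this target is visible anymore, so render the target invisible in
 * sysfs.  Note: we have to be in user context here because the target reaps
 * should be done in places where the scsi device visibility is being removed.
 */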
static void scsi_target_reap_ref_release(struct kref *kref)
{
	struct scsi_target *starget
		= container_of(kref, struct scsi_target, reap_ref);

	/*
	 * If we get here and the target is still in a CREATED state, it
	 * was allocated but never made visible (because a scan turned up
	 * no LUNs), so don't call device_del() on it.
	 */
	if ((starget->state != STARGET_CREATED) &&
	    (starget->state != STARGET_CREATED_REMOVE)) {
		transport_remove_device(&starget->dev);
		device_del(&starget->dev);
	}
	scsi_target_destroy(starget);
}

static void scsi_target_reap_ref_put(struct scsi_target *starget)
{
	kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
}
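/**
 * scsi_alloc_target - allocate a new or find an existing target
 * @parent:	parent of the target (needn't be a scsi host)
 * @channel:	target channel number (zero if no channels)
 * @id:		target id number
 *
 * Return an existing target if one exists, provided it hasn't already
 * gone into STARGET_DEL state, otherwise allocate a new target.
 *
 * The target is returned with an incremented reference, so the caller
 * is responsible for both reaping and doing a last put.
 */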
static struct scsi_target *scsi_alloc_target(struct device *parent,
					     int channel, uint id)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	struct device *dev = NULL;
	unsigned long flags;
	const int size = sizeof(struct scsi_target)
		+ shost->transportt->target_size;
	struct scsi_target *starget;
	struct scsi_target *found_target;
	int error, ref_got;

	starget = kzalloc(size, GFP_KERNEL);
	if (!starget) {
		printk(KERN_ERR "%s: allocation failure\n", __func__);
		return NULL;
	}
	dev = &starget->dev;
	device_initialize(dev);
	kref_init(&starget->reap_ref);
	dev->parent = get_device(parent);
	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
	dev->bus = &scsi_bus_type;
	dev->type = &scsi_target_type;
	starget->id = id;
	starget->channel = channel;
	starget->can_queue = 0;
	INIT_LIST_HEAD(&starget->siblings);
	INIT_LIST_HEAD(&starget->devices);
	starget->state = STARGET_CREATED;
	starget->scsi_level = SCSI_2;
	starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
 retry:
	spin_lock_irqsave(shost->host_lock, flags);

	found_target = __scsi_find_target(parent, channel, id);
	if (found_target)
		goto found;

	list_add_tail(&starget->siblings, &shost->__targets);
	spin_unlock_irqrestore(shost->host_lock, flags);
	/* allocate and add */
	transport_setup_device(dev);
	if (shost->hostt->target_alloc) {
		error = shost->hostt->target_alloc(starget);

		if (error) {
			dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
			/* don't want scsi_target_reap to do the final
			 * put because it will be under the host lock */
			scsi_target_destroy(starget);
			return NULL;
		}
	}
	get_device(dev);

	return starget;

 found:
	/*
	 * The release routine has already fired if the kref is zero, so if
	 * we can still take the reference, the target must be alive.  If
	 * we can't, it must be dying and we need to wait for a new target.
	 */
	ref_got = kref_get_unless_zero(&found_target->reap_ref);

	spin_unlock_irqrestore(shost->host_lock, flags);
	if (ref_got) {
		put_device(dev);
		return found_target;
	}
	/*
	 * We found a dying target; drop the device reference taken by
	 * __scsi_find_target() so its release can complete, then wait for
	 * it to go away before retrying with a fresh allocation.
	 */
	put_device(&found_target->dev);
	/*
	 * The exact sleep length is irrelevant; we just need to give the
	 * release a chance to run before retrying.
	 */
	msleep(1);
	goto retry;
}
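/**
 * scsi_target_reap - check to see if target is in use and destroy if not
 * @starget: target to be checked
 *
 * This is used after removing a LUN or doing a last put of the target
 * it checks atomically that nothing is using the target and removes
 * it if so.
 */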
void scsi_target_reap(struct scsi_target *starget)
{
	/*
	 * Serious problem if this triggers: STARGET_DEL is only set once
	 * the reap_ref drops to zero, so this would be a second final put
	 * on an already released kref.
	 */
	BUG_ON(starget->state == STARGET_DEL);
	scsi_target_reap_ref_put(starget);
}
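/**
 * scsi_sanitize_inquiry_string - remove non-graphical chars from an
 *                                INQUIRY result string
 * @s: INQUIRY result string to sanitize
 * @len: length of the string
 *
 * Description:
 *	The SCSI spec says that INQUIRY vendor, product, and revision
 *	strings must consist entirely of graphic ASCII characters,
 *	padded on the right with spaces.  Since not all devices obey
 *	this rule, we will replace non-graphic or non-ASCII characters
 *	with spaces.  Exception: a NUL character is interpreted as a
 *	string terminator, so all the following characters are set to
 *	spaces.
 **/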
void scsi_sanitize_inquiry_string(unsigned char *s, int len)
{
	int terminated = 0;

	for (; len > 0; (--len, ++s)) {
		if (*s == 0)
			terminated = 1;
		if (terminated || *s < 0x20 || *s > 0x7e)
			*s = ' ';
	}
}
EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
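/**
 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
 * @sdev:	scsi_device to probe
 * @inq_result:	area to store the INQUIRY result
 * @result_len:	length of inq_result
 * @bflags:	store any bflags found here
 *
 * Description:
 *     Probe the LUN associated with @sdev using a standard SCSI INQUIRY.
 *
 *     On success, zero is returned, the INQUIRY data is in @inq_result,
 *     the scsi_level and INQUIRY length are copied to the scsi_device,
 *     and any black/white list flags are stored in *@bflags.
 **/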
static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
			  int result_len, blist_flags_t *bflags)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int first_inquiry_len, try_inquiry_len, next_inquiry_len;
	int response_len = 0;
	int pass, count, result;
	struct scsi_sense_hdr sshdr;

	*bflags = 0;

	/* Perform up to 3 passes.  The first pass uses a conservative
	 * transfer length of 36 unless sdev->inquiry_len specifies a
	 * different value. */
	first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
	try_inquiry_len = first_inquiry_len;
	pass = 1;

 next_pass:
	SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY pass %d length %d\n",
				pass, try_inquiry_len));

	/* Each pass gets up to three chances to ignore Unit Attention */
	for (count = 0; count < 3; ++count) {
		int resid;

		memset(scsi_cmd, 0, 6);
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) try_inquiry_len;

		memset(inq_result, 0, try_inquiry_len);

		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
					  inq_result, try_inquiry_len, &sshdr,
					  HZ / 2 + HZ * scsi_inq_timeout, 3,
					  &resid);

		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY %s with code 0x%x\n",
				result ? "failed" : "successful", result));

		if (result) {
			/*
			 * not-ready to ready transition [asc/ascq=0x28/0x0]
			 * or power-on, reset [asc/ascq=0x29/0x0], continue.
			 * INQUIRY should not yield UNIT_ATTENTION
			 * but many buggy devices do so anyway.
			 */
			if (driver_byte(result) == DRIVER_SENSE &&
			    scsi_sense_valid(&sshdr)) {
				if ((sshdr.sense_key == UNIT_ATTENTION) &&
				    ((sshdr.asc == 0x28) ||
				     (sshdr.asc == 0x29)) &&
				    (sshdr.ascq == 0))
					continue;
			}
		} else {
			/*
			 * if nothing was transferred, we try
			 * again. It's a workaround for some USB
			 * devices.
			 */
			if (resid == try_inquiry_len)
				continue;
		}
		break;
	}

	if (result == 0) {
		scsi_sanitize_inquiry_string(&inq_result[8], 8);
		scsi_sanitize_inquiry_string(&inq_result[16], 16);
		scsi_sanitize_inquiry_string(&inq_result[32], 4);

		response_len = inq_result[4] + 5;
		if (response_len > 255)
			response_len = first_inquiry_len;	/* sanity */

		/*
		 * Get any flags for this device.
		 */
		*bflags = scsi_get_device_flags(sdev, &inq_result[8],
						&inq_result[16]);

		/* When the first pass succeeds we gain information about
		 * what larger transfer lengths might work. */
		if (pass == 1) {
			if (BLIST_INQUIRY_36 & *bflags)
				next_inquiry_len = 36;
			else if (sdev->inquiry_len)
				next_inquiry_len = sdev->inquiry_len;
			else
				next_inquiry_len = response_len;

			/* If more data is available perform the second pass */
			if (next_inquiry_len > try_inquiry_len) {
				try_inquiry_len = next_inquiry_len;
				pass = 2;
				goto next_pass;
			}
		}

	} else if (pass == 2) {
		sdev_printk(KERN_INFO, sdev,
			    "scsi scan: %d byte inquiry failed. "
			    "Consider BLIST_INQUIRY_36 for this device\n",
			    try_inquiry_len);

		/* If this pass failed, the third pass goes back and transfers
		 * the same amount as we successfully got in the first pass. */
		try_inquiry_len = first_inquiry_len;
		pass = 3;
		goto next_pass;
	}

	/* If the last transfer attempt got an error, assume the
	 * peripheral doesn't exist or is dead. */
	if (result)
		return -EIO;

	/* Don't report any more data than the device says is valid */
	sdev->inquiry_len = min(try_inquiry_len, response_len);

	/*
	 * A response length below 36 would make the device-flag lookup
	 * above unreliable and leave the vendor, model and revision
	 * strings garbled, so assume the first 36 bytes are valid no
	 * matter what the device claims.
	 */
	if (sdev->inquiry_len < 36) {
		if (!sdev->host->short_inquiry) {
			shost_printk(KERN_INFO, sdev->host,
				    "scsi scan: INQUIRY result too short (%d),"
				    " using 36\n", sdev->inquiry_len);
			sdev->host->short_inquiry = 1;
		}
		sdev->inquiry_len = 36;
	}

	/*
	 * The scanning code needs to know the scsi_level, even if no
	 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
	 * non-zero LUNs can be scanned.
	 */
	sdev->scsi_level = inq_result[2] & 0x07;
	if (sdev->scsi_level >= 2 ||
	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
		sdev->scsi_level++;
	sdev->sdev_target->scsi_level = sdev->scsi_level;

	/*
	 * If SCSI-2 or lower, and if the host does not forbid it, store
	 * the LUN value in CDB[1].
	 */
	sdev->lun_in_cdb = 0;
	if (sdev->scsi_level <= SCSI_2 &&
	    sdev->scsi_level != SCSI_UNKNOWN &&
	    !sdev->host->no_scsi2_lun_in_cdb)
		sdev->lun_in_cdb = 1;

	return 0;
}
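/**
 * scsi_add_lun - allocate and fully initialize a scsi_device
 * @sdev:	holds information to be stored in the new scsi_device
 * @inq_result:	holds the result of a previous INQUIRY to the LUN
 * @bflags:	black/white list flag
 * @async:	1 if this device is being scanned asynchronously
 *
 * Description:
 *     Initialize the scsi_device @sdev.  Optionally set fields based
 *     on values in *@bflags.
 *
 * Return:
 *     SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
 *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
 **/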
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
			blist_flags_t *bflags, int async)
{
	int ret;

	/*
	 * Keep a copy of the INQUIRY data (at least the first 36 bytes);
	 * the vendor, model and revision strings exposed below point into
	 * this copy.
	 */
	sdev->inquiry = kmemdup(inq_result,
				max_t(size_t, sdev->inquiry_len, 36),
				GFP_KERNEL);
	if (sdev->inquiry == NULL)
		return SCSI_SCAN_NO_RESPONSE;

	sdev->vendor = (char *) (sdev->inquiry + 8);
	sdev->model = (char *) (sdev->inquiry + 16);
	sdev->rev = (char *) (sdev->inquiry + 32);

	if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
		/*
		 * sata emulation layer device.  This is a hack to work around
		 * the SATL power management specifications which state that
		 * when the SATL detects the device has gone into standby
		 * mode, it shall respond with NOT READY.
		 */
		sdev->allow_restart = 1;
	}

	if (*bflags & BLIST_ISROM) {
		sdev->type = TYPE_ROM;
		sdev->removable = 1;
	} else {
		sdev->type = (inq_result[0] & 0x1f);
		sdev->removable = (inq_result[1] & 0x80) >> 7;

		/*
		 * Some devices may respond with the wrong type for
		 * well-known logical units.  Force the well-known type
		 * so they are enumerated correctly.
		 */
		if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
			sdev_printk(KERN_WARNING, sdev,
				"%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
				__func__, sdev->type, (unsigned int)sdev->lun);
			sdev->type = TYPE_WLUN;
		}

	}

	if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
		/*
		 * RBC and MMC devices can return SCSI-3 compliance and yet
		 * still not support REPORT LUNS, so make them act as
		 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
		 * specifically set.
		 */
		if ((*bflags & BLIST_REPORTLUN2) == 0)
			*bflags |= BLIST_NOREPORTLUN;
	}

	/*
	 * Don't set the device offline based on the peripheral qualifier
	 * here; let the upper level drivers evaluate it and decide whether
	 * they should attach.
	 */
	sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
	sdev->lockable = sdev->removable;
	sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);

	if (sdev->scsi_level >= SCSI_3 ||
	    (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
		sdev->ppr = 1;
	if (inq_result[7] & 0x60)
		sdev->wdtr = 1;
	if (inq_result[7] & 0x10)
		sdev->sdtr = 1;

	sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
			"ANSI: %d%s\n", scsi_device_type(sdev->type),
			sdev->vendor, sdev->model, sdev->rev,
			sdev->inq_periph_qual, inq_result[2] & 0x07,
			(inq_result[3] & 0x0f) == 1 ? " CCS" : "");

	if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
	    !(*bflags & BLIST_NOTQ)) {
		sdev->tagged_supported = 1;
		sdev->simple_tags = 1;
	}

	/*
	 * Some devices (Texel CD ROM drives) have handshaking problems
	 * when used with the Seagate controllers.  borken is initialized
	 * to 1, and then set it to 0 here.
	 */
	if ((*bflags & BLIST_BORKEN) == 0)
		sdev->borken = 0;

	if (*bflags & BLIST_NO_ULD_ATTACH)
		sdev->no_uld_attach = 1;

	/*
	 * Apparently some really broken devices (contrary to the SCSI
	 * standards) need to be selected without asserting ATN.
	 */
	if (*bflags & BLIST_SELECT_NO_ATN)
		sdev->select_no_atn = 1;

	/*
	 * Maximum 512 sector transfer length
	 * broken RA4x00 Compaq Disk Array
	 */
	if (*bflags & BLIST_MAX_512)
		blk_queue_max_hw_sectors(sdev->request_queue, 512);
	/*
	 * Max 1024 sector transfer length for targets that report incorrect
	 * max/optimal lengths and relied on the old block layer safe default
	 */
	else if (*bflags & BLIST_MAX_1024)
		blk_queue_max_hw_sectors(sdev->request_queue, 1024);

	/*
	 * Some devices may not want to have a start command automatically
	 * issued when a device is added.
	 */
	if (*bflags & BLIST_NOSTARTONADD)
		sdev->no_start_on_add = 1;

	if (*bflags & BLIST_SINGLELUN)
		scsi_target(sdev)->single_lun = 1;

	sdev->use_10_for_rw = 1;

	/* some devices don't like REPORT SUPPORTED OPERATION CODES
	 * and will simply timeout causing sd_mod init to take a very
	 * very long time */
	if (*bflags & BLIST_NO_RSOC)
		sdev->no_report_opcodes = 1;

	/* set the device running here so that slave_configure
	 * can do I/O */
	mutex_lock(&sdev->state_mutex);
	ret = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (ret)
		ret = scsi_device_set_state(sdev, SDEV_BLOCK);
	mutex_unlock(&sdev->state_mutex);

	if (ret) {
		sdev_printk(KERN_ERR, sdev,
			    "in wrong state %s to complete scan\n",
			    scsi_device_state_name(sdev->sdev_state));
		return SCSI_SCAN_NO_RESPONSE;
	}

	if (*bflags & BLIST_NOT_LOCKABLE)
		sdev->lockable = 0;

	if (*bflags & BLIST_RETRY_HWERROR)
		sdev->retry_hwerror = 1;

	if (*bflags & BLIST_NO_DIF)
		sdev->no_dif = 1;

	if (*bflags & BLIST_UNMAP_LIMIT_WS)
		sdev->unmap_limit_for_ws = 1;

	sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;

	if (*bflags & BLIST_TRY_VPD_PAGES)
		sdev->try_vpd_pages = 1;
	else if (*bflags & BLIST_SKIP_VPD_PAGES)
		sdev->skip_vpd_pages = 1;

	transport_configure_device(&sdev->sdev_gendev);

	if (sdev->host->hostt->slave_configure) {
		ret = sdev->host->hostt->slave_configure(sdev);
		if (ret) {
			/*
			 * if LLDD reports slave not present, don't clutter
			 * the console with failure messages
			 */
			if (ret != -ENXIO) {
				sdev_printk(KERN_ERR, sdev,
					"failed to configure device\n");
			}
			return SCSI_SCAN_NO_RESPONSE;
		}
	}

	if (sdev->scsi_level >= SCSI_3)
		scsi_attach_vpd(sdev);

	sdev->max_queue_depth = sdev->queue_depth;
	sdev->sdev_bflags = *bflags;

	/*
	 * Ok, the device is now all set up, we can
	 * register it and tell the rest of the kernel
	 * about it.
	 */
	if (!async && scsi_sysfs_add_sdev(sdev) != 0)
		return SCSI_SCAN_NO_RESPONSE;

	return SCSI_SCAN_LUN_PRESENT;
}

#ifdef CONFIG_SCSI_LOGGING
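/**
 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
 * @buf:   Output buffer with at least end-first+1 bytes of space
 * @inq:   Inquiry buffer (input)
 * @first: Offset of string into inq
 * @end:   Index after last character in inq
 */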
static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
				   unsigned first, unsigned end)
{
	unsigned term = 0, idx;

	for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) {
		if (inq[idx+first] > ' ') {
			buf[idx] = inq[idx+first];
			term = idx+1;
		} else {
			buf[idx] = ' ';
		}
	}
	buf[term] = 0;
	return buf;
}
#endif
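/**
 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
 * @starget:	pointer to target device structure
 * @lun:	LUN of target device
 * @bflagsp:	store bflags here if not NULL
 * @sdevp:	probe the LUN corresponding to this scsi_device
 * @rescan:	if not equal to SCSI_SCAN_INITIAL, skip some code only
 *		needed on first scan
 * @hostdata:	passed to scsi_alloc_sdev()
 *
 * Description:
 *     Call scsi_probe_lun, if a LUN with an attached device is found,
 *     allocate and set it up by calling scsi_add_lun.
 *
 * Return:
 *
 *   - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
 *   - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
 *         attached at the LUN
 *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
 **/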
static int scsi_probe_and_add_lun(struct scsi_target *starget,
				  u64 lun, blist_flags_t *bflagsp,
				  struct scsi_device **sdevp,
				  enum scsi_scan_mode rescan,
				  void *hostdata)
{
	struct scsi_device *sdev;
	unsigned char *result;
	blist_flags_t bflags;
	int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	/*
	 * The rescan flag is used as an optimization: the first scan of a
	 * host adapter calls into here with rescan == SCSI_SCAN_INITIAL.
	 */
	sdev = scsi_device_lookup_by_target(starget, lun);
	if (sdev) {
		if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
			SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: device exists on %s\n",
				dev_name(&sdev->sdev_gendev)));
			if (sdevp)
				*sdevp = sdev;
			else
				scsi_device_put(sdev);

			if (bflagsp)
				*bflagsp = scsi_get_device_flags(sdev,
								 sdev->vendor,
								 sdev->model);
			return SCSI_SCAN_LUN_PRESENT;
		}
		scsi_device_put(sdev);
	} else
		sdev = scsi_alloc_sdev(starget, lun, hostdata);
	if (!sdev)
		goto out;

	result = kmalloc(result_len, GFP_KERNEL |
			((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
	if (!result)
		goto out_free_sdev;

	if (scsi_probe_lun(sdev, result, result_len, &bflags))
		goto out_free_result;

	if (bflagsp)
		*bflagsp = bflags;
	/*
	 * result contains valid SCSI INQUIRY data.
	 */
	if ((result[0] >> 5) == 3) {
		/*
		 * For a Peripheral qualifier 3 (011b), the SCSI
		 * spec says: The device server is not capable of
		 * supporting a physical device on this logical
		 * unit.
		 *
		 * For disks, this implies that there is no
		 * logical disk configured at sdev->lun, but there
		 * is a target id responding.
		 */
		SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
				   " peripheral qualifier of 3, device not"
				   " added\n"));
		if (lun == 0) {
			SCSI_LOG_SCAN_BUS(1, {
				unsigned char vend[9];
				unsigned char mod[17];

				sdev_printk(KERN_INFO, sdev,
					"scsi scan: consider passing scsi_mod."
					"dev_flags=%s:%s:0x240 or 0x1000240\n",
					scsi_inq_str(vend, result, 8, 16),
					scsi_inq_str(mod, result, 16, 32));
			});
		}

		res = SCSI_SCAN_TARGET_PRESENT;
		goto out_free_result;
	}

	/*
	 * Some non-standard targets set the peripheral device type to 0x1f
	 * (unknown or no device type) instead of using the peripheral
	 * qualifier to indicate that no LUN is present; USB UFI is one
	 * example.  A peripheral qualifier of 1 (001b) means the target
	 * could support the device type at this LUN but nothing is
	 * currently connected.  Treat both cases as "target present, no
	 * device added", except for well-known LUNs, which must always be
	 * exposed.
	 */
	if (((result[0] >> 5) == 1 ||
	    (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
	    !scsi_is_wlun(lun)) {
		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
					"scsi scan: peripheral device type"
					" of 31, no device added\n"));
		res = SCSI_SCAN_TARGET_PRESENT;
		goto out_free_result;
	}

	res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (bflags & BLIST_KEY) {
			sdev->lockable = 0;
			scsi_unlock_floptical(sdev, result);
		}
	}

 out_free_result:
	kfree(result);
 out_free_sdev:
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (sdevp) {
			if (scsi_device_get(sdev) == 0) {
				*sdevp = sdev;
			} else {
				__scsi_remove_device(sdev);
				res = SCSI_SCAN_NO_RESPONSE;
			}
		}
	} else
		__scsi_remove_device(sdev);
 out:
	return res;
}
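/**
 * scsi_sequential_lun_scan - sequentially scan a SCSI target
 * @starget:	pointer to target structure to scan
 * @bflags:	black/white list flag for LUN 0
 * @scsi_level:	which version of the standard this device adheres to
 * @rescan:	passed to scsi_probe_and_add_lun()
 *
 * Description:
 *     Generally, scan from LUN 1 (LUN 0 is assumed to already have been
 *     scanned) to some maximum lun until a LUN is found with no device
 *     attached.  Use the bflags to figure out any oddities.
 **/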
static void scsi_sequential_lun_scan(struct scsi_target *starget,
				     blist_flags_t bflags, int scsi_level,
				     enum scsi_scan_mode rescan)
{
	uint max_dev_lun;
	u64 sparse_lun, lun;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
		"scsi scan: Sequential scan\n"));

	max_dev_lun = min(max_scsi_luns, shost->max_lun);
	/*
	 * If this device is known to support sparse multiple units,
	 * override the other settings, and scan all of them.  Normally,
	 * SCSI-3 devices should be scanned via the REPORT LUNS.
	 */
	if (bflags & BLIST_SPARSELUN) {
		max_dev_lun = shost->max_lun;
		sparse_lun = 1;
	} else
		sparse_lun = 0;

	/*
	 * If this device is known to support multiple units, override
	 * the other settings, and scan all of them.
	 */
	if (bflags & BLIST_FORCELUN)
		max_dev_lun = shost->max_lun;
	/*
	 * REGAL CDC-4X: avoid hang after LUN 4
	 */
	if (bflags & BLIST_MAX5LUN)
		max_dev_lun = min(5U, max_dev_lun);
	/*
	 * Do not scan SCSI-2 or lower devices past LUN 7, unless
	 * BLIST_LARGELUN.
	 */
	if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
		max_dev_lun = min(8U, max_dev_lun);
	else
		max_dev_lun = min(256U, max_dev_lun);

	/*
	 * We have already scanned LUN 0, so start at LUN 1.  Keep scanning
	 * until we reach the max, or no LUN is found and we are not
	 * sparse_lun.
	 */
	for (lun = 1; lun < max_dev_lun; ++lun)
		if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
					    NULL) != SCSI_SCAN_LUN_PRESENT) &&
		    !sparse_lun)
			return;
}
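/**
 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
 * @starget: which target
 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
 * @rescan: nonzero if we can skip code only needed on first scan
 *
 * Description:
 *   Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
 *   Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
 *
 *   If BLIST_REPORTLUN2 is set, scan a target that supports more than 8
 *   LUNs even if it's older than SCSI-3.
 *   If BLIST_NOREPORTLUN is set, return 1 always.
 *   If BLIST_NOLUN is set, return 0 always.
 *   If starget->no_report_luns is set, return 1 always.
 *
 * Return:
 *     0: scan completed (or no memory, so further scanning is futile)
 *     1: could not scan with REPORT LUN
 **/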
static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
				enum scsi_scan_mode rescan)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	unsigned int length;
	u64 lun;
	unsigned int num_luns;
	unsigned int retries;
	int result;
	struct scsi_lun *lunp, *lun_data;
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int ret = 0;

	/*
	 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
	 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and the host adapter
	 * supports more than 8 LUNs.
	 * Don't attempt if the target doesn't support REPORT LUNS.
	 */
	if (bflags & BLIST_NOREPORTLUN)
		return 1;
	if (starget->scsi_level < SCSI_2 &&
	    starget->scsi_level != SCSI_UNKNOWN)
		return 1;
	if (starget->scsi_level < SCSI_3 &&
	    (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
		return 1;
	if (bflags & BLIST_NOLUN)
		return 0;
	if (starget->no_report_luns)
		return 1;

	if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
		sdev = scsi_alloc_sdev(starget, 0, NULL);
		if (!sdev)
			return 0;
		if (scsi_device_get(sdev)) {
			__scsi_remove_device(sdev);
			return 0;
		}
	}

	/*
	 * Allocate enough to hold the header (the same size as one scsi_lun)
	 * plus the number of luns we are requesting.  511 was the default
	 * value of the now removed max_report_luns parameter.
	 */
	length = (511 + 1) * sizeof(struct scsi_lun);
retry:
	lun_data = kmalloc(length, GFP_KERNEL |
			   (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
	if (!lun_data) {
		printk(ALLOC_FAILURE_MSG, __func__);
		goto out;
	}

	scsi_cmd[0] = REPORT_LUNS;

	/*
	 * bytes 1 - 5: reserved, set to zero.
	 */
	memset(&scsi_cmd[1], 0, 5);

	/*
	 * bytes 6 - 9: length of the command.
	 */
	put_unaligned_be32(length, &scsi_cmd[6]);

	scsi_cmd[10] = 0;	/* reserved */
	scsi_cmd[11] = 0;	/* control */

	/*
	 * We can get a UNIT ATTENTION, for example a power on/reset, so
	 * retry a few times (like sd.c does for TEST UNIT READY).
	 * Experience shows some combinations of adapter/devices get at
	 * least two power on/resets.
	 *
	 * Illegal requests (for devices that do not support REPORT LUNS)
	 * should come through as a check condition, and will not generate
	 * a retry.
	 */
	for (retries = 0; retries < 3; retries++) {
		SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
				"scsi scan: Sending REPORT LUNS to (try %d)\n",
				retries));

		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
					  lun_data, length, &sshdr,
					  SCSI_REPORT_LUNS_TIMEOUT, 3, NULL);

		SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
				"scsi scan: REPORT LUNS"
				" %s (try %d) result 0x%x\n",
				result ? "failed" : "successful",
				retries, result));
		if (result == 0)
			break;
		else if (scsi_sense_valid(&sshdr)) {
			if (sshdr.sense_key != UNIT_ATTENTION)
				break;
		}
	}

	if (result) {
		/*
		 * The device probably does not support a REPORT LUN command.
		 */
		ret = 1;
		goto out_err;
	}

	/*
	 * If the device reports more LUN data than fits in the buffer we
	 * asked for, reallocate a larger buffer and retry.
	 */
	if (get_unaligned_be32(lun_data->scsi_lun) +
	    sizeof(struct scsi_lun) > length) {
		length = get_unaligned_be32(lun_data->scsi_lun) +
			 sizeof(struct scsi_lun);
		kfree(lun_data);
		goto retry;
	}
	length = get_unaligned_be32(lun_data->scsi_lun);

	num_luns = (length / sizeof(struct scsi_lun));

	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
		"scsi scan: REPORT LUN scan\n"));

	/*
	 * Scan the luns in lun_data.  The entry at offset 0 is really
	 * the header, so start at 1 and go up to and including num_luns.
	 */
	for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
		lun = scsilun_to_int(lunp);

		if (lun > sdev->host->max_lun) {
			sdev_printk(KERN_WARNING, sdev,
				    "lun%llu has a LUN larger than"
				    " allowed by the host adapter\n", lun);
		} else {
			int res;

			res = scsi_probe_and_add_lun(starget,
				lun, NULL, NULL, rescan, NULL);
			if (res == SCSI_SCAN_NO_RESPONSE) {
				/*
				 * Got some results, but now none, abort.
				 */
				sdev_printk(KERN_ERR, sdev,
					"Unexpected response"
					" from lun %llu while scanning, scan"
					" aborted\n", (unsigned long long)lun);
				break;
			}
		}
	}

 out_err:
	kfree(lun_data);
 out:
	if (scsi_device_created(sdev))
		/*
		 * the sdev we used didn't appear in the report luns scan
		 */
		__scsi_remove_device(sdev);
	scsi_device_put(sdev);
	return ret;
}

struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
				      uint id, u64 lun, void *hostdata)
{
	struct scsi_device *sdev = ERR_PTR(-ENODEV);
	struct device *parent = &shost->shost_gendev;
	struct scsi_target *starget;

	if (strncmp(scsi_scan_type, "none", 4) == 0)
		return ERR_PTR(-ENODEV);

	starget = scsi_alloc_target(parent, channel, id);
	if (!starget)
		return ERR_PTR(-ENOMEM);
	scsi_autopm_get_target(starget);

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		scsi_probe_and_add_lun(starget, lun, NULL, &sdev,
				       SCSI_SCAN_RESCAN, hostdata);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);
	scsi_autopm_put_target(starget);
	/*
	 * paired with scsi_alloc_target().  Target will be destroyed unless
	 * scsi_probe_and_add_lun made an underlying device visible
	 */
	scsi_target_reap(starget);
	put_device(&starget->dev);

	return sdev;
}
EXPORT_SYMBOL(__scsi_add_device);

int scsi_add_device(struct Scsi_Host *host, uint channel,
		    uint target, u64 lun)
{
	struct scsi_device *sdev =
		__scsi_add_device(host, channel, target, lun, NULL);
	if (IS_ERR(sdev))
		return PTR_ERR(sdev);

	scsi_device_put(sdev);
	return 0;
}
EXPORT_SYMBOL(scsi_add_device);

void scsi_rescan_device(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	device_lock(dev);

	scsi_attach_vpd(sdev);

	if (sdev->handler && sdev->handler->rescan)
		sdev->handler->rescan(sdev);

	if (dev->driver && try_module_get(dev->driver->owner)) {
		struct scsi_driver *drv = to_scsi_driver(dev->driver);

		if (drv->rescan)
			drv->rescan(dev);
		module_put(dev->driver->owner);
	}
	device_unlock(dev);
}
EXPORT_SYMBOL(scsi_rescan_device);

static void __scsi_scan_target(struct device *parent, unsigned int channel,
		unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	blist_flags_t bflags = 0;
	int res;
	struct scsi_target *starget;

	if (shost->this_id == id)
		/*
		 * Don't scan the host adapter
		 */
		return;

	starget = scsi_alloc_target(parent, channel, id);
	if (!starget)
		return;
	scsi_autopm_get_target(starget);

	if (lun != SCAN_WILD_CARD) {
		/*
		 * Scan for a specific host/chan/id/lun.
		 */
		scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
		goto out_reap;
	}

	/*
	 * Scan LUN 0, if there is some response, scan further.  Ideally, we
	 * would not configure LUN 0 until all LUNs are scanned.
	 */
	res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
	if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
		if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
			/*
			 * The REPORT LUN did not scan the target,
			 * do a sequential scan.
			 */
			scsi_sequential_lun_scan(starget, bflags,
						 starget->scsi_level, rescan);
	}

 out_reap:
	scsi_autopm_put_target(starget);
	/*
	 * paired with scsi_alloc_target(): determine if the target has
	 * any children at all and if not, nuke it
	 */
	scsi_target_reap(starget);

	put_device(&starget->dev);
}
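/**
 * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
 * @parent:	host to scan
 * @channel:	channel to scan
 * @id:		target id to scan
 * @lun:	Specific LUN to scan or SCAN_WILD_CARD
 * @rescan:	passed to LUN scanning routines; SCSI_SCAN_INITIAL for
 *              no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
 *              and SCSI_SCAN_MANUAL to force scanning even if
 *              'scan=manual' is set.
 *
 * Description:
 *     Scan the target id on @parent, @channel, and @id.  Scan at least LUN 0,
 *     and possibly all LUNs on the target id.
 *
 *     First try a REPORT LUN scan, if that does not scan the target, do a
 *     sequential scan of LUNs on the target id.
 **/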
void scsi_scan_target(struct device *parent, unsigned int channel,
		      unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);

	if (strncmp(scsi_scan_type, "none", 4) == 0)
		return;

	if (rescan != SCSI_SCAN_MANUAL &&
	    strncmp(scsi_scan_type, "manual", 6) == 0)
		return;

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		__scsi_scan_target(parent, channel, id, lun, rescan);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);
}
EXPORT_SYMBOL(scsi_scan_target);

static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
			      unsigned int id, u64 lun,
			      enum scsi_scan_mode rescan)
{
	uint order_id;

	if (id == SCAN_WILD_CARD)
		for (id = 0; id < shost->max_id; ++id) {
			/*
			 * XXX adapter drivers when possible (FCP, iSCSI)
			 * could modify max_id to match the current max,
			 * not the absolute max.
			 *
			 * XXX add a shost id iterator, so for example,
			 * the FC ID can be the same as a target id
			 * without a huge overlap of sparse ids.
			 */
			if (shost->reverse_ordering)
				/*
				 * Scan from high to low id.
				 */
				order_id = shost->max_id - id - 1;
			else
				order_id = id;
			__scsi_scan_target(&shost->shost_gendev, channel,
					order_id, lun, rescan);
		}
	else
		__scsi_scan_target(&shost->shost_gendev, channel,
				id, lun, rescan);
}

int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
			    unsigned int id, u64 lun,
			    enum scsi_scan_mode rescan)
{
	SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
		"%s: <%u:%u:%llu>\n",
		__func__, channel, id, lun));

	if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
	    ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
	    ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
		return -EINVAL;

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		if (channel == SCAN_WILD_CARD)
			for (channel = 0; channel <= shost->max_channel;
			     channel++)
				scsi_scan_channel(shost, channel, id, lun,
						  rescan);
		else
			scsi_scan_channel(shost, channel, id, lun, rescan);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);

	return 0;
}

static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		/* target removed before the device could be added */
		if (sdev->sdev_state == SDEV_DEL)
			continue;
		/* If device is already visible, skip adding it to sysfs */
		if (sdev->is_visible)
			continue;
		if (!scsi_host_scan_allowed(shost) ||
		    scsi_sysfs_add_sdev(sdev) != 0)
			__scsi_remove_device(sdev);
	}
}
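/**
 * scsi_prep_async_scan - prepare for an async scan
 * @shost: the host which will be scanned
 * Returns: a cookie to be passed to scsi_finish_async_scan(), or NULL if
 * the host should be scanned synchronously
 *
 * Tells the midlayer this host is going to do an asynchronous scan.
 * It reserves the host's position in the scanning list and ensures
 * that other asynchronous scans started after this one won't affect the
 * ordering of the discovered devices.
 */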
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
	struct async_scan_data *data;
	unsigned long flags;

	if (strncmp(scsi_scan_type, "sync", 4) == 0)
		return NULL;

	if (shost->async_scan) {
		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;
	data->shost = scsi_host_get(shost);
	if (!data->shost)
		goto err;
	init_completion(&data->prev_finished);

	mutex_lock(&shost->scan_mutex);
	spin_lock_irqsave(shost->host_lock, flags);
	shost->async_scan = 1;
	spin_unlock_irqrestore(shost->host_lock, flags);
	mutex_unlock(&shost->scan_mutex);

	spin_lock(&async_scan_lock);
	if (list_empty(&scanning_hosts))
		complete(&data->prev_finished);
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	return data;

 err:
	kfree(data);
	return NULL;
}
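/**
 * scsi_finish_async_scan - asynchronous scan has finished
 * @data: cookie returned from earlier call to scsi_prep_async_scan()
 *
 * All the devices currently attached to this host have been found.
 * This function announces all the devices it has found to the rest
 * of the system.
 */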
static void scsi_finish_async_scan(struct async_scan_data *data)
{
	struct Scsi_Host *shost;
	unsigned long flags;

	if (!data)
		return;

	shost = data->shost;

	mutex_lock(&shost->scan_mutex);

	if (!shost->async_scan) {
		shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
		dump_stack();
		mutex_unlock(&shost->scan_mutex);
		return;
	}

	wait_for_completion(&data->prev_finished);

	scsi_sysfs_add_devices(shost);

	spin_lock_irqsave(shost->host_lock, flags);
	shost->async_scan = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);

	mutex_unlock(&shost->scan_mutex);

	spin_lock(&async_scan_lock);
	list_del(&data->list);
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
				struct async_scan_data, list);
		complete(&next->prev_finished);
	}
	spin_unlock(&async_scan_lock);

	scsi_autopm_put_host(shost);
	scsi_host_put(shost);
	kfree(data);
}

static void do_scsi_scan_host(struct Scsi_Host *shost)
{
	if (shost->hostt->scan_finished) {
		unsigned long start = jiffies;
		if (shost->hostt->scan_start)
			shost->hostt->scan_start(shost);

		while (!shost->hostt->scan_finished(shost, jiffies - start))
			msleep(10);
	} else {
		scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
				SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
	}
}

static void do_scan_async(void *_data, async_cookie_t c)
{
	struct async_scan_data *data = _data;
	struct Scsi_Host *shost = data->shost;

	do_scsi_scan_host(shost);
	scsi_finish_async_scan(data);
}
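/**
 * scsi_scan_host - scan the given adapter
 * @shost:	adapter to scan
 **/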
void scsi_scan_host(struct Scsi_Host *shost)
{
	struct async_scan_data *data;

	if (strncmp(scsi_scan_type, "none", 4) == 0 ||
	    strncmp(scsi_scan_type, "manual", 6) == 0)
		return;
	if (scsi_autopm_get_host(shost) < 0)
		return;

	data = scsi_prep_async_scan(shost);
	if (!data) {
		do_scsi_scan_host(shost);
		scsi_autopm_put_host(shost);
		return;
	}

	/* register with the async subsystem so wait_for_device_probe()
	 * will flush this work
	 */
	async_schedule(do_scan_async, data);

	/* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
}
EXPORT_SYMBOL(scsi_scan_host);

void scsi_forget_host(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;

 restart:
	spin_lock_irqsave(shost->host_lock, flags);
	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (sdev->sdev_state == SDEV_DEL)
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);
		__scsi_remove_device(sdev);
		goto restart;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}
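/**
 * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself
 * @shost: Host that needs a scsi_device
 *
 * Lock status: None assumed.
 *
 * Returns:     The scsi_device or NULL
 *
 * Notes:
 *	Attach a single scsi_device to the Scsi_Host - this should
 *	be made to look like a "pseudo-device" that points to the
 *	HA itself.
 *
 *	Note - this device is not accessible from any high-level
 *	drivers (including generics), which is probably not
 *	optimal.  We can add hooks later to attach.
 */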
struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
{
	struct scsi_device *sdev = NULL;
	struct scsi_target *starget;

	mutex_lock(&shost->scan_mutex);
	if (!scsi_host_scan_allowed(shost))
		goto out;
	starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
	if (!starget)
		goto out;

	sdev = scsi_alloc_sdev(starget, 0, NULL);
	if (sdev)
		sdev->borken = 0;
	else
		scsi_target_reap(starget);
	put_device(&starget->dev);
 out:
	mutex_unlock(&shost->scan_mutex);
	return sdev;
}
EXPORT_SYMBOL(scsi_get_host_dev);
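/**
 * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself
 * @sdev: Host device to be freed
 *
 * Lock status: None assumed.
 *
 * Returns:     Nothing
 */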
void scsi_free_host_dev(struct scsi_device *sdev)
{
	BUG_ON(sdev->id != sdev->host->this_id);

	__scsi_remove_device(sdev);
}
EXPORT_SYMBOL(scsi_free_host_dev);