1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40#include <linux/module.h>
41#include <linux/moduleparam.h>
42#include <linux/kernel.h>
43#include <linux/timer.h>
44#include <linux/string.h>
45#include <linux/slab.h>
46#include <linux/blkdev.h>
47#include <linux/delay.h>
48#include <linux/init.h>
49#include <linux/completion.h>
50#include <linux/unistd.h>
51#include <linux/spinlock.h>
52#include <linux/kmod.h>
53#include <linux/interrupt.h>
54#include <linux/notifier.h>
55#include <linux/cpu.h>
56#include <linux/mutex.h>
57
58#include <scsi/scsi.h>
59#include <scsi/scsi_cmnd.h>
60#include <scsi/scsi_dbg.h>
61#include <scsi/scsi_device.h>
62#include <scsi/scsi_driver.h>
63#include <scsi/scsi_eh.h>
64#include <scsi/scsi_host.h>
65#include <scsi/scsi_tcq.h>
66
67#include "scsi_priv.h"
68#include "scsi_logging.h"
69
/* Completion callback handed to the LLDD's ->queuecommand(); defined below. */
static void scsi_done(struct scsi_cmnd *cmd);
71
72
73
74
75
/*
 * Definitions and constants.
 */

/* Wait this long after a reset before issuing new commands (jiffies). */
#define MIN_RESET_DELAY (2*HZ)

/* Do not call reset on error if we just did a reset within 15 sec. */
#define MIN_RESET_PERIOD (15*HZ)
80
81
82
83
84
/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif
89
90
91
92
93
/*
 * Peripheral device type names, indexed by the INQUIRY peripheral device
 * type code.  NB: These are exposed through /proc/scsi/scsi and form part
 * of the ABI.  You may not alter any existing entry (although adding new
 * ones is encouraged once assigned by ANSI/INCITS T10).
 */
static const char *const scsi_device_types[] = {
	"Direct-Access ",
	"Sequential-Access",
	"Printer ",
	"Processor ",
	"WORM ",
	"CD-ROM ",
	"Scanner ",
	"Optical Device ",
	"Medium Changer ",
	"Communications ",
	"ASC IT8 ",
	"ASC IT8 ",
	"RAID ",
	"Enclosure ",
	"Direct-Access-RBC",
	"Optical card ",
	"Bridge controller",
	"Object storage ",
	"Automation/Drive ",
};
115
116
117
118
119
120
121const char * scsi_device_type(unsigned type)
122{
123 if (type == 0x1e)
124 return "Well-known LUN ";
125 if (type == 0x1f)
126 return "No Device ";
127 if (type >= ARRAY_SIZE(scsi_device_types))
128 return "Unknown ";
129 return scsi_device_types[type];
130}
131
132EXPORT_SYMBOL(scsi_device_type);
133
/*
 * A per-allocation-class (normal vs DMA) pool of slabs backing struct
 * scsi_cmnd and sense-buffer allocations, shared by all hosts that use
 * the same class.  Reference-counted via @users under host_cmd_pool_mutex.
 */
struct scsi_host_cmd_pool {
	struct kmem_cache *cmd_slab;	/* slab for struct scsi_cmnd */
	struct kmem_cache *sense_slab;	/* slab for sense buffers */
	unsigned int users;		/* hosts/callers holding this pool */
	char *cmd_name;			/* slab name for cmd_slab */
	char *sense_name;		/* slab name for sense_slab */
	unsigned int slab_flags;	/* flags for kmem_cache_create() */
	gfp_t gfp_mask;			/* extra gfp bits (e.g. __GFP_DMA) */
};
143
/* Pool used for ordinary (non-ISA-DMA) allocations. */
static struct scsi_host_cmd_pool scsi_cmd_pool = {
	.cmd_name = "scsi_cmd_cache",
	.sense_name = "scsi_sense_cache",
	.slab_flags = SLAB_HWCACHE_ALIGN,
};
149
/* Pool used for hosts that require ISA-DMA-capable (low) memory. */
static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
	.cmd_name = "scsi_cmd_cache(DMA)",
	.sense_name = "scsi_sense_cache(DMA)",
	.slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
	.gfp_mask = __GFP_DMA,
};
156
/* Serializes creation/teardown and refcounting of the shared pools above. */
static DEFINE_MUTEX(host_cmd_pool_mutex);
158
159
160
161
162
163
164
165
166
167static struct scsi_cmnd *
168scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask)
169{
170 struct scsi_cmnd *cmd;
171
172 cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
173 if (!cmd)
174 return NULL;
175
176 cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
177 gfp_mask | pool->gfp_mask);
178 if (!cmd->sense_buffer) {
179 kmem_cache_free(pool->cmd_slab, cmd);
180 return NULL;
181 }
182
183 return cmd;
184}
185
186
187
188
189
190
191
192
193
194static void
195scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
196 struct scsi_cmnd *cmd)
197{
198 if (cmd->prot_sdb)
199 kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
200
201 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
202 kmem_cache_free(pool->cmd_slab, cmd);
203}
204
205
206
207
208
209
210
211
212
213static struct scsi_cmnd *
214scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
215{
216 struct scsi_cmnd *cmd;
217
218 cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
219 if (!cmd)
220 return NULL;
221
222 if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
223 cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
224
225 if (!cmd->prot_sdb) {
226 scsi_pool_free_command(shost->cmd_pool, cmd);
227 return NULL;
228 }
229 }
230
231 return cmd;
232}
233
234
235
236
237
238
239
240
241
/**
 * __scsi_get_command - Allocate a struct scsi_cmnd
 * @shost: host to transmit command
 * @gfp_mask: allocation mask
 *
 * Description: allocate a struct scsi_cmnd from host's slab, recycling from
 * the host's emergency free list if necessary.
 */
struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);

	if (unlikely(!cmd)) {
		unsigned long flags;

		/* Slab allocation failed: fall back to the backup command
		 * kept on the host's free list (see __scsi_put_command). */
		spin_lock_irqsave(&shost->free_list_lock, flags);
		if (likely(!list_empty(&shost->free_list))) {
			cmd = list_entry(shost->free_list.next,
					 struct scsi_cmnd, list);
			list_del_init(&cmd->list);
		}
		spin_unlock_irqrestore(&shost->free_list_lock, flags);

		if (cmd) {
			void *buf, *prot;

			/* Recycled command: wipe it clean but preserve the
			 * buffers that were allocated along with it. */
			buf = cmd->sense_buffer;
			prot = cmd->prot_sdb;

			memset(cmd, 0, sizeof(*cmd));

			cmd->sense_buffer = buf;
			cmd->prot_sdb = prot;
		}
	}

	return cmd;
}
EXPORT_SYMBOL_GPL(__scsi_get_command);
273
274
275
276
277
278
279
280
/**
 * scsi_get_command - Allocate and setup a scsi command block
 * @dev: parent scsi device
 * @gfp_mask: allocator flags
 *
 * Returns:	The allocated scsi command structure, or NULL on failure.
 */
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd;

	/* Bail if we can't get a reference to the device */
	if (!get_device(&dev->sdev_gendev))
		return NULL;

	cmd = __scsi_get_command(dev->host, gfp_mask);

	if (likely(cmd != NULL)) {
		unsigned long flags;

		/* Attach the command to its device's cmd_list. */
		cmd->device = dev;
		INIT_LIST_HEAD(&cmd->list);
		spin_lock_irqsave(&dev->list_lock, flags);
		list_add_tail(&cmd->list, &dev->cmd_list);
		spin_unlock_irqrestore(&dev->list_lock, flags);
		cmd->jiffies_at_alloc = jiffies;
	} else
		put_device(&dev->sdev_gendev);	/* allocation failed: drop ref */

	return cmd;
}
EXPORT_SYMBOL(scsi_get_command);
306
307
308
309
310
311
312
/**
 * __scsi_put_command - Free a struct scsi_cmnd
 * @shost: dev->host
 * @cmd: Command to free
 * @dev: parent scsi device
 *
 * If the host's emergency free list is empty the command is parked there
 * instead of being freed, so __scsi_get_command always has a fallback.
 */
void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
			struct device *dev)
{
	unsigned long flags;

	/* changing locks here, don't need to restore the irq state */
	spin_lock_irqsave(&shost->free_list_lock, flags);
	if (unlikely(list_empty(&shost->free_list))) {
		list_add(&cmd->list, &shost->free_list);
		cmd = NULL;	/* command now owned by the free list */
	}
	spin_unlock_irqrestore(&shost->free_list_lock, flags);

	if (likely(cmd != NULL))
		scsi_pool_free_command(shost->cmd_pool, cmd);

	put_device(dev);	/* release the ref taken in scsi_get_command */
}
EXPORT_SYMBOL(__scsi_put_command);
332
333
334
335
336
337
338
339
340
/**
 * scsi_put_command - Free a scsi command block
 * @cmd: command block to free
 *
 * Returns:	Nothing.
 *
 * Notes:	The command must not belong to any lists.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	unsigned long flags;

	/* serious error if the command hasn't come from a device list */
	spin_lock_irqsave(&cmd->device->list_lock, flags);
	BUG_ON(list_empty(&cmd->list));
	list_del_init(&cmd->list);
	spin_unlock_irqrestore(&cmd->device->list_lock, flags);

	__scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);
355
/*
 * Take a reference on the command pool matching @gfp_mask (the DMA pool
 * when __GFP_DMA is set, otherwise the regular pool), creating the backing
 * slabs on first use.  Returns the pool, or NULL if slab creation failed.
 */
static struct scsi_host_cmd_pool *scsi_get_host_cmd_pool(gfp_t gfp_mask)
{
	struct scsi_host_cmd_pool *retval = NULL, *pool;
	/*
	 * Select a command slab for this host and create it if it
	 * doesn't yet exist.
	 */
	mutex_lock(&host_cmd_pool_mutex);
	pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
		&scsi_cmd_pool;
	if (!pool->users) {
		pool->cmd_slab = kmem_cache_create(pool->cmd_name,
						   sizeof(struct scsi_cmnd), 0,
						   pool->slab_flags, NULL);
		if (!pool->cmd_slab)
			goto fail;

		pool->sense_slab = kmem_cache_create(pool->sense_name,
						     SCSI_SENSE_BUFFERSIZE, 0,
						     pool->slab_flags, NULL);
		if (!pool->sense_slab) {
			kmem_cache_destroy(pool->cmd_slab);
			goto fail;
		}
	}

	pool->users++;
	retval = pool;	/* on the fail path retval stays NULL */
 fail:
	mutex_unlock(&host_cmd_pool_mutex);
	return retval;
}
388
/*
 * Drop a reference on the command pool matching @gfp_mask, destroying the
 * backing slabs when the last user goes away.  Must pair with a prior
 * scsi_get_host_cmd_pool() call using the same mask.
 */
static void scsi_put_host_cmd_pool(gfp_t gfp_mask)
{
	struct scsi_host_cmd_pool *pool;

	mutex_lock(&host_cmd_pool_mutex);
	pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
		&scsi_cmd_pool;

	/*
	 * This may happen if a driver has a mismatched get and put
	 * of the command pool; the driver should be implicated in
	 * the stack trace.
	 */
	BUG_ON(pool->users == 0);

	if (!--pool->users) {
		kmem_cache_destroy(pool->cmd_slab);
		kmem_cache_destroy(pool->sense_slab);
	}
	mutex_unlock(&host_cmd_pool_mutex);
}
409
410
411
412
413
414
415
416
417
418
419
420
421
422
/**
 * scsi_allocate_command - get a fully allocated SCSI command
 * @gfp_mask:	allocation mask
 *
 * This function is for use outside of the normal host based pools.
 * It allocates the relevant command and takes an additional reference
 * on the pool it used.  This function *must* be paired with
 * scsi_free_command() using the identical mask, otherwise the pool
 * refcounts will eventually go wrong and trigger a BUG.
 */
struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask)
{
	struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);

	if (!pool)
		return NULL;

	return scsi_pool_alloc_command(pool, gfp_mask);
}
EXPORT_SYMBOL(scsi_allocate_command);
433
434
435
436
437
438
439
440
441
442
/**
 * scsi_free_command - free a command allocated by scsi_allocate_command
 * @gfp_mask:	mask used in the original allocation
 * @cmd:	command to free
 *
 * Note: using the original allocation mask is vital because that's
 * what determines which command pool we use to free the command.
 */
void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd)
{
	struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);

	/*
	 * This could trigger if the mask passed to scsi_allocate_command
	 * doesn't match this mask.  Otherwise we're guaranteed that this
	 * succeeds because scsi_allocate_command's refcount on the pool
	 * ensures the slabs exist.
	 */
	BUG_ON(!pool);

	scsi_pool_free_command(pool, cmd);

	/*
	 * scsi_put_host_cmd_pool is deliberately called twice: once to
	 * release the reference we took above, and once to release the
	 * reference originally taken by scsi_allocate_command.  Do NOT
	 * "fix" this as a duplicate call.
	 */
	scsi_put_host_cmd_pool(gfp_mask);
	scsi_put_host_cmd_pool(gfp_mask);
}
EXPORT_SYMBOL(scsi_free_command);
465
466
467
468
469
470
471
472
473
474
475
/**
 * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
 * @shost: host to allocate the freelist for.
 *
 * Description: The command freelist protects against system-wide out of
 * memory deadlock by preallocating one SCSI command structure for each
 * host, so the system can always make forward progress (e.g. write to a
 * swap device behind this host).
 *
 * Returns:	0 on success, -ENOMEM on allocation failure.
 */
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
	struct scsi_cmnd *cmd;
	const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;

	spin_lock_init(&shost->free_list_lock);
	INIT_LIST_HEAD(&shost->free_list);

	shost->cmd_pool = scsi_get_host_cmd_pool(gfp_mask);

	if (!shost->cmd_pool)
		return -ENOMEM;

	/*
	 * Get one backup command for this host.
	 */
	cmd = scsi_host_alloc_command(shost, gfp_mask);
	if (!cmd) {
		scsi_put_host_cmd_pool(gfp_mask);
		shost->cmd_pool = NULL;
		return -ENOMEM;
	}
	list_add(&cmd->list, &shost->free_list);
	return 0;
}
501
502
503
504
505
/**
 * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
 * @shost: host whose freelist is going to be destroyed
 */
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
	/*
	 * If cmd_pool is NULL the free list was never initialized, so
	 * there is nothing to release.
	 */
	if (!shost->cmd_pool)
		return;

	while (!list_empty(&shost->free_list)) {
		struct scsi_cmnd *cmd;

		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
		list_del_init(&cmd->list);
		scsi_pool_free_command(shost->cmd_pool, cmd);
	}
	shost->cmd_pool = NULL;
	scsi_put_host_cmd_pool(shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL);
}
525
526#ifdef CONFIG_SCSI_LOGGING
/* Log a command as it is handed to the LLDD, gated by the MLQUEUE log level. */
void scsi_log_send(struct scsi_cmnd *cmd)
{
	unsigned int level;

	/*
	 * If ML QUEUE log level is greater than or equal to:
	 *
	 * 1: nothing (match completion)
	 *
	 * 2: log opcode + command of all commands
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
				       SCSI_LOG_MLQUEUE_BITS);
		if (level > 1) {
			scmd_printk(KERN_INFO, cmd, "Send: ");
			if (level > 2)
				printk("0x%p ", cmd);
			printk("\n");
			scsi_print_command(cmd);
			if (level > 3) {
				printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
				       " queuecommand 0x%p\n",
				       scsi_sglist(cmd), scsi_bufflen(cmd),
				       cmd->device->host->hostt->queuecommand);

			}
		}
	}
}
561
/* Log a completed command and its EH disposition, gated by MLCOMPLETE level. */
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			scmd_printk(KERN_INFO, cmd, "Done: ");
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * Dump truncated values, so we usually fit within
			 * 80 chars.
			 */
			switch (disposition) {
			case SUCCESS:
				printk("SUCCESS\n");
				break;
			case NEEDS_RETRY:
				printk("RETRY\n");
				break;
			case ADD_TO_MLQUEUE:
				printk("MLQUEUE\n");
				break;
			case FAILED:
				printk("FAILED\n");
				break;
			case TIMEOUT_ERROR:
				/*
				 * If called via scsi_times_out.
				 */
				printk("TIMEOUT\n");
				break;
			default:
				printk("UNKNOWN\n");
			}
			scsi_print_result(cmd);
			scsi_print_command(cmd);
			if (status_byte(cmd->result) & CHECK_CONDITION)
				scsi_print_sense("", cmd);
			if (level > 3)
				scmd_printk(KERN_INFO, cmd,
					    "scsi host busy %d failed %d\n",
					    cmd->device->host->host_busy,
					    cmd->device->host->host_failed);
		}
	}
}
624#endif
625
626
627
628
629
630
631
632
633
634static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
635{
636 cmd->serial_number = host->cmd_serial_number++;
637 if (cmd->serial_number == 0)
638 cmd->serial_number = host->cmd_serial_number++;
639}
640
641
642
643
644
645
646
647
/**
 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero if the request was rejected and must be requeued.
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	unsigned long flags = 0;
	unsigned long timeout;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		scsi_done(cmd);
		/* return 0 (because the command has been processed) */
		goto out;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);

		SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));

		/*
		 * NOTE: rtn is still zero here because we don't need the
		 * queue to be plugged on return (it's already stopped)
		 */
		goto out;
	}

	/*
	 * If SCSI-2 or lower, store the LUN value in cmnd byte 1.
	 */
	if (cmd->device->scsi_level <= SCSI_2 &&
	    cmd->device->scsi_level != SCSI_UNKNOWN) {
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);
	}

	/*
	 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
	 * we can avoid the drive not being ready.
	 */
	timeout = host->last_reset + MIN_RESET_DELAY;

	if (host->resetting && time_before(jiffies, timeout)) {
		int ticks_remaining = timeout - jiffies;
		/*
		 * NOTE: This may be executed from within an interrupt
		 * handler!  This is bad, but for now, it'll do.  The irq
		 * level of the interrupt handler has been masked out by the
		 * platform dependent interrupt handling code already, so the
		 * busy-wait below will not cause another call to the SCSI
		 * host's interrupt handler (assuming there is one irq-level
		 * per host).
		 */
		while (--ticks_remaining >= 0)
			mdelay(1 + 999 / HZ);
		host->resetting = 0;
	}

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3,
			printk("queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);

		scsi_done(cmd);
		goto out;
	}

	spin_lock_irqsave(host->host_lock, flags);
	/*
	 * NOTE(review): unlikely race — the timer could conceivably expire
	 * before the serial number is assigned below.
	 */
	scsi_cmd_get_serial(host, cmd);

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		scsi_done(cmd);
	} else {
		/* Hand the command to the low-level driver. */
		rtn = host->hostt->queuecommand(cmd, scsi_done);
	}
	spin_unlock_irqrestore(host->host_lock, flags);
	if (rtn) {
		/* Normalize any unexpected busy code to HOST_BUSY. */
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		scsi_queue_insert(cmd, rtn);

		SCSI_LOG_MLQUEUE(3,
				 printk("queuecommand : request rejected\n"));
	}

 out:
	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
	return rtn;
}
768
769
770
771
772
773
774
775
776
777
778
779
780
781
/*
 * scsi_done - Completion handling for the generic queuing layer.
 * @cmd: command that is ready to be completed.
 *
 * This is the mid-level's (SCSI Core) completion routine: it regains
 * ownership of the command from the LLDD and hands it to the block
 * layer's softirq completion machinery for further processing.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	blk_complete_request(cmd->request);
}
786
787
/* Map a command to its upper-level driver via the gendisk's private_data.
 * Assumes the request has an rq_disk whose private_data points at a
 * struct scsi_driver * — only valid for FS requests (see caller's
 * REQ_TYPE_BLOCK_PC check). */
static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
{
	return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
}
792
793
794
795
796
797
798
799
800
/**
 * scsi_finish_command - cleanup and pass command back to upper layer
 * @cmd: the command
 *
 * Description: Pass command off to upper layer for finishing of I/O
 * request, waking processes that are waiting on results, etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = sdev->host;
	struct scsi_driver *drv;
	unsigned int good_bytes;

	scsi_device_unbusy(sdev);

	/*
	 * Clear the flags which say that the device/target/host is no
	 * longer capable of accepting new commands.  These are set when
	 * a QUEUE_FULL/host-full condition is detected.
	 */
	shost->host_blocked = 0;
	starget->target_blocked = 0;
	sdev->device_blocked = 0;

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	good_bytes = scsi_bufflen(cmd);
	if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
		int old_good_bytes = good_bytes;
		drv = scsi_cmd_to_driver(cmd);
		if (drv->done)
			good_bytes = drv->done(cmd);
		/*
		 * Some devices (e.g. USB) may not give sense identifying
		 * a bad sector and simply return a residue instead, so
		 * subtract the residue if drv->done() error processing
		 * indicated no change to the completion length.
		 */
		if (good_bytes == old_good_bytes)
			good_bytes -= scsi_get_resid(cmd);
	}
	scsi_io_completion(cmd, good_bytes);
}
EXPORT_SYMBOL(scsi_finish_command);
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
/**
 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
 * @sdev: SCSI Device in question
 * @tagged: Do we use tagged queueing (non-0) or do we treat
 *          this device as an untagged device (0)
 * @tags: Number of tags allowed if tagged queueing enabled,
 *        or number of commands the low level driver can
 *        queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:	Nothing
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		the right thing depending on whether or not the device is
 *		currently active and whether or not it even has the
 *		command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
	unsigned long flags;

	/*
	 * refuse to set tagged depth to an unworkable size
	 */
	if (tags <= 0)
		return;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

	/*
	 * Check to see if the queue is managed by the block layer.
	 * If it is, and we fail to adjust the depth, exit.
	 *
	 * Do not resize the tag map if it is a host wide share bqt,
	 * because the size should be the host's can_queue.  If there
	 * is more IO than the LLD's can_queue (so there are not enough
	 * tags), request_fn's host queue ready check will handle it.
	 */
	if (!sdev->host->bqt) {
		if (blk_queue_tagged(sdev->request_queue) &&
		    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
			goto out;
	}

	sdev->queue_depth = tags;
	switch (tagged) {
	case MSG_ORDERED_TAG:
		sdev->ordered_tags = 1;
		sdev->simple_tags = 1;
		break;
	case MSG_SIMPLE_TAG:
		sdev->ordered_tags = 0;
		sdev->simple_tags = 1;
		break;
	default:
		sdev_printk(KERN_WARNING, sdev,
			    "scsi_adjust_queue_depth, bad queue type, "
			    "disabled\n");
		/* fall through — treat an unknown type as untagged */
	case 0:
		sdev->ordered_tags = sdev->simple_tags = 0;
		sdev->queue_depth = tags;
		break;
	}
 out:
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
/**
 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
 * @sdev: SCSI Device in question
 * @depth: Current number of outstanding SCSI commands on this device,
 *         not counting the one returned as QUEUE_FULL.
 *
 * Description:	This function will track successive QUEUE_FULL events on a
 *		specific SCSI device to determine if and when there is a
 *		need to adjust the queue depth on the device.
 *
 * Returns:	0 - No change needed, >0 - Adjust queue depth to this new depth,
 *		-1 - Drop back to untagged operation using host->cmd_per_lun
 *			as the untagged command depth
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		"The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
	/* Rate-limit: at most one adjustment per ~16 jiffies. */
	if ((jiffies >> 4) == sdev->last_queue_full_time)
		return 0;

	sdev->last_queue_full_time = (jiffies >> 4);
	if (sdev->last_queue_full_depth != depth) {
		sdev->last_queue_full_count = 1;
		sdev->last_queue_full_depth = depth;
	} else {
		sdev->last_queue_full_count++;
	}

	/* Require repeated QUEUE_FULLs at the same depth before acting. */
	if (sdev->last_queue_full_count <= 10)
		return 0;
	if (sdev->last_queue_full_depth < 8) {
		/* Drop back to untagged */
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		return -1;
	}

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	return depth;
}
EXPORT_SYMBOL(scsi_track_queue_full);
969
970
971
972
973
974
975
976
977
978
979
980
981
/**
 * scsi_vpd_inquiry - Request a device provide us with a VPD page
 * @sdev: The device to ask
 * @buffer: Where to put the result
 * @page: Which Vital Product Data to return
 * @len: The length of the buffer
 *
 * This is an internal helper function.  You probably want to use
 * scsi_get_vpd_page instead.
 *
 * Returns 0 on success or a negative error number.
 */
static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
			    u8 page, unsigned len)
{
	int result;
	unsigned char cmd[16];

	cmd[0] = INQUIRY;
	cmd[1] = 1;		/* EVPD bit: request a VPD page */
	cmd[2] = page;
	cmd[3] = len >> 8;
	cmd[4] = len & 0xff;
	cmd[5] = 0;		/* Control byte */

	/*
	 * I'm not convinced we need to try quite this hard to get VPD, but
	 * all the existing users tried this hard.
	 */
	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
				  len, NULL, 30 * HZ, 3, NULL);
	if (result)
		return result;

	/* Sanity check that we got the page back that we asked for */
	if (buffer[1] != page)
		return -EIO;

	return 0;
}
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023unsigned char *scsi_get_vpd_page(struct scsi_device *sdev, u8 page)
1024{
1025 int i, result;
1026 unsigned int len;
1027 const unsigned int init_vpd_len = 255;
1028 unsigned char *buf = kmalloc(init_vpd_len, GFP_KERNEL);
1029
1030 if (!buf)
1031 return NULL;
1032
1033
1034 result = scsi_vpd_inquiry(sdev, buf, 0, init_vpd_len);
1035 if (result)
1036 goto fail;
1037
1038
1039 if (page == 0)
1040 return buf;
1041
1042 for (i = 0; i < buf[3]; i++)
1043 if (buf[i + 4] == page)
1044 goto found;
1045
1046 goto fail;
1047
1048 found:
1049 result = scsi_vpd_inquiry(sdev, buf, page, 255);
1050 if (result)
1051 goto fail;
1052
1053
1054
1055
1056
1057 len = ((buf[2] << 8) | buf[3]) + 4;
1058 if (len <= init_vpd_len)
1059 return buf;
1060
1061 kfree(buf);
1062 buf = kmalloc(len, GFP_KERNEL);
1063 result = scsi_vpd_inquiry(sdev, buf, page, len);
1064 if (result)
1065 goto fail;
1066
1067 return buf;
1068
1069 fail:
1070 kfree(buf);
1071 return NULL;
1072}
1073EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
1074
1075
1076
1077
1078
1079
1080
1081
1082
/**
 * scsi_device_get  -  get an additional reference to a scsi_device
 * @sdev:	device to get a reference to
 *
 * Description: Gets a reference to the scsi_device and increments the use
 * count of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 */
int scsi_device_get(struct scsi_device *sdev)
{
	if (sdev->sdev_state == SDEV_DEL)
		return -ENXIO;
	if (!get_device(&sdev->sdev_gendev))
		return -ENXIO;
	/* We can fail to try_module_get if the LLDD module is being
	 * unloaded; in that case the device ref alone keeps us safe. */
	try_module_get(sdev->host->hostt->module);

	return 0;
}
EXPORT_SYMBOL(scsi_device_get);
1096
1097
1098
1099
1100
1101
1102
1103
1104
/**
 * scsi_device_put  -  release a reference to a scsi_device
 * @sdev:	device to release a reference on.
 *
 * Description: Release a reference to the scsi_device and decrements the use
 * count of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module *module = sdev->host->hostt->module;

	/* The module refcount will be zero if scsi_device_get()
	 * was called from a module removal routine */
	if (module && module_refcount(module) != 0)
		module_put(module);
#endif
	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);
1118
1119
/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (prev)
		scsi_device_put(prev);	/* drop the caller's previous ref */
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154void starget_for_each_device(struct scsi_target *starget, void *data,
1155 void (*fn)(struct scsi_device *, void *))
1156{
1157 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1158 struct scsi_device *sdev;
1159
1160 shost_for_each_device(sdev, shost) {
1161 if ((sdev->channel == starget->channel) &&
1162 (sdev->id == starget->id))
1163 fn(sdev, data);
1164 }
1165}
1166EXPORT_SYMBOL(starget_for_each_device);
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182void __starget_for_each_device(struct scsi_target *starget, void *data,
1183 void (*fn)(struct scsi_device *, void *))
1184{
1185 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1186 struct scsi_device *sdev;
1187
1188 __shost_for_each_device(sdev, shost) {
1189 if ((sdev->channel == starget->channel) &&
1190 (sdev->id == starget->id))
1191 fn(sdev, data);
1192 }
1193}
1194EXPORT_SYMBOL(__starget_for_each_device);
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
1212 uint lun)
1213{
1214 struct scsi_device *sdev;
1215
1216 list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
1217 if (sdev->sdev_state == SDEV_DEL)
1218 continue;
1219 if (sdev->lun ==lun)
1220 return sdev;
1221 }
1222
1223 return NULL;
1224}
1225EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 */
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
						 uint lun)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup_by_target(starget, lun);
	/* scsi_device_get returns nonzero on failure to take a reference */
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
1270 uint channel, uint id, uint lun)
1271{
1272 struct scsi_device *sdev;
1273
1274 list_for_each_entry(sdev, &shost->__devices, siblings) {
1275 if (sdev->channel == channel && sdev->id == id &&
1276 sdev->lun ==lun)
1277 return sdev;
1278 }
1279
1280 return NULL;
1281}
1282EXPORT_SYMBOL(__scsi_device_lookup);
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
/**
 * scsi_device_lookup - find a device given the host
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @channel, @id
 * and @lun for a given @shost.  The returned scsi_device has an additional
 * reference that needs to be released with scsi_device_put once you're
 * done with it.
 */
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
				       uint channel, uint id, uint lun)
{
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup(shost, channel, id, lun);
	/* scsi_device_get returns nonzero on failure to take a reference */
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);
1310
MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

/* Runtime-writable logging bitmask; see SCSI_LOG_* in scsi_logging.h. */
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
1316
/*
 * Initialize the SCSI core subsystems in order; on any failure unwind
 * the subsystems already brought up (in reverse order) via the goto chain.
 */
static int __init init_scsi(void)
{
	int error;

	error = scsi_init_queue();
	if (error)
		return error;
	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	scsi_netlink_init();

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}
1359
/* Tear down the SCSI core subsystems in reverse order of initialization. */
static void __exit exit_scsi(void)
{
	scsi_netlink_exit();
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
}

/* Bring up the SCSI core early (subsystem initcall, before device drivers). */
subsys_initcall(init_scsi);
module_exit(exit_scsi);
1373