1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40#include <linux/module.h>
41#include <linux/moduleparam.h>
42#include <linux/kernel.h>
43#include <linux/timer.h>
44#include <linux/string.h>
45#include <linux/slab.h>
46#include <linux/blkdev.h>
47#include <linux/delay.h>
48#include <linux/init.h>
49#include <linux/completion.h>
50#include <linux/unistd.h>
51#include <linux/spinlock.h>
52#include <linux/kmod.h>
53#include <linux/interrupt.h>
54#include <linux/notifier.h>
55#include <linux/cpu.h>
56#include <linux/mutex.h>
57#include <linux/async.h>
58#include <asm/unaligned.h>
59
60#include <scsi/scsi.h>
61#include <scsi/scsi_cmnd.h>
62#include <scsi/scsi_dbg.h>
63#include <scsi/scsi_device.h>
64#include <scsi/scsi_driver.h>
65#include <scsi/scsi_eh.h>
66#include <scsi/scsi_host.h>
67#include <scsi/scsi_tcq.h>
68
69#include "scsi_priv.h"
70#include "scsi_logging.h"
71
72#define CREATE_TRACE_POINTS
73#include <trace/events/scsi.h>
74
75
76
77
78
79
80
81
82
/*
 * Bit mask of per-facility SCSI logging levels; settable via the
 * scsi_logging_level module parameter (see bottom of this file).
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif


/*
 * Async domain that sd probing runs in, so callers can wait for disk
 * probes to finish without waiting on unrelated async work.
 */
ASYNC_DOMAIN(scsi_sd_probe_domain);
EXPORT_SYMBOL(scsi_sd_probe_domain);
91
92
93
94
95
/*
 * Human-readable names for the SPC peripheral device type codes
 * reported by INQUIRY; indexed by the type code.  Strings are padded
 * to a fixed width so log output lines up.
 */
static const char *const scsi_device_types[] = {
	"Direct-Access    ",
	"Sequential-Access",
	"Printer          ",
	"Processor        ",
	"WORM             ",
	"CD-ROM           ",
	"Scanner          ",
	"Optical Device   ",
	"Medium Changer   ",
	"Communications   ",
	"ASC IT8          ",
	"ASC IT8          ",
	"RAID             ",
	"Enclosure        ",
	"Direct-Access-RBC",
	"Optical card     ",
	"Bridge controller",
	"Object storage   ",
	"Automation/Drive ",
};
117
118
119
120
121
122
123const char * scsi_device_type(unsigned type)
124{
125 if (type == 0x1e)
126 return "Well-known LUN ";
127 if (type == 0x1f)
128 return "No Device ";
129 if (type >= ARRAY_SIZE(scsi_device_types))
130 return "Unknown ";
131 return scsi_device_types[type];
132}
133
134EXPORT_SYMBOL(scsi_device_type);
135
/*
 * Bookkeeping for command allocation.  A pool pairs a slab cache for
 * struct scsi_cmnd (plus optional driver-private data) with a slab
 * cache for sense buffers; @users counts hosts attached to the pool.
 */
struct scsi_host_cmd_pool {
	struct kmem_cache *cmd_slab;	/* backs struct scsi_cmnd */
	struct kmem_cache *sense_slab;	/* backs sense buffers */
	unsigned int users;		/* number of hosts using this pool */
	char *cmd_name;			/* name of cmd_slab cache */
	char *sense_name;		/* name of sense_slab cache */
	unsigned int slab_flags;	/* SLAB_* flags for both caches */
	gfp_t gfp_mask;			/* extra gfp bits (e.g. __GFP_DMA) */
};
145
/* Shared pool for ordinary hosts (no driver-private command data). */
static struct scsi_host_cmd_pool scsi_cmd_pool = {
	.cmd_name	= "scsi_cmd_cache",
	.sense_name	= "scsi_sense_cache",
	.slab_flags	= SLAB_HWCACHE_ALIGN,
};

/* Variant for hosts restricted to ISA DMA addressing. */
static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
	.cmd_name	= "scsi_cmd_cache(DMA)",
	.sense_name	= "scsi_sense_cache(DMA)",
	.slab_flags	= SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
	.gfp_mask	= __GFP_DMA,
};

/* Serializes pool lookup/creation and the pool 'users' refcounts. */
static DEFINE_MUTEX(host_cmd_pool_mutex);
160
161
162
163
164
165
166
167
168
169static void
170scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
171{
172 struct scsi_host_cmd_pool *pool = shost->cmd_pool;
173
174 if (cmd->prot_sdb)
175 kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
176 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
177 kmem_cache_free(pool->cmd_slab, cmd);
178}
179
180
181
182
183
184
185
186
187
188static struct scsi_cmnd *
189scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
190{
191 struct scsi_host_cmd_pool *pool = shost->cmd_pool;
192 struct scsi_cmnd *cmd;
193
194 cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
195 if (!cmd)
196 goto fail;
197
198 cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
199 gfp_mask | pool->gfp_mask);
200 if (!cmd->sense_buffer)
201 goto fail_free_cmd;
202
203 if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
204 cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
205 if (!cmd->prot_sdb)
206 goto fail_free_sense;
207 }
208
209 return cmd;
210
211fail_free_sense:
212 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
213fail_free_cmd:
214 kmem_cache_free(pool->cmd_slab, cmd);
215fail:
216 return NULL;
217}
218
219
220
221
222
223
224
225
226
/**
 * __scsi_get_command - Allocate a struct scsi_cmnd
 * @shost: host to transmit command
 * @gfp_mask: allocation mask
 *
 * Allocates a command from the host's pool; if that fails (memory
 * pressure) it falls back to the host's reserved free list so forward
 * progress (e.g. error recovery) remains possible.  Returns NULL only
 * when both sources are exhausted.
 */
struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);

	if (unlikely(!cmd)) {
		unsigned long flags;

		spin_lock_irqsave(&shost->free_list_lock, flags);
		if (likely(!list_empty(&shost->free_list))) {
			cmd = list_entry(shost->free_list.next,
					 struct scsi_cmnd, list);
			list_del_init(&cmd->list);
		}
		spin_unlock_irqrestore(&shost->free_list_lock, flags);

		if (cmd) {
			void *buf, *prot;

			/*
			 * Zero the recycled command but preserve its
			 * preallocated sense buffer and protection
			 * descriptor, which belong to its backing storage.
			 */
			buf = cmd->sense_buffer;
			prot = cmd->prot_sdb;

			memset(cmd, 0, sizeof(*cmd));

			cmd->sense_buffer = buf;
			cmd->prot_sdb = prot;
		}
	}

	return cmd;
}
EXPORT_SYMBOL_GPL(__scsi_get_command);
258
259
260
261
262
263
264
265
266struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
267{
268 struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
269 unsigned long flags;
270
271 if (unlikely(cmd == NULL))
272 return NULL;
273
274 cmd->device = dev;
275 INIT_LIST_HEAD(&cmd->list);
276 INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
277 spin_lock_irqsave(&dev->list_lock, flags);
278 list_add_tail(&cmd->list, &dev->cmd_list);
279 spin_unlock_irqrestore(&dev->list_lock, flags);
280 cmd->jiffies_at_alloc = jiffies;
281 return cmd;
282}
283EXPORT_SYMBOL(scsi_get_command);
284
285
286
287
288
289
/**
 * __scsi_put_command - Free a struct scsi_cmnd
 * @shost: dev->host
 * @cmd: Command to free
 *
 * If the host's reserved free list has drained, park this command
 * there (it backs the emergency path in __scsi_get_command());
 * otherwise release its memory back to the pools.
 */
void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long flags;

	/* Cheap unlocked peek first; recheck under the lock below. */
	if (unlikely(list_empty(&shost->free_list))) {
		spin_lock_irqsave(&shost->free_list_lock, flags);
		if (list_empty(&shost->free_list)) {
			list_add(&cmd->list, &shost->free_list);
			cmd = NULL;	/* ownership moved to free list */
		}
		spin_unlock_irqrestore(&shost->free_list_lock, flags);
	}

	if (likely(cmd != NULL))
		scsi_host_free_command(shost, cmd);
}
EXPORT_SYMBOL(__scsi_put_command);
307
308
309
310
311
312
313
314
315
316void scsi_put_command(struct scsi_cmnd *cmd)
317{
318 unsigned long flags;
319
320
321 spin_lock_irqsave(&cmd->device->list_lock, flags);
322 BUG_ON(list_empty(&cmd->list));
323 list_del_init(&cmd->list);
324 spin_unlock_irqrestore(&cmd->device->list_lock, flags);
325
326 cancel_delayed_work(&cmd->abort_work);
327
328 __scsi_put_command(cmd->device->host, cmd);
329}
330EXPORT_SYMBOL(scsi_put_command);
331
332static struct scsi_host_cmd_pool *
333scsi_find_host_cmd_pool(struct Scsi_Host *shost)
334{
335 if (shost->hostt->cmd_size)
336 return shost->hostt->cmd_pool;
337 if (shost->unchecked_isa_dma)
338 return &scsi_cmd_dma_pool;
339 return &scsi_cmd_pool;
340}
341
342static void
343scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
344{
345 kfree(pool->sense_name);
346 kfree(pool->cmd_name);
347 kfree(pool);
348}
349
/*
 * Create a pool descriptor for this host's template.  Names are derived
 * from the template's proc_name; the slab caches themselves are created
 * lazily in scsi_get_host_cmd_pool().  Returns NULL on allocation failure.
 */
static struct scsi_host_cmd_pool *
scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
{
	struct scsi_host_template *hostt = shost->hostt;
	struct scsi_host_cmd_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->proc_name);
	pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->proc_name);
	if (!pool->cmd_name || !pool->sense_name) {
		/* scsi_free_host_cmd_pool() tolerates a partial allocation */
		scsi_free_host_cmd_pool(pool);
		return NULL;
	}

	pool->slab_flags = SLAB_HWCACHE_ALIGN;
	if (shost->unchecked_isa_dma) {
		pool->slab_flags |= SLAB_CACHE_DMA;
		pool->gfp_mask = __GFP_DMA;
	}

	/* Per-template pools are remembered on the template for reuse. */
	if (hostt->cmd_size)
		hostt->cmd_pool = pool;

	return pool;
}
378
/*
 * Look up (or create) the command pool for @shost and take a reference
 * on it, creating the backing slab caches for the first user.  All of
 * this is serialized by host_cmd_pool_mutex.  Returns the pool, or
 * NULL on allocation failure.
 */
static struct scsi_host_cmd_pool *
scsi_get_host_cmd_pool(struct Scsi_Host *shost)
{
	struct scsi_host_template *hostt = shost->hostt;
	struct scsi_host_cmd_pool *retval = NULL, *pool;
	/* command slab holds the midlayer command plus driver-private data */
	size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;

	mutex_lock(&host_cmd_pool_mutex);
	pool = scsi_find_host_cmd_pool(shost);
	if (!pool) {
		pool = scsi_alloc_host_cmd_pool(shost);
		if (!pool)
			goto out;
	}

	/* First user creates the slab caches. */
	if (!pool->users) {
		pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
						   pool->slab_flags, NULL);
		if (!pool->cmd_slab)
			goto out_free_pool;

		pool->sense_slab = kmem_cache_create(pool->sense_name,
						     SCSI_SENSE_BUFFERSIZE, 0,
						     pool->slab_flags, NULL);
		if (!pool->sense_slab)
			goto out_free_slab;
	}

	pool->users++;
	retval = pool;
out:
	mutex_unlock(&host_cmd_pool_mutex);
	return retval;

out_free_slab:
	kmem_cache_destroy(pool->cmd_slab);
out_free_pool:
	/* only dynamically allocated per-template pools are freed here */
	if (hostt->cmd_size) {
		scsi_free_host_cmd_pool(pool);
		hostt->cmd_pool = NULL;
	}
	goto out;
}
426
/*
 * Drop a reference on @shost's command pool; the last user destroys the
 * slab caches and, for per-template pools, the pool descriptor itself.
 */
static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
{
	struct scsi_host_template *hostt = shost->hostt;
	struct scsi_host_cmd_pool *pool;

	mutex_lock(&host_cmd_pool_mutex);
	pool = scsi_find_host_cmd_pool(shost);

	/*
	 * A zero refcount here means a put without a matching get:
	 * refcounting is corrupted, so fail hard.
	 */
	BUG_ON(pool->users == 0);

	if (!--pool->users) {
		kmem_cache_destroy(pool->cmd_slab);
		kmem_cache_destroy(pool->sense_slab);
		if (hostt->cmd_size) {
			scsi_free_host_cmd_pool(pool);
			hostt->cmd_pool = NULL;
		}
	}
	mutex_unlock(&host_cmd_pool_mutex);
}
452
453
454
455
456
457
458
459
460
461
462
463int scsi_setup_command_freelist(struct Scsi_Host *shost)
464{
465 const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
466 struct scsi_cmnd *cmd;
467
468 spin_lock_init(&shost->free_list_lock);
469 INIT_LIST_HEAD(&shost->free_list);
470
471 shost->cmd_pool = scsi_get_host_cmd_pool(shost);
472 if (!shost->cmd_pool)
473 return -ENOMEM;
474
475
476
477
478 cmd = scsi_host_alloc_command(shost, gfp_mask);
479 if (!cmd) {
480 scsi_put_host_cmd_pool(shost);
481 shost->cmd_pool = NULL;
482 return -ENOMEM;
483 }
484 list_add(&cmd->list, &shost->free_list);
485 return 0;
486}
487
488
489
490
491
492void scsi_destroy_command_freelist(struct Scsi_Host *shost)
493{
494
495
496
497
498 if (!shost->cmd_pool)
499 return;
500
501 while (!list_empty(&shost->free_list)) {
502 struct scsi_cmnd *cmd;
503
504 cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
505 list_del_init(&cmd->list);
506 scsi_host_free_command(shost, cmd);
507 }
508 shost->cmd_pool = NULL;
509 scsi_put_host_cmd_pool(shost);
510}
511
512#ifdef CONFIG_SCSI_LOGGING
513void scsi_log_send(struct scsi_cmnd *cmd)
514{
515 unsigned int level;
516
517
518
519
520
521
522
523
524
525
526
527
528 if (unlikely(scsi_logging_level)) {
529 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
530 SCSI_LOG_MLQUEUE_BITS);
531 if (level > 1) {
532 scmd_printk(KERN_INFO, cmd,
533 "Send: scmd 0x%p\n", cmd);
534 scsi_print_command(cmd);
535 }
536 }
537}
538
539void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
540{
541 unsigned int level;
542
543
544
545
546
547
548
549
550
551
552
553
554
555 if (unlikely(scsi_logging_level)) {
556 level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
557 SCSI_LOG_MLCOMPLETE_BITS);
558 if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
559 (level > 1)) {
560 scsi_print_result(cmd, "Done", disposition);
561 scsi_print_command(cmd);
562 if (status_byte(cmd->result) == CHECK_CONDITION)
563 scsi_print_sense(cmd);
564 if (level > 3)
565 scmd_printk(KERN_INFO, cmd,
566 "scsi host busy %d failed %d\n",
567 atomic_read(&cmd->device->host->host_busy),
568 cmd->device->host->host_failed);
569 }
570 }
571}
572#endif
573
574
575
576
577
578
579
580
581
582void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
583{
584 cmd->serial_number = host->cmd_serial_number++;
585 if (cmd->serial_number == 0)
586 cmd->serial_number = host->cmd_serial_number++;
587}
588EXPORT_SYMBOL(scsi_cmd_get_serial);
589
590
591
592
593
594
595
596
/**
 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: 0 when the command was completed or handed to the LLD's
 * ->queuecommand, otherwise a SCSI_MLQUEUE_* busy code asking the
 * midlayer to requeue the command.
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/*
		 * in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present
		 */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;

	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		/* normalize unexpected LLD return codes to "host busy" */
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	/* complete the command immediately without calling the LLD */
	cmd->scsi_done(cmd);
	return 0;
}
670
671
672
673
674
675
676
677
678
/**
 * scsi_finish_command - cleanup and pass command back to upper layer
 * @cmd: the command
 *
 * Description: Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = sdev->host;
	struct scsi_driver *drv;
	unsigned int good_bytes;

	scsi_device_unbusy(sdev);

	/*
	 * Clear the flags that say that the device/target/host is no
	 * longer capable of accepting new commands.
	 */
	if (atomic_read(&shost->host_blocked))
		atomic_set(&shost->host_blocked, 0);
	if (atomic_read(&starget->target_blocked))
		atomic_set(&starget->target_blocked, 0);
	if (atomic_read(&sdev->device_blocked))
		atomic_set(&sdev->device_blocked, 0);

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	good_bytes = scsi_bufflen(cmd);
	if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
		int old_good_bytes = good_bytes;
		drv = scsi_cmd_to_driver(cmd);
		if (drv->done)
			good_bytes = drv->done(cmd);
		/*
		 * USB may not give sense identifying bad sector and
		 * simply return a residue instead, so subtract off the
		 * residue if drv->done() error processing indicates no
		 * change to the completion length.
		 */
		if (good_bytes == old_good_bytes)
			good_bytes -= scsi_get_resid(cmd);
	}
	scsi_io_completion(cmd, good_bytes);
}
EXPORT_SYMBOL(scsi_finish_command);
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
/**
 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
 * @sdev: SCSI Device in question
 * @tagged: Do we use tagged queueing (non-0) or do we treat
 *          this device as an untagged device (0)
 * @tags: Number of tags allowed if tagged queueing enabled,
 *        or number of commands the low level driver can
 *        queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:	Nothing
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		the right thing depending on whether or not the device is
 *		currently active and whether or not it even has the
 *		command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
	unsigned long flags;

	/*
	 * refuse to set tagged depth to an unworkable size
	 */
	if (tags <= 0)
		return;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

	/*
	 * Check to see if the queue is managed by the block layer.
	 * If it is, and we fail to adjust the depth, exit.
	 *
	 * Do not resize the tag map if it is a host wide share bqt,
	 * because the size should be the hosts's can_queue.
	 */
	if (!shost_use_blk_mq(sdev->host) && !sdev->host->bqt) {
		if (blk_queue_tagged(sdev->request_queue) &&
		    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
			goto out;
	}

	sdev->queue_depth = tags;
	blk_set_queue_depth(sdev->request_queue, sdev->queue_depth);
	switch (tagged) {
	case 0:
		/* untagged operation */
		sdev->ordered_tags = 0;
		sdev->simple_tags = 0;
		break;
	case MSG_ORDERED_TAG:
		sdev->ordered_tags = 1;
		sdev->simple_tags = 1;
		break;
	case MSG_SIMPLE_TAG:
		sdev->ordered_tags = 0;
		sdev->simple_tags = 1;
		break;
	default:
		/* unknown tag type: fall back to untagged and warn */
		sdev->ordered_tags = 0;
		sdev->simple_tags = 0;
		sdev_printk(KERN_WARNING, sdev,
			    "scsi_adjust_queue_depth, bad queue type, "
			    "disabled\n");
	}
 out:
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
/**
 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
 * @sdev: SCSI Device in question
 * @depth: Current number of outstanding SCSI commands on this device,
 *         not counting the one returned as QUEUE_FULL.
 *
 * Description:	This function will track successive QUEUE_FULL events on a
 *		specific SCSI device to determine if and when there is a
 *		need to adjust the queue depth on the device.
 *
 * Returns:	0 - No change needed, >0 - Adjust queue depth to this new depth,
 *		-1 - Drop back to untagged operation using host->cmd_per_lun
 *			as the untagged command depth
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		"The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{

	/*
	 * Don't let QUEUE_FULLs on the same
	 * jiffies count, they could all be from
	 * same event.
	 */
	if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
		return 0;

	sdev->last_queue_full_time = jiffies;
	if (sdev->last_queue_full_depth != depth) {
		/* depth changed: restart the event count at this depth */
		sdev->last_queue_full_count = 1;
		sdev->last_queue_full_depth = depth;
	} else {
		sdev->last_queue_full_count++;
	}

	/* require a persistent pattern before reacting */
	if (sdev->last_queue_full_count <= 10)
		return 0;
	if (sdev->last_queue_full_depth < 8) {
		/* Drop back to untagged */
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		return -1;
	}

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	return depth;
}
EXPORT_SYMBOL(scsi_track_queue_full);
855
856
857
858
859
860
861
862
863
864
865
866
867
/**
 * scsi_vpd_inquiry - Request a device provide us with a VPD page
 * @sdev: The device to ask
 * @buffer: Where to put the result
 * @page: Which Vital Product Data to return
 * @len: The length of the buffer
 *
 * This is an internal helper function.  You probably want to use
 * scsi_get_vpd_page instead.
 *
 * Returns size of the vpd page on success or a negative error number.
 */
static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
							u8 page, unsigned len)
{
	int result;
	unsigned char cmd[16];

	if (len < 4)
		return -EINVAL;

	cmd[0] = INQUIRY;
	cmd[1] = 1;		/* EVPD */
	cmd[2] = page;
	cmd[3] = len >> 8;
	cmd[4] = len & 0xff;
	cmd[5] = 0;		/* Control byte */

	/*
	 * I'm not convinced we need to try quite this hard to get VPD, but
	 * all the existing users tried this hard.
	 */
	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
				  len, NULL, 30 * HZ, 3, NULL);
	if (result)
		return -EIO;

	/* Sanity check that we got the page back that we asked for */
	if (buffer[1] != page)
		return -EIO;

	/* full page length including the 4-byte header; may exceed @len */
	return get_unaligned_be16(&buffer[2]) + 4;
}
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
/**
 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
 * @sdev: The device to ask
 * @page: Which Vital Product Data to return
 * @buf: where to store the VPD
 * @buf_len: number of bytes in the VPD buffer area
 *
 * SCSI devices may optionally supply Vital Product Data.  Each 'page'
 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
 * If the device supports the requested page, @buf is filled with the
 * page's data (possibly truncated to @buf_len) and 0 is returned;
 * otherwise -EINVAL is returned.
 */
int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
		      int buf_len)
{
	int i, result;

	if (sdev->skip_vpd_pages)
		goto fail;

	/* Ask for all the pages supported by this device */
	result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
	if (result < 4)
		goto fail;

	/* If the user actually wanted this page, we can skip the rest */
	if (page == 0)
		return 0;

	/* scan the supported-pages list for the requested page code */
	for (i = 4; i < min(result, buf_len); i++)
		if (buf[i] == page)
			goto found;

	if (i < result && i >= buf_len)
		/* ran off the end of the buffer, give us benefit of doubt */
		goto found;

	goto fail;

 found:
	result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
	if (result < 0)
		goto fail;

	return 0;

 fail:
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
951EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
952
953
954
955
956
957
958
959
960
961
962void scsi_attach_vpd(struct scsi_device *sdev)
963{
964 int result, i;
965 int vpd_len = SCSI_VPD_PG_LEN;
966 int pg80_supported = 0;
967 int pg83_supported = 0;
968 unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
969
970 if (!scsi_device_supports_vpd(sdev))
971 return;
972
973retry_pg0:
974 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
975 if (!vpd_buf)
976 return;
977
978
979 result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
980 if (result < 0) {
981 kfree(vpd_buf);
982 return;
983 }
984 if (result > vpd_len) {
985 vpd_len = result;
986 kfree(vpd_buf);
987 goto retry_pg0;
988 }
989
990 for (i = 4; i < result; i++) {
991 if (vpd_buf[i] == 0x80)
992 pg80_supported = 1;
993 if (vpd_buf[i] == 0x83)
994 pg83_supported = 1;
995 }
996 kfree(vpd_buf);
997 vpd_len = SCSI_VPD_PG_LEN;
998
999 if (pg80_supported) {
1000retry_pg80:
1001 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1002 if (!vpd_buf)
1003 return;
1004
1005 result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
1006 if (result < 0) {
1007 kfree(vpd_buf);
1008 return;
1009 }
1010 if (result > vpd_len) {
1011 vpd_len = result;
1012 kfree(vpd_buf);
1013 goto retry_pg80;
1014 }
1015 spin_lock(&sdev->inquiry_lock);
1016 orig_vpd_buf = sdev->vpd_pg80;
1017 sdev->vpd_pg80_len = result;
1018 rcu_assign_pointer(sdev->vpd_pg80, vpd_buf);
1019 spin_unlock(&sdev->inquiry_lock);
1020 synchronize_rcu();
1021 if (orig_vpd_buf) {
1022 kfree(orig_vpd_buf);
1023 orig_vpd_buf = NULL;
1024 }
1025 vpd_len = SCSI_VPD_PG_LEN;
1026 }
1027
1028 if (pg83_supported) {
1029retry_pg83:
1030 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1031 if (!vpd_buf)
1032 return;
1033
1034 result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
1035 if (result < 0) {
1036 kfree(vpd_buf);
1037 return;
1038 }
1039 if (result > vpd_len) {
1040 vpd_len = result;
1041 kfree(vpd_buf);
1042 goto retry_pg83;
1043 }
1044 spin_lock(&sdev->inquiry_lock);
1045 orig_vpd_buf = sdev->vpd_pg83;
1046 sdev->vpd_pg83_len = result;
1047 rcu_assign_pointer(sdev->vpd_pg83, vpd_buf);
1048 spin_unlock(&sdev->inquiry_lock);
1049 synchronize_rcu();
1050 if (orig_vpd_buf)
1051 kfree(orig_vpd_buf);
1052 }
1053}
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
/**
 * scsi_report_opcode - Find out if a given command opcode is supported
 * @sdev:	scsi device to query
 * @buffer:	scratch buffer (must be at least 20 bytes long)
 * @len:	length of buffer
 * @opcode:	opcode for command to look up
 *
 * Uses the REPORT SUPPORTED OPERATION CODES command to look up the
 * given opcode. Returns -EINVAL if RSOC cannot be used (old device or
 * the device rejected the command), 1 if the device claims to support
 * the opcode, and 0 otherwise.
 */
int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
		       unsigned int len, unsigned char opcode)
{
	unsigned char cmd[16];
	struct scsi_sense_hdr sshdr;
	int result;

	/* RSOC is only defined from SPC-3 onwards */
	if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
		return -EINVAL;

	memset(cmd, 0, 16);
	cmd[0] = MAINTENANCE_IN;
	cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
	cmd[2] = 1;		/* reporting options: one command format */
	cmd[3] = opcode;
	put_unaligned_be32(len, &cmd[6]);
	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  &sshdr, 30 * HZ, 3, NULL);

	/* ILLEGAL REQUEST with asc 0x20/0x24 means RSOC itself is refused */
	if (result && scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == ILLEGAL_REQUEST &&
	    (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
		return -EINVAL;

	/* SUPPORT field 0b011 == supported in conformance with the standard */
	if ((buffer[1] & 3) == 3)
		return 1;

	return 0;
}
EXPORT_SYMBOL(scsi_report_opcode);
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110int scsi_device_get(struct scsi_device *sdev)
1111{
1112 if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
1113 goto fail;
1114 if (!get_device(&sdev->sdev_gendev))
1115 goto fail;
1116 if (!try_module_get(sdev->host->hostt->module))
1117 goto fail_put_device;
1118 return 0;
1119
1120fail_put_device:
1121 put_device(&sdev->sdev_gendev);
1122fail:
1123 return -ENXIO;
1124}
1125EXPORT_SYMBOL(scsi_device_get);
1126
1127
1128
1129
1130
1131
1132
1133
1134
/**
 * scsi_device_put  -  release a reference to a scsi_device
 * @sdev:	device to release a reference on.
 *
 * Description: Release a reference to the scsi_device and decrements the use
 * count of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
	module_put(sdev->host->hostt->module);
	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);
1141
1142
/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* drop the reference the previous iteration held */
	if (prev)
		scsi_device_put(prev);
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177void starget_for_each_device(struct scsi_target *starget, void *data,
1178 void (*fn)(struct scsi_device *, void *))
1179{
1180 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1181 struct scsi_device *sdev;
1182
1183 shost_for_each_device(sdev, shost) {
1184 if ((sdev->channel == starget->channel) &&
1185 (sdev->id == starget->id))
1186 fn(sdev, data);
1187 }
1188}
1189EXPORT_SYMBOL(starget_for_each_device);
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205void __starget_for_each_device(struct scsi_target *starget, void *data,
1206 void (*fn)(struct scsi_device *, void *))
1207{
1208 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1209 struct scsi_device *sdev;
1210
1211 __shost_for_each_device(sdev, shost) {
1212 if ((sdev->channel == starget->channel) &&
1213 (sdev->id == starget->id))
1214 fn(sdev, data);
1215 }
1216}
1217EXPORT_SYMBOL(__starget_for_each_device);
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
1235 uint lun)
1236{
1237 struct scsi_device *sdev;
1238
1239 list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
1240 if (sdev->sdev_state == SDEV_DEL)
1241 continue;
1242 if (sdev->lun ==lun)
1243 return sdev;
1244 }
1245
1246 return NULL;
1247}
1248EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 */
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
						 uint lun)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup_by_target(starget, lun);
	/* a device we can't pin is as good as not found */
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
1293 uint channel, uint id, uint lun)
1294{
1295 struct scsi_device *sdev;
1296
1297 list_for_each_entry(sdev, &shost->__devices, siblings) {
1298 if (sdev->sdev_state == SDEV_DEL)
1299 continue;
1300 if (sdev->channel == channel && sdev->id == id &&
1301 sdev->lun ==lun)
1302 return sdev;
1303 }
1304
1305 return NULL;
1306}
1307EXPORT_SYMBOL(__scsi_device_lookup);
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
/**
 * scsi_device_lookup - find a device given the host
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @channel, @id and
 * @lun for a given @shost.  The returned scsi_device has an additional
 * reference that needs to be released with scsi_device_put once you're
 * done with it.
 */
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
				       uint channel, uint id, uint lun)
{
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup(shost, channel, id, lun);
	/* a device we can't pin is as good as not found */
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);
1335
MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

/* runtime-adjustable logging mask (root-writable module parameter) */
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

/* opt-in switch for the blk-mq based I/O path */
bool scsi_use_blk_mq = false;
module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
1344
/*
 * Bring up the SCSI core subsystems in dependency order; on any
 * failure, unwind in exact reverse order via the goto chain below.
 */
static int __init init_scsi(void)
{
	int error;

	if (scsi_use_blk_mq)
		mark_tech_preview("scsi-mq", THIS_MODULE);

	error = scsi_init_queue();
	if (error)
		return error;
	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	scsi_netlink_init();

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}
1390
/* Tear down in reverse order of init_scsi(). */
static void __exit exit_scsi(void)
{
	scsi_netlink_exit();
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
	async_unregister_domain(&scsi_sd_probe_domain);
}

/* subsys_initcall: the SCSI core must be up before device drivers probe */
subsys_initcall(init_scsi);
module_exit(exit_scsi);
1405