1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40#include <linux/module.h>
41#include <linux/moduleparam.h>
42#include <linux/kernel.h>
43#include <linux/timer.h>
44#include <linux/string.h>
45#include <linux/slab.h>
46#include <linux/blkdev.h>
47#include <linux/delay.h>
48#include <linux/init.h>
49#include <linux/completion.h>
50#include <linux/unistd.h>
51#include <linux/spinlock.h>
52#include <linux/kmod.h>
53#include <linux/interrupt.h>
54#include <linux/notifier.h>
55#include <linux/cpu.h>
56#include <linux/mutex.h>
57#include <linux/async.h>
58#include <asm/unaligned.h>
59
60#include <scsi/scsi.h>
61#include <scsi/scsi_cmnd.h>
62#include <scsi/scsi_dbg.h>
63#include <scsi/scsi_device.h>
64#include <scsi/scsi_driver.h>
65#include <scsi/scsi_eh.h>
66#include <scsi/scsi_host.h>
67#include <scsi/scsi_tcq.h>
68
69#include "scsi_priv.h"
70#include "scsi_logging.h"
71
72#define CREATE_TRACE_POINTS
73#include <trace/events/scsi.h>
74
75
76
77
78
79
80
81
82
/*
 * Bit mask of currently enabled SCSI logging levels.  Writable at
 * runtime through the scsi_logging_level module parameter (declared
 * at the bottom of this file).  Exported only when logging support
 * is compiled in, so modular drivers can consult it.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif

/*
 * Async domain used for sd probing; presumably lets disk probing
 * proceed in parallel with the rest of async boot -- confirm against
 * the sd driver's use of this domain.
 */
ASYNC_DOMAIN(scsi_sd_probe_domain);
EXPORT_SYMBOL(scsi_sd_probe_domain);

/*
 * Exclusive async domain for sd power-management work: entries in an
 * exclusive domain run strictly one at a time.
 */
ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
EXPORT_SYMBOL(scsi_sd_pm_domain);
100
101
102
103
104
105
106
107
108
/**
 * scsi_put_command - unlink a command from its device's command list
 * @cmd: command to release
 *
 * Removes @cmd from the per-device list under the device list lock.
 * The command must still be linked (BUG otherwise) and must not have
 * abort work still pending.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
	unsigned long flags;

	/* serialize against whoever added the command to the list */
	spin_lock_irqsave(&cmd->device->list_lock, flags);
	BUG_ON(list_empty(&cmd->list));
	list_del_init(&cmd->list);
	spin_unlock_irqrestore(&cmd->device->list_lock, flags);

	/* any scheduled abort must have run or been cancelled by now */
	BUG_ON(delayed_work_pending(&cmd->abort_work));
}
121
122#ifdef CONFIG_SCSI_LOGGING
123void scsi_log_send(struct scsi_cmnd *cmd)
124{
125 unsigned int level;
126
127
128
129
130
131
132
133
134
135
136
137
138 if (unlikely(scsi_logging_level)) {
139 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
140 SCSI_LOG_MLQUEUE_BITS);
141 if (level > 1) {
142 scmd_printk(KERN_INFO, cmd,
143 "Send: scmd 0x%p\n", cmd);
144 scsi_print_command(cmd);
145 }
146 }
147}
148
149void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
150{
151 unsigned int level;
152
153
154
155
156
157
158
159
160
161
162
163
164
165 if (unlikely(scsi_logging_level)) {
166 level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
167 SCSI_LOG_MLCOMPLETE_BITS);
168 if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
169 (level > 1)) {
170 scsi_print_result(cmd, "Done", disposition);
171 scsi_print_command(cmd);
172 if (status_byte(cmd->result) & CHECK_CONDITION)
173 scsi_print_sense(cmd);
174 if (level > 3)
175 scmd_printk(KERN_INFO, cmd,
176 "scsi host busy %d failed %d\n",
177 atomic_read(&cmd->device->host->host_busy),
178 cmd->device->host->host_failed);
179 }
180 }
181}
182#endif
183
184
185
186
187
188
189
190
191
192void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
193{
194 cmd->serial_number = host->cmd_serial_number++;
195 if (cmd->serial_number == 0)
196 cmd->serial_number = host->cmd_serial_number++;
197}
198EXPORT_SYMBOL(scsi_cmd_get_serial);
199
200
201
202
203
204
205
206
207
/**
 * scsi_finish_command - cleanup and pass command back to upper layer
 * @cmd: the command
 *
 * Description: Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = sdev->host;
	struct scsi_driver *drv;
	unsigned int good_bytes;

	scsi_device_unbusy(sdev);

	/*
	 * Clear the flags that say the device/target/host is no longer
	 * capable of accepting new commands.
	 */
	if (atomic_read(&shost->host_blocked))
		atomic_set(&shost->host_blocked, 0);
	if (atomic_read(&starget->target_blocked))
		atomic_set(&starget->target_blocked, 0);
	if (atomic_read(&sdev->device_blocked))
		atomic_set(&sdev->device_blocked, 0);

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	good_bytes = scsi_bufflen(cmd);
	if (!blk_rq_is_passthrough(cmd->request)) {
		int old_good_bytes = good_bytes;
		drv = scsi_cmd_to_driver(cmd);
		if (drv->done)
			good_bytes = drv->done(cmd);
		/*
		 * If the upper driver's done() routine left the byte
		 * count untouched, subtract the residue the device
		 * reported: some devices (e.g. USB bridges) signal a
		 * short transfer via residue only, without sense data.
		 */
		if (good_bytes == old_good_bytes)
			good_bytes -= scsi_get_resid(cmd);
	}
	scsi_io_completion(cmd, good_bytes);
}
257
258
259
260
261
262
263
264
265int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
266{
267 if (depth > 0) {
268 sdev->queue_depth = depth;
269 wmb();
270 }
271
272 if (sdev->request_queue)
273 blk_set_queue_depth(sdev->request_queue, depth);
274
275 return sdev->queue_depth;
276}
277EXPORT_SYMBOL(scsi_change_queue_depth);
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298int scsi_track_queue_full(struct scsi_device *sdev, int depth)
299{
300
301
302
303
304
305
306 if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
307 return 0;
308
309 sdev->last_queue_full_time = jiffies;
310 if (sdev->last_queue_full_depth != depth) {
311 sdev->last_queue_full_count = 1;
312 sdev->last_queue_full_depth = depth;
313 } else {
314 sdev->last_queue_full_count++;
315 }
316
317 if (sdev->last_queue_full_count <= 10)
318 return 0;
319
320 return scsi_change_queue_depth(sdev, depth);
321}
322EXPORT_SYMBOL(scsi_track_queue_full);
323
324
325
326
327
328
329
330
331
332
333
334
335
336static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
337 u8 page, unsigned len)
338{
339 int result;
340 unsigned char cmd[16];
341
342 if (len < 4)
343 return -EINVAL;
344
345 cmd[0] = INQUIRY;
346 cmd[1] = 1;
347 cmd[2] = page;
348 cmd[3] = len >> 8;
349 cmd[4] = len & 0xff;
350 cmd[5] = 0;
351
352
353
354
355
356 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
357 len, NULL, 30 * HZ, 3, NULL);
358 if (result)
359 return -EIO;
360
361
362 if (buffer[1] != page)
363 return -EIO;
364
365 return get_unaligned_be16(&buffer[2]) + 4;
366}
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
383 int buf_len)
384{
385 int i, result;
386
387 if (sdev->skip_vpd_pages)
388 goto fail;
389
390
391 result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
392 if (result < 4)
393 goto fail;
394
395
396 if (page == 0)
397 return 0;
398
399 for (i = 4; i < min(result, buf_len); i++)
400 if (buf[i] == page)
401 goto found;
402
403 if (i < result && i >= buf_len)
404
405 goto found;
406
407 goto fail;
408
409 found:
410 result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
411 if (result < 0)
412 goto fail;
413
414 return 0;
415
416 fail:
417 return -EINVAL;
418}
419EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
420
421
422
423
424
425
426
427
428
429
430void scsi_attach_vpd(struct scsi_device *sdev)
431{
432 int result, i;
433 int vpd_len = SCSI_VPD_PG_LEN;
434 int pg80_supported = 0;
435 int pg83_supported = 0;
436 unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
437
438 if (!scsi_device_supports_vpd(sdev))
439 return;
440
441retry_pg0:
442 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
443 if (!vpd_buf)
444 return;
445
446
447 result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
448 if (result < 0) {
449 kfree(vpd_buf);
450 return;
451 }
452 if (result > vpd_len) {
453 vpd_len = result;
454 kfree(vpd_buf);
455 goto retry_pg0;
456 }
457
458 for (i = 4; i < result; i++) {
459 if (vpd_buf[i] == 0x80)
460 pg80_supported = 1;
461 if (vpd_buf[i] == 0x83)
462 pg83_supported = 1;
463 }
464 kfree(vpd_buf);
465 vpd_len = SCSI_VPD_PG_LEN;
466
467 if (pg80_supported) {
468retry_pg80:
469 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
470 if (!vpd_buf)
471 return;
472
473 result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
474 if (result < 0) {
475 kfree(vpd_buf);
476 return;
477 }
478 if (result > vpd_len) {
479 vpd_len = result;
480 kfree(vpd_buf);
481 goto retry_pg80;
482 }
483 mutex_lock(&sdev->inquiry_mutex);
484 orig_vpd_buf = sdev->vpd_pg80;
485 sdev->vpd_pg80_len = result;
486 rcu_assign_pointer(sdev->vpd_pg80, vpd_buf);
487 mutex_unlock(&sdev->inquiry_mutex);
488 synchronize_rcu();
489 if (orig_vpd_buf) {
490 kfree(orig_vpd_buf);
491 orig_vpd_buf = NULL;
492 }
493 vpd_len = SCSI_VPD_PG_LEN;
494 }
495
496 if (pg83_supported) {
497retry_pg83:
498 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
499 if (!vpd_buf)
500 return;
501
502 result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
503 if (result < 0) {
504 kfree(vpd_buf);
505 return;
506 }
507 if (result > vpd_len) {
508 vpd_len = result;
509 kfree(vpd_buf);
510 goto retry_pg83;
511 }
512 mutex_lock(&sdev->inquiry_mutex);
513 orig_vpd_buf = sdev->vpd_pg83;
514 sdev->vpd_pg83_len = result;
515 rcu_assign_pointer(sdev->vpd_pg83, vpd_buf);
516 mutex_unlock(&sdev->inquiry_mutex);
517 synchronize_rcu();
518 if (orig_vpd_buf)
519 kfree(orig_vpd_buf);
520 }
521}
522
523
524
525
526
527
528
529
530
531
532
533
534int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
535 unsigned int len, unsigned char opcode)
536{
537 unsigned char cmd[16];
538 struct scsi_sense_hdr sshdr;
539 int result;
540
541 if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
542 return -EINVAL;
543
544 memset(cmd, 0, 16);
545 cmd[0] = MAINTENANCE_IN;
546 cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
547 cmd[2] = 1;
548 cmd[3] = opcode;
549 put_unaligned_be32(len, &cmd[6]);
550 memset(buffer, 0, len);
551
552 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
553 &sshdr, 30 * HZ, 3, NULL);
554
555 if (result && scsi_sense_valid(&sshdr) &&
556 sshdr.sense_key == ILLEGAL_REQUEST &&
557 (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
558 return -EINVAL;
559
560 if ((buffer[1] & 3) == 3)
561 return 1;
562
563 return 0;
564}
565EXPORT_SYMBOL(scsi_report_opcode);
566
567
568
569
570
571
572
573
574
575
576
577
578int scsi_device_get(struct scsi_device *sdev)
579{
580 if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
581 goto fail;
582 if (!get_device(&sdev->sdev_gendev))
583 goto fail;
584 if (!try_module_get(sdev->host->hostt->module))
585 goto fail_put_device;
586 return 0;
587
588fail_put_device:
589 put_device(&sdev->sdev_gendev);
590fail:
591 return -ENXIO;
592}
593EXPORT_SYMBOL(scsi_device_get);
594
595
596
597
598
599
600
601
602
/**
 * scsi_device_put - release a reference on a scsi_device
 * @sdev: device to release
 *
 * Drops the module and device references taken by a successful
 * scsi_device_get(); the device may be freed once the last reference
 * is gone.
 */
void scsi_device_put(struct scsi_device *sdev)
{
	module_put(sdev->host->hostt->module);
	put_device(&sdev->sdev_gendev);
}
608EXPORT_SYMBOL(scsi_device_put);
609
610
/*
 * Advance to the next referenced device on @shost's device list.
 * Helper for the shost_for_each_device() iterator: takes a reference
 * on the returned device and drops the reference on @prev.
 */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (prev)
		scsi_device_put(prev);
	return next;
}
633EXPORT_SYMBOL(__scsi_iterate_devices);
634
635
636
637
638
639
640
641
642
643
644
645void starget_for_each_device(struct scsi_target *starget, void *data,
646 void (*fn)(struct scsi_device *, void *))
647{
648 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
649 struct scsi_device *sdev;
650
651 shost_for_each_device(sdev, shost) {
652 if ((sdev->channel == starget->channel) &&
653 (sdev->id == starget->id))
654 fn(sdev, data);
655 }
656}
657EXPORT_SYMBOL(starget_for_each_device);
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673void __starget_for_each_device(struct scsi_target *starget, void *data,
674 void (*fn)(struct scsi_device *, void *))
675{
676 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
677 struct scsi_device *sdev;
678
679 __shost_for_each_device(sdev, shost) {
680 if ((sdev->channel == starget->channel) &&
681 (sdev->id == starget->id))
682 fn(sdev, data);
683 }
684}
685EXPORT_SYMBOL(__starget_for_each_device);
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
703 u64 lun)
704{
705 struct scsi_device *sdev;
706
707 list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
708 if (sdev->sdev_state == SDEV_DEL)
709 continue;
710 if (sdev->lun ==lun)
711 return sdev;
712 }
713
714 return NULL;
715}
716EXPORT_SYMBOL(__scsi_device_lookup_by_target);
717
718
719
720
721
722
723
724
725
726
727struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
728 u64 lun)
729{
730 struct scsi_device *sdev;
731 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
732 unsigned long flags;
733
734 spin_lock_irqsave(shost->host_lock, flags);
735 sdev = __scsi_device_lookup_by_target(starget, lun);
736 if (sdev && scsi_device_get(sdev))
737 sdev = NULL;
738 spin_unlock_irqrestore(shost->host_lock, flags);
739
740 return sdev;
741}
742EXPORT_SYMBOL(scsi_device_lookup_by_target);
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
761 uint channel, uint id, u64 lun)
762{
763 struct scsi_device *sdev;
764
765 list_for_each_entry(sdev, &shost->__devices, siblings) {
766 if (sdev->sdev_state == SDEV_DEL)
767 continue;
768 if (sdev->channel == channel && sdev->id == id &&
769 sdev->lun ==lun)
770 return sdev;
771 }
772
773 return NULL;
774}
775EXPORT_SYMBOL(__scsi_device_lookup);
776
777
778
779
780
781
782
783
784
785
786
787
788struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
789 uint channel, uint id, u64 lun)
790{
791 struct scsi_device *sdev;
792 unsigned long flags;
793
794 spin_lock_irqsave(shost->host_lock, flags);
795 sdev = __scsi_device_lookup(shost, channel, id, lun);
796 if (sdev && scsi_device_get(sdev))
797 sdev = NULL;
798 spin_unlock_irqrestore(shost->host_lock, flags);
799
800 return sdev;
801}
802EXPORT_SYMBOL(scsi_device_lookup);
803
MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

/* runtime-writable bit mask selecting SCSI logging levels */
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

/* default I/O path selection; overridable via the use_blk_mq parameter */
#ifdef CONFIG_SCSI_MQ_DEFAULT
bool scsi_use_blk_mq = true;
#else
bool scsi_use_blk_mq = false;
#endif
module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
816
/*
 * Bring up the SCSI core sub-components in dependency order.  On any
 * failure, everything initialized so far is torn down in reverse
 * order via the goto ladder and the error is returned.
 */
static int __init init_scsi(void)
{
	int error;

	error = scsi_init_queue();
	if (error)
		return error;
	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	/* netlink init has no error handling here; best-effort */
	scsi_netlink_init();

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}
859
/*
 * Tear down the SCSI core in reverse of init_scsi(), then drain the
 * sd probe async domain so no asynchronous probes outlive the module.
 */
static void __exit exit_scsi(void)
{
	scsi_netlink_exit();
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
	async_unregister_domain(&scsi_sd_probe_domain);
}
871
/* initialize at subsystem level so drivers can register hosts later */
subsys_initcall(init_scsi);
module_exit(exit_scsi);
874