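/*
 * SCSI queueing library.
 *
 * Generic mid-level request queueing, command preparation, dispatch
 * and completion handling shared by the SCSI upper-level drivers.
 */
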
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

static void scsi_run_queue(struct request_queue *q);
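
/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that the queue lock is held upon entry.
 *
 * Returns:	Nothing.
 */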
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	blk_unprep_request(req);
	req->special = NULL;

	scsi_put_command(cmd);
}
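
/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */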
static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
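
/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 */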
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	return __scsi_queue_insert(cmd, reason, 1);
}
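
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 * @resid:	optional residual length
 *
 * Returns the req->errors value, which is the scsi_cmnd result field.
 */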
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
	if (!req)	/* can fail, e.g. when the queue is being torn down */
		return ret;

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage devices) may not fill up the
	 * whole transfer buffer even though they report success; zero
	 * out the unwritten tail so that stale data is never handed
	 * back to the caller.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);

int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
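
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */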
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
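
/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */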
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
		return 1;

	return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}
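
/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next.
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */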
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	LIST_HEAD(starved_list);
	unsigned long flags;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		int flagset;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		spin_unlock(shost->host_lock);

		spin_lock(sdev->request_queue->queue_lock);
		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
				!test_bit(QUEUE_FLAG_REENTER,
					&sdev->request_queue->queue_flags);
		if (flagset)
			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
		__blk_run_queue(sdev->request_queue, false);
		if (flagset)
			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);

		spin_lock(shost->host_lock);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}
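
/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		the main reason is I/O errors in the middle of the
 *		request, in which case we need to request the blocks
 *		that come after the bad sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */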
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);
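
/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */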
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		/* kill remainder if no retries */
		if (error && scsi_noretry_cmd(cmd))
			blk_end_request_all(req, error);
		else {
			if (requeue) {
				/*
				 * Leftovers: stick them at the front of
				 * the queue, and goose the queue again.
				 */
				scsi_release_buffers(cmd);
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_buffers(cmd, 0);
	scsi_next_command(cmd);
	return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}
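
/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Free resources allocated for a command by scsi_init_io().
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the earlier I/O setup.  Primarily this would involve
 *		the scatter-gather table.
 */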
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	__scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);
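
/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:	We will finish off the specified number of sectors.  If we
 *		are done, the command block will be released and the queue
 *		function will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */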
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	char *description = NULL;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
		req->errors = result;
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = -EIO;
		}

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;

			scsi_release_buffers(cmd);
			blk_end_request_all(req, 0);

			scsi_next_command(cmd);
			return;
		}
	}

	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
				      "%d bytes done.\n",
				      blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always a success
	 * case: log the sense and then treat the I/O as having completed.
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SAT translated commands.
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense("", cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;

	error = -EIO;

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				description = "Media Changed";
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) { /* DIX */
				description = "Host Data Integrity Failure";
				action = ACTION_FAIL;
				error = -EILSEQ;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) { /* DIF */
				description = "Target Data Integrity Failure";
				error = -EILSEQ;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					description = "Device not ready";
					action = ACTION_FAIL;
					break;
				}
			} else {
				description = "Device not ready";
				action = ACTION_FAIL;
			}
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			description = "Unhandled sense code";
			action = ACTION_FAIL;
			break;
		}
	} else {
		description = "Unhandled error code";
		action = ACTION_FAIL;
	}

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		scsi_release_buffers(cmd);
		if (!(req->cmd_flags & REQ_QUIET)) {
			if (description)
				scmd_printk(KERN_INFO, cmd, "%s\n",
					    description);
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
			scsi_print_command(cmd);
		}
		if (blk_end_request_err(req, error))
			scsi_requeue_command(q, cmd);
		else
			scsi_next_command(cmd);
		break;
	case ACTION_REPREP:
		/* Unprep the request and put it back at the head of the
		 * queue. A new command will be prepared and issued.
		 */
		scsi_release_buffers(cmd);
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}
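
/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */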
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct request *rq = cmd->request;

	int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		rq->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	cmd->request->special = NULL;
	scsi_put_command(cmd);
	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI
	 * command that does not transfer data, in which case they may
	 * optionally submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	cmd->cmd_len = req->cmd_len;
	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
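
/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */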
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}
EXPORT_SYMBOL(scsi_prep_fn);

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (scsi_device_is_busy(sdev))
		return 0;

	return 1;
}

/*
 * scsi_target_queue_ready: checks if we can send commands to the target
 * @sdev is attached to, returning 1 if so else returning 0.
 *
 * Called with the host lock held.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	if (starget->single_lun) {
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev)
			return 0;
		starget->starget_sdev_user = sdev;
	}

	if (starget->target_busy == 0 && starget->target_blocked) {
		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (--starget->target_blocked == 0) {
			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
					 "unblocking target at zero depth\n"));
		} else
			return 0;
	}

	if (scsi_target_is_busy(starget)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);
	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned.
 *
 * Called with the host lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			return 0;
		}
	}
	if (scsi_host_is_busy(shost)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os
 * (e.g. !sdev), scsi needs to return 'not busy'.
 * Otherwise, request stacking drivers may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_target *starget;

	if (!sdev)
		return 0;

	shost = sdev->host;
	starget = scsi_target(sdev);

	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	starget->target_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}
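
/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */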
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = blk_peek_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is not too busy.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * run. Then run the queues below.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;

		scsi_target(sdev)->target_busy++;
		shost->host_busy++;

		/*
		 * Drop the host lock; scsi_dispatch_cmd runs without it.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;
	struct device *dev = shost->shost_gendev.parent;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SCSI_MAX_SG_CHAIN_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	if (!shost->use_clustering)
		q->limits.cluster = 0;

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);

	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}
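
/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */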
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);
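
/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */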
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

int __init scsi_init_queue(void)
{
	int i;

	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}
	kmem_cache_destroy(scsi_sdb_cache);

	return -ENOMEM;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_sdb_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}
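
/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error.
 */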
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);
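
/**
 *	scsi_mode_sense - issue a mode sense, falling back from 10 to six
 *		bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if unsuccessful, or the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was
 *	issued) if successful.
 */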
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/*
	 * If the device rejected the ten byte MODE SENSE with an
	 * ILLEGAL REQUEST for the command opcode, fall back to the
	 * six byte version and retry.
	 */
	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
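
/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense. Make sure that this is cleared before passing
 *		in.
 *
 *	Returns zero if unsuccessful or an error if TUR failed.  For
 *	removable media, UNIT_ATTENTION sets ->changed flag.
 */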
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
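
/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if unsuccessful or an error if the requested
 *	transition is illegal.
 */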
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s\n",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);

/**
 *	scsi_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;

	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

/**
 *	scsi_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

/**
 *	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 *	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 *	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
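
/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if unsuccessful or an error if not.
 */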
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 */
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
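
/**
 * scsi_internal_device_block - internal function to put a device temporarily
 *	into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 *	Block request made by scsi lld's to temporarily stop all
 *	scsi commands on the specified device.  Returns zero if successful.
 *
 *	Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_internal_device_unblock().
 */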
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);
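
/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 *	Called by scsi lld's or the midlayer to restart the device queue
 *	for the previously suspended scsi device.  Called from interrupt or
 *	normal process context.
 *
 *	Returns zero if successful or error if not.
 *
 *	Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.
 */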
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	if (sdev->sdev_state == SDEV_BLOCK)
		sdev->sdev_state = SDEV_RUNNING;
	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
		sdev->sdev_state = SDEV_CREATED;
	else if (sdev->sdev_state != SDEV_CANCEL &&
		 sdev->sdev_state != SDEV_OFFLINE)
		return -EINVAL;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
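
/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */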
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address,
 *	previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);