/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Connectathon.
 *                        Copyright (C) 1992 Drew Eckhardt
 */
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

static void scsi_run_queue(struct request_queue *q);

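/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */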
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

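/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */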
static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked
	 * condition and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}

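/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands, or the device
 *              returned QUEUE_FULL and can accept no more commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */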
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	return __scsi_queue_insert(cmd, reason, 1);
}

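/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 * @resid:	optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */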
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);

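/*
 * scsi_execute_req - wrapper around scsi_execute() that allocates a
 * temporary sense buffer when the caller supplies a scsi_sense_hdr,
 * and normalizes the returned sense data into it.
 */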
int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);

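/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */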
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

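/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */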
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
		return 1;

	return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}

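/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */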
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	LIST_HEAD(starved_list);
	unsigned long flags;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		int flagset;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		spin_unlock(shost->host_lock);

		spin_lock(sdev->request_queue->queue_lock);
		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
			!test_bit(QUEUE_FLAG_REENTER,
				  &sdev->request_queue->queue_flags);
		if (flagset)
			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
		__blk_run_queue(sdev->request_queue);
		if (flagset)
			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);

		spin_lock(shost->host_lock);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

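/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */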
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);

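/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */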
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		/* kill remainder if no retries */
		if (error && scsi_noretry_cmd(cmd))
			blk_end_request_all(req, error);
		else {
			if (requeue) {
				/*
				 * Leftovers again.  Stick the leftovers in
				 * the front of the queue, and goose the
				 * queue again.
				 */
				scsi_release_buffers(cmd);
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_buffers(cmd, 0);
	scsi_next_command(cmd);
	return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}

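/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */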
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	__scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);

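/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must call scsi_end_request().  This will finish off
 *		the specified number of sectors.  If we are done, the
 *		command block will be released and the queue function
 *		will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */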
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	char *description = NULL;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = -EIO;
		}

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;

			blk_end_request_all(req, 0);

			scsi_release_buffers(cmd);
			scsi_next_command(cmd);
			return;
		}
	}

	/* no bidi support for !blk_pc_request yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
				      "%d bytes done.\n",
				      blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
		if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense("", cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;

	error = -EIO;

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				description = "Media Changed";
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) { /* DIX */
				description = "Host Data Integrity Failure";
				action = ACTION_FAIL;
				error = -EILSEQ;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) { /* DIF */
				description = "Target Data Integrity Failure";
				error = -EILSEQ;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					description = "Device not ready";
					action = ACTION_FAIL;
					break;
				}
			} else {
				description = "Device not ready";
				action = ACTION_FAIL;
			}
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			description = "Unhandled sense code";
			action = ACTION_FAIL;
			break;
		}
	} else {
		description = "Unhandled error code";
		action = ACTION_FAIL;
	}

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		scsi_release_buffers(cmd);
		if (!(req->cmd_flags & REQ_QUIET)) {
			if (description)
				scmd_printk(KERN_INFO, cmd, "%s\n",
					    description);
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
			scsi_print_command(cmd);
		}
		if (blk_end_request_err(req, -EIO))
			scsi_requeue_command(q, cmd);
		else
			scsi_next_command(cmd);
		break;
	case ACTION_REPREP:
		/* Unprep the request and put it back at the head of the
		 * queue.  A new command will be prepared and issued.
		 */
		scsi_release_buffers(cmd);
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}

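/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */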
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(cmd->request)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		cmd->request->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
					  GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(cmd->request)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(cmd->request);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(cmd->request,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	if (error == BLKPREP_KILL)
		scsi_put_command(cmd);
	else /* BLKPREP_DEFER */
		scsi_unprep_request(cmd->request);

	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must have
	 * a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	cmd->cmd_len = req->cmd_len;
	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

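/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write request
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */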
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * requests to an offline device must be refused.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}
EXPORT_SYMBOL(scsi_prep_fn);

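/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */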
static inline int scsi_dev_queue_ready(struct request_queue *q,
				       struct scsi_device *sdev)
{
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (scsi_device_is_busy(sdev))
		return 0;

	return 1;
}

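/*
 * scsi_target_queue_ready: check if we can send commands to the target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */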
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					  struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	if (starget->single_lun) {
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev)
			return 0;
		starget->starget_sdev_user = sdev;
	}

	if (starget->target_busy == 0 && starget->target_blocked) {
		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (--starget->target_blocked == 0) {
			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
					 "unblocking target at zero depth\n"));
		} else
			return 0;
	}

	if (scsi_target_is_busy(starget)) {
		if (list_empty(&sdev->starved_entry)) {
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
			return 0;
		}
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);
	return 1;
}

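/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must be called with host_lock held.
 */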
static inline int scsi_host_queue_ready(struct request_queue *q,
					struct Scsi_Host *shost,
					struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			return 0;
		}
	}
	if (scsi_host_is_busy(shost)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

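/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os, returns true and no I/Os to scsi
 * (e.g. !sdev), scsi needs to return 'not busy'.
 * Otherwise, request stacking drivers may hold requests forever.
 */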
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_target *starget;

	if (!sdev)
		return 0;

	shost = sdev->host;
	starget = scsi_target(sdev);

	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	/* check for a NULL command before dereferencing cmd->device */
	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
		       __func__);
		BUG();
	}

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;

	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	starget->target_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	/*
	 * Set the serial numbers back to zero
	 */
	cmd->serial_number = 0;

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}

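/*
 * Function:   scsi_request_fn()
 *
 * Purpose:    Main strategy routine for SCSI.
 *
 * Arguments:  q       - Pointer to actual queue.
 *
 * Returns:    Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */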
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = blk_peek_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;

		scsi_target(sdev)->target_busy++;
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;
	struct device *dev = shost->shost_gendev.parent;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);

	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	/* New queue, no concurrency on queue_flags */
	if (!shost->use_clustering)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);

	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

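/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */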
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

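/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */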
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

int __init scsi_init_queue(void)
{
	int i;

	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}
	kmem_cache_destroy(scsi_sdb_cache);

	return -ENOMEM;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_sdb_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

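/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 *
 */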
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);

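/**
 *	scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if unsuccessful, or the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was
 *	issued) if successful.
 */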
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);

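/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense. Make sure that this is cleared before passing
 *		in.
 *
 *	Returns zero if unsuccessful or an error if TUR failed.  For
 *	removable media, UNIT_ATTENTION sets ->changed flag.
 **/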
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	if (!sshdr)
		/* could not allocate sense buffer, so can't process it */
		return result;

	if (sdev->removable && scsi_sense_valid(sshdr) &&
	    (sshdr->sense_key == UNIT_ATTENTION ||
	     sshdr->sense_key == NOT_READY)) {
		sdev->changed = 1;
		result = 0;
	}
	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

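/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if unsuccessful or an error if the requested
 *	transition is illegal.
 **/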
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s\n",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);

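/**
 * 	scsi_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */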
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;

	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

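/**
 * 	scsi_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */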
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

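/**
 * 	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */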
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

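/**
 * 	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event.
 */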
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

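/**
 * 	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type.
 */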
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);

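/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if unsuccessful or an error if not.
 **/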
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

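/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/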
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

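/**
 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 *	This routine assumes the host_lock is held on entry.
 */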
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

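/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 */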
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	if (sdev->sdev_state == SDEV_BLOCK)
		sdev->sdev_state = SDEV_RUNNING;
	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
		sdev->sdev_state = SDEV_CREATED;
	else
		return -EINVAL;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);

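/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */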
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

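/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */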
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);