10#include <linux/bio.h>
11#include <linux/bitops.h>
12#include <linux/blkdev.h>
13#include <linux/completion.h>
14#include <linux/kernel.h>
15#include <linux/export.h>
16#include <linux/mempool.h>
17#include <linux/slab.h>
18#include <linux/init.h>
19#include <linux/pci.h>
20#include <linux/delay.h>
21#include <linux/hardirq.h>
22#include <linux/scatterlist.h>
23
24#include <scsi/scsi.h>
25#include <scsi/scsi_cmnd.h>
26#include <scsi/scsi_dbg.h>
27#include <scsi/scsi_device.h>
28#include <scsi/scsi_driver.h>
29#include <scsi/scsi_eh.h>
30#include <scsi/scsi_host.h>
31
32#include "scsi_priv.h"
33#include "scsi_logging.h"
34
35
36#define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
37#define SG_MEMPOOL_SIZE 2
38
39struct scsi_host_sg_pool {
40 size_t size;
41 char *name;
42 struct kmem_cache *slab;
43 mempool_t *pool;
44};
45
46#define SP(x) { x, "sgpool-" __stringify(x) }
47#if (SCSI_MAX_SG_SEGMENTS < 32)
48#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
49#endif
50static struct scsi_host_sg_pool scsi_sg_pools[] = {
51 SP(8),
52 SP(16),
53#if (SCSI_MAX_SG_SEGMENTS > 32)
54 SP(32),
55#if (SCSI_MAX_SG_SEGMENTS > 64)
56 SP(64),
57#if (SCSI_MAX_SG_SEGMENTS > 128)
58 SP(128),
59#if (SCSI_MAX_SG_SEGMENTS > 256)
60#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
61#endif
62#endif
63#endif
64#endif
65 SP(SCSI_MAX_SG_SEGMENTS)
66};
67#undef SP
68
69struct kmem_cache *scsi_sdb_cache;
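
/*
 * How long to wait (in milliseconds) before retrying queue processing
 * after a resource shortage; used with blk_delay_queue() below.
 */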
76#define SCSI_QUEUE_DELAY 3
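
/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Called with the queue lock held by its caller in this
 *		file (see scsi_requeue_command()).
 *
 * Returns:	Nothing.
 */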
90static void scsi_unprep_request(struct request *req)
91{
92 struct scsi_cmnd *cmd = req->special;
93
94 blk_unprep_request(req);
95 req->special = NULL;
96
97 scsi_put_command(cmd);
98}
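
/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the device should be unbusied before requeueing
 *
 * Marks the host/target/device as blocked according to @reason,
 * optionally decrements the busy counters, and puts the request back
 * at the head of its queue.  The public interface scsi_queue_insert()
 * always unbusies the device because it is called before completion;
 * this variant is for requeueing after completion, which should only
 * happen inside this file.
 */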
112static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
113{
114 struct Scsi_Host *host = cmd->device->host;
115 struct scsi_device *device = cmd->device;
116 struct scsi_target *starget = scsi_target(device);
117 struct request_queue *q = device->request_queue;
118 unsigned long flags;
119
120 SCSI_LOG_MLQUEUE(1,
121 printk("Inserting command %p into mlqueue\n", cmd));
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136 switch (reason) {
137 case SCSI_MLQUEUE_HOST_BUSY:
138 host->host_blocked = host->max_host_blocked;
139 break;
140 case SCSI_MLQUEUE_DEVICE_BUSY:
141 case SCSI_MLQUEUE_EH_RETRY:
142 device->device_blocked = device->max_device_blocked;
143 break;
144 case SCSI_MLQUEUE_TARGET_BUSY:
145 starget->target_blocked = starget->max_target_blocked;
146 break;
147 }
148
149
150
151
152
153 if (unbusy)
154 scsi_device_unbusy(device);
155
156
157
158
159
160 spin_lock_irqsave(q->queue_lock, flags);
161 blk_requeue_request(q, cmd->request);
162 spin_unlock_irqrestore(q->queue_lock, flags);
163
164 kblockd_schedule_work(q, &device->requeue_work);
165
166 return 0;
167}
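
/*
 * Function:	scsi_queue_insert()
 *
 * Purpose:	Insert a command in the midlevel queue.
 *
 * Arguments:	cmd	- command that we are adding to the queue.
 *		reason	- why we are inserting the command.
 *
 * Lock status:	Assumed that the lock is not held upon entry.
 *
 * Returns:	Nothing.
 *
 * Notes:	We do this for one of two cases: either the host is busy
 *		and cannot accept any more commands for the time being,
 *		or the device returned QUEUE_FULL and can accept no more
 *		commands.  May be called from interrupt or process
 *		context.
 */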
188int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
189{
190 return __scsi_queue_insert(cmd, reason, 1);
191}
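
/**
 * scsi_execute - insert a request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	length of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry the request
 * @flags:	flags or'ed into the request flags
 * @resid:	optional residual length
 *
 * Returns the req->errors value, which is the scsi_cmnd result field.
 */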
208int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
209 int data_direction, void *buffer, unsigned bufflen,
210 unsigned char *sense, int timeout, int retries, int flags,
211 int *resid)
212{
213 struct request *req;
214 int write = (data_direction == DMA_TO_DEVICE);
215 int ret = DRIVER_ERROR << 24;
216
217 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
218 if (!req)
219 return ret;
220
221 if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
222 buffer, bufflen, __GFP_WAIT))
223 goto out;
224
225 req->cmd_len = COMMAND_SIZE(cmd[0]);
226 memcpy(req->cmd, cmd, req->cmd_len);
227 req->sense = sense;
228 req->sense_len = 0;
229 req->retries = retries;
230 req->timeout = timeout;
231 req->cmd_type = REQ_TYPE_BLOCK_PC;
232 req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
233
234
235
236
237 blk_execute_rq(req->q, NULL, req, 1);
238
239
240
241
242
243
244
245 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
246 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
247
248 if (resid)
249 *resid = req->resid_len;
250 ret = req->errors;
251 out:
252 blk_put_request(req);
253
254 return ret;
255}
256EXPORT_SYMBOL(scsi_execute);
257
258
259int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
260 int data_direction, void *buffer, unsigned bufflen,
261 struct scsi_sense_hdr *sshdr, int timeout, int retries,
262 int *resid)
263{
264 char *sense = NULL;
265 int result;
266
267 if (sshdr) {
268 sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
269 if (!sense)
270 return DRIVER_ERROR << 24;
271 }
272 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
273 sense, timeout, retries, 0, resid);
274 if (sshdr)
275 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
276
277 kfree(sense);
278 return result;
279}
280EXPORT_SYMBOL(scsi_execute_req);
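
/*
 * A minimal, hypothetical usage sketch (not part of this file): issuing a
 * TEST UNIT READY through scsi_execute_req() and decoding the sense data.
 * The timeout and retry values are illustrative only.
 *
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
 *				  &sshdr, 30 * HZ, 3, NULL);
 *	if (result && scsi_sense_valid(&sshdr))
 *		sdev_printk(KERN_INFO, sdev, "TUR sense %x/%x/%x\n",
 *			    sshdr.sense_key, sshdr.asc, sshdr.ascq);
 *
 * See scsi_test_unit_ready() later in this file for the in-tree variant.
 */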
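
/*
 * Function:	scsi_init_cmd_errh()
 *
 * Purpose:	Initialize cmd fields related to error handling.
 *
 * Arguments:	cmd	- command that is ready to be queued.
 *
 * Notes:	Clears the serial number and residual count, zeroes the
 *		sense buffer, and derives the CDB length if it has not
 *		been set yet.  Typically called once per command.
 */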
293static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
294{
295 cmd->serial_number = 0;
296 scsi_set_resid(cmd, 0);
297 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
298 if (cmd->cmd_len == 0)
299 cmd->cmd_len = scsi_command_size(cmd->cmnd);
300}
301
302void scsi_device_unbusy(struct scsi_device *sdev)
303{
304 struct Scsi_Host *shost = sdev->host;
305 struct scsi_target *starget = scsi_target(sdev);
306 unsigned long flags;
307
308 spin_lock_irqsave(shost->host_lock, flags);
309 shost->host_busy--;
310 starget->target_busy--;
311 if (unlikely(scsi_host_in_recovery(shost) &&
312 (shost->host_failed || shost->host_eh_scheduled)))
313 scsi_eh_wakeup(shost);
314 spin_unlock(shost->host_lock);
315 spin_lock(sdev->request_queue->queue_lock);
316 sdev->device_busy--;
317 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
318}
319
320
321
322
323
324
325
326
327static void scsi_single_lun_run(struct scsi_device *current_sdev)
328{
329 struct Scsi_Host *shost = current_sdev->host;
330 struct scsi_device *sdev, *tmp;
331 struct scsi_target *starget = scsi_target(current_sdev);
332 unsigned long flags;
333
334 spin_lock_irqsave(shost->host_lock, flags);
335 starget->starget_sdev_user = NULL;
336 spin_unlock_irqrestore(shost->host_lock, flags);
337
338
339
340
341
342
343
344 blk_run_queue(current_sdev->request_queue);
345
346 spin_lock_irqsave(shost->host_lock, flags);
347 if (starget->starget_sdev_user)
348 goto out;
349 list_for_each_entry_safe(sdev, tmp, &starget->devices,
350 same_target_siblings) {
351 if (sdev == current_sdev)
352 continue;
353 if (scsi_device_get(sdev))
354 continue;
355
356 spin_unlock_irqrestore(shost->host_lock, flags);
357 blk_run_queue(sdev->request_queue);
358 spin_lock_irqsave(shost->host_lock, flags);
359
360 scsi_device_put(sdev);
361 }
362 out:
363 spin_unlock_irqrestore(shost->host_lock, flags);
364}
365
366static inline int scsi_device_is_busy(struct scsi_device *sdev)
367{
368 if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
369 return 1;
370
371 return 0;
372}
373
374static inline int scsi_target_is_busy(struct scsi_target *starget)
375{
376 return ((starget->can_queue > 0 &&
377 starget->target_busy >= starget->can_queue) ||
378 starget->target_blocked);
379}
380
381static inline int scsi_host_is_busy(struct Scsi_Host *shost)
382{
383 if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
384 shost->host_blocked || shost->host_self_blocked)
385 return 1;
386
387 return 0;
388}
389
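
/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next.
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished; start a new
 *		one if possible.  Starved devices on this host get a
 *		chance to run before the passed-in queue is run.
 */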
402static void scsi_run_queue(struct request_queue *q)
403{
404 struct scsi_device *sdev = q->queuedata;
405 struct Scsi_Host *shost;
406 LIST_HEAD(starved_list);
407 unsigned long flags;
408
409
410 if (!sdev)
411 return;
412
413 shost = sdev->host;
414 if (scsi_target(sdev)->single_lun)
415 scsi_single_lun_run(sdev);
416
417 spin_lock_irqsave(shost->host_lock, flags);
418 list_splice_init(&shost->starved_list, &starved_list);
419
420 while (!list_empty(&starved_list)) {
421
422
423
424
425
426
427
428
429
430
431 if (scsi_host_is_busy(shost))
432 break;
433
434 sdev = list_entry(starved_list.next,
435 struct scsi_device, starved_entry);
436 list_del_init(&sdev->starved_entry);
437 if (scsi_target_is_busy(scsi_target(sdev))) {
438 list_move_tail(&sdev->starved_entry,
439 &shost->starved_list);
440 continue;
441 }
442
443 spin_unlock(shost->host_lock);
444 spin_lock(sdev->request_queue->queue_lock);
445 __blk_run_queue(sdev->request_queue);
446 spin_unlock(sdev->request_queue->queue_lock);
447 spin_lock(shost->host_lock);
448 }
449
450 list_splice(&starved_list, &shost->starved_list);
451 spin_unlock_irqrestore(shost->host_lock, flags);
452
453 blk_run_queue(q);
454}
455
456void scsi_requeue_run_queue(struct work_struct *work)
457{
458 struct scsi_device *sdev;
459 struct request_queue *q;
460
461 sdev = container_of(work, struct scsi_device, requeue_work);
462 q = sdev->request_queue;
463 scsi_run_queue(q);
464}
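
/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left over
 *		which weren't finished by the previous command; this can
 *		happen for a number of reasons, the main one being I/O
 *		errors in the middle of the request, in which case we
 *		need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */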
484static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
485{
486 struct request *req = cmd->request;
487 unsigned long flags;
488
489 spin_lock_irqsave(q->queue_lock, flags);
490 scsi_unprep_request(req);
491 blk_requeue_request(q, req);
492 spin_unlock_irqrestore(q->queue_lock, flags);
493
494 scsi_run_queue(q);
495}
496
497void scsi_next_command(struct scsi_cmnd *cmd)
498{
499 struct scsi_device *sdev = cmd->device;
500 struct request_queue *q = sdev->request_queue;
501
502
503 get_device(&sdev->sdev_gendev);
504
505 scsi_put_command(cmd);
506 scsi_run_queue(q);
507
508
509 put_device(&sdev->sdev_gendev);
510}
511
512void scsi_run_host_queues(struct Scsi_Host *shost)
513{
514 struct scsi_device *sdev;
515
516 shost_for_each_device(sdev, shost)
517 scsi_run_queue(sdev->request_queue);
518}
519
520static void __scsi_release_buffers(struct scsi_cmnd *, int);
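
/*
 * Function:	scsi_end_request()
 *
 * Purpose:	Post-processing of completed commands (usually invoked at
 *		the end of scsi_io_completion()).
 *
 * Arguments:	cmd	- command that is complete.
 *		error	- 0 if I/O indicates success, < 0 for I/O error.
 *		bytes	- number of bytes of completed I/O
 *		requeue	- indicates whether we should requeue leftovers.
 *
 * Lock status:	Assumed that the lock is not held upon entry.
 *
 * Returns:	cmd if the caller should continue processing leftover
 *		blocks, NULL otherwise (the request finished, or the
 *		leftovers were requeued or force-failed).
 *
 * Notes:	This is called for block device requests in order to mark
 *		some number of sectors as complete.  The request queue is
 *		guaranteed to be run at some point during this call; if
 *		cmd was requeued, it is a stale pointer upon return.
 */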
544static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
545 int bytes, int requeue)
546{
547 struct request_queue *q = cmd->device->request_queue;
548 struct request *req = cmd->request;
549
550
551
552
553
554 if (blk_end_request(req, error, bytes)) {
555
556 if (error && scsi_noretry_cmd(cmd))
557 blk_end_request_all(req, error);
558 else {
559 if (requeue) {
560
561
562
563
564
565 scsi_release_buffers(cmd);
566 scsi_requeue_command(q, cmd);
567 cmd = NULL;
568 }
569 return cmd;
570 }
571 }
572
573
574
575
576
577 __scsi_release_buffers(cmd, 0);
578 scsi_next_command(cmd);
579 return NULL;
580}
581
582static inline unsigned int scsi_sgtable_index(unsigned short nents)
583{
584 unsigned int index;
585
586 BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
587
588 if (nents <= 8)
589 index = 0;
590 else
591 index = get_count_order(nents) - 3;
592
593 return index;
594}
595
596static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
597{
598 struct scsi_host_sg_pool *sgp;
599
600 sgp = scsi_sg_pools + scsi_sgtable_index(nents);
601 mempool_free(sgl, sgp->pool);
602}
603
604static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
605{
606 struct scsi_host_sg_pool *sgp;
607
608 sgp = scsi_sg_pools + scsi_sgtable_index(nents);
609 return mempool_alloc(sgp->pool, gfp_mask);
610}
611
612static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
613 gfp_t gfp_mask)
614{
615 int ret;
616
617 BUG_ON(!nents);
618
619 ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
620 gfp_mask, scsi_sg_alloc);
621 if (unlikely(ret))
622 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
623 scsi_sg_free);
624
625 return ret;
626}
627
628static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
629{
630 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
631}
632
633static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
634{
635
636 if (cmd->sdb.table.nents)
637 scsi_free_sgtable(&cmd->sdb);
638
639 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
640
641 if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
642 struct scsi_data_buffer *bidi_sdb =
643 cmd->request->next_rq->special;
644 scsi_free_sgtable(bidi_sdb);
645 kmem_cache_free(scsi_sdb_cache, bidi_sdb);
646 cmd->request->next_rq->special = NULL;
647 }
648
649 if (scsi_prot_sg_count(cmd))
650 scsi_free_sgtable(cmd->prot_sdb);
651}
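
/**
 * scsi_release_buffers - Release resources allocated for a scsi_cmnd.
 * @cmd:	command that we are about to free
 *
 * Notes:
 *	In the event that an upper level driver rejects a command, we
 *	must release the resources allocated during scsi_init_io():
 *	primarily the scatter-gather tables (data, bidi and protection).
 */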
670void scsi_release_buffers(struct scsi_cmnd *cmd)
671{
672 __scsi_release_buffers(cmd, 1);
673}
674EXPORT_SYMBOL(scsi_release_buffers);
675
676static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
677{
678 int error = 0;
679
680 switch(host_byte(result)) {
681 case DID_TRANSPORT_FAILFAST:
682 error = -ENOLINK;
683 break;
684 case DID_TARGET_FAILURE:
685 set_host_byte(cmd, DID_OK);
686 error = -EREMOTEIO;
687 break;
688 case DID_NEXUS_FAILURE:
689 set_host_byte(cmd, DID_OK);
690 error = -EBADE;
691 break;
692 default:
693 error = -EIO;
694 break;
695 }
696
697 return error;
698}
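
/*
 * Function:	scsi_io_completion()
 *
 * Purpose:	Completion processing for block device I/O requests.
 *
 * Arguments:	cmd	- command that is finished.
 *		good_bytes - number of bytes of I/O known to have completed
 *			     successfully.
 *
 * Lock status:	Assumed that no lock is held upon entry.
 *
 * Returns:	Nothing
 *
 * Notes:	We will finish off the specified number of bytes.  If we
 *		are done, the command block will be released and the queue
 *		function will be goosed.  If we are not done, we have to
 *		figure out what to do next:
 *
 *		a) Call scsi_requeue_command().  The request is unprepared
 *		   and put back on the queue, and a new command is created
 *		   for it.  Used when we made forward progress or need to
 *		   re-prepare (e.g. switching from 10-byte to 6-byte CDBs).
 *
 *		b) Call scsi_queue_insert().  The request is put back on
 *		   the queue and retried with the same command, possibly
 *		   after a delay.
 *
 *		c) Fail the remainder of the request with an error.
 */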
736void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
737{
738 int result = cmd->result;
739 struct request_queue *q = cmd->device->request_queue;
740 struct request *req = cmd->request;
741 int error = 0;
742 struct scsi_sense_hdr sshdr;
743 int sense_valid = 0;
744 int sense_deferred = 0;
745 enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
746 ACTION_DELAYED_RETRY} action;
747 char *description = NULL;
748
749 if (result) {
750 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
751 if (sense_valid)
752 sense_deferred = scsi_sense_is_deferred(&sshdr);
753 }
754
755 if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
756 req->errors = result;
757 if (result) {
758 if (sense_valid && req->sense) {
759
760
761
762 int len = 8 + cmd->sense_buffer[7];
763
764 if (len > SCSI_SENSE_BUFFERSIZE)
765 len = SCSI_SENSE_BUFFERSIZE;
766 memcpy(req->sense, cmd->sense_buffer, len);
767 req->sense_len = len;
768 }
769 if (!sense_deferred)
770 error = __scsi_error_from_host_byte(cmd, result);
771 }
772
773 req->resid_len = scsi_get_resid(cmd);
774
775 if (scsi_bidi_cmnd(cmd)) {
776
777
778
779
780 req->next_rq->resid_len = scsi_in(cmd)->resid;
781
782 scsi_release_buffers(cmd);
783 blk_end_request_all(req, 0);
784
785 scsi_next_command(cmd);
786 return;
787 }
788 }
789
790
791 BUG_ON(blk_bidi_rq(req));
792
793
794
795
796
797 SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
798 "%d bytes done.\n",
799 blk_rq_sectors(req), good_bytes));
800
801
802
803
804
805
806
807 if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
808
809
810
811
812 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
813 ;
814 else if (!(req->cmd_flags & REQ_QUIET))
815 scsi_print_sense("", cmd);
816 result = 0;
817
818 error = 0;
819 }
820
821
822
823
824
825
826 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
827 return;
828
829 error = __scsi_error_from_host_byte(cmd, result);
830
831 if (host_byte(result) == DID_RESET) {
832
833
834
835
836 action = ACTION_RETRY;
837 } else if (sense_valid && !sense_deferred) {
838 switch (sshdr.sense_key) {
839 case UNIT_ATTENTION:
840 if (cmd->device->removable) {
841
842
843
844 cmd->device->changed = 1;
845 description = "Media Changed";
846 action = ACTION_FAIL;
847 } else {
848
849
850
851
852
853 action = ACTION_RETRY;
854 }
855 break;
856 case ILLEGAL_REQUEST:
857
858
859
860
861
862
863
864
865 if ((cmd->device->use_10_for_rw &&
866 sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
867 (cmd->cmnd[0] == READ_10 ||
868 cmd->cmnd[0] == WRITE_10)) {
869
870 cmd->device->use_10_for_rw = 0;
871 action = ACTION_REPREP;
872 } else if (sshdr.asc == 0x10) {
873 description = "Host Data Integrity Failure";
874 action = ACTION_FAIL;
875 error = -EILSEQ;
876
877 } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
878 (cmd->cmnd[0] == UNMAP ||
879 cmd->cmnd[0] == WRITE_SAME_16 ||
880 cmd->cmnd[0] == WRITE_SAME)) {
881 description = "Discard failure";
882 action = ACTION_FAIL;
883 error = -EREMOTEIO;
884 } else
885 action = ACTION_FAIL;
886 break;
887 case ABORTED_COMMAND:
888 action = ACTION_FAIL;
889 if (sshdr.asc == 0x10) {
890 description = "Target Data Integrity Failure";
891 error = -EILSEQ;
892 }
893 break;
894 case NOT_READY:
895
896
897
898 if (sshdr.asc == 0x04) {
899 switch (sshdr.ascq) {
900 case 0x01:
901 case 0x04:
902 case 0x05:
903 case 0x06:
904 case 0x07:
905 case 0x08:
906 case 0x09:
907 case 0x14:
908 action = ACTION_DELAYED_RETRY;
909 break;
910 default:
911 description = "Device not ready";
912 action = ACTION_FAIL;
913 break;
914 }
915 } else {
916 description = "Device not ready";
917 action = ACTION_FAIL;
918 }
919 break;
920 case VOLUME_OVERFLOW:
921
922 action = ACTION_FAIL;
923 break;
924 default:
925 description = "Unhandled sense code";
926 action = ACTION_FAIL;
927 break;
928 }
929 } else {
930 description = "Unhandled error code";
931 action = ACTION_FAIL;
932 }
933
934 switch (action) {
935 case ACTION_FAIL:
936
937 scsi_release_buffers(cmd);
938 if (!(req->cmd_flags & REQ_QUIET)) {
939 if (description)
940 scmd_printk(KERN_INFO, cmd, "%s\n",
941 description);
942 scsi_print_result(cmd);
943 if (driver_byte(result) & DRIVER_SENSE)
944 scsi_print_sense("", cmd);
945 scsi_print_command(cmd);
946 }
947 if (blk_end_request_err(req, error))
948 scsi_requeue_command(q, cmd);
949 else
950 scsi_next_command(cmd);
951 break;
952 case ACTION_REPREP:
953
954
955
956 scsi_release_buffers(cmd);
957 scsi_requeue_command(q, cmd);
958 break;
959 case ACTION_RETRY:
960
961 __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
962 break;
963 case ACTION_DELAYED_RETRY:
964
965 __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
966 break;
967 }
968}
969
970static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
971 gfp_t gfp_mask)
972{
973 int count;
974
975
976
977
978 if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
979 gfp_mask))) {
980 return BLKPREP_DEFER;
981 }
982
983 req->buffer = NULL;
984
985
986
987
988
989 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
990 BUG_ON(count > sdb->table.nents);
991 sdb->table.nents = count;
992 sdb->length = blk_rq_bytes(req);
993 return BLKPREP_OK;
994}
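
/*
 * Function:	scsi_init_io()
 *
 * Purpose:	SCSI I/O initialization: allocate and map the scatter-gather
 *		tables (data, bidi and integrity) for a command.
 *
 * Arguments:	cmd	- Command descriptor we wish to initialize
 *		gfp_mask - allocation flags
 *
 * Returns:	BLKPREP_OK on success, or an error code (typically
 *		BLKPREP_DEFER) on failure; on failure the command's
 *		buffers are released and the command is put.
 */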
1007int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1008{
1009 struct request *rq = cmd->request;
1010
1011 int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
1012 if (error)
1013 goto err_exit;
1014
1015 if (blk_bidi_rq(rq)) {
1016 struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
1017 scsi_sdb_cache, GFP_ATOMIC);
1018 if (!bidi_sdb) {
1019 error = BLKPREP_DEFER;
1020 goto err_exit;
1021 }
1022
1023 rq->next_rq->special = bidi_sdb;
1024 error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
1025 if (error)
1026 goto err_exit;
1027 }
1028
1029 if (blk_integrity_rq(rq)) {
1030 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
1031 int ivecs, count;
1032
1033 BUG_ON(prot_sdb == NULL);
1034 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
1035
1036 if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
1037 error = BLKPREP_DEFER;
1038 goto err_exit;
1039 }
1040
1041 count = blk_rq_map_integrity_sg(rq->q, rq->bio,
1042 prot_sdb->table.sgl);
1043 BUG_ON(unlikely(count > ivecs));
1044 BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
1045
1046 cmd->prot_sdb = prot_sdb;
1047 cmd->prot_sdb->table.nents = count;
1048 }
1049
	return BLKPREP_OK;
1051
1052err_exit:
1053 scsi_release_buffers(cmd);
1054 cmd->request->special = NULL;
1055 scsi_put_command(cmd);
1056 return error;
1057}
1058EXPORT_SYMBOL(scsi_init_io);
1059
1060static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1061 struct request *req)
1062{
1063 struct scsi_cmnd *cmd;
1064
1065 if (!req->special) {
1066 cmd = scsi_get_command(sdev, GFP_ATOMIC);
1067 if (unlikely(!cmd))
1068 return NULL;
1069 req->special = cmd;
1070 } else {
1071 cmd = req->special;
1072 }
1073
1074
1075 cmd->tag = req->tag;
1076 cmd->request = req;
1077
1078 cmd->cmnd = req->cmd;
1079 cmd->prot_op = SCSI_PROT_NORMAL;
1080
1081 return cmd;
1082}
1083
1084int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1085{
1086 struct scsi_cmnd *cmd;
1087 int ret = scsi_prep_state_check(sdev, req);
1088
1089 if (ret != BLKPREP_OK)
1090 return ret;
1091
1092 cmd = scsi_get_cmd_from_req(sdev, req);
1093 if (unlikely(!cmd))
1094 return BLKPREP_DEFER;
1095
1096
1097
1098
1099
1100
1101
1102 if (req->bio) {
1103 int ret;
1104
1105 BUG_ON(!req->nr_phys_segments);
1106
1107 ret = scsi_init_io(cmd, GFP_ATOMIC);
1108 if (unlikely(ret))
1109 return ret;
1110 } else {
1111 BUG_ON(blk_rq_bytes(req));
1112
1113 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1114 req->buffer = NULL;
1115 }
1116
1117 cmd->cmd_len = req->cmd_len;
1118 if (!blk_rq_bytes(req))
1119 cmd->sc_data_direction = DMA_NONE;
1120 else if (rq_data_dir(req) == WRITE)
1121 cmd->sc_data_direction = DMA_TO_DEVICE;
1122 else
1123 cmd->sc_data_direction = DMA_FROM_DEVICE;
1124
1125 cmd->transfersize = blk_rq_bytes(req);
1126 cmd->allowed = req->retries;
1127 return BLKPREP_OK;
1128}
1129EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
1130
1131
1132
1133
1134
1135
1136int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1137{
1138 struct scsi_cmnd *cmd;
1139 int ret = scsi_prep_state_check(sdev, req);
1140
1141 if (ret != BLKPREP_OK)
1142 return ret;
1143
1144 if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1145 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1146 ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1147 if (ret != BLKPREP_OK)
1148 return ret;
1149 }
1150
1151
1152
1153
1154 BUG_ON(!req->nr_phys_segments);
1155
1156 cmd = scsi_get_cmd_from_req(sdev, req);
1157 if (unlikely(!cmd))
1158 return BLKPREP_DEFER;
1159
1160 memset(cmd->cmnd, 0, BLK_MAX_CDB);
1161 return scsi_init_io(cmd, GFP_ATOMIC);
1162}
1163EXPORT_SYMBOL(scsi_setup_fs_cmnd);
1164
1165int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1166{
1167 int ret = BLKPREP_OK;
1168
1169
1170
1171
1172
1173 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1174 switch (sdev->sdev_state) {
1175 case SDEV_OFFLINE:
1176
1177
1178
1179
1180
1181 sdev_printk(KERN_ERR, sdev,
1182 "rejecting I/O to offline device\n");
1183 ret = BLKPREP_KILL;
1184 break;
1185 case SDEV_DEL:
1186
1187
1188
1189
1190 sdev_printk(KERN_ERR, sdev,
1191 "rejecting I/O to dead device\n");
1192 ret = BLKPREP_KILL;
1193 break;
1194 case SDEV_QUIESCE:
1195 case SDEV_BLOCK:
1196 case SDEV_CREATED_BLOCK:
1197
1198
1199
1200 if (!(req->cmd_flags & REQ_PREEMPT))
1201 ret = BLKPREP_DEFER;
1202 break;
1203 default:
1204
1205
1206
1207
1208
1209 if (!(req->cmd_flags & REQ_PREEMPT))
1210 ret = BLKPREP_KILL;
1211 break;
1212 }
1213 }
1214 return ret;
1215}
1216EXPORT_SYMBOL(scsi_prep_state_check);
1217
1218int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1219{
1220 struct scsi_device *sdev = q->queuedata;
1221
1222 switch (ret) {
1223 case BLKPREP_KILL:
1224 req->errors = DID_NO_CONNECT << 16;
1225
1226 if (req->special) {
1227 struct scsi_cmnd *cmd = req->special;
1228 scsi_release_buffers(cmd);
1229 scsi_put_command(cmd);
1230 req->special = NULL;
1231 }
1232 break;
1233 case BLKPREP_DEFER:
1234
1235
1236
1237
1238
1239 if (sdev->device_busy == 0)
1240 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1241 break;
1242 default:
1243 req->cmd_flags |= REQ_DONTPREP;
1244 }
1245
1246 return ret;
1247}
1248EXPORT_SYMBOL(scsi_prep_return);
1249
1250int scsi_prep_fn(struct request_queue *q, struct request *req)
1251{
1252 struct scsi_device *sdev = q->queuedata;
1253 int ret = BLKPREP_KILL;
1254
1255 if (req->cmd_type == REQ_TYPE_BLOCK_PC)
1256 ret = scsi_setup_blk_pc_cmnd(sdev, req);
1257 return scsi_prep_return(q, req, ret);
1258}
1259EXPORT_SYMBOL(scsi_prep_fn);
1260
1261
1262
1263
1264
1265
1266
1267static inline int scsi_dev_queue_ready(struct request_queue *q,
1268 struct scsi_device *sdev)
1269{
1270 if (sdev->device_busy == 0 && sdev->device_blocked) {
1271
1272
1273
1274 if (--sdev->device_blocked == 0) {
1275 SCSI_LOG_MLQUEUE(3,
1276 sdev_printk(KERN_INFO, sdev,
1277 "unblocking device at zero depth\n"));
1278 } else {
1279 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1280 return 0;
1281 }
1282 }
1283 if (scsi_device_is_busy(sdev))
1284 return 0;
1285
1286 return 1;
1287}
1288
1289
1290
1291
1292
1293
1294
1295
1296static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1297 struct scsi_device *sdev)
1298{
1299 struct scsi_target *starget = scsi_target(sdev);
1300
1301 if (starget->single_lun) {
1302 if (starget->starget_sdev_user &&
1303 starget->starget_sdev_user != sdev)
1304 return 0;
1305 starget->starget_sdev_user = sdev;
1306 }
1307
1308 if (starget->target_busy == 0 && starget->target_blocked) {
1309
1310
1311
1312 if (--starget->target_blocked == 0) {
1313 SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1314 "unblocking target at zero depth\n"));
1315 } else
1316 return 0;
1317 }
1318
1319 if (scsi_target_is_busy(starget)) {
1320 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1321 return 0;
1322 }
1323
1324 return 1;
1325}
1326
1327
1328
1329
1330
1331
1332
1333
1334static inline int scsi_host_queue_ready(struct request_queue *q,
1335 struct Scsi_Host *shost,
1336 struct scsi_device *sdev)
1337{
1338 if (scsi_host_in_recovery(shost))
1339 return 0;
1340 if (shost->host_busy == 0 && shost->host_blocked) {
1341
1342
1343
1344 if (--shost->host_blocked == 0) {
1345 SCSI_LOG_MLQUEUE(3,
1346 printk("scsi%d unblocking host at zero depth\n",
1347 shost->host_no));
1348 } else {
1349 return 0;
1350 }
1351 }
1352 if (scsi_host_is_busy(shost)) {
1353 if (list_empty(&sdev->starved_entry))
1354 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1355 return 0;
1356 }
1357
1358
1359 if (!list_empty(&sdev->starved_entry))
1360 list_del_init(&sdev->starved_entry);
1361
1362 return 1;
1363}
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377static int scsi_lld_busy(struct request_queue *q)
1378{
1379 struct scsi_device *sdev = q->queuedata;
1380 struct Scsi_Host *shost;
1381 struct scsi_target *starget;
1382
1383 if (!sdev)
1384 return 0;
1385
1386 shost = sdev->host;
1387 starget = scsi_target(sdev);
1388
1389 if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
1390 scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
1391 return 1;
1392
1393 return 0;
1394}
1395
1396
1397
1398
1399static void scsi_kill_request(struct request *req, struct request_queue *q)
1400{
1401 struct scsi_cmnd *cmd = req->special;
1402 struct scsi_device *sdev;
1403 struct scsi_target *starget;
1404 struct Scsi_Host *shost;
1405
1406 blk_start_request(req);
1407
1408 scmd_printk(KERN_INFO, cmd, "killing request\n");
1409
1410 sdev = cmd->device;
1411 starget = scsi_target(sdev);
1412 shost = sdev->host;
1413 scsi_init_cmd_errh(cmd);
1414 cmd->result = DID_NO_CONNECT << 16;
1415 atomic_inc(&cmd->device->iorequest_cnt);
1416
1417
1418
1419
1420
1421
1422 sdev->device_busy++;
1423 spin_unlock(sdev->request_queue->queue_lock);
1424 spin_lock(shost->host_lock);
1425 shost->host_busy++;
1426 starget->target_busy++;
1427 spin_unlock(shost->host_lock);
1428 spin_lock(sdev->request_queue->queue_lock);
1429
1430 blk_complete_request(req);
1431}
1432
1433static void scsi_softirq_done(struct request *rq)
1434{
1435 struct scsi_cmnd *cmd = rq->special;
1436 unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1437 int disposition;
1438
1439 INIT_LIST_HEAD(&cmd->eh_entry);
1440
1441 atomic_inc(&cmd->device->iodone_cnt);
1442 if (cmd->result)
1443 atomic_inc(&cmd->device->ioerr_cnt);
1444
1445 disposition = scsi_decide_disposition(cmd);
1446 if (disposition != SUCCESS &&
1447 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1448 sdev_printk(KERN_ERR, cmd->device,
1449 "timing out command, waited %lus\n",
1450 wait_for/HZ);
1451 disposition = SUCCESS;
1452 }
1453
1454 scsi_log_completion(cmd, disposition);
1455
1456 switch (disposition) {
1457 case SUCCESS:
1458 scsi_finish_command(cmd);
1459 break;
1460 case NEEDS_RETRY:
1461 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1462 break;
1463 case ADD_TO_MLQUEUE:
1464 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1465 break;
1466 default:
1467 if (!scsi_eh_scmd_add(cmd, 0))
1468 scsi_finish_command(cmd);
1469 }
1470}
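
/*
 * Function:	scsi_request_fn()
 *
 * Purpose:	Main strategy routine for SCSI: pull requests off the queue,
 *		check device/target/host readiness, and dispatch commands
 *		to the low-level driver.
 *
 * Arguments:	q	- Pointer to actual queue.
 *
 * Returns:	Nothing
 *
 * Lock status:	queue lock assumed to be held when called.
 */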
1483static void scsi_request_fn(struct request_queue *q)
1484{
1485 struct scsi_device *sdev = q->queuedata;
1486 struct Scsi_Host *shost;
1487 struct scsi_cmnd *cmd;
1488 struct request *req;
1489
1490 if (!sdev) {
1491 while ((req = blk_peek_request(q)) != NULL)
1492 scsi_kill_request(req, q);
1493 return;
1494 }
1495
1496 if(!get_device(&sdev->sdev_gendev))
1497
1498 return;
1499
1500
1501
1502
1503
1504 shost = sdev->host;
1505 for (;;) {
1506 int rtn;
1507
1508
1509
1510
1511
1512 req = blk_peek_request(q);
1513 if (!req || !scsi_dev_queue_ready(q, sdev))
1514 break;
1515
1516 if (unlikely(!scsi_device_online(sdev))) {
1517 sdev_printk(KERN_ERR, sdev,
1518 "rejecting I/O to offline device\n");
1519 scsi_kill_request(req, q);
1520 continue;
1521 }
1522
1523
1524
1525
1526
1527 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1528 blk_start_request(req);
1529 sdev->device_busy++;
1530
1531 spin_unlock(q->queue_lock);
1532 cmd = req->special;
1533 if (unlikely(cmd == NULL)) {
1534 printk(KERN_CRIT "impossible request in %s.\n"
1535 "please mail a stack trace to "
1536 "linux-scsi@vger.kernel.org\n",
1537 __func__);
1538 blk_dump_rq_flags(req, "foo");
1539 BUG();
1540 }
1541 spin_lock(shost->host_lock);
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551 if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
1552 if (list_empty(&sdev->starved_entry))
1553 list_add_tail(&sdev->starved_entry,
1554 &shost->starved_list);
1555 goto not_ready;
1556 }
1557
1558 if (!scsi_target_queue_ready(shost, sdev))
1559 goto not_ready;
1560
1561 if (!scsi_host_queue_ready(q, shost, sdev))
1562 goto not_ready;
1563
1564 scsi_target(sdev)->target_busy++;
1565 shost->host_busy++;
1566
1567
1568
1569
1570
1571 spin_unlock_irq(shost->host_lock);
1572
1573
1574
1575
1576
1577 scsi_init_cmd_errh(cmd);
1578
1579
1580
1581
1582 rtn = scsi_dispatch_cmd(cmd);
1583 spin_lock_irq(q->queue_lock);
1584 if (rtn)
1585 goto out_delay;
1586 }
1587
1588 goto out;
1589
1590 not_ready:
1591 spin_unlock_irq(shost->host_lock);
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601 spin_lock_irq(q->queue_lock);
1602 blk_requeue_request(q, req);
1603 sdev->device_busy--;
1604out_delay:
1605 if (sdev->device_busy == 0)
1606 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1607out:
1608
1609
1610 spin_unlock_irq(q->queue_lock);
1611 put_device(&sdev->sdev_gendev);
1612 spin_lock_irq(q->queue_lock);
1613}
1614
1615u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1616{
1617 struct device *host_dev;
1618 u64 bounce_limit = 0xffffffff;
1619
1620 if (shost->unchecked_isa_dma)
1621 return BLK_BOUNCE_ISA;
1622
1623
1624
1625
1626 if (!PCI_DMA_BUS_IS_PHYS)
1627 return BLK_BOUNCE_ANY;
1628
1629 host_dev = scsi_get_device(shost);
1630 if (host_dev && host_dev->dma_mask)
1631 bounce_limit = *host_dev->dma_mask;
1632
1633 return bounce_limit;
1634}
1635EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1636
1637struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1638 request_fn_proc *request_fn)
1639{
1640 struct request_queue *q;
1641 struct device *dev = shost->dma_dev;
1642
1643 q = blk_init_queue(request_fn, NULL);
1644 if (!q)
1645 return NULL;
1646
1647
1648
1649
1650 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1651 SCSI_MAX_SG_CHAIN_SEGMENTS));
1652
1653 if (scsi_host_prot_dma(shost)) {
1654 shost->sg_prot_tablesize =
1655 min_not_zero(shost->sg_prot_tablesize,
1656 (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
1657 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1658 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1659 }
1660
1661 blk_queue_max_hw_sectors(q, shost->max_sectors);
1662 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1663 blk_queue_segment_boundary(q, shost->dma_boundary);
1664 dma_set_seg_boundary(dev, shost->dma_boundary);
1665
1666 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1667
1668 if (!shost->use_clustering)
1669 q->limits.cluster = 0;
1670
1671
1672
1673
1674
1675
1676 blk_queue_dma_alignment(q, 0x03);
1677
1678 return q;
1679}
1680EXPORT_SYMBOL(__scsi_alloc_queue);
1681
1682struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1683{
1684 struct request_queue *q;
1685
1686 q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1687 if (!q)
1688 return NULL;
1689
1690 blk_queue_prep_rq(q, scsi_prep_fn);
1691 blk_queue_softirq_done(q, scsi_softirq_done);
1692 blk_queue_rq_timed_out(q, scsi_times_out);
1693 blk_queue_lld_busy(q, scsi_lld_busy);
1694 return q;
1695}
1696
1697void scsi_free_queue(struct request_queue *q)
1698{
1699 unsigned long flags;
1700
1701 WARN_ON(q->queuedata);
1702
1703
1704 spin_lock_irqsave(q->queue_lock, flags);
1705 q->request_fn(q);
1706 spin_unlock_irqrestore(q->queue_lock, flags);
1707
1708 blk_cleanup_queue(q);
1709}
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727void scsi_block_requests(struct Scsi_Host *shost)
1728{
1729 shost->host_self_blocked = 1;
1730}
1731EXPORT_SYMBOL(scsi_block_requests);
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753void scsi_unblock_requests(struct Scsi_Host *shost)
1754{
1755 shost->host_self_blocked = 0;
1756 scsi_run_host_queues(shost);
1757}
1758EXPORT_SYMBOL(scsi_unblock_requests);
1759
1760int __init scsi_init_queue(void)
1761{
1762 int i;
1763
1764 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1765 sizeof(struct scsi_data_buffer),
1766 0, 0, NULL);
1767 if (!scsi_sdb_cache) {
1768 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1769 return -ENOMEM;
1770 }
1771
1772 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1773 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1774 int size = sgp->size * sizeof(struct scatterlist);
1775
1776 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1777 SLAB_HWCACHE_ALIGN, NULL);
1778 if (!sgp->slab) {
1779 printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1780 sgp->name);
1781 goto cleanup_sdb;
1782 }
1783
1784 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1785 sgp->slab);
1786 if (!sgp->pool) {
1787 printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1788 sgp->name);
1789 goto cleanup_sdb;
1790 }
1791 }
1792
1793 return 0;
1794
1795cleanup_sdb:
1796 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1797 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1798 if (sgp->pool)
1799 mempool_destroy(sgp->pool);
1800 if (sgp->slab)
1801 kmem_cache_destroy(sgp->slab);
1802 }
1803 kmem_cache_destroy(scsi_sdb_cache);
1804
1805 return -ENOMEM;
1806}
1807
1808void scsi_exit_queue(void)
1809{
1810 int i;
1811
1812 kmem_cache_destroy(scsi_sdb_cache);
1813
1814 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1815 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1816 mempool_destroy(sgp->pool);
1817 kmem_cache_destroy(sgp->slab);
1818 }
1819}
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839int
1840scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1841 unsigned char *buffer, int len, int timeout, int retries,
1842 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1843{
1844 unsigned char cmd[10];
1845 unsigned char *real_buffer;
1846 int ret;
1847
1848 memset(cmd, 0, sizeof(cmd));
1849 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
1850
1851 if (sdev->use_10_for_ms) {
1852 if (len > 65535)
1853 return -EINVAL;
1854 real_buffer = kmalloc(8 + len, GFP_KERNEL);
1855 if (!real_buffer)
1856 return -ENOMEM;
1857 memcpy(real_buffer + 8, buffer, len);
1858 len += 8;
1859 real_buffer[0] = 0;
1860 real_buffer[1] = 0;
1861 real_buffer[2] = data->medium_type;
1862 real_buffer[3] = data->device_specific;
1863 real_buffer[4] = data->longlba ? 0x01 : 0;
1864 real_buffer[5] = 0;
1865 real_buffer[6] = data->block_descriptor_length >> 8;
1866 real_buffer[7] = data->block_descriptor_length;
1867
1868 cmd[0] = MODE_SELECT_10;
1869 cmd[7] = len >> 8;
1870 cmd[8] = len;
1871 } else {
1872 if (len > 255 || data->block_descriptor_length > 255 ||
1873 data->longlba)
1874 return -EINVAL;
1875
1876 real_buffer = kmalloc(4 + len, GFP_KERNEL);
1877 if (!real_buffer)
1878 return -ENOMEM;
1879 memcpy(real_buffer + 4, buffer, len);
1880 len += 4;
1881 real_buffer[0] = 0;
1882 real_buffer[1] = data->medium_type;
1883 real_buffer[2] = data->device_specific;
1884 real_buffer[3] = data->block_descriptor_length;
1885
1886
1887 cmd[0] = MODE_SELECT;
1888 cmd[4] = len;
1889 }
1890
1891 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
1892 sshdr, timeout, retries, NULL);
1893 kfree(real_buffer);
1894 return ret;
1895}
1896EXPORT_SYMBOL_GPL(scsi_mode_select);
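
/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to six
 *		byte CDBs if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	DBD/LLBAA bits controlling whether block descriptors are
 *		returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data:	returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense is to be
 *		collected); must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns the result of scsi_execute_req(); on success the mode
 *	header fields are parsed into @data.
 */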
1915int
1916scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1917 unsigned char *buffer, int len, int timeout, int retries,
1918 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1919{
1920 unsigned char cmd[12];
1921 int use_10_for_ms;
1922 int header_length;
1923 int result;
1924 struct scsi_sense_hdr my_sshdr;
1925
1926 memset(data, 0, sizeof(*data));
1927 memset(&cmd[0], 0, 12);
1928 cmd[1] = dbd & 0x18;
1929 cmd[2] = modepage;
1930
1931
1932 if (!sshdr)
1933 sshdr = &my_sshdr;
1934
1935 retry:
1936 use_10_for_ms = sdev->use_10_for_ms;
1937
1938 if (use_10_for_ms) {
1939 if (len < 8)
1940 len = 8;
1941
1942 cmd[0] = MODE_SENSE_10;
1943 cmd[8] = len;
1944 header_length = 8;
1945 } else {
1946 if (len < 4)
1947 len = 4;
1948
1949 cmd[0] = MODE_SENSE;
1950 cmd[4] = len;
1951 header_length = 4;
1952 }
1953
1954 memset(buffer, 0, len);
1955
1956 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1957 sshdr, timeout, retries, NULL);
1958
1959
1960
1961
1962
1963
1964 if (use_10_for_ms && !scsi_status_is_good(result) &&
1965 (driver_byte(result) & DRIVER_SENSE)) {
1966 if (scsi_sense_valid(sshdr)) {
1967 if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1968 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1969
1970
1971
1972 sdev->use_10_for_ms = 0;
1973 goto retry;
1974 }
1975 }
1976 }
1977
1978 if(scsi_status_is_good(result)) {
1979 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
1980 (modepage == 6 || modepage == 8))) {
1981
1982 header_length = 0;
1983 data->length = 13;
1984 data->medium_type = 0;
1985 data->device_specific = 0;
1986 data->longlba = 0;
1987 data->block_descriptor_length = 0;
1988 } else if(use_10_for_ms) {
1989 data->length = buffer[0]*256 + buffer[1] + 2;
1990 data->medium_type = buffer[2];
1991 data->device_specific = buffer[3];
1992 data->longlba = buffer[4] & 0x01;
1993 data->block_descriptor_length = buffer[6]*256
1994 + buffer[7];
1995 } else {
1996 data->length = buffer[0] + 1;
1997 data->medium_type = buffer[1];
1998 data->device_specific = buffer[2];
1999 data->block_descriptor_length = buffer[3];
2000 }
2001 data->header_length = header_length;
2002 }
2003
2004 return result;
2005}
2006EXPORT_SYMBOL(scsi_mode_sense);
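
/**
 *	scsi_test_unit_ready - test if a unit is ready
 *	@sdev:	scsi device to test
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense; make sure it is cleared before passing it in.
 *
 *	Returns the scsi_execute_req() result; zero means the device
 *	reported ready.  For removable media, a UNIT ATTENTION sense
 *	sets sdev->changed.
 */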
2020int
2021scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2022 struct scsi_sense_hdr *sshdr_external)
2023{
2024 char cmd[] = {
2025 TEST_UNIT_READY, 0, 0, 0, 0, 0,
2026 };
2027 struct scsi_sense_hdr *sshdr;
2028 int result;
2029
2030 if (!sshdr_external)
2031 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
2032 else
2033 sshdr = sshdr_external;
2034
2035
2036 do {
2037 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2038 timeout, retries, NULL);
2039 if (sdev->removable && scsi_sense_valid(sshdr) &&
2040 sshdr->sense_key == UNIT_ATTENTION)
2041 sdev->changed = 1;
2042 } while (scsi_sense_valid(sshdr) &&
2043 sshdr->sense_key == UNIT_ATTENTION && --retries);
2044
2045 if (!sshdr_external)
2046 kfree(sshdr);
2047 return result;
2048}
2049EXPORT_SYMBOL(scsi_test_unit_ready);
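
/**
 *	scsi_device_set_state - Take the given device through the device
 *		state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if the transition is allowed (and performed), or
 *	-EINVAL if the requested transition is illegal.
 */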
2059int
2060scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2061{
2062 enum scsi_device_state oldstate = sdev->sdev_state;
2063
2064 if (state == oldstate)
2065 return 0;
2066
2067 switch (state) {
2068 case SDEV_CREATED:
2069 switch (oldstate) {
2070 case SDEV_CREATED_BLOCK:
2071 break;
2072 default:
2073 goto illegal;
2074 }
2075 break;
2076
2077 case SDEV_RUNNING:
2078 switch (oldstate) {
2079 case SDEV_CREATED:
2080 case SDEV_OFFLINE:
2081 case SDEV_QUIESCE:
2082 case SDEV_BLOCK:
2083 break;
2084 default:
2085 goto illegal;
2086 }
2087 break;
2088
2089 case SDEV_QUIESCE:
2090 switch (oldstate) {
2091 case SDEV_RUNNING:
2092 case SDEV_OFFLINE:
2093 break;
2094 default:
2095 goto illegal;
2096 }
2097 break;
2098
2099 case SDEV_OFFLINE:
2100 switch (oldstate) {
2101 case SDEV_CREATED:
2102 case SDEV_RUNNING:
2103 case SDEV_QUIESCE:
2104 case SDEV_BLOCK:
2105 break;
2106 default:
2107 goto illegal;
2108 }
2109 break;
2110
2111 case SDEV_BLOCK:
2112 switch (oldstate) {
2113 case SDEV_RUNNING:
2114 case SDEV_CREATED_BLOCK:
2115 break;
2116 default:
2117 goto illegal;
2118 }
2119 break;
2120
2121 case SDEV_CREATED_BLOCK:
2122 switch (oldstate) {
2123 case SDEV_CREATED:
2124 break;
2125 default:
2126 goto illegal;
2127 }
2128 break;
2129
2130 case SDEV_CANCEL:
2131 switch (oldstate) {
2132 case SDEV_CREATED:
2133 case SDEV_RUNNING:
2134 case SDEV_QUIESCE:
2135 case SDEV_OFFLINE:
2136 case SDEV_BLOCK:
2137 break;
2138 default:
2139 goto illegal;
2140 }
2141 break;
2142
2143 case SDEV_DEL:
2144 switch (oldstate) {
2145 case SDEV_CREATED:
2146 case SDEV_RUNNING:
2147 case SDEV_OFFLINE:
2148 case SDEV_CANCEL:
2149 break;
2150 default:
2151 goto illegal;
2152 }
2153 break;
2154
2155 }
2156 sdev->sdev_state = state;
2157 return 0;
2158
2159 illegal:
2160 SCSI_LOG_ERROR_RECOVERY(1,
2161 sdev_printk(KERN_ERR, sdev,
2162 "Illegal state transition %s->%s\n",
2163 scsi_device_state_name(oldstate),
2164 scsi_device_state_name(state))
2165 );
2166 return -EINVAL;
2167}
2168EXPORT_SYMBOL(scsi_device_set_state);
2169
2170
2171
2172
2173
2174
2175
2176
2177static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2178{
2179 int idx = 0;
2180 char *envp[3];
2181
2182 switch (evt->evt_type) {
2183 case SDEV_EVT_MEDIA_CHANGE:
2184 envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2185 break;
2186
2187 default:
2188
2189 break;
2190 }
2191
2192 envp[idx++] = NULL;
2193
2194 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2195}
2196
2197
2198
2199
2200
2201
2202
2203
2204void scsi_evt_thread(struct work_struct *work)
2205{
2206 struct scsi_device *sdev;
2207 LIST_HEAD(event_list);
2208
2209 sdev = container_of(work, struct scsi_device, event_work);
2210
2211 while (1) {
2212 struct scsi_event *evt;
2213 struct list_head *this, *tmp;
2214 unsigned long flags;
2215
2216 spin_lock_irqsave(&sdev->list_lock, flags);
2217 list_splice_init(&sdev->event_list, &event_list);
2218 spin_unlock_irqrestore(&sdev->list_lock, flags);
2219
2220 if (list_empty(&event_list))
2221 break;
2222
2223 list_for_each_safe(this, tmp, &event_list) {
2224 evt = list_entry(this, struct scsi_event, node);
2225 list_del(&evt->node);
2226 scsi_evt_emit(sdev, evt);
2227 kfree(evt);
2228 }
2229 }
2230}
2231
2232
2233
2234
2235
2236
2237
2238
2239void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2240{
2241 unsigned long flags;
2242
2243#if 0
2244
2245
2246
2247 if (!test_bit(evt->evt_type, sdev->supported_events)) {
2248 kfree(evt);
2249 return;
2250 }
2251#endif
2252
2253 spin_lock_irqsave(&sdev->list_lock, flags);
2254 list_add_tail(&evt->node, &sdev->event_list);
2255 schedule_work(&sdev->event_work);
2256 spin_unlock_irqrestore(&sdev->list_lock, flags);
2257}
2258EXPORT_SYMBOL_GPL(sdev_evt_send);
2259
2260
2261
2262
2263
2264
2265
2266
2267struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2268 gfp_t gfpflags)
2269{
2270 struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2271 if (!evt)
2272 return NULL;
2273
2274 evt->evt_type = evt_type;
2275 INIT_LIST_HEAD(&evt->node);
2276
2277
2278 switch (evt_type) {
2279 case SDEV_EVT_MEDIA_CHANGE:
2280 default:
2281
2282 break;
2283 }
2284
2285 return evt;
2286}
2287EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297void sdev_evt_send_simple(struct scsi_device *sdev,
2298 enum scsi_device_event evt_type, gfp_t gfpflags)
2299{
2300 struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2301 if (!evt) {
2302 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2303 evt_type);
2304 return;
2305 }
2306
2307 sdev_evt_send(sdev, evt);
2308}
2309EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
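
/*
 * Hypothetical usage sketch (not part of this file): a driver that detects
 * a media change, e.g. from an interrupt handler, can signal it with:
 *
 *	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
 *
 * The event is queued on the device's event list and later emitted as a
 * KOBJ_CHANGE uevent from scsi_evt_thread() in process context.
 */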
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326int
2327scsi_device_quiesce(struct scsi_device *sdev)
2328{
2329 int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2330 if (err)
2331 return err;
2332
2333 scsi_run_queue(sdev->request_queue);
2334 while (sdev->device_busy) {
2335 msleep_interruptible(200);
2336 scsi_run_queue(sdev->request_queue);
2337 }
2338 return 0;
2339}
2340EXPORT_SYMBOL(scsi_device_quiesce);
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351void
2352scsi_device_resume(struct scsi_device *sdev)
2353{
2354 if(scsi_device_set_state(sdev, SDEV_RUNNING))
2355 return;
2356 scsi_run_queue(sdev->request_queue);
2357}
2358EXPORT_SYMBOL(scsi_device_resume);
2359
2360static void
2361device_quiesce_fn(struct scsi_device *sdev, void *data)
2362{
2363 scsi_device_quiesce(sdev);
2364}
2365
2366void
2367scsi_target_quiesce(struct scsi_target *starget)
2368{
2369 starget_for_each_device(starget, NULL, device_quiesce_fn);
2370}
2371EXPORT_SYMBOL(scsi_target_quiesce);
2372
2373static void
2374device_resume_fn(struct scsi_device *sdev, void *data)
2375{
2376 scsi_device_resume(sdev);
2377}
2378
2379void
2380scsi_target_resume(struct scsi_target *starget)
2381{
2382 starget_for_each_device(starget, NULL, device_resume_fn);
2383}
2384EXPORT_SYMBOL(scsi_target_resume);
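
/**
 * scsi_internal_device_block - block a scsi device temporarily
 * @sdev:	device to block
 *
 * Transitions the device to the SDEV_BLOCK state (or SDEV_CREATED_BLOCK
 * if it has not finished being created) and stops its request queue, so
 * no further commands are dispatched until the device is unblocked with
 * scsi_internal_device_unblock().  May be called from interrupt or
 * process context.
 *
 * Returns zero if successful or an error if the state transition fails.
 */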
2403int
2404scsi_internal_device_block(struct scsi_device *sdev)
2405{
2406 struct request_queue *q = sdev->request_queue;
2407 unsigned long flags;
2408 int err = 0;
2409
2410 err = scsi_device_set_state(sdev, SDEV_BLOCK);
2411 if (err) {
2412 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2413
2414 if (err)
2415 return err;
2416 }
2417
2418
2419
2420
2421
2422
2423 spin_lock_irqsave(q->queue_lock, flags);
2424 blk_stop_queue(q);
2425 spin_unlock_irqrestore(q->queue_lock, flags);
2426
2427 return 0;
2428}
2429EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447int
2448scsi_internal_device_unblock(struct scsi_device *sdev)
2449{
2450 struct request_queue *q = sdev->request_queue;
2451 unsigned long flags;
2452
2453
2454
2455
2456
2457 if (sdev->sdev_state == SDEV_BLOCK)
2458 sdev->sdev_state = SDEV_RUNNING;
2459 else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
2460 sdev->sdev_state = SDEV_CREATED;
2461 else if (sdev->sdev_state != SDEV_CANCEL &&
2462 sdev->sdev_state != SDEV_OFFLINE)
2463 return -EINVAL;
2464
2465 spin_lock_irqsave(q->queue_lock, flags);
2466 blk_start_queue(q);
2467 spin_unlock_irqrestore(q->queue_lock, flags);
2468
2469 return 0;
2470}
2471EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2472
2473static void
2474device_block(struct scsi_device *sdev, void *data)
2475{
2476 scsi_internal_device_block(sdev);
2477}
2478
2479static int
2480target_block(struct device *dev, void *data)
2481{
2482 if (scsi_is_target_device(dev))
2483 starget_for_each_device(to_scsi_target(dev), NULL,
2484 device_block);
2485 return 0;
2486}
2487
2488void
2489scsi_target_block(struct device *dev)
2490{
2491 if (scsi_is_target_device(dev))
2492 starget_for_each_device(to_scsi_target(dev), NULL,
2493 device_block);
2494 else
2495 device_for_each_child(dev, NULL, target_block);
2496}
2497EXPORT_SYMBOL_GPL(scsi_target_block);
2498
2499static void
2500device_unblock(struct scsi_device *sdev, void *data)
2501{
2502 scsi_internal_device_unblock(sdev);
2503}
2504
2505static int
2506target_unblock(struct device *dev, void *data)
2507{
2508 if (scsi_is_target_device(dev))
2509 starget_for_each_device(to_scsi_target(dev), NULL,
2510 device_unblock);
2511 return 0;
2512}
2513
2514void
2515scsi_target_unblock(struct device *dev)
2516{
2517 if (scsi_is_target_device(dev))
2518 starget_for_each_device(to_scsi_target(dev), NULL,
2519 device_unblock);
2520 else
2521 device_for_each_child(dev, NULL, target_unblock);
2522}
2523EXPORT_SYMBOL_GPL(scsi_target_unblock);
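
/**
 * scsi_kmap_atomic_sg - find and atomically map an sg element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in the sg list
 * @offset:	offset in bytes into the sg list; on return, offset into
 *		the mapped page
 * @len:	bytes to map; on return, number of bytes actually mapped
 *
 * Returns the virtual address of the start of the mapped page, or NULL
 * if @offset lies beyond the end of the list.  Unmap with
 * scsi_kunmap_atomic_sg().
 */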
2534void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2535 size_t *offset, size_t *len)
2536{
2537 int i;
2538 size_t sg_len = 0, len_complete = 0;
2539 struct scatterlist *sg;
2540 struct page *page;
2541
2542 WARN_ON(!irqs_disabled());
2543
2544 for_each_sg(sgl, sg, sg_count, i) {
2545 len_complete = sg_len;
2546 sg_len += sg->length;
2547 if (sg_len > *offset)
2548 break;
2549 }
2550
2551 if (unlikely(i == sg_count)) {
2552 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2553 "elements %d\n",
2554 __func__, sg_len, *offset, sg_count);
2555 WARN_ON(1);
2556 return NULL;
2557 }
2558
2559
2560 *offset = *offset - len_complete + sg->offset;
2561
2562
2563 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2564 *offset &= ~PAGE_MASK;
2565
2566
2567 sg_len = PAGE_SIZE - *offset;
2568 if (*len > sg_len)
2569 *len = sg_len;
2570
2571 return kmap_atomic(page);
2572}
2573EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2574
2575
2576
2577
2578
2579void scsi_kunmap_atomic_sg(void *virt)
2580{
2581 kunmap_atomic(virt);
2582}
2583EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
2584