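/*
 * scsi_lib.c - SCSI mid-layer queueing library: command setup and
 * dispatch, completion handling, scatter-gather table management, and
 * device/host state handling.
 */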
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

/*
 * The maximum number of SG segments that we will put inside a
 * scatterlist (unless chaining is used).  Should ideally fit inside a
 * single page, to avoid a higher order allocation.
 */
#define SCSI_MAX_SG_SEGMENTS	128

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 16)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(128),
#endif
#endif
#endif
};
#undef SP

static void scsi_run_queue(struct request_queue *q);
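
/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove everything prepared by scsi_prep_fn(); clear
 *		REQ_DONTPREP and release the command attached to the request.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */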
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}
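
/*
 * Function:	scsi_queue_insert()
 *
 * Purpose:	Insert a command in the midlevel queue.
 *
 * Arguments:	cmd	- command that we are adding to queue.
 *		reason	- why we are inserting command to queue.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	Zero; the command is requeued and the queue is rerun.
 *
 * Notes:	We do this for one of two cases: either the host is busy
 *		and it cannot accept any more commands for the time being,
 *		or the device returned QUEUE_FULL and can accept no more
 *		commands.
 */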
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.  While it is
	 * non-zero, new commands for this device/host are deferred by
	 * scsi_dev_queue_ready()/scsi_host_queue_ready().
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  blk_requeue_request() puts it back at
	 * the head of the queue, so it goes before all other commands
	 * that are already in the queue.  Then kick the queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
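
/**
 * scsi_execute - insert a request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	command timeout
 * @retries:	number of times to retry request
 * @flags:	flags or'd into the request flags
 *
 * Returns the req->errors value, which holds the scsi_cmnd result field.
 **/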
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
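
/*
 * scsi_execute_req - like scsi_execute(), but any sense data returned by
 * the device is normalized into *@sshdr.  A temporary sense buffer is
 * allocated only when @sshdr is supplied; see scsi_test_unit_ready()
 * below for a typical caller.
 */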
int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);

struct scsi_io_context {
	void *data;
	void (*done)(void *data, char *sense, int result, int resid);
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static struct kmem_cache *scsi_io_context_cache;

static void scsi_end_async(struct request *req, int uptodate)
{
	struct scsi_io_context *sioc = req->end_io_data;

	if (sioc->done)
		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);

	kmem_cache_free(scsi_io_context_cache, sioc);
	__blk_put_request(req->q, req);
}

static int scsi_merge_bio(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);
	blk_queue_bounce(q, &bio);

	return blk_rq_append_bio(q, rq, bio);
}

static void scsi_bi_endio(struct bio *bio, int error)
{
	bio_put(bio);
}
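
/**
 * scsi_req_map_sg - map a scatterlist into a request
 * @rq:		request to fill
 * @sgl:	area to map
 * @nsegs:	number of elements
 * @bufflen:	len of buffer
 * @gfp:	memory allocation flags
 *
 * scsi_req_map_sg maps a scatterlist into a request so that the
 * request can be sent to the block layer.  We do not trust the
 * scatterlist sent to us, as some ULDs use that struct to only
 * organize the pages.
 */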
static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
			   int nsegs, unsigned bufflen, gfp_t gfp)
{
	struct request_queue *q = rq->q;
	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int data_len = bufflen, len, bytes, off;
	struct scatterlist *sg;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	for_each_sg(sgl, sg, nsegs, i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;

		while (len > 0 && data_len > 0) {
			/*
			 * sg sends a scatterlist that is larger than
			 * the data_len it wants transferred for certain
			 * IO sizes.
			 */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			if (!bio) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;

				bio = bio_alloc(gfp, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsi_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
			    bytes) {
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				err = scsi_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = bufflen;
	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put in case it was bounced
		 */
		bio_endio(bio, 0);
	}

	return err;
}
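
/**
 * scsi_execute_async - insert request without waiting for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @cmd_len:	length of scsi cdb
 * @data_direction: data direction
 * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
 * @bufflen:	len of buffer
 * @use_sg:	if buffer is a scatterlist this is the number of elements
 * @timeout:	request timeout
 * @retries:	number of times to retry request
 * @privdata:	data passed to done()
 * @done:	callback function when done
 * @gfp:	memory allocation flags
 */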
int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
		       int cmd_len, int data_direction, void *buffer,
		       unsigned bufflen, int use_sg, int timeout, int retries,
		       void *privdata, void (*done)(void *, char *, int, int),
		       gfp_t gfp)
{
	struct request *req;
	struct scsi_io_context *sioc;
	int err = 0;
	int write = (data_direction == DMA_TO_DEVICE);

	sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
	if (!sioc)
		return DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, gfp);
	if (!req)
		goto free_sense;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;

	if (use_sg)
		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
	else if (bufflen)
		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);

	if (err)
		goto free_req;

	req->cmd_len = cmd_len;
	memset(req->cmd, 0, BLK_MAX_CDB);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sioc->sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->retries = retries;
	req->end_io_data = sioc;

	sioc->data = privdata;
	sioc->done = done;

	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
	return 0;

free_req:
	blk_put_request(req);
free_sense:
	kmem_cache_free(scsi_io_context_cache, sioc);
	return DRIVER_ERROR << 24;
}
EXPORT_SYMBOL_GPL(scsi_execute_async);
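
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */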
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	cmd->resid = 0;
	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
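
/*
 * Called for single_lun devices on IO completion.  Clear
 * starget_sdev_user, and call blk_run_queue for all the scsi_devices
 * on the target - including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */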
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Run the queue for the current device first, with no locks held.
	 * If another device on this target claims starget_sdev_user in
	 * the meantime, we bail out below.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
				 same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}
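
/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */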
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue.  scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list; host_lock protects the starved_list
		 * and starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		/* guard against queue recursion via QUEUE_FLAG_REENTER */
		if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
		    !test_and_set_bit(QUEUE_FLAG_REENTER,
				      &sdev->request_queue->queue_flags)) {
			blk_run_queue(sdev->request_queue);
			clear_bit(QUEUE_FLAG_REENTER,
				  &sdev->request_queue->queue_flags);
		} else
			blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list.  This is unlikely but without
			 * this in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}
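
/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		the main reason is I/O errors in the middle of the
 *		request, in which case we need to request the blocks
 *		that come after the bad sector.
 *
 * Notes:	Upon return, cmd is a stale pointer.
 */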
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}
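
/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */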
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req, uptodate);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}

/*
 * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining.  The
 * limit is fairly arbitrary; 2048 segments of 4 KB pages allows at
 * least 8 MB per IO.
 */
#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	switch (nents) {
	case 1 ... 8:
		index = 0;
		break;
	case 9 ... 16:
		index = 1;
		break;
#if (SCSI_MAX_SG_SEGMENTS > 16)
	case 17 ... 32:
		index = 2;
		break;
#if (SCSI_MAX_SG_SEGMENTS > 32)
	case 33 ... 64:
		index = 3;
		break;
#if (SCSI_MAX_SG_SEGMENTS > 64)
	case 65 ... 128:
		index = 4;
		break;
#endif
#endif
#endif
	default:
		printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
		BUG();
	}

	return index;
}
struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl, *prev, *ret;
	unsigned int index;
	int this, left;

	BUG_ON(!cmd->use_sg);

	left = cmd->use_sg;
	ret = prev = NULL;
	do {
		this = left;
		if (this > SCSI_MAX_SG_SEGMENTS) {
			this = SCSI_MAX_SG_SEGMENTS - 1;
			index = SG_MEMPOOL_NR - 1;
		} else
			index = scsi_sgtable_index(this);

		left -= this;

		sgp = scsi_sg_pools + index;

		sgl = mempool_alloc(sgp->pool, gfp_mask);
		if (unlikely(!sgl))
			goto enomem;

		sg_init_table(sgl, sgp->size);

		/*
		 * first loop through, set initial index and return value
		 */
		if (!ret)
			ret = sgl;

		/*
		 * chain previous sglist, if any.  we know the previous
		 * sglist must be the biggest one, or we would not have
		 * anything to chain.
		 */
		if (prev)
			sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);

		/*
		 * if we have nothing left, mark the last segment as
		 * end-of-list
		 */
		if (!left)
			sg_mark_end(&sgl[this - 1]);

		/*
		 * don't allow subsequent mempool allocs to sleep, it would
		 * violate the mempool principle.
		 */
		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prev = sgl;
	} while (left);

	/*
	 * ->use_sg may get modified after dma mapping has potentially
	 * shrunk the number of segments, so keep a copy of it for free.
	 */
	cmd->__use_sg = cmd->use_sg;
	return ret;
enomem:
	if (ret) {
		/*
		 * Free entries chained off ret.  All chained pieces came
		 * from the biggest pool; fetch each chain pointer before
		 * freeing the piece that contains it.
		 */
		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
		sgl = sg_chain_ptr(&ret[SCSI_MAX_SG_SEGMENTS - 1]);
		while (sgl) {
			struct scatterlist *next =
				sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);

			mempool_free(sgl, sgp->pool);
			sgl = next;
		}

		mempool_free(ret, sgp->pool);
	}
	return NULL;
}

EXPORT_SYMBOL(scsi_alloc_sgtable);

void scsi_free_sgtable(struct scsi_cmnd *cmd)
{
	struct scatterlist *sgl = cmd->request_buffer;
	struct scsi_host_sg_pool *sgp;

	/*
	 * if this is the biggest size sglist, check if we have
	 * chained parts we need to free
	 */
	if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
		unsigned short this, left;
		struct scatterlist *next;
		unsigned int index;

		left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
		next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
		while (left && next) {
			sgl = next;
			this = left;
			if (this > SCSI_MAX_SG_SEGMENTS) {
				this = SCSI_MAX_SG_SEGMENTS - 1;
				index = SG_MEMPOOL_NR - 1;
			} else
				index = scsi_sgtable_index(this);

			left -= this;

			sgp = scsi_sg_pools + index;

			if (left)
				next = sg_chain_ptr(&sgl[sgp->size - 1]);

			mempool_free(sgl, sgp->pool);
		}

		/*
		 * Restore original, will be freed below
		 */
		sgl = cmd->request_buffer;
		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
	} else
		sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);

	mempool_free(sgl, sgp->pool);
}

EXPORT_SYMBOL(scsi_free_sgtable);
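
/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Free resources allocated for a command by scsi_init_io().
 *
 * Arguments:   cmd	- command whose buffers we are releasing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		I/O initialization, primarily the scatter-gather table.
 */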
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->use_sg)
		scsi_free_sgtable(cmd);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}
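
/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd        - command that is finished.
 *              good_bytes - number of bytes of completed I/O
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       We will finish off the specified number of sectors.  If we
 *		are done, the command block will be released and the queue
 *		function will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request will be
 *		   unprepared and put back on the queue.  Then a new command
 *		   will be created for it.  This should be used if we made
 *		   forward progress, or if we want to switch from READ(10)
 *		   to READ(6) for example.
 *
 *		b) We can call scsi_end_request().  The request will be
 *		   finished off, even if no bytes were successfully
 *		   transferred.  This should be used if the request is
 *		   unrepairable and we are done with it.
 */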
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	int this_count = cmd->request_bufflen;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	scsi_release_buffers(cmd);

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		}
		req->data_len = cmd->resid;
	}

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
				      "%d bytes done.\n",
				      req->nr_sectors, good_bytes));
	SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

	if (clear_errors)
		req->errors = 0;

	/* A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
		return;

	/* good_bytes = 0, or (inclusive) there were leftovers and
	 * result = 0, so scsi_end_request couldn't retry.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/* This will cause a retry with a
				 * 6-byte command.
				 */
				scsi_requeue_command(q, cmd);
				return;
			} else {
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* long write in progress */
				case 0x09: /* self test in progress */
					scsi_requeue_command(q, cmd);
					return;
				default:
					break;
				}
			}
			if (!(req->cmd_flags & REQ_QUIET))
				scsi_cmd_print_sense_hdr(cmd,
							 "Device not ready",
							 &sshdr);

			scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			if (!(req->cmd_flags & REQ_QUIET)) {
				scmd_printk(KERN_INFO, cmd,
					    "Volume overflow, CDB: ");
				__scsi_print_command(cmd->cmnd);
				scsi_print_sense("", cmd);
			}
			scsi_end_request(cmd, 0, this_count, 1);
			return;
		default:
			break;
		}
	}
	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the request and see what
		 * happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->cmd_flags & REQ_QUIET)) {
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
	}
	scsi_end_request(cmd, 0, this_count, !result);
}
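
/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */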
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	int count;

	/*
	 * We used to not use scatter-gather for single segment request,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages).
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!cmd->request_buffer)) {
		scsi_unprep_request(req);
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	else
		cmd->request_bufflen = req->nr_sectors << 9;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return BLKPREP_OK;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
			req->current_nr_sectors);

	return BLKPREP_KILL;
}

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
					       struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI
	 * command that does not transfer data, in which case they may
	 * optionally submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(req->data_len);
		BUG_ON(req->data);

		cmd->request_bufflen = 0;
		cmd->request_buffer = NULL;
		cmd->use_sg = 0;
		req->buffer = NULL;
	}

	BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
	cmd->cmd_len = req->cmd_len;
	if (!req->data_len)
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = req->data_len;
	cmd->allowed = req->retries;
	cmd->timeout_per_command = req->timeout;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs by
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	return scsi_init_io(cmd);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * requests must fail.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the elv_next_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}
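
/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */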
static inline int scsi_dev_queue_ready(struct request_queue *q,
				       struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				sdev_printk(KERN_INFO, sdev,
				"unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}
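
/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0.  We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */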
static inline int scsi_host_queue_ready(struct request_queue *q,
					struct Scsi_Host *shost,
					struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}
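
/*
 * Kill a request for a dead device
 */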
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct Scsi_Host *shost;

	blkdev_dequeue_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
		       __FUNCTION__);
		BUG();
	}

	sdev = cmd->device;
	shost = sdev->host;

	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	__scsi_done(cmd);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->completion_data;
	unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}
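
/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */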
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __FUNCTION__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (sdev->single_lun) {
			if (scsi_target(sdev)->starget_sdev_user &&
			    scsi_target(sdev)->starget_sdev_user != sdev)
				goto not_ready;
			scsi_target(sdev)->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy.  We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_hw_segments(q, shost->sg_tablesize);

	/*
	 * In the future, sg chaining support will be mandatory and this
	 * ifdef can then go away.  Right now we don't have all archs
	 * converted, so better keep it safe.
	 */
#ifdef ARCH_HAS_SG_CHAIN
	if (shost->use_sg_chaining)
		blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
	else
		blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
#else
	blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
#endif

	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}
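
/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */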
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);
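
/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       Clears host_self_blocked and reruns all queues on the host
 *		so that deferred commands are dispatched again.
 */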
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

int __init scsi_init_queue(void)
{
	int i;

	scsi_io_context_cache = kmem_cache_create("scsi_io_context",
					sizeof(struct scsi_io_context),
					0, 0, NULL);
	if (!scsi_io_context_cache) {
		printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
		}
	}

	return 0;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_io_context_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}
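
/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error.
 **/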
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);
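
/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to
 *		six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns the scsi_execute_req() result; on success, @data holds the
 *	parsed mode parameter header.
 **/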
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);

int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr sshdr;
	int result;

	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
				  timeout, retries);

	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {

		if ((scsi_sense_valid(&sshdr)) &&
		    ((sshdr.sense_key == UNIT_ATTENTION) ||
		     (sshdr.sense_key == NOT_READY))) {
			sdev->changed = 1;
			result = 0;
		}
	}
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
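
/**
 *	scsi_device_set_state - Take the given device through the device
 *		state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if unsuccessful or an error if the requested
 *	transition is illegal.
 **/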
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s\n",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
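
/**
 * 	scsi_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */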
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;

	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}
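
/**
 * 	scsi_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */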
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}
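
/**
 * 	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */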
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);
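
/**
 * 	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event.
 */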
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);
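
/**
 * 	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type;
 *	e.g. sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL).
 */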
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
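
/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if unsuccessful or an error if not.
 **/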
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);
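
/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/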
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
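
/**
 * scsi_internal_device_block - internal function to put a device
 *				temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 *	Block request made by scsi lld's to temporarily stop all
 *	scsi commands on the specified device.  May be called from
 *	interrupt or normal process context.
 *
 *	Returns zero if successful or error if not.
 *
 *	Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_internal_device_unblock().
 **/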
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err)
		return err;

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);
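
/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 *	Called by scsi lld's or the midlayer to restart the device queue
 *	for the previously suspended scsi device.  May be called from
 *	interrupt or normal process context.
 *
 *	Returns zero if successful or error if not.
 *
 *	Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.
 **/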
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	err = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (err)
		return err;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
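
/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */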
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __FUNCTION__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);
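
/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously
 *			   mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */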
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);