/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>

static bool acpi_scsi_bus_match(struct device *dev)
{
	return dev->bus == &scsi_bus_type;
}

int scsi_register_acpi_bus_type(struct acpi_bus_type *bus)
{
	bus->match = acpi_scsi_bus_match;
	return register_acpi_bus_type(bus);
}
EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type);

void scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus)
{
	unregister_acpi_bus_type(bus);
}
EXPORT_SYMBOL_GPL(scsi_unregister_acpi_bus_type);
#endif

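/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
 * not change behaviour from the previous unplug mechanism, experimentation
 * may prove this needs changing.
 */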
#define SCSI_QUEUE_DELAY	3

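/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */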
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	blk_unprep_request(req);
	req->special = NULL;

	scsi_put_command(cmd);
}

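/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason:  The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */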
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue. Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	kblockd_schedule_work(q, &device->requeue_work);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

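/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either we have a stalled
 *              queue, and we want to delay processing until the queue is
 *              unstalled, or we have a command that we cannot handle, and
 *              we want to requeue the command, with it being executed again
 *              some time later.
 *
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */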
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, 1);
}

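/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout, passed through to the block layer
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 * @resid:	optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 *
 * Illustrative example (hypothetical caller, not taken from this file):
 *
 *	unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	int ret = scsi_execute(sdev, cdb, DMA_NONE, NULL, 0, NULL,
 *			       30 * HZ, 3, 0, NULL);
 */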
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
	if (!req)
		return ret;

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);

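/**
 * scsi_execute_req_flags - scsi_execute() with sense-data normalization
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sshdr:	optional place to put the decoded sense header
 * @timeout:	request timeout, passed through to the block layer
 * @retries:	number of times to retry request
 * @resid:	optional residual length
 * @flags:	or into request flags
 *
 * Wrapper around scsi_execute() that allocates a sense buffer when the
 * caller wants decoded sense data, and normalizes it into @sshdr.
 */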
int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid, int flags)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, flags, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req_flags);

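/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */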
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

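/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */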
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be the first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
		return 1;

	return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}

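/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */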
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	LIST_HEAD(starved_list);
	unsigned long flags;

	shost = sdev->host;
	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_cleanup_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_run_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

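/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */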
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * We need to hold a reference on the device to avoid the queue being
	 * killed after the unlock and before scsi_run_queue is invoked which
	 * may happen because scsi_unprep_request() puts the command which
	 * releases its reference on the device.
	 */
	get_device(&sdev->sdev_gendev);

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	put_device(&sdev->sdev_gendev);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);

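/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */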
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		/* kill remainder if no retries */
		if (error && scsi_noretry_cmd(cmd))
			blk_end_request_all(req, error);
		else {
			if (requeue) {
				/*
				 * Leftovers again.  Stick the leftovers in
				 * the front of the queue, and goose the
				 * queue again.
				 */
				scsi_release_buffers(cmd);
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_buffers(cmd, 0);
	scsi_next_command(cmd);
	return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}

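/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table.
 */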
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	__scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);

static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
	int error = 0;

	switch (host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		error = -ENOLINK;
		break;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EREMOTEIO;
		break;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EBADE;
		break;
	default:
		error = -EIO;
		break;
	}

	return error;
}

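/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       We will finish off the specified number of sectors.  If we
 *		are done, the command block will be released and the queue
 *		function will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */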
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	char *description = NULL;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = __scsi_error_from_host_byte(cmd, result);
		}
		/*
		 * __scsi_error_from_host_byte may have reset the host_byte
		 */
		req->errors = cmd->result;

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands Must be complete as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;

			scsi_release_buffers(cmd);
			blk_end_request_all(req, 0);

			scsi_next_command(cmd);
			return;
		}
	}

	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
				      "%d bytes done.\n",
				      blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense("", cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;

	error = __scsi_error_from_host_byte(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				description = "Media Changed";
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could be a persistent
				 * reservation conflict.  Retry the
				 * command once more.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) { /* DIX */
				description = "Host Data Integrity Failure";
				action = ACTION_FAIL;
				error = -EILSEQ;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				switch (cmd->cmnd[0]) {
				case UNMAP:
					description = "Discard failure";
					break;
				case WRITE_SAME:
				case WRITE_SAME_16:
					if (cmd->cmnd[1] & 0x8)
						description = "Discard failure";
					else
						description =
							"Write same failure";
					break;
				default:
					description = "Invalid command failure";
					break;
				}
				action = ACTION_FAIL;
				error = -EREMOTEIO;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) { /* DIF */
				description = "Target Data Integrity Failure";
				error = -EILSEQ;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					description = "Device not ready";
					action = ACTION_FAIL;
					break;
				}
			} else {
				description = "Device not ready";
				action = ACTION_FAIL;
			}
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			description = "Unhandled sense code";
			action = ACTION_FAIL;
			break;
		}
	} else {
		description = "Unhandled error code";
		action = ACTION_FAIL;
	}

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		scsi_release_buffers(cmd);
		if (!(req->cmd_flags & REQ_QUIET)) {
			if (description)
				scmd_printk(KERN_INFO, cmd, "%s\n",
					    description);
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
			scsi_print_command(cmd);
		}
		if (blk_end_request_err(req, error))
			scsi_requeue_command(q, cmd);
		else
			scsi_next_command(cmd);
		break;
	case ACTION_REPREP:
		/* Unprep the request and put it back at the head
		 * of the queue. A new command will be prepared
		 * and issued.
		 */
		scsi_release_buffers(cmd);
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}

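/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */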
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct request *rq = cmd->request;

	int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		rq->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	cmd->request->special = NULL;
	scsi_put_command(cmd);
	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	cmd->cmd_len = req->cmd_len;
	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

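/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write request
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */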
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
		 */
		if (sdev->device_busy == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}
EXPORT_SYMBOL(scsi_prep_fn);

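/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */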
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
			return 0;
		}
	}
	if (scsi_device_is_busy(sdev))
		return 0;

	return 1;
}

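/*
 * scsi_target_queue_ready: checks if we can send commands to target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */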
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	if (starget->single_lun) {
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev)
			return 0;
		starget->starget_sdev_user = sdev;
	}

	if (starget->target_busy == 0 && starget->target_blocked) {
		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (--starget->target_blocked == 0) {
			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
					 "unblocking target at zero depth\n"));
		} else
			return 0;
	}

	if (scsi_target_is_busy(starget)) {
		list_move_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	return 1;
}

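/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must be holding shost->host_lock.
 */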
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			return 0;
		}
	}
	if (scsi_host_is_busy(shost)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

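/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
 * needs to return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */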
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dying(q))
		return 0;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in a better way than passing on the busy state to each queue.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

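/*
 * Kill a request for a dead device
 */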
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	scmd_printk(KERN_INFO, cmd, "killing request\n");

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	starget->target_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}

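/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */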
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	for (;;) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;

		scsi_target(sdev)->target_busy++;
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn)
			goto out_delay;
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
out_delay:
	if (sdev->device_busy == 0)
		blk_delay_queue(q, SCSI_QUEUE_DELAY);
out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;
	struct device *dev = shost->dma_dev;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SCSI_MAX_SG_CHAIN_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	if (!shost->use_clustering)
		q->limits.cluster = 0;

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);

	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}

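/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */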
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

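/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       Clears the self-blocked flag and reruns all queues on the
 *		host so that deferred requests get another chance to run.
 */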
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

int __init scsi_init_queue(void)
{
	int i;

	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}
	kmem_cache_destroy(scsi_sdb_cache);

	return -ENOMEM;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_sdb_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

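/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 */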
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);

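/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; the SCSI command result otherwise.
 */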
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/*
	 * If the device does not support MODE SENSE(10) it signals an
	 * ILLEGAL REQUEST with "invalid command operation code" sense;
	 * fall back to the six-byte MODE SENSE and retry.
	 */
	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);

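/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense. Make sure that this is cleared before passing
 *		in.
 *
 *	Returns zero if unsuccessful or an error if TUR failed.  For
 *	removable media, UNIT_ATTENTION sets ->changed flag.
 **/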
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

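/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if unsuccessful or an error if the requested
 *	transition is illegal.
 **/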
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_CANCEL:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s\n",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);

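/**
 * 	scsi_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */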
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;

	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

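/**
 * 	scsi_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */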
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

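/**
 * 	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */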
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

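/**
 * 	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event.
 */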
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

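/**
 * 	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type.
 */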
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);

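/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if unsuccessful or an error if not.
 **/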
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

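/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/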
void scsi_device_resume(struct scsi_device *sdev)
{
	/* check if the device state can be changed; a resume without a
	 * matching quiesce is a no-op.
	 */
	if (sdev->sdev_state != SDEV_QUIESCE ||
	    scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

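/**
 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 */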
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK, stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

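/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	or to one of the offline states (which must be a legal transition)
 *	allowing the midlayer to goose the queue for this device.
 */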
int
scsi_internal_device_unblock(struct scsi_device *sdev,
			     enum scsi_device_state new_state)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING or one of the
	 * offlined states and goose the device queue if successful.
	 */
	if ((sdev->sdev_state == SDEV_BLOCK) ||
	    (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
		sdev->sdev_state = new_state;
	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
		if (new_state == SDEV_TRANSPORT_OFFLINE ||
		    new_state == SDEV_OFFLINE)
			sdev->sdev_state = new_state;
		else
			sdev->sdev_state = SDEV_CREATED;
	} else if (sdev->sdev_state != SDEV_CANCEL &&
		 sdev->sdev_state != SDEV_OFFLINE)
		return -EINVAL;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), data,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), &new_state,
					device_unblock);
	else
		device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);

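/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */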
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

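/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */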
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);

void sdev_disable_disk_events(struct scsi_device *sdev)
{
	atomic_inc(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_disable_disk_events);

void sdev_enable_disk_events(struct scsi_device *sdev)
{
	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
		return;
	atomic_dec(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_enable_disk_events);