/*
 * SCSI queueing library: request setup, dispatch, completion and queue
 * management for the SCSI midlayer, covering both the legacy
 * request_fn path and blk-mq.
 */
10
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { .size = x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

#define SCSI_QUEUE_DELAY	3
84
85static void
86scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
87{
88 struct Scsi_Host *host = cmd->device->host;
89 struct scsi_device *device = cmd->device;
90 struct scsi_target *starget = scsi_target(device);
91
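	/*
	 * Set the appropriate busy bit for the device, target or host,
	 * depending on the requeue reason.  The command will be retried
	 * once the corresponding *_blocked count has drained in the
	 * *_queue_ready() checks.
	 */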
105 switch (reason) {
106 case SCSI_MLQUEUE_HOST_BUSY:
107 atomic_set(&host->host_blocked, host->max_host_blocked);
108 break;
109 case SCSI_MLQUEUE_DEVICE_BUSY:
110 case SCSI_MLQUEUE_EH_RETRY:
111 atomic_set(&device->device_blocked,
112 device->max_device_blocked);
113 break;
114 case SCSI_MLQUEUE_TARGET_BUSY:
115 atomic_set(&starget->target_blocked,
116 starget->max_target_blocked);
117 break;
118 }
119}
120
121static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
122{
123 struct scsi_device *sdev = cmd->device;
124 struct request_queue *q = cmd->request->q;
125
126 blk_mq_requeue_request(cmd->request);
127 blk_mq_kick_requeue_list(q);
128 put_device(&sdev->sdev_gendev);
129}
130
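/*
 * __scsi_queue_insert - private queue insertion
 * @cmd:	The SCSI command being requeued
 * @reason:	The reason for the requeue
 * @unbusy:	Whether the device should be unbusied first
 *
 * Marks the device/target/host blocked according to @reason and puts the
 * command back on the request queue (blk-mq or legacy) to be retried.
 */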
143static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
144{
145 struct scsi_device *device = cmd->device;
146 struct request_queue *q = device->request_queue;
147 unsigned long flags;
148
149 SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
150 "Inserting command %p into mlqueue\n", cmd));
151
152 scsi_set_blocked(cmd, reason);
153
154
155
156
157
158 if (unbusy)
159 scsi_device_unbusy(device);
160
161
162
163
164
165
166
167 cmd->result = 0;
168 if (q->mq_ops) {
169 scsi_mq_requeue_cmd(cmd);
170 return;
171 }
172 spin_lock_irqsave(q->queue_lock, flags);
173 blk_requeue_request(q, cmd->request);
174 kblockd_schedule_work(&device->requeue_work);
175 spin_unlock_irqrestore(q->queue_lock, flags);
176}
177
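/*
 * scsi_queue_insert - insert a command into the mid-level queue
 * @cmd:	The SCSI command being requeued
 * @reason:	The reason for the requeue
 *
 * Public wrapper around __scsi_queue_insert() that always unbusies the
 * device, since it is called before the command has been completed.
 */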
197void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
198{
199 __scsi_queue_insert(cmd, reason, 1);
200}
201
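/*
 * scsi_execute - insert a request, wait for completion
 * @sdev:	scsi device on which to issue the command
 * @cmd:	CDB to send
 * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE or DMA_NONE
 * @buffer:	data buffer, may be NULL
 * @bufflen:	length of @buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout
 * @retries:	number of retries before failing
 * @flags:	additional request cmd_flags
 * @resid:	optional residual length
 *
 * Returns req->errors, or DRIVER_ERROR << 24 when the request could not
 * be set up at all.
 */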
217int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
218 int data_direction, void *buffer, unsigned bufflen,
219 unsigned char *sense, int timeout, int retries, u64 flags,
220 int *resid)
221{
222 struct request *req;
223 int write = (data_direction == DMA_TO_DEVICE);
224 int ret = DRIVER_ERROR << 24;
225
226 req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM);
227 if (IS_ERR(req))
228 return ret;
229 blk_rq_set_block_pc(req);
230
231 if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
232 buffer, bufflen, __GFP_RECLAIM))
233 goto out;
234
235 req->cmd_len = COMMAND_SIZE(cmd[0]);
236 memcpy(req->cmd, cmd, req->cmd_len);
237 req->sense = sense;
238 req->sense_len = 0;
239 req->retries = retries;
240 req->timeout = timeout;
241 req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
242
243
244
245
246 blk_execute_rq(req->q, NULL, req, 1);
247
248
249
250
251
252
253
254 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
255 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
256
257 if (resid)
258 *resid = req->resid_len;
259 ret = req->errors;
260 out:
261 blk_put_request(req);
262
263 return ret;
264}
265EXPORT_SYMBOL(scsi_execute);
266
267int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
268 int data_direction, void *buffer, unsigned bufflen,
269 struct scsi_sense_hdr *sshdr, int timeout, int retries,
270 int *resid, u64 flags)
271{
272 char *sense = NULL;
273 int result;
274
275 if (sshdr) {
276 sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
277 if (!sense)
278 return DRIVER_ERROR << 24;
279 }
280 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
281 sense, timeout, retries, flags, resid);
282 if (sshdr)
283 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
284
285 kfree(sense);
286 return result;
287}
288EXPORT_SYMBOL(scsi_execute_req_flags);
289
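/*
 * scsi_init_cmd_errh - reset the command fields that the completion and
 * error-handling paths rely on (serial number, resid, sense buffer and
 * cmd_len) before the command is sent to the low-level driver.
 */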
301static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
302{
303 cmd->serial_number = 0;
304 scsi_set_resid(cmd, 0);
305 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
306 if (cmd->cmd_len == 0)
307 cmd->cmd_len = scsi_command_size(cmd->cmnd);
308}
309
310void scsi_device_unbusy(struct scsi_device *sdev)
311{
312 struct Scsi_Host *shost = sdev->host;
313 struct scsi_target *starget = scsi_target(sdev);
314 unsigned long flags;
315
316 atomic_dec(&shost->host_busy);
317 if (starget->can_queue > 0)
318 atomic_dec(&starget->target_busy);
319
320 if (unlikely(scsi_host_in_recovery(shost) &&
321 (shost->host_failed || shost->host_eh_scheduled))) {
322 spin_lock_irqsave(shost->host_lock, flags);
323 scsi_eh_wakeup(shost);
324 spin_unlock_irqrestore(shost->host_lock, flags);
325 }
326
327 atomic_dec(&sdev->device_busy);
328}
329
330static void scsi_kick_queue(struct request_queue *q)
331{
332 if (q->mq_ops)
333 blk_mq_start_hw_queues(q);
334 else
335 blk_run_queue(q);
336}
337
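/*
 * scsi_single_lun_run - run the queues for a single_lun target
 * @current_sdev:	device that has just finished a command
 *
 * Clears starget_sdev_user and restarts the queue of @current_sdev,
 * then those of the other devices on the same target.
 */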
345static void scsi_single_lun_run(struct scsi_device *current_sdev)
346{
347 struct Scsi_Host *shost = current_sdev->host;
348 struct scsi_device *sdev, *tmp;
349 struct scsi_target *starget = scsi_target(current_sdev);
350 unsigned long flags;
351
352 spin_lock_irqsave(shost->host_lock, flags);
353 starget->starget_sdev_user = NULL;
354 spin_unlock_irqrestore(shost->host_lock, flags);
355
362 scsi_kick_queue(current_sdev->request_queue);
363
364 spin_lock_irqsave(shost->host_lock, flags);
365 if (starget->starget_sdev_user)
366 goto out;
367 list_for_each_entry_safe(sdev, tmp, &starget->devices,
368 same_target_siblings) {
369 if (sdev == current_sdev)
370 continue;
371 if (scsi_device_get(sdev))
372 continue;
373
374 spin_unlock_irqrestore(shost->host_lock, flags);
375 scsi_kick_queue(sdev->request_queue);
376 spin_lock_irqsave(shost->host_lock, flags);
377
378 scsi_device_put(sdev);
379 }
380 out:
381 spin_unlock_irqrestore(shost->host_lock, flags);
382}
383
384static inline bool scsi_device_is_busy(struct scsi_device *sdev)
385{
386 if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
387 return true;
388 if (atomic_read(&sdev->device_blocked) > 0)
389 return true;
390 return false;
391}
392
393static inline bool scsi_target_is_busy(struct scsi_target *starget)
394{
395 if (starget->can_queue > 0) {
396 if (atomic_read(&starget->target_busy) >= starget->can_queue)
397 return true;
398 if (atomic_read(&starget->target_blocked) > 0)
399 return true;
400 }
401 return false;
402}
403
404static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
405{
406 if (shost->can_queue > 0 &&
407 atomic_read(&shost->host_busy) >= shost->can_queue)
408 return true;
409 if (atomic_read(&shost->host_blocked) > 0)
410 return true;
411 if (shost->host_self_blocked)
412 return true;
413 return false;
414}
415
416static void scsi_starved_list_run(struct Scsi_Host *shost)
417{
418 LIST_HEAD(starved_list);
419 struct scsi_device *sdev;
420 unsigned long flags;
421
422 spin_lock_irqsave(shost->host_lock, flags);
423 list_splice_init(&shost->starved_list, &starved_list);
424
425 while (!list_empty(&starved_list)) {
426 struct request_queue *slq;
427
438 if (scsi_host_is_busy(shost))
439 break;
440
441 sdev = list_entry(starved_list.next,
442 struct scsi_device, starved_entry);
443 list_del_init(&sdev->starved_entry);
444 if (scsi_target_is_busy(scsi_target(sdev))) {
445 list_move_tail(&sdev->starved_entry,
446 &shost->starved_list);
447 continue;
448 }
449
460 slq = sdev->request_queue;
461 if (!blk_get_queue(slq))
462 continue;
463 spin_unlock_irqrestore(shost->host_lock, flags);
464
465 scsi_kick_queue(slq);
466 blk_put_queue(slq);
467
468 spin_lock_irqsave(shost->host_lock, flags);
469 }
470
471 list_splice(&starved_list, &shost->starved_list);
472 spin_unlock_irqrestore(shost->host_lock, flags);
473}
474
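/*
 * scsi_run_queue - restart a SCSI device queue
 * @q:	request queue to run
 *
 * Re-runs the single_lun target and any starved devices on the host
 * first, then restarts @q itself (blk-mq or legacy as appropriate).
 */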
487static void scsi_run_queue(struct request_queue *q)
488{
489 struct scsi_device *sdev = q->queuedata;
490
491 if (scsi_target(sdev)->single_lun)
492 scsi_single_lun_run(sdev);
493 if (!list_empty(&sdev->host->starved_list))
494 scsi_starved_list_run(sdev->host);
495
496 if (q->mq_ops)
497 blk_mq_start_stopped_hw_queues(q, false);
498 else
499 blk_run_queue(q);
500}
501
502void scsi_requeue_run_queue(struct work_struct *work)
503{
504 struct scsi_device *sdev;
505 struct request_queue *q;
506
507 sdev = container_of(work, struct scsi_device, requeue_work);
508 q = sdev->request_queue;
509 scsi_run_queue(q);
510}
511
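/*
 * scsi_requeue_command - requeue a command on the legacy (non-mq) path
 * @q:	queue the command came from
 * @cmd:	command to requeue
 *
 * Unpreps the request, puts it back on the queue, re-runs the queue and
 * drops the device reference taken for the command.
 */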
530static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
531{
532 struct scsi_device *sdev = cmd->device;
533 struct request *req = cmd->request;
534 unsigned long flags;
535
536 spin_lock_irqsave(q->queue_lock, flags);
537 blk_unprep_request(req);
538 req->special = NULL;
539 scsi_put_command(cmd);
540 blk_requeue_request(q, req);
541 spin_unlock_irqrestore(q->queue_lock, flags);
542
543 scsi_run_queue(q);
544
545 put_device(&sdev->sdev_gendev);
546}
547
548void scsi_run_host_queues(struct Scsi_Host *shost)
549{
550 struct scsi_device *sdev;
551
552 shost_for_each_device(sdev, shost)
553 scsi_run_queue(sdev->request_queue);
554}
555
556static inline unsigned int scsi_sgtable_index(unsigned short nents)
557{
558 unsigned int index;
559
560 BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
561
562 if (nents <= 8)
563 index = 0;
564 else
565 index = get_count_order(nents) - 3;
566
567 return index;
568}
569
570static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
571{
572 struct scsi_host_sg_pool *sgp;
573
574 sgp = scsi_sg_pools + scsi_sgtable_index(nents);
575 mempool_free(sgl, sgp->pool);
576}
577
578static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
579{
580 struct scsi_host_sg_pool *sgp;
581
582 sgp = scsi_sg_pools + scsi_sgtable_index(nents);
583 return mempool_alloc(sgp->pool, gfp_mask);
584}
585
586static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
587{
588 if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
589 return;
590 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
591}
592
593static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
594{
595 struct scatterlist *first_chunk = NULL;
596 int ret;
597
598 BUG_ON(!nents);
599
600 if (mq) {
601 if (nents <= SCSI_MAX_SG_SEGMENTS) {
602 sdb->table.nents = sdb->table.orig_nents = nents;
603 sg_init_table(sdb->table.sgl, nents);
604 return 0;
605 }
606 first_chunk = sdb->table.sgl;
607 }
608
609 ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
610 first_chunk, GFP_ATOMIC, scsi_sg_alloc);
611 if (unlikely(ret))
612 scsi_free_sgtable(sdb, mq);
613 return ret;
614}
615
616static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
617{
618 if (cmd->request->cmd_type == REQ_TYPE_FS) {
619 struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
620
621 if (drv->uninit_command)
622 drv->uninit_command(cmd);
623 }
624}
625
626static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
627{
628 if (cmd->sdb.table.nents)
629 scsi_free_sgtable(&cmd->sdb, true);
630 if (cmd->request->next_rq && cmd->request->next_rq->special)
631 scsi_free_sgtable(cmd->request->next_rq->special, true);
632 if (scsi_prot_sg_count(cmd))
633 scsi_free_sgtable(cmd->prot_sdb, true);
634}
635
636static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
637{
638 struct scsi_device *sdev = cmd->device;
639 struct Scsi_Host *shost = sdev->host;
640 unsigned long flags;
641
642 scsi_mq_free_sgtables(cmd);
643 scsi_uninit_cmd(cmd);
644
645 if (shost->use_cmd_list) {
646 BUG_ON(list_empty(&cmd->list));
647 spin_lock_irqsave(&sdev->list_lock, flags);
648 list_del_init(&cmd->list);
649 spin_unlock_irqrestore(&sdev->list_lock, flags);
650 }
651}
652
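/*
 * scsi_release_buffers - free the scatter-gather tables of a command
 * @cmd:	command to clean up
 *
 * Releases the data and protection SG tables on the legacy (non-mq)
 * path; blk-mq uses scsi_mq_free_sgtables() instead.
 */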
669static void scsi_release_buffers(struct scsi_cmnd *cmd)
670{
671 if (cmd->sdb.table.nents)
672 scsi_free_sgtable(&cmd->sdb, false);
673
674 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
675
676 if (scsi_prot_sg_count(cmd))
677 scsi_free_sgtable(cmd->prot_sdb, false);
678}
679
680static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
681{
682 struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
683
684 scsi_free_sgtable(bidi_sdb, false);
685 kmem_cache_free(scsi_sdb_cache, bidi_sdb);
686 cmd->request->next_rq->special = NULL;
687}
688
689static bool scsi_end_request(struct request *req, int error,
690 unsigned int bytes, unsigned int bidi_bytes)
691{
692 struct scsi_cmnd *cmd = req->special;
693 struct scsi_device *sdev = cmd->device;
694 struct request_queue *q = sdev->request_queue;
695
696 if (blk_update_request(req, error, bytes))
697 return true;
698
699
700 if (unlikely(bidi_bytes) &&
701 blk_update_request(req->next_rq, error, bidi_bytes))
702 return true;
703
704 if (blk_queue_add_random(q))
705 add_disk_randomness(req->rq_disk);
706
707 if (req->mq_ctx) {
715 scsi_mq_uninit_cmd(cmd);
716
717 __blk_mq_end_request(req, error);
718
719 if (scsi_target(sdev)->single_lun ||
720 !list_empty(&sdev->host->starved_list))
721 kblockd_schedule_work(&sdev->requeue_work);
722 else
723 blk_mq_start_stopped_hw_queues(q, true);
724 } else {
725 unsigned long flags;
726
727 if (bidi_bytes)
728 scsi_release_bidi_buffers(cmd);
729
730 spin_lock_irqsave(q->queue_lock, flags);
731 blk_finish_request(req, error);
732 spin_unlock_irqrestore(q->queue_lock, flags);
733
734 scsi_release_buffers(cmd);
735
736 scsi_put_command(cmd);
737 scsi_run_queue(q);
738 }
739
740 put_device(&sdev->sdev_gendev);
741 return false;
742}
743
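/*
 * __scsi_error_from_host_byte - translate the host byte of a command
 * result into a block layer errno, downgrading the host byte to DID_OK
 * for the failures that are reported purely through the errno.
 */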
758static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
759{
760 int error = 0;
761
762 switch(host_byte(result)) {
763 case DID_TRANSPORT_FAILFAST:
764 error = -ENOLINK;
765 break;
766 case DID_TARGET_FAILURE:
767 set_host_byte(cmd, DID_OK);
768 error = -EREMOTEIO;
769 break;
770 case DID_NEXUS_FAILURE:
771 set_host_byte(cmd, DID_OK);
772 error = -EBADE;
773 break;
774 case DID_ALLOC_FAILURE:
775 set_host_byte(cmd, DID_OK);
776 error = -ENOSPC;
777 break;
778 case DID_MEDIUM_ERROR:
779 set_host_byte(cmd, DID_OK);
780 error = -ENODATA;
781 break;
782 default:
783 error = -EIO;
784 break;
785 }
786
787 return error;
788}
789
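/*
 * scsi_io_completion - completion processing for block-device commands
 * @cmd:	command that is finished
 * @good_bytes:	number of bytes transferred successfully
 *
 * Completes @good_bytes of the request; the remainder is either failed,
 * re-prepared, retried immediately or retried after a delay, depending
 * on the sense data and host byte.
 */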
819void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
820{
821 int result = cmd->result;
822 struct request_queue *q = cmd->device->request_queue;
823 struct request *req = cmd->request;
824 int error = 0;
825 struct scsi_sense_hdr sshdr;
826 bool sense_valid = false;
827 int sense_deferred = 0, level = 0;
828 enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
829 ACTION_DELAYED_RETRY} action;
830 unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
831
832 if (result) {
833 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
834 if (sense_valid)
835 sense_deferred = scsi_sense_is_deferred(&sshdr);
836 }
837
838 if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
839 if (result) {
840 if (sense_valid && req->sense) {
841
842
843
844 int len = 8 + cmd->sense_buffer[7];
845
846 if (len > SCSI_SENSE_BUFFERSIZE)
847 len = SCSI_SENSE_BUFFERSIZE;
848 memcpy(req->sense, cmd->sense_buffer, len);
849 req->sense_len = len;
850 }
851 if (!sense_deferred)
852 error = __scsi_error_from_host_byte(cmd, result);
853 }
854
855
856
857 req->errors = cmd->result;
858
859 req->resid_len = scsi_get_resid(cmd);
860
861 if (scsi_bidi_cmnd(cmd)) {
862
863
864
865
866 req->next_rq->resid_len = scsi_in(cmd)->resid;
867 if (scsi_end_request(req, 0, blk_rq_bytes(req),
868 blk_rq_bytes(req->next_rq)))
869 BUG();
870 return;
871 }
872 } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
873
874
875
876
877
878
879 error = __scsi_error_from_host_byte(cmd, result);
880 }
881
882
883 BUG_ON(blk_bidi_rq(req));
884
885
886
887
888
889 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
890 "%u sectors total, %d bytes done.\n",
891 blk_rq_sectors(req), good_bytes));
892
899 if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
900
901
902
903
904 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
905 ;
906 else if (!(req->cmd_flags & REQ_QUIET))
907 scsi_print_sense(cmd);
908 result = 0;
909
910 error = 0;
911 }
912
913
914
915
916 if (!scsi_end_request(req, error, good_bytes, 0))
917 return;
918
919
920
921
922 if (error && scsi_noretry_cmd(cmd)) {
923 if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
924 BUG();
925 return;
926 }
927
928
929
930
931
932 if (result == 0)
933 goto requeue;
934
935 error = __scsi_error_from_host_byte(cmd, result);
936
937 if (host_byte(result) == DID_RESET) {
938
939
940
941
942 action = ACTION_RETRY;
943 } else if (sense_valid && !sense_deferred) {
944 switch (sshdr.sense_key) {
945 case UNIT_ATTENTION:
946 if (cmd->device->removable) {
947
948
949
950 cmd->device->changed = 1;
951 action = ACTION_FAIL;
952 } else {
953
954
955
956
957
958 action = ACTION_RETRY;
959 }
960 break;
961 case ILLEGAL_REQUEST:
970 if ((cmd->device->use_10_for_rw &&
971 sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
972 (cmd->cmnd[0] == READ_10 ||
973 cmd->cmnd[0] == WRITE_10)) {
974
975 cmd->device->use_10_for_rw = 0;
976 action = ACTION_REPREP;
977 } else if (sshdr.asc == 0x10) {
978 action = ACTION_FAIL;
979 error = -EILSEQ;
980
981 } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
982 action = ACTION_FAIL;
983 error = -EREMOTEIO;
984 } else
985 action = ACTION_FAIL;
986 break;
987 case ABORTED_COMMAND:
988 action = ACTION_FAIL;
989 if (sshdr.asc == 0x10)
990 error = -EILSEQ;
991 break;
992 case NOT_READY:
993
994
995
996 if (sshdr.asc == 0x04) {
997 switch (sshdr.ascq) {
998 case 0x01:
999 case 0x04:
1000 case 0x05:
1001 case 0x06:
1002 case 0x07:
1003 case 0x08:
1004 case 0x09:
1005 case 0x14:
1006 action = ACTION_DELAYED_RETRY;
1007 break;
1008 default:
1009 action = ACTION_FAIL;
1010 break;
1011 }
1012 } else
1013 action = ACTION_FAIL;
1014 break;
1015 case VOLUME_OVERFLOW:
1016
1017 action = ACTION_FAIL;
1018 break;
1019 default:
1020 action = ACTION_FAIL;
1021 break;
1022 }
1023 } else
1024 action = ACTION_FAIL;
1025
1026 if (action != ACTION_FAIL &&
1027 time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
1028 action = ACTION_FAIL;
1029
1030 switch (action) {
1031 case ACTION_FAIL:
1032
1033 if (!(req->cmd_flags & REQ_QUIET)) {
1034 static DEFINE_RATELIMIT_STATE(_rs,
1035 DEFAULT_RATELIMIT_INTERVAL,
1036 DEFAULT_RATELIMIT_BURST);
1037
1038 if (unlikely(scsi_logging_level))
1039 level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
1040 SCSI_LOG_MLCOMPLETE_BITS);
1041
1042
1043
1044
1045
1046 if (!level && __ratelimit(&_rs)) {
1047 scsi_print_result(cmd, NULL, FAILED);
1048 if (driver_byte(result) & DRIVER_SENSE)
1049 scsi_print_sense(cmd);
1050 scsi_print_command(cmd);
1051 }
1052 }
1053 if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
1054 return;
1055
1056 case ACTION_REPREP:
1057 requeue:
1058
1059
1060
1061 if (q->mq_ops) {
1062 cmd->request->cmd_flags &= ~REQ_DONTPREP;
1063 scsi_mq_uninit_cmd(cmd);
1064 scsi_mq_requeue_cmd(cmd);
1065 } else {
1066 scsi_release_buffers(cmd);
1067 scsi_requeue_command(q, cmd);
1068 }
1069 break;
1070 case ACTION_RETRY:
1071
1072 __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
1073 break;
1074 case ACTION_DELAYED_RETRY:
1075
1076 __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
1077 break;
1078 }
1079}
1080
1081static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
1082{
1083 int count;
1084
1085
1086
1087
1088 if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
1089 req->mq_ctx != NULL)))
1090 return BLKPREP_DEFER;
1091
1092
1093
1094
1095
1096 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
1097 BUG_ON(count > sdb->table.nents);
1098 sdb->table.nents = count;
1099 sdb->length = blk_rq_bytes(req);
1100 return BLKPREP_OK;
1101}
1102
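/*
 * scsi_init_io - allocate and map the scatter-gather tables for the
 * data, bidirectional and integrity (DIF/DIX) parts of a request.
 * @cmd:	command to set up
 *
 * Returns BLKPREP_OK on success, BLKPREP_DEFER or BLKPREP_KILL on error.
 */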
1114int scsi_init_io(struct scsi_cmnd *cmd)
1115{
1116 struct scsi_device *sdev = cmd->device;
1117 struct request *rq = cmd->request;
1118 bool is_mq = (rq->mq_ctx != NULL);
1119 int error;
1120
1121 BUG_ON(!rq->nr_phys_segments);
1122
1123 error = scsi_init_sgtable(rq, &cmd->sdb);
1124 if (error)
1125 goto err_exit;
1126
1127 if (blk_bidi_rq(rq)) {
1128 if (!rq->q->mq_ops) {
1129 struct scsi_data_buffer *bidi_sdb =
1130 kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
1131 if (!bidi_sdb) {
1132 error = BLKPREP_DEFER;
1133 goto err_exit;
1134 }
1135
1136 rq->next_rq->special = bidi_sdb;
1137 }
1138
1139 error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
1140 if (error)
1141 goto err_exit;
1142 }
1143
1144 if (blk_integrity_rq(rq)) {
1145 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
1146 int ivecs, count;
1147
1148 if (prot_sdb == NULL) {
1149
1150
1151
1152
1153
1154 WARN_ON_ONCE(1);
1155 error = BLKPREP_KILL;
1156 goto err_exit;
1157 }
1158
1159 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
1160
1161 if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
1162 error = BLKPREP_DEFER;
1163 goto err_exit;
1164 }
1165
1166 count = blk_rq_map_integrity_sg(rq->q, rq->bio,
1167 prot_sdb->table.sgl);
1168 BUG_ON(unlikely(count > ivecs));
1169 BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
1170
1171 cmd->prot_sdb = prot_sdb;
1172 cmd->prot_sdb->table.nents = count;
1173 }
1174
1175 return BLKPREP_OK;
1176err_exit:
1177 if (is_mq) {
1178 scsi_mq_free_sgtables(cmd);
1179 } else {
1180 scsi_release_buffers(cmd);
1181 cmd->request->special = NULL;
1182 scsi_put_command(cmd);
1183 put_device(&sdev->sdev_gendev);
1184 }
1185 return error;
1186}
1187EXPORT_SYMBOL(scsi_init_io);
1188
1189static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1190 struct request *req)
1191{
1192 struct scsi_cmnd *cmd;
1193
1194 if (!req->special) {
1195
1196 if (!get_device(&sdev->sdev_gendev))
1197 return NULL;
1198
1199 cmd = scsi_get_command(sdev, GFP_ATOMIC);
1200 if (unlikely(!cmd)) {
1201 put_device(&sdev->sdev_gendev);
1202 return NULL;
1203 }
1204 req->special = cmd;
1205 } else {
1206 cmd = req->special;
1207 }
1208
1209
1210 cmd->tag = req->tag;
1211 cmd->request = req;
1212
1213 cmd->cmnd = req->cmd;
1214 cmd->prot_op = SCSI_PROT_NORMAL;
1215
1216 return cmd;
1217}
1218
1219static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1220{
1221 struct scsi_cmnd *cmd = req->special;
1222
1223
1224
1225
1226
1227
1228
1229 if (req->bio) {
1230 int ret = scsi_init_io(cmd);
1231 if (unlikely(ret))
1232 return ret;
1233 } else {
1234 BUG_ON(blk_rq_bytes(req));
1235
1236 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1237 }
1238
1239 cmd->cmd_len = req->cmd_len;
1240 cmd->transfersize = blk_rq_bytes(req);
1241 cmd->allowed = req->retries;
1242 return BLKPREP_OK;
1243}
1244
1245
1246
1247
1248
1249static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1250{
1251 struct scsi_cmnd *cmd = req->special;
1252
1253 if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
1254 int ret = sdev->handler->prep_fn(sdev, req);
1255 if (ret != BLKPREP_OK)
1256 return ret;
1257 }
1258
1259 memset(cmd->cmnd, 0, BLK_MAX_CDB);
1260 return scsi_cmd_to_driver(cmd)->init_command(cmd);
1261}
1262
1263static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
1264{
1265 struct scsi_cmnd *cmd = req->special;
1266
1267 if (!blk_rq_bytes(req))
1268 cmd->sc_data_direction = DMA_NONE;
1269 else if (rq_data_dir(req) == WRITE)
1270 cmd->sc_data_direction = DMA_TO_DEVICE;
1271 else
1272 cmd->sc_data_direction = DMA_FROM_DEVICE;
1273
1274 switch (req->cmd_type) {
1275 case REQ_TYPE_FS:
1276 return scsi_setup_fs_cmnd(sdev, req);
1277 case REQ_TYPE_BLOCK_PC:
1278 return scsi_setup_blk_pc_cmnd(sdev, req);
1279 default:
1280 return BLKPREP_KILL;
1281 }
1282}
1283
1284static int
1285scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1286{
1287 int ret = BLKPREP_OK;
1288
1289
1290
1291
1292
1293 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1294 switch (sdev->sdev_state) {
1295 case SDEV_OFFLINE:
1296 case SDEV_TRANSPORT_OFFLINE:
1297
1298
1299
1300
1301
1302 sdev_printk(KERN_ERR, sdev,
1303 "rejecting I/O to offline device\n");
1304 ret = BLKPREP_KILL;
1305 break;
1306 case SDEV_DEL:
1307
1308
1309
1310
1311 sdev_printk(KERN_ERR, sdev,
1312 "rejecting I/O to dead device\n");
1313 ret = BLKPREP_KILL;
1314 break;
1315 case SDEV_BLOCK:
1316 case SDEV_CREATED_BLOCK:
1317 ret = BLKPREP_DEFER;
1318 break;
1319 case SDEV_QUIESCE:
1320
1321
1322
1323 if (!(req->cmd_flags & REQ_PREEMPT))
1324 ret = BLKPREP_DEFER;
1325 break;
1326 default:
1327
1328
1329
1330
1331
1332 if (!(req->cmd_flags & REQ_PREEMPT))
1333 ret = BLKPREP_KILL;
1334 break;
1335 }
1336 }
1337 return ret;
1338}
1339
1340static int
1341scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1342{
1343 struct scsi_device *sdev = q->queuedata;
1344
1345 switch (ret) {
1346 case BLKPREP_KILL:
1347 case BLKPREP_INVALID:
1348 req->errors = DID_NO_CONNECT << 16;
1349
1350 if (req->special) {
1351 struct scsi_cmnd *cmd = req->special;
1352 scsi_release_buffers(cmd);
1353 scsi_put_command(cmd);
1354 put_device(&sdev->sdev_gendev);
1355 req->special = NULL;
1356 }
1357 break;
1358 case BLKPREP_DEFER:
1359
1360
1361
1362
1363
1364 if (atomic_read(&sdev->device_busy) == 0)
1365 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1366 break;
1367 default:
1368 req->cmd_flags |= REQ_DONTPREP;
1369 }
1370
1371 return ret;
1372}
1373
1374static int scsi_prep_fn(struct request_queue *q, struct request *req)
1375{
1376 struct scsi_device *sdev = q->queuedata;
1377 struct scsi_cmnd *cmd;
1378 int ret;
1379
1380 ret = scsi_prep_state_check(sdev, req);
1381 if (ret != BLKPREP_OK)
1382 goto out;
1383
1384 cmd = scsi_get_cmd_from_req(sdev, req);
1385 if (unlikely(!cmd)) {
1386 ret = BLKPREP_DEFER;
1387 goto out;
1388 }
1389
1390 ret = scsi_setup_cmnd(sdev, req);
1391out:
1392 return scsi_prep_return(q, req, ret);
1393}
1394
1395static void scsi_unprep_fn(struct request_queue *q, struct request *req)
1396{
1397 scsi_uninit_cmd(req->special);
1398}
1399
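/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0 (the device is blocked or at its queue depth).
 */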
1406static inline int scsi_dev_queue_ready(struct request_queue *q,
1407 struct scsi_device *sdev)
1408{
1409 unsigned int busy;
1410
1411 busy = atomic_inc_return(&sdev->device_busy) - 1;
1412 if (atomic_read(&sdev->device_blocked)) {
1413 if (busy)
1414 goto out_dec;
1415
1416
1417
1418
1419 if (atomic_dec_return(&sdev->device_blocked) > 0) {
1420
1421
1422
1423 if (!q->mq_ops)
1424 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1425 goto out_dec;
1426 }
1427 SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
1428 "unblocking device at zero depth\n"));
1429 }
1430
1431 if (busy >= sdev->queue_depth)
1432 goto out_dec;
1433
1434 return 1;
1435out_dec:
1436 atomic_dec(&sdev->device_busy);
1437 return 0;
1438}
1439
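/*
 * scsi_target_queue_ready: checks whether the target can accept another
 * command; on failure the device is put on the host's starved list.
 */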
1444static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1445 struct scsi_device *sdev)
1446{
1447 struct scsi_target *starget = scsi_target(sdev);
1448 unsigned int busy;
1449
1450 if (starget->single_lun) {
1451 spin_lock_irq(shost->host_lock);
1452 if (starget->starget_sdev_user &&
1453 starget->starget_sdev_user != sdev) {
1454 spin_unlock_irq(shost->host_lock);
1455 return 0;
1456 }
1457 starget->starget_sdev_user = sdev;
1458 spin_unlock_irq(shost->host_lock);
1459 }
1460
1461 if (starget->can_queue <= 0)
1462 return 1;
1463
1464 busy = atomic_inc_return(&starget->target_busy) - 1;
1465 if (atomic_read(&starget->target_blocked) > 0) {
1466 if (busy)
1467 goto starved;
1468
1469
1470
1471
1472 if (atomic_dec_return(&starget->target_blocked) > 0)
1473 goto out_dec;
1474
1475 SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1476 "unblocking target at zero depth\n"));
1477 }
1478
1479 if (busy >= starget->can_queue)
1480 goto starved;
1481
1482 return 1;
1483
1484starved:
1485 spin_lock_irq(shost->host_lock);
1486 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1487 spin_unlock_irq(shost->host_lock);
1488out_dec:
1489 if (starget->can_queue > 0)
1490 atomic_dec(&starget->target_busy);
1491 return 0;
1492}
1493
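/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0, adding the device to the starved list when the host is busy.
 */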
1499static inline int scsi_host_queue_ready(struct request_queue *q,
1500 struct Scsi_Host *shost,
1501 struct scsi_device *sdev)
1502{
1503 unsigned int busy;
1504
1505 if (scsi_host_in_recovery(shost))
1506 return 0;
1507
1508 busy = atomic_inc_return(&shost->host_busy) - 1;
1509 if (atomic_read(&shost->host_blocked) > 0) {
1510 if (busy)
1511 goto starved;
1512
1513
1514
1515
1516 if (atomic_dec_return(&shost->host_blocked) > 0)
1517 goto out_dec;
1518
1519 SCSI_LOG_MLQUEUE(3,
1520 shost_printk(KERN_INFO, shost,
1521 "unblocking host at zero depth\n"));
1522 }
1523
1524 if (shost->can_queue > 0 && busy >= shost->can_queue)
1525 goto starved;
1526 if (shost->host_self_blocked)
1527 goto starved;
1528
1529
1530 if (!list_empty(&sdev->starved_entry)) {
1531 spin_lock_irq(shost->host_lock);
1532 if (!list_empty(&sdev->starved_entry))
1533 list_del_init(&sdev->starved_entry);
1534 spin_unlock_irq(shost->host_lock);
1535 }
1536
1537 return 1;
1538
1539starved:
1540 spin_lock_irq(shost->host_lock);
1541 if (list_empty(&sdev->starved_entry))
1542 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1543 spin_unlock_irq(shost->host_lock);
1544out_dec:
1545 atomic_dec(&shost->host_busy);
1546 return 0;
1547}
1548
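/*
 * scsi_lld_busy - check whether the device or host is currently too busy
 * for the block layer to issue more requests on @q.
 *
 * Returns 1 if busy, 0 otherwise (including for a dying queue).
 */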
1561static int scsi_lld_busy(struct request_queue *q)
1562{
1563 struct scsi_device *sdev = q->queuedata;
1564 struct Scsi_Host *shost;
1565
1566 if (blk_queue_dying(q))
1567 return 0;
1568
1569 shost = sdev->host;
1577 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1578 return 1;
1579
1580 return 0;
1581}
1582
1583
1584
1585
1586static void scsi_kill_request(struct request *req, struct request_queue *q)
1587{
1588 struct scsi_cmnd *cmd = req->special;
1589 struct scsi_device *sdev;
1590 struct scsi_target *starget;
1591 struct Scsi_Host *shost;
1592
1593 blk_start_request(req);
1594
1595 scmd_printk(KERN_INFO, cmd, "killing request\n");
1596
1597 sdev = cmd->device;
1598 starget = scsi_target(sdev);
1599 shost = sdev->host;
1600 scsi_init_cmd_errh(cmd);
1601 cmd->result = DID_NO_CONNECT << 16;
1602 atomic_inc(&cmd->device->iorequest_cnt);
1603
1604
1605
1606
1607
1608
1609 atomic_inc(&sdev->device_busy);
1610 atomic_inc(&shost->host_busy);
1611 if (starget->can_queue > 0)
1612 atomic_inc(&starget->target_busy);
1613
1614 blk_complete_request(req);
1615}
1616
1617static void scsi_softirq_done(struct request *rq)
1618{
1619 struct scsi_cmnd *cmd = rq->special;
1620 unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1621 int disposition;
1622
1623 INIT_LIST_HEAD(&cmd->eh_entry);
1624
1625 atomic_inc(&cmd->device->iodone_cnt);
1626 if (cmd->result)
1627 atomic_inc(&cmd->device->ioerr_cnt);
1628
1629 disposition = scsi_decide_disposition(cmd);
1630 if (disposition != SUCCESS &&
1631 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1632 sdev_printk(KERN_ERR, cmd->device,
1633 "timing out command, waited %lus\n",
1634 wait_for/HZ);
1635 disposition = SUCCESS;
1636 }
1637
1638 scsi_log_completion(cmd, disposition);
1639
1640 switch (disposition) {
1641 case SUCCESS:
1642 scsi_finish_command(cmd);
1643 break;
1644 case NEEDS_RETRY:
1645 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1646 break;
1647 case ADD_TO_MLQUEUE:
1648 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1649 break;
1650 default:
1651 if (!scsi_eh_scmd_add(cmd, 0))
1652 scsi_finish_command(cmd);
1653 }
1654}
1655
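/*
 * scsi_dispatch_cmd - hand a command to the low-level driver's
 * queuecommand() method.
 *
 * Returns 0 when the command was accepted or completed locally, or a
 * SCSI_MLQUEUE_* code when it must be requeued.
 */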
1663static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
1664{
1665 struct Scsi_Host *host = cmd->device->host;
1666 int rtn = 0;
1667
1668 atomic_inc(&cmd->device->iorequest_cnt);
1669
1670
1671 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
1672
1673
1674
1675 cmd->result = DID_NO_CONNECT << 16;
1676 goto done;
1677 }
1678
1679
1680 if (unlikely(scsi_device_blocked(cmd->device))) {
1688 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1689 "queuecommand : device blocked\n"));
1690 return SCSI_MLQUEUE_DEVICE_BUSY;
1691 }
1692
1693
1694 if (cmd->device->lun_in_cdb)
1695 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
1696 (cmd->device->lun << 5 & 0xe0);
1697
1698 scsi_log_send(cmd);
1699
1700
1701
1702
1703
1704 if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
1705 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1706 "queuecommand : command too long. "
1707 "cdb_size=%d host->max_cmd_len=%d\n",
1708 cmd->cmd_len, cmd->device->host->max_cmd_len));
1709 cmd->result = (DID_ABORT << 16);
1710 goto done;
1711 }
1712
1713 if (unlikely(host->shost_state == SHOST_DEL)) {
1714 cmd->result = (DID_NO_CONNECT << 16);
1715 goto done;
1716
1717 }
1718
1719 trace_scsi_dispatch_cmd_start(cmd);
1720 rtn = host->hostt->queuecommand(host, cmd);
1721 if (rtn) {
1722 trace_scsi_dispatch_cmd_error(cmd, rtn);
1723 if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
1724 rtn != SCSI_MLQUEUE_TARGET_BUSY)
1725 rtn = SCSI_MLQUEUE_HOST_BUSY;
1726
1727 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1728 "queuecommand : request rejected\n"));
1729 }
1730
1731 return rtn;
1732 done:
1733 cmd->scsi_done(cmd);
1734 return 0;
1735}
1736
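/*
 * scsi_done - per-command completion callback handed to the LLDD
 * @cmd:	command the low-level driver has finished with
 *
 * Hands the request to the block softirq completion path, where
 * scsi_softirq_done() will finish processing it.
 */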
1748static void scsi_done(struct scsi_cmnd *cmd)
1749{
1750 trace_scsi_dispatch_cmd_done(cmd);
1751 blk_complete_request(cmd->request);
1752}
1753
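/*
 * scsi_request_fn - request function for the legacy (non-mq) path
 * @q:	SCSI device request queue
 *
 * Called with the queue lock held; pulls requests off the queue, checks
 * device, target and host readiness and dispatches commands to the LLDD.
 */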
1765static void scsi_request_fn(struct request_queue *q)
1766 __releases(q->queue_lock)
1767 __acquires(q->queue_lock)
1768{
1769 struct scsi_device *sdev = q->queuedata;
1770 struct Scsi_Host *shost;
1771 struct scsi_cmnd *cmd;
1772 struct request *req;
1773
1774
1775
1776
1777
1778 shost = sdev->host;
1779 for (;;) {
1780 int rtn;
1781
1782
1783
1784
1785
1786 req = blk_peek_request(q);
1787 if (!req)
1788 break;
1789
1790 if (unlikely(!scsi_device_online(sdev))) {
1791 sdev_printk(KERN_ERR, sdev,
1792 "rejecting I/O to offline device\n");
1793 scsi_kill_request(req, q);
1794 continue;
1795 }
1796
1797 if (!scsi_dev_queue_ready(q, sdev))
1798 break;
1799
1800
1801
1802
1803 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1804 blk_start_request(req);
1805
1806 spin_unlock_irq(q->queue_lock);
1807 cmd = req->special;
1808 if (unlikely(cmd == NULL)) {
1809 printk(KERN_CRIT "impossible request in %s.\n"
1810 "please mail a stack trace to "
1811 "linux-scsi@vger.kernel.org\n",
1812 __func__);
1813 blk_dump_rq_flags(req, "foo");
1814 BUG();
1815 }
1816
1825 if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
1826 spin_lock_irq(shost->host_lock);
1827 if (list_empty(&sdev->starved_entry))
1828 list_add_tail(&sdev->starved_entry,
1829 &shost->starved_list);
1830 spin_unlock_irq(shost->host_lock);
1831 goto not_ready;
1832 }
1833
1834 if (!scsi_target_queue_ready(shost, sdev))
1835 goto not_ready;
1836
1837 if (!scsi_host_queue_ready(q, shost, sdev))
1838 goto host_not_ready;
1839
1840 if (sdev->simple_tags)
1841 cmd->flags |= SCMD_TAGGED;
1842 else
1843 cmd->flags &= ~SCMD_TAGGED;
1844
1845
1846
1847
1848
1849 scsi_init_cmd_errh(cmd);
1850
1851
1852
1853
1854 cmd->scsi_done = scsi_done;
1855 rtn = scsi_dispatch_cmd(cmd);
1856 if (rtn) {
1857 scsi_queue_insert(cmd, rtn);
1858 spin_lock_irq(q->queue_lock);
1859 goto out_delay;
1860 }
1861 spin_lock_irq(q->queue_lock);
1862 }
1863
1864 return;
1865
1866 host_not_ready:
1867 if (scsi_target(sdev)->can_queue > 0)
1868 atomic_dec(&scsi_target(sdev)->target_busy);
1869 not_ready:
1878 spin_lock_irq(q->queue_lock);
1879 blk_requeue_request(q, req);
1880 atomic_dec(&sdev->device_busy);
1881out_delay:
1882 if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
1883 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1884}
1885
1886static inline int prep_to_mq(int ret)
1887{
1888 switch (ret) {
1889 case BLKPREP_OK:
1890 return 0;
1891 case BLKPREP_DEFER:
1892 return BLK_MQ_RQ_QUEUE_BUSY;
1893 default:
1894 return BLK_MQ_RQ_QUEUE_ERROR;
1895 }
1896}
1897
1898static int scsi_mq_prep_fn(struct request *req)
1899{
1900 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1901 struct scsi_device *sdev = req->q->queuedata;
1902 struct Scsi_Host *shost = sdev->host;
1903 unsigned char *sense_buf = cmd->sense_buffer;
1904 struct scatterlist *sg;
1905
1906 memset(cmd, 0, sizeof(struct scsi_cmnd));
1907
1908 req->special = cmd;
1909
1910 cmd->request = req;
1911 cmd->device = sdev;
1912 cmd->sense_buffer = sense_buf;
1913
1914 cmd->tag = req->tag;
1915
1916 cmd->cmnd = req->cmd;
1917 cmd->prot_op = SCSI_PROT_NORMAL;
1918
1919 INIT_LIST_HEAD(&cmd->list);
1920 INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
1921 cmd->jiffies_at_alloc = jiffies;
1922
1923 if (shost->use_cmd_list) {
1924 spin_lock_irq(&sdev->list_lock);
1925 list_add_tail(&cmd->list, &sdev->cmd_list);
1926 spin_unlock_irq(&sdev->list_lock);
1927 }
1928
1929 sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
1930 cmd->sdb.table.sgl = sg;
1931
1932 if (scsi_host_get_prot(shost)) {
1933 cmd->prot_sdb = (void *)sg +
1934 min_t(unsigned int,
1935 shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
1936 sizeof(struct scatterlist);
1937 memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
1938
1939 cmd->prot_sdb->table.sgl =
1940 (struct scatterlist *)(cmd->prot_sdb + 1);
1941 }
1942
1943 if (blk_bidi_rq(req)) {
1944 struct request *next_rq = req->next_rq;
1945 struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);
1946
1947 memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
1948 bidi_sdb->table.sgl =
1949 (struct scatterlist *)(bidi_sdb + 1);
1950
1951 next_rq->special = bidi_sdb;
1952 }
1953
1954 blk_mq_start_request(req);
1955
1956 return scsi_setup_cmnd(sdev, req);
1957}
1958
1959static void scsi_mq_done(struct scsi_cmnd *cmd)
1960{
1961 trace_scsi_dispatch_cmd_done(cmd);
1962 blk_mq_complete_request(cmd->request, cmd->request->errors);
1963}
1964
1965static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
1966 const struct blk_mq_queue_data *bd)
1967{
1968 struct request *req = bd->rq;
1969 struct request_queue *q = req->q;
1970 struct scsi_device *sdev = q->queuedata;
1971 struct Scsi_Host *shost = sdev->host;
1972 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1973 int ret;
1974 int reason;
1975
1976 ret = prep_to_mq(scsi_prep_state_check(sdev, req));
1977 if (ret)
1978 goto out;
1979
1980 ret = BLK_MQ_RQ_QUEUE_BUSY;
1981 if (!get_device(&sdev->sdev_gendev))
1982 goto out;
1983
1984 if (!scsi_dev_queue_ready(q, sdev))
1985 goto out_put_device;
1986 if (!scsi_target_queue_ready(shost, sdev))
1987 goto out_dec_device_busy;
1988 if (!scsi_host_queue_ready(q, shost, sdev))
1989 goto out_dec_target_busy;
1990
1991
1992 if (!(req->cmd_flags & REQ_DONTPREP)) {
1993 ret = prep_to_mq(scsi_mq_prep_fn(req));
1994 if (ret)
1995 goto out_dec_host_busy;
1996 req->cmd_flags |= REQ_DONTPREP;
1997 } else {
1998 blk_mq_start_request(req);
1999 }
2000
2001 if (sdev->simple_tags)
2002 cmd->flags |= SCMD_TAGGED;
2003 else
2004 cmd->flags &= ~SCMD_TAGGED;
2005
2006 scsi_init_cmd_errh(cmd);
2007 cmd->scsi_done = scsi_mq_done;
2008
2009 reason = scsi_dispatch_cmd(cmd);
2010 if (reason) {
2011 scsi_set_blocked(cmd, reason);
2012 ret = BLK_MQ_RQ_QUEUE_BUSY;
2013 goto out_dec_host_busy;
2014 }
2015
2016 return BLK_MQ_RQ_QUEUE_OK;
2017
2018out_dec_host_busy:
2019 atomic_dec(&shost->host_busy);
2020out_dec_target_busy:
2021 if (scsi_target(sdev)->can_queue > 0)
2022 atomic_dec(&scsi_target(sdev)->target_busy);
2023out_dec_device_busy:
2024 atomic_dec(&sdev->device_busy);
2025out_put_device:
2026 put_device(&sdev->sdev_gendev);
2027out:
2028 switch (ret) {
2029 case BLK_MQ_RQ_QUEUE_BUSY:
2030 blk_mq_stop_hw_queue(hctx);
2031 if (atomic_read(&sdev->device_busy) == 0 &&
2032 !scsi_device_blocked(sdev))
2033 blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
2034 break;
2035 case BLK_MQ_RQ_QUEUE_ERROR:
2036
2037
2038
2039
2040
2041 if (req->cmd_flags & REQ_DONTPREP)
2042 scsi_mq_uninit_cmd(cmd);
2043 break;
2044 default:
2045 break;
2046 }
2047 return ret;
2048}
2049
2050static enum blk_eh_timer_return scsi_timeout(struct request *req,
2051 bool reserved)
2052{
2053 if (reserved)
2054 return BLK_EH_RESET_TIMER;
2055 return scsi_times_out(req);
2056}
2057
2058static int scsi_init_request(void *data, struct request *rq,
2059 unsigned int hctx_idx, unsigned int request_idx,
2060 unsigned int numa_node)
2061{
2062 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
2063
2064 cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
2065 numa_node);
2066 if (!cmd->sense_buffer)
2067 return -ENOMEM;
2068 return 0;
2069}
2070
2071static void scsi_exit_request(void *data, struct request *rq,
2072 unsigned int hctx_idx, unsigned int request_idx)
2073{
2074 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
2075
2076 kfree(cmd->sense_buffer);
2077}
2078
2079static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
2080{
2081 struct device *host_dev;
2082 u64 bounce_limit = 0xffffffff;
2083
2084 if (shost->unchecked_isa_dma)
2085 return BLK_BOUNCE_ISA;
2086
2087
2088
2089
2090 if (!PCI_DMA_BUS_IS_PHYS)
2091 return BLK_BOUNCE_ANY;
2092
2093 host_dev = scsi_get_device(shost);
2094 if (host_dev && host_dev->dma_mask)
2095 bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
2096
2097 return bounce_limit;
2098}
2099
2100static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
2101{
2102 struct device *dev = shost->dma_dev;
2103
2104
2105
2106
2107 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
2108 SCSI_MAX_SG_CHAIN_SEGMENTS));
2109
2110 if (scsi_host_prot_dma(shost)) {
2111 shost->sg_prot_tablesize =
2112 min_not_zero(shost->sg_prot_tablesize,
2113 (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
2114 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
2115 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
2116 }
2117
2118 blk_queue_max_hw_sectors(q, shost->max_sectors);
2119 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
2120 blk_queue_segment_boundary(q, shost->dma_boundary);
2121 dma_set_seg_boundary(dev, shost->dma_boundary);
2122
2123 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
2124
2125 if (!shost->use_clustering)
2126 q->limits.cluster = 0;
2127
2128
2129
2130
2131
2132
2133 blk_queue_dma_alignment(q, 0x03);
2134}
2135
2136struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
2137 request_fn_proc *request_fn)
2138{
2139 struct request_queue *q;
2140
2141 q = blk_init_queue(request_fn, NULL);
2142 if (!q)
2143 return NULL;
2144 __scsi_init_queue(shost, q);
2145 return q;
2146}
2147EXPORT_SYMBOL(__scsi_alloc_queue);
2148
2149struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
2150{
2151 struct request_queue *q;
2152
2153 q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
2154 if (!q)
2155 return NULL;
2156
2157 blk_queue_prep_rq(q, scsi_prep_fn);
2158 blk_queue_unprep_rq(q, scsi_unprep_fn);
2159 blk_queue_softirq_done(q, scsi_softirq_done);
2160 blk_queue_rq_timed_out(q, scsi_times_out);
2161 blk_queue_lld_busy(q, scsi_lld_busy);
2162 return q;
2163}
2164
2165static struct blk_mq_ops scsi_mq_ops = {
2166 .map_queue = blk_mq_map_queue,
2167 .queue_rq = scsi_queue_rq,
2168 .complete = scsi_softirq_done,
2169 .timeout = scsi_timeout,
2170 .init_request = scsi_init_request,
2171 .exit_request = scsi_exit_request,
2172};
2173
2174struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
2175{
2176 sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
2177 if (IS_ERR(sdev->request_queue))
2178 return NULL;
2179
2180 sdev->request_queue->queuedata = sdev;
2181 __scsi_init_queue(sdev->host, sdev->request_queue);
2182 return sdev->request_queue;
2183}
2184
2185int scsi_mq_setup_tags(struct Scsi_Host *shost)
2186{
2187 unsigned int cmd_size, sgl_size, tbl_size;
2188
2189 tbl_size = shost->sg_tablesize;
2190 if (tbl_size > SCSI_MAX_SG_SEGMENTS)
2191 tbl_size = SCSI_MAX_SG_SEGMENTS;
2192 sgl_size = tbl_size * sizeof(struct scatterlist);
2193 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
2194 if (scsi_host_get_prot(shost))
2195 cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
2196
2197 memset(&shost->tag_set, 0, sizeof(shost->tag_set));
2198 shost->tag_set.ops = &scsi_mq_ops;
2199 shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
2200 shost->tag_set.queue_depth = shost->can_queue;
2201 shost->tag_set.cmd_size = cmd_size;
2202 shost->tag_set.numa_node = NUMA_NO_NODE;
2203 shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
2204 shost->tag_set.flags |=
2205 BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
2206 shost->tag_set.driver_data = shost;
2207
2208 return blk_mq_alloc_tag_set(&shost->tag_set);
2209}
2210
2211void scsi_mq_destroy_tags(struct Scsi_Host *shost)
2212{
2213 blk_mq_free_tag_set(&shost->tag_set);
2214}
2215
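/*
 * scsi_block_requests - defer further commands to a host
 * @shost:	host in question
 *
 * Sets host_self_blocked; the host is treated as busy and no further
 * commands are issued to it until scsi_unblock_requests() is called.
 */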
2232void scsi_block_requests(struct Scsi_Host *shost)
2233{
2234 shost->host_self_blocked = 1;
2235}
2236EXPORT_SYMBOL(scsi_block_requests);
2237
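/*
 * scsi_unblock_requests - allow further commands to be queued to a host
 * @shost:	host in question
 *
 * Clears host_self_blocked and re-runs all of the host's device queues
 * so that deferred commands are issued.
 */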
2258void scsi_unblock_requests(struct Scsi_Host *shost)
2259{
2260 shost->host_self_blocked = 0;
2261 scsi_run_host_queues(shost);
2262}
2263EXPORT_SYMBOL(scsi_unblock_requests);
2264
2265int __init scsi_init_queue(void)
2266{
2267 int i;
2268
2269 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
2270 sizeof(struct scsi_data_buffer),
2271 0, 0, NULL);
2272 if (!scsi_sdb_cache) {
2273 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
2274 return -ENOMEM;
2275 }
2276
2277 for (i = 0; i < SG_MEMPOOL_NR; i++) {
2278 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
2279 int size = sgp->size * sizeof(struct scatterlist);
2280
2281 sgp->slab = kmem_cache_create(sgp->name, size, 0,
2282 SLAB_HWCACHE_ALIGN, NULL);
2283 if (!sgp->slab) {
2284 printk(KERN_ERR "SCSI: can't init sg slab %s\n",
2285 sgp->name);
2286 goto cleanup_sdb;
2287 }
2288
2289 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
2290 sgp->slab);
2291 if (!sgp->pool) {
2292 printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
2293 sgp->name);
2294 goto cleanup_sdb;
2295 }
2296 }
2297
2298 return 0;
2299
2300cleanup_sdb:
2301 for (i = 0; i < SG_MEMPOOL_NR; i++) {
2302 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
2303 if (sgp->pool)
2304 mempool_destroy(sgp->pool);
2305 if (sgp->slab)
2306 kmem_cache_destroy(sgp->slab);
2307 }
2308 kmem_cache_destroy(scsi_sdb_cache);
2309
2310 return -ENOMEM;
2311}
2312
2313void scsi_exit_queue(void)
2314{
2315 int i;
2316
2317 kmem_cache_destroy(scsi_sdb_cache);
2318
2319 for (i = 0; i < SG_MEMPOOL_NR; i++) {
2320 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
2321 mempool_destroy(sgp->pool);
2322 kmem_cache_destroy(sgp->slab);
2323 }
2324}
2325
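/*
 * scsi_mode_select - issue a MODE SELECT command
 * @sdev:	the scsi device
 * @pf:		Page Format bit
 * @sp:		Save Pages bit
 * @modepage:	mode page being requested
 * @buffer:	mode page data, without the mode parameter header
 * @len:	length of @buffer
 * @data:	mode parameter header fields to prepend
 * @sshdr:	optional sense header
 *
 * Uses MODE SELECT(10) when sdev->use_10_for_ms is set, MODE SELECT(6)
 * otherwise, building the matching parameter header around @buffer.
 */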
2344int
2345scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
2346 unsigned char *buffer, int len, int timeout, int retries,
2347 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2348{
2349 unsigned char cmd[10];
2350 unsigned char *real_buffer;
2351 int ret;
2352
2353 memset(cmd, 0, sizeof(cmd));
2354 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
2355
2356 if (sdev->use_10_for_ms) {
2357 if (len > 65535)
2358 return -EINVAL;
2359 real_buffer = kmalloc(8 + len, GFP_KERNEL);
2360 if (!real_buffer)
2361 return -ENOMEM;
2362 memcpy(real_buffer + 8, buffer, len);
2363 len += 8;
2364 real_buffer[0] = 0;
2365 real_buffer[1] = 0;
2366 real_buffer[2] = data->medium_type;
2367 real_buffer[3] = data->device_specific;
2368 real_buffer[4] = data->longlba ? 0x01 : 0;
2369 real_buffer[5] = 0;
2370 real_buffer[6] = data->block_descriptor_length >> 8;
2371 real_buffer[7] = data->block_descriptor_length;
2372
2373 cmd[0] = MODE_SELECT_10;
2374 cmd[7] = len >> 8;
2375 cmd[8] = len;
2376 } else {
2377 if (len > 255 || data->block_descriptor_length > 255 ||
2378 data->longlba)
2379 return -EINVAL;
2380
2381 real_buffer = kmalloc(4 + len, GFP_KERNEL);
2382 if (!real_buffer)
2383 return -ENOMEM;
2384 memcpy(real_buffer + 4, buffer, len);
2385 len += 4;
2386 real_buffer[0] = 0;
2387 real_buffer[1] = data->medium_type;
2388 real_buffer[2] = data->device_specific;
2389 real_buffer[3] = data->block_descriptor_length;
2390
2391
2392 cmd[0] = MODE_SELECT;
2393 cmd[4] = len;
2394 }
2395
2396 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
2397 sshdr, timeout, retries, NULL);
2398 kfree(real_buffer);
2399 return ret;
2400}
2401EXPORT_SYMBOL_GPL(scsi_mode_select);
2402
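/*
 * scsi_mode_sense - issue a MODE SENSE command
 * @sdev:	the scsi device
 * @dbd:	Disable Block Descriptors bit
 * @modepage:	mode page being requested
 * @buffer:	buffer that receives the header and mode page
 * @len:	size of @buffer
 * @data:	returned mode parameter header fields
 * @sshdr:	optional sense header
 *
 * Uses MODE SENSE(10) when sdev->use_10_for_ms is set, falling back to
 * MODE SENSE(6) on an invalid-opcode check condition, and retries on
 * UNIT ATTENTION.
 */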
2420int
2421scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
2422 unsigned char *buffer, int len, int timeout, int retries,
2423 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2424{
2425 unsigned char cmd[12];
2426 int use_10_for_ms;
2427 int header_length;
2428 int result, retry_count = retries;
2429 struct scsi_sense_hdr my_sshdr;
2430
2431 memset(data, 0, sizeof(*data));
2432 memset(&cmd[0], 0, 12);
2433 cmd[1] = dbd & 0x18;
2434 cmd[2] = modepage;
2435
2436
2437 if (!sshdr)
2438 sshdr = &my_sshdr;
2439
2440 retry:
2441 use_10_for_ms = sdev->use_10_for_ms;
2442
2443 if (use_10_for_ms) {
2444 if (len < 8)
2445 len = 8;
2446
2447 cmd[0] = MODE_SENSE_10;
2448 cmd[8] = len;
2449 header_length = 8;
2450 } else {
2451 if (len < 4)
2452 len = 4;
2453
2454 cmd[0] = MODE_SENSE;
2455 cmd[4] = len;
2456 header_length = 4;
2457 }
2458
2459 memset(buffer, 0, len);
2460
2461 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
2462 sshdr, timeout, retries, NULL);
2463
2464
2465
2466
2467
2468
2469 if (use_10_for_ms && !scsi_status_is_good(result) &&
2470 (driver_byte(result) & DRIVER_SENSE)) {
2471 if (scsi_sense_valid(sshdr)) {
2472 if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
2473 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
2474
2475
2476
2477 sdev->use_10_for_ms = 0;
2478 goto retry;
2479 }
2480 }
2481 }
2482
2483 if(scsi_status_is_good(result)) {
2484 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2485 (modepage == 6 || modepage == 8))) {
2486
2487 header_length = 0;
2488 data->length = 13;
2489 data->medium_type = 0;
2490 data->device_specific = 0;
2491 data->longlba = 0;
2492 data->block_descriptor_length = 0;
2493 } else if(use_10_for_ms) {
2494 data->length = buffer[0]*256 + buffer[1] + 2;
2495 data->medium_type = buffer[2];
2496 data->device_specific = buffer[3];
2497 data->longlba = buffer[4] & 0x01;
2498 data->block_descriptor_length = buffer[6]*256
2499 + buffer[7];
2500 } else {
2501 data->length = buffer[0] + 1;
2502 data->medium_type = buffer[1];
2503 data->device_specific = buffer[2];
2504 data->block_descriptor_length = buffer[3];
2505 }
2506 data->header_length = header_length;
2507 } else if ((status_byte(result) == CHECK_CONDITION) &&
2508 scsi_sense_valid(sshdr) &&
2509 sshdr->sense_key == UNIT_ATTENTION && retry_count) {
2510 retry_count--;
2511 goto retry;
2512 }
2513
2514 return result;
2515}
2516EXPORT_SYMBOL(scsi_mode_sense);
2517
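/*
 * scsi_test_unit_ready - test if a unit is ready
 * @sdev:	scsi device to test
 * @timeout:	command timeout
 * @retries:	number of retries before failing
 * @sshdr_external:	optional sense header; one is allocated internally
 *			when NULL is passed
 *
 * Issues TEST UNIT READY, retrying while UNIT ATTENTION conditions are
 * reported and flagging a media change on removable devices.
 */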
2530int
2531scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2532 struct scsi_sense_hdr *sshdr_external)
2533{
2534 char cmd[] = {
2535 TEST_UNIT_READY, 0, 0, 0, 0, 0,
2536 };
2537 struct scsi_sense_hdr *sshdr;
2538 int result;
2539
2540 if (!sshdr_external)
2541 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
2542 else
2543 sshdr = sshdr_external;
2544
2545
2546 do {
2547 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2548 timeout, retries, NULL);
2549 if (sdev->removable && scsi_sense_valid(sshdr) &&
2550 sshdr->sense_key == UNIT_ATTENTION)
2551 sdev->changed = 1;
2552 } while (scsi_sense_valid(sshdr) &&
2553 sshdr->sense_key == UNIT_ATTENTION && --retries);
2554
2555 if (!sshdr_external)
2556 kfree(sshdr);
2557 return result;
2558}
2559EXPORT_SYMBOL(scsi_test_unit_ready);
2560
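/*
 * scsi_device_set_state - take @sdev through the device state model
 * @sdev:	scsi device to change the state of
 * @state:	state to change to
 *
 * Returns 0 if the transition is allowed, -EINVAL otherwise.
 */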
2569int
2570scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2571{
2572 enum scsi_device_state oldstate = sdev->sdev_state;
2573
2574 if (state == oldstate)
2575 return 0;
2576
2577 switch (state) {
2578 case SDEV_CREATED:
2579 switch (oldstate) {
2580 case SDEV_CREATED_BLOCK:
2581 break;
2582 default:
2583 goto illegal;
2584 }
2585 break;
2586
2587 case SDEV_RUNNING:
2588 switch (oldstate) {
2589 case SDEV_CREATED:
2590 case SDEV_OFFLINE:
2591 case SDEV_TRANSPORT_OFFLINE:
2592 case SDEV_QUIESCE:
2593 case SDEV_BLOCK:
2594 break;
2595 default:
2596 goto illegal;
2597 }
2598 break;
2599
2600 case SDEV_QUIESCE:
2601 switch (oldstate) {
2602 case SDEV_RUNNING:
2603 case SDEV_OFFLINE:
2604 case SDEV_TRANSPORT_OFFLINE:
2605 break;
2606 default:
2607 goto illegal;
2608 }
2609 break;
2610
2611 case SDEV_OFFLINE:
2612 case SDEV_TRANSPORT_OFFLINE:
2613 switch (oldstate) {
2614 case SDEV_CREATED:
2615 case SDEV_RUNNING:
2616 case SDEV_QUIESCE:
2617 case SDEV_BLOCK:
2618 break;
2619 default:
2620 goto illegal;
2621 }
2622 break;
2623
2624 case SDEV_BLOCK:
2625 switch (oldstate) {
2626 case SDEV_RUNNING:
2627 case SDEV_CREATED_BLOCK:
2628 break;
2629 default:
2630 goto illegal;
2631 }
2632 break;
2633
2634 case SDEV_CREATED_BLOCK:
2635 switch (oldstate) {
2636 case SDEV_CREATED:
2637 break;
2638 default:
2639 goto illegal;
2640 }
2641 break;
2642
2643 case SDEV_CANCEL:
2644 switch (oldstate) {
2645 case SDEV_CREATED:
2646 case SDEV_RUNNING:
2647 case SDEV_QUIESCE:
2648 case SDEV_OFFLINE:
2649 case SDEV_TRANSPORT_OFFLINE:
2650 case SDEV_BLOCK:
2651 break;
2652 default:
2653 goto illegal;
2654 }
2655 break;
2656
2657 case SDEV_DEL:
2658 switch (oldstate) {
2659 case SDEV_CREATED:
2660 case SDEV_RUNNING:
2661 case SDEV_OFFLINE:
2662 case SDEV_TRANSPORT_OFFLINE:
2663 case SDEV_CANCEL:
2664 case SDEV_CREATED_BLOCK:
2665 break;
2666 default:
2667 goto illegal;
2668 }
2669 break;
2670
2671 }
2672 sdev->sdev_state = state;
2673 return 0;
2674
2675 illegal:
2676 SCSI_LOG_ERROR_RECOVERY(1,
2677 sdev_printk(KERN_ERR, sdev,
2678 "Illegal state transition %s->%s",
2679 scsi_device_state_name(oldstate),
2680 scsi_device_state_name(state))
2681 );
2682 return -EINVAL;
2683}
2684EXPORT_SYMBOL(scsi_device_set_state);
2685
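/*
 * scsi_evt_emit - emit a single SCSI device uevent
 * @sdev:	associated SCSI device
 * @evt:	event to emit
 *
 * Sends a KOBJ_CHANGE uevent with an environment string describing the
 * event type.
 */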
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
		scsi_rescan_device(&sdev->sdev_gendev);
		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
		envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
		break;
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
		break;
	case SDEV_EVT_LUN_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
		break;
	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

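/**
 * scsi_evt_thread - send a uevent for each scsi event
 * @work: work struct for scsi_device
 *
 * Dispatch queued events to their associated scsi_device kobjects
 * as uevents.
 */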
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	enum scsi_device_event evt_type;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
		if (test_and_clear_bit(evt_type, sdev->pending_events))
			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

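/**
 * sdev_evt_send - send asserted event to uevent thread
 * @sdev: scsi_device event occurred on
 * @evt: event to send
 *
 * Assert scsi device event asynchronously.
 */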
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

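/**
 * sdev_evt_alloc - allocate a new scsi event
 * @evt_type: type of event to allocate
 * @gfpflags: GFP flags for allocation
 *
 * Allocates and returns a new scsi_event, or NULL on allocation failure.
 */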
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
	case SDEV_EVT_LUN_CHANGE_REPORTED:
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

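/**
 * sdev_evt_send_simple - send asserted event to uevent thread
 * @sdev: scsi_device event occurred on
 * @evt_type: type of event to send
 * @gfpflags: GFP flags for allocation
 *
 * Assert scsi device event asynchronously, given an event type.
 */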
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);

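/**
 * scsi_device_quiesce - Block user issued commands.
 * @sdev:	scsi device to quiesce.
 *
 * This works by trying to transition to the SDEV_QUIESCE state
 * (which must be a legal transition).  When the device is in this
 * state, only special requests will be accepted, all others will
 * be deferred.  Since special requests may also be requeued requests,
 * a successful return doesn't guarantee the device will be
 * totally quiescent.
 *
 * Must be called with user context, may sleep.
 *
 * Returns zero if unsuccessful or an error if not.
 */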
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (atomic_read(&sdev->device_busy)) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

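/**
 * scsi_device_resume - Restart user issued commands to a quiesced device.
 * @sdev:	scsi device to resume.
 *
 * Moves the device from quiesced back to running and restarts the
 * queues.
 *
 * Must be called with user context, may sleep.
 */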
void scsi_device_resume(struct scsi_device *sdev)
{
	/* check if the device state was mutated prior to resume, and if
	 * so assume the state is being managed elsewhere (for example
	 * device deleted during suspend)
	 */
	if (sdev->sdev_state != SDEV_QUIESCE ||
	    scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

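/**
 * scsi_internal_device_block - put a device temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device.  May sleep.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 * This routine transitions the device to the SDEV_BLOCK state
 * (which must be a legal transition).  When the device is in this
 * state, all commands are deferred until the scsi lld reenables
 * the device with scsi_internal_device_unblock.
 */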
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	if (q->mq_ops) {
		blk_mq_stop_hw_queues(q);
	} else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

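/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 * This routine transitions the device to the SDEV_RUNNING state
 * or to one of the offline states (which must be a legal transition),
 * allowing the midlayer to goose the queue for this device.
 */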
int
scsi_internal_device_unblock(struct scsi_device *sdev,
			     enum scsi_device_state new_state)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING or one of the
	 * offlined states and goose the device queue if successful.
	 */
	if ((sdev->sdev_state == SDEV_BLOCK) ||
	    (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
		sdev->sdev_state = new_state;
	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
		if (new_state == SDEV_TRANSPORT_OFFLINE ||
		    new_state == SDEV_OFFLINE)
			sdev->sdev_state = new_state;
		else
			sdev->sdev_state = SDEV_CREATED;
	} else if (sdev->sdev_state != SDEV_CANCEL &&
		   sdev->sdev_state != SDEV_OFFLINE)
		return -EINVAL;

	if (q->mq_ops) {
		blk_mq_start_stopped_hw_queues(q, false);
	} else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), data,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), &new_state,
					device_unblock);
	else
		device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);

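/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page.
 */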
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

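/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously
 *			   mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */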
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);

void sdev_disable_disk_events(struct scsi_device *sdev)
{
	atomic_inc(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_disable_disk_events);

void sdev_enable_disk_events(struct scsi_device *sdev)
{
	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
		return;
	atomic_dec(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_enable_disk_events);

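/**
 * scsi_vpd_lun_id - return a unique device identification
 * @sdev:	SCSI device
 * @id:		buffer for the identification
 * @id_len:	length of the buffer
 *
 * Copies a unique device identification into @id based
 * on the information in the VPD page 0x83 of the device.
 * The string will be formatted as a SCSI name string.
 *
 * Returns the length of the identification or error on failure.
 * If the identifier is longer than the supplied buffer the actual
 * identifier length is returned and the buffer is not zero-padded.
 */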
int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{
	u8 cur_id_type = 0xff;
	u8 cur_id_size = 0;
	unsigned char *d, *cur_id_str;
	unsigned char __rcu *vpd_pg83;
	int id_size = -EINVAL;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	/*
	 * Look for the correct descriptor.
	 * Order of preference for lun descriptor:
	 * - SCSI name string
	 * - NAA IEEE Registered Extended
	 * - EUI-64 based 16-byte
	 * - EUI-64 based 12-byte
	 * - NAA IEEE Registered
	 * - NAA IEEE Extended
	 * as longer descriptors reduce the likelihood
	 * of identification clashes.
	 */

	/* The id string must be at least 20 bytes + terminating NULL byte */
	if (id_len < 21) {
		rcu_read_unlock();
		return -EINVAL;
	}

	memset(id, 0, id_len);
	d = vpd_pg83 + 4;
	while (d < vpd_pg83 + sdev->vpd_pg83_len) {
		/* Skip designators not referring to the LUN */
		if ((d[1] & 0x30) != 0x00)
			goto next_desig;

		switch (d[1] & 0xf) {
		case 0x2:
			/* EUI-64 */
			if (cur_id_size > d[3])
				break;
			/* Prefer NAA IEEE Registered Extended */
			if (cur_id_type == 0x3 &&
			    cur_id_size == d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "eui.%8phN",
						   cur_id_str);
				break;
			case 12:
				id_size = snprintf(id, id_len,
						   "eui.%12phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "eui.%16phN",
						   cur_id_str);
				break;
			default:
				cur_id_size = 0;
				break;
			}
			break;
		case 0x3:
			/* NAA */
			if (cur_id_size > d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "naa.%8phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "naa.%16phN",
						   cur_id_str);
				break;
			default:
				cur_id_size = 0;
				break;
			}
			break;
		case 0x8:
			/* SCSI name string */
			if (cur_id_size + 4 > d[3])
				break;
			/* Prefer others for truncated descriptor */
			if (cur_id_size && d[3] > id_len)
				break;
			cur_id_size = id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			if (cur_id_size >= id_len)
				cur_id_size = id_len - 1;
			memcpy(id, cur_id_str, cur_id_size);
			/* Decrease priority for truncated descriptor */
			if (cur_id_size != id_size)
				cur_id_size = 6;
			break;
		default:
			break;
		}
next_desig:
		d += d[3] + 4;
	}
	rcu_read_unlock();

	return id_size;
}
EXPORT_SYMBOL(scsi_vpd_lun_id);

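/**
 * scsi_vpd_tpg_id - return a target port group identifier
 * @sdev:	SCSI device
 * @rel_id:	pointer to return the relative target port in, if not %NULL
 *
 * Returns the Target Port Group identifier from the information
 * in VPD page 0x83 of the device, or an error on failure.
 */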
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{
	unsigned char *d;
	unsigned char __rcu *vpd_pg83;
	int group_id = -EAGAIN, rel_port = -1;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	/* Walk the designators using the rcu_dereference()d copy,
	 * not the __rcu-annotated pointer itself.
	 */
	d = vpd_pg83 + 4;
	while (d < vpd_pg83 + sdev->vpd_pg83_len) {
		switch (d[1] & 0xf) {
		case 0x4:
			/* Relative target port */
			rel_port = get_unaligned_be16(&d[6]);
			break;
		case 0x5:
			/* Target port group */
			group_id = get_unaligned_be16(&d[6]);
			break;
		default:
			break;
		}
		d += d[3] + 4;
	}
	rcu_read_unlock();

	if (group_id >= 0 && rel_id && rel_port != -1)
		*rel_id = rel_port;

	return group_id;
}
EXPORT_SYMBOL(scsi_vpd_tpg_id);