/*
 * SCSI queueing library: midlayer helpers for building, queueing,
 * dispatching and completing SCSI commands, for both the legacy
 * request_fn path and blk-mq.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include <trace/events/scsi.h>

#include "scsi_priv.h"
#include "scsi_logging.h"
39
40
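/*
 * A scsi_cmnd's scatterlist can need anywhere from a handful to
 * SCSI_MAX_SG_SEGMENTS entries.  Rather than always allocating the
 * worst case, keep a small set of mempool-backed slabs of increasing
 * size (8, 16, 32, ... entries) and pick the smallest one that fits.
 */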
41#define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
42#define SG_MEMPOOL_SIZE 2
43
44struct scsi_host_sg_pool {
45 size_t size;
46 char *name;
47 struct kmem_cache *slab;
48 mempool_t *pool;
49};
50
#define SP(x) { .size = x, .name = "sgpool-" __stringify(x) }
52#if (SCSI_MAX_SG_SEGMENTS < 32)
53#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
54#endif
55static struct scsi_host_sg_pool scsi_sg_pools[] = {
56 SP(8),
57 SP(16),
58#if (SCSI_MAX_SG_SEGMENTS > 32)
59 SP(32),
60#if (SCSI_MAX_SG_SEGMENTS > 64)
61 SP(64),
62#if (SCSI_MAX_SG_SEGMENTS > 128)
63 SP(128),
64#if (SCSI_MAX_SG_SEGMENTS > 256)
65#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
66#endif
67#endif
68#endif
69#endif
70 SP(SCSI_MAX_SG_SEGMENTS)
71};
72#undef SP
73
74struct kmem_cache *scsi_sdb_cache;

/*
 * Delay (in milliseconds) before a queue is rerun after a resource
 * shortage (busy host, target or device).
 */
81#define SCSI_QUEUE_DELAY 3
82
83static void
84scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
85{
86 struct Scsi_Host *host = cmd->device->host;
87 struct scsi_device *device = cmd->device;
88 struct scsi_target *starget = scsi_target(device);
89
 /*
  * Set the appropriate "blocked" counter for the host, target or
  * device, depending on why the LLDD rejected the command; while a
  * counter is non-zero the *_queue_ready() checks below hold off
  * further dispatch to that object.
  */
103 switch (reason) {
104 case SCSI_MLQUEUE_HOST_BUSY:
105 atomic_set(&host->host_blocked, host->max_host_blocked);
106 break;
107 case SCSI_MLQUEUE_DEVICE_BUSY:
108 case SCSI_MLQUEUE_EH_RETRY:
109 atomic_set(&device->device_blocked,
110 device->max_device_blocked);
111 break;
112 case SCSI_MLQUEUE_TARGET_BUSY:
113 atomic_set(&starget->target_blocked,
114 starget->max_target_blocked);
115 break;
116 }
117}
118
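/*
 * Put a blk-mq request back on the requeue list, kick the list so it
 * gets processed, and drop the device reference taken at dispatch.
 */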
119static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
120{
121 struct scsi_device *sdev = cmd->device;
122 struct request_queue *q = cmd->request->q;
123
124 blk_mq_requeue_request(cmd->request);
125 blk_mq_kick_requeue_list(q);
126 put_device(&sdev->sdev_gendev);
127}

/*
 * __scsi_queue_insert - private queue insertion
 * @cmd: the SCSI command being requeued
 * @reason: the reason for the requeue
 * @unbusy: whether the device should be marked unbusy
 *
 * The public scsi_queue_insert() always unbusies the device because it
 * is called before completion; this variant is used for requeues after
 * completion, where the busy counts have already been dropped.
 */
141static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
142{
143 struct scsi_device *device = cmd->device;
144 struct request_queue *q = device->request_queue;
145 unsigned long flags;
146
147 SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
148 "Inserting command %p into mlqueue\n", cmd));
149
150 scsi_set_blocked(cmd, reason);

 /*
  * Decrement the busy counters, since this command is no longer
  * active on the host/device.
  */
156 if (unbusy)
157 scsi_device_unbusy(device);

 /*
  * Requeue this command.  It will go before all other commands
  * that are already in the queue.  For the legacy path the requeue
  * work is scheduled under the queue lock so that it runs before
  * blk_cleanup_queue() can finish.
  */
165 cmd->result = 0;
166 if (q->mq_ops) {
167 scsi_mq_requeue_cmd(cmd);
168 return;
169 }
170 spin_lock_irqsave(q->queue_lock, flags);
171 blk_requeue_request(q, cmd->request);
172 kblockd_schedule_work(&device->requeue_work);
173 spin_unlock_irqrestore(q->queue_lock, flags);
174}

/*
 * scsi_queue_insert - reinsert a command into the queue
 * @cmd: command that we are adding to the queue
 * @reason: why we are inserting the command
 *
 * Used when the host is busy and cannot accept any more commands for
 * the time being, or when the device returned QUEUE_FULL.  May be
 * called from interrupt or normal process context.
 */
195void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
196{
197 __scsi_queue_insert(cmd, reason, 1);
198}

/*
 * scsi_execute - issue a SCSI command and wait for it to complete
 *
 * Builds a REQ_TYPE_BLOCK_PC request for @cmd on @sdev's queue, maps
 * @buffer when a transfer is requested, executes it synchronously and
 * returns the req->errors value (the scsi_cmnd result field).  An
 * optional @sense buffer and @resid pointer receive sense data and
 * the residual count.
 */
215int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
216 int data_direction, void *buffer, unsigned bufflen,
217 unsigned char *sense, int timeout, int retries, u64 flags,
218 int *resid)
219{
220 struct request *req;
221 int write = (data_direction == DMA_TO_DEVICE);
222 int ret = DRIVER_ERROR << 24;
223
224 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
225 if (IS_ERR(req))
226 return ret;
227 blk_rq_set_block_pc(req);
228
229 if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
230 buffer, bufflen, __GFP_WAIT))
231 goto out;
232
233 req->cmd_len = COMMAND_SIZE(cmd[0]);
234 memcpy(req->cmd, cmd, req->cmd_len);
235 req->sense = sense;
236 req->sense_len = 0;
237 req->retries = retries;
238 req->timeout = timeout;
239 req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
240
241
242
243
244 blk_execute_rq(req->q, NULL, req, 1);

 /*
  * Some devices (USB mass-storage in particular) may transfer
  * garbage data together with a residue indicating that the data
  * is invalid.  Prevent the garbage from being misinterpreted
  * and prevent security leaks by zeroing out the excess data.
  */
252 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
253 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
254
255 if (resid)
256 *resid = req->resid_len;
257 ret = req->errors;
258 out:
259 blk_put_request(req);
260
261 return ret;
262}
263EXPORT_SYMBOL(scsi_execute);
264
265int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
266 int data_direction, void *buffer, unsigned bufflen,
267 struct scsi_sense_hdr *sshdr, int timeout, int retries,
268 int *resid, u64 flags)
269{
270 char *sense = NULL;
271 int result;
272
273 if (sshdr) {
274 sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
275 if (!sense)
276 return DRIVER_ERROR << 24;
277 }
278 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
279 sense, timeout, retries, flags, resid);
280 if (sshdr)
281 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
282
283 kfree(sense);
284 return result;
285}
286EXPORT_SYMBOL(scsi_execute_req_flags);

/*
 * scsi_init_cmd_errh - initialize cmd fields related to error handling
 * @cmd: command that is ready to be queued
 *
 * Called once per command before dispatch to (re)set the fields the
 * completion and error handling paths rely on.
 */
299static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
300{
301 cmd->serial_number = 0;
302 scsi_set_resid(cmd, 0);
303 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
304 if (cmd->cmd_len == 0)
305 cmd->cmd_len = scsi_command_size(cmd->cmnd);
306}
307
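/*
 * Called when a command has been completed or requeued: drop the busy
 * counts taken at dispatch time and, if error recovery is waiting for
 * the host to go idle, wake the error handler.
 */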
308void scsi_device_unbusy(struct scsi_device *sdev)
309{
310 struct Scsi_Host *shost = sdev->host;
311 struct scsi_target *starget = scsi_target(sdev);
312 unsigned long flags;
313
314 atomic_dec(&shost->host_busy);
315 if (starget->can_queue > 0)
316 atomic_dec(&starget->target_busy);
317
318 if (unlikely(scsi_host_in_recovery(shost) &&
319 (shost->host_failed || shost->host_eh_scheduled))) {
320 spin_lock_irqsave(shost->host_lock, flags);
321 scsi_eh_wakeup(shost);
322 spin_unlock_irqrestore(shost->host_lock, flags);
323 }
324
325 atomic_dec(&sdev->device_busy);
326}
327
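/* Restart a queue, using whichever mechanism (blk-mq or legacy) it uses. */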
328static void scsi_kick_queue(struct request_queue *q)
329{
330 if (q->mq_ops)
331 blk_mq_start_hw_queues(q);
332 else
333 blk_run_queue(q);
334}
335

/*
 * Called for single_lun devices on IO completion: clear
 * starget_sdev_user and kick the queues of all scsi_devices on the
 * target, starting with current_sdev.  Called with no SCSI locks held.
 */
343static void scsi_single_lun_run(struct scsi_device *current_sdev)
344{
345 struct Scsi_Host *shost = current_sdev->host;
346 struct scsi_device *sdev, *tmp;
347 struct scsi_target *starget = scsi_target(current_sdev);
348 unsigned long flags;
349
350 spin_lock_irqsave(shost->host_lock, flags);
351 starget->starget_sdev_user = NULL;
352 spin_unlock_irqrestore(shost->host_lock, flags);
353
354
355
356
357
358
359
360 scsi_kick_queue(current_sdev->request_queue);
361
362 spin_lock_irqsave(shost->host_lock, flags);
363 if (starget->starget_sdev_user)
364 goto out;
365 list_for_each_entry_safe(sdev, tmp, &starget->devices,
366 same_target_siblings) {
367 if (sdev == current_sdev)
368 continue;
369 if (scsi_device_get(sdev))
370 continue;
371
372 spin_unlock_irqrestore(shost->host_lock, flags);
373 scsi_kick_queue(sdev->request_queue);
374 spin_lock_irqsave(shost->host_lock, flags);
375
376 scsi_device_put(sdev);
377 }
378 out:
379 spin_unlock_irqrestore(shost->host_lock, flags);
380}
381
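/*
 * The *_is_busy() helpers below check the busy/blocked counters without
 * any locking; the result is only a hint and may be stale by the time
 * the caller acts on it.
 */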
382static inline bool scsi_device_is_busy(struct scsi_device *sdev)
383{
384 if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
385 return true;
386 if (atomic_read(&sdev->device_blocked) > 0)
387 return true;
388 return false;
389}
390
391static inline bool scsi_target_is_busy(struct scsi_target *starget)
392{
393 if (starget->can_queue > 0) {
394 if (atomic_read(&starget->target_busy) >= starget->can_queue)
395 return true;
396 if (atomic_read(&starget->target_blocked) > 0)
397 return true;
398 }
399 return false;
400}
401
402static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
403{
404 if (shost->can_queue > 0 &&
405 atomic_read(&shost->host_busy) >= shost->can_queue)
406 return true;
407 if (atomic_read(&shost->host_blocked) > 0)
408 return true;
409 if (shost->host_self_blocked)
410 return true;
411 return false;
412}
413
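/*
 * Walk the list of devices that were starved of resources earlier and
 * rerun their queues now that the host may be able to accept commands
 * again.
 */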
414static void scsi_starved_list_run(struct Scsi_Host *shost)
415{
416 LIST_HEAD(starved_list);
417 struct scsi_device *sdev;
418 unsigned long flags;
419
420 spin_lock_irqsave(shost->host_lock, flags);
421 list_splice_init(&shost->starved_list, &starved_list);
422
423 while (!list_empty(&starved_list)) {
424 struct request_queue *slq;
425

 /*
  * As long as the host keeps accepting commands and we still have
  * starved queues, kick them.  scsi_request_fn() drops the
  * queue_lock and can add us back to the starved_list, which is
  * protected by the host_lock together with starved_entry.
  */
436 if (scsi_host_is_busy(shost))
437 break;
438
439 sdev = list_entry(starved_list.next,
440 struct scsi_device, starved_entry);
441 list_del_init(&sdev->starved_entry);
442 if (scsi_target_is_busy(scsi_target(sdev))) {
443 list_move_tail(&sdev->starved_entry,
444 &shost->starved_list);
445 continue;
446 }

 /*
  * Once we drop the host lock, a racing scsi_remove_device() call
  * may remove the sdev from the starved list and destroy it and
  * the queue.  Mitigate by taking a reference to the queue and
  * never touching the sdev again after dropping the host lock.
  * If blk_cleanup_queue() has already run, blk_run_queue() simply
  * returns because the queue is marked QUEUE_FLAG_DYING.
  */
458 slq = sdev->request_queue;
459 if (!blk_get_queue(slq))
460 continue;
461 spin_unlock_irqrestore(shost->host_lock, flags);
462
463 scsi_kick_queue(slq);
464 blk_put_queue(slq);
465
466 spin_lock_irqsave(shost->host_lock, flags);
467 }
468
469 list_splice(&starved_list, &shost->starved_list);
470 spin_unlock_irqrestore(shost->host_lock, flags);
471}

/*
 * scsi_run_queue - select a proper request queue to serve next
 * @q: last request's queue
 *
 * The previous command was completely finished; start a new one if
 * possible.
 */
485static void scsi_run_queue(struct request_queue *q)
486{
487 struct scsi_device *sdev = q->queuedata;
488
489 if (scsi_target(sdev)->single_lun)
490 scsi_single_lun_run(sdev);
491 if (!list_empty(&sdev->host->starved_list))
492 scsi_starved_list_run(sdev->host);
493
494 if (q->mq_ops)
495 blk_mq_start_stopped_hw_queues(q, false);
496 else
497 blk_run_queue(q);
498}
499
500void scsi_requeue_run_queue(struct work_struct *work)
501{
502 struct scsi_device *sdev;
503 struct request_queue *q;
504
505 sdev = container_of(work, struct scsi_device, requeue_work);
506 q = sdev->request_queue;
507 scsi_run_queue(q);
508}

/*
 * scsi_requeue_command - handle post-processing of completed commands
 * @q: queue to operate on
 * @cmd: command that may need to be requeued
 *
 * After command completion there may be blocks left over which weren't
 * finished by the previous command, typically because of an I/O error
 * in the middle of the request; requeue so the remaining blocks are
 * issued again.  Upon return, cmd is a stale pointer.
 */
528static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
529{
530 struct scsi_device *sdev = cmd->device;
531 struct request *req = cmd->request;
532 unsigned long flags;
533
534 spin_lock_irqsave(q->queue_lock, flags);
535 blk_unprep_request(req);
536 req->special = NULL;
537 scsi_put_command(cmd);
538 blk_requeue_request(q, req);
539 spin_unlock_irqrestore(q->queue_lock, flags);
540
541 scsi_run_queue(q);
542
543 put_device(&sdev->sdev_gendev);
544}
545
546void scsi_run_host_queues(struct Scsi_Host *shost)
547{
548 struct scsi_device *sdev;
549
550 shost_for_each_device(sdev, shost)
551 scsi_run_queue(sdev->request_queue);
552}
553
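/* Map a segment count onto the index of the smallest sg pool that fits it. */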
554static inline unsigned int scsi_sgtable_index(unsigned short nents)
555{
556 unsigned int index;
557
558 BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
559
560 if (nents <= 8)
561 index = 0;
562 else
563 index = get_count_order(nents) - 3;
564
565 return index;
566}
567
568static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
569{
570 struct scsi_host_sg_pool *sgp;
571
572 sgp = scsi_sg_pools + scsi_sgtable_index(nents);
573 mempool_free(sgl, sgp->pool);
574}
575
576static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
577{
578 struct scsi_host_sg_pool *sgp;
579
580 sgp = scsi_sg_pools + scsi_sgtable_index(nents);
581 return mempool_alloc(sgp->pool, gfp_mask);
582}
583
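/*
 * Allocate or free a (possibly chained) scatterlist table, using the
 * sg pools above for each chunk.  For blk-mq a first chunk is embedded
 * in the command itself and only larger tables fall back to the pools.
 */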
584static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
585{
586 if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
587 return;
588 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
589}
590
591static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
592{
593 struct scatterlist *first_chunk = NULL;
594 int ret;
595
596 BUG_ON(!nents);
597
598 if (mq) {
599 if (nents <= SCSI_MAX_SG_SEGMENTS) {
600 sdb->table.nents = sdb->table.orig_nents = nents;
601 sg_init_table(sdb->table.sgl, nents);
602 return 0;
603 }
604 first_chunk = sdb->table.sgl;
605 }
606
607 ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
608 first_chunk, GFP_ATOMIC, scsi_sg_alloc);
609 if (unlikely(ret))
610 scsi_free_sgtable(sdb, mq);
611 return ret;
612}
613
614static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
615{
616 if (cmd->request->cmd_type == REQ_TYPE_FS) {
617 struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
618
619 if (drv->uninit_command)
620 drv->uninit_command(cmd);
621 }
622}
623
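/*
 * Tear down the data, bidi and protection scatterlists of a blk-mq
 * command before the request is completed or requeued.
 */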
624static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
625{
626 if (cmd->sdb.table.nents)
627 scsi_free_sgtable(&cmd->sdb, true);
628 if (cmd->request->next_rq && cmd->request->next_rq->special)
629 scsi_free_sgtable(cmd->request->next_rq->special, true);
630 if (scsi_prot_sg_count(cmd))
631 scsi_free_sgtable(cmd->prot_sdb, true);
632}
633
634static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
635{
636 struct scsi_device *sdev = cmd->device;
637 struct Scsi_Host *shost = sdev->host;
638 unsigned long flags;
639
640 scsi_mq_free_sgtables(cmd);
641 scsi_uninit_cmd(cmd);
642
643 if (shost->use_cmd_list) {
644 BUG_ON(list_empty(&cmd->list));
645 spin_lock_irqsave(&sdev->list_lock, flags);
646 list_del_init(&cmd->list);
647 spin_unlock_irqrestore(&sdev->list_lock, flags);
648 }
649}
650

/*
 * scsi_release_buffers - free resources allocated for a scsi_cmnd
 * @cmd: command that we are bailing on
 *
 * If an upper level driver rejects a command we must release the
 * resources allocated while preparing it, primarily the
 * scatter-gather tables.  No locks are assumed held.
 */
667static void scsi_release_buffers(struct scsi_cmnd *cmd)
668{
669 if (cmd->sdb.table.nents)
670 scsi_free_sgtable(&cmd->sdb, false);
671
672 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
673
674 if (scsi_prot_sg_count(cmd))
675 scsi_free_sgtable(cmd->prot_sdb, false);
676}
677
678static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
679{
680 struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
681
682 scsi_free_sgtable(bidi_sdb, false);
683 kmem_cache_free(scsi_sdb_cache, bidi_sdb);
684 cmd->request->next_rq->special = NULL;
685}
686
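/*
 * Complete @bytes of @req (and @bidi_bytes of its paired request, if
 * any).  Returns false once the request is fully finished and all
 * resources have been released, true if there is more of the request
 * left to process.
 */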
687static bool scsi_end_request(struct request *req, int error,
688 unsigned int bytes, unsigned int bidi_bytes)
689{
690 struct scsi_cmnd *cmd = req->special;
691 struct scsi_device *sdev = cmd->device;
692 struct request_queue *q = sdev->request_queue;
693
694 if (blk_update_request(req, error, bytes))
695 return true;

 /* A bidi request must be completed as a whole. */
698 if (unlikely(bidi_bytes) &&
699 blk_update_request(req->next_rq, error, bidi_bytes))
700 return true;
701
702 if (blk_queue_add_random(q))
703 add_disk_randomness(req->rq_disk);
704
705 if (req->mq_ctx) {
 /*
  * In the blk-mq case the command is freed by
  * __blk_mq_end_request(), so all cleanup that depends on it has
  * to happen first.  We also cannot kick the queues from irq
  * context, so that is deferred to kblockd where needed.
  */
713 scsi_mq_uninit_cmd(cmd);
714
715 __blk_mq_end_request(req, error);
716
717 if (scsi_target(sdev)->single_lun ||
718 !list_empty(&sdev->host->starved_list))
719 kblockd_schedule_work(&sdev->requeue_work);
720 else
721 blk_mq_start_stopped_hw_queues(q, true);
722 } else {
723 unsigned long flags;
724
725 if (bidi_bytes)
726 scsi_release_bidi_buffers(cmd);
727
728 spin_lock_irqsave(q->queue_lock, flags);
729 blk_finish_request(req, error);
730 spin_unlock_irqrestore(q->queue_lock, flags);
731
732 scsi_release_buffers(cmd);
733
734 scsi_put_command(cmd);
735 scsi_run_queue(q);
736 }
737
738 put_device(&sdev->sdev_gendev);
739 return false;
740}
741

/*
 * __scsi_error_from_host_byte - translate a SCSI result into an errno
 *
 * Maps the host byte of @result onto a negative errno for the block
 * layer; for target-side failures the host byte is reset to DID_OK so
 * only the errno is propagated.
 */
756static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
757{
758 int error = 0;
759
760 switch(host_byte(result)) {
761 case DID_TRANSPORT_FAILFAST:
762 error = -ENOLINK;
763 break;
764 case DID_TARGET_FAILURE:
765 set_host_byte(cmd, DID_OK);
766 error = -EREMOTEIO;
767 break;
768 case DID_NEXUS_FAILURE:
769 set_host_byte(cmd, DID_OK);
770 error = -EBADE;
771 break;
772 case DID_ALLOC_FAILURE:
773 set_host_byte(cmd, DID_OK);
774 error = -ENOSPC;
775 break;
776 case DID_MEDIUM_ERROR:
777 set_host_byte(cmd, DID_OK);
778 error = -ENODATA;
779 break;
780 default:
781 error = -EIO;
782 break;
783 }
784
785 return error;
786}
787

/*
 * scsi_io_completion - completion processing for block device I/O requests
 * @cmd: command that is finished
 * @good_bytes: number of bytes completed without error
 *
 * Finish off as much of the request as completed successfully.  If the
 * request is not done yet we either:
 *
 *   a) requeue it via scsi_requeue_command()/scsi_mq_requeue_cmd() so it
 *      is unprepared and a fresh command is built for it (ACTION_REPREP),
 *   b) put it back with __scsi_queue_insert() and retry the same command,
 *      possibly after a delay (ACTION_RETRY / ACTION_DELAYED_RETRY), or
 *   c) fail the remainder of the request (ACTION_FAIL).
 */
817void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
818{
819 int result = cmd->result;
820 struct request_queue *q = cmd->device->request_queue;
821 struct request *req = cmd->request;
822 int error = 0;
823 struct scsi_sense_hdr sshdr;
824 bool sense_valid = false;
825 int sense_deferred = 0, level = 0;
826 enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
827 ACTION_DELAYED_RETRY} action;
828 unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
829
830 if (result) {
831 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
832 if (sense_valid)
833 sense_deferred = scsi_sense_is_deferred(&sshdr);
834 }
835
836 if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
837 if (result) {
838 if (sense_valid && req->sense) {
839
840
841
842 int len = 8 + cmd->sense_buffer[7];
843
844 if (len > SCSI_SENSE_BUFFERSIZE)
845 len = SCSI_SENSE_BUFFERSIZE;
846 memcpy(req->sense, cmd->sense_buffer, len);
847 req->sense_len = len;
848 }
849 if (!sense_deferred)
850 error = __scsi_error_from_host_byte(cmd, result);
851 }
852
853
854
855 req->errors = cmd->result;
856
857 req->resid_len = scsi_get_resid(cmd);
858
859 if (scsi_bidi_cmnd(cmd)) {
860
861
862
863
864 req->next_rq->resid_len = scsi_in(cmd)->resid;
865 if (scsi_end_request(req, 0, blk_rq_bytes(req),
866 blk_rq_bytes(req->next_rq)))
867 BUG();
868 return;
869 }
870 } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
 /*
  * Certain non-BLOCK_PC requests don't actually transfer
  * anything (e.g. FLUSH), so good_bytes != blk_rq_bytes(req)
  * can't be used as the error signal; set the error explicitly
  * here instead.
  */
877 error = __scsi_error_from_host_byte(cmd, result);
878 }
879
880
881 BUG_ON(blk_bidi_rq(req));
882
883
884
885
886
887 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
888 "%u sectors total, %d bytes done.\n",
889 blk_rq_sectors(req), good_bytes));

 /*
  * Recovered errors need reporting, but they're always treated
  * as success, so fiddle the result code here.  For passthrough
  * requests we already took a copy of the original into
  * rq->errors, which is what gets returned to the user.
  */
897 if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
898
899
900
901
902 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
903 ;
904 else if (!(req->cmd_flags & REQ_QUIET))
905 scsi_print_sense(cmd);
906 result = 0;
907
908 error = 0;
909 }

 /*
  * If we finished all bytes in the request we are done now.
  */
914 if (!scsi_end_request(req, error, good_bytes, 0))
915 return;

 /*
  * Kill the remainder if no retries are allowed.
  */
920 if (error && scsi_noretry_cmd(cmd)) {
921 if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
922 BUG();
923 return;
924 }

 /*
  * If there was no error but we have leftover bytes in the
  * request, just queue the command up again.
  */
930 if (result == 0)
931 goto requeue;
932
933 error = __scsi_error_from_host_byte(cmd, result);
934
935 if (host_byte(result) == DID_RESET) {
936
937
938
939
940 action = ACTION_RETRY;
941 } else if (sense_valid && !sense_deferred) {
942 switch (sshdr.sense_key) {
943 case UNIT_ATTENTION:
944 if (cmd->device->removable) {
945
946
947
948 cmd->device->changed = 1;
949 action = ACTION_FAIL;
950 } else {
951
952
953
954
955
956 action = ACTION_RETRY;
957 }
958 break;
959 case ILLEGAL_REQUEST:
 /*
  * An ILLEGAL REQUEST may mean we issued an unsupported command,
  * typically a ten byte read/write on a device that only supports
  * the six byte variants, or a read past the end of the disk when
  * READ CAPACITY failed.
  */
968 if ((cmd->device->use_10_for_rw &&
969 sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
970 (cmd->cmnd[0] == READ_10 ||
971 cmd->cmnd[0] == WRITE_10)) {
972
973 cmd->device->use_10_for_rw = 0;
974 action = ACTION_REPREP;
975 } else if (sshdr.asc == 0x10) {
976 action = ACTION_FAIL;
977 error = -EILSEQ;
978
979 } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
980 action = ACTION_FAIL;
981 error = -EREMOTEIO;
982 } else
983 action = ACTION_FAIL;
984 break;
985 case ABORTED_COMMAND:
986 action = ACTION_FAIL;
987 if (sshdr.asc == 0x10)
988 error = -EILSEQ;
989 break;
990 case NOT_READY:
991
992
993
994 if (sshdr.asc == 0x04) {
995 switch (sshdr.ascq) {
996 case 0x01:
997 case 0x04:
998 case 0x05:
999 case 0x06:
1000 case 0x07:
1001 case 0x08:
1002 case 0x09:
1003 case 0x14:
1004 action = ACTION_DELAYED_RETRY;
1005 break;
1006 default:
1007 action = ACTION_FAIL;
1008 break;
1009 }
1010 } else
1011 action = ACTION_FAIL;
1012 break;
1013 case VOLUME_OVERFLOW:
1014
1015 action = ACTION_FAIL;
1016 break;
1017 default:
1018 action = ACTION_FAIL;
1019 break;
1020 }
1021 } else
1022 action = ACTION_FAIL;
1023
1024 if (action != ACTION_FAIL &&
1025 time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
1026 action = ACTION_FAIL;
1027
1028 switch (action) {
1029 case ACTION_FAIL:
1030
1031 if (!(req->cmd_flags & REQ_QUIET)) {
1032 static DEFINE_RATELIMIT_STATE(_rs,
1033 DEFAULT_RATELIMIT_INTERVAL,
1034 DEFAULT_RATELIMIT_BURST);
1035
1036 if (unlikely(scsi_logging_level))
1037 level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
1038 SCSI_LOG_MLCOMPLETE_BITS);
1039
1040
1041
1042
1043
1044 if (!level && __ratelimit(&_rs)) {
1045 scsi_print_result(cmd, NULL, FAILED);
1046 if (driver_byte(result) & DRIVER_SENSE)
1047 scsi_print_sense(cmd);
1048 scsi_print_command(cmd);
1049 }
1050 }
1051 if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
1052 return;
1053
1054 case ACTION_REPREP:
1055 requeue:
 /*
  * Unprep the request and put it back at the head of the queue.
  * A new command will be prepared and issued.
  */
1059 if (q->mq_ops) {
1060 cmd->request->cmd_flags &= ~REQ_DONTPREP;
1061 scsi_mq_uninit_cmd(cmd);
1062 scsi_mq_requeue_cmd(cmd);
1063 } else {
1064 scsi_release_buffers(cmd);
1065 scsi_requeue_command(q, cmd);
1066 }
1067 break;
1068 case ACTION_RETRY:
1069
1070 __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
1071 break;
1072 case ACTION_DELAYED_RETRY:
1073
1074 __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
1075 break;
1076 }
1077}
1078
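/*
 * Allocate a scatterlist table sized for the request and map the
 * request's bios into it.
 */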
1079static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
1080{
1081 int count;

 /*
  * If sg table allocation fails, requeue the request later.
  */
1086 if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
1087 req->mq_ctx != NULL)))
1088 return BLKPREP_DEFER;

 /*
  * Next, walk the list, and fill in the addresses and sizes of
  * each segment.
  */
1094 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
1095 BUG_ON(count > sdb->table.nents);
1096 sdb->table.nents = count;
1097 sdb->length = blk_rq_bytes(req);
1098 return BLKPREP_OK;
1099}

/*
 * scsi_init_io - map a command's data (and integrity/bidi data, if any)
 * @cmd: command descriptor to initialize
 *
 * Returns BLKPREP_OK on success, BLKPREP_DEFER if the failure is
 * retryable or BLKPREP_KILL if it is fatal.
 */
1112int scsi_init_io(struct scsi_cmnd *cmd)
1113{
1114 struct scsi_device *sdev = cmd->device;
1115 struct request *rq = cmd->request;
1116 bool is_mq = (rq->mq_ctx != NULL);
1117 int error;
1118
1119 BUG_ON(!rq->nr_phys_segments);
1120
1121 error = scsi_init_sgtable(rq, &cmd->sdb);
1122 if (error)
1123 goto err_exit;
1124
1125 if (blk_bidi_rq(rq)) {
1126 if (!rq->q->mq_ops) {
1127 struct scsi_data_buffer *bidi_sdb =
1128 kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
1129 if (!bidi_sdb) {
1130 error = BLKPREP_DEFER;
1131 goto err_exit;
1132 }
1133
1134 rq->next_rq->special = bidi_sdb;
1135 }
1136
1137 error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
1138 if (error)
1139 goto err_exit;
1140 }
1141
1142 if (blk_integrity_rq(rq)) {
1143 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
1144 int ivecs, count;
1145
1146 if (prot_sdb == NULL) {
 /*
  * This can happen if someone (e.g. multipath) queues a
  * command to a device on an adapter that does not support DIX.
  */
1152 WARN_ON_ONCE(1);
1153 error = BLKPREP_KILL;
1154 goto err_exit;
1155 }
1156
1157 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
1158
1159 if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
1160 error = BLKPREP_DEFER;
1161 goto err_exit;
1162 }
1163
1164 count = blk_rq_map_integrity_sg(rq->q, rq->bio,
1165 prot_sdb->table.sgl);
1166 BUG_ON(unlikely(count > ivecs));
1167 BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
1168
1169 cmd->prot_sdb = prot_sdb;
1170 cmd->prot_sdb->table.nents = count;
1171 }
1172
1173 return BLKPREP_OK;
1174err_exit:
1175 if (is_mq) {
1176 scsi_mq_free_sgtables(cmd);
1177 } else {
1178 scsi_release_buffers(cmd);
1179 cmd->request->special = NULL;
1180 scsi_put_command(cmd);
1181 put_device(&sdev->sdev_gendev);
1182 }
1183 return error;
1184}
1185EXPORT_SYMBOL(scsi_init_io);
1186
1187static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1188 struct request *req)
1189{
1190 struct scsi_cmnd *cmd;
1191
1192 if (!req->special) {
1193
1194 if (!get_device(&sdev->sdev_gendev))
1195 return NULL;
1196
1197 cmd = scsi_get_command(sdev, GFP_ATOMIC);
1198 if (unlikely(!cmd)) {
1199 put_device(&sdev->sdev_gendev);
1200 return NULL;
1201 }
1202 req->special = cmd;
1203 } else {
1204 cmd = req->special;
1205 }
1206
1207
1208 cmd->tag = req->tag;
1209 cmd->request = req;
1210
1211 cmd->cmnd = req->cmd;
1212 cmd->prot_op = SCSI_PROT_NORMAL;
1213
1214 return cmd;
1215}
1216
1217static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1218{
1219 struct scsi_cmnd *cmd = req->special;
1220

 /*
  * BLOCK_PC requests may transfer data, in which case they must
  * have a bio attached to them.  Or they might contain a SCSI
  * command that does not transfer data, in which case they may
  * optionally submit a request without an attached bio.
  */
1227 if (req->bio) {
1228 int ret = scsi_init_io(cmd);
1229 if (unlikely(ret))
1230 return ret;
1231 } else {
1232 BUG_ON(blk_rq_bytes(req));
1233
1234 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1235 }
1236
1237 cmd->cmd_len = req->cmd_len;
1238 cmd->transfersize = blk_rq_bytes(req);
1239 cmd->allowed = req->retries;
1240 return BLKPREP_OK;
1241}

/*
 * Setup a REQ_TYPE_FS command.  These are simple requests from
 * filesystems that still need to be translated to SCSI CDBs by
 * the ULD.
 */
1247static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1248{
1249 struct scsi_cmnd *cmd = req->special;
1250
1251 if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1252 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1253 int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1254 if (ret != BLKPREP_OK)
1255 return ret;
1256 }
1257
1258 memset(cmd->cmnd, 0, BLK_MAX_CDB);
1259 return scsi_cmd_to_driver(cmd)->init_command(cmd);
1260}
1261
1262static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
1263{
1264 struct scsi_cmnd *cmd = req->special;
1265
1266 if (!blk_rq_bytes(req))
1267 cmd->sc_data_direction = DMA_NONE;
1268 else if (rq_data_dir(req) == WRITE)
1269 cmd->sc_data_direction = DMA_TO_DEVICE;
1270 else
1271 cmd->sc_data_direction = DMA_FROM_DEVICE;
1272
1273 switch (req->cmd_type) {
1274 case REQ_TYPE_FS:
1275 return scsi_setup_fs_cmnd(sdev, req);
1276 case REQ_TYPE_BLOCK_PC:
1277 return scsi_setup_blk_pc_cmnd(sdev, req);
1278 default:
1279 return BLKPREP_KILL;
1280 }
1281}
1282
1283static int
1284scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1285{
1286 int ret = BLKPREP_OK;

 /*
  * If the device is not in running state we will reject some or
  * all commands.
  */
1292 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1293 switch (sdev->sdev_state) {
1294 case SDEV_OFFLINE:
1295 case SDEV_TRANSPORT_OFFLINE:
1296
1297
1298
1299
1300
1301 sdev_printk(KERN_ERR, sdev,
1302 "rejecting I/O to offline device\n");
1303 ret = BLKPREP_KILL;
1304 break;
1305 case SDEV_DEL:
1306
1307
1308
1309
1310 sdev_printk(KERN_ERR, sdev,
1311 "rejecting I/O to dead device\n");
1312 ret = BLKPREP_KILL;
1313 break;
1314 case SDEV_BLOCK:
1315 case SDEV_CREATED_BLOCK:
1316 ret = BLKPREP_DEFER;
1317 break;
1318 case SDEV_QUIESCE:
1319
1320
1321
1322 if (!(req->cmd_flags & REQ_PREEMPT))
1323 ret = BLKPREP_DEFER;
1324 break;
1325 default:
 /*
  * For any other not fully online state we only allow special
  * commands, in particular user initiated requests to an
  * offline device.
  */
1331 if (!(req->cmd_flags & REQ_PREEMPT))
1332 ret = BLKPREP_KILL;
1333 break;
1334 }
1335 }
1336 return ret;
1337}
1338
1339static int
1340scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1341{
1342 struct scsi_device *sdev = q->queuedata;
1343
1344 switch (ret) {
1345 case BLKPREP_KILL:
1346 req->errors = DID_NO_CONNECT << 16;
1347
1348 if (req->special) {
1349 struct scsi_cmnd *cmd = req->special;
1350 scsi_release_buffers(cmd);
1351 scsi_put_command(cmd);
1352 put_device(&sdev->sdev_gendev);
1353 req->special = NULL;
1354 }
1355 break;
1356 case BLKPREP_DEFER:
 /*
  * If we defer, blk_peek_request() returns NULL, but the queue
  * must be restarted, so schedule a callback to happen shortly.
  */
1362 if (atomic_read(&sdev->device_busy) == 0)
1363 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1364 break;
1365 default:
1366 req->cmd_flags |= REQ_DONTPREP;
1367 }
1368
1369 return ret;
1370}
1371
1372static int scsi_prep_fn(struct request_queue *q, struct request *req)
1373{
1374 struct scsi_device *sdev = q->queuedata;
1375 struct scsi_cmnd *cmd;
1376 int ret;
1377
1378 ret = scsi_prep_state_check(sdev, req);
1379 if (ret != BLKPREP_OK)
1380 goto out;
1381
1382 cmd = scsi_get_cmd_from_req(sdev, req);
1383 if (unlikely(!cmd)) {
1384 ret = BLKPREP_DEFER;
1385 goto out;
1386 }
1387
1388 ret = scsi_setup_cmnd(sdev, req);
1389out:
1390 return scsi_prep_return(q, req, ret);
1391}
1392
1393static void scsi_unprep_fn(struct request_queue *q, struct request *req)
1394{
1395 scsi_uninit_cmd(req->special);
1396}
1397

/*
 * scsi_dev_queue_ready: return 1 if we can send requests to sdev,
 * 0 otherwise.
 */
1404static inline int scsi_dev_queue_ready(struct request_queue *q,
1405 struct scsi_device *sdev)
1406{
1407 unsigned int busy;
1408
1409 busy = atomic_inc_return(&sdev->device_busy) - 1;
1410 if (atomic_read(&sdev->device_blocked)) {
1411 if (busy)
1412 goto out_dec;
1413
1414
1415
1416
1417 if (atomic_dec_return(&sdev->device_blocked) > 0) {
1418
1419
1420
1421 if (!q->mq_ops)
1422 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1423 goto out_dec;
1424 }
1425 SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
1426 "unblocking device at zero depth\n"));
1427 }
1428
1429 if (busy >= sdev->queue_depth)
1430 goto out_dec;
1431
1432 return 1;
1433out_dec:
1434 atomic_dec(&sdev->device_busy);
1435 return 0;
1436}
1437

/*
 * scsi_target_queue_ready: check if we can send commands to the
 * command's target.
 */
1442static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1443 struct scsi_device *sdev)
1444{
1445 struct scsi_target *starget = scsi_target(sdev);
1446 unsigned int busy;
1447
1448 if (starget->single_lun) {
1449 spin_lock_irq(shost->host_lock);
1450 if (starget->starget_sdev_user &&
1451 starget->starget_sdev_user != sdev) {
1452 spin_unlock_irq(shost->host_lock);
1453 return 0;
1454 }
1455 starget->starget_sdev_user = sdev;
1456 spin_unlock_irq(shost->host_lock);
1457 }
1458
1459 if (starget->can_queue <= 0)
1460 return 1;
1461
1462 busy = atomic_inc_return(&starget->target_busy) - 1;
1463 if (atomic_read(&starget->target_blocked) > 0) {
1464 if (busy)
1465 goto starved;
1466
1467
1468
1469
1470 if (atomic_dec_return(&starget->target_blocked) > 0)
1471 goto out_dec;
1472
1473 SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1474 "unblocking target at zero depth\n"));
1475 }
1476
1477 if (busy >= starget->can_queue)
1478 goto starved;
1479
1480 return 1;
1481
1482starved:
1483 spin_lock_irq(shost->host_lock);
1484 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1485 spin_unlock_irq(shost->host_lock);
1486out_dec:
1487 if (starget->can_queue > 0)
1488 atomic_dec(&starget->target_busy);
1489 return 0;
1490}
1491

/*
 * scsi_host_queue_ready: return 1 if we can send requests to shost,
 * 0 otherwise.  Callers must be prepared to get 0 while the host is
 * in error recovery.
 */
1497static inline int scsi_host_queue_ready(struct request_queue *q,
1498 struct Scsi_Host *shost,
1499 struct scsi_device *sdev)
1500{
1501 unsigned int busy;
1502
1503 if (scsi_host_in_recovery(shost))
1504 return 0;
1505
1506 busy = atomic_inc_return(&shost->host_busy) - 1;
1507 if (atomic_read(&shost->host_blocked) > 0) {
1508 if (busy)
1509 goto starved;
1510
1511
1512
1513
1514 if (atomic_dec_return(&shost->host_blocked) > 0)
1515 goto out_dec;
1516
1517 SCSI_LOG_MLQUEUE(3,
1518 shost_printk(KERN_INFO, shost,
1519 "unblocking host at zero depth\n"));
1520 }
1521
1522 if (shost->can_queue > 0 && busy >= shost->can_queue)
1523 goto starved;
1524 if (shost->host_self_blocked)
1525 goto starved;
1526
1527
1528 if (!list_empty(&sdev->starved_entry)) {
1529 spin_lock_irq(shost->host_lock);
1530 if (!list_empty(&sdev->starved_entry))
1531 list_del_init(&sdev->starved_entry);
1532 spin_unlock_irq(shost->host_lock);
1533 }
1534
1535 return 1;
1536
1537starved:
1538 spin_lock_irq(shost->host_lock);
1539 if (list_empty(&sdev->starved_entry))
1540 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1541 spin_unlock_irq(shost->host_lock);
1542out_dec:
1543 atomic_dec(&shost->host_busy);
1544 return 0;
1545}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * No lock is taken when checking the busy state of shost/starget/sdev:
 * the result is only a hint and may already be stale when the caller
 * looks at it.  Once the midlayer can no longer dispatch I/O (dying
 * queue) we must report "not busy" so request stacking drivers don't
 * hold requests forever.
 */
1559static int scsi_lld_busy(struct request_queue *q)
1560{
1561 struct scsi_device *sdev = q->queuedata;
1562 struct Scsi_Host *shost;
1563
1564 if (blk_queue_dying(q))
1565 return 0;
1566
1567 shost = sdev->host;

 /*
  * Ignore host/starget busy state.  Since the block layer has no
  * concept of fairness across multiple queues, congestion of
  * host/starget needs to be handled in the SCSI layer.
  */
1575 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1576 return 1;
1577
1578 return 0;
1579}

/*
 * Kill a request for a dead device.
 */
1584static void scsi_kill_request(struct request *req, struct request_queue *q)
1585{
1586 struct scsi_cmnd *cmd = req->special;
1587 struct scsi_device *sdev;
1588 struct scsi_target *starget;
1589 struct Scsi_Host *shost;
1590
1591 blk_start_request(req);
1592
1593 scmd_printk(KERN_INFO, cmd, "killing request\n");
1594
1595 sdev = cmd->device;
1596 starget = scsi_target(sdev);
1597 shost = sdev->host;
1598 scsi_init_cmd_errh(cmd);
1599 cmd->result = DID_NO_CONNECT << 16;
1600 atomic_inc(&cmd->device->iorequest_cnt);

 /*
  * The SCSI completion path will do scsi_device_unbusy(), so bump
  * the busy counts here just as the normal issue path does.
  */
1607 atomic_inc(&sdev->device_busy);
1608 atomic_inc(&shost->host_busy);
1609 if (starget->can_queue > 0)
1610 atomic_inc(&starget->target_busy);
1611
1612 blk_complete_request(req);
1613}
1614
1615static void scsi_softirq_done(struct request *rq)
1616{
1617 struct scsi_cmnd *cmd = rq->special;
1618 unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1619 int disposition;
1620
1621 INIT_LIST_HEAD(&cmd->eh_entry);
1622
1623 atomic_inc(&cmd->device->iodone_cnt);
1624 if (cmd->result)
1625 atomic_inc(&cmd->device->ioerr_cnt);
1626
1627 disposition = scsi_decide_disposition(cmd);
1628 if (disposition != SUCCESS &&
1629 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1630 sdev_printk(KERN_ERR, cmd->device,
1631 "timing out command, waited %lus\n",
1632 wait_for/HZ);
1633 disposition = SUCCESS;
1634 }
1635
1636 scsi_log_completion(cmd, disposition);
1637
1638 switch (disposition) {
1639 case SUCCESS:
1640 scsi_finish_command(cmd);
1641 break;
1642 case NEEDS_RETRY:
1643 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1644 break;
1645 case ADD_TO_MLQUEUE:
1646 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1647 break;
1648 default:
1649 if (!scsi_eh_scmd_add(cmd, 0))
1650 scsi_finish_command(cmd);
1651 }
1652}
1653

/*
 * scsi_dispatch_cmd - dispatch a command to the low-level driver
 * @cmd: command block we are dispatching
 *
 * Returns nonzero if the request was rejected and must be requeued.
 */
1661static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
1662{
1663 struct Scsi_Host *host = cmd->device->host;
1664 int rtn = 0;
1665
1666 atomic_inc(&cmd->device->iorequest_cnt);
1667
1668
1669 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
1670
1671
1672
1673 cmd->result = DID_NO_CONNECT << 16;
1674 goto done;
1675 }
1676
1677
1678 if (unlikely(scsi_device_blocked(cmd->device))) {
 /*
  * While the device is blocked the command is just put back on
  * the device queue: the suspend/blocked state has already
  * stopped the queue, so no further requests should be issued
  * until the device leaves that state.
  */
1686 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1687 "queuecommand : device blocked\n"));
1688 return SCSI_MLQUEUE_DEVICE_BUSY;
1689 }
1690
1691
1692 if (cmd->device->lun_in_cdb)
1693 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
1694 (cmd->device->lun << 5 & 0xe0);
1695
1696 scsi_log_send(cmd);

 /*
  * Before we queue this command, check if the command
  * length exceeds what the host adapter can handle.
  */
1702 if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
1703 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1704 "queuecommand : command too long. "
1705 "cdb_size=%d host->max_cmd_len=%d\n",
1706 cmd->cmd_len, cmd->device->host->max_cmd_len));
1707 cmd->result = (DID_ABORT << 16);
1708 goto done;
1709 }
1710
1711 if (unlikely(host->shost_state == SHOST_DEL)) {
1712 cmd->result = (DID_NO_CONNECT << 16);
1713 goto done;
1714
1715 }
1716
1717 trace_scsi_dispatch_cmd_start(cmd);
1718 rtn = host->hostt->queuecommand(host, cmd);
1719 if (rtn) {
1720 trace_scsi_dispatch_cmd_error(cmd, rtn);
1721 if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
1722 rtn != SCSI_MLQUEUE_TARGET_BUSY)
1723 rtn = SCSI_MLQUEUE_HOST_BUSY;
1724
1725 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1726 "queuecommand : request rejected\n"));
1727 }
1728
1729 return rtn;
1730 done:
1731 cmd->scsi_done(cmd);
1732 return 0;
1733}
1734

/*
 * scsi_done - invoke completion on a finished SCSI command
 * @cmd: command the low-level driver has finished with
 *
 * The LLDD hands ownership of the command back to the midlayer here;
 * we simply trace it and let the block layer schedule the softirq
 * completion (scsi_softirq_done).
 */
1746static void scsi_done(struct scsi_cmnd *cmd)
1747{
1748 trace_scsi_dispatch_cmd_done(cmd);
1749 blk_complete_request(cmd->request);
1750}
1751

/*
 * scsi_request_fn - main strategy routine for the legacy (non-blk-mq)
 * request queue.
 * @q: the request queue to run
 *
 * Called with the queue lock held; it is dropped and re-taken around
 * command dispatch.
 */
1763static void scsi_request_fn(struct request_queue *q)
1764 __releases(q->queue_lock)
1765 __acquires(q->queue_lock)
1766{
1767 struct scsi_device *sdev = q->queuedata;
1768 struct Scsi_Host *shost;
1769 struct scsi_cmnd *cmd;
1770 struct request *req;
1771
1772
1773
1774
1775
1776 shost = sdev->host;
1777 for (;;) {
1778 int rtn;
1779
1780
1781
1782
1783
1784 req = blk_peek_request(q);
1785 if (!req)
1786 break;
1787
1788 if (unlikely(!scsi_device_online(sdev))) {
1789 sdev_printk(KERN_ERR, sdev,
1790 "rejecting I/O to offline device\n");
1791 scsi_kill_request(req, q);
1792 continue;
1793 }
1794
1795 if (!scsi_dev_queue_ready(q, sdev))
1796 break;
1797
1798
1799
1800
1801 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1802 blk_start_request(req);
1803
1804 spin_unlock_irq(q->queue_lock);
1805 cmd = req->special;
1806 if (unlikely(cmd == NULL)) {
1807 printk(KERN_CRIT "impossible request in %s.\n"
1808 "please mail a stack trace to "
1809 "linux-scsi@vger.kernel.org\n",
1810 __func__);
1811 blk_dump_rq_flags(req, "foo");
1812 BUG();
1813 }

 /*
  * We hit this when the driver is using a host wide tag map.
  * For device level tag maps the queue_depth check in
  * scsi_dev_queue_ready() would prevent us from trying to
  * allocate a tag.  Since the map is a shared host resource we
  * add the device to the starved list so it eventually gets a
  * run when a tag is freed.
  */
1823 if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
1824 spin_lock_irq(shost->host_lock);
1825 if (list_empty(&sdev->starved_entry))
1826 list_add_tail(&sdev->starved_entry,
1827 &shost->starved_list);
1828 spin_unlock_irq(shost->host_lock);
1829 goto not_ready;
1830 }
1831
1832 if (!scsi_target_queue_ready(shost, sdev))
1833 goto not_ready;
1834
1835 if (!scsi_host_queue_ready(q, shost, sdev))
1836 goto host_not_ready;
1837
1838 if (sdev->simple_tags)
1839 cmd->flags |= SCMD_TAGGED;
1840 else
1841 cmd->flags &= ~SCMD_TAGGED;
1842
1843
1844
1845
1846
1847 scsi_init_cmd_errh(cmd);
1848
1849
1850
1851
1852 cmd->scsi_done = scsi_done;
1853 rtn = scsi_dispatch_cmd(cmd);
1854 if (rtn) {
1855 scsi_queue_insert(cmd, rtn);
1856 spin_lock_irq(q->queue_lock);
1857 goto out_delay;
1858 }
1859 spin_lock_irq(q->queue_lock);
1860 }
1861
1862 return;
1863
1864 host_not_ready:
1865 if (scsi_target(sdev)->can_queue > 0)
1866 atomic_dec(&scsi_target(sdev)->target_busy);
1867 not_ready:
 /*
  * Lock the queue, requeue the request and decrement device_busy.
  * We must return with the queue_lock held.  Decrementing
  * device_busy without checking it is OK, as all such cases
  * (host or target limits) run the queue again later.
  */
1876 spin_lock_irq(q->queue_lock);
1877 blk_requeue_request(q, req);
1878 atomic_dec(&sdev->device_busy);
1879out_delay:
1880 if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
1881 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1882}
1883
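/* Translate a BLKPREP_* return value into the corresponding BLK_MQ_RQ_QUEUE_* code. */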
1884static inline int prep_to_mq(int ret)
1885{
1886 switch (ret) {
1887 case BLKPREP_OK:
1888 return 0;
1889 case BLKPREP_DEFER:
1890 return BLK_MQ_RQ_QUEUE_BUSY;
1891 default:
1892 return BLK_MQ_RQ_QUEUE_ERROR;
1893 }
1894}
1895
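/*
 * Build up the scsi_cmnd embedded in a blk-mq request: reset the
 * command, wire up its scatterlist (and protection/bidi buffers when
 * used) from the per-request allocation, and hand it to the common
 * command setup code.
 */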
1896static int scsi_mq_prep_fn(struct request *req)
1897{
1898 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1899 struct scsi_device *sdev = req->q->queuedata;
1900 struct Scsi_Host *shost = sdev->host;
1901 unsigned char *sense_buf = cmd->sense_buffer;
1902 struct scatterlist *sg;
1903
1904 memset(cmd, 0, sizeof(struct scsi_cmnd));
1905
1906 req->special = cmd;
1907
1908 cmd->request = req;
1909 cmd->device = sdev;
1910 cmd->sense_buffer = sense_buf;
1911
1912 cmd->tag = req->tag;
1913
1914 cmd->cmnd = req->cmd;
1915 cmd->prot_op = SCSI_PROT_NORMAL;
1916
1917 INIT_LIST_HEAD(&cmd->list);
1918 INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
1919 cmd->jiffies_at_alloc = jiffies;
1920
1921 if (shost->use_cmd_list) {
1922 spin_lock_irq(&sdev->list_lock);
1923 list_add_tail(&cmd->list, &sdev->cmd_list);
1924 spin_unlock_irq(&sdev->list_lock);
1925 }
1926
1927 sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
1928 cmd->sdb.table.sgl = sg;
1929
1930 if (scsi_host_get_prot(shost)) {
1931 cmd->prot_sdb = (void *)sg +
1932 min_t(unsigned int,
1933 shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
1934 sizeof(struct scatterlist);
1935 memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
1936
1937 cmd->prot_sdb->table.sgl =
1938 (struct scatterlist *)(cmd->prot_sdb + 1);
1939 }
1940
1941 if (blk_bidi_rq(req)) {
1942 struct request *next_rq = req->next_rq;
1943 struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);
1944
1945 memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
1946 bidi_sdb->table.sgl =
1947 (struct scatterlist *)(bidi_sdb + 1);
1948
1949 next_rq->special = bidi_sdb;
1950 }
1951
1952 blk_mq_start_request(req);
1953
1954 return scsi_setup_cmnd(sdev, req);
1955}
1956
1957static void scsi_mq_done(struct scsi_cmnd *cmd)
1958{
1959 trace_scsi_dispatch_cmd_done(cmd);
1960 blk_mq_complete_request(cmd->request);
1961}
1962
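/*
 * blk-mq .queue_rq handler: check device state and the device, target
 * and host budgets, prepare the command if needed, then dispatch it to
 * the low-level driver.  On resource shortage the queue is stopped and
 * rerun after a short delay.
 */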
1963static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
1964 const struct blk_mq_queue_data *bd)
1965{
1966 struct request *req = bd->rq;
1967 struct request_queue *q = req->q;
1968 struct scsi_device *sdev = q->queuedata;
1969 struct Scsi_Host *shost = sdev->host;
1970 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1971 int ret;
1972 int reason;
1973
1974 ret = prep_to_mq(scsi_prep_state_check(sdev, req));
1975 if (ret)
1976 goto out;
1977
1978 ret = BLK_MQ_RQ_QUEUE_BUSY;
1979 if (!get_device(&sdev->sdev_gendev))
1980 goto out;
1981
1982 if (!scsi_dev_queue_ready(q, sdev))
1983 goto out_put_device;
1984 if (!scsi_target_queue_ready(shost, sdev))
1985 goto out_dec_device_busy;
1986 if (!scsi_host_queue_ready(q, shost, sdev))
1987 goto out_dec_target_busy;
1988
1989
1990 if (!(req->cmd_flags & REQ_DONTPREP)) {
1991 ret = prep_to_mq(scsi_mq_prep_fn(req));
1992 if (ret)
1993 goto out_dec_host_busy;
1994 req->cmd_flags |= REQ_DONTPREP;
1995 } else {
1996 blk_mq_start_request(req);
1997 }
1998
1999 if (sdev->simple_tags)
2000 cmd->flags |= SCMD_TAGGED;
2001 else
2002 cmd->flags &= ~SCMD_TAGGED;
2003
2004 scsi_init_cmd_errh(cmd);
2005 cmd->scsi_done = scsi_mq_done;
2006
2007 reason = scsi_dispatch_cmd(cmd);
2008 if (reason) {
2009 scsi_set_blocked(cmd, reason);
2010 ret = BLK_MQ_RQ_QUEUE_BUSY;
2011 goto out_dec_host_busy;
2012 }
2013
2014 return BLK_MQ_RQ_QUEUE_OK;
2015
2016out_dec_host_busy:
2017 atomic_dec(&shost->host_busy);
2018out_dec_target_busy:
2019 if (scsi_target(sdev)->can_queue > 0)
2020 atomic_dec(&scsi_target(sdev)->target_busy);
2021out_dec_device_busy:
2022 atomic_dec(&sdev->device_busy);
2023out_put_device:
2024 put_device(&sdev->sdev_gendev);
2025out:
2026 switch (ret) {
2027 case BLK_MQ_RQ_QUEUE_BUSY:
2028 blk_mq_stop_hw_queue(hctx);
2029 if (atomic_read(&sdev->device_busy) == 0 &&
2030 !scsi_device_blocked(sdev))
2031 blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
2032 break;
2033 case BLK_MQ_RQ_QUEUE_ERROR:
 /*
  * Make sure to release all allocated resources when we hit an
  * error, as we will never see this command again.
  */
2039 if (req->cmd_flags & REQ_DONTPREP)
2040 scsi_mq_uninit_cmd(cmd);
2041 break;
2042 default:
2043 break;
2044 }
2045 return ret;
2046}
2047
2048static enum blk_eh_timer_return scsi_timeout(struct request *req,
2049 bool reserved)
2050{
2051 if (reserved)
2052 return BLK_EH_RESET_TIMER;
2053 return scsi_times_out(req);
2054}
2055
2056static int scsi_init_request(void *data, struct request *rq,
2057 unsigned int hctx_idx, unsigned int request_idx,
2058 unsigned int numa_node)
2059{
2060 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
2061
2062 cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
2063 numa_node);
2064 if (!cmd->sense_buffer)
2065 return -ENOMEM;
2066 return 0;
2067}
2068
2069static void scsi_exit_request(void *data, struct request *rq,
2070 unsigned int hctx_idx, unsigned int request_idx)
2071{
2072 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
2073
2074 kfree(cmd->sense_buffer);
2075}
2076
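/*
 * Work out the highest address the host can DMA to, so the block layer
 * can bounce buffers that sit above it.
 */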
2077static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
2078{
2079 struct device *host_dev;
2080 u64 bounce_limit = 0xffffffff;
2081
2082 if (shost->unchecked_isa_dma)
2083 return BLK_BOUNCE_ISA;

 /*
  * Platforms with virtual-DMA translation hardware have no
  * practical DMA addressing limit.
  */
2088 if (!PCI_DMA_BUS_IS_PHYS)
2089 return BLK_BOUNCE_ANY;
2090
2091 host_dev = scsi_get_device(shost);
2092 if (host_dev && host_dev->dma_mask)
2093 bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
2094
2095 return bounce_limit;
2096}
2097
2098static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
2099{
2100 struct device *dev = shost->dma_dev;
2101
2102
2103
2104
2105 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
2106 SCSI_MAX_SG_CHAIN_SEGMENTS));
2107
2108 if (scsi_host_prot_dma(shost)) {
2109 shost->sg_prot_tablesize =
2110 min_not_zero(shost->sg_prot_tablesize,
2111 (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
2112 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
2113 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
2114 }
2115
2116 blk_queue_max_hw_sectors(q, shost->max_sectors);
2117 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
2118 blk_queue_segment_boundary(q, shost->dma_boundary);
2119 dma_set_seg_boundary(dev, shost->dma_boundary);
2120
2121 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
2122
2123 if (!shost->use_clustering)
2124 q->limits.cluster = 0;

 /*
  * Set a reasonable default alignment: the host and device may
  * alter it later via blk_queue_update_dma_alignment().
  */
2131 blk_queue_dma_alignment(q, 0x03);
2132}
2133
2134struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
2135 request_fn_proc *request_fn)
2136{
2137 struct request_queue *q;
2138
2139 q = blk_init_queue(request_fn, NULL);
2140 if (!q)
2141 return NULL;
2142 __scsi_init_queue(shost, q);
2143 return q;
2144}
2145EXPORT_SYMBOL(__scsi_alloc_queue);
2146
2147struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
2148{
2149 struct request_queue *q;
2150
2151 q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
2152 if (!q)
2153 return NULL;
2154
2155 blk_queue_prep_rq(q, scsi_prep_fn);
2156 blk_queue_unprep_rq(q, scsi_unprep_fn);
2157 blk_queue_softirq_done(q, scsi_softirq_done);
2158 blk_queue_rq_timed_out(q, scsi_times_out);
2159 blk_queue_lld_busy(q, scsi_lld_busy);
2160 return q;
2161}
2162
2163static struct blk_mq_ops scsi_mq_ops = {
2164 .map_queue = blk_mq_map_queue,
2165 .queue_rq = scsi_queue_rq,
2166 .complete = scsi_softirq_done,
2167 .timeout = scsi_timeout,
2168 .init_request = scsi_init_request,
2169 .exit_request = scsi_exit_request,
2170};
2171
2172struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
2173{
2174 sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
2175 if (IS_ERR(sdev->request_queue))
2176 return NULL;
2177
2178 sdev->request_queue->queuedata = sdev;
2179 __scsi_init_queue(sdev->host, sdev->request_queue);
2180 return sdev->request_queue;
2181}
2182
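/*
 * Size the per-request blk-mq payload (scsi_cmnd + driver data +
 * scatterlist, plus protection buffer when supported) and allocate the
 * shared tag set for the host.
 */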
2183int scsi_mq_setup_tags(struct Scsi_Host *shost)
2184{
2185 unsigned int cmd_size, sgl_size, tbl_size;
2186
2187 tbl_size = shost->sg_tablesize;
2188 if (tbl_size > SCSI_MAX_SG_SEGMENTS)
2189 tbl_size = SCSI_MAX_SG_SEGMENTS;
2190 sgl_size = tbl_size * sizeof(struct scatterlist);
2191 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
2192 if (scsi_host_get_prot(shost))
2193 cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
2194
2195 memset(&shost->tag_set, 0, sizeof(shost->tag_set));
2196 shost->tag_set.ops = &scsi_mq_ops;
2197 shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
2198 shost->tag_set.queue_depth = shost->can_queue;
2199 shost->tag_set.cmd_size = cmd_size;
2200 shost->tag_set.numa_node = NUMA_NO_NODE;
2201 shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
2202 shost->tag_set.flags |=
2203 BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
2204 shost->tag_set.driver_data = shost;
2205
2206 return blk_mq_alloc_tag_set(&shost->tag_set);
2207}
2208
2209void scsi_mq_destroy_tags(struct Scsi_Host *shost)
2210{
2211 blk_mq_free_tag_set(&shost->tag_set);
2212}
2213

/*
 * scsi_block_requests - prevent further commands from being queued
 * @shost: host in question
 *
 * Used by low-level drivers; there is no timer nor any other means by
 * which the requests get unblocked other than the low-level driver
 * calling scsi_unblock_requests().  No locks are assumed held.
 */
2230void scsi_block_requests(struct Scsi_Host *shost)
2231{
2232 shost->host_self_blocked = 1;
2233}
2234EXPORT_SYMBOL(scsi_block_requests);

/*
 * scsi_unblock_requests - allow further commands to be queued
 * @shost: host in question
 *
 * Undoes scsi_block_requests() and reruns the host's queues so anything
 * that built up in the meantime is issued.  Provided as an API function
 * so that midlayer-internal changes don't require wholesale changes to
 * drivers using this feature.  No locks are assumed held.
 */
2256void scsi_unblock_requests(struct Scsi_Host *shost)
2257{
2258 shost->host_self_blocked = 0;
2259 scsi_run_host_queues(shost);
2260}
2261EXPORT_SYMBOL(scsi_unblock_requests);
2262
2263int __init scsi_init_queue(void)
2264{
2265 int i;
2266
2267 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
2268 sizeof(struct scsi_data_buffer),
2269 0, 0, NULL);
2270 if (!scsi_sdb_cache) {
2271 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
2272 return -ENOMEM;
2273 }
2274
2275 for (i = 0; i < SG_MEMPOOL_NR; i++) {
2276 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
2277 int size = sgp->size * sizeof(struct scatterlist);
2278
2279 sgp->slab = kmem_cache_create(sgp->name, size, 0,
2280 SLAB_HWCACHE_ALIGN, NULL);
2281 if (!sgp->slab) {
2282 printk(KERN_ERR "SCSI: can't init sg slab %s\n",
2283 sgp->name);
2284 goto cleanup_sdb;
2285 }
2286
2287 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
2288 sgp->slab);
2289 if (!sgp->pool) {
2290 printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
2291 sgp->name);
2292 goto cleanup_sdb;
2293 }
2294 }
2295
2296 return 0;
2297
2298cleanup_sdb:
2299 for (i = 0; i < SG_MEMPOOL_NR; i++) {
2300 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
2301 if (sgp->pool)
2302 mempool_destroy(sgp->pool);
2303 if (sgp->slab)
2304 kmem_cache_destroy(sgp->slab);
2305 }
2306 kmem_cache_destroy(scsi_sdb_cache);
2307
2308 return -ENOMEM;
2309}
2310
2311void scsi_exit_queue(void)
2312{
2313 int i;
2314
2315 kmem_cache_destroy(scsi_sdb_cache);
2316
2317 for (i = 0; i < SG_MEMPOOL_NR; i++) {
2318 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
2319 mempool_destroy(sgp->pool);
2320 kmem_cache_destroy(sgp->slab);
2321 }
2322}
2323

/*
 * scsi_mode_select - issue a MODE SELECT command
 * @sdev: SCSI device to be queried
 *
 * Builds a MODE SELECT(10) or MODE SELECT(6) CDB (depending on
 * sdev->use_10_for_ms), prepends the corresponding mode parameter
 * header described by @data to @buffer and executes the command.
 * Returns zero on success, a negative error number or the SCSI
 * result on error; sense data, if any, is returned through @sshdr.
 */
2342int
2343scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
2344 unsigned char *buffer, int len, int timeout, int retries,
2345 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2346{
2347 unsigned char cmd[10];
2348 unsigned char *real_buffer;
2349 int ret;
2350
2351 memset(cmd, 0, sizeof(cmd));
2352 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
2353
2354 if (sdev->use_10_for_ms) {
2355 if (len > 65535)
2356 return -EINVAL;
2357 real_buffer = kmalloc(8 + len, GFP_KERNEL);
2358 if (!real_buffer)
2359 return -ENOMEM;
2360 memcpy(real_buffer + 8, buffer, len);
2361 len += 8;
2362 real_buffer[0] = 0;
2363 real_buffer[1] = 0;
2364 real_buffer[2] = data->medium_type;
2365 real_buffer[3] = data->device_specific;
2366 real_buffer[4] = data->longlba ? 0x01 : 0;
2367 real_buffer[5] = 0;
2368 real_buffer[6] = data->block_descriptor_length >> 8;
2369 real_buffer[7] = data->block_descriptor_length;
2370
2371 cmd[0] = MODE_SELECT_10;
2372 cmd[7] = len >> 8;
2373 cmd[8] = len;
2374 } else {
2375 if (len > 255 || data->block_descriptor_length > 255 ||
2376 data->longlba)
2377 return -EINVAL;
2378
2379 real_buffer = kmalloc(4 + len, GFP_KERNEL);
2380 if (!real_buffer)
2381 return -ENOMEM;
2382 memcpy(real_buffer + 4, buffer, len);
2383 len += 4;
2384 real_buffer[0] = 0;
2385 real_buffer[1] = data->medium_type;
2386 real_buffer[2] = data->device_specific;
2387 real_buffer[3] = data->block_descriptor_length;
2388
2389
2390 cmd[0] = MODE_SELECT;
2391 cmd[4] = len;
2392 }
2393
2394 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
2395 sshdr, timeout, retries, NULL);
2396 kfree(real_buffer);
2397 return ret;
2398}
2399EXPORT_SYMBOL_GPL(scsi_mode_select);

/*
 * scsi_mode_sense - issue a MODE SENSE, falling back from 10 to 6 byte
 * CDBs if necessary.
 * @sdev: SCSI device to be queried
 * @dbd: set if mode sense will allow block descriptors to be returned
 * @modepage: mode page being requested
 * @buffer: request buffer (may not be smaller than eight bytes)
 *
 * On success the parsed mode parameter header is returned through
 * @data (data->header_length gives the header size, 4 or 8 bytes);
 * sense data, if any, is returned through @sshdr.
 */
2418int
2419scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
2420 unsigned char *buffer, int len, int timeout, int retries,
2421 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2422{
2423 unsigned char cmd[12];
2424 int use_10_for_ms;
2425 int header_length;
2426 int result;
2427 struct scsi_sense_hdr my_sshdr;
2428
2429 memset(data, 0, sizeof(*data));
2430 memset(&cmd[0], 0, 12);
2431 cmd[1] = dbd & 0x18;
2432 cmd[2] = modepage;
2433
2434
2435 if (!sshdr)
2436 sshdr = &my_sshdr;
2437
2438 retry:
2439 use_10_for_ms = sdev->use_10_for_ms;
2440
2441 if (use_10_for_ms) {
2442 if (len < 8)
2443 len = 8;
2444
2445 cmd[0] = MODE_SENSE_10;
2446 cmd[8] = len;
2447 header_length = 8;
2448 } else {
2449 if (len < 4)
2450 len = 4;
2451
2452 cmd[0] = MODE_SENSE;
2453 cmd[4] = len;
2454 header_length = 4;
2455 }
2456
2457 memset(buffer, 0, len);
2458
2459 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
2460 sshdr, timeout, retries, NULL);

 /*
  * Make sure an ILLEGAL REQUEST sense return really identifies the
  * command opcode as the problem: MODE SENSE(10) can return ILLEGAL
  * REQUEST simply because the 10-byte opcode isn't supported, in
  * which case we fall back to MODE SENSE(6).
  */
2467 if (use_10_for_ms && !scsi_status_is_good(result) &&
2468 (driver_byte(result) & DRIVER_SENSE)) {
2469 if (scsi_sense_valid(sshdr)) {
2470 if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
2471 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
2472
2473
2474
2475 sdev->use_10_for_ms = 0;
2476 goto retry;
2477 }
2478 }
2479 }
2480
2481 if(scsi_status_is_good(result)) {
2482 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2483 (modepage == 6 || modepage == 8))) {
2484
2485 header_length = 0;
2486 data->length = 13;
2487 data->medium_type = 0;
2488 data->device_specific = 0;
2489 data->longlba = 0;
2490 data->block_descriptor_length = 0;
2491 } else if(use_10_for_ms) {
2492 data->length = buffer[0]*256 + buffer[1] + 2;
2493 data->medium_type = buffer[2];
2494 data->device_specific = buffer[3];
2495 data->longlba = buffer[4] & 0x01;
2496 data->block_descriptor_length = buffer[6]*256
2497 + buffer[7];
2498 } else {
2499 data->length = buffer[0] + 1;
2500 data->medium_type = buffer[1];
2501 data->device_specific = buffer[2];
2502 data->block_descriptor_length = buffer[3];
2503 }
2504 data->header_length = header_length;
2505 }
2506
2507 return result;
2508}
2509EXPORT_SYMBOL(scsi_mode_sense);

/*
 * scsi_test_unit_ready - test if unit is ready
 * @sdev: scsi device to change the state of
 * @sshdr_external: optional sense header; allocated internally if NULL
 *
 * Returns zero if the unit is ready, an error otherwise.  UNIT ATTENTION
 * conditions are retried, and for removable media they set ->changed.
 */
2523int
2524scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2525 struct scsi_sense_hdr *sshdr_external)
2526{
2527 char cmd[] = {
2528 TEST_UNIT_READY, 0, 0, 0, 0, 0,
2529 };
2530 struct scsi_sense_hdr *sshdr;
2531 int result;
2532
2533 if (!sshdr_external)
2534 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
2535 else
2536 sshdr = sshdr_external;
2537
2538
2539 do {
2540 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2541 timeout, retries, NULL);
2542 if (sdev->removable && scsi_sense_valid(sshdr) &&
2543 sshdr->sense_key == UNIT_ATTENTION)
2544 sdev->changed = 1;
2545 } while (scsi_sense_valid(sshdr) &&
2546 sshdr->sense_key == UNIT_ATTENTION && --retries);
2547
2548 if (!sshdr_external)
2549 kfree(sshdr);
2550 return result;
2551}
2552EXPORT_SYMBOL(scsi_test_unit_ready);

/*
 * scsi_device_set_state - take the given device through the device
 * state model.
 * @sdev: scsi device to change the state of
 * @state: state to change to
 *
 * Returns zero on success or -EINVAL if the requested transition is
 * illegal.
 */
2562int
2563scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2564{
2565 enum scsi_device_state oldstate = sdev->sdev_state;
2566
2567 if (state == oldstate)
2568 return 0;
2569
2570 switch (state) {
2571 case SDEV_CREATED:
2572 switch (oldstate) {
2573 case SDEV_CREATED_BLOCK:
2574 break;
2575 default:
2576 goto illegal;
2577 }
2578 break;
2579
2580 case SDEV_RUNNING:
2581 switch (oldstate) {
2582 case SDEV_CREATED:
2583 case SDEV_OFFLINE:
2584 case SDEV_TRANSPORT_OFFLINE:
2585 case SDEV_QUIESCE:
2586 case SDEV_BLOCK:
2587 break;
2588 default:
2589 goto illegal;
2590 }
2591 break;
2592
2593 case SDEV_QUIESCE:
2594 switch (oldstate) {
2595 case SDEV_RUNNING:
2596 case SDEV_OFFLINE:
2597 case SDEV_TRANSPORT_OFFLINE:
2598 break;
2599 default:
2600 goto illegal;
2601 }
2602 break;
2603
2604 case SDEV_OFFLINE:
2605 case SDEV_TRANSPORT_OFFLINE:
2606 switch (oldstate) {
2607 case SDEV_CREATED:
2608 case SDEV_RUNNING:
2609 case SDEV_QUIESCE:
2610 case SDEV_BLOCK:
2611 break;
2612 default:
2613 goto illegal;
2614 }
2615 break;
2616
2617 case SDEV_BLOCK:
2618 switch (oldstate) {
2619 case SDEV_RUNNING:
2620 case SDEV_CREATED_BLOCK:
2621 break;
2622 default:
2623 goto illegal;
2624 }
2625 break;
2626
2627 case SDEV_CREATED_BLOCK:
2628 switch (oldstate) {
2629 case SDEV_CREATED:
2630 break;
2631 default:
2632 goto illegal;
2633 }
2634 break;
2635
2636 case SDEV_CANCEL:
2637 switch (oldstate) {
2638 case SDEV_CREATED:
2639 case SDEV_RUNNING:
2640 case SDEV_QUIESCE:
2641 case SDEV_OFFLINE:
2642 case SDEV_TRANSPORT_OFFLINE:
2643 case SDEV_BLOCK:
2644 break;
2645 default:
2646 goto illegal;
2647 }
2648 break;
2649
2650 case SDEV_DEL:
2651 switch (oldstate) {
2652 case SDEV_CREATED:
2653 case SDEV_RUNNING:
2654 case SDEV_OFFLINE:
2655 case SDEV_TRANSPORT_OFFLINE:
2656 case SDEV_CANCEL:
2657 case SDEV_CREATED_BLOCK:
2658 break;
2659 default:
2660 goto illegal;
2661 }
2662 break;
2663
2664 }
2665 sdev->sdev_state = state;
2666 return 0;
2667
2668 illegal:
2669 SCSI_LOG_ERROR_RECOVERY(1,
2670 sdev_printk(KERN_ERR, sdev,
2671 "Illegal state transition %s->%s",
2672 scsi_device_state_name(oldstate),
2673 scsi_device_state_name(state))
2674 );
2675 return -EINVAL;
2676}
2677EXPORT_SYMBOL(scsi_device_set_state);

/*
 * scsi_evt_emit - emit a single SCSI device uevent
 * @sdev: associated SCSI device
 * @evt: event to emit
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
        int idx = 0;
        char *envp[3];

        switch (evt->evt_type) {
        case SDEV_EVT_MEDIA_CHANGE:
                envp[idx++] = "SDEV_MEDIA_CHANGE=1";
                break;
        case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
                envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
                break;
        case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
                envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
                break;
        case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
                envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
                break;
        case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
                envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
                break;
        case SDEV_EVT_LUN_CHANGE_REPORTED:
                envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
                break;
        default:
                /* do nothing */
                break;
        }

        envp[idx++] = NULL;

        kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

/**
 *	scsi_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch all queued and pending events for the scsi_device as
 *	uevents on its kobject.
 */
void scsi_evt_thread(struct work_struct *work)
{
        struct scsi_device *sdev;
        enum scsi_device_event evt_type;
        LIST_HEAD(event_list);

        sdev = container_of(work, struct scsi_device, event_work);

        for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
                if (test_and_clear_bit(evt_type, sdev->pending_events))
                        sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);

        while (1) {
                struct scsi_event *evt;
                struct list_head *this, *tmp;
                unsigned long flags;

                spin_lock_irqsave(&sdev->list_lock, flags);
                list_splice_init(&sdev->event_list, &event_list);
                spin_unlock_irqrestore(&sdev->list_lock, flags);

                if (list_empty(&event_list))
                        break;

                list_for_each_safe(this, tmp, &event_list) {
                        evt = list_entry(this, struct scsi_event, node);
                        list_del(&evt->node);
                        scsi_evt_emit(sdev, evt);
                        kfree(evt);
                }
        }
}

/**
 *	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert a scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
        unsigned long flags;

#if 0
        /* This check is compiled out: filtering on supported_events here
         * would drop all media change events for polled devices that do
         * not advertise asynchronous event notification support.
         */
        if (!test_bit(evt->evt_type, sdev->supported_events)) {
                kfree(evt);
                return;
        }
#endif

        spin_lock_irqsave(&sdev->list_lock, flags);
        list_add_tail(&evt->node, &sdev->event_list);
        schedule_work(&sdev->event_work);
        spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 *	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for the allocation
 *
 *	Allocates and returns a new scsi_event, or NULL on failure.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
                                  gfp_t gfpflags)
{
        struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
        if (!evt)
                return NULL;

        evt->evt_type = evt_type;
        INIT_LIST_HEAD(&evt->node);

        /* evt_type-specific initialization, if any */
        switch (evt_type) {
        case SDEV_EVT_MEDIA_CHANGE:
        case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
        case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
        case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
        case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
        case SDEV_EVT_LUN_CHANGE_REPORTED:
        default:
                /* do nothing */
                break;
        }

        return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 *	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for the allocation
 *
 *	Assert a scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
                          enum scsi_device_event evt_type, gfp_t gfpflags)
{
        struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
        if (!evt) {
                sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
                            evt_type);
                return;
        }

        sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
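/*
 * Example (illustrative sketch only, not part of this file): a driver that
 * notices new media, e.g. from a unit attention, can queue a uevent for
 * user space without sleeping in the notification path.  The function name
 * is hypothetical.
 *
 *	static void example_report_media_change(struct scsi_device *sdev)
 *	{
 *		sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
 *	}
 */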

/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero on success or an error code if the transition is
 *	not allowed.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
        int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
        if (err)
                return err;

        scsi_run_queue(sdev->request_queue);
        while (atomic_read(&sdev->device_busy)) {
                msleep_interruptible(200);
                scsi_run_queue(sdev->request_queue);
        }
        return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	request queue.
 *
 *	Must be called with user context, may sleep.
 */
void scsi_device_resume(struct scsi_device *sdev)
{
        /*
         * Only resume a device that was actually quiesced, and bail out
         * if the transition back to SDEV_RUNNING is not allowed.
         */
        if (sdev->sdev_state != SDEV_QUIESCE ||
            scsi_device_set_state(sdev, SDEV_RUNNING))
                return;
        scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
        scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
        starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
        scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
        starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
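/*
 * Example (illustrative sketch only, not part of this file): a caller that
 * needs the device idle, for instance around a mode select or a firmware
 * download, can bracket the operation with quiesce/resume.  The function
 * name and the do_maintenance() step are hypothetical.
 *
 *	static int example_with_device_quiesced(struct scsi_device *sdev)
 *	{
 *		int ret;
 *
 *		ret = scsi_device_quiesce(sdev);
 *		if (ret)
 *			return ret;
 *		ret = do_maintenance(sdev);
 *		scsi_device_resume(sdev);
 *		return ret;
 *	}
 */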

/**
 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_internal_device_unblock.
 */
int
scsi_internal_device_block(struct scsi_device *sdev)
{
        struct request_queue *q = sdev->request_queue;
        unsigned long flags;
        int err = 0;

        err = scsi_device_set_state(sdev, SDEV_BLOCK);
        if (err) {
                err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

                if (err)
                        return err;
        }

        /*
         * The device has transitioned to SDEV_BLOCK.  Stop the
         * block layer request queue so no further commands are dispatched.
         */
        if (q->mq_ops) {
                blk_mq_stop_hw_queues(q);
        } else {
                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	or to one of the offline states (which must be a legal transition),
 *	allowing the midlayer to goose the queue for this device.
 */
int
scsi_internal_device_unblock(struct scsi_device *sdev,
                             enum scsi_device_state new_state)
{
        struct request_queue *q = sdev->request_queue;
        unsigned long flags;

        /*
         * Try to transition the scsi device to SDEV_RUNNING or one of the
         * offlined states and goose the device queue if successful.
         */
        if ((sdev->sdev_state == SDEV_BLOCK) ||
            (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
                sdev->sdev_state = new_state;
        else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
                if (new_state == SDEV_TRANSPORT_OFFLINE ||
                    new_state == SDEV_OFFLINE)
                        sdev->sdev_state = new_state;
                else
                        sdev->sdev_state = SDEV_CREATED;
        } else if (sdev->sdev_state != SDEV_CANCEL &&
                   sdev->sdev_state != SDEV_OFFLINE)
                return -EINVAL;

        if (q->mq_ops) {
                blk_mq_start_stopped_hw_queues(q, false);
        } else {
                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
        scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), NULL,
                                        device_block);
        return 0;
}

void
scsi_target_block(struct device *dev)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), NULL,
                                        device_block);
        else
                device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
        scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

static int
target_unblock(struct device *dev, void *data)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), data,
                                        device_unblock);
        return 0;
}

void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), &new_state,
                                        device_unblock);
        else
                device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
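/*
 * Example (illustrative sketch only, not part of this file): a transport
 * class can fence all devices under a remote port while the link is being
 * recovered, and release them when it comes back.  The function name and
 * the port_dev/up parameters are hypothetical.
 *
 *	static void example_port_recovery(struct device *port_dev, bool up)
 *	{
 *		if (!up)
 *			scsi_target_block(port_dev);
 *		else
 *			scsi_target_unblock(port_dev, SDEV_RUNNING);
 *	}
 */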

/**
 * scsi_kmap_atomic_sg - find and atomically map an sg element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
                          size_t *offset, size_t *len)
{
        int i;
        size_t sg_len = 0, len_complete = 0;
        struct scatterlist *sg;
        struct page *page;

        WARN_ON(!irqs_disabled());

        for_each_sg(sgl, sg, sg_count, i) {
                len_complete = sg_len; /* Complete sg-entries */
                sg_len += sg->length;
                if (sg_len > *offset)
                        break;
        }

        if (unlikely(i == sg_count)) {
                printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
                        "elements %d\n",
                       __func__, sg_len, *offset, sg_count);
                WARN_ON(1);
                return NULL;
        }

        /* Offset starting from the beginning of first page in this sg-entry */
        *offset = *offset - len_complete + sg->offset;

        /* Assumption: contiguous pages can be accessed as "page + i" */
        page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
        *offset &= ~PAGE_MASK;

        /* Bytes in this sg-entry from *offset to the end of the page */
        sg_len = PAGE_SIZE - *offset;
        if (*len > sg_len)
                *len = sg_len;

        return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
        kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);

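/*
 * Example (illustrative sketch only, not part of this file): copying a few
 * bytes out of a command's scatterlist at a given byte offset, one mapped
 * page at a time.  The function name is hypothetical; note that
 * scsi_kmap_atomic_sg() expects to be called with interrupts disabled and
 * the caller must not sleep between map and unmap.
 *
 *	static void example_copy_from_sg(struct scsi_cmnd *cmd, size_t skip,
 *					 char *dst, size_t count)
 *	{
 *		while (count) {
 *			size_t offset = skip, len = count;
 *			char *vaddr;
 *
 *			vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd),
 *						    scsi_sg_count(cmd),
 *						    &offset, &len);
 *			if (!vaddr)
 *				break;
 *			memcpy(dst, vaddr + offset, len);
 *			scsi_kunmap_atomic_sg(vaddr);
 *			dst += len;
 *			skip += len;
 *			count -= len;
 *		}
 *	}
 */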
void sdev_disable_disk_events(struct scsi_device *sdev)
{
        atomic_inc(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_disable_disk_events);

void sdev_enable_disk_events(struct scsi_device *sdev)
{
        if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
                return;
        atomic_dec(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_enable_disk_events);
3148