#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

struct kmem_cache *scsi_sdb_cache;
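
/*
 * Requests are requeued with this delay, in milliseconds, when the
 * device, target or host is still busy or blocked (used with
 * blk_delay_queue() and blk_mq_delay_queue() below).
 */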
#define SCSI_QUEUE_DELAY	3

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/target/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = cmd->request->q;

	blk_mq_requeue_request(cmd->request);
	blk_mq_kick_requeue_list(q);
	put_device(&sdev->sdev_gendev);
}
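
/*
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue (SCSI_MLQUEUE_*)
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */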
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.  Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	cmd->result = 0;
	if (q->mq_ops) {
		scsi_mq_requeue_cmd(cmd);
		return;
	}
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	kblockd_schedule_work(&device->requeue_work);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
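
/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */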
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, 1);
}
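
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 * @resid:	optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */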
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, u64 flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM);
	if (IS_ERR(req))
		return ret;
	blk_rq_set_block_pc(req);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_RECLAIM))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and from being reported as an error.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);

int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
			   int data_direction, void *buffer, unsigned bufflen,
			   struct scsi_sense_hdr *sshdr, int timeout, int retries,
			   int *resid, u64 flags)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, flags, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req_flags);
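
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */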
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	atomic_dec(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled))) {
		spin_lock_irqsave(shost->host_lock, flags);
		scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}

	atomic_dec(&sdev->device_busy);
}

static void scsi_kick_queue(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_start_hw_queues(q);
	else
		blk_run_queue(q);
}
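
/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */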
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (shost->can_queue > 0 &&
	    atomic_read(&shost->host_busy) >= shost->can_queue)
		return true;
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_cleanup_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		scsi_kick_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}
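
/*
 * Function:   scsi_run_queue()
 *
 * Purpose:    Select a proper request queue to serve next
 *
 * Arguments:  q       - last request's queue
 *
 * Returns:    Nothing
 *
 * Notes:      The previous command was completely finished, start
 *             a new one if possible.
 */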
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	if (q->mq_ops)
		blk_mq_start_stopped_hw_queues(q, false);
	else
		blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}
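
/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */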
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_unprep_request(req);
	req->special = NULL;
	scsi_put_command(cmd);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (cmd->request->cmd_type == REQ_TYPE_FS) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
	struct scsi_data_buffer *sdb;

	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table, true);
	if (cmd->request->next_rq) {
		sdb = cmd->request->next_rq->special;
		if (sdb)
			sg_free_table_chained(&sdb->table, true);
	}
	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table, true);
}

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	scsi_mq_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);

	if (shost->use_cmd_list) {
		BUG_ON(list_empty(&cmd->list));
		spin_lock_irqsave(&sdev->list_lock, flags);
		list_del_init(&cmd->list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}
}
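
/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Free resources allocated for a scsi_command.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table.
 */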
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table, false);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table, false);
}

static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;

	sg_free_table_chained(&bidi_sdb->table, false);
	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
	cmd->request->next_rq->special = NULL;
}

static bool scsi_end_request(struct request *req, int error,
		unsigned int bytes, unsigned int bidi_bytes)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(bidi_bytes) &&
	    blk_update_request(req->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);

	if (req->mq_ctx) {
		/*
		 * In the MQ case the command gets freed by __blk_mq_end_request,
		 * so we have to do all cleanup that depends on it earlier.
		 *
		 * We also can't kick the queues from irq context, so we
		 * will have to defer it to a workqueue.
		 */
		scsi_mq_uninit_cmd(cmd);

		__blk_mq_end_request(req, error);

		if (scsi_target(sdev)->single_lun ||
		    !list_empty(&sdev->host->starved_list))
			kblockd_schedule_work(&sdev->requeue_work);
		else
			blk_mq_start_stopped_hw_queues(q, true);
	} else {
		unsigned long flags;

		if (bidi_bytes)
			scsi_release_bidi_buffers(cmd);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_finish_request(req, error);
		spin_unlock_irqrestore(q->queue_lock, flags);

		scsi_release_buffers(cmd);

		scsi_put_command(cmd);
		scsi_run_queue(q);
	}

	put_device(&sdev->sdev_gendev);
	return false;
}
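
/**
 * __scsi_error_from_host_byte - translate SCSI error code into errno
 * @cmd:	SCSI command (unused)
 * @result:	scsi error code
 *
 * Translate SCSI error code into standard UNIX errno.
 * Return values:
 * -ENOLINK	temporary transport failure
 * -EREMOTEIO	permanent target failure, do not retry
 * -EBADE	permanent nexus failure, retry on other path
 * -ENOSPC	No write space available
 * -ENODATA	Medium error
 * -EIO		unspecified I/O error
 */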
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
	int error = 0;

	switch(host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		error = -ENOLINK;
		break;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EREMOTEIO;
		break;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EBADE;
		break;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -ENOSPC;
		break;
	case DID_MEDIUM_ERROR:
		set_host_byte(cmd, DID_OK);
		error = -ENODATA;
		break;
	default:
		error = -EIO;
		break;
	}

	return error;
}
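
/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       We will finish off the specified number of sectors.  If we
 *		are done, the command block will be released and the queue
 *		function will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call __scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call scsi_end_request() with -EIO to fail
 *		   the remainder of the request.
 */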
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	bool sense_valid = false;
	int sense_deferred = 0, level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = __scsi_error_from_host_byte(cmd, result);
		}
		/*
		 * __scsi_error_from_host_byte may have reset the host_byte
		 */
		req->errors = cmd->result;

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands Must be complete as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;
			if (scsi_end_request(req, 0, blk_rq_bytes(req),
					blk_rq_bytes(req->next_rq)))
				BUG();
			return;
		}
	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
		/*
		 * Certain non BLOCK_PC requests are commands that don't
		 * actually transfer anything (FLUSH), so cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets the error explicitly for the problem case.
		 */
		error = __scsi_error_from_host_byte(cmd, result);
	}

	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated as
	 * success, so fiddle the result code here.  For passthrough requests
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense(cmd);
		result = 0;
		/* for passthrough error may be set */
		error = 0;
	}

	/*
	 * special case: failed zero length commands always need to
	 * drop down into the retry code. Otherwise, if we finished
	 * all bytes in the request we are done now.
	 */
	if (!(blk_rq_bytes(req) == 0 && error) &&
	    !scsi_end_request(req, error, good_bytes, 0))
		return;

	/*
	 * Kill remainder if no retrys.
	 */
	if (error && scsi_noretry_cmd(cmd)) {
		if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
			BUG();
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * requeues just queue the command up again.
	 */
	if (result == 0)
		goto requeue;

	error = __scsi_error_from_host_byte(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) { /* DIX */
				action = ACTION_FAIL;
				error = -EILSEQ;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				error = -EREMOTEIO;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				error = -EILSEQ;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->cmd_flags & REQ_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						       SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (driver_byte(result) & DRIVER_SENSE)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
			return;
		/*FALLTHRU*/
	case ACTION_REPREP:
	requeue:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		if (q->mq_ops) {
			cmd->request->cmd_flags &= ~REQ_DONTPREP;
			scsi_mq_uninit_cmd(cmd);
			scsi_mq_requeue_cmd(cmd);
		} else {
			scsi_release_buffers(cmd);
			scsi_requeue_command(q, cmd);
		}
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(sg_alloc_table_chained(&sdb->table, req->nr_phys_segments,
					sdb->table.sgl)))
		return BLKPREP_DEFER;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}
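
/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */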
int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = cmd->request;
	bool is_mq = (rq->mq_ctx != NULL);
	int error;

	BUG_ON(!rq->nr_phys_segments);

	error = scsi_init_sgtable(rq, &cmd->sdb);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		if (!rq->q->mq_ops) {
			struct scsi_data_buffer *bidi_sdb =
				kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
			if (!bidi_sdb) {
				error = BLKPREP_DEFER;
				goto err_exit;
			}

			rq->next_rq->special = bidi_sdb;
		}

		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		if (prot_sdb == NULL) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			WARN_ON_ONCE(1);
			error = BLKPREP_KILL;
			goto err_exit;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
				prot_sdb->table.sgl)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;
err_exit:
	if (is_mq) {
		scsi_mq_free_sgtables(cmd);
	} else {
		scsi_release_buffers(cmd);
		cmd->request->special = NULL;
		scsi_put_command(cmd);
		put_device(&sdev->sdev_gendev);
	}
	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		/* Bail if we can't get a reference to the device */
		if (!get_device(&sdev->sdev_gendev))
			return NULL;

		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd)) {
			put_device(&sdev->sdev_gendev);
			return NULL;
		}
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	return cmd;
}

static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	/*
	 * Passthrough requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret = scsi_init_io(cmd);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->cmd_len = req->cmd_len;
	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}

/*
 * Setup a REQ_TYPE_FS command.  These are simple requests from filesystems
 * that still need to be translated to SCSI CDBs from the ULD.
 */
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
		int ret = sdev->handler->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}

static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	switch (req->cmd_type) {
	case REQ_TYPE_FS:
		return scsi_setup_fs_cmnd(sdev, req);
	case REQ_TYPE_BLOCK_PC:
		return scsi_setup_blk_pc_cmnd(sdev, req);
	default:
		return BLKPREP_KILL;
	}
}

static int
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			ret = BLKPREP_DEFER;
			break;
		case SDEV_QUIESCE:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}

static int
scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
	case BLKPREP_INVALID:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			put_device(&sdev->sdev_gendev);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
		 */
		if (atomic_read(&sdev->device_busy) == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int ret;

	ret = scsi_prep_state_check(sdev, req);
	if (ret != BLKPREP_OK)
		goto out;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd)) {
		ret = BLKPREP_DEFER;
		goto out;
	}

	ret = scsi_setup_cmnd(sdev, req);
out:
	return scsi_prep_return(q, req, ret);
}

static void scsi_unprep_fn(struct request_queue *q, struct request *req)
{
	scsi_uninit_cmd(req->special);
}
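
/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */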
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	unsigned int busy;

	busy = atomic_inc_return(&sdev->device_busy) - 1;
	if (atomic_read(&sdev->device_blocked)) {
		if (busy)
			goto out_dec;

		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (atomic_dec_return(&sdev->device_blocked) > 0) {
			/*
			 * For the MQ case we take care of this in the caller.
			 */
			if (!q->mq_ops)
				blk_delay_queue(q, SCSI_QUEUE_DELAY);
			goto out_dec;
		}
		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
	}

	if (busy >= sdev->queue_depth)
		goto out_dec;

	return 1;
out_dec:
	atomic_dec(&sdev->device_busy);
	return 0;
}
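
/*
 * scsi_target_queue_ready: checks if we can send commands to the target
 * @sdev: scsi device on starget to check.
 */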
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}
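
/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must be holding the queue_lock.
 */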
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	unsigned int busy;

	if (scsi_host_in_recovery(shost))
		return 0;

	busy = atomic_inc_return(&shost->host_busy) - 1;
	if (atomic_read(&shost->host_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->can_queue > 0 && busy >= shost->can_queue)
		goto starved;
	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	atomic_dec(&shost->host_busy);
	return 0;
}
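
/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
 * needs to return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */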
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dying(q))
		return 0;

	shost = sdev->host;

	/*
	 * Report the host or device as busy so that request stacking
	 * drivers (e.g. multipath) back off instead of dispatching
	 * more I/O that cannot currently be queued.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	scmd_printk(KERN_INFO, cmd, "killing request\n");

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	atomic_inc(&sdev->device_busy);
	atomic_inc(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_inc(&starget->target_busy);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}
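
/**
 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero return request was rejected and device's queue needs to be
 * plugged.
 */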
static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur.
		 *
		 * NOTE: rtn is still zero here because we don't need the
		 * queue to be plugged.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;

	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	cmd->scsi_done(cmd);
	return 0;
}
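
/**
 * scsi_done - Invoke completion on finished SCSI command.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
 * which regains ownership of the SCSI command (de facto) from a LLDD, and
 * calls blk_complete_request() for further processing.
 *
 * This function is interrupt context safe.
 */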
static void scsi_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_complete_request(cmd->request);
}
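
/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */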
static void scsi_request_fn(struct request_queue *q)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is not able to accept any more requests.
	 */
	shost = sdev->host;
	for (;;) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req)
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		if (!scsi_dev_queue_ready(q, sdev))
			break;

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);

		spin_unlock_irq(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
			spin_lock_irq(shost->host_lock);
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			spin_unlock_irq(shost->host_lock);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto host_not_ready;

		if (sdev->simple_tags)
			cmd->flags |= SCMD_TAGGED;
		else
			cmd->flags &= ~SCMD_TAGGED;

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		cmd->scsi_done = scsi_done;
		rtn = scsi_dispatch_cmd(cmd);
		if (rtn) {
			scsi_queue_insert(cmd, rtn);
			spin_lock_irq(q->queue_lock);
			goto out_delay;
		}
		spin_lock_irq(q->queue_lock);
	}

	return;

 host_not_ready:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
 not_ready:
	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	atomic_dec(&sdev->device_busy);
out_delay:
	if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
		blk_delay_queue(q, SCSI_QUEUE_DELAY);
}

static inline int prep_to_mq(int ret)
{
	switch (ret) {
	case BLKPREP_OK:
		return 0;
	case BLKPREP_DEFER:
		return BLK_MQ_RQ_QUEUE_BUSY;
	default:
		return BLK_MQ_RQ_QUEUE_ERROR;
	}
}

static int scsi_mq_prep_fn(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned char *sense_buf = cmd->sense_buffer;
	struct scatterlist *sg;

	memset(cmd, 0, sizeof(struct scsi_cmnd));

	req->special = cmd;

	cmd->request = req;
	cmd->device = sdev;
	cmd->sense_buffer = sense_buf;

	cmd->tag = req->tag;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	INIT_LIST_HEAD(&cmd->list);
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	cmd->jiffies_at_alloc = jiffies;

	if (shost->use_cmd_list) {
		spin_lock_irq(&sdev->list_lock);
		list_add_tail(&cmd->list, &sdev->cmd_list);
		spin_unlock_irq(&sdev->list_lock);
	}

	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	cmd->sdb.table.sgl = sg;

	if (scsi_host_get_prot(shost)) {
		cmd->prot_sdb = (void *)sg +
			min_t(unsigned int,
			      shost->sg_tablesize, SG_CHUNK_SIZE) *
			sizeof(struct scatterlist);
		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));

		cmd->prot_sdb->table.sgl =
			(struct scatterlist *)(cmd->prot_sdb + 1);
	}

	if (blk_bidi_rq(req)) {
		struct request *next_rq = req->next_rq;
		struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);

		memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
		bidi_sdb->table.sgl =
			(struct scatterlist *)(bidi_sdb + 1);

		next_rq->special = bidi_sdb;
	}

	blk_mq_start_request(req);

	return scsi_setup_cmnd(sdev, req);
}

static void scsi_mq_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_mq_complete_request(cmd->request, cmd->request->errors);
}

static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	int ret;
	int reason;

	ret = prep_to_mq(scsi_prep_state_check(sdev, req));
	if (ret)
		goto out;

	ret = BLK_MQ_RQ_QUEUE_BUSY;
	if (!get_device(&sdev->sdev_gendev))
		goto out;

	if (!scsi_dev_queue_ready(q, sdev))
		goto out_put_device;
	if (!scsi_target_queue_ready(shost, sdev))
		goto out_dec_device_busy;
	if (!scsi_host_queue_ready(q, shost, sdev))
		goto out_dec_target_busy;

	if (!(req->cmd_flags & REQ_DONTPREP)) {
		ret = prep_to_mq(scsi_mq_prep_fn(req));
		if (ret)
			goto out_dec_host_busy;
		req->cmd_flags |= REQ_DONTPREP;
	} else {
		blk_mq_start_request(req);
	}

	if (sdev->simple_tags)
		cmd->flags |= SCMD_TAGGED;
	else
		cmd->flags &= ~SCMD_TAGGED;

	scsi_init_cmd_errh(cmd);
	cmd->scsi_done = scsi_mq_done;

	reason = scsi_dispatch_cmd(cmd);
	if (reason) {
		scsi_set_blocked(cmd, reason);
		ret = BLK_MQ_RQ_QUEUE_BUSY;
		goto out_dec_host_busy;
	}

	return BLK_MQ_RQ_QUEUE_OK;

out_dec_host_busy:
	atomic_dec(&shost->host_busy);
out_dec_target_busy:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
out_dec_device_busy:
	atomic_dec(&sdev->device_busy);
out_put_device:
	put_device(&sdev->sdev_gendev);
out:
	switch (ret) {
	case BLK_MQ_RQ_QUEUE_BUSY:
		blk_mq_stop_hw_queue(hctx);
		if (atomic_read(&sdev->device_busy) == 0 &&
		    !scsi_device_blocked(sdev))
			blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
		break;
	case BLK_MQ_RQ_QUEUE_ERROR:
		/*
		 * Make sure to release all allocated resources when
		 * we hit an error, as we will never see this command
		 * again.
		 */
		if (req->cmd_flags & REQ_DONTPREP)
			scsi_mq_uninit_cmd(cmd);
		break;
	default:
		break;
	}
	return ret;
}

static enum blk_eh_timer_return scsi_timeout(struct request *req,
		bool reserved)
{
	if (reserved)
		return BLK_EH_RESET_TIMER;
	return scsi_times_out(req);
}

static int scsi_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
			numa_node);
	if (!cmd->sense_buffer)
		return -ENOMEM;
	return 0;
}

static void scsi_exit_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	kfree(cmd->sense_buffer);
}

static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;

	return bounce_limit;
}

static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
	struct device *dev = shost->dma_dev;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SG_MAX_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	if (!shost->use_clustering)
		q->limits.cluster = 0;

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);
}

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;
	__scsi_init_queue(shost, q);
	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_unprep_rq(q, scsi_unprep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}

static struct blk_mq_ops scsi_mq_ops = {
	.queue_rq	= scsi_queue_rq,
	.complete	= scsi_softirq_done,
	.timeout	= scsi_timeout,
	.init_request	= scsi_init_request,
	.exit_request	= scsi_exit_request,
};

struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
{
	sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
	if (IS_ERR(sdev->request_queue))
		return NULL;

	sdev->request_queue->queuedata = sdev;
	__scsi_init_queue(sdev->host, sdev->request_queue);
	return sdev->request_queue;
}

int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
	unsigned int cmd_size, sgl_size, tbl_size;

	tbl_size = shost->sg_tablesize;
	if (tbl_size > SG_CHUNK_SIZE)
		tbl_size = SG_CHUNK_SIZE;
	sgl_size = tbl_size * sizeof(struct scatterlist);
	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
	if (scsi_host_get_prot(shost))
		cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;

	memset(&shost->tag_set, 0, sizeof(shost->tag_set));
	shost->tag_set.ops = &scsi_mq_ops;
	shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
	shost->tag_set.queue_depth = shost->can_queue;
	shost->tag_set.cmd_size = cmd_size;
	shost->tag_set.numa_node = NUMA_NO_NODE;
	shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	shost->tag_set.flags |=
		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
	shost->tag_set.driver_data = shost;

	return blk_mq_alloc_tag_set(&shost->tag_set);
}

void scsi_mq_destroy_tags(struct Scsi_Host *shost)
{
	blk_mq_free_tag_set(&shost->tag_set);
}
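
/**
 * scsi_block_requests - Utility function used by low-level drivers to prevent
 * further commands from being queued to the device.
 * @shost:  host in question
 *
 * There is no timer nor any other means by which the requests get unblocked
 * other than the low-level driver calling scsi_unblock_requests().
 */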
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);
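
/**
 * scsi_unblock_requests - Utility function used by low-level drivers to allow
 * further commands to be queued to the device.
 * @shost:  host in question
 *
 * There is no timer nor any other means by which the requests get unblocked
 * other than the low-level driver calling scsi_unblock_requests().  This is
 * done as an API function so that changes to the internals of the scsi mid-
 * layer won't require wholesale changes to drivers that use this feature.
 */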
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

int __init scsi_init_queue(void)
{
	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	return 0;
}

void scsi_exit_queue(void)
{
	kmem_cache_destroy(scsi_sdb_cache);
}
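
/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 */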
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);
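
/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if unsuccessful, or the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was
 *	issued) if successful.
 */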
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result, retry_count = retries;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */
	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if(scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if(use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	} else if ((status_byte(result) == CHECK_CONDITION) &&
		   scsi_sense_valid(sshdr) &&
		   sshdr->sense_key == UNIT_ATTENTION && retry_count) {
		retry_count--;
		goto retry;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
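
/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense. Make sure that this is cleared before passing
 *		in.
 *
 *	Returns zero if unsuccessful or an error if TUR failed.  For
 *	removable media, UNIT_ATTENTION sets ->changed flag.
 **/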
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
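
/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if unsuccessful or an error if the requested
 *	transition is illegal.
 */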
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_CANCEL:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);

/**
 *	scsi_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
		scsi_rescan_device(&sdev->sdev_gendev);
		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
		envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
		break;
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
		break;
	case SDEV_EVT_LUN_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
		break;
	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

/**
 *	scsi_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	enum scsi_device_event evt_type;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
		if (test_and_clear_bit(evt_type, sdev->pending_events))
			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

/**
 *	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 *	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
	case SDEV_EVT_LUN_CHANGE_REPORTED:
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 *	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
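/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero on success, or an error code if the state
 *	transition fails.
 */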
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (atomic_read(&sdev->device_busy)) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);
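/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 */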
void scsi_device_resume(struct scsi_device *sdev)
{
	/* check if the device state was mutated prior to resume, and if
	 * so assume the state is being managed elsewhere (for example
	 * device deleted during suspend)
	 */
	if (sdev->sdev_state != SDEV_QUIESCE ||
	    scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
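/**
 * scsi_internal_device_block - internal function to put a device temporarily
 *				into the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device.  Returns zero if successful.
 * May return -EINVAL if @sdev is not in a legal state: the device
 * must be able to transition to SDEV_BLOCK (or SDEV_CREATED_BLOCK
 * for a freshly created device).  Once blocked, no further commands
 * are issued to the device until scsi_internal_device_unblock() is
 * called.
 */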
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	if (q->mq_ops) {
		blk_mq_stop_hw_queues(q);
	} else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);
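/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev: device to resume
 * @new_state: state to set the device to after unblocking
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Returns zero if
 * successful or -EINVAL if @sdev is not in a valid state.
 */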
int
scsi_internal_device_unblock(struct scsi_device *sdev,
			     enum scsi_device_state new_state)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING or one of the
	 * offlined states and goose the device queue if successful.
	 */
	if ((sdev->sdev_state == SDEV_BLOCK) ||
	    (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
		sdev->sdev_state = new_state;
	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
		if (new_state == SDEV_TRANSPORT_OFFLINE ||
		    new_state == SDEV_OFFLINE)
			sdev->sdev_state = new_state;
		else
			sdev->sdev_state = SDEV_CREATED;
	} else if (sdev->sdev_state != SDEV_CANCEL &&
		   sdev->sdev_state != SDEV_OFFLINE)
		return -EINVAL;

	if (q->mq_ops) {
		blk_mq_start_stopped_hw_queues(q, false);
	} else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), data,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), &new_state,
					device_unblock);
	else
		device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
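/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */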
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);
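/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously
 *			   mapped with scsi_kmap_atomic_sg
 * @virt: virtual address to be unmapped
 */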
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);

void sdev_disable_disk_events(struct scsi_device *sdev)
{
	atomic_inc(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_disable_disk_events);

void sdev_enable_disk_events(struct scsi_device *sdev)
{
	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
		return;
	atomic_dec(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_enable_disk_events);
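/**
 * scsi_vpd_lun_id - return a unique device identification
 * @sdev: SCSI device
 * @id: buffer for the identification
 * @id_len: length of the buffer
 *
 * Copies a unique device identification into @id based
 * on the information in the VPD page 0x83 of the device.
 * The string will be formatted as a SCSI name string.
 *
 * Returns the length of the identification or error on failure.
 * If the identifier is longer than the supplied buffer the actual
 * identifier length is returned and the buffer is not zero-padded.
 */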
int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{
	u8 cur_id_type = 0xff;
	u8 cur_id_size = 0;
	unsigned char *d, *cur_id_str;
	unsigned char *vpd_pg83;
	int id_size = -EINVAL;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	/*
	 * Look for the correct descriptor.
	 * Order of preference for lun descriptor:
	 * - SCSI name string
	 * - NAA IEEE Registered Extended
	 * - EUI-64 based 16-byte
	 * - EUI-64 based 12-byte
	 * - NAA IEEE Registered
	 * - NAA IEEE Extended
	 * - T10 Vendor ID
	 * as longer descriptors reduce the likelihood
	 * of identification clashes.
	 */

	/* The id string must be at least 20 bytes + terminating NULL byte */
	if (id_len < 21) {
		rcu_read_unlock();
		return -EINVAL;
	}

	memset(id, 0, id_len);
	d = vpd_pg83 + 4;
	while (d < vpd_pg83 + sdev->vpd_pg83_len) {
		/* Skip designators not associated with the lun */
		if ((d[1] & 0x30) != 0x00)
			goto next_desig;

		switch (d[1] & 0xf) {
		case 0x1:
			/* T10 Vendor ID */
			if (cur_id_size > d[3])
				break;
			/* Prefer anything */
			if (cur_id_type > 0x01 && cur_id_type != 0xff)
				break;
			cur_id_size = d[3];
			if (cur_id_size + 4 > id_len)
				cur_id_size = id_len - 4;
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			id_size = snprintf(id, id_len, "t10.%*pE",
					   cur_id_size, cur_id_str);
			break;
		case 0x2:
			/* EUI-64 */
			if (cur_id_size > d[3])
				break;
			/* Prefer NAA IEEE Registered Extended */
			if (cur_id_type == 0x3 &&
			    cur_id_size == d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "eui.%8phN",
						   cur_id_str);
				break;
			case 12:
				id_size = snprintf(id, id_len,
						   "eui.%12phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "eui.%16phN",
						   cur_id_str);
				break;
			default:
				cur_id_size = 0;
				break;
			}
			break;
		case 0x3:
			/* NAA */
			if (cur_id_size > d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "naa.%8phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "naa.%16phN",
						   cur_id_str);
				break;
			default:
				cur_id_size = 0;
				break;
			}
			break;
		case 0x8:
			/* SCSI name string */
			if (cur_id_size + 4 > d[3])
				break;
			/* Prefer others for truncated descriptor */
			if (cur_id_size && d[3] > id_len)
				break;
			cur_id_size = id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			if (cur_id_size >= id_len)
				cur_id_size = id_len - 1;
			memcpy(id, cur_id_str, cur_id_size);
			/* Decrease priority for truncated descriptor */
			if (cur_id_size != id_size)
				cur_id_size = 6;
			break;
		default:
			break;
		}
next_desig:
		d += d[3] + 4;
	}
	rcu_read_unlock();

	return id_size;
}
EXPORT_SYMBOL(scsi_vpd_lun_id);
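/**
 * scsi_vpd_tpg_id - return a target port group identifier
 * @sdev: SCSI device
 * @rel_id: pointer to return the relative target port in, if not %NULL
 *
 * Returns the Target Port Group identifier from the information
 * in VPD page 0x83 of the device.
 *
 * Returns the identifier or error on failure.
 */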
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{
	unsigned char *d;
	unsigned char *vpd_pg83;
	int group_id = -EAGAIN, rel_port = -1;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	/* Use the RCU-dereferenced copy, not sdev->vpd_pg83 directly */
	d = vpd_pg83 + 4;
	while (d < vpd_pg83 + sdev->vpd_pg83_len) {
		switch (d[1] & 0xf) {
		case 0x4:
			/* Relative target port */
			rel_port = get_unaligned_be16(&d[6]);
			break;
		case 0x5:
			/* Target port group */
			group_id = get_unaligned_be16(&d[6]);
			break;
		default:
			break;
		}
		d += d[3] + 4;
	}
	rcu_read_unlock();

	if (group_id >= 0 && rel_id && rel_port != -1)
		*rel_id = rel_port;

	return group_id;
}
EXPORT_SYMBOL(scsi_vpd_tpg_id);
