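/*
 * scsi_lib.c - SCSI midlayer request queueing, dispatch and completion
 * handling.
 */
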
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_debugfs.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

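/*
 * Number of scatterlist entries allocated inline with each command.  When
 * the architecture cannot chain scatterlists (CONFIG_ARCH_NO_SG_CHAIN) no
 * inline entries are reserved and a full table is always allocated.
 */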
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define SCSI_INLINE_PROT_SG_CNT  0
#define SCSI_INLINE_SG_CNT  0
#else
#define SCSI_INLINE_PROT_SG_CNT  1
#define SCSI_INLINE_SG_CNT  2
#endif

static struct kmem_cache *scsi_sdb_cache;
static struct kmem_cache *scsi_sense_cache;
static struct kmem_cache *scsi_sense_isadma_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);

static inline struct kmem_cache *
scsi_select_sense_cache(bool unchecked_isa_dma)
{
	return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
}

static void scsi_free_sense_buffer(bool unchecked_isa_dma,
				   unsigned char *sense_buffer)
{
	kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
			sense_buffer);
}

static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
	gfp_t gfp_mask, int numa_node)
{
	return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
				     gfp_mask, numa_node);
}

int scsi_init_sense_cache(struct Scsi_Host *shost)
{
	struct kmem_cache *cache;
	int ret = 0;

	mutex_lock(&scsi_sense_cache_mutex);
	cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
	if (cache)
		goto exit;

	if (shost->unchecked_isa_dma) {
		scsi_sense_isadma_cache =
			kmem_cache_create("scsi_sense_cache(DMA)",
				SCSI_SENSE_BUFFERSIZE, 0,
				SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
		if (!scsi_sense_isadma_cache)
			ret = -ENOMEM;
	} else {
		scsi_sense_cache =
			kmem_cache_create_usercopy("scsi_sense_cache",
				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
				0, SCSI_SENSE_BUFFERSIZE, NULL);
		if (!scsi_sense_cache)
			ret = -ENOMEM;
	}
 exit:
	mutex_unlock(&scsi_sense_cache_mutex);
	return ret;
}

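/*
 * How long to wait, in milliseconds, before rerunning a queue after a
 * resource shortage.
 */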
#define SCSI_QUEUE_DELAY	3

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

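	/*
	 * Set the appropriate busy bit for the device, target or host.
	 * The request will be requeued and retried once an outstanding
	 * command completes and the corresponding *_blocked counter
	 * drains back to zero.
	 */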
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
	if (cmd->request->rq_flags & RQF_DONTPREP) {
		cmd->request->rq_flags &= ~RQF_DONTPREP;
		scsi_mq_uninit_cmd(cmd);
	} else {
		WARN_ON_ONCE(true);
	}
	blk_mq_requeue_request(cmd->request, true);
}

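/*
 * __scsi_queue_insert - private queue insertion
 * @cmd:	The SCSI command being requeued
 * @reason:	The reason for the requeue
 * @unbusy:	Whether the queue should be unbusied
 *
 * Private queue insertion used for a requeue after completion.  The
 * public interface, scsi_queue_insert(), always unbusies the queue
 * because it is always called before the queue is run.
 */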
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
	struct scsi_device *device = cmd->device;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.  Clear the result first, since
	 * the command will be reissued from scratch.
	 */
	cmd->result = 0;

	blk_mq_requeue_request(cmd->request, true);
}

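/**
 * scsi_queue_insert - Reinsert a command in the queue.
 * @cmd:    command that we are adding to queue.
 * @reason: why we are inserting command to queue.
 *
 * We do this for one of two cases: either the host is busy and cannot
 * accept any more commands for the time being, or the device returned
 * QUEUE_FULL and can accept no more commands.
 */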
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, true);
}

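/**
 * __scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @sshdr:	optional decoded sense header
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags for ->cmd_flags
 * @rq_flags:	flags for ->rq_flags
 * @resid:	optional residual length
 *
 * Returns the scsi_cmnd result field if a command was executed, or a
 * negative Linux error code if we didn't get that far.
 */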
int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, struct scsi_sense_hdr *sshdr,
		 int timeout, int retries, u64 flags, req_flags_t rq_flags,
		 int *resid)
{
	struct request *req;
	struct scsi_request *rq;
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue,
			data_direction == DMA_TO_DEVICE ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
	if (IS_ERR(req))
		return ret;
	rq = scsi_req(req);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, GFP_NOIO))
		goto out;

	rq->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(rq->cmd, cmd, rq->cmd_len);
	rq->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags;
	req->rq_flags |= rq_flags | RQF_QUIET;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
		memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);

	if (resid)
		*resid = rq->resid_len;
	if (sense && rq->sense_len)
		memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
	if (sshdr)
		scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
	ret = rq->result;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(__scsi_execute);

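/*
 * scsi_init_cmd_errh - initialize cmd fields related to error handling.
 * @cmd: command that is ready to be queued.
 *
 * Resets the residual count, clears the sense buffer and derives the
 * CDB length if the caller did not set it.  Called once for each
 * command before it is dispatched.
 */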
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

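/*
 * Decrement the host_busy counter and wake up the error handler if
 * necessary.  The RCU read lock ensures that this function either
 * finishes before scsi_eh_scmd_add() increases the host_failed counter
 * or that it notices the shost state change made by scsi_eh_scmd_add(),
 * so a wakeup is never missed.
 */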
static void scsi_dec_host_busy(struct Scsi_Host *shost)
{
	unsigned long flags;

	rcu_read_lock();
	atomic_dec(&shost->host_busy);
	if (unlikely(scsi_host_in_recovery(shost))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (shost->host_failed || shost->host_eh_scheduled)
			scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	rcu_read_unlock();
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);

	scsi_dec_host_busy(shost);

	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	atomic_dec(&sdev->device_busy);
}

static void scsi_kick_queue(struct request_queue *q)
{
	blk_mq_run_hw_queues(q, false);
}

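/*
 * Called for single_lun devices on IO completion.  Clear
 * starget_sdev_user, then kick the queues of all the scsi_devices on
 * the target, starting with current_sdev.
 *
 * Called with *no* scsi locks held.
 */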
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Run the queues of all the scsi_devices on the target,
	 * current_sdev first.
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (shost->can_queue > 0 &&
	    atomic_read(&shost->host_busy) >= shost->can_queue)
		return true;
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, keep kicking them.  Kicking a queue may
		 * add the sdev back to the starved list, which is
		 * protected by host_lock.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and
		 * destroy it and the queue.  Mitigate by taking a reference
		 * to the queue and never touching the sdev again after we
		 * drop the host lock.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		scsi_kick_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

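/*
 * scsi_run_queue - Select a proper request queue to serve next.
 * @q:	last request's queue
 *
 * The previous command was completely finished, start a new one if
 * possible.
 */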
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	blk_mq_run_hw_queues(q, false);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (!blk_rq_is_passthrough(cmd->request)) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table,
				SCSI_INLINE_SG_CNT);
	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table,
				SCSI_INLINE_PROT_SG_CNT);
}

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	scsi_mq_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);
	scsi_del_cmd_from_list(cmd);
}

/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
		unsigned int bytes)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);

	if (!blk_rq_is_scsi(req)) {
		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
		cmd->flags &= ~SCMD_INITIALIZED;
	}

	/*
	 * Calling rcu_barrier() is not necessary here because the
	 * SCSI error handler guarantees that the function called by
	 * call_rcu() has been called before scsi_end_request() is
	 * called.
	 */
	destroy_rcu_head(&cmd->rcu);

	/*
	 * In the MQ case the command gets freed by __blk_mq_end_request,
	 * so we have to do all cleanup that depends on it to happen
	 * before that function gets called.
	 */
	scsi_mq_uninit_cmd(cmd);

	/*
	 * queue is still alive, so grab the ref for preventing it
	 * from being cleaned up during running queue.
	 */
	percpu_ref_get(&q->q_usage_counter);

	__blk_mq_end_request(req, error);

	if (scsi_target(sdev)->single_lun ||
	    !list_empty(&sdev->host->starved_list))
		kblockd_schedule_work(&sdev->requeue_work);
	else
		blk_mq_run_hw_queues(q, true);

	percpu_ref_put(&q->q_usage_counter);
	return false;
}

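/**
 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 * @cmd:	SCSI command
 * @result:	scsi error code
 *
 * Translate a SCSI result code into a blk_status_t value.  May reset
 * the host byte of @cmd->result.
 */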
static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
{
	switch (host_byte(result)) {
	case DID_OK:
		/*
		 * Also check the other bytes than the status byte in result
		 * to handle the case when a SCSI LLD sets result to
		 * DRIVER_SENSE << 24 without setting SAM_STAT_CHECK_CONDITION.
		 */
		if (scsi_status_is_good(result) && (result & ~0xff) == 0)
			return BLK_STS_OK;
		return BLK_STS_IOERR;
	case DID_TRANSPORT_FAILFAST:
		return BLK_STS_TRANSPORT;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_TARGET;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_NEXUS;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_NOSPC;
	case DID_MEDIUM_ERROR:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_MEDIUM;
	default:
		return BLK_STS_IOERR;
	}
}

/* Helper for scsi_io_completion() when "reprep" action required. */
static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
				      struct request_queue *q)
{
	/* A new command will be prepared and issued. */
	scsi_mq_requeue_cmd(cmd);
}

/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
	struct scsi_sense_hdr sshdr;
	bool sense_valid;
	bool sense_current = true;	/* false implies "deferred sense" */
	blk_status_t blk_stat;

	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
	if (sense_valid)
		sense_current = !scsi_sense_is_deferred(&sshdr);

	blk_stat = scsi_result_to_blk_status(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/*
		 * Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && sense_current) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/*
				 * Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/*
			 * If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) { /* DIX */
				action = ACTION_FAIL;
				blk_stat = BLK_STS_PROTECTION;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				blk_stat = BLK_STS_TARGET;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				blk_stat = BLK_STS_PROTECTION;
			break;
		case NOT_READY:
			/*
			 * If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
				case 0x1a: /* start stop unit in progress */
				case 0x1b: /* sanitize in progress */
				case 0x1d: /* configuration in progress */
				case 0x24: /* depopulation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->rq_flags & RQF_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level =
				     SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						    SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate
			 * messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (driver_byte(result) == DRIVER_SENSE)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
			return;
		/* FALLTHROUGH */
	case ACTION_REPREP:
		scsi_io_completion_reprep(cmd, q);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
		break;
	}
}

/*
 * Helper for scsi_io_completion() when cmd->result is non-zero.  Returns
 * a new result that may suppress further error checking.
 */
static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
					blk_status_t *blk_statp)
{
	bool sense_valid;
	bool sense_current = true;	/* false implies "deferred sense" */
	struct request *req = cmd->request;
	struct scsi_sense_hdr sshdr;

	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
	if (sense_valid)
		sense_current = !scsi_sense_is_deferred(&sshdr);

	if (blk_rq_is_passthrough(req)) {
		if (sense_valid) {
			/*
			 * SG_IO wants current and deferred errors
			 */
			scsi_req(req)->sense_len =
				min(8 + cmd->sense_buffer[7],
				    SCSI_SENSE_BUFFERSIZE);
		}
		if (sense_current)
			*blk_statp = scsi_result_to_blk_status(cmd, result);
	} else if (blk_rq_bytes(req) == 0 && sense_current) {
		/*
		 * Flush commands do not transfer any data, and thus cannot
		 * use good_bytes != blk_rq_bytes(req) as the signal for an
		 * error.  This sets *blk_statp explicitly for that case.
		 */
		*blk_statp = scsi_result_to_blk_status(cmd, result);
	}

	/*
	 * Recovered errors need reporting, but they're always treated as
	 * success, so fiddle the result code here.  For passthrough requests
	 * we already took a copy of the original into scsi_req(req)->result
	 * which is what gets returned to the user.
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		bool do_print = true;

		/*
		 * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d]
		 * skip print since caller wants ATA registers.  Only occurs
		 * on SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			do_print = false;
		else if (req->rq_flags & RQF_QUIET)
			do_print = false;
		if (do_print)
			scsi_print_sense(cmd);
		result = 0;
		/* for passthrough, *blk_statp may be set */
		*blk_statp = BLK_STS_OK;
	}

	/*
	 * Another corner case: the SCSI status byte is non-zero but 'good'.
	 * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
	 * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
	 * if it can't fit).  Treat SAM_STAT_CONDITION_MET and the related
	 * intermediate statuses (both obsolete in SAM-4) as good.
	 */
	if (status_byte(result) && scsi_status_is_good(result)) {
		result = 0;	/* does not block eh */
		*blk_statp = BLK_STS_OK;
	}
	return result;
}

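/**
 * scsi_io_completion - Completion processing for SCSI commands.
 * @cmd:	command that is finished.
 * @good_bytes:	number of processed bytes.
 *
 * We will finish off the specified number of sectors.  If we are done,
 * the command block will be released and the queue function will be
 * goosed.  If we are not done then we have to figure out what to do
 * next:
 *
 *   a) We can call scsi_io_completion_reprep().  The request will be
 *	unprepared and put back on the queue.  Then a new command will
 *	be created for it.  This should be used if we made forward
 *	progress, or if we want to switch from READ(10) to READ(6) for
 *	example.
 *
 *   b) We can call scsi_io_completion_action().  The request will be
 *	put back on the queue and retried using the same command as
 *	before, possibly after a delay.
 *
 *   c) We can call scsi_end_request() with blk_stat other than
 *	BLK_STS_OK, to fail the remainder of the request.
 */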
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	blk_status_t blk_stat = BLK_STS_OK;

	if (unlikely(result))	/* a nz result may or may not be an error */
		result = scsi_io_completion_nz_result(cmd, result, &blk_stat);

	if (unlikely(blk_rq_is_passthrough(req))) {
		/*
		 * scsi_result_to_blk_status may have reset the host_byte
		 */
		scsi_req(req)->result = cmd->result;
	}

	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.  Failed, zero length commands always need to drop down
	 * to the retry code.  The fast path should return in this block.
	 */
	if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
		if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
			return; /* no bytes remaining */
	}

	/* Kill remainder if no retries. */
	if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
		if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
			WARN_ONCE(true,
			    "Bytes remaining after failed, no-retry command");
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request just queue the command up again.
	 */
	if (likely(result == 0))
		scsi_io_completion_reprep(cmd, q);
	else
		scsi_io_completion_action(cmd, result);
}

static blk_status_t scsi_init_sgtable(struct request *req,
		struct scsi_data_buffer *sdb)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(sg_alloc_table_chained(&sdb->table,
			blk_rq_nr_phys_segments(req), sdb->table.sgl,
			SCSI_INLINE_SG_CNT)))
		return BLK_STS_RESOURCE;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_payload_bytes(req);
	return BLK_STS_OK;
}

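/**
 * scsi_init_io - SCSI I/O initialization function.
 * @cmd:  command descriptor we wish to initialize
 *
 * Returns:
 * * BLK_STS_OK       - on success
 * * BLK_STS_RESOURCE - if the failure is retryable
 * * BLK_STS_IOERR    - if the failure is fatal
 */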
blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	blk_status_t ret;

	if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
		return BLK_STS_IOERR;

	ret = scsi_init_sgtable(rq, &cmd->sdb);
	if (ret)
		return ret;

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		if (WARN_ON_ONCE(!prot_sdb)) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			ret = BLK_STS_IOERR;
			goto out_free_sgtables;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
				prot_sdb->table.sgl,
				SCSI_INLINE_PROT_SG_CNT)) {
			ret = BLK_STS_RESOURCE;
			goto out_free_sgtables;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(count > ivecs);
		BUG_ON(count > queue_max_integrity_segments(rq->q));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLK_STS_OK;
out_free_sgtables:
	scsi_mq_free_sgtables(cmd);
	return ret;
}
EXPORT_SYMBOL(scsi_init_io);

/**
 * scsi_initialize_rq - initialize struct scsi_cmnd partially
 * @rq: Request associated with the command to be initialized.
 *
 * Initializes the members of struct scsi_cmnd that must be set up
 * before request processing starts and that won't be reinitialized if
 * the SCSI command is requeued.
 */
static void scsi_initialize_rq(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	scsi_req_init(&cmd->req);
	init_rcu_head(&cmd->rcu);
	cmd->jiffies_at_alloc = jiffies;
	cmd->retries = 0;
}

/* Add a command to the list used by the aacraid and dpt_i2o drivers */
void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (shost->use_cmd_list) {
		spin_lock_irqsave(&sdev->list_lock, flags);
		list_add_tail(&cmd->list, &sdev->cmd_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}
}

/* Remove a command from the list used by the aacraid and dpt_i2o drivers */
void scsi_del_cmd_from_list(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (shost->use_cmd_list) {
		spin_lock_irqsave(&sdev->list_lock, flags);
		BUG_ON(list_empty(&cmd->list));
		list_del_init(&cmd->list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}
}

/* Called after a request has been started. */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
	void *buf = cmd->sense_buffer;
	void *prot = cmd->prot_sdb;
	struct request *rq = blk_mq_rq_from_pdu(cmd);
	unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
	unsigned long jiffies_at_alloc;
	int retries;

	if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
		flags |= SCMD_INITIALIZED;
		scsi_initialize_rq(rq);
	}

	jiffies_at_alloc = cmd->jiffies_at_alloc;
	retries = cmd->retries;
	/* zero out the cmd, except for the embedded scsi_request */
	memset((char *)cmd + sizeof(cmd->req), 0,
		sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);

	cmd->device = dev;
	cmd->sense_buffer = buf;
	cmd->prot_sdb = prot;
	cmd->flags = flags;
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	cmd->jiffies_at_alloc = jiffies_at_alloc;
	cmd->retries = retries;

	scsi_add_cmd_to_list(cmd);
}

static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	/*
	 * Passthrough requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		blk_status_t ret = scsi_init_io(cmd);
		if (unlikely(ret != BLK_STS_OK))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->cmd_len = scsi_req(req)->cmd_len;
	cmd->cmnd = scsi_req(req)->cmd;
	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = scsi_req(req)->retries;
	return BLK_STS_OK;
}

/*
 * Setup a normal block command.  These are simple requests from
 * filesystems that still need to be translated to SCSI CDBs by the ULD.
 */
static blk_status_t scsi_setup_fs_cmnd(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
		blk_status_t ret = sdev->handler->prep_fn(sdev, req);
		if (ret != BLK_STS_OK)
			return ret;
	}

	cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}

static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	if (blk_rq_is_scsi(req))
		return scsi_setup_scsi_cmnd(sdev, req);
	else
		return scsi_setup_fs_cmnd(sdev, req);
}

static blk_status_t
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	switch (sdev->sdev_state) {
	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		/*
		 * If the device is offline we refuse to process any
		 * commands.  The device must be brought online
		 * before trying any recovery commands.
		 */
		sdev_printk(KERN_ERR, sdev,
			    "rejecting I/O to offline device\n");
		return BLK_STS_IOERR;
	case SDEV_DEL:
		/*
		 * If the device is fully deleted, we refuse to
		 * process any commands as well.
		 */
		sdev_printk(KERN_ERR, sdev,
			    "rejecting I/O to dead device\n");
		return BLK_STS_IOERR;
	case SDEV_BLOCK:
	case SDEV_CREATED_BLOCK:
		return BLK_STS_RESOURCE;
	case SDEV_QUIESCE:
		/*
		 * If the device is quiesced we defer normal commands.
		 */
		if (req && !(req->rq_flags & RQF_PREEMPT))
			return BLK_STS_RESOURCE;
		return BLK_STS_OK;
	default:
		/*
		 * For any other not fully online state we only allow
		 * special commands.  In particular any user initiated
		 * command is not allowed.
		 */
		if (req && !(req->rq_flags & RQF_PREEMPT))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	}
}

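/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.  On success a device_busy reference has been taken that the
 * caller must release, e.g. via scsi_device_unbusy().
 */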
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	unsigned int busy;

	busy = atomic_inc_return(&sdev->device_busy) - 1;
	if (atomic_read(&sdev->device_blocked)) {
		if (busy)
			goto out_dec;

		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (atomic_dec_return(&sdev->device_blocked) > 0)
			goto out_dec;
		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
	}

	if (busy >= sdev->queue_depth)
		goto out_dec;

	return 1;
out_dec:
	atomic_dec(&sdev->device_busy);
	return 0;
}

/*
 * scsi_target_queue_ready: checks if we can send commands to target
 * @sdev: scsi device on starget to check.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0.  We must be prepared to accept at least one command.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	unsigned int busy;

	if (scsi_host_in_recovery(shost))
		return 0;

	busy = atomic_inc_return(&shost->host_busy) - 1;
	if (atomic_read(&shost->host_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->can_queue > 0 && busy >= shost->can_queue)
		goto starved;
	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	scsi_dec_host_busy(shost);
	return 0;
}

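/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking the lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os, scsi
 * needs to return 'not busy'.  Otherwise, request stacking drivers
 * may hold requests forever.
 */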
static bool scsi_mq_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dying(q))
		return false;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since the block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in the SCSI layer.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return true;

	return false;
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		scmd_printk(KERN_ERR, cmd,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		scsi_eh_scmd_add(cmd);
		break;
	}
}

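/**
 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero means the request was rejected and the device's queue
 * needs to be plugged.
 */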
static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/*
		 * in SDEV_DEL we error all commands.  DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present
		 */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;
	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	cmd->scsi_done(cmd);
	return 0;
}

/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
{
	return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
		sizeof(struct scatterlist);
}

static blk_status_t scsi_mq_prep_fn(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scatterlist *sg;

	scsi_init_command(sdev, cmd);

	cmd->request = req;
	cmd->tag = req->tag;
	cmd->prot_op = SCSI_PROT_NORMAL;

	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	cmd->sdb.table.sgl = sg;

	if (scsi_host_get_prot(shost)) {
		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));

		cmd->prot_sdb->table.sgl =
			(struct scatterlist *)(cmd->prot_sdb + 1);
	}

	blk_mq_start_request(req);

	return scsi_setup_cmnd(sdev, req);
}

static void scsi_mq_done(struct scsi_cmnd *cmd)
{
	if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
		return;
	trace_scsi_dispatch_cmd_done(cmd);

	/*
	 * If the block layer didn't complete the request due to a timeout
	 * injection, scsi must clear its internal completed state so that
	 * the timeout handler will see it needs to escalate its own error
	 * recovery.
	 */
	if (unlikely(!blk_mq_complete_request(cmd->request)))
		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
}

static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct scsi_device *sdev = q->queuedata;

	atomic_dec(&sdev->device_busy);
}

static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct scsi_device *sdev = q->queuedata;

	if (scsi_dev_queue_ready(q, sdev))
		return true;

	if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
		blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
	return false;
}

static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	blk_status_t ret;
	int reason;

	/*
	 * If the device is not in running state we will reject some or all
	 * commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		ret = scsi_prep_state_check(sdev, req);
		if (ret != BLK_STS_OK)
			goto out_put_budget;
	}

	ret = BLK_STS_RESOURCE;
	if (!scsi_target_queue_ready(shost, sdev))
		goto out_put_budget;
	if (!scsi_host_queue_ready(q, shost, sdev))
		goto out_dec_target_busy;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		ret = scsi_mq_prep_fn(req);
		if (ret != BLK_STS_OK)
			goto out_dec_host_busy;
		req->rq_flags |= RQF_DONTPREP;
	} else {
		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
		blk_mq_start_request(req);
	}

	if (sdev->simple_tags)
		cmd->flags |= SCMD_TAGGED;
	else
		cmd->flags &= ~SCMD_TAGGED;

	scsi_init_cmd_errh(cmd);
	cmd->scsi_done = scsi_mq_done;

	reason = scsi_dispatch_cmd(cmd);
	if (reason) {
		scsi_set_blocked(cmd, reason);
		ret = BLK_STS_RESOURCE;
		goto out_dec_host_busy;
	}

	return BLK_STS_OK;

out_dec_host_busy:
	scsi_dec_host_busy(shost);
out_dec_target_busy:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
out_put_budget:
	scsi_mq_put_budget(hctx);
	switch (ret) {
	case BLK_STS_OK:
		break;
	case BLK_STS_RESOURCE:
		if (atomic_read(&sdev->device_busy) ||
		    scsi_device_blocked(sdev))
			ret = BLK_STS_DEV_RESOURCE;
		break;
	default:
		if (unlikely(!scsi_device_online(sdev)))
			scsi_req(req)->result = DID_NO_CONNECT << 16;
		else
			scsi_req(req)->result = DID_ERROR << 16;
		/*
		 * Make sure to release all allocated resources when
		 * we hit an error, as we will never see this command
		 * again.
		 */
		if (req->rq_flags & RQF_DONTPREP)
			scsi_mq_uninit_cmd(cmd);
		break;
	}
	return ret;
}

static enum blk_eh_timer_return scsi_timeout(struct request *req,
	bool reserved)
{
	if (reserved)
		return BLK_EH_RESET_TIMER;
	return scsi_times_out(req);
}

static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
				unsigned int hctx_idx, unsigned int numa_node)
{
	struct Scsi_Host *shost = set->driver_data;
	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct scatterlist *sg;

	if (unchecked_isa_dma)
		cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
	cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
						    GFP_KERNEL, numa_node);
	if (!cmd->sense_buffer)
		return -ENOMEM;
	cmd->req.sense = cmd->sense_buffer;

	if (scsi_host_get_prot(shost)) {
		sg = (void *)cmd + sizeof(struct scsi_cmnd) +
			shost->hostt->cmd_size;
		cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
	}

	return 0;
}

static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
				 unsigned int hctx_idx)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
			       cmd->sense_buffer);
}

static int scsi_map_queues(struct blk_mq_tag_set *set)
{
	struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);

	if (shost->hostt->map_queues)
		return shost->hostt->map_queues(shost);
	return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}

void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
	struct device *dev = shost->dma_dev;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SG_MAX_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	if (dev->dma_mask) {
		shost->max_sectors = min_t(unsigned int, shost->max_sectors,
				dma_max_mapping_size(dev) >> SECTOR_SHIFT);
	}
	blk_queue_max_hw_sectors(q, shost->max_sectors);
	if (shost->unchecked_isa_dma)
		blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, shost->max_segment_size);
	blk_queue_virt_boundary(q, shost->virt_boundary_mask);
	dma_set_max_seg_size(dev, queue_max_segment_size(q));

	/*
	 * Set a reasonable default alignment: the larger of 32-byte (dword),
	 * which is a common minimum for HBAs, and the minimum DMA alignment,
	 * which is set by the platform.
	 *
	 * Devices that require a bigger alignment can increase it later.
	 */
	blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
}
EXPORT_SYMBOL_GPL(__scsi_init_queue);

static const struct blk_mq_ops scsi_mq_ops = {
	.get_budget	= scsi_mq_get_budget,
	.put_budget	= scsi_mq_put_budget,
	.queue_rq	= scsi_queue_rq,
	.complete	= scsi_softirq_done,
	.timeout	= scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
	.show_rq	= scsi_show_rq,
#endif
	.init_request	= scsi_mq_init_request,
	.exit_request	= scsi_mq_exit_request,
	.initialize_rq_fn = scsi_initialize_rq,
	.busy		= scsi_mq_lld_busy,
	.map_queues	= scsi_map_queues,
};

struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
{
	sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
	if (IS_ERR(sdev->request_queue))
		return NULL;

	sdev->request_queue->queuedata = sdev;
	__scsi_init_queue(sdev->host, sdev->request_queue);
	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue);
	return sdev->request_queue;
}

int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
	unsigned int cmd_size, sgl_size;

	sgl_size = scsi_mq_inline_sgl_size(shost);
	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
	if (scsi_host_get_prot(shost))
		cmd_size += sizeof(struct scsi_data_buffer) +
			sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;

	memset(&shost->tag_set, 0, sizeof(shost->tag_set));
	shost->tag_set.ops = &scsi_mq_ops;
	shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
	shost->tag_set.queue_depth = shost->can_queue;
	shost->tag_set.cmd_size = cmd_size;
	shost->tag_set.numa_node = NUMA_NO_NODE;
	shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	shost->tag_set.flags |=
		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
	shost->tag_set.driver_data = shost;

	return blk_mq_alloc_tag_set(&shost->tag_set);
}

void scsi_mq_destroy_tags(struct Scsi_Host *shost)
{
	blk_mq_free_tag_set(&shost->tag_set);
}

/**
 * scsi_device_from_queue - return sdev associated with a request_queue
 * @q: The request queue to return the sdev from
 *
 * Return the sdev associated with a request queue or NULL if the
 * request_queue does not reference a SCSI device.
 */
struct scsi_device *scsi_device_from_queue(struct request_queue *q)
{
	struct scsi_device *sdev = NULL;

	if (q->mq_ops == &scsi_mq_ops)
		sdev = q->queuedata;
	if (!sdev || !get_device(&sdev->sdev_gendev))
		sdev = NULL;

	return sdev;
}
EXPORT_SYMBOL_GPL(scsi_device_from_queue);

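/**
 * scsi_block_requests - Utility function used by low-level drivers to
 * prevent further commands from being queued to the device.
 * @shost:  host in question
 *
 * There is no timer nor any other means by which the requests get
 * unblocked other than the low-level driver calling
 * scsi_unblock_requests().
 */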
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

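/**
 * scsi_unblock_requests - Utility function used by low-level drivers to
 * allow further commands to be queued to the device.
 * @shost:  host in question
 *
 * Clears the self-blocked flag and reruns the host's queues.  This is
 * done as an API function so that changes to the internals of the scsi
 * midlayer won't require wholesale changes to drivers that use this
 * feature.
 */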
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

int __init scsi_init_queue(void)
{
	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	return 0;
}

void scsi_exit_queue(void)
{
	kmem_cache_destroy(scsi_sense_cache);
	kmem_cache_destroy(scsi_sense_isadma_cache);
	kmem_cache_destroy(scsi_sdb_cache);
}

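/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error.
 */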
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);

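/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to six
 *	byte commands if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *
 *	Returns the result of scsi_execute_req(); on success @data is
 *	filled in from the returned mode parameter header.
 */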
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result, retry_count = retries;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/*
	 * This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported.
	 */
	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    driver_byte(result) == DRIVER_SENSE) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Buggy devices return an invalid header for
			 * mode pages 6 and 8; synthesize sane values. */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	} else if ((status_byte(result) == CHECK_CONDITION) &&
		   scsi_sense_valid(sshdr) &&
		   sshdr->sense_key == UNIT_ATTENTION && retry_count) {
		retry_count--;
		goto retry;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);

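/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to be tested
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr: output pointer for decoded sense information
 *
 *	Returns zero if successful, or the last failing command result.
 *	For removable media a UNIT ATTENTION sets sdev->changed.
 */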
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	int result;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, 1, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

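/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 */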
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_CANCEL:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);

/**
 *	scsi_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
		scsi_rescan_device(&sdev->sdev_gendev);
		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
		envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
		break;
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
		break;
	case SDEV_EVT_LUN_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
		break;
	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
		envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
		break;
	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

/**
 *	scsi_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	enum scsi_device_event evt_type;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
		if (test_and_clear_bit(evt_type, sdev->pending_events))
			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

/**
 *	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 *	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
	case SDEV_EVT_LUN_CHANGE_REPORTED:
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 *	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);

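/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only RQF_PREEMPT requests will be processed, all others
 *	will be deferred.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero on success or an error code on failure.
 */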
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err;

	/*
	 * It is allowed to call scsi_device_quiesce() multiple times from
	 * the same context but concurrent scsi_device_quiesce() calls are
	 * not allowed.
	 */
	WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);

	if (sdev->quiesced_by == current)
		return 0;

	blk_set_pm_only(q);

	blk_mq_freeze_queue(q);
	/*
	 * Ensure that the effect of blk_set_pm_only() will be visible
	 * for percpu_ref_tryget() callers that occur after the queue
	 * unfreeze even if the queue was already frozen before this
	 * function was called.
	 */
	synchronize_rcu();
	blk_mq_unfreeze_queue(q);

	mutex_lock(&sdev->state_mutex);
	err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err == 0)
		sdev->quiesced_by = current;
	else
		blk_clear_pm_only(q);
	mutex_unlock(&sdev->state_mutex);

	return err;
}
EXPORT_SYMBOL(scsi_device_quiesce);

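/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 */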
void scsi_device_resume(struct scsi_device *sdev)
{
	/*
	 * Clear the pm-only flag and, if the device is still quiesced,
	 * return it to the running state.  A failed state change (e.g.
	 * because the device went offline meanwhile) is ignored.
	 */
	mutex_lock(&sdev->state_mutex);
	if (sdev->quiesced_by) {
		sdev->quiesced_by = NULL;
		blk_clear_pm_only(sdev->request_queue);
	}
	if (sdev->sdev_state == SDEV_QUIESCE)
		scsi_device_set_state(sdev, SDEV_RUNNING);
	mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

/**
 * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Pause SCSI command processing on the specified device.  Does not sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_BLOCK state (which must be
 * a legal transition).  When the device is in this state, command processing
 * is paused until the device leaves the SDEV_BLOCK state.  See also
 * scsi_internal_device_unblock_nowait().
 */
int scsi_internal_device_block_nowait(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK, stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	blk_mq_quiesce_queue_nowait(q);
	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);

/**
 * scsi_internal_device_block - try to transition to the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Pause SCSI command processing on the specified device and wait until all
 * ongoing scsi_queue_rq() calls have finished.  May sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 */
static int scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err;

	mutex_lock(&sdev->state_mutex);
	err = scsi_internal_device_block_nowait(sdev);
	if (err == 0)
		blk_mq_quiesce_queue(q);
	mutex_unlock(&sdev->state_mutex);

	return err;
}

void scsi_start_queue(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;

	blk_mq_unquiesce_queue(q);
}

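/**
 * scsi_internal_device_unblock_nowait - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Restart the device queue for a previously suspended SCSI device.  Does not
 * sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_RUNNING state or to one of
 * the offline states (which must be a legal transition), allowing the
 * midlayer to goose the queue for this device.
 */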
int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
					enum scsi_device_state new_state)
{
	/*
	 * Try to transition the scsi device to SDEV_RUNNING or one of the
	 * offlined states and goose the device queue if successful.
	 */
	switch (sdev->sdev_state) {
	case SDEV_BLOCK:
	case SDEV_TRANSPORT_OFFLINE:
		sdev->sdev_state = new_state;
		break;
	case SDEV_CREATED_BLOCK:
		if (new_state == SDEV_TRANSPORT_OFFLINE ||
		    new_state == SDEV_OFFLINE)
			sdev->sdev_state = new_state;
		else
			sdev->sdev_state = SDEV_CREATED;
		break;
	case SDEV_CANCEL:
	case SDEV_OFFLINE:
		break;
	default:
		return -EINVAL;
	}
	scsi_start_queue(sdev);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Restart the device queue for a previously suspended SCSI device. May sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_RUNNING state or to one of
 * the offline states (which must be a legal transition) allowing the midlayer
 * to goose the queue for this device.
 */
static int scsi_internal_device_unblock(struct scsi_device *sdev,
					enum scsi_device_state new_state)
{
	int ret;

	mutex_lock(&sdev->state_mutex);
	ret = scsi_internal_device_unblock_nowait(sdev, new_state);
	mutex_unlock(&sdev->state_mutex);

	return ret;
}

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), data,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), &new_state,
					device_unblock);
	else
		device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
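
/*
 * Illustrative sketch only, not part of the original file: transport classes
 * typically block a target when its link drops and unblock it, restoring
 * SDEV_RUNNING, once the link comes back. @target_dev would be the
 * transport's target (or parent) device.
 */
#if 0
static void example_link_event(struct device *target_dev, bool link_up)
{
	if (link_up)
		scsi_target_unblock(target_dev, SDEV_RUNNING);
	else
		scsi_target_block(target_dev);
}
#endif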

/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
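
/*
 * Illustrative sketch only, not part of the original file: reading one byte
 * out of a scatter-gather list with the atomic kmap helpers above.
 * scsi_kmap_atomic_sg() rewrites *offset to the offset within the mapped
 * page and *len to the number of bytes actually mapped, so a caller that
 * needs more than one page worth of data must loop.
 */
#if 0
static int example_read_sg_byte(struct scatterlist *sgl, int sg_count,
				size_t byte_offset, u8 *out)
{
	size_t offset = byte_offset, len = 1;
	void *vaddr;

	vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &offset, &len);
	if (!vaddr)
		return -EINVAL;
	*out = *((u8 *)vaddr + offset);	/* offset is now page-relative */
	scsi_kunmap_atomic_sg(vaddr);
	return 0;
}
#endif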

void sdev_disable_disk_events(struct scsi_device *sdev)
{
	atomic_inc(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_disable_disk_events);

void sdev_enable_disk_events(struct scsi_device *sdev)
{
	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
		return;
	atomic_dec(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_enable_disk_events);
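
/*
 * Illustrative sketch only, not part of the original file: the calls nest
 * via a depth counter, so each disable must be balanced by exactly one
 * enable, e.g. around an operation that would otherwise trigger spurious
 * media change events.
 */
#if 0
static void example_without_disk_events(struct scsi_device *sdev)
{
	sdev_disable_disk_events(sdev);
	/* ... operation that should not generate disk events ... */
	sdev_enable_disk_events(sdev);
}
#endif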

/**
 * scsi_vpd_lun_id - Export LUN id information
 * @sdev:	SCSI device
 * @id:		buffer for the identification
 * @id_len:	length of the buffer
 *
 * Copies a priority-based unique identification into @id.
 * Returns the length of the identification or error on failure.
 * If the identifier is longer than the supplied buffer the actual
 * identifier length is returned and the buffer is not zero-padded.
 */
int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{
	u8 cur_id_type = 0xff;
	u8 cur_id_size = 0;
	const unsigned char *d, *cur_id_str;
	const struct scsi_vpd *vpd_pg83;
	int id_size = -EINVAL;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	/*
	 * Look for the correct descriptor.
	 * Order of preference for lun descriptor:
	 * - SCSI name string
	 * - NAA IEEE Registered Extended
	 * - EUI-64 based 16-byte
	 * - EUI-64 based 12-byte
	 * - NAA IEEE Registered
	 * - NAA IEEE Extended
	 * - T10 Vendor ID
	 * as longer descriptors reduce the likelihood
	 * of identification clashes.
	 */

	/* The id string must be at least 20 bytes + terminating NULL byte */
	if (id_len < 21) {
		rcu_read_unlock();
		return -EINVAL;
	}

	memset(id, 0, id_len);
	d = vpd_pg83->data + 4;
	while (d < vpd_pg83->data + vpd_pg83->len) {
		/* Skip designators not referring to the LUN */
		if ((d[1] & 0x30) != 0x00)
			goto next_desig;

		switch (d[1] & 0xf) {
		case 0x1:
			/* T10 Vendor ID */
			if (cur_id_size > d[3])
				break;
			/* Prefer anything */
			if (cur_id_type > 0x01 && cur_id_type != 0xff)
				break;
			cur_id_size = d[3];
			if (cur_id_size + 4 > id_len)
				cur_id_size = id_len - 4;
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			id_size = snprintf(id, id_len, "t10.%*pE",
					   cur_id_size, cur_id_str);
			break;
		case 0x2:
			/* EUI-64 */
			if (cur_id_size > d[3])
				break;
			/* Prefer NAA IEEE Registered Extended */
			if (cur_id_type == 0x3 &&
			    cur_id_size == d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "eui.%8phN",
						   cur_id_str);
				break;
			case 12:
				id_size = snprintf(id, id_len,
						   "eui.%12phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "eui.%16phN",
						   cur_id_str);
				break;
			default:
				cur_id_size = 0;
				break;
			}
			break;
		case 0x3:
			/* NAA */
			if (cur_id_size > d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "naa.%8phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "naa.%16phN",
						   cur_id_str);
				break;
			default:
				cur_id_size = 0;
				break;
			}
			break;
		case 0x8:
			/* SCSI name string */
			if (cur_id_size + 4 > d[3])
				break;
			/* Prefer others for truncated descriptor */
			if (cur_id_size && d[3] > id_len)
				break;
			cur_id_size = id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			if (cur_id_size >= id_len)
				cur_id_size = id_len - 1;
			memcpy(id, cur_id_str, cur_id_size);
			/* Decrease priority for truncated descriptor */
			if (cur_id_size != id_size)
				cur_id_size = 6;
			break;
		default:
			break;
		}
next_desig:
		d += d[3] + 4;
	}
	rcu_read_unlock();

	return id_size;
}
EXPORT_SYMBOL(scsi_vpd_lun_id);
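
/*
 * Illustrative sketch only, not part of the original file: fetching the
 * highest-priority designator of a device for logging. The 21-byte minimum
 * matches the check in scsi_vpd_lun_id() above; real callers such as device
 * handlers usually pass a larger buffer.
 */
#if 0
static void example_log_lun_id(struct scsi_device *sdev)
{
	char id[64];	/* must be >= 21 bytes */
	int len;

	len = scsi_vpd_lun_id(sdev, id, sizeof(id));
	if (len > 0)
		sdev_printk(KERN_INFO, sdev, "designator: %s\n", id);
}
#endif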

/**
 * scsi_vpd_tpg_id - return a target port group identifier
 * @sdev: SCSI device
 * @rel_id: pointer in which to return the relative target port id
 *
 * Returns the Target Port Group identifier from the information
 * from VPD page 0x83 of the device.
 * Optionally sets @rel_id to the relative target port on success.
 */
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{
	const unsigned char *d;
	const struct scsi_vpd *vpd_pg83;
	int group_id = -EAGAIN, rel_port = -1;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	d = vpd_pg83->data + 4;
	while (d < vpd_pg83->data + vpd_pg83->len) {
		switch (d[1] & 0xf) {
		case 0x4:
			/* Relative target port */
			rel_port = get_unaligned_be16(&d[6]);
			break;
		case 0x5:
			/* Target port group */
			group_id = get_unaligned_be16(&d[6]);
			break;
		default:
			break;
		}
		d += d[3] + 4;
	}
	rcu_read_unlock();

	if (group_id >= 0 && rel_id && rel_port != -1)
		*rel_id = rel_port;

	return group_id;
}
EXPORT_SYMBOL(scsi_vpd_tpg_id);
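
/*
 * Illustrative sketch only, not part of the original file: an ALUA-style
 * caller reading the target port group and relative port id of a device.
 * -EAGAIN from scsi_vpd_tpg_id() means no group descriptor was found.
 */
#if 0
static int example_get_tpg(struct scsi_device *sdev)
{
	int rel_port = -1;
	int group_id;

	group_id = scsi_vpd_tpg_id(sdev, &rel_port);
	if (group_id < 0)
		return group_id;	/* -ENXIO, -EAGAIN, ... */

	sdev_printk(KERN_INFO, sdev, "tpg %d, relative port %d\n",
		    group_id, rel_port);
	return 0;
}
#endif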