/*
 * SCSI queueing library (scsi_lib.c).
 */
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_debugfs.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

static struct kmem_cache *scsi_sdb_cache;
static struct kmem_cache *scsi_sense_cache;
static struct kmem_cache *scsi_sense_isadma_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);

static inline struct kmem_cache *
scsi_select_sense_cache(bool unchecked_isa_dma)
{
        return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
}

static void scsi_free_sense_buffer(bool unchecked_isa_dma,
                                   unsigned char *sense_buffer)
{
        kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
                        sense_buffer);
}

static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
                                              gfp_t gfp_mask, int numa_node)
{
        return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
                                     gfp_mask, numa_node);
}

int scsi_init_sense_cache(struct Scsi_Host *shost)
{
        struct kmem_cache *cache;
        int ret = 0;

        cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
        if (cache)
                return 0;

        mutex_lock(&scsi_sense_cache_mutex);
        if (shost->unchecked_isa_dma) {
                scsi_sense_isadma_cache =
                        kmem_cache_create("scsi_sense_cache(DMA)",
                                SCSI_SENSE_BUFFERSIZE, 0,
                                SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
                if (!scsi_sense_isadma_cache)
                        ret = -ENOMEM;
        } else {
                scsi_sense_cache =
                        kmem_cache_create_usercopy("scsi_sense_cache",
                                SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
                                0, SCSI_SENSE_BUFFERSIZE, NULL);
                if (!scsi_sense_cache)
                        ret = -ENOMEM;
        }

        mutex_unlock(&scsi_sense_cache_mutex);
        return ret;
}

#define SCSI_QUEUE_DELAY        3
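
/*
 * Mark the device, target or host as blocked after an LLD returned a
 * SCSI_MLQUEUE_*_BUSY value, so that command submission backs off until
 * something completes at that level and the counter is drained again.
 */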
static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct scsi_target *starget = scsi_target(device);

        switch (reason) {
        case SCSI_MLQUEUE_HOST_BUSY:
                atomic_set(&host->host_blocked, host->max_host_blocked);
                break;
        case SCSI_MLQUEUE_DEVICE_BUSY:
        case SCSI_MLQUEUE_EH_RETRY:
                atomic_set(&device->device_blocked,
                           device->max_device_blocked);
                break;
        case SCSI_MLQUEUE_TARGET_BUSY:
                atomic_set(&starget->target_blocked,
                           starget->max_target_blocked);
                break;
        }
}

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;

        if (cmd->request->rq_flags & RQF_DONTPREP) {
                cmd->request->rq_flags &= ~RQF_DONTPREP;
                scsi_mq_uninit_cmd(cmd);
        } else {
                WARN_ON_ONCE(true);
        }
        blk_mq_requeue_request(cmd->request, true);
        put_device(&sdev->sdev_gendev);
}
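
/*
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue (SCSI_MLQUEUE_* value)
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */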
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
        struct scsi_device *device = cmd->device;
        struct request_queue *q = device->request_queue;
        unsigned long flags;

        SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
                "Inserting command %p into mlqueue\n", cmd));

        scsi_set_blocked(cmd, reason);

        if (unbusy)
                scsi_device_unbusy(device);

        cmd->result = 0;
        if (q->mq_ops) {
                blk_mq_requeue_request(cmd->request, true);
                put_device(&device->sdev_gendev);
                return;
        }
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        kblockd_schedule_work(&device->requeue_work);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
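
/*
 * scsi_queue_insert - Reinsert a command in the queue.
 * @cmd:    command that we are adding to queue.
 * @reason: why we are inserting command to queue.
 *
 * We do this for one of two cases: either the host is busy and it cannot
 * accept any more commands for the time being, or the device returned
 * QUEUE_FULL and can accept no more commands.
 *
 * Context: This could be called either from an interrupt context or a
 * normal process context.
 */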
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        __scsi_queue_insert(cmd, reason, true);
}
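
/*
 * __scsi_execute - insert request and wait for the result
 * @sdev:       scsi device
 * @cmd:        scsi command
 * @data_direction: data direction
 * @buffer:     data buffer
 * @bufflen:    len of buffer
 * @sense:      optional sense buffer
 * @sshdr:      optional decoded sense header
 * @timeout:    request timeout
 * @retries:    number of times to retry request
 * @flags:      flags for ->cmd_flags
 * @rq_flags:   flags for ->rq_flags
 * @resid:      optional residual length
 *
 * Returns the scsi_cmnd result field if a command was executed, or
 * DRIVER_ERROR << 24 if the request could not be allocated or mapped.
 *
 * A minimal usage sketch; callers usually go through the
 * scsi_execute()/scsi_execute_req() wrappers rather than calling this
 * directly:
 *
 *      unsigned char cmd[6] = { TEST_UNIT_READY, };
 *      struct scsi_sense_hdr sshdr;
 *
 *      scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
 *                       30 * HZ, 3, NULL);
 */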
int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
                 unsigned char *sense, struct scsi_sense_hdr *sshdr,
                 int timeout, int retries, u64 flags, req_flags_t rq_flags,
                 int *resid)
{
        struct request *req;
        struct scsi_request *rq;
        int ret = DRIVER_ERROR << 24;

        req = blk_get_request(sdev->request_queue,
                        data_direction == DMA_TO_DEVICE ?
                        REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
        if (IS_ERR(req))
                return ret;
        rq = scsi_req(req);

        if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
                                       buffer, bufflen, GFP_NOIO))
                goto out;

        rq->cmd_len = COMMAND_SIZE(cmd[0]);
        memcpy(rq->cmd, cmd, rq->cmd_len);
        rq->retries = retries;
        req->timeout = timeout;
        req->cmd_flags |= flags;
        req->rq_flags |= rq_flags | RQF_QUIET;

        blk_execute_rq(req->q, NULL, req, 1);

        if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
                memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);

        if (resid)
                *resid = rq->resid_len;
        if (sense && rq->sense_len)
                memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
        if (sshdr)
                scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
        ret = rq->result;
 out:
        blk_put_request(req);

        return ret;
}
EXPORT_SYMBOL(__scsi_execute);
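
/*
 * scsi_init_cmd_errh - Initialize cmd fields related to error handling.
 * @cmd: command that is ready to be queued.
 *
 * Called before a command is handed to the LLD: clears the residual count
 * and the sense buffer, and derives cmd_len from the CDB opcode when the
 * caller did not set it.
 */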
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
        cmd->serial_number = 0;
        scsi_set_resid(cmd, 0);
        memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
        if (cmd->cmd_len == 0)
                cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
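
/*
 * Decrement host_busy and, if the host is in recovery, wake up the SCSI
 * error handler.  The rcu_read_lock() is paired with the error handler
 * scheduling code so that the wakeup check here is not missed while a
 * failed command is being added to the error handler.
 */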
static void scsi_dec_host_busy(struct Scsi_Host *shost)
{
        unsigned long flags;

        rcu_read_lock();
        atomic_dec(&shost->host_busy);
        if (unlikely(scsi_host_in_recovery(shost))) {
                spin_lock_irqsave(shost->host_lock, flags);
                if (shost->host_failed || shost->host_eh_scheduled)
                        scsi_eh_wakeup(shost);
                spin_unlock_irqrestore(shost->host_lock, flags);
        }
        rcu_read_unlock();
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        struct scsi_target *starget = scsi_target(sdev);

        scsi_dec_host_busy(shost);

        if (starget->can_queue > 0)
                atomic_dec(&starget->target_busy);

        atomic_dec(&sdev->device_busy);
}

static void scsi_kick_queue(struct request_queue *q)
{
        if (q->mq_ops)
                blk_mq_run_hw_queues(q, false);
        else
                blk_run_queue(q);
}
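
/*
 * scsi_single_lun_run - run queues for a single_lun target
 * @current_sdev: the device whose command just completed
 *
 * Called for single_lun devices on IO completion.  Clear starget_sdev_user,
 * and kick the queues of all scsi_devices on the target, starting with
 * current_sdev.
 *
 * Called with *no* scsi locks held.
 */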
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        scsi_kick_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                        same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                scsi_kick_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
 out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
        if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
                return true;
        if (atomic_read(&sdev->device_blocked) > 0)
                return true;
        return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
        if (starget->can_queue > 0) {
                if (atomic_read(&starget->target_busy) >= starget->can_queue)
                        return true;
                if (atomic_read(&starget->target_blocked) > 0)
                        return true;
        }
        return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
        if (shost->can_queue > 0 &&
            atomic_read(&shost->host_busy) >= shost->can_queue)
                return true;
        if (atomic_read(&shost->host_blocked) > 0)
                return true;
        if (shost->host_self_blocked)
                return true;
        return false;
}
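
/*
 * Walk the host's starved_list and kick the queue of every device that is
 * no longer blocked at the target or host level; devices whose target is
 * still busy are put back on the list.
 */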
static void scsi_starved_list_run(struct Scsi_Host *shost)
{
        LIST_HEAD(starved_list);
        struct scsi_device *sdev;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        list_splice_init(&shost->starved_list, &starved_list);

        while (!list_empty(&starved_list)) {
                struct request_queue *slq;

                if (scsi_host_is_busy(shost))
                        break;

                sdev = list_entry(starved_list.next,
                                  struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                if (scsi_target_is_busy(scsi_target(sdev))) {
                        list_move_tail(&sdev->starved_entry,
                                       &shost->starved_list);
                        continue;
                }

                slq = sdev->request_queue;
                if (!blk_get_queue(slq))
                        continue;
                spin_unlock_irqrestore(shost->host_lock, flags);

                scsi_kick_queue(slq);
                blk_put_queue(slq);

                spin_lock_irqsave(shost->host_lock, flags);
        }

        list_splice(&starved_list, &shost->starved_list);
        spin_unlock_irqrestore(shost->host_lock, flags);
}
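
/*
 * scsi_run_queue - Select a proper request queue to serve next.
 * @q:  last request's queue
 *
 * The previous command was completely finished, start a new one if
 * possible.
 */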
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;

        if (scsi_target(sdev)->single_lun)
                scsi_single_lun_run(sdev);
        if (!list_empty(&sdev->host->starved_list))
                scsi_starved_list_run(sdev->host);

        if (q->mq_ops)
                blk_mq_run_hw_queues(q, false);
        else
                blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
        struct scsi_device *sdev;
        struct request_queue *q;

        sdev = container_of(work, struct scsi_device, requeue_work);
        q = sdev->request_queue;
        scsi_run_queue(q);
}
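
/*
 * scsi_requeue_command - handle a request that is not done
 * @q:   queue to unprep
 * @cmd: command that may need to be requeued
 *
 * Unprep the request, put it back on the block layer queue and drop the
 * command's reference on the device.  Only used on the legacy
 * (non-blk-mq) path; blk-mq requeues go through scsi_mq_requeue_cmd().
 */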
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct request *req = cmd->request;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_unprep_request(req);
        req->special = NULL;
        scsi_put_command(cmd);
        blk_requeue_request(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);

        put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
        if (!blk_rq_is_passthrough(cmd->request)) {
                struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

                if (drv->uninit_command)
                        drv->uninit_command(cmd);
        }
}

static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
        struct scsi_data_buffer *sdb;

        if (cmd->sdb.table.nents)
                sg_free_table_chained(&cmd->sdb.table, true);
        if (cmd->request->next_rq) {
                sdb = cmd->request->next_rq->special;
                if (sdb)
                        sg_free_table_chained(&sdb->table, true);
        }
        if (scsi_prot_sg_count(cmd))
                sg_free_table_chained(&cmd->prot_sdb->table, true);
}

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
        scsi_mq_free_sgtables(cmd);
        scsi_uninit_cmd(cmd);
        scsi_del_cmd_from_list(cmd);
}
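
/*
 * scsi_release_buffers - Free resources allocated to a scsi_cmnd
 * @cmd: command that we are bringing to an end
 *
 * Frees the scatter-gather tables attached to @cmd on the legacy
 * completion and error paths; the blk-mq path uses
 * scsi_mq_free_sgtables() instead.
 */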
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
        if (cmd->sdb.table.nents)
                sg_free_table_chained(&cmd->sdb.table, false);

        memset(&cmd->sdb, 0, sizeof(cmd->sdb));

        if (scsi_prot_sg_count(cmd))
                sg_free_table_chained(&cmd->prot_sdb->table, false);
}

static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
        struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;

        sg_free_table_chained(&bidi_sdb->table, false);
        kmem_cache_free(scsi_sdb_cache, bidi_sdb);
        cmd->request->next_rq->special = NULL;
}
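
/*
 * scsi_end_request - complete part or all of a request
 *
 * Completes @bytes of @req (and @bidi_bytes of the paired bidi request,
 * if any).  Returns true if there is still work left on the request,
 * false if the request has been finished and the command torn down.
 */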
static bool scsi_end_request(struct request *req, blk_status_t error,
                unsigned int bytes, unsigned int bidi_bytes)
{
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
        struct scsi_device *sdev = cmd->device;
        struct request_queue *q = sdev->request_queue;

        if (blk_update_request(req, error, bytes))
                return true;

        if (unlikely(bidi_bytes) &&
            blk_update_request(req->next_rq, error, bidi_bytes))
                return true;

        if (blk_queue_add_random(q))
                add_disk_randomness(req->rq_disk);

        if (!blk_rq_is_scsi(req)) {
                WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
                cmd->flags &= ~SCMD_INITIALIZED;
                destroy_rcu_head(&cmd->rcu);
        }

        if (req->mq_ctx) {
                scsi_mq_uninit_cmd(cmd);

                __blk_mq_end_request(req, error);

                if (scsi_target(sdev)->single_lun ||
                    !list_empty(&sdev->host->starved_list))
                        kblockd_schedule_work(&sdev->requeue_work);
                else
                        blk_mq_run_hw_queues(q, true);
        } else {
                unsigned long flags;

                if (bidi_bytes)
                        scsi_release_bidi_buffers(cmd);
                scsi_release_buffers(cmd);
                scsi_put_command(cmd);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_finish_request(req, error);
                spin_unlock_irqrestore(q->queue_lock, flags);

                scsi_run_queue(q);
        }

        put_device(&sdev->sdev_gendev);
        return false;
}
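
/*
 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 * @cmd:    SCSI command
 * @result: scsi error code
 *
 * Translate a SCSI result code into a blk_status_t value.  May reset the
 * host byte of @cmd->result.
 */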
static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
{
        switch (host_byte(result)) {
        case DID_OK:
                if (scsi_status_is_good(result) && (result & ~0xff) == 0)
                        return BLK_STS_OK;
                return BLK_STS_IOERR;
        case DID_TRANSPORT_FAILFAST:
                return BLK_STS_TRANSPORT;
        case DID_TARGET_FAILURE:
                set_host_byte(cmd, DID_OK);
                return BLK_STS_TARGET;
        case DID_NEXUS_FAILURE:
                return BLK_STS_NEXUS;
        case DID_ALLOC_FAILURE:
                set_host_byte(cmd, DID_OK);
                return BLK_STS_NOSPC;
        case DID_MEDIUM_ERROR:
                set_host_byte(cmd, DID_OK);
                return BLK_STS_MEDIUM;
        default:
                return BLK_STS_IOERR;
        }
}
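
/* Helper for scsi_io_completion() when "reprep" action required. */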
static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
                                      struct request_queue *q)
{
        if (q->mq_ops) {
                scsi_mq_requeue_cmd(cmd);
        } else {
                scsi_release_buffers(cmd);
                scsi_requeue_command(q, cmd);
        }
}
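
/* Helper for scsi_io_completion() when special action required. */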
779static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
780{
781 struct request_queue *q = cmd->device->request_queue;
782 struct request *req = cmd->request;
783 int level = 0;
784 enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
785 ACTION_DELAYED_RETRY} action;
786 unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
787 struct scsi_sense_hdr sshdr;
788 bool sense_valid;
789 bool sense_current = true;
790 blk_status_t blk_stat;
791
792 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
793 if (sense_valid)
794 sense_current = !scsi_sense_is_deferred(&sshdr);
795
796 blk_stat = scsi_result_to_blk_status(cmd, result);
797
798 if (host_byte(result) == DID_RESET) {
799
800
801
802
803 action = ACTION_RETRY;
804 } else if (sense_valid && sense_current) {
805 switch (sshdr.sense_key) {
806 case UNIT_ATTENTION:
807 if (cmd->device->removable) {
808
809
810
811 cmd->device->changed = 1;
812 action = ACTION_FAIL;
813 } else {
814
815
816
817
818
819 action = ACTION_RETRY;
820 }
821 break;
822 case ILLEGAL_REQUEST:
823
824
825
826
827
828
829
830
831 if ((cmd->device->use_10_for_rw &&
832 sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
833 (cmd->cmnd[0] == READ_10 ||
834 cmd->cmnd[0] == WRITE_10)) {
835
836 cmd->device->use_10_for_rw = 0;
837 action = ACTION_REPREP;
838 } else if (sshdr.asc == 0x10) {
839 action = ACTION_FAIL;
840 blk_stat = BLK_STS_PROTECTION;
841
842 } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
843 action = ACTION_FAIL;
844 blk_stat = BLK_STS_TARGET;
845 } else
846 action = ACTION_FAIL;
847 break;
848 case ABORTED_COMMAND:
849 action = ACTION_FAIL;
850 if (sshdr.asc == 0x10)
851 blk_stat = BLK_STS_PROTECTION;
852 break;
853 case NOT_READY:
854
855
856
857 if (sshdr.asc == 0x04) {
858 switch (sshdr.ascq) {
859 case 0x01:
860 case 0x04:
861 case 0x05:
862 case 0x06:
863 case 0x07:
864 case 0x08:
865 case 0x09:
866 case 0x14:
867 case 0x1a:
868 case 0x1b:
869 case 0x1d:
870 case 0x24:
871 action = ACTION_DELAYED_RETRY;
872 break;
873 default:
874 action = ACTION_FAIL;
875 break;
876 }
877 } else
878 action = ACTION_FAIL;
879 break;
880 case VOLUME_OVERFLOW:
881
882 action = ACTION_FAIL;
883 break;
884 default:
885 action = ACTION_FAIL;
886 break;
887 }
888 } else
889 action = ACTION_FAIL;
890
891 if (action != ACTION_FAIL &&
892 time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
893 action = ACTION_FAIL;
894
895 switch (action) {
896 case ACTION_FAIL:
897
898 if (!(req->rq_flags & RQF_QUIET)) {
899 static DEFINE_RATELIMIT_STATE(_rs,
900 DEFAULT_RATELIMIT_INTERVAL,
901 DEFAULT_RATELIMIT_BURST);
902
903 if (unlikely(scsi_logging_level))
904 level =
905 SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
906 SCSI_LOG_MLCOMPLETE_BITS);
907
908
909
910
911
912 if (!level && __ratelimit(&_rs)) {
913 scsi_print_result(cmd, NULL, FAILED);
914 if (driver_byte(result) == DRIVER_SENSE)
915 scsi_print_sense(cmd);
916 scsi_print_command(cmd);
917 }
918 }
919 if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req), 0))
920 return;
921
922 case ACTION_REPREP:
923 scsi_io_completion_reprep(cmd, q);
924 break;
925 case ACTION_RETRY:
926
927 __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
928 break;
929 case ACTION_DELAYED_RETRY:
930
931 __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
932 break;
933 }
934}
935
936
937
938
939
940
941static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
942 blk_status_t *blk_statp)
943{
944 bool sense_valid;
945 bool sense_current = true;
946 struct request *req = cmd->request;
947 struct scsi_sense_hdr sshdr;
948
949 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
950 if (sense_valid)
951 sense_current = !scsi_sense_is_deferred(&sshdr);
952
953 if (blk_rq_is_passthrough(req)) {
954 if (sense_valid) {
955
956
957
958 scsi_req(req)->sense_len =
959 min(8 + cmd->sense_buffer[7],
960 SCSI_SENSE_BUFFERSIZE);
961 }
962 if (sense_current)
963 *blk_statp = scsi_result_to_blk_status(cmd, result);
964 } else if (blk_rq_bytes(req) == 0 && sense_current) {
965
966
967
968
969
970 *blk_statp = scsi_result_to_blk_status(cmd, result);
971 }
972
973
974
975
976
977
978 if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
979 bool do_print = true;
980
981
982
983
984
985 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
986 do_print = false;
987 else if (req->rq_flags & RQF_QUIET)
988 do_print = false;
989 if (do_print)
990 scsi_print_sense(cmd);
991 result = 0;
992
993 *blk_statp = BLK_STS_OK;
994 }
995
996
997
998
999
1000
1001
1002 if (status_byte(result) && scsi_status_is_good(result)) {
1003 result = 0;
1004 *blk_statp = BLK_STS_OK;
1005 }
1006 return result;
1007}
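
/*
 * scsi_io_completion - Completion processing for SCSI commands.
 * @cmd:        command that is finished.
 * @good_bytes: number of processed bytes.
 *
 * We will finish off the specified number of sectors.  If we are done, the
 * command block will be released and the queue function will be goosed.  If
 * we are not done then we have to figure out what to do next:
 *
 *   a) We can call scsi_io_completion_reprep().  The request will be
 *      unprepared and put back on the queue.  Then a new command will
 *      be created for it.  This should be used if we made forward
 *      progress, or if we want to switch from READ(10) to READ(6) for
 *      example.
 *
 *   b) We can call scsi_io_completion_action().  The request will be
 *      put back on the queue and retried using the same command as
 *      before, possibly after a delay.
 *
 *   c) We can call scsi_end_request() with blk_stat other than
 *      BLK_STS_OK, to fail the remainder of the request.
 */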
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
        int result = cmd->result;
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        blk_status_t blk_stat = BLK_STS_OK;

        if (unlikely(result))
                result = scsi_io_completion_nz_result(cmd, result, &blk_stat);

        if (unlikely(blk_rq_is_passthrough(req))) {
                scsi_req(req)->result = cmd->result;
                scsi_req(req)->resid_len = scsi_get_resid(cmd);

                if (unlikely(scsi_bidi_cmnd(cmd))) {
                        scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
                        if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
                                             blk_rq_bytes(req->next_rq)))
                                WARN_ONCE(true,
                                          "Bidi command with remaining bytes");
                        return;
                }
        }

        if (unlikely(blk_bidi_rq(req))) {
                WARN_ONCE(true, "Only support bidi command in passthrough");
                scmd_printk(KERN_ERR, cmd, "Killing bidi command\n");
                if (scsi_end_request(req, BLK_STS_IOERR, blk_rq_bytes(req),
                                     blk_rq_bytes(req->next_rq)))
                        WARN_ONCE(true, "Bidi command with remaining bytes");
                return;
        }

        SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
                "%u sectors total, %d bytes done.\n",
                blk_rq_sectors(req), good_bytes));

        if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
                if (likely(!scsi_end_request(req, blk_stat, good_bytes, 0)))
                        return;
        }

        if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
                if (scsi_end_request(req, blk_stat, blk_rq_bytes(req), 0))
                        WARN_ONCE(true,
                                  "Bytes remaining after failed, no-retry command");
                return;
        }

        if (likely(result == 0))
                scsi_io_completion_reprep(cmd, q);
        else
                scsi_io_completion_action(cmd, result);
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
{
        int count;

        if (unlikely(sg_alloc_table_chained(&sdb->table,
                        blk_rq_nr_phys_segments(req), sdb->table.sgl)))
                return BLKPREP_DEFER;

        count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
        BUG_ON(count > sdb->table.nents);
        sdb->table.nents = count;
        sdb->length = blk_rq_payload_bytes(req);
        return BLKPREP_OK;
}
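
/*
 * scsi_init_io - SCSI I/O initialization function.
 * @cmd:  command descriptor we wish to initialize
 *
 * Returns:
 *  * BLKPREP_OK       - on success
 *  * BLKPREP_DEFER    - if the failure is retryable
 *  * BLKPREP_KILL     - if the failure is fatal
 */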
int scsi_init_io(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct request *rq = cmd->request;
        bool is_mq = (rq->mq_ctx != NULL);
        int error = BLKPREP_KILL;

        if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
                goto err_exit;

        error = scsi_init_sgtable(rq, &cmd->sdb);
        if (error)
                goto err_exit;

        if (blk_bidi_rq(rq)) {
                if (!rq->q->mq_ops) {
                        struct scsi_data_buffer *bidi_sdb =
                                kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
                        if (!bidi_sdb) {
                                error = BLKPREP_DEFER;
                                goto err_exit;
                        }

                        rq->next_rq->special = bidi_sdb;
                }

                error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
                if (error)
                        goto err_exit;
        }

        if (blk_integrity_rq(rq)) {
                struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
                int ivecs, count;

                if (prot_sdb == NULL) {
                        WARN_ON_ONCE(1);
                        error = BLKPREP_KILL;
                        goto err_exit;
                }

                ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

                if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
                                prot_sdb->table.sgl)) {
                        error = BLKPREP_DEFER;
                        goto err_exit;
                }

                count = blk_rq_map_integrity_sg(rq->q, rq->bio,
                                                prot_sdb->table.sgl);
                BUG_ON(unlikely(count > ivecs));
                BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

                cmd->prot_sdb = prot_sdb;
                cmd->prot_sdb->table.nents = count;
        }

        return BLKPREP_OK;
err_exit:
        if (is_mq) {
                scsi_mq_free_sgtables(cmd);
        } else {
                scsi_release_buffers(cmd);
                cmd->request->special = NULL;
                scsi_put_command(cmd);
                put_device(&sdev->sdev_gendev);
        }
        return error;
}
EXPORT_SYMBOL(scsi_init_io);
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236static void scsi_initialize_rq(struct request *rq)
1237{
1238 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1239
1240 scsi_req_init(&cmd->req);
1241 init_rcu_head(&cmd->rcu);
1242 cmd->jiffies_at_alloc = jiffies;
1243 cmd->retries = 0;
1244}
1245
1246
1247void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
1248{
1249 struct scsi_device *sdev = cmd->device;
1250 struct Scsi_Host *shost = sdev->host;
1251 unsigned long flags;
1252
1253 if (shost->use_cmd_list) {
1254 spin_lock_irqsave(&sdev->list_lock, flags);
1255 list_add_tail(&cmd->list, &sdev->cmd_list);
1256 spin_unlock_irqrestore(&sdev->list_lock, flags);
1257 }
1258}
1259
1260
1261void scsi_del_cmd_from_list(struct scsi_cmnd *cmd)
1262{
1263 struct scsi_device *sdev = cmd->device;
1264 struct Scsi_Host *shost = sdev->host;
1265 unsigned long flags;
1266
1267 if (shost->use_cmd_list) {
1268 spin_lock_irqsave(&sdev->list_lock, flags);
1269 BUG_ON(list_empty(&cmd->list));
1270 list_del_init(&cmd->list);
1271 spin_unlock_irqrestore(&sdev->list_lock, flags);
1272 }
1273}
1274
1275
1276void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
1277{
1278 void *buf = cmd->sense_buffer;
1279 void *prot = cmd->prot_sdb;
1280 struct request *rq = blk_mq_rq_from_pdu(cmd);
1281 unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
1282 unsigned long jiffies_at_alloc;
1283 int retries;
1284
1285 if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
1286 flags |= SCMD_INITIALIZED;
1287 scsi_initialize_rq(rq);
1288 }
1289
1290 jiffies_at_alloc = cmd->jiffies_at_alloc;
1291 retries = cmd->retries;
1292
1293 memset((char *)cmd + sizeof(cmd->req), 0,
1294 sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);
1295
1296 cmd->device = dev;
1297 cmd->sense_buffer = buf;
1298 cmd->prot_sdb = prot;
1299 cmd->flags = flags;
1300 INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
1301 cmd->jiffies_at_alloc = jiffies_at_alloc;
1302 cmd->retries = retries;
1303
1304 scsi_add_cmd_to_list(cmd);
1305}
1306
1307static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
1308{
1309 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1310
1311
1312
1313
1314
1315
1316
1317 if (req->bio) {
1318 int ret = scsi_init_io(cmd);
1319 if (unlikely(ret))
1320 return ret;
1321 } else {
1322 BUG_ON(blk_rq_bytes(req));
1323
1324 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1325 }
1326
1327 cmd->cmd_len = scsi_req(req)->cmd_len;
1328 cmd->cmnd = scsi_req(req)->cmd;
1329 cmd->transfersize = blk_rq_bytes(req);
1330 cmd->allowed = scsi_req(req)->retries;
1331 return BLKPREP_OK;
1332}
1333
1334
1335
1336
1337
1338static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1339{
1340 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1341
1342 if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
1343 int ret = sdev->handler->prep_fn(sdev, req);
1344 if (ret != BLKPREP_OK)
1345 return ret;
1346 }
1347
1348 cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
1349 memset(cmd->cmnd, 0, BLK_MAX_CDB);
1350 return scsi_cmd_to_driver(cmd)->init_command(cmd);
1351}
1352
1353static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
1354{
1355 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1356
1357 if (!blk_rq_bytes(req))
1358 cmd->sc_data_direction = DMA_NONE;
1359 else if (rq_data_dir(req) == WRITE)
1360 cmd->sc_data_direction = DMA_TO_DEVICE;
1361 else
1362 cmd->sc_data_direction = DMA_FROM_DEVICE;
1363
1364 if (blk_rq_is_scsi(req))
1365 return scsi_setup_scsi_cmnd(sdev, req);
1366 else
1367 return scsi_setup_fs_cmnd(sdev, req);
1368}
1369
1370static int
1371scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1372{
1373 int ret = BLKPREP_OK;
1374
1375
1376
1377
1378
1379 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1380 switch (sdev->sdev_state) {
1381 case SDEV_OFFLINE:
1382 case SDEV_TRANSPORT_OFFLINE:
1383
1384
1385
1386
1387
1388 sdev_printk(KERN_ERR, sdev,
1389 "rejecting I/O to offline device\n");
1390 ret = BLKPREP_KILL;
1391 break;
1392 case SDEV_DEL:
1393
1394
1395
1396
1397 sdev_printk(KERN_ERR, sdev,
1398 "rejecting I/O to dead device\n");
1399 ret = BLKPREP_KILL;
1400 break;
1401 case SDEV_BLOCK:
1402 case SDEV_CREATED_BLOCK:
1403 ret = BLKPREP_DEFER;
1404 break;
1405 case SDEV_QUIESCE:
1406
1407
1408
1409 if (req && !(req->rq_flags & RQF_PREEMPT))
1410 ret = BLKPREP_DEFER;
1411 break;
1412 default:
1413
1414
1415
1416
1417
1418 if (req && !(req->rq_flags & RQF_PREEMPT))
1419 ret = BLKPREP_KILL;
1420 break;
1421 }
1422 }
1423 return ret;
1424}
1425
1426static int
1427scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1428{
1429 struct scsi_device *sdev = q->queuedata;
1430
1431 switch (ret) {
1432 case BLKPREP_KILL:
1433 case BLKPREP_INVALID:
1434 scsi_req(req)->result = DID_NO_CONNECT << 16;
1435
1436 if (req->special) {
1437 struct scsi_cmnd *cmd = req->special;
1438 scsi_release_buffers(cmd);
1439 scsi_put_command(cmd);
1440 put_device(&sdev->sdev_gendev);
1441 req->special = NULL;
1442 }
1443 break;
1444 case BLKPREP_DEFER:
1445
1446
1447
1448
1449
1450 if (atomic_read(&sdev->device_busy) == 0)
1451 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1452 break;
1453 default:
1454 req->rq_flags |= RQF_DONTPREP;
1455 }
1456
1457 return ret;
1458}
1459
1460static int scsi_prep_fn(struct request_queue *q, struct request *req)
1461{
1462 struct scsi_device *sdev = q->queuedata;
1463 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1464 int ret;
1465
1466 ret = scsi_prep_state_check(sdev, req);
1467 if (ret != BLKPREP_OK)
1468 goto out;
1469
1470 if (!req->special) {
1471
1472 if (unlikely(!get_device(&sdev->sdev_gendev))) {
1473 ret = BLKPREP_DEFER;
1474 goto out;
1475 }
1476
1477 scsi_init_command(sdev, cmd);
1478 req->special = cmd;
1479 }
1480
1481 cmd->tag = req->tag;
1482 cmd->request = req;
1483 cmd->prot_op = SCSI_PROT_NORMAL;
1484
1485 ret = scsi_setup_cmnd(sdev, req);
1486out:
1487 return scsi_prep_return(q, req, ret);
1488}
1489
1490static void scsi_unprep_fn(struct request_queue *q, struct request *req)
1491{
1492 scsi_uninit_cmd(blk_mq_rq_to_pdu(req));
1493}
1494
1495
1496
1497
1498
1499
1500
1501static inline int scsi_dev_queue_ready(struct request_queue *q,
1502 struct scsi_device *sdev)
1503{
1504 unsigned int busy;
1505
1506 busy = atomic_inc_return(&sdev->device_busy) - 1;
1507 if (atomic_read(&sdev->device_blocked)) {
1508 if (busy)
1509 goto out_dec;
1510
1511
1512
1513
1514 if (atomic_dec_return(&sdev->device_blocked) > 0) {
1515
1516
1517
1518 if (!q->mq_ops)
1519 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1520 goto out_dec;
1521 }
1522 SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
1523 "unblocking device at zero depth\n"));
1524 }
1525
1526 if (busy >= sdev->queue_depth)
1527 goto out_dec;
1528
1529 return 1;
1530out_dec:
1531 atomic_dec(&sdev->device_busy);
1532 return 0;
1533}
1534
1535
1536
1537
1538
1539static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1540 struct scsi_device *sdev)
1541{
1542 struct scsi_target *starget = scsi_target(sdev);
1543 unsigned int busy;
1544
1545 if (starget->single_lun) {
1546 spin_lock_irq(shost->host_lock);
1547 if (starget->starget_sdev_user &&
1548 starget->starget_sdev_user != sdev) {
1549 spin_unlock_irq(shost->host_lock);
1550 return 0;
1551 }
1552 starget->starget_sdev_user = sdev;
1553 spin_unlock_irq(shost->host_lock);
1554 }
1555
1556 if (starget->can_queue <= 0)
1557 return 1;
1558
1559 busy = atomic_inc_return(&starget->target_busy) - 1;
1560 if (atomic_read(&starget->target_blocked) > 0) {
1561 if (busy)
1562 goto starved;
1563
1564
1565
1566
1567 if (atomic_dec_return(&starget->target_blocked) > 0)
1568 goto out_dec;
1569
1570 SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1571 "unblocking target at zero depth\n"));
1572 }
1573
1574 if (busy >= starget->can_queue)
1575 goto starved;
1576
1577 return 1;
1578
1579starved:
1580 spin_lock_irq(shost->host_lock);
1581 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1582 spin_unlock_irq(shost->host_lock);
1583out_dec:
1584 if (starget->can_queue > 0)
1585 atomic_dec(&starget->target_busy);
1586 return 0;
1587}
1588
1589
1590
1591
1592
1593
1594static inline int scsi_host_queue_ready(struct request_queue *q,
1595 struct Scsi_Host *shost,
1596 struct scsi_device *sdev)
1597{
1598 unsigned int busy;
1599
1600 if (scsi_host_in_recovery(shost))
1601 return 0;
1602
1603 busy = atomic_inc_return(&shost->host_busy) - 1;
1604 if (atomic_read(&shost->host_blocked) > 0) {
1605 if (busy)
1606 goto starved;
1607
1608
1609
1610
1611 if (atomic_dec_return(&shost->host_blocked) > 0)
1612 goto out_dec;
1613
1614 SCSI_LOG_MLQUEUE(3,
1615 shost_printk(KERN_INFO, shost,
1616 "unblocking host at zero depth\n"));
1617 }
1618
1619 if (shost->can_queue > 0 && busy >= shost->can_queue)
1620 goto starved;
1621 if (shost->host_self_blocked)
1622 goto starved;
1623
1624
1625 if (!list_empty(&sdev->starved_entry)) {
1626 spin_lock_irq(shost->host_lock);
1627 if (!list_empty(&sdev->starved_entry))
1628 list_del_init(&sdev->starved_entry);
1629 spin_unlock_irq(shost->host_lock);
1630 }
1631
1632 return 1;
1633
1634starved:
1635 spin_lock_irq(shost->host_lock);
1636 if (list_empty(&sdev->starved_entry))
1637 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1638 spin_unlock_irq(shost->host_lock);
1639out_dec:
1640 scsi_dec_host_busy(shost);
1641 return 0;
1642}
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656static int scsi_lld_busy(struct request_queue *q)
1657{
1658 struct scsi_device *sdev = q->queuedata;
1659 struct Scsi_Host *shost;
1660
1661 if (blk_queue_dying(q))
1662 return 0;
1663
1664 shost = sdev->host;
1665
1666
1667
1668
1669
1670
1671
1672 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1673 return 1;
1674
1675 return 0;
1676}
1677
1678
1679
1680
1681static void scsi_kill_request(struct request *req, struct request_queue *q)
1682{
1683 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1684 struct scsi_device *sdev;
1685 struct scsi_target *starget;
1686 struct Scsi_Host *shost;
1687
1688 blk_start_request(req);
1689
1690 scmd_printk(KERN_INFO, cmd, "killing request\n");
1691
1692 sdev = cmd->device;
1693 starget = scsi_target(sdev);
1694 shost = sdev->host;
1695 scsi_init_cmd_errh(cmd);
1696 cmd->result = DID_NO_CONNECT << 16;
1697 atomic_inc(&cmd->device->iorequest_cnt);
1698
1699
1700
1701
1702
1703
1704 atomic_inc(&sdev->device_busy);
1705 atomic_inc(&shost->host_busy);
1706 if (starget->can_queue > 0)
1707 atomic_inc(&starget->target_busy);
1708
1709 blk_complete_request(req);
1710}
1711
1712static void scsi_softirq_done(struct request *rq)
1713{
1714 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1715 unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1716 int disposition;
1717
1718 INIT_LIST_HEAD(&cmd->eh_entry);
1719
1720 atomic_inc(&cmd->device->iodone_cnt);
1721 if (cmd->result)
1722 atomic_inc(&cmd->device->ioerr_cnt);
1723
1724 disposition = scsi_decide_disposition(cmd);
1725 if (disposition != SUCCESS &&
1726 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1727 sdev_printk(KERN_ERR, cmd->device,
1728 "timing out command, waited %lus\n",
1729 wait_for/HZ);
1730 disposition = SUCCESS;
1731 }
1732
1733 scsi_log_completion(cmd, disposition);
1734
1735 switch (disposition) {
1736 case SUCCESS:
1737 scsi_finish_command(cmd);
1738 break;
1739 case NEEDS_RETRY:
1740 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1741 break;
1742 case ADD_TO_MLQUEUE:
1743 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1744 break;
1745 default:
1746 scsi_eh_scmd_add(cmd);
1747 break;
1748 }
1749}
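
/*
 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero if the request was rejected (the caller should requeue
 * it), zero if the command was handed to the driver or was completed with
 * an error directly.
 */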
1758static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
1759{
1760 struct Scsi_Host *host = cmd->device->host;
1761 int rtn = 0;
1762
1763 atomic_inc(&cmd->device->iorequest_cnt);
1764
1765
1766 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
1767
1768
1769
1770 cmd->result = DID_NO_CONNECT << 16;
1771 goto done;
1772 }
1773
1774
1775 if (unlikely(scsi_device_blocked(cmd->device))) {
1776
1777
1778
1779
1780
1781
1782
1783 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1784 "queuecommand : device blocked\n"));
1785 return SCSI_MLQUEUE_DEVICE_BUSY;
1786 }
1787
1788
1789 if (cmd->device->lun_in_cdb)
1790 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
1791 (cmd->device->lun << 5 & 0xe0);
1792
1793 scsi_log_send(cmd);
1794
1795
1796
1797
1798
1799 if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
1800 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1801 "queuecommand : command too long. "
1802 "cdb_size=%d host->max_cmd_len=%d\n",
1803 cmd->cmd_len, cmd->device->host->max_cmd_len));
1804 cmd->result = (DID_ABORT << 16);
1805 goto done;
1806 }
1807
1808 if (unlikely(host->shost_state == SHOST_DEL)) {
1809 cmd->result = (DID_NO_CONNECT << 16);
1810 goto done;
1811
1812 }
1813
1814 trace_scsi_dispatch_cmd_start(cmd);
1815 rtn = host->hostt->queuecommand(host, cmd);
1816 if (rtn) {
1817 trace_scsi_dispatch_cmd_error(cmd, rtn);
1818 if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
1819 rtn != SCSI_MLQUEUE_TARGET_BUSY)
1820 rtn = SCSI_MLQUEUE_HOST_BUSY;
1821
1822 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1823 "queuecommand : request rejected\n"));
1824 }
1825
1826 return rtn;
1827 done:
1828 cmd->scsi_done(cmd);
1829 return 0;
1830}
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843static void scsi_done(struct scsi_cmnd *cmd)
1844{
1845 trace_scsi_dispatch_cmd_done(cmd);
1846 blk_complete_request(cmd->request);
1847}
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863static void scsi_request_fn(struct request_queue *q)
1864 __releases(q->queue_lock)
1865 __acquires(q->queue_lock)
1866{
1867 struct scsi_device *sdev = q->queuedata;
1868 struct Scsi_Host *shost;
1869 struct scsi_cmnd *cmd;
1870 struct request *req;
1871
1872
1873
1874
1875
1876 shost = sdev->host;
1877 for (;;) {
1878 int rtn;
1879
1880
1881
1882
1883
1884 req = blk_peek_request(q);
1885 if (!req)
1886 break;
1887
1888 if (unlikely(!scsi_device_online(sdev))) {
1889 sdev_printk(KERN_ERR, sdev,
1890 "rejecting I/O to offline device\n");
1891 scsi_kill_request(req, q);
1892 continue;
1893 }
1894
1895 if (!scsi_dev_queue_ready(q, sdev))
1896 break;
1897
1898
1899
1900
1901 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1902 blk_start_request(req);
1903
1904 spin_unlock_irq(q->queue_lock);
1905 cmd = blk_mq_rq_to_pdu(req);
1906 if (cmd != req->special) {
1907 printk(KERN_CRIT "impossible request in %s.\n"
1908 "please mail a stack trace to "
1909 "linux-scsi@vger.kernel.org\n",
1910 __func__);
1911 blk_dump_rq_flags(req, "foo");
1912 BUG();
1913 }
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923 if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) {
1924 spin_lock_irq(shost->host_lock);
1925 if (list_empty(&sdev->starved_entry))
1926 list_add_tail(&sdev->starved_entry,
1927 &shost->starved_list);
1928 spin_unlock_irq(shost->host_lock);
1929 goto not_ready;
1930 }
1931
1932 if (!scsi_target_queue_ready(shost, sdev))
1933 goto not_ready;
1934
1935 if (!scsi_host_queue_ready(q, shost, sdev))
1936 goto host_not_ready;
1937
1938 if (sdev->simple_tags)
1939 cmd->flags |= SCMD_TAGGED;
1940 else
1941 cmd->flags &= ~SCMD_TAGGED;
1942
1943
1944
1945
1946
1947 scsi_init_cmd_errh(cmd);
1948
1949
1950
1951
1952 cmd->scsi_done = scsi_done;
1953 rtn = scsi_dispatch_cmd(cmd);
1954 if (rtn) {
1955 scsi_queue_insert(cmd, rtn);
1956 spin_lock_irq(q->queue_lock);
1957 goto out_delay;
1958 }
1959 spin_lock_irq(q->queue_lock);
1960 }
1961
1962 return;
1963
1964 host_not_ready:
1965 if (scsi_target(sdev)->can_queue > 0)
1966 atomic_dec(&scsi_target(sdev)->target_busy);
1967 not_ready:
1968
1969
1970
1971
1972
1973
1974
1975
1976 spin_lock_irq(q->queue_lock);
1977 blk_requeue_request(q, req);
1978 atomic_dec(&sdev->device_busy);
1979out_delay:
1980 if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
1981 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1982}
1983
1984static inline blk_status_t prep_to_mq(int ret)
1985{
1986 switch (ret) {
1987 case BLKPREP_OK:
1988 return BLK_STS_OK;
1989 case BLKPREP_DEFER:
1990 return BLK_STS_RESOURCE;
1991 default:
1992 return BLK_STS_IOERR;
1993 }
1994}
1995
1996
1997static unsigned int scsi_mq_sgl_size(struct Scsi_Host *shost)
1998{
1999 return min_t(unsigned int, shost->sg_tablesize, SG_CHUNK_SIZE) *
2000 sizeof(struct scatterlist);
2001}
2002
2003static int scsi_mq_prep_fn(struct request *req)
2004{
2005 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
2006 struct scsi_device *sdev = req->q->queuedata;
2007 struct Scsi_Host *shost = sdev->host;
2008 struct scatterlist *sg;
2009
2010 scsi_init_command(sdev, cmd);
2011
2012 req->special = cmd;
2013
2014 cmd->request = req;
2015
2016 cmd->tag = req->tag;
2017 cmd->prot_op = SCSI_PROT_NORMAL;
2018
2019 sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
2020 cmd->sdb.table.sgl = sg;
2021
2022 if (scsi_host_get_prot(shost)) {
2023 memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
2024
2025 cmd->prot_sdb->table.sgl =
2026 (struct scatterlist *)(cmd->prot_sdb + 1);
2027 }
2028
2029 if (blk_bidi_rq(req)) {
2030 struct request *next_rq = req->next_rq;
2031 struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);
2032
2033 memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
2034 bidi_sdb->table.sgl =
2035 (struct scatterlist *)(bidi_sdb + 1);
2036
2037 next_rq->special = bidi_sdb;
2038 }
2039
2040 blk_mq_start_request(req);
2041
2042 return scsi_setup_cmnd(sdev, req);
2043}
2044
2045static void scsi_mq_done(struct scsi_cmnd *cmd)
2046{
2047 trace_scsi_dispatch_cmd_done(cmd);
2048 blk_mq_complete_request(cmd->request);
2049}
2050
2051static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
2052{
2053 struct request_queue *q = hctx->queue;
2054 struct scsi_device *sdev = q->queuedata;
2055
2056 atomic_dec(&sdev->device_busy);
2057 put_device(&sdev->sdev_gendev);
2058}
2059
2060static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
2061{
2062 struct request_queue *q = hctx->queue;
2063 struct scsi_device *sdev = q->queuedata;
2064
2065 if (!get_device(&sdev->sdev_gendev))
2066 goto out;
2067 if (!scsi_dev_queue_ready(q, sdev))
2068 goto out_put_device;
2069
2070 return true;
2071
2072out_put_device:
2073 put_device(&sdev->sdev_gendev);
2074out:
2075 if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
2076 blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
2077 return false;
2078}
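
/*
 * scsi_queue_rq - dispatch one request on the blk-mq path.
 *
 * Checks device, target and host readiness, prepares the command on first
 * submission (RQF_DONTPREP) and hands it to the LLD.  Returns
 * BLK_STS_RESOURCE or BLK_STS_DEV_RESOURCE when the request must be
 * retried later by the block layer.
 */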
2080static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
2081 const struct blk_mq_queue_data *bd)
2082{
2083 struct request *req = bd->rq;
2084 struct request_queue *q = req->q;
2085 struct scsi_device *sdev = q->queuedata;
2086 struct Scsi_Host *shost = sdev->host;
2087 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
2088 blk_status_t ret;
2089 int reason;
2090
2091 ret = prep_to_mq(scsi_prep_state_check(sdev, req));
2092 if (ret != BLK_STS_OK)
2093 goto out_put_budget;
2094
2095 ret = BLK_STS_RESOURCE;
2096 if (!scsi_target_queue_ready(shost, sdev))
2097 goto out_put_budget;
2098 if (!scsi_host_queue_ready(q, shost, sdev))
2099 goto out_dec_target_busy;
2100
2101 if (!(req->rq_flags & RQF_DONTPREP)) {
2102 ret = prep_to_mq(scsi_mq_prep_fn(req));
2103 if (ret != BLK_STS_OK)
2104 goto out_dec_host_busy;
2105 req->rq_flags |= RQF_DONTPREP;
2106 } else {
2107 blk_mq_start_request(req);
2108 }
2109
2110 if (sdev->simple_tags)
2111 cmd->flags |= SCMD_TAGGED;
2112 else
2113 cmd->flags &= ~SCMD_TAGGED;
2114
2115 scsi_init_cmd_errh(cmd);
2116 cmd->scsi_done = scsi_mq_done;
2117
2118 reason = scsi_dispatch_cmd(cmd);
2119 if (reason) {
2120 scsi_set_blocked(cmd, reason);
2121 ret = BLK_STS_RESOURCE;
2122 goto out_dec_host_busy;
2123 }
2124
2125 return BLK_STS_OK;
2126
2127out_dec_host_busy:
2128 scsi_dec_host_busy(shost);
2129out_dec_target_busy:
2130 if (scsi_target(sdev)->can_queue > 0)
2131 atomic_dec(&scsi_target(sdev)->target_busy);
2132out_put_budget:
2133 scsi_mq_put_budget(hctx);
2134 switch (ret) {
2135 case BLK_STS_OK:
2136 break;
2137 case BLK_STS_RESOURCE:
2138 if (atomic_read(&sdev->device_busy) ||
2139 scsi_device_blocked(sdev))
2140 ret = BLK_STS_DEV_RESOURCE;
2141 break;
2142 default:
2143
2144
2145
2146
2147
2148 if (req->rq_flags & RQF_DONTPREP)
2149 scsi_mq_uninit_cmd(cmd);
2150 break;
2151 }
2152 return ret;
2153}
2154
2155static enum blk_eh_timer_return scsi_timeout(struct request *req,
2156 bool reserved)
2157{
2158 if (reserved)
2159 return BLK_EH_RESET_TIMER;
2160 return scsi_times_out(req);
2161}
2162
2163static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2164 unsigned int hctx_idx, unsigned int numa_node)
2165{
2166 struct Scsi_Host *shost = set->driver_data;
2167 const bool unchecked_isa_dma = shost->unchecked_isa_dma;
2168 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
2169 struct scatterlist *sg;
2170
2171 if (unchecked_isa_dma)
2172 cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
2173 cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
2174 GFP_KERNEL, numa_node);
2175 if (!cmd->sense_buffer)
2176 return -ENOMEM;
2177 cmd->req.sense = cmd->sense_buffer;
2178
2179 if (scsi_host_get_prot(shost)) {
2180 sg = (void *)cmd + sizeof(struct scsi_cmnd) +
2181 shost->hostt->cmd_size;
2182 cmd->prot_sdb = (void *)sg + scsi_mq_sgl_size(shost);
2183 }
2184
2185 return 0;
2186}
2187
2188static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
2189 unsigned int hctx_idx)
2190{
2191 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
2192
2193 scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
2194 cmd->sense_buffer);
2195}
2196
2197static int scsi_map_queues(struct blk_mq_tag_set *set)
2198{
2199 struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
2200
2201 if (shost->hostt->map_queues)
2202 return shost->hostt->map_queues(shost);
2203 return blk_mq_map_queues(set);
2204}
2205
2206void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
2207{
2208 struct device *dev = shost->dma_dev;
2209
2210
2211
2212
2213 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
2214 SG_MAX_SEGMENTS));
2215
2216 if (scsi_host_prot_dma(shost)) {
2217 shost->sg_prot_tablesize =
2218 min_not_zero(shost->sg_prot_tablesize,
2219 (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
2220 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
2221 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
2222 }
2223
2224 blk_queue_max_hw_sectors(q, shost->max_sectors);
2225 if (shost->unchecked_isa_dma)
2226 blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
2227 blk_queue_segment_boundary(q, shost->dma_boundary);
2228 dma_set_seg_boundary(dev, shost->dma_boundary);
2229
2230 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
2231
2232 if (!shost->use_clustering)
2233 q->limits.cluster = 0;
2234
2235
2236
2237
2238
2239
2240
2241
2242 blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
2243}
2244EXPORT_SYMBOL_GPL(__scsi_init_queue);
2245
2246static int scsi_old_init_rq(struct request_queue *q, struct request *rq,
2247 gfp_t gfp)
2248{
2249 struct Scsi_Host *shost = q->rq_alloc_data;
2250 const bool unchecked_isa_dma = shost->unchecked_isa_dma;
2251 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
2252
2253 memset(cmd, 0, sizeof(*cmd));
2254
2255 if (unchecked_isa_dma)
2256 cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
2257 cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, gfp,
2258 NUMA_NO_NODE);
2259 if (!cmd->sense_buffer)
2260 goto fail;
2261 cmd->req.sense = cmd->sense_buffer;
2262
2263 if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
2264 cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp);
2265 if (!cmd->prot_sdb)
2266 goto fail_free_sense;
2267 }
2268
2269 return 0;
2270
2271fail_free_sense:
2272 scsi_free_sense_buffer(unchecked_isa_dma, cmd->sense_buffer);
2273fail:
2274 return -ENOMEM;
2275}
2276
2277static void scsi_old_exit_rq(struct request_queue *q, struct request *rq)
2278{
2279 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
2280
2281 if (cmd->prot_sdb)
2282 kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
2283 scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
2284 cmd->sense_buffer);
2285}
2286
2287struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
2288{
2289 struct Scsi_Host *shost = sdev->host;
2290 struct request_queue *q;
2291
2292 q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
2293 if (!q)
2294 return NULL;
2295 q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
2296 q->rq_alloc_data = shost;
2297 q->request_fn = scsi_request_fn;
2298 q->init_rq_fn = scsi_old_init_rq;
2299 q->exit_rq_fn = scsi_old_exit_rq;
2300 q->initialize_rq_fn = scsi_initialize_rq;
2301
2302 if (blk_init_allocated_queue(q) < 0) {
2303 blk_cleanup_queue(q);
2304 return NULL;
2305 }
2306
2307 __scsi_init_queue(shost, q);
2308 blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
2309 blk_queue_prep_rq(q, scsi_prep_fn);
2310 blk_queue_unprep_rq(q, scsi_unprep_fn);
2311 blk_queue_softirq_done(q, scsi_softirq_done);
2312 blk_queue_rq_timed_out(q, scsi_times_out);
2313 blk_queue_lld_busy(q, scsi_lld_busy);
2314 return q;
2315}
2316
2317static const struct blk_mq_ops scsi_mq_ops = {
2318 .get_budget = scsi_mq_get_budget,
2319 .put_budget = scsi_mq_put_budget,
2320 .queue_rq = scsi_queue_rq,
2321 .complete = scsi_softirq_done,
2322 .timeout = scsi_timeout,
2323#ifdef CONFIG_BLK_DEBUG_FS
2324 .show_rq = scsi_show_rq,
2325#endif
2326 .init_request = scsi_mq_init_request,
2327 .exit_request = scsi_mq_exit_request,
2328 .initialize_rq_fn = scsi_initialize_rq,
2329 .map_queues = scsi_map_queues,
2330};
2331
2332struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
2333{
2334 sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
2335 if (IS_ERR(sdev->request_queue))
2336 return NULL;
2337
2338 sdev->request_queue->queuedata = sdev;
2339 __scsi_init_queue(sdev->host, sdev->request_queue);
2340 blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue);
2341 return sdev->request_queue;
2342}
2343
2344int scsi_mq_setup_tags(struct Scsi_Host *shost)
2345{
2346 unsigned int cmd_size, sgl_size;
2347
2348 sgl_size = scsi_mq_sgl_size(shost);
2349 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
2350 if (scsi_host_get_prot(shost))
2351 cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
2352
2353 memset(&shost->tag_set, 0, sizeof(shost->tag_set));
2354 shost->tag_set.ops = &scsi_mq_ops;
2355 shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
2356 shost->tag_set.queue_depth = shost->can_queue;
2357 shost->tag_set.cmd_size = cmd_size;
2358 shost->tag_set.numa_node = NUMA_NO_NODE;
2359 shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
2360 shost->tag_set.flags |=
2361 BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
2362 shost->tag_set.driver_data = shost;
2363
2364 return blk_mq_alloc_tag_set(&shost->tag_set);
2365}
2366
2367void scsi_mq_destroy_tags(struct Scsi_Host *shost)
2368{
2369 blk_mq_free_tag_set(&shost->tag_set);
2370}
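
/*
 * scsi_device_from_queue - return sdev associated with a request_queue
 * @q: The request queue to return the sdev from
 *
 * Return the sdev associated with a request queue or NULL if the
 * request_queue does not reference a SCSI device.
 */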
struct scsi_device *scsi_device_from_queue(struct request_queue *q)
{
        struct scsi_device *sdev = NULL;

        if (q->mq_ops) {
                if (q->mq_ops == &scsi_mq_ops)
                        sdev = q->queuedata;
        } else if (q->request_fn == scsi_request_fn)
                sdev = q->queuedata;
        if (!sdev || !get_device(&sdev->sdev_gendev))
                sdev = NULL;

        return sdev;
}
EXPORT_SYMBOL_GPL(scsi_device_from_queue);
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411void scsi_block_requests(struct Scsi_Host *shost)
2412{
2413 shost->host_self_blocked = 1;
2414}
2415EXPORT_SYMBOL(scsi_block_requests);
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437void scsi_unblock_requests(struct Scsi_Host *shost)
2438{
2439 shost->host_self_blocked = 0;
2440 scsi_run_host_queues(shost);
2441}
2442EXPORT_SYMBOL(scsi_unblock_requests);
2443
2444int __init scsi_init_queue(void)
2445{
2446 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
2447 sizeof(struct scsi_data_buffer),
2448 0, 0, NULL);
2449 if (!scsi_sdb_cache) {
2450 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
2451 return -ENOMEM;
2452 }
2453
2454 return 0;
2455}
2456
2457void scsi_exit_queue(void)
2458{
2459 kmem_cache_destroy(scsi_sense_cache);
2460 kmem_cache_destroy(scsi_sense_isadma_cache);
2461 kmem_cache_destroy(scsi_sdb_cache);
2462}
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482int
2483scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
2484 unsigned char *buffer, int len, int timeout, int retries,
2485 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2486{
2487 unsigned char cmd[10];
2488 unsigned char *real_buffer;
2489 int ret;
2490
2491 memset(cmd, 0, sizeof(cmd));
2492 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
2493
2494 if (sdev->use_10_for_ms) {
2495 if (len > 65535)
2496 return -EINVAL;
2497 real_buffer = kmalloc(8 + len, GFP_KERNEL);
2498 if (!real_buffer)
2499 return -ENOMEM;
2500 memcpy(real_buffer + 8, buffer, len);
2501 len += 8;
2502 real_buffer[0] = 0;
2503 real_buffer[1] = 0;
2504 real_buffer[2] = data->medium_type;
2505 real_buffer[3] = data->device_specific;
2506 real_buffer[4] = data->longlba ? 0x01 : 0;
2507 real_buffer[5] = 0;
2508 real_buffer[6] = data->block_descriptor_length >> 8;
2509 real_buffer[7] = data->block_descriptor_length;
2510
2511 cmd[0] = MODE_SELECT_10;
2512 cmd[7] = len >> 8;
2513 cmd[8] = len;
2514 } else {
2515 if (len > 255 || data->block_descriptor_length > 255 ||
2516 data->longlba)
2517 return -EINVAL;
2518
2519 real_buffer = kmalloc(4 + len, GFP_KERNEL);
2520 if (!real_buffer)
2521 return -ENOMEM;
2522 memcpy(real_buffer + 4, buffer, len);
2523 len += 4;
2524 real_buffer[0] = 0;
2525 real_buffer[1] = data->medium_type;
2526 real_buffer[2] = data->device_specific;
2527 real_buffer[3] = data->block_descriptor_length;
2528
2529
2530 cmd[0] = MODE_SELECT;
2531 cmd[4] = len;
2532 }
2533
2534 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
2535 sshdr, timeout, retries, NULL);
2536 kfree(real_buffer);
2537 return ret;
2538}
2539EXPORT_SYMBOL_GPL(scsi_mode_select);
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result, retry_count = retries;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/*
	 * If the device rejected MODE SENSE(10) with ILLEGAL REQUEST /
	 * invalid command operation code (ASC 0x20), fall back to
	 * MODE SENSE(6) and retry.
	 */
	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    driver_byte(result) == DRIVER_SENSE) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/*
			 * Some devices return this bogus header for pages
			 * 6 and 8; treat the response as having no header.
			 */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	} else if ((status_byte(result) == CHECK_CONDITION) &&
		   scsi_sense_valid(sshdr) &&
		   sshdr->sense_key == UNIT_ATTENTION && retry_count) {
		retry_count--;
		goto retry;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
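
/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr: place to put decoded sense information.
 *
 *	Returns zero if the unit is ready, or the result of the last
 *	TEST UNIT READY otherwise.  For removable media, a UNIT ATTENTION
 *	sets the ->changed flag.
 */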
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	int result;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, 1, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
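
/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 */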
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_CANCEL:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
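
/**
 *	scsi_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */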
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
		scsi_rescan_device(&sdev->sdev_gendev);
		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
		envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
		break;
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
		break;
	case SDEV_EVT_LUN_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
		break;
	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
		envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
		break;
	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}
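
/**
 *	scsi_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch all queued events to their associated scsi_device kobjects
 *	as uevents.
 */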
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	enum scsi_device_event evt_type;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
		if (test_and_clear_bit(evt_type, sdev->pending_events))
			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}
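
/**
 *	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */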
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/*
	 * Filtering on sdev->supported_events is disabled: it would also
	 * drop media change events for devices that rely on polling.
	 */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);
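
/**
 *	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event, or NULL on allocation failure.
 */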
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
	case SDEV_EVT_LUN_CHANGE_REPORTED:
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);
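
/**
 *	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type.
 */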
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
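
/**
 * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
 * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
 *
 * Only meaningful for the legacy (non-blk-mq) request path.
 */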
static int scsi_request_fn_active(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int request_fn_active;

	WARN_ON_ONCE(sdev->host->use_blk_mq);

	spin_lock_irq(q->queue_lock);
	request_fn_active = q->request_fn_active;
	spin_unlock_irq(q->queue_lock);

	return request_fn_active;
}
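
/**
 * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
 * @sdev: SCSI device pointer.
 *
 * Wait until the ongoing shost->hostt->queuecommand() calls that are
 * invoked from scsi_request_fn() have finished.
 */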
static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
{
	WARN_ON_ONCE(sdev->host->use_blk_mq);

	while (scsi_request_fn_active(sdev))
		msleep(20);
}
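
/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only requests marked RQF_PREEMPT are processed, all others
 *	are deferred.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful or an error code otherwise.
 */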
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err;

	/*
	 * It is allowed to call scsi_device_quiesce() multiple times from
	 * the same context but concurrent scsi_device_quiesce() calls from
	 * different contexts must not be allowed.
	 */
	WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);

	blk_set_preempt_only(q);

	blk_mq_freeze_queue(q);
	/*
	 * Ensure that the effect of blk_set_preempt_only() is visible to
	 * all request submitters before the queue is unfrozen, even if the
	 * queue was already frozen when this function was called.
	 */
	synchronize_rcu();
	blk_mq_unfreeze_queue(q);

	mutex_lock(&sdev->state_mutex);
	err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err == 0)
		sdev->quiesced_by = current;
	else
		blk_clear_preempt_only(q);
	mutex_unlock(&sdev->state_mutex);

	return err;
}
EXPORT_SYMBOL(scsi_device_quiesce);
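
/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 */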
void scsi_device_resume(struct scsi_device *sdev)
{
	/*
	 * Check if the device state was mutated prior to resume, and if
	 * so assume the state is being managed elsewhere (for example
	 * device deleted during suspend).
	 */
	mutex_lock(&sdev->state_mutex);
	WARN_ON_ONCE(!sdev->quiesced_by);
	sdev->quiesced_by = NULL;
	blk_clear_preempt_only(sdev->request_queue);
	if (sdev->sdev_state == SDEV_QUIESCE)
		scsi_device_set_state(sdev, SDEV_RUNNING);
	mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
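
/**
 * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Pause SCSI command processing on the specified device. Does not sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_BLOCK state (which must be
 * a legal transition). When the device is in this state, command processing
 * is paused until the device leaves the SDEV_BLOCK state. See also
 * scsi_internal_device_unblock_nowait().
 */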
int scsi_internal_device_block_nowait(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	if (q->mq_ops) {
		blk_mq_quiesce_queue_nowait(q);
	} else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
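
/**
 * scsi_internal_device_block - try to transition to the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Pause SCSI command processing on the specified device and wait until all
 * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 */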
static int scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err;

	mutex_lock(&sdev->state_mutex);
	err = scsi_internal_device_block_nowait(sdev);
	if (err == 0) {
		if (q->mq_ops)
			blk_mq_quiesce_queue(q);
		else
			scsi_wait_for_queuecommand(sdev);
	}
	mutex_unlock(&sdev->state_mutex);

	return err;
}

void scsi_start_queue(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;

	if (q->mq_ops) {
		blk_mq_unquiesce_queue(q);
	} else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
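
/**
 * scsi_internal_device_unblock_nowait - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Restart the device queue for a previously suspended SCSI device. Does not
 * sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 */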
int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
					enum scsi_device_state new_state)
{
	/*
	 * Try to transition the scsi device to SDEV_RUNNING or one of the
	 * offlined states and goose the device queue if successful.
	 */
	switch (sdev->sdev_state) {
	case SDEV_BLOCK:
	case SDEV_TRANSPORT_OFFLINE:
		sdev->sdev_state = new_state;
		break;
	case SDEV_CREATED_BLOCK:
		if (new_state == SDEV_TRANSPORT_OFFLINE ||
		    new_state == SDEV_OFFLINE)
			sdev->sdev_state = new_state;
		else
			sdev->sdev_state = SDEV_CREATED;
		break;
	case SDEV_CANCEL:
	case SDEV_OFFLINE:
		break;
	default:
		return -EINVAL;
	}
	scsi_start_queue(sdev);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);
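
/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Restart the device queue for a previously suspended SCSI device. May sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 */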
static int scsi_internal_device_unblock(struct scsi_device *sdev,
					enum scsi_device_state new_state)
{
	int ret;

	mutex_lock(&sdev->state_mutex);
	ret = scsi_internal_device_unblock_nowait(sdev, new_state);
	mutex_unlock(&sdev->state_mutex);

	return ret;
}

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), data,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), &new_state,
					device_unblock);
	else
		device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
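
/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */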
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);
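
/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously
 * mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */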
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);

void sdev_disable_disk_events(struct scsi_device *sdev)
{
	atomic_inc(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_disable_disk_events);

void sdev_enable_disk_events(struct scsi_device *sdev)
{
	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
		return;
	atomic_dec(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_enable_disk_events);
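
/**
 * scsi_vpd_lun_id - return a unique device identification
 * @sdev: SCSI device
 * @id:   buffer for the identification
 * @id_len:  length of the buffer
 *
 * Copies a unique device identification into @id based
 * on the information in the VPD page 0x83 of the device.
 * The string will be formatted as a SCSI name string.
 *
 * Returns the length of the identification or error on failure.
 * If the identifier is longer than the supplied buffer the actual
 * identifier length is returned and the buffer is not zero-padded.
 */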
int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{
	u8 cur_id_type = 0xff;
	u8 cur_id_size = 0;
	const unsigned char *d, *cur_id_str;
	const struct scsi_vpd *vpd_pg83;
	int id_size = -EINVAL;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	/*
	 * Look for the best designation descriptor.  Priority, from lowest
	 * to highest: T10 vendor ID, EUI-64, NAA, SCSI name string; within
	 * a type, longer designators are preferred.  The buffer must hold
	 * at least "naa." plus sixteen hex digits and a trailing NUL,
	 * i.e. 21 bytes.
	 */
	if (id_len < 21) {
		rcu_read_unlock();
		return -EINVAL;
	}

	memset(id, 0, id_len);
	d = vpd_pg83->data + 4;
	while (d < vpd_pg83->data + vpd_pg83->len) {
		/* Skip designators with an association other than the LUN */
		if ((d[1] & 0x30) != 0x00)
			goto next_desig;

		switch (d[1] & 0xf) {
		case 0x1:
			/* T10 Vendor ID */
			if (cur_id_size > d[3])
				break;
			/* Skip if a higher-priority designator was found */
			if (cur_id_type > 0x01 && cur_id_type != 0xff)
				break;
			cur_id_size = d[3];
			if (cur_id_size + 4 > id_len)
				cur_id_size = id_len - 4;
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			id_size = snprintf(id, id_len, "t10.%*pE",
					   cur_id_size, cur_id_str);
			break;
		case 0x2:
			/* EUI-64 */
			if (cur_id_size > d[3])
				break;
			/* Prefer an NAA designator of the same size */
			if (cur_id_type == 0x3 &&
			    cur_id_size == d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "eui.%8phN",
						   cur_id_str);
				break;
			case 12:
				id_size = snprintf(id, id_len,
						   "eui.%12phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "eui.%16phN",
						   cur_id_str);
				break;
			default:
				cur_id_size = 0;
				break;
			}
			break;
		case 0x3:
			/* NAA */
			if (cur_id_size > d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "naa.%8phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "naa.%16phN",
						   cur_id_str);
				break;
			default:
				cur_id_size = 0;
				break;
			}
			break;
		case 0x8:
			/* SCSI name string */
			if (cur_id_size + 4 > d[3])
				break;
			/* Prefer what we have if this one would be truncated */
			if (cur_id_size && d[3] > id_len)
				break;
			cur_id_size = id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			if (cur_id_size >= id_len)
				cur_id_size = id_len - 1;
			memcpy(id, cur_id_str, cur_id_size);
			/* Lower the priority if the string was truncated */
			if (cur_id_size != id_size)
				cur_id_size = 6;
			break;
		default:
			break;
		}
next_desig:
		d += d[3] + 4;
	}
	rcu_read_unlock();

	return id_size;
}
EXPORT_SYMBOL(scsi_vpd_lun_id);
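
/**
 * scsi_vpd_tpg_id - return a target port group identifier
 * @sdev: SCSI device
 * @rel_id: pointer to return relative target port in if not %NULL
 *
 * Returns the Target Port Group identifier from the information
 * found in VPD page 0x83 of the device, or an error on failure.
 */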
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{
	const unsigned char *d;
	const struct scsi_vpd *vpd_pg83;
	int group_id = -EAGAIN, rel_port = -1;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	d = vpd_pg83->data + 4;
	while (d < vpd_pg83->data + vpd_pg83->len) {
		switch (d[1] & 0xf) {
		case 0x4:
			/* Relative target port */
			rel_port = get_unaligned_be16(&d[6]);
			break;
		case 0x5:
			/* Target port group */
			group_id = get_unaligned_be16(&d[6]);
			break;
		default:
			break;
		}
		d += d[3] + 4;
	}
	rcu_read_unlock();

	if (group_id >= 0 && rel_id && rel_port != -1)
		*rel_id = rel_port;

	return group_id;
}
EXPORT_SYMBOL(scsi_vpd_tpg_id);