/*
 *  Copyright (C) 1999 Eric Youngdale
 *  Copyright (C) 2014 Christoph Hellwig
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_debugfs.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

static struct kmem_cache *scsi_sdb_cache;
static struct kmem_cache *scsi_sense_cache;
static struct kmem_cache *scsi_sense_isadma_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);

static inline struct kmem_cache *
scsi_select_sense_cache(bool unchecked_isa_dma)
{
	return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
}

static void scsi_free_sense_buffer(bool unchecked_isa_dma,
				   unsigned char *sense_buffer)
{
	kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
			sense_buffer);
}

static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
	gfp_t gfp_mask, int numa_node)
{
	return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
				     gfp_mask, numa_node);
}

int scsi_init_sense_cache(struct Scsi_Host *shost)
{
	struct kmem_cache *cache;
	int ret = 0;

	cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
	if (cache)
		return 0;

	mutex_lock(&scsi_sense_cache_mutex);
	if (shost->unchecked_isa_dma) {
		scsi_sense_isadma_cache =
			kmem_cache_create("scsi_sense_cache(DMA)",
			SCSI_SENSE_BUFFERSIZE, 0,
			SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
		if (!scsi_sense_isadma_cache)
			ret = -ENOMEM;
	} else {
		scsi_sense_cache =
			kmem_cache_create("scsi_sense_cache",
			SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
		if (!scsi_sense_cache)
			ret = -ENOMEM;
	}

	mutex_unlock(&scsi_sense_cache_mutex);
	return ret;
}
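
/*
 * Illustrative sketch (not part of this file): a host driver that can only
 * DMA into ISA-addressable memory sets unchecked_isa_dma in its host
 * template, which steers the sense-buffer allocations above into the
 * SLAB_CACHE_DMA-backed cache.  The template name below is hypothetical.
 *
 *	static struct scsi_host_template example_isa_sht = {
 *		.name			= "example-isa",
 *		.unchecked_isa_dma	= 1,
 *		...
 *	};
 */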

/*
 * When to reinvoke queueing after a resource shortage.  It's 3 msecs to
 * not change behaviour from the previous unplug mechanism, hope that wasn't
 * engrained in drivers.
 */
#define SCSI_QUEUE_DELAY	3

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;

	if (cmd->request->rq_flags & RQF_DONTPREP) {
		cmd->request->rq_flags &= ~RQF_DONTPREP;
		scsi_mq_uninit_cmd(cmd);
	} else {
		WARN_ON_ONCE(true);
	}
	blk_mq_requeue_request(cmd->request, true);
	put_device(&sdev->sdev_gendev);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the queue insert.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.  Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	cmd->result = 0;
	if (q->mq_ops) {
		scsi_mq_requeue_cmd(cmd);
		return;
	}
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	kblockd_schedule_work(&device->requeue_work);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       This is done either when the queue is stalled and commands
 *              need to go back on it, or when a command has come back from
 *              the low-level driver and needs to be retried.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, 1);
}

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @sshdr:	optional decoded sense header
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags for ->cmd_flags
 * @rq_flags:	flags for ->rq_flags
 * @resid:	optional residual length
 *
 * Returns the scsi_cmnd result field if a command was executed, or a negative
 * Linux error code if we didn't get that far.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, struct scsi_sense_hdr *sshdr,
		 int timeout, int retries, u64 flags, req_flags_t rq_flags,
		 int *resid)
{
	struct request *req;
	struct scsi_request *rq;
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request_flags(sdev->request_queue,
			data_direction == DMA_TO_DEVICE ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
	if (IS_ERR(req))
		return ret;
	rq = scsi_req(req);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, __GFP_RECLAIM))
		goto out;

	rq->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(rq->cmd, cmd, rq->cmd_len);
	rq->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags;
	req->rq_flags |= rq_flags | RQF_QUIET;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
		memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);

	if (resid)
		*resid = rq->resid_len;
	if (sense && rq->sense_len)
		memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
	if (sshdr)
		scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
	ret = rq->result;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
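
/*
 * Illustrative sketch (not part of this file): issuing a simple INQUIRY
 * through scsi_execute().  The 36-byte length and the variable names are
 * arbitrary choices for the example.
 *
 *	unsigned char inq[36];
 *	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, sizeof(inq), 0 };
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute(sdev, cmd, DMA_FROM_DEVICE, inq, sizeof(inq),
 *			      NULL, &sshdr, 30 * HZ, 3, 0, 0, NULL);
 *	if (result < 0 || !scsi_status_is_good(result))
 *		; // inspect sshdr for decoded sense data
 */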

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	atomic_dec(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled))) {
		spin_lock_irqsave(shost->host_lock, flags);
		scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}

	atomic_dec(&sdev->device_busy);
}

static void scsi_kick_queue(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_start_hw_queues(q);
	else
		blk_run_queue(q);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev.  We can't block on this because other LUNs may
	 * need to be run too, and starget_sdev_user may be reassigned
	 * while the queues are running.
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (shost->can_queue > 0 &&
	    atomic_read(&shost->host_busy) >= shost->can_queue)
		return true;
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_cleanup_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		scsi_kick_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:    scsi_run_queue()
 *
 * Purpose:     Select a proper request queue to serve next.
 *
 * Arguments:   q       - last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:       The previous command was completely finished, start
 *              a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	if (q->mq_ops)
		blk_mq_run_hw_queues(q, false);
	else
		blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_unprep_request(req);
	req->special = NULL;
	scsi_put_command(cmd);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (!blk_rq_is_passthrough(cmd->request)) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
	struct scsi_data_buffer *sdb;

	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table, true);
	if (cmd->request->next_rq) {
		sdb = cmd->request->next_rq->special;
		if (sdb)
			sg_free_table_chained(&sdb->table, true);
	}
	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table, true);
}

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	scsi_mq_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);
	scsi_del_cmd_from_list(cmd);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Free resources allocated for a scsi_cmnd.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table, false);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table, false);
}

static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;

	sg_free_table_chained(&bidi_sdb->table, false);
	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
	cmd->request->next_rq->special = NULL;
}

static bool scsi_end_request(struct request *req, blk_status_t error,
		unsigned int bytes, unsigned int bidi_bytes)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(bidi_bytes) &&
	    blk_update_request(req->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);

	if (!blk_rq_is_scsi(req)) {
		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
		cmd->flags &= ~SCMD_INITIALIZED;
	}

	if (req->mq_ctx) {
		/*
		 * In the MQ case the command gets freed by __blk_mq_end_request,
		 * so we have to do all cleanup that depends on it earlier.
		 *
		 * We also can't kick the queues from irq context, so we
		 * will have to defer it to a workqueue.
		 */
		scsi_mq_uninit_cmd(cmd);

		__blk_mq_end_request(req, error);

		if (scsi_target(sdev)->single_lun ||
		    !list_empty(&sdev->host->starved_list))
			kblockd_schedule_work(&sdev->requeue_work);
		else
			blk_mq_run_hw_queues(q, true);
	} else {
		unsigned long flags;

		if (bidi_bytes)
			scsi_release_bidi_buffers(cmd);
		scsi_release_buffers(cmd);
		scsi_put_command(cmd);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_finish_request(req, error);
		spin_unlock_irqrestore(q->queue_lock, flags);

		scsi_run_queue(q);
	}

	put_device(&sdev->sdev_gendev);
	return false;
}

/**
 * __scsi_error_from_host_byte - translate SCSI error code into errno
 * @cmd:	SCSI command (unused)
 * @result:	scsi error code
 *
 * Translate SCSI error code into block errors.
 */
static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd,
		int result)
{
	switch (host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		return BLK_STS_TRANSPORT;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_TARGET;
	case DID_NEXUS_FAILURE:
		return BLK_STS_NEXUS;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_NOSPC;
	case DID_MEDIUM_ERROR:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_MEDIUM;
	default:
		return BLK_STS_IOERR;
	}
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       We will finish off the specified number of sectors.  If we
 *		are done, the command block will be released and the queue
 *		function will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call __scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call scsi_end_request() with an error status
 *		   to fail the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	blk_status_t error = BLK_STS_OK;
	struct scsi_sense_hdr sshdr;
	bool sense_valid = false;
	int sense_deferred = 0, level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_rq_is_passthrough(req)) {
		if (result) {
			if (sense_valid) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				scsi_req(req)->sense_len =
					min(8 + cmd->sense_buffer[7],
					    SCSI_SENSE_BUFFERSIZE);
			}
			if (!sense_deferred)
				error = __scsi_error_from_host_byte(cmd, result);
		}
		/*
		 * __scsi_error_from_host_byte may have reset the host_byte
		 */
		scsi_req(req)->result = cmd->result;
		scsi_req(req)->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
			if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
					blk_rq_bytes(req->next_rq)))
				BUG();
			return;
		}
	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
		/*
		 * Flush commands do not transfer any data, and thus cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets the error explicitly for the problem case.
		 */
		error = __scsi_error_from_host_byte(cmd, result);
	}

	/* no bidi support for !blk_rq_is_passthrough yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated as
	 * success, so fiddle the result code here.  For passthrough requests
	 * we already took a copy of the original into sreq->result which
	 * is what gets returned to the user.
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/*
		 * if ATA PASS-THROUGH INFORMATION AVAILABLE, skip the
		 * print since the caller wants ATA registers.  Only occurs
		 * on SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->rq_flags & RQF_QUIET))
			scsi_print_sense(cmd);
		result = 0;
		/* for passthrough, error may be set */
		error = BLK_STS_OK;
	}

	/*
	 * special case: failed zero length commands always need to
	 * drop down into the retry code. Otherwise, if we finished
	 * all bytes in the request we are done now.
	 */
	if (!(blk_rq_bytes(req) == 0 && error) &&
	    !scsi_end_request(req, error, good_bytes, 0))
		return;

	/*
	 * Kill remainder if no retries.
	 */
	if (error && scsi_noretry_cmd(cmd)) {
		if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
			BUG();
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request, just requeue the command.
	 */
	if (result == 0)
		goto requeue;

	error = __scsi_error_from_host_byte(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				action = ACTION_FAIL;
				error = BLK_STS_PROTECTION;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				error = BLK_STS_TARGET;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				error = BLK_STS_PROTECTION;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->rq_flags & RQF_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						       SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (driver_byte(result) & DRIVER_SENSE)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
			return;
		/*FALLTHRU*/
	case ACTION_REPREP:
	requeue:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		if (q->mq_ops) {
			scsi_mq_requeue_cmd(cmd);
		} else {
			scsi_release_buffers(cmd);
			scsi_requeue_command(q, cmd);
		}
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(sg_alloc_table_chained(&sdb->table,
			blk_rq_nr_phys_segments(req), sdb->table.sgl)))
		return BLKPREP_DEFER;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_payload_bytes(req);
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = cmd->request;
	bool is_mq = (rq->mq_ctx != NULL);
	int error = BLKPREP_KILL;

	if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
		goto err_exit;

	error = scsi_init_sgtable(rq, &cmd->sdb);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		if (!rq->q->mq_ops) {
			struct scsi_data_buffer *bidi_sdb =
				kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
			if (!bidi_sdb) {
				error = BLKPREP_DEFER;
				goto err_exit;
			}

			rq->next_rq->special = bidi_sdb;
		}

		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		if (prot_sdb == NULL) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			WARN_ON_ONCE(1);
			error = BLKPREP_KILL;
			goto err_exit;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
				prot_sdb->table.sgl)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;
err_exit:
	if (is_mq) {
		scsi_mq_free_sgtables(cmd);
	} else {
		scsi_release_buffers(cmd);
		cmd->request->special = NULL;
		scsi_put_command(cmd);
		put_device(&sdev->sdev_gendev);
	}
	return error;
}
EXPORT_SYMBOL(scsi_init_io);
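
/*
 * Illustrative sketch (not part of this file): a ULD's ->init_command()
 * hook normally calls scsi_init_io() first to map the request into the
 * command's scatterlist before building the CDB.  The function name below
 * is hypothetical; sd_setup_read_write_cmnd() in sd.c is the real model.
 *
 *	static int example_init_command(struct scsi_cmnd *cmd)
 *	{
 *		int ret = scsi_init_io(cmd);
 *
 *		if (ret != BLKPREP_OK)
 *			return ret;
 *		// fill in cmd->cmnd, cmd->cmd_len, cmd->transfersize ...
 *		return BLKPREP_OK;
 *	}
 */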

/**
 * scsi_initialize_rq - initialize struct scsi_cmnd partially
 * @rq: Request associated with the scsi_cmnd to be initialized
 *
 * This function initializes the members of struct scsi_cmnd that must be
 * initialized before request processing starts and that won't be
 * reinitialized if a SCSI command is requeued.
 *
 * Called from inside blk_get_request() for pass-through requests and from
 * inside scsi_init_command() for filesystem requests.
 */
void scsi_initialize_rq(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	scsi_req_init(&cmd->req);
	cmd->jiffies_at_alloc = jiffies;
	cmd->retries = 0;
}
EXPORT_SYMBOL(scsi_initialize_rq);

/* Add a command to the list used by the aacraid and dpt_i2o drivers */
void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (shost->use_cmd_list) {
		spin_lock_irqsave(&sdev->list_lock, flags);
		list_add_tail(&cmd->list, &sdev->cmd_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}
}

/* Remove a command from the list used by the aacraid and dpt_i2o drivers */
void scsi_del_cmd_from_list(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (shost->use_cmd_list) {
		spin_lock_irqsave(&sdev->list_lock, flags);
		BUG_ON(list_empty(&cmd->list));
		list_del_init(&cmd->list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}
}

/* Called after a request has been started. */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
	void *buf = cmd->sense_buffer;
	void *prot = cmd->prot_sdb;
	struct request *rq = blk_mq_rq_from_pdu(cmd);
	unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
	unsigned long jiffies_at_alloc;
	int retries;

	if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
		flags |= SCMD_INITIALIZED;
		scsi_initialize_rq(rq);
	}

	jiffies_at_alloc = cmd->jiffies_at_alloc;
	retries = cmd->retries;
	/* zero out the cmd, except for the embedded scsi_request */
	memset((char *)cmd + sizeof(cmd->req), 0,
		sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);

	cmd->device = dev;
	cmd->sense_buffer = buf;
	cmd->prot_sdb = prot;
	cmd->flags = flags;
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	cmd->jiffies_at_alloc = jiffies_at_alloc;
	cmd->retries = retries;

	scsi_add_cmd_to_list(cmd);
}

static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	/*
	 * Passthrough requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret = scsi_init_io(cmd);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->cmd_len = scsi_req(req)->cmd_len;
	cmd->cmnd = scsi_req(req)->cmd;
	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = scsi_req(req)->retries;
	return BLKPREP_OK;
}

/*
 * Setup a normal block command.  These are simple requests from filesystems
 * that still need to be translated to SCSI CDBs from the ULD.
 */
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
		int ret = sdev->handler->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}

static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	if (blk_rq_is_scsi(req))
		return scsi_setup_scsi_cmnd(sdev, req);
	else
		return scsi_setup_fs_cmnd(sdev, req);
}

static int
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			ret = BLKPREP_DEFER;
			break;
		case SDEV_QUIESCE:
			/*
			 * If the device is quiesced we defer normal commands.
			 */
			if (req && !(req->rq_flags & RQF_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * requests must be terminated.
			 */
			if (req && !(req->rq_flags & RQF_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}

static int
scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
	case BLKPREP_INVALID:
		scsi_req(req)->result = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			put_device(&sdev->sdev_gendev);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
		 */
		if (atomic_read(&sdev->device_busy) == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->rq_flags |= RQF_DONTPREP;
	}

	return ret;
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	int ret;

	ret = scsi_prep_state_check(sdev, req);
	if (ret != BLKPREP_OK)
		goto out;

	if (!req->special) {
		/* Bail if we can't get a reference to the device */
		if (unlikely(!get_device(&sdev->sdev_gendev))) {
			ret = BLKPREP_DEFER;
			goto out;
		}

		scsi_init_command(sdev, cmd);
		req->special = cmd;
	}

	cmd->tag = req->tag;
	cmd->request = req;
	cmd->prot_op = SCSI_PROT_NORMAL;

	ret = scsi_setup_cmnd(sdev, req);
out:
	return scsi_prep_return(q, req, ret);
}

static void scsi_unprep_fn(struct request_queue *q, struct request *req)
{
	scsi_uninit_cmd(blk_mq_rq_to_pdu(req));
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	unsigned int busy;

	busy = atomic_inc_return(&sdev->device_busy) - 1;
	if (atomic_read(&sdev->device_blocked)) {
		if (busy)
			goto out_dec;

		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (atomic_dec_return(&sdev->device_blocked) > 0) {
			/*
			 * For the MQ case we take care of this in the caller.
			 */
			if (!q->mq_ops)
				blk_delay_queue(q, SCSI_QUEUE_DELAY);
			goto out_dec;
		}
		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
	}

	if (busy >= sdev->queue_depth)
		goto out_dec;

	return 1;
out_dec:
	atomic_dec(&sdev->device_busy);
	return 0;
}

/*
 * scsi_target_queue_ready: checks if we can send commands to target
 * @sdev: scsi device on starget to check.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must be careful with locking.
 *
 * Called with host_lock not held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	unsigned int busy;

	if (scsi_host_in_recovery(shost))
		return 0;

	busy = atomic_inc_return(&shost->host_busy) - 1;
	if (atomic_read(&shost->host_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->can_queue > 0 && busy >= shost->can_queue)
		goto starved;
	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	atomic_dec(&shost->host_busy);
	return 0;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
 * needs to return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dying(q))
		return 0;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in a better way than passing down host/starget busy state to
	 * each block device queue.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	scmd_printk(KERN_INFO, cmd, "killing request\n");

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	atomic_inc(&sdev->device_busy);
	atomic_inc(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_inc(&starget->target_busy);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		scsi_eh_scmd_add(cmd);
		break;
	}
}

/**
 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero return request was rejected and device's queue needs to be
 * plugged.
 */
static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;

	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	cmd->scsi_done(cmd);
	return 0;
}

/**
 * scsi_done - Invoke completion on finished SCSI command.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI midlayer.
 *
 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
 * which regains ownership of the SCSI command (de facto) from a LLDD, and
 * calls blk_complete_request() for further processing.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_complete_request(cmd->request);
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: request queue lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	for (;;) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req)
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		if (!scsi_dev_queue_ready(q, sdev))
			break;

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);

		spin_unlock_irq(q->queue_lock);
		cmd = blk_mq_rq_to_pdu(req);
		if (cmd != req->special) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) {
			spin_lock_irq(shost->host_lock);
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			spin_unlock_irq(shost->host_lock);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto host_not_ready;

		if (sdev->simple_tags)
			cmd->flags |= SCMD_TAGGED;
		else
			cmd->flags &= ~SCMD_TAGGED;

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		cmd->scsi_done = scsi_done;
		rtn = scsi_dispatch_cmd(cmd);
		if (rtn) {
			scsi_queue_insert(cmd, rtn);
			spin_lock_irq(q->queue_lock);
			goto out_delay;
		}
		spin_lock_irq(q->queue_lock);
	}

	return;

 host_not_ready:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
 not_ready:
	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue and
	 * prevent the starvation of the queue.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	atomic_dec(&sdev->device_busy);
out_delay:
	if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
		blk_delay_queue(q, SCSI_QUEUE_DELAY);
}

static inline blk_status_t prep_to_mq(int ret)
{
	switch (ret) {
	case BLKPREP_OK:
		return BLK_STS_OK;
	case BLKPREP_DEFER:
		return BLK_STS_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}

/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
static unsigned int scsi_mq_sgl_size(struct Scsi_Host *shost)
{
	return min_t(unsigned int, shost->sg_tablesize, SG_CHUNK_SIZE) *
		sizeof(struct scatterlist);
}
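
/*
 * Sketch of the per-request payload (PDU) layout that scsi_mq_setup_tags(),
 * scsi_mq_init_request() and scsi_mq_prep_fn() below agree on.  This diagram
 * is explanatory only:
 *
 *	+--------------------------+  blk_mq_rq_to_pdu(rq)
 *	| struct scsi_cmnd         |
 *	+--------------------------+
 *	| LLD private data         |  hostt->cmd_size bytes
 *	+--------------------------+
 *	| data scatterlist         |  scsi_mq_sgl_size(shost) bytes
 *	+--------------------------+
 *	| struct scsi_data_buffer  |  only if scsi_host_get_prot(shost):
 *	| protection scatterlist   |  another scsi_mq_sgl_size(shost) bytes
 *	+--------------------------+
 */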

static int scsi_mq_prep_fn(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scatterlist *sg;

	scsi_init_command(sdev, cmd);

	req->special = cmd;

	cmd->request = req;

	cmd->tag = req->tag;
	cmd->prot_op = SCSI_PROT_NORMAL;

	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	cmd->sdb.table.sgl = sg;

	if (scsi_host_get_prot(shost)) {
		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));

		cmd->prot_sdb->table.sgl =
			(struct scatterlist *)(cmd->prot_sdb + 1);
	}

	if (blk_bidi_rq(req)) {
		struct request *next_rq = req->next_rq;
		struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);

		memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
		bidi_sdb->table.sgl =
			(struct scatterlist *)(bidi_sdb + 1);

		next_rq->special = bidi_sdb;
	}

	blk_mq_start_request(req);

	return scsi_setup_cmnd(sdev, req);
}

static void scsi_mq_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_mq_complete_request(cmd->request);
}

static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct scsi_device *sdev = q->queuedata;

	atomic_dec(&sdev->device_busy);
	put_device(&sdev->sdev_gendev);
}

static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct scsi_device *sdev = q->queuedata;

	if (!get_device(&sdev->sdev_gendev))
		goto out;
	if (!scsi_dev_queue_ready(q, sdev))
		goto out_put_device;

	return true;

out_put_device:
	put_device(&sdev->sdev_gendev);
out:
	if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
		blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
	return false;
}

static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	blk_status_t ret;
	int reason;

	ret = prep_to_mq(scsi_prep_state_check(sdev, req));
	if (ret != BLK_STS_OK)
		goto out_put_budget;

	ret = BLK_STS_RESOURCE;
	if (!scsi_target_queue_ready(shost, sdev))
		goto out_put_budget;
	if (!scsi_host_queue_ready(q, shost, sdev))
		goto out_dec_target_busy;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		ret = prep_to_mq(scsi_mq_prep_fn(req));
		if (ret != BLK_STS_OK)
			goto out_dec_host_busy;
		req->rq_flags |= RQF_DONTPREP;
	} else {
		blk_mq_start_request(req);
	}

	if (sdev->simple_tags)
		cmd->flags |= SCMD_TAGGED;
	else
		cmd->flags &= ~SCMD_TAGGED;

	scsi_init_cmd_errh(cmd);
	cmd->scsi_done = scsi_mq_done;

	reason = scsi_dispatch_cmd(cmd);
	if (reason) {
		scsi_set_blocked(cmd, reason);
		ret = BLK_STS_RESOURCE;
		goto out_dec_host_busy;
	}

	return BLK_STS_OK;

out_dec_host_busy:
	atomic_dec(&shost->host_busy);
out_dec_target_busy:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
out_put_budget:
	scsi_mq_put_budget(hctx);
	switch (ret) {
	case BLK_STS_OK:
		break;
	case BLK_STS_RESOURCE:
		if (atomic_read(&sdev->device_busy) == 0 &&
		    !scsi_device_blocked(sdev))
			blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
		break;
	default:
		/*
		 * Make sure to release all allocated resources when
		 * we hit an error, as we will never see this command
		 * again.
		 */
		if (req->rq_flags & RQF_DONTPREP)
			scsi_mq_uninit_cmd(cmd);
		break;
	}
	return ret;
}

static enum blk_eh_timer_return scsi_timeout(struct request *req,
		bool reserved)
{
	if (reserved)
		return BLK_EH_RESET_TIMER;
	return scsi_times_out(req);
}

static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
				unsigned int hctx_idx, unsigned int numa_node)
{
	struct Scsi_Host *shost = set->driver_data;
	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct scatterlist *sg;

	if (unchecked_isa_dma)
		cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
	cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
						    GFP_KERNEL, numa_node);
	if (!cmd->sense_buffer)
		return -ENOMEM;
	cmd->req.sense = cmd->sense_buffer;

	if (scsi_host_get_prot(shost)) {
		sg = (void *)cmd + sizeof(struct scsi_cmnd) +
			shost->hostt->cmd_size;
		cmd->prot_sdb = (void *)sg + scsi_mq_sgl_size(shost);
	}

	return 0;
}

static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
				 unsigned int hctx_idx)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
			       cmd->sense_buffer);
}

static int scsi_map_queues(struct blk_mq_tag_set *set)
{
	struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);

	if (shost->hostt->map_queues)
		return shost->hostt->map_queues(shost);
	return blk_mq_map_queues(set);
}

static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;

	return bounce_limit;
}

void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
	struct device *dev = shost->dma_dev;

	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SG_MAX_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	if (!shost->use_clustering)
		q->limits.cluster = 0;

	/*
	 * Set a reasonable default alignment: the larger of 4 bytes or the
	 * DMA cache alignment of the platform.  The host and device may
	 * alter it using blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
}
EXPORT_SYMBOL_GPL(__scsi_init_queue);

static int scsi_old_init_rq(struct request_queue *q, struct request *rq,
			    gfp_t gfp)
{
	struct Scsi_Host *shost = q->rq_alloc_data;
	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	memset(cmd, 0, sizeof(*cmd));

	if (unchecked_isa_dma)
		cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
	cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, gfp,
						    NUMA_NO_NODE);
	if (!cmd->sense_buffer)
		goto fail;
	cmd->req.sense = cmd->sense_buffer;

	if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
		cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp);
		if (!cmd->prot_sdb)
			goto fail_free_sense;
	}

	return 0;

fail_free_sense:
	scsi_free_sense_buffer(unchecked_isa_dma, cmd->sense_buffer);
fail:
	return -ENOMEM;
}

static void scsi_old_exit_rq(struct request_queue *q, struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	if (cmd->prot_sdb)
		kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
	scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
			       cmd->sense_buffer);
}

struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!q)
		return NULL;
	q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	q->rq_alloc_data = shost;
	q->request_fn = scsi_request_fn;
	q->init_rq_fn = scsi_old_init_rq;
	q->exit_rq_fn = scsi_old_exit_rq;
	q->initialize_rq_fn = scsi_initialize_rq;

	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}

	__scsi_init_queue(shost, q);
	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_unprep_rq(q, scsi_unprep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}

static const struct blk_mq_ops scsi_mq_ops = {
	.get_budget	= scsi_mq_get_budget,
	.put_budget	= scsi_mq_put_budget,
	.queue_rq	= scsi_queue_rq,
	.complete	= scsi_softirq_done,
	.timeout	= scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
	.show_rq	= scsi_show_rq,
#endif
	.init_request	= scsi_mq_init_request,
	.exit_request	= scsi_mq_exit_request,
	.initialize_rq_fn = scsi_initialize_rq,
	.map_queues	= scsi_map_queues,
};

struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
{
	sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
	if (IS_ERR(sdev->request_queue))
		return NULL;

	sdev->request_queue->queuedata = sdev;
	__scsi_init_queue(sdev->host, sdev->request_queue);
	return sdev->request_queue;
}

int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
	unsigned int cmd_size, sgl_size;

	sgl_size = scsi_mq_sgl_size(shost);
	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
	if (scsi_host_get_prot(shost))
		cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;

	memset(&shost->tag_set, 0, sizeof(shost->tag_set));
	shost->tag_set.ops = &scsi_mq_ops;
	shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
	shost->tag_set.queue_depth = shost->can_queue;
	shost->tag_set.cmd_size = cmd_size;
	shost->tag_set.numa_node = NUMA_NO_NODE;
	shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	shost->tag_set.flags |=
		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
	shost->tag_set.driver_data = shost;

	return blk_mq_alloc_tag_set(&shost->tag_set);
}
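
/*
 * Illustrative sketch (not part of this file): an LLD opts into scsi-mq by
 * sizing its per-command private data in the host template; the midlayer
 * then calls scsi_mq_setup_tags() from scsi_add_host() internals, not the
 * LLD directly.  The names below are hypothetical.
 *
 *	static struct scsi_host_template example_sht = {
 *		.name		= "example",
 *		.queuecommand	= example_queuecommand,
 *		.can_queue	= 64,
 *		.cmd_size	= sizeof(struct example_cmd_priv),
 *	};
 */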

void scsi_mq_destroy_tags(struct Scsi_Host *shost)
{
	blk_mq_free_tag_set(&shost->tag_set);
}

/**
 * scsi_device_from_queue - return sdev associated with a request_queue
 * @q: The request queue to return the sdev from
 *
 * Return the sdev associated with a request queue or NULL if the
 * request_queue does not reference a SCSI device.
 */
struct scsi_device *scsi_device_from_queue(struct request_queue *q)
{
	struct scsi_device *sdev = NULL;

	if (q->mq_ops) {
		if (q->mq_ops == &scsi_mq_ops)
			sdev = q->queuedata;
	} else if (q->request_fn == scsi_request_fn)
		sdev = q->queuedata;
	if (!sdev || !get_device(&sdev->sdev_gendev))
		sdev = NULL;

	return sdev;
}
EXPORT_SYMBOL_GPL(scsi_device_from_queue);
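
/*
 * Illustrative sketch (not part of this file): request stacking drivers
 * such as device-mapper multipath use this helper to find the SCSI device
 * behind one of their paths:
 *
 *	struct scsi_device *sdev =
 *		scsi_device_from_queue(bdev_get_queue(bdev));
 *
 *	if (sdev) {
 *		// inspect or tweak sdev here ...
 *		put_device(&sdev->sdev_gendev);
 *	}
 */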

/*
 * Function:	scsi_block_requests()
 *
 * Purpose:	Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:	shost       - Host in question
 *
 * Returns:	Nothing
 *
 * Lock status:	No locks are assumed held.
 *
 * Notes:	There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:	scsi_unblock_requests()
 *
 * Purpose:	Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:	shost       - Host in question
 *
 * Returns:	Nothing
 *
 * Lock status:	No locks are assumed held.
 *
 * Notes:	There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
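
/*
 * Illustrative sketch (not part of this file): LLDs typically bracket a
 * controller reset with these calls so the midlayer stops feeding commands
 * while the hardware is unavailable.  The helper below is hypothetical.
 *
 *	static void example_reset_adapter(struct Scsi_Host *shost)
 *	{
 *		scsi_block_requests(shost);
 *		// ... quiesce and reinitialize the adapter ...
 *		scsi_unblock_requests(shost);
 *	}
 */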

int __init scsi_init_queue(void)
{
	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	return 0;
}

void scsi_exit_queue(void)
{
	kmem_cache_destroy(scsi_sense_cache);
	kmem_cache_destroy(scsi_sense_isadma_cache);
	kmem_cache_destroy(scsi_sdb_cache);
}

/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);
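
/*
 * Illustrative sketch (not part of this file): callers pass only the mode
 * page payload in @buffer; scsi_mode_select() prepends the 4- or 8-byte
 * mode parameter header itself.  A typical read-modify-write of the
 * caching page (0x08) might look like:
 *
 *	unsigned char pg[32];
 *	struct scsi_mode_data data;
 *
 *	// fetch page 0x08 with scsi_mode_sense(), flip a bit in pg[], then:
 *	scsi_mode_select(sdev, 1, 1, 0x08, pg, sizeof(pg),
 *			 30 * HZ, 3, &data, NULL);
 */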

/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to six
 *	bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if unsuccessful, or the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was
 *	issued) if successful.
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result, retry_count = retries;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	} else if ((status_byte(result) == CHECK_CONDITION) &&
		   scsi_sense_valid(sshdr) &&
		   sshdr->sense_key == UNIT_ATTENTION && retry_count) {
		retry_count--;
		goto retry;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
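
/*
 * Illustrative sketch (not part of this file): reading the caching mode
 * page (0x08), roughly as sd_read_cache_type() in sd.c does:
 *
 *	unsigned char buf[64];
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	int res;
 *
 *	res = scsi_mode_sense(sdev, 0, 0x08, buf, sizeof(buf),
 *			      30 * HZ, 3, &data, &sshdr);
 *	if (scsi_status_is_good(res))
 *		; // page data starts at buf[data.header_length +
 *		  //                        data.block_descriptor_length]
 */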

/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr: output pointer for decoded sense information.
 *
 *	Returns zero if unsuccessful or an error if TUR failed.  For
 *	removable media, UNIT_ATTENTION sets ->changed flag.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	int result;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
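
/*
 * Illustrative sketch (not part of this file): polling for readiness the
 * way disk/disc media-change paths do.  The timeout value here is a
 * caller-chosen constant, not something this file defines.
 *
 *	struct scsi_sense_hdr sshdr;
 *	int res = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);
 *
 *	if (res && scsi_sense_valid(&sshdr) &&
 *	    sshdr.sense_key == NOT_READY)
 *		; // medium not present or still becoming ready
 */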

/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_CANCEL:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
2728
2729
2730
2731
2732
2733
2734
2735
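/**
 *	scsi_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */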
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
		scsi_rescan_device(&sdev->sdev_gendev);
		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
		envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
		break;
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
		break;
	case SDEV_EVT_LUN_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
		break;
	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
		envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
		break;
	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

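/**
 *	scsi_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */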
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	enum scsi_device_event evt_type;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
		if (test_and_clear_bit(evt_type, sdev->pending_events))
			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

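/**
 *	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */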
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

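/**
 *	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event, or NULL on failure.
 */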
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
	case SDEV_EVT_LUN_CHANGE_REPORTED:
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

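/**
 *	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type.
 */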
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);

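/**
 * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
 * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
 *
 * Only meaningful for the legacy (non-blk-mq) request path.
 */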
static int scsi_request_fn_active(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int request_fn_active;

	WARN_ON_ONCE(sdev->host->use_blk_mq);

	spin_lock_irq(q->queue_lock);
	request_fn_active = q->request_fn_active;
	spin_unlock_irq(q->queue_lock);

	return request_fn_active;
}

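/*
 * Wait until the ongoing shost->hostt->queuecommand() calls that are
 * invoked from scsi_request_fn() have finished.
 */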
static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
{
	WARN_ON_ONCE(sdev->host->use_blk_mq);

	while (scsi_request_fn_active(sdev))
		msleep(20);
}

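/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only power management requests will be accepted, all
 *	others will be deferred.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if unsuccessful or an error if not.
 */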
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err;

	/*
	 * It is allowed to call scsi_device_quiesce() multiple times from
	 * the same context but concurrent scsi_device_quiesce() calls are
	 * not allowed.
	 */
	WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);

	blk_set_preempt_only(q);

	blk_mq_freeze_queue(q);
	/*
	 * Ensure that the effect of blk_set_preempt_only() will be visible
	 * for percpu_ref_tryget() callers that occur after the queue
	 * unfreeze even if the queue was already frozen before this function
	 * was called. See also https://lwn.net/Articles/573497/.
	 */
	synchronize_rcu();
	blk_mq_unfreeze_queue(q);

	mutex_lock(&sdev->state_mutex);
	err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err == 0)
		sdev->quiesced_by = current;
	else
		blk_clear_preempt_only(q);
	mutex_unlock(&sdev->state_mutex);

	return err;
}
EXPORT_SYMBOL(scsi_device_quiesce);

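/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 */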
void scsi_device_resume(struct scsi_device *sdev)
{
	/* check if the device state was mutated prior to resume, and if
	 * so assume the state is being managed elsewhere (for example
	 * device deleted during suspend)
	 */
	mutex_lock(&sdev->state_mutex);
	WARN_ON_ONCE(!sdev->quiesced_by);
	sdev->quiesced_by = NULL;
	blk_clear_preempt_only(sdev->request_queue);
	if (sdev->sdev_state == SDEV_QUIESCE)
		scsi_device_set_state(sdev, SDEV_RUNNING);
	mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

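/**
 * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Pause SCSI command processing on the specified device. Does not sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_BLOCK state (which must be
 * a legal transition). When the device is in this state, command processing
 * is paused until the device leaves the SDEV_BLOCK state. See also
 * scsi_internal_device_unblock_nowait().
 */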
int scsi_internal_device_block_nowait(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	if (q->mq_ops) {
		blk_mq_quiesce_queue_nowait(q);
	} else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);

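/**
 * scsi_internal_device_block - try to transition to the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Pause SCSI command processing on the specified device and wait until all
 * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Note:
 * This routine transitions the device to the SDEV_BLOCK state (which must be
 * a legal transition). When the device is in this state, command processing
 * is paused until the device leaves the SDEV_BLOCK state. See also
 * scsi_internal_device_unblock().
 */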
static int scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err;

	mutex_lock(&sdev->state_mutex);
	err = scsi_internal_device_block_nowait(sdev);
	if (err == 0) {
		if (q->mq_ops)
			blk_mq_quiesce_queue(q);
		else
			scsi_wait_for_queuecommand(sdev);
	}
	mutex_unlock(&sdev->state_mutex);

	return err;
}

void scsi_start_queue(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;

	if (q->mq_ops) {
		blk_mq_unquiesce_queue(q);
	} else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

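/**
 * scsi_internal_device_unblock_nowait - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Restart the device queue for a previously suspended SCSI device. Does not
 * sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_RUNNING state or to one of
 * the offline states (which must be a legal transition) allowing the midlayer
 * to goose the queue for this device.
 */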
int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
					enum scsi_device_state new_state)
{
	/*
	 * Try to transition the scsi device to SDEV_RUNNING or one of the
	 * offlined states and goose the device queue if successful.
	 */
	switch (sdev->sdev_state) {
	case SDEV_BLOCK:
	case SDEV_TRANSPORT_OFFLINE:
		sdev->sdev_state = new_state;
		break;
	case SDEV_CREATED_BLOCK:
		if (new_state == SDEV_TRANSPORT_OFFLINE ||
		    new_state == SDEV_OFFLINE)
			sdev->sdev_state = new_state;
		else
			sdev->sdev_state = SDEV_CREATED;
		break;
	case SDEV_CANCEL:
	case SDEV_OFFLINE:
		break;
	default:
		return -EINVAL;
	}
	scsi_start_queue(sdev);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);

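/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Restart the device queue for a previously suspended SCSI device. May sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_RUNNING state or to one of
 * the offline states (which must be a legal transition) allowing the midlayer
 * to goose the queue for this device.
 */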
static int scsi_internal_device_unblock(struct scsi_device *sdev,
					enum scsi_device_state new_state)
{
	int ret;

	mutex_lock(&sdev->state_mutex);
	ret = scsi_internal_device_unblock_nowait(sdev, new_state);
	mutex_unlock(&sdev->state_mutex);

	return ret;
}

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), data,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), &new_state,
					device_unblock);
	else
		device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);

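/**
 * scsi_kmap_atomic_sg - find and atomically map an sg element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */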
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

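/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously
 *			   mapped with scsi_kmap_atomic_sg
 * @virt: virtual address to be unmapped
 */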
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);

void sdev_disable_disk_events(struct scsi_device *sdev)
{
	atomic_inc(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_disable_disk_events);

void sdev_enable_disk_events(struct scsi_device *sdev)
{
	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
		return;
	atomic_dec(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_enable_disk_events);

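/**
 * scsi_vpd_lun_id - return a unique device identification
 * @sdev: SCSI device
 * @id:   buffer for the identification
 * @id_len: length of the buffer
 *
 * Copies a unique device identification into @id based
 * on the information in the VPD page 0x83 of the device.
 * The string will be formatted as a SCSI name string.
 *
 * Returns the length of the identification or error on failure.
 * If the identifier is longer than the supplied buffer the actual
 * identifier length is returned and the buffer is not zero-padded.
 */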
int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{
	u8 cur_id_type = 0xff;
	u8 cur_id_size = 0;
	const unsigned char *d, *cur_id_str;
	const struct scsi_vpd *vpd_pg83;
	int id_size = -EINVAL;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	/*
	 * Look for the correct descriptor.
	 * Order of preference for lun descriptor:
	 * - SCSI name string
	 * - NAA IEEE Registered Extended
	 * - EUI-64 based 16-byte
	 * - EUI-64 based 12-byte
	 * - NAA IEEE Registered
	 * - NAA IEEE Extended
	 * - T10 Vendor ID
	 * as longer descriptors reduce the likelihood
	 * of identification clashes.
	 */

	/* The id string must be at least 20 bytes + terminating NULL byte */
	if (id_len < 21) {
		rcu_read_unlock();
		return -EINVAL;
	}

	memset(id, 0, id_len);
	d = vpd_pg83->data + 4;
	while (d < vpd_pg83->data + vpd_pg83->len) {
		/* Skip designators not referring to the LUN */
		if ((d[1] & 0x30) != 0x00)
			goto next_desig;

		switch (d[1] & 0xf) {
		case 0x1:
			/* T10 Vendor ID */
			if (cur_id_size > d[3])
				break;
			/* Prefer anything */
			if (cur_id_type > 0x01 && cur_id_type != 0xff)
				break;
			cur_id_size = d[3];
			if (cur_id_size + 4 > id_len)
				cur_id_size = id_len - 4;
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			id_size = snprintf(id, id_len, "t10.%*pE",
					   cur_id_size, cur_id_str);
			break;
		case 0x2:
			/* EUI-64 */
			if (cur_id_size > d[3])
				break;
			/* Prefer NAA IEEE Registered Extended */
			if (cur_id_type == 0x3 &&
			    cur_id_size == d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "eui.%8phN",
						   cur_id_str);
				break;
			case 12:
				id_size = snprintf(id, id_len,
						   "eui.%12phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "eui.%16phN",
						   cur_id_str);
				break;
			default:
				cur_id_size = 0;
				break;
			}
			break;
		case 0x3:
			/* NAA */
			if (cur_id_size > d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "naa.%8phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "naa.%16phN",
						   cur_id_str);
				break;
			default:
				cur_id_size = 0;
				break;
			}
			break;
		case 0x8:
			/* SCSI name string */
			if (cur_id_size + 4 > d[3])
				break;
			/* Prefer others for truncated descriptor */
			if (cur_id_size && d[3] > id_len)
				break;
			cur_id_size = id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			if (cur_id_size >= id_len)
				cur_id_size = id_len - 1;
			memcpy(id, cur_id_str, cur_id_size);
			/* Decrease priority for truncated descriptor */
			if (cur_id_size != id_size)
				cur_id_size = 6;
			break;
		default:
			break;
		}
next_desig:
		d += d[3] + 4;
	}
	rcu_read_unlock();

	return id_size;
}
EXPORT_SYMBOL(scsi_vpd_lun_id);

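/**
 * scsi_vpd_tpg_id - return a target port group identifier
 * @sdev: SCSI device
 * @rel_id: pointer to return relative target port in if not %NULL
 *
 * Returns the Target Port Group identifier from the information
 * from VPD page 0x83 of the device.
 *
 * Returns the identifier or error on failure.
 */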
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{
	const unsigned char *d;
	const struct scsi_vpd *vpd_pg83;
	int group_id = -EAGAIN, rel_port = -1;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	d = vpd_pg83->data + 4;
	while (d < vpd_pg83->data + vpd_pg83->len) {
		switch (d[1] & 0xf) {
		case 0x4:
			/* Relative target port */
			rel_port = get_unaligned_be16(&d[6]);
			break;
		case 0x5:
			/* Target port group */
			group_id = get_unaligned_be16(&d[6]);
			break;
		default:
			break;
		}
		d += d[3] + 4;
	}
	rcu_read_unlock();

	if (group_id >= 0 && rel_id && rel_port != -1)
		*rel_id = rel_port;

	return group_id;
}
EXPORT_SYMBOL(scsi_vpd_tpg_id);