1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/module.h>
18#include <linux/sched.h>
19#include <linux/timer.h>
20#include <linux/string.h>
21#include <linux/kernel.h>
22#include <linux/freezer.h>
23#include <linux/kthread.h>
24#include <linux/interrupt.h>
25#include <linux/blkdev.h>
26#include <linux/delay.h>
27
28#include <scsi/scsi.h>
29#include <scsi/scsi_cmnd.h>
30#include <scsi/scsi_dbg.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_transport.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_ioctl.h>
36
37#include "scsi_priv.h"
38#include "scsi_logging.h"
39#include "scsi_transport_api.h"
40
41#define SENSE_TIMEOUT (10*HZ)
42
43
44
45
46
47#define BUS_RESET_SETTLE_TIME (10)
48#define HOST_RESET_SETTLE_TIME (10)
49
50
51void scsi_eh_wakeup(struct Scsi_Host *shost)
52{
53 if (shost->host_busy == shost->host_failed) {
54 wake_up_process(shost->ehandler);
55 SCSI_LOG_ERROR_RECOVERY(5,
56 printk("Waking error handler thread\n"));
57 }
58}
59
60
61
62
63
64
65
66void scsi_schedule_eh(struct Scsi_Host *shost)
67{
68 unsigned long flags;
69
70 spin_lock_irqsave(shost->host_lock, flags);
71
72 if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
73 scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
74 shost->host_eh_scheduled++;
75 scsi_eh_wakeup(shost);
76 }
77
78 spin_unlock_irqrestore(shost->host_lock, flags);
79}
80EXPORT_SYMBOL_GPL(scsi_schedule_eh);
81
82
83
84
85
86
87
88
89
/**
 * scsi_eh_scmd_add - add a scsi cmd to the error handling queue.
 * @scmd:	scmd to run eh on.
 * @eh_flag:	optional SCSI_EH flag OR'ed into scmd->eh_eflags.
 *
 * Return value:
 *	0 on failure (no eh thread, or the host could not be moved into a
 *	recovery state), 1 if the command was queued for error handling.
 */
int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
{
	struct Scsi_Host *shost = scmd->device->host;
	unsigned long flags;
	int ret = 0;

	/* No error-handler thread: nobody to hand the command to. */
	if (!shost->ehandler)
		return 0;

	spin_lock_irqsave(shost->host_lock, flags);
	/*
	 * Move the host into a recovery state; if neither RECOVERY nor
	 * CANCEL_RECOVERY is a legal transition from the current state,
	 * give up.
	 */
	if (scsi_host_set_state(shost, SHOST_RECOVERY))
		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
			goto out_unlock;

	ret = 1;
	scmd->eh_eflags |= eh_flag;
	list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
	shost->host_failed++;
	/* Wake the eh thread if all outstanding commands have now failed. */
	scsi_eh_wakeup(shost);
 out_unlock:
	spin_unlock_irqrestore(shost->host_lock, flags);
	return ret;
}
113
114
115
116
117
118
119
120
121
122
123
/**
 * scsi_times_out - Timeout function for normal scsi commands.
 * @req:	request that is timing out.
 *
 * The transport class, then the low-level driver, gets a chance to
 * handle the timeout.  If neither does, the command is queued for
 * error handling; if it cannot even be queued, it is completed
 * immediately with DID_TIME_OUT.
 */
enum blk_eh_timer_return scsi_times_out(struct request *req)
{
	struct scsi_cmnd *scmd = req->special;
	enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;

	scsi_log_completion(scmd, TIMEOUT_ERROR);

	/* Transport eh_timed_out takes precedence over the host template's. */
	if (scmd->device->host->transportt->eh_timed_out)
		rtn = scmd->device->host->transportt->eh_timed_out(scmd);
	else if (scmd->device->host->hostt->eh_timed_out)
		rtn = scmd->device->host->hostt->eh_timed_out(scmd);

	/*
	 * Timeout not handled and the command could not be added to the
	 * eh queue: fail it directly with DID_TIME_OUT.
	 */
	if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
		     !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
		scmd->result |= DID_TIME_OUT << 16;
		rtn = BLK_EH_HANDLED;
	}

	return rtn;
}
144
145
146
147
148
149
150
151
152
153
154
155
156int scsi_block_when_processing_errors(struct scsi_device *sdev)
157{
158 int online;
159
160 wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
161
162 online = scsi_device_online(sdev);
163
164 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __func__,
165 online));
166
167 return online;
168}
169EXPORT_SYMBOL(scsi_block_when_processing_errors);
170
171#ifdef CONFIG_SCSI_LOGGING
172
173
174
175
176
/**
 * scsi_eh_prt_fail_stats - Log info on failures.
 * @shost:	scsi host being recovered.
 * @work_q:	queue of scsi commands awaiting error handling.
 *
 * Logging only (compiled under CONFIG_SCSI_LOGGING): for every device on
 * the host, count how many queued commands belong to it, split into
 * cancels (SCSI_EH_CANCEL_CMD) and other failures.
 */
static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
					  struct list_head *work_q)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	int total_failures = 0;
	int cmd_failed = 0;
	int cmd_cancel = 0;
	int devices_failed = 0;

	shost_for_each_device(sdev, shost) {
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (scmd->device == sdev) {
				++total_failures;
				if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
					++cmd_cancel;
				else
					++cmd_failed;
			}
		}

		if (cmd_cancel || cmd_failed) {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					    "%s: cmds failed: %d, cancel: %d\n",
					    __func__, cmd_failed,
					    cmd_cancel));
			/* Reset the per-device counters for the next device. */
			cmd_cancel = 0;
			cmd_failed = 0;
			++devices_failed;
		}
	}

	SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d"
					  " devices require eh work\n",
					  total_failures, devices_failed));
}
214#endif
215
216
217
218
219
220
221
222
223
224
225
226
/**
 * scsi_check_sense - Examine the command's sense data and decide disposition.
 * @scmd:	command whose sense buffer is examined.
 *
 * Return value:
 *	SUCCESS, FAILED, NEEDS_RETRY or ADD_TO_MLQUEUE.
 */
static int scsi_check_sense(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct scsi_sense_hdr sshdr;

	/* No valid sense data — nothing to base a decision on. */
	if (! scsi_command_normalize_sense(scmd, &sshdr))
		return FAILED;

	/* Deferred errors relate to an earlier command; retry this one. */
	if (scsi_sense_is_deferred(&sshdr))
		return NEEDS_RETRY;

	/*
	 * An attached device handler (multipath etc.) gets first crack at
	 * the sense data; honour its verdict unless it declines.
	 */
	if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
			sdev->scsi_dh_data->scsi_dh->check_sense) {
		int rc;

		rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
		if (rc != SCSI_RETURN_NOT_HANDLED)
			return rc;
		/* handler did not want to decide — fall through to us */
	}

	/*
	 * Filemark/EOM/ILI handling: in fixed-format sense (0x70) those
	 * bits live in the top of byte 2; in descriptor format they are
	 * in a type-4 (stream commands) descriptor.  If any is set,
	 * treat the command as successful (tape-style termination).
	 */
	if (sshdr.response_code == 0x70) {
		/* fixed format */
		if (scmd->sense_buffer[2] & 0xe0)
			return SUCCESS;
	} else {
		/*
		 * descriptor format: assumes the stream-commands
		 * descriptor, if present, is the first one at offset 8
		 * -- NOTE(review): confirm against SPC descriptor layout.
		 */
		if ((sshdr.additional_length > 3) &&
		    (scmd->sense_buffer[8] == 0x4) &&
		    (scmd->sense_buffer[11] & 0xe0))
			return SUCCESS;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
		return SUCCESS;
	case RECOVERED_ERROR:
		/* device recovered on its own; the command succeeded */
		return SUCCESS;

	case ABORTED_COMMAND:
		/* asc 0x10: protection-information (DIF) error — do not retry */
		if (sshdr.asc == 0x10)
			return SUCCESS;

		return NEEDS_RETRY;
	case NOT_READY:
	case UNIT_ATTENTION:
		/*
		 * If we are expecting a check condition / unit attention
		 * (e.g. after a reset we issued), consume the flag and
		 * retry quietly.
		 */
		if (scmd->device->expecting_cc_ua) {
			scmd->device->expecting_cc_ua = 0;
			return NEEDS_RETRY;
		}
		/*
		 * 04/01: LUN in process of becoming ready — it will get
		 * there on its own, just retry.
		 */
		if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
			return NEEDS_RETRY;
		/*
		 * 04/02: initializing command required.  Returning FAILED
		 * routes the command into eh, which will issue a
		 * START UNIT (see scsi_eh_stu) when restarts are allowed.
		 */
		if (scmd->device->allow_restart &&
		    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
			return FAILED;
		return SUCCESS;

		/* these three are not errors the eh can do anything about */
	case COPY_ABORTED:
	case VOLUME_OVERFLOW:
	case MISCOMPARE:
		return SUCCESS;

	case MEDIUM_ERROR:
		/*
		 * 0x11 unrecovered read error, 0x13 AMNF data field,
		 * 0x14 record not found: the medium itself is bad,
		 * retrying cannot help.
		 */
		if (sshdr.asc == 0x11 ||
		    sshdr.asc == 0x13 ||
		    sshdr.asc == 0x14) {
			return SUCCESS;
		}
		return NEEDS_RETRY;

	case HARDWARE_ERROR:
		/* some devices want hardware errors retried via the queue */
		if (scmd->device->retry_hwerror)
			return ADD_TO_MLQUEUE;
		else
			return SUCCESS;

	case ILLEGAL_REQUEST:
	case BLANK_CHECK:
	case DATA_PROTECT:
	default:
		return SUCCESS;
	}
}
333
334
335
336
337
338
339
340
341
342
343
/**
 * scsi_eh_completed_normally - Did the eh command complete normally?
 * @scmd:	eh command (e.g. REQUEST SENSE, TEST UNIT READY) to examine.
 *
 * Checks host byte, message byte and status byte in turn to decide
 * whether a command issued from within error handling worked.
 *
 * Return value:
 *	SUCCESS, FAILED or NEEDS_RETRY.
 */
static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
{
	/*
	 * DID_RESET means the command was killed by a reset mid-flight;
	 * the sense data (if any) tells us whether the device is usable
	 * again, so defer to scsi_check_sense().
	 */
	if (host_byte(scmd->result) == DID_RESET) {
		return scsi_check_sense(scmd);
	}
	/* Any other host-level error: the eh command did not get through. */
	if (host_byte(scmd->result) != DID_OK)
		return FAILED;

	/* Anything but COMMAND_COMPLETE in the message byte is a failure. */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/* Finally, judge by the SCSI status byte. */
	switch (status_byte(scmd->result)) {
	case GOOD:
	case COMMAND_TERMINATED:
		return SUCCESS;
	case CHECK_CONDITION:
		return scsi_check_sense(scmd);
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
		/* these should never happen for an eh command, but if
		 * they do, the command clearly reached the device */
		return SUCCESS;
	case RESERVATION_CONFLICT:
		/* the device is reserved elsewhere, but it is responsive */
		return SUCCESS;
	case BUSY:
	case QUEUE_FULL:
	default:
		return FAILED;
	}
	return FAILED;
}
397
398
399
400
401
402static void scsi_eh_done(struct scsi_cmnd *scmd)
403{
404 struct completion *eh_action;
405
406 SCSI_LOG_ERROR_RECOVERY(3,
407 printk("%s scmd: %p result: %x\n",
408 __func__, scmd, scmd->result));
409
410 eh_action = scmd->device->host->eh_action;
411 if (eh_action)
412 complete(eh_action);
413}
414
415
416
417
418
419static int scsi_try_host_reset(struct scsi_cmnd *scmd)
420{
421 unsigned long flags;
422 int rtn;
423
424 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
425 __func__));
426
427 if (!scmd->device->host->hostt->eh_host_reset_handler)
428 return FAILED;
429
430 rtn = scmd->device->host->hostt->eh_host_reset_handler(scmd);
431
432 if (rtn == SUCCESS) {
433 if (!scmd->device->host->hostt->skip_settle_delay)
434 ssleep(HOST_RESET_SETTLE_TIME);
435 spin_lock_irqsave(scmd->device->host->host_lock, flags);
436 scsi_report_bus_reset(scmd->device->host,
437 scmd_channel(scmd));
438 spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
439 }
440
441 return rtn;
442}
443
444
445
446
447
448static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
449{
450 unsigned long flags;
451 int rtn;
452
453 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
454 __func__));
455
456 if (!scmd->device->host->hostt->eh_bus_reset_handler)
457 return FAILED;
458
459 rtn = scmd->device->host->hostt->eh_bus_reset_handler(scmd);
460
461 if (rtn == SUCCESS) {
462 if (!scmd->device->host->hostt->skip_settle_delay)
463 ssleep(BUS_RESET_SETTLE_TIME);
464 spin_lock_irqsave(scmd->device->host->host_lock, flags);
465 scsi_report_bus_reset(scmd->device->host,
466 scmd_channel(scmd));
467 spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
468 }
469
470 return rtn;
471}
472
473static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
474{
475 sdev->was_reset = 1;
476 sdev->expecting_cc_ua = 1;
477}
478
479
480
481
482
483
484
485
486
487
488
489static int scsi_try_target_reset(struct scsi_cmnd *scmd)
490{
491 unsigned long flags;
492 int rtn;
493
494 if (!scmd->device->host->hostt->eh_target_reset_handler)
495 return FAILED;
496
497 rtn = scmd->device->host->hostt->eh_target_reset_handler(scmd);
498 if (rtn == SUCCESS) {
499 spin_lock_irqsave(scmd->device->host->host_lock, flags);
500 __starget_for_each_device(scsi_target(scmd->device), NULL,
501 __scsi_report_device_reset);
502 spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
503 }
504
505 return rtn;
506}
507
508
509
510
511
512
513
514
515
516
517
518static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
519{
520 int rtn;
521
522 if (!scmd->device->host->hostt->eh_device_reset_handler)
523 return FAILED;
524
525 rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd);
526 if (rtn == SUCCESS)
527 __scsi_report_device_reset(scmd->device, NULL);
528 return rtn;
529}
530
531static int __scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
532{
533 if (!scmd->device->host->hostt->eh_abort_handler)
534 return FAILED;
535
536 return scmd->device->host->hostt->eh_abort_handler(scmd);
537}
538
539
540
541
542
543
544
545
546
547
548
549
550static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
551{
552
553
554
555
556 if (scmd->serial_number == 0)
557 return SUCCESS;
558 return __scsi_try_to_abort_cmd(scmd);
559}
560
561static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
562{
563 if (__scsi_try_to_abort_cmd(scmd) != SUCCESS)
564 if (scsi_try_bus_device_reset(scmd) != SUCCESS)
565 if (scsi_try_target_reset(scmd) != SUCCESS)
566 if (scsi_try_bus_reset(scmd) != SUCCESS)
567 scsi_try_host_reset(scmd);
568}
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
/**
 * scsi_eh_prep_cmnd  - Save a scsi command info as part of error recovery.
 * @scmd:       SCSI command structure to hijack.
 * @ses:        structure to save the hijacked state into.
 * @cmnd:       CDB to send (used when @sense_bytes == 0); may be NULL.
 * @cmnd_size:  size in bytes of @cmnd.
 * @sense_bytes: if non-zero, build a REQUEST SENSE into the command's
 *              own sense buffer instead of sending @cmnd.
 *
 * Saves the command's current CDB, data buffer, direction and result
 * into @ses, then rewires @scmd to carry an eh command.  Must be paired
 * with scsi_eh_restore_cmnd().
 */
void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
			unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
{
	struct scsi_device *sdev = scmd->device;

	/* Save everything we are about to clobber. */
	ses->cmd_len = scmd->cmd_len;
	ses->cmnd = scmd->cmnd;
	ses->data_direction = scmd->sc_data_direction;
	ses->sdb = scmd->sdb;
	ses->next_rq = scmd->request->next_rq;
	ses->result = scmd->result;
	ses->underflow = scmd->underflow;
	ses->prot_op = scmd->prot_op;

	/* Point the command at the save area's private CDB buffer. */
	scmd->prot_op = SCSI_PROT_NORMAL;
	scmd->cmnd = ses->eh_cmnd;
	memset(scmd->cmnd, 0, BLK_MAX_CDB);
	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
	scmd->request->next_rq = NULL;

	if (sense_bytes) {
		/*
		 * Build a REQUEST SENSE that reads directly into the
		 * command's own sense buffer via a one-entry sg list.
		 */
		scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
					 sense_bytes);
		sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
			    scmd->sdb.length);
		scmd->sdb.table.sgl = &ses->sense_sgl;
		scmd->sc_data_direction = DMA_FROM_DEVICE;
		scmd->sdb.table.nents = 1;
		scmd->cmnd[0] = REQUEST_SENSE;
		scmd->cmnd[4] = scmd->sdb.length;
		scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	} else {
		/* Caller-supplied CDB, no data transfer. */
		scmd->sc_data_direction = DMA_NONE;
		if (cmnd) {
			BUG_ON(cmnd_size > BLK_MAX_CDB);
			memcpy(scmd->cmnd, cmnd, cmnd_size);
			scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
		}
	}

	scmd->underflow = 0;

	/* Pre-SCSI-3 devices encode the LUN in byte 1 of the CDB. */
	if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN)
		scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
			(sdev->lun << 5 & 0xe0);

	/*
	 * Zero the sense buffer so we can tell whether the eh command
	 * actually produced sense data.
	 */
	memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
}
643EXPORT_SYMBOL(scsi_eh_prep_cmnd);
644
645
646
647
648
649
650
651
652void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
653{
654
655
656
657 scmd->cmd_len = ses->cmd_len;
658 scmd->cmnd = ses->cmnd;
659 scmd->sc_data_direction = ses->data_direction;
660 scmd->sdb = ses->sdb;
661 scmd->request->next_rq = ses->next_rq;
662 scmd->result = ses->result;
663 scmd->underflow = ses->underflow;
664 scmd->prot_op = ses->prot_op;
665}
666EXPORT_SYMBOL(scsi_eh_restore_cmnd);
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
/**
 * scsi_send_eh_cmnd  - submit a scsi command as part of error recovery.
 * @scmd:       SCSI command structure to hijack.
 * @cmnd:       CDB to send; NULL together with @sense_bytes for REQUEST SENSE.
 * @cmnd_size:  size in bytes of @cmnd.
 * @timeout:    timeout for this request, in jiffies.
 * @sense_bytes: size of sense data to copy; 0 means send @cmnd instead.
 *
 * Hijacks @scmd, queues the eh command directly to the driver and waits
 * (synchronously) for it to complete or time out.  On timeout the command
 * is aborted with escalating resets.  The original command state is
 * restored before returning.
 *
 * Return value:
 *	SUCCESS, FAILED or NEEDS_RETRY.
 */
static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
			     int cmnd_size, int timeout, unsigned sense_bytes)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long timeleft;
	unsigned long flags;
	struct scsi_eh_save ses;
	int rtn;

	scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
	/* scsi_eh_done() will complete this when the command finishes */
	shost->eh_action = &done;

	/* queuecommand is called with host_lock held (old-style API) */
	spin_lock_irqsave(shost->host_lock, flags);
	scsi_log_send(scmd);
	shost->hostt->queuecommand(scmd, scsi_eh_done);
	spin_unlock_irqrestore(shost->host_lock, flags);

	timeleft = wait_for_completion_timeout(&done, timeout);

	/* Clear before judging the result so a late completion is ignored. */
	shost->eh_action = NULL;

	scsi_log_completion(scmd, SUCCESS);

	SCSI_LOG_ERROR_RECOVERY(3,
		printk("%s: scmd: %p, timeleft: %ld\n",
			__func__, scmd, timeleft));

	/*
	 * timeleft != 0 means the command completed; map its disposition
	 * into the three values our callers understand.  timeleft == 0
	 * means it timed out: abort it (escalating to resets) and fail.
	 */
	if (timeleft) {
		rtn = scsi_eh_completed_normally(scmd);
		SCSI_LOG_ERROR_RECOVERY(3,
			printk("%s: scsi_eh_completed_normally %x\n",
			       __func__, rtn));

		switch (rtn) {
		case SUCCESS:
		case NEEDS_RETRY:
		case FAILED:
			break;
		case ADD_TO_MLQUEUE:
			rtn = NEEDS_RETRY;
			break;
		default:
			rtn = FAILED;
			break;
		}
	} else {
		scsi_abort_eh_cmnd(scmd);
		rtn = FAILED;
	}

	scsi_eh_restore_cmnd(scmd, &ses);
	return rtn;
}
743
744
745
746
747
748
749
750
751
752
/**
 * scsi_request_sense - Request sense data from a particular target.
 * @scmd:	SCSI cmd for request sense.
 *
 * Thin wrapper: sends REQUEST SENSE via scsi_send_eh_cmnd() with the
 * maximum sense length (~0) and the standard sense timeout.
 */
static int scsi_request_sense(struct scsi_cmnd *scmd)
{
	return scsi_send_eh_cmnd(scmd, NULL, 0, SENSE_TIMEOUT, ~0);
}
757
758
759
760
761
762
763
764
765
766
767
768
769
/**
 * scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
 * @scmd:	SCSI cmd to be handled.
 * @done_q:	Queue for processed commands.
 *
 * Drops the host's failed-command count, clears the eh flags and moves
 * the command from the work queue onto @done_q for final flushing.
 */
void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
{
	scmd->device->host->host_failed--;
	scmd->eh_eflags = 0;
	list_move_tail(&scmd->eh_entry, done_q);
}
776EXPORT_SYMBOL(scsi_eh_finish_cmd);
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
/**
 * scsi_eh_get_sense - Request sense for commands that need it.
 * @work_q:	Queue of commands to process.
 * @done_q:	Queue of processed commands.
 *
 * For each queued command that is not a cancel and has no valid sense
 * data yet, issue a REQUEST SENSE and re-evaluate its disposition.
 * Commands whose fate is now decided are moved to @done_q; the rest
 * stay on @work_q for stronger recovery actions.
 *
 * Return value:
 *	non-zero when @work_q has been fully drained.
 */
int scsi_eh_get_sense(struct list_head *work_q,
		      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	int rtn;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		/* cancels don't need sense; valid sense is already there */
		if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
		    SCSI_SENSE_VALID(scmd))
			continue;

		SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
						  "%s: requesting sense\n",
						  current->comm));
		rtn = scsi_request_sense(scmd);
		if (rtn != SUCCESS)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("sense requested for %p"
						  " result %x\n", scmd,
						  scmd->result));
		SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd));

		rtn = scsi_decide_disposition(scmd);

		/*
		 * SUCCESS here means the sense told us the command is done
		 * (possibly after its own retries) — exhaust the retry
		 * counter so it completes rather than being re-queued.
		 * NEEDS_RETRY also finishes here (the retry happens via
		 * scsi_eh_flush_done_q); anything else stays on work_q.
		 */
		if (rtn == SUCCESS)
			scmd->retries = scmd->allowed;
		else if (rtn != NEEDS_RETRY)
			continue;

		scsi_eh_finish_cmd(scmd, done_q);
	}

	return list_empty(work_q);
}
841EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
842
843
844
845
846
847
848
849
/**
 * scsi_eh_tur - Send TEST UNIT READY to check whether a device responds.
 * @scmd:	command on the device to probe.
 *
 * Return value:
 *	0 - device is ready (or became ready after one retry),
 *	1 - device did not respond usefully.
 */
static int scsi_eh_tur(struct scsi_cmnd *scmd)
{
	static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
	int retry_cnt = 1, rtn;

retry_tur:
	rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);

	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
		__func__, scmd, rtn));

	switch (rtn) {
	case NEEDS_RETRY:
		/* retry once, then treat a further NEEDS_RETRY as success */
		if (retry_cnt--)
			goto retry_tur;
		/* fallthrough */
	case SUCCESS:
		return 0;
	default:
		return 1;
	}
}
872
873
874
875
876
877
878
879
880
881
882
883
884
/**
 * scsi_eh_abort_cmds - abort timed-out (cancel-flagged) commands.
 * @work_q:	queue of commands to process.
 * @done_q:	queue of processed commands.
 *
 * For each command flagged SCSI_EH_CANCEL_CMD, ask the driver to abort
 * it; if the abort worked and the device (is offline or) still answers
 * TEST UNIT READY, the command is finished.  Commands whose abort fails
 * stay on @work_q for stronger recovery.
 *
 * Return value:
 *	non-zero when @work_q has been fully drained.
 */
static int scsi_eh_abort_cmds(struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	int rtn;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
			continue;
		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
						  "0x%p\n", current->comm,
						  scmd));
		rtn = scsi_try_to_abort_cmd(scmd);
		if (rtn == SUCCESS) {
			scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
			/* offline device: nothing more to check; online:
			 * scsi_eh_tur() == 0 means the device is ready */
			if (!scsi_device_online(scmd->device) ||
			    !scsi_eh_tur(scmd)) {
				scsi_eh_finish_cmd(scmd, done_q);
			}

		} else
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
							  " cmd failed:"
							  "0x%p\n",
							  current->comm,
							  scmd));
	}

	return list_empty(work_q);
}
915
916
917
918
919
920
921
922
923static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
924{
925 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
926
927 if (scmd->device->allow_restart) {
928 int i, rtn = NEEDS_RETRY;
929
930 for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
931 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
932
933 if (rtn == SUCCESS)
934 return 0;
935 }
936
937 return 1;
938}
939
940
941
942
943
944
945
946
947
948
949
/**
 * scsi_eh_stu - send START UNIT to devices whose sense says they need it.
 * @shost:	host being recovered.
 * @work_q:	queue of commands to process.
 * @done_q:	queue of processed commands.
 *
 * A command with valid sense for which scsi_check_sense() returned FAILED
 * indicates a device asking to be started (see the 04/02 case there).
 * Start such devices and, if they then answer TEST UNIT READY, finish
 * every queued command belonging to them.
 *
 * Return value:
 *	non-zero when @work_q has been fully drained.
 */
static int scsi_eh_stu(struct Scsi_Host *shost,
			      struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *stu_scmd, *next;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		stu_scmd = NULL;
		/* find one representative command on this device */
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
			    scsi_check_sense(scmd) == FAILED ) {
				stu_scmd = scmd;
				break;
			}

		if (!stu_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending START_UNIT to sdev:"
						  " 0x%p\n", current->comm, sdev));

		if (!scsi_eh_try_stu(stu_scmd)) {
			/* started OK: finish all of this device's commands
			 * if it is offline or now passes TEST UNIT READY */
			if (!scsi_device_online(sdev) ||
			    !scsi_eh_tur(stu_scmd)) {
				list_for_each_entry_safe(scmd, next,
							  work_q, eh_entry) {
					if (scmd->device == sdev)
						scsi_eh_finish_cmd(scmd, done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
						printk("%s: START_UNIT failed to sdev:"
						       " 0x%p\n", current->comm, sdev));
		}
	}

	return list_empty(work_q);
}
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
/**
 * scsi_eh_bus_device_reset - send bdr if needed.
 * @shost:	scsi host being recovered.
 * @work_q:	queue of commands to process.
 * @done_q:	queue of processed commands.
 *
 * Try a bus-device (LUN) reset for every device that still has commands
 * queued.  If the reset succeeds and the device (is offline or) answers
 * TEST UNIT READY, all of its queued commands are finished.
 *
 * Return value:
 *	non-zero when @work_q has been fully drained.
 */
static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
				    struct list_head *work_q,
				    struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *bdr_scmd, *next;
	struct scsi_device *sdev;
	int rtn;

	shost_for_each_device(sdev, shost) {
		bdr_scmd = NULL;
		/* any command on this device serves as the reset vehicle */
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev) {
				bdr_scmd = scmd;
				break;
			}

		if (!bdr_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BDR sdev:"
						  " 0x%p\n", current->comm,
						  sdev));
		rtn = scsi_try_bus_device_reset(bdr_scmd);
		if (rtn == SUCCESS) {
			if (!scsi_device_online(sdev) ||
			    !scsi_eh_tur(bdr_scmd)) {
				list_for_each_entry_safe(scmd, next,
							 work_q, eh_entry) {
					if (scmd->device == sdev)
						scsi_eh_finish_cmd(scmd,
								   done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BDR"
							  " failed sdev:"
							  "0x%p\n",
							  current->comm,
							  sdev));
		}
	}

	return list_empty(work_q);
}
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
/**
 * scsi_eh_target_reset - send target reset if needed.
 * @shost:	scsi host being recovered.
 * @work_q:	queue of commands to process.
 * @done_q:	queue of processed commands.
 *
 * Walks target ids in ascending order, resetting every target that still
 * has commands queued; commands on a successfully-reset target whose
 * device (is offline or) answers TEST UNIT READY are finished.
 *
 * Return value:
 *	non-zero when @work_q has been fully drained.
 */
static int scsi_eh_target_reset(struct Scsi_Host *shost,
				struct list_head *work_q,
				struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *tgtr_scmd, *next;
	unsigned int id = 0;
	int rtn;

	do {
		/* look for a command on exactly target @id */
		tgtr_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (id == scmd_id(scmd)) {
				tgtr_scmd = scmd;
				break;
			}
		}
		if (!tgtr_scmd) {
			/* none: find the smallest remaining id above @id */
			list_for_each_entry(scmd, work_q, eh_entry) {
				if (scmd_id(scmd) > id &&
				    (!tgtr_scmd ||
				     scmd_id(tgtr_scmd) > scmd_id(scmd)))
						tgtr_scmd = scmd;
			}
		}
		if (!tgtr_scmd)
			/* no more commands: done walking targets */
			break;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset "
						  "to target %d\n",
						  current->comm, id));
		rtn = scsi_try_target_reset(tgtr_scmd);
		if (rtn == SUCCESS) {
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				if (id == scmd_id(scmd))
					if (!scsi_device_online(scmd->device) ||
					    !scsi_eh_tur(tgtr_scmd))
						scsi_eh_finish_cmd(scmd,
								   done_q);
			}
		} else
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Target reset"
							  " failed target: "
							  "%d\n",
							  current->comm, id));
		id++;
	} while(id != 0);
	/* loop also terminates if id wraps back to 0 */

	return list_empty(work_q);
}
1109
1110
1111
1112
1113
1114
1115
/**
 * scsi_eh_bus_reset - send a bus reset for channels that need it.
 * @shost:	scsi host being recovered.
 * @work_q:	queue of commands to process.
 * @done_q:	queue of processed commands.
 *
 * For every channel that still has commands queued, try a bus reset;
 * commands whose device (is offline or) answers TEST UNIT READY after
 * the reset are finished.
 *
 * Return value:
 *	non-zero when @work_q has been fully drained.
 */
static int scsi_eh_bus_reset(struct Scsi_Host *shost,
			     struct list_head *work_q,
			     struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *chan_scmd, *next;
	unsigned int channel;
	int rtn;

	/*
	 * One pass per channel: find any command on the channel to use
	 * as the reset vehicle; no command means nothing to recover
	 * there.
	 */
	for (channel = 0; channel <= shost->max_channel; channel++) {
		chan_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (channel == scmd_channel(scmd)) {
				chan_scmd = scmd;
				break;
			}
		}

		if (!chan_scmd)
			continue;
		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BRST chan:"
						  " %d\n", current->comm,
						  channel));
		rtn = scsi_try_bus_reset(chan_scmd);
		if (rtn == SUCCESS) {
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				if (channel == scmd_channel(scmd))
					if (!scsi_device_online(scmd->device) ||
					    !scsi_eh_tur(scmd))
						scsi_eh_finish_cmd(scmd,
								   done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
							  " failed chan: %d\n",
							  current->comm,
							  channel));
		}
	}
	return list_empty(work_q);
}
1167
1168
1169
1170
1171
1172
/**
 * scsi_eh_host_reset - last-ditch recovery: reset the whole host.
 * @work_q:	queue of commands to process.
 * @done_q:	queue of processed commands.
 *
 * Uses the first queued command as the reset vehicle; on success every
 * command whose device is offline, or which cannot be brought back with
 * START UNIT / TEST UNIT READY, is finished.
 *
 * Return value:
 *	non-zero when @work_q has been fully drained.
 */
static int scsi_eh_host_reset(struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	int rtn;

	if (!list_empty(work_q)) {
		scmd = list_entry(work_q->next,
				  struct scsi_cmnd, eh_entry);

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending HRST\n"
						  , current->comm));

		rtn = scsi_try_host_reset(scmd);
		if (rtn == SUCCESS) {
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				/*
				 * NOTE(review): the trailing !scsi_eh_tur()
				 * re-issues TEST UNIT READY even when the
				 * (stu && tur) clause just succeeded —
				 * looks redundant; confirm intent before
				 * simplifying.
				 */
				if (!scsi_device_online(scmd->device) ||
				    (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
				    !scsi_eh_tur(scmd))
					scsi_eh_finish_cmd(scmd, done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: HRST"
							  " failed\n",
							  current->comm));
		}
	}
	return list_empty(work_q);
}
1202
1203
1204
1205
1206
1207
/**
 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover.
 * @work_q:	queue of commands that every recovery step failed on.
 * @done_q:	queue of processed commands.
 *
 * Nothing else worked: mark each remaining command's device offline and
 * finish the command so the midlayer can complete it.
 */
static void scsi_eh_offline_sdevs(struct list_head *work_q,
				  struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
			    "not ready after error recovery\n");
		scsi_device_set_state(scmd->device, SDEV_OFFLINE);
		if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) {
			/*
			 * intentionally empty: the timed-out command's
			 * recovery already failed, nothing more to do
			 * for the cancel flag here
			 */
		}
		scsi_eh_finish_cmd(scmd, done_q);
	}
	return;
}
1226
1227
1228
1229
1230
/**
 * scsi_noretry_cmd - determine if a failfast command should not be retried.
 * @scmd:	command to check.
 *
 * Return value:
 *	non-zero if the request's failfast flags say this class of error
 *	must not be retried, 0 if retrying is acceptable.
 */
int scsi_noretry_cmd(struct scsi_cmnd *scmd)
{
	switch (host_byte(scmd->result)) {
	case DID_OK:
		break;
	case DID_BUS_BUSY:
		return blk_failfast_transport(scmd->request);
	case DID_PARITY:
		return blk_failfast_dev(scmd->request);
	case DID_ERROR:
		/* reservation conflicts are always retryable */
		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
		    status_byte(scmd->result) == RESERVATION_CONFLICT)
			return 0;
		/* fallthrough */
	case DID_SOFT_ERROR:
		return blk_failfast_driver(scmd->request);
	}

	switch (status_byte(scmd->result)) {
	case CHECK_CONDITION:
		/*
		 * check conditions count as device-level failures for
		 * failfast purposes
		 */
		return blk_failfast_dev(scmd->request);
	}

	return 0;
}
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
/**
 * scsi_decide_disposition - Disposition a cmd on return from LLD.
 * @scmd:	SCSI cmd to examine.
 *
 * Examines host byte, message byte and status byte (in that order) of a
 * completed command and decides what to do with it.
 *
 * Return value:
 *	SUCCESS, FAILED, NEEDS_RETRY or ADD_TO_MLQUEUE.
 */
int scsi_decide_disposition(struct scsi_cmnd *scmd)
{
	int rtn;

	/*
	 * Offline devices never come back; report success so the command
	 * completes (as an error) instead of clogging the eh queue.
	 */
	if (!scsi_device_online(scmd->device)) {
		SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
						  " as SUCCESS\n",
						  __func__));
		return SUCCESS;
	}

	/* First pass: the host (adapter-level) byte. */
	switch (host_byte(scmd->result)) {
	case DID_PASSTHROUGH:
		/*
		 * Driver wants the result passed up untouched except for
		 * the host byte itself, which we clear.
		 */
		scmd->result &= 0xff00ffff;
		return SUCCESS;
	case DID_OK:
		/* adapter level is fine; keep examining lower bytes */
		break;
	case DID_NO_CONNECT:
	case DID_BAD_TARGET:
	case DID_ABORT:
		/*
		 * Definitive failures: report "success" so the error is
		 * delivered to the caller rather than retried forever.
		 */
		return SUCCESS;

	case DID_SOFT_ERROR:
		/* transient adapter-side hiccup: retry if allowed */
		goto maybe_retry;
	case DID_IMM_RETRY:
		return NEEDS_RETRY;

	case DID_REQUEUE:
		return ADD_TO_MLQUEUE;
	case DID_TRANSPORT_DISRUPTED:
		/*
		 * The transport layer lost the connection but may get it
		 * back; retry within the usual limits.
		 */
		goto maybe_retry;
	case DID_TRANSPORT_FAILFAST:
		/*
		 * Fast-fail requested by the transport: complete with the
		 * error, no retries.
		 */
		return SUCCESS;
	case DID_ERROR:
		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
		    status_byte(scmd->result) == RESERVATION_CONFLICT)
			/*
			 * reservation conflict handling happens in the
			 * status-byte switch below
			 */
			break;
		/* fallthrough */
	case DID_BUS_BUSY:
	case DID_PARITY:
		goto maybe_retry;
	case DID_TIME_OUT:
		/*
		 * A timed-out probe command (TUR/INQUIRY) just means the
		 * device isn't there or isn't answering; anything else
		 * that times out is a hard failure.
		 */
		if ((scmd->cmnd[0] == TEST_UNIT_READY ||
		     scmd->cmnd[0] == INQUIRY)) {
			return SUCCESS;
		} else {
			return FAILED;
		}
	case DID_RESET:
		return SUCCESS;
	default:
		return FAILED;
	}

	/* Second pass: the message byte. */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/* Third pass: the SCSI status byte. */
	switch (status_byte(scmd->result)) {
	case QUEUE_FULL:
		/*
		 * Device queue is full: push back to the midlayer queue so
		 * queue depth can be adjusted.
		 */
	case BUSY:
		/*
		 * Device busy: the midlayer queue will naturally pace the
		 * retries.
		 */
		return ADD_TO_MLQUEUE;
	case GOOD:
	case COMMAND_TERMINATED:
		return SUCCESS;
	case TASK_ABORTED:
		goto maybe_retry;
	case CHECK_CONDITION:
		rtn = scsi_check_sense(scmd);
		if (rtn == NEEDS_RETRY)
			goto maybe_retry;
		/*
		 * SUCCESS/FAILED/ADD_TO_MLQUEUE from the sense evaluation
		 * are passed through as-is.
		 */
		return rtn;
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
	case ACA_ACTIVE:
		/* who issues linked commands anyway... treat as done */
		return SUCCESS;

	case RESERVATION_CONFLICT:
		sdev_printk(KERN_INFO, scmd->device,
			    "reservation conflict\n");
		return SUCCESS;
	default:
		return FAILED;
	}
	return FAILED;

 maybe_retry:

	/*
	 * Retry only while the retry budget lasts and the request's
	 * failfast flags permit it; otherwise complete with the error.
	 */
	if ((++scmd->retries) <= scmd->allowed
	    && !scsi_noretry_cmd(scmd)) {
		return NEEDS_RETRY;
	} else {
		/* no more retries: deliver the error to the caller */
		return SUCCESS;
	}
}
1450
/*
 * Completion callback for the asynchronous door-lock request issued by
 * scsi_eh_lock_door(): just drop the request reference.
 */
static void eh_lock_door_done(struct request *req, int uptodate)
{
	__blk_put_request(req->q, req);
}
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467static void scsi_eh_lock_door(struct scsi_device *sdev)
1468{
1469 struct request *req;
1470
1471
1472
1473
1474
1475 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
1476
1477 req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
1478 req->cmd[1] = 0;
1479 req->cmd[2] = 0;
1480 req->cmd[3] = 0;
1481 req->cmd[4] = SCSI_REMOVAL_PREVENT;
1482 req->cmd[5] = 0;
1483
1484 req->cmd_len = COMMAND_SIZE(req->cmd[0]);
1485
1486 req->cmd_type = REQ_TYPE_BLOCK_PC;
1487 req->cmd_flags |= REQ_QUIET;
1488 req->timeout = 10 * HZ;
1489 req->retries = 5;
1490
1491 blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
1492}
1493
1494
1495
1496
1497
1498
1499
1500
1501
/**
 * scsi_restart_operations - restart io operations to the specified host.
 * @shost:	Host we are restarting.
 *
 * When error recovery is complete: re-lock doors on devices that had
 * them locked, move the host back to a running state, wake anyone
 * sleeping in scsi_block_when_processing_errors() and kick the device
 * queues.
 */
static void scsi_restart_operations(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;

	/*
	 * Resets and aborts may have unlocked removable-media doors;
	 * re-lock any device that is online and was locked.
	 */
	shost_for_each_device(sdev, shost) {
		if (scsi_device_online(sdev) && sdev->locked)
			scsi_eh_lock_door(sdev);
	}

	/*
	 * Leave the recovery state.  RUNNING is the normal target;
	 * CANCEL covers a host that was being cancelled, and falling all
	 * the way to DEL must always succeed.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
					  __func__));

	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_RUNNING))
		if (scsi_host_set_state(shost, SHOST_CANCEL))
			BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* wake sleepers in scsi_block_when_processing_errors() */
	wake_up(&shost->host_wait);

	/*
	 * Restart queue processing for everything that was held off while
	 * the host was in recovery.
	 */
	scsi_run_host_queues(shost);
}
1541
1542
1543
1544
1545
1546
1547
/**
 * scsi_eh_ready_devs - escalate recovery until the work queue is empty.
 * @shost:	host to be recovered.
 * @work_q:	 &list_head for pending commands.
 * @done_q:	 &list_head for processed commands.
 *
 * Runs each recovery stage in turn — START UNIT, device reset, target
 * reset, bus reset, host reset — stopping as soon as one of them drains
 * @work_q; devices that survive nothing get offlined.
 */
void scsi_eh_ready_devs(struct Scsi_Host *shost,
			struct list_head *work_q,
			struct list_head *done_q)
{
	if (scsi_eh_stu(shost, work_q, done_q))
		return;
	if (scsi_eh_bus_device_reset(shost, work_q, done_q))
		return;
	if (scsi_eh_target_reset(shost, work_q, done_q))
		return;
	if (scsi_eh_bus_reset(shost, work_q, done_q))
		return;
	if (scsi_eh_host_reset(work_q, done_q))
		return;
	scsi_eh_offline_sdevs(work_q, done_q);
}
1560EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
1561
1562
1563
1564
1565
/**
 * scsi_eh_flush_done_q - finish processed commands or retry them.
 * @done_q:	list_head of processed commands.
 *
 * Commands whose device is online and which still have retry budget are
 * requeued; everything else is completed (with DRIVER_TIMEOUT filled in
 * when no other result was recorded).
 */
void scsi_eh_flush_done_q(struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;

	list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
		list_del_init(&scmd->eh_entry);
		if (scsi_device_online(scmd->device) &&
		    !scsi_noretry_cmd(scmd) &&
		    (++scmd->retries <= scmd->allowed)) {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
							  " retry cmd: %p\n",
							  current->comm,
							  scmd));
				scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
		} else {
			/*
			 * No retry: make sure the command carries some
			 * error so the caller sees the failure; if nothing
			 * was recorded, blame a timeout.
			 */
			if (!scmd->result)
				scmd->result |= (DRIVER_TIMEOUT << 24);
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush finish"
							" cmd: %p\n",
							current->comm, scmd));
			scsi_finish_command(scmd);
		}
	}
}
1595EXPORT_SYMBOL(scsi_eh_flush_done_q);
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620static void scsi_unjam_host(struct Scsi_Host *shost)
1621{
1622 unsigned long flags;
1623 LIST_HEAD(eh_work_q);
1624 LIST_HEAD(eh_done_q);
1625
1626 spin_lock_irqsave(shost->host_lock, flags);
1627 list_splice_init(&shost->eh_cmd_q, &eh_work_q);
1628 spin_unlock_irqrestore(shost->host_lock, flags);
1629
1630 SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));
1631
1632 if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
1633 if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
1634 scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
1635
1636 scsi_eh_flush_done_q(&eh_done_q);
1637}
1638
1639
1640
1641
1642
1643
1644
1645
1646
/**
 * scsi_error_handler - SCSI error handler thread
 * @data:	Host for which we are running.
 *
 * Main loop of the per-host error-handler kthread: sleep until every
 * outstanding command on the host has failed (or eh was explicitly
 * scheduled), run recovery, restart normal operation, repeat until the
 * thread is asked to stop.
 */
int scsi_error_handler(void *data)
{
	struct Scsi_Host *shost = data;

	/*
	 * Set the state first, then test the wake-up condition, so a
	 * wake_up_process() between the two is not lost (standard
	 * kthread sleep pattern).
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		/*
		 * Sleep unless (a) eh was explicitly scheduled or there
		 * are failed commands, and (b) every outstanding command
		 * has failed (host quiesced itself).
		 */
		if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
		    shost->host_failed != shost->host_busy) {
			SCSI_LOG_ERROR_RECOVERY(1,
				printk("Error handler scsi_eh_%d sleeping\n",
					shost->host_no));
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
			continue;
		}

		__set_current_state(TASK_RUNNING);
		SCSI_LOG_ERROR_RECOVERY(1,
			printk("Error handler scsi_eh_%d waking up\n",
				shost->host_no));

		/*
		 * A transport class may provide its own recovery strategy;
		 * otherwise use the generic one.
		 */
		if (shost->transportt->eh_strategy_handler)
			shost->transportt->eh_strategy_handler(shost);
		else
			scsi_unjam_host(shost);

		/*
		 * Recovery done (one way or another): leave the recovery
		 * state and restart queue processing.
		 */
		scsi_restart_operations(shost);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	SCSI_LOG_ERROR_RECOVERY(1,
		printk("Error handler scsi_eh_%d exiting\n", shost->host_no));
	shost->ehandler = NULL;
	return 0;
}
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
1724{
1725 struct scsi_device *sdev;
1726
1727 __shost_for_each_device(sdev, shost) {
1728 if (channel == sdev_channel(sdev))
1729 __scsi_report_device_reset(sdev, NULL);
1730 }
1731}
1732EXPORT_SYMBOL(scsi_report_bus_reset);
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
1757{
1758 struct scsi_device *sdev;
1759
1760 __shost_for_each_device(sdev, shost) {
1761 if (channel == sdev_channel(sdev) &&
1762 target == sdev_id(sdev))
1763 __scsi_report_device_reset(sdev, NULL);
1764 }
1765}
1766EXPORT_SYMBOL(scsi_report_device_reset);
1767
/*
 * Dummy scsi_done routine for the throw-away command built by
 * scsi_reset_provider(): the command never completes through the normal
 * I/O path, so there is intentionally nothing to do here.
 */
static void
scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
{
}
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
/**
 * scsi_reset_provider - send a requested reset to a device, escalating
 *	to stronger resets on failure
 * @dev:	scsi_device to reset
 * @flag:	SCSI_TRY_RESET_{DEVICE,TARGET,BUS,HOST} - the weakest
 *		reset to attempt first
 *
 * Builds a throw-away command (never routed through the block layer,
 * it only carries state for the LLD's reset handlers), marks a TMF in
 * progress on the host, runs the reset escalation, then wakes the host
 * and restarts its queues.
 *
 * Returns the SUCCESS/FAILED result of the last reset attempted.
 */
int
scsi_reset_provider(struct scsi_device *dev, int flag)
{
	struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL);
	struct Scsi_Host *shost = dev->host;
	struct request req;
	unsigned long flags;
	int rtn;

	/* fake request on the stack; the command never enters the queue */
	blk_rq_init(NULL, &req);
	scmd->request = &req;

	scmd->cmnd = req.cmd;

	scmd->scsi_done = scsi_reset_provider_done_command;
	memset(&scmd->sdb, 0, sizeof(scmd->sdb));

	scmd->cmd_len = 0;

	scmd->sc_data_direction = DMA_BIDIRECTIONAL;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->tmf_in_progress = 1;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Escalate from the requested reset level upwards: each case
	 * deliberately falls through to the next stronger reset when the
	 * current one did not succeed.
	 */
	switch (flag) {
	case SCSI_TRY_RESET_DEVICE:
		rtn = scsi_try_bus_device_reset(scmd);
		if (rtn == SUCCESS)
			break;
		/* FALLTHROUGH */
	case SCSI_TRY_RESET_TARGET:
		rtn = scsi_try_target_reset(scmd);
		if (rtn == SUCCESS)
			break;
		/* FALLTHROUGH */
	case SCSI_TRY_RESET_BUS:
		rtn = scsi_try_bus_reset(scmd);
		if (rtn == SUCCESS)
			break;
		/* FALLTHROUGH */
	case SCSI_TRY_RESET_HOST:
		rtn = scsi_try_host_reset(scmd);
		break;
	default:
		rtn = FAILED;
	}

	spin_lock_irqsave(shost->host_lock, flags);
	shost->tmf_in_progress = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * The TMF may have unwedged the host; wake anything waiting on it
	 * and get the per-device queues moving again.
	 */
	SCSI_LOG_ERROR_RECOVERY(3,
		printk("%s: waking up host to restart after TMF\n",
		__func__));

	wake_up(&shost->host_wait);

	scsi_run_host_queues(shost);

	scsi_next_command(scmd);
	return rtn;
}
1853EXPORT_SYMBOL(scsi_reset_provider);
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
1875 struct scsi_sense_hdr *sshdr)
1876{
1877 if (!sense_buffer || !sb_len)
1878 return 0;
1879
1880 memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
1881
1882 sshdr->response_code = (sense_buffer[0] & 0x7f);
1883
1884 if (!scsi_sense_valid(sshdr))
1885 return 0;
1886
1887 if (sshdr->response_code >= 0x72) {
1888
1889
1890
1891 if (sb_len > 1)
1892 sshdr->sense_key = (sense_buffer[1] & 0xf);
1893 if (sb_len > 2)
1894 sshdr->asc = sense_buffer[2];
1895 if (sb_len > 3)
1896 sshdr->ascq = sense_buffer[3];
1897 if (sb_len > 7)
1898 sshdr->additional_length = sense_buffer[7];
1899 } else {
1900
1901
1902
1903 if (sb_len > 2)
1904 sshdr->sense_key = (sense_buffer[2] & 0xf);
1905 if (sb_len > 7) {
1906 sb_len = (sb_len < (sense_buffer[7] + 8)) ?
1907 sb_len : (sense_buffer[7] + 8);
1908 if (sb_len > 12)
1909 sshdr->asc = sense_buffer[12];
1910 if (sb_len > 13)
1911 sshdr->ascq = sense_buffer[13];
1912 }
1913 }
1914
1915 return 1;
1916}
1917EXPORT_SYMBOL(scsi_normalize_sense);
1918
/**
 * scsi_command_normalize_sense - decode a command's sense buffer
 * @cmd:	command whose sense_buffer is to be decoded
 * @sshdr:	place to put the decoded header fields
 *
 * Thin wrapper around scsi_normalize_sense() using the command's full
 * SCSI_SENSE_BUFFERSIZE sense buffer.
 *
 * Returns 1 if the buffer held valid sense data, 0 otherwise.
 */
int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
				 struct scsi_sense_hdr *sshdr)
{
	return scsi_normalize_sense(cmd->sense_buffer,
			SCSI_SENSE_BUFFERSIZE, sshdr);
}
1925EXPORT_SYMBOL(scsi_command_normalize_sense);
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
/**
 * scsi_sense_desc_find - search descriptor-format sense data for a
 *	descriptor of a given type
 * @sense_buffer:	byte array of descriptor-format sense data
 * @sb_len:		number of valid bytes in sense_buffer
 * @desc_type:		descriptor type to find
 *
 * Only descriptor-format sense (response codes 0x72/0x73) is searched;
 * the walk never reads past @sb_len bytes.
 *
 * Returns pointer to the start of the (first) matching descriptor,
 * else NULL.
 */
const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
				int desc_type)
{
	int add_sen_len, add_len, desc_len, k;
	const u8 * descp;

	/* byte 7 is the additional sense length; no descriptors to walk if
	 * the buffer lacks the 8-byte header or the length is zero */
	if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
		return NULL;
	if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
		return NULL;
	/* clamp the walk to the bytes actually present in the buffer */
	add_sen_len = (add_sen_len < (sb_len - 8)) ?
			add_sen_len : (sb_len - 8);
	descp = &sense_buffer[8];
	for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
		descp += desc_len;
		/* each descriptor's byte 1 is its additional length; use -1
		 * as a sentinel when that byte lies beyond the buffer (a
		 * truncated descriptor) */
		add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
		desc_len = add_len + 2;
		/* the type byte is still checked for a truncated final
		 * descriptor before giving up below */
		if (descp[0] == desc_type)
			return descp;
		if (add_len < 0) /* short descriptor ?? */
			break;
	}
	return NULL;
}
1964EXPORT_SYMBOL(scsi_sense_desc_find);
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
1977 u64 * info_out)
1978{
1979 int j;
1980 const u8 * ucp;
1981 u64 ull;
1982
1983 if (sb_len < 7)
1984 return 0;
1985 switch (sense_buffer[0] & 0x7f) {
1986 case 0x70:
1987 case 0x71:
1988 if (sense_buffer[0] & 0x80) {
1989 *info_out = (sense_buffer[3] << 24) +
1990 (sense_buffer[4] << 16) +
1991 (sense_buffer[5] << 8) + sense_buffer[6];
1992 return 1;
1993 } else
1994 return 0;
1995 case 0x72:
1996 case 0x73:
1997 ucp = scsi_sense_desc_find(sense_buffer, sb_len,
1998 0 );
1999 if (ucp && (0xa == ucp[1])) {
2000 ull = 0;
2001 for (j = 0; j < 8; ++j) {
2002 if (j > 0)
2003 ull <<= 8;
2004 ull |= ucp[4 + j];
2005 }
2006 *info_out = ull;
2007 return 1;
2008 } else
2009 return 0;
2010 default:
2011 return 0;
2012 }
2013}
2014EXPORT_SYMBOL(scsi_get_sense_info_fld);
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
2027{
2028 if (desc) {
2029 buf[0] = 0x72;
2030 buf[1] = key;
2031 buf[2] = asc;
2032 buf[3] = ascq;
2033 buf[7] = 0;
2034 } else {
2035 buf[0] = 0x70;
2036 buf[2] = key;
2037 buf[7] = 0xa;
2038 buf[12] = asc;
2039 buf[13] = ascq;
2040 }
2041}
2042EXPORT_SYMBOL(scsi_build_sense_buffer);
2043