1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/module.h>
18#include <linux/sched.h>
19#include <linux/gfp.h>
20#include <linux/timer.h>
21#include <linux/string.h>
22#include <linux/kernel.h>
23#include <linux/freezer.h>
24#include <linux/kthread.h>
25#include <linux/interrupt.h>
26#include <linux/blkdev.h>
27#include <linux/delay.h>
28#include <linux/jiffies.h>
29
30#include <scsi/scsi.h>
31#include <scsi/scsi_cmnd.h>
32#include <scsi/scsi_dbg.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_driver.h>
35#include <scsi/scsi_eh.h>
36#include <scsi/scsi_transport.h>
37#include <scsi/scsi_host.h>
38#include <scsi/scsi_ioctl.h>
39
40#include "scsi_priv.h"
41#include "scsi_logging.h"
42#include "scsi_transport_api.h"
43
44#include <trace/events/scsi.h>
45
46static void scsi_eh_done(struct scsi_cmnd *scmd);
47
48#define SENSE_TIMEOUT (10*HZ)
49
50
51
52
53
54#define BUS_RESET_SETTLE_TIME (10)
55#define HOST_RESET_SETTLE_TIME (10)
56
57static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
58
59
/**
 * scsi_eh_wakeup - wake up the error handler thread
 * @shost:	host whose error handler should run
 *
 * Wakes shost->ehandler once every outstanding command has been marked
 * failed (host_busy == host_failed).
 *
 * NOTE(review): host_busy/host_failed are read without any locking here —
 * presumably every caller already holds shost->host_lock; confirm at the
 * call sites.
 */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
	if (shost->host_busy == shost->host_failed) {
		trace_scsi_eh_wakeup(shost);
		wake_up_process(shost->ehandler);
		SCSI_LOG_ERROR_RECOVERY(5,
				printk("Waking error handler thread\n"));
	}
}
69
70
71
72
73
74
75
/**
 * scsi_schedule_eh - schedule EH for SCSI host
 * @shost:	SCSI host to invoke error handling on.
 *
 * Schedule SCSI EH without a failed scmd: move the host into a recovery
 * state, bump host_eh_scheduled and wake the handler thread.  If neither
 * state transition is legal the request is dropped silently.
 */
void scsi_schedule_eh(struct Scsi_Host *shost)
{
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);

	if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
	    scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
		shost->host_eh_scheduled++;
		scsi_eh_wakeup(shost);
	}

	spin_unlock_irqrestore(shost->host_lock, flags);
}
EXPORT_SYMBOL_GPL(scsi_schedule_eh);
91
92
93
94
95
96
97
98
99
/**
 * scsi_eh_scmd_add - add scsi cmd to error handling.
 * @scmd:	scmd to run eh on.
 * @eh_flag:	optional SCSI_EH flag(s) OR-ed into scmd->eh_eflags.
 *
 * Queues @scmd on the host's eh_cmd_q, accounts it in host_failed and
 * wakes the error handler.
 *
 * Return value:
 *	0 on failure (no EH thread, or host cannot enter a recovery state),
 *	1 on success.
 */
int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
{
	struct Scsi_Host *shost = scmd->device->host;
	unsigned long flags;
	int ret = 0;

	if (!shost->ehandler)
		return 0;

	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_RECOVERY))
		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
			goto out_unlock;

	ret = 1;
	scmd->eh_eflags |= eh_flag;
	list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
	shost->host_failed++;
	scsi_eh_wakeup(shost);
 out_unlock:
	spin_unlock_irqrestore(shost->host_lock, flags);
	return ret;
}
123
124
125
126
127
128
129
130
131
132
133
/**
 * scsi_times_out - Timeout function for normal scsi commands.
 * @req:	request that is timing out.
 *
 * Gives the transport class and then the LLD a chance to handle the
 * timeout; if neither does and the command cannot be handed to the
 * error handler, the block layer is told the request is finished.
 */
enum blk_eh_timer_return scsi_times_out(struct request *req)
{
	struct scsi_cmnd *scmd = req->special;
	enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
	struct Scsi_Host *host = scmd->device->host;

	trace_scsi_dispatch_cmd_timeout(scmd);
	scsi_log_completion(scmd, TIMEOUT_ERROR);

	/* The transport's timeout handler takes precedence over the LLD's. */
	if (host->transportt->eh_timed_out)
		rtn = host->transportt->eh_timed_out(scmd);
	else if (host->hostt->eh_timed_out)
		rtn = host->hostt->eh_timed_out(scmd);

	/* Record the timeout in the command result unconditionally. */
	scmd->result |= DID_TIME_OUT << 16;

	/*
	 * Nobody handled the timeout and the command could not be queued
	 * for error handling: complete it from here.
	 */
	if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
		     !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)))
		rtn = BLK_EH_HANDLED;

	return rtn;
}
156
157
158
159
160
161
162
163
164
165
166
167
168int scsi_block_when_processing_errors(struct scsi_device *sdev)
169{
170 int online;
171
172 wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
173
174 online = scsi_device_online(sdev);
175
176 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __func__,
177 online));
178
179 return online;
180}
181EXPORT_SYMBOL(scsi_block_when_processing_errors);
182
183#ifdef CONFIG_SCSI_LOGGING
184
185
186
187
188
/**
 * scsi_eh_prt_fail_stats - Log info on failures.
 * @shost:	scsi host being recovered.
 * @work_q:	Queue of scsi cmds to process.
 *
 * Logging helper only (compiled under CONFIG_SCSI_LOGGING): counts the
 * failed/cancelled commands per device and prints a summary.
 */
static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
					  struct list_head *work_q)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	int total_failures = 0;
	int cmd_failed = 0;
	int cmd_cancel = 0;
	int devices_failed = 0;

	shost_for_each_device(sdev, shost) {
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (scmd->device == sdev) {
				++total_failures;
				if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
					++cmd_cancel;
				else
					++cmd_failed;
			}
		}

		if (cmd_cancel || cmd_failed) {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					    "%s: cmds failed: %d, cancel: %d\n",
					    __func__, cmd_failed,
					    cmd_cancel));
			/* reset per-device counters for the next device */
			cmd_cancel = 0;
			cmd_failed = 0;
			++devices_failed;
		}
	}

	SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d"
					  " devices require eh work\n",
					  total_failures, devices_failed));
}
226#endif
227
228
229
230
231
232
233
234
235
236
237
238
/**
 * scsi_check_sense - Examine sense data to decide the disposition of a
 *		      failed command.
 * @scmd:	Command with sense data to examine.
 *
 * Return value:
 *	SUCCESS, FAILED, NEEDS_RETRY, ADD_TO_MLQUEUE or TARGET_ERROR.
 */
static int scsi_check_sense(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct scsi_sense_hdr sshdr;

	if (! scsi_command_normalize_sense(scmd, &sshdr))
		return FAILED;	/* no valid sense data */

	if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
		/*
		 * A TEST UNIT READY that was NOT issued by the error
		 * handler itself (scsi_done != scsi_eh_done): report
		 * success and let the caller see the raw sense data.
		 */
		return SUCCESS;

	/* deferred errors relate to a previous command — retry this one */
	if (scsi_sense_is_deferred(&sshdr))
		return NEEDS_RETRY;

	/* Give an attached device handler first crack at the sense data. */
	if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
	    sdev->scsi_dh_data->scsi_dh->check_sense) {
		int rc;

		rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
		if (rc != SCSI_RETURN_NOT_HANDLED)
			return rc;
		/* handler does not care — fall through to normal checks */
	}

	/*
	 * FILEMARK/EOM/ILI style indications (mostly tape related) are
	 * treated as command success.
	 */
	if (sshdr.response_code == 0x70) {
		/* fixed format sense: byte 2 carries the FM/EOM/ILI bits */
		if (scmd->sense_buffer[2] & 0xe0)
			return SUCCESS;
	} else {
		/*
		 * descriptor format: assumes a single stream-commands sense
		 * data descriptor at offset 8 (type 0x4) — NOTE(review):
		 * verify against SPC/SSC; multiple descriptors are not
		 * handled here.
		 */
		if ((sshdr.additional_length > 3) &&
		    (scmd->sense_buffer[8] == 0x4) &&
		    (scmd->sense_buffer[11] & 0xe0))
			return SUCCESS;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
		return SUCCESS;
	case RECOVERED_ERROR:
		return SUCCESS;	/* device recovered the error itself */

	case ABORTED_COMMAND:
		if (sshdr.asc == 0x10) /* DIF/protection error: don't retry */
			return SUCCESS;

		return NEEDS_RETRY;
	case NOT_READY:
	case UNIT_ATTENTION:
		/*
		 * If we expect a check-condition/UA because of a reset we
		 * performed, treat it as a plain retry ...
		 */
		if (scmd->device->expecting_cc_ua) {
			/*
			 * ... but only clear expecting_cc_ua for UAs other
			 * than 28/00 (presumably "not ready to ready
			 * change / medium may have changed" — confirm
			 * against SPC ASC tables).
			 */
			if (sshdr.asc != 0x28 || sshdr.ascq != 0x00) {
				scmd->device->expecting_cc_ua = 0;
				return NEEDS_RETRY;
			}
		}
		/*
		 * 04/01: LUN is in the process of becoming ready — retry.
		 */
		if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
			return NEEDS_RETRY;
		/*
		 * 04/02: LUN needs an explicit START UNIT.  Returning
		 * FAILED lets the EH escalation path issue one when
		 * allow_restart is set.
		 */
		if (scmd->device->allow_restart &&
		    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
			return FAILED;

		if (sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
			scmd_printk(KERN_WARNING, scmd,
				    "Warning! Received an indication that the "
				    "LUN assignments on this target have "
				    "changed. The Linux SCSI layer does not "
				    "automatically remap LUN assignments.\n");
		else if (sshdr.asc == 0x3f)
			scmd_printk(KERN_WARNING, scmd,
				    "Warning! Received an indication that the "
				    "operating parameters on this target have "
				    "changed. The Linux SCSI layer does not "
				    "automatically adjust these parameters.\n");

		if (sshdr.asc == 0x38 && sshdr.ascq == 0x07)
			scmd_printk(KERN_WARNING, scmd,
				    "Warning! Received an indication that the "
				    "LUN reached a thin provisioning soft "
				    "threshold.\n");

		/*
		 * Pass the UA upwards for a determination in the
		 * completion functions.
		 */
		return SUCCESS;

	/* not handled by retry/reset — let the target error stand */
	case COPY_ABORTED:
	case VOLUME_OVERFLOW:
	case MISCOMPARE:
	case BLANK_CHECK:
	case DATA_PROTECT:
		return TARGET_ERROR;

	case MEDIUM_ERROR:
		if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERROR */
		    sshdr.asc == 0x13 || /* AMNF DATA FIELD */
		    sshdr.asc == 0x14) { /* RECORD NOT FOUND */
			return TARGET_ERROR;
		}
		return NEEDS_RETRY;

	case HARDWARE_ERROR:
		if (scmd->device->retry_hwerror)
			return ADD_TO_MLQUEUE;
		else
			return TARGET_ERROR;

	case ILLEGAL_REQUEST:
		if (sshdr.asc == 0x20 || /* Invalid command operation code */
		    sshdr.asc == 0x21 || /* Logical block address out of range */
		    sshdr.asc == 0x24 || /* Invalid field in cdb */
		    sshdr.asc == 0x26) { /* Parameter value invalid */
			return TARGET_ERROR;
		}
		return SUCCESS;

	default:
		return SUCCESS;
	}
}
393
/*
 * scsi_handle_queue_ramp_up - slowly raise the queue depth again after a
 * quiet period without QUEUE FULL conditions.
 *
 * Bumps the depth of every device on the same target by one via the
 * LLD's change_queue_depth(SCSI_QDEPTH_RAMP_UP) hook, at most once per
 * queue_ramp_up_period.
 */
static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
{
	struct scsi_host_template *sht = sdev->host->hostt;
	struct scsi_device *tmp_sdev;

	/* nothing to do if the LLD can't change depth or we're maxed out */
	if (!sht->change_queue_depth ||
	    sdev->queue_depth >= sdev->max_queue_depth)
		return;

	/* rate-limit: at most one ramp-up step per ramp-up period */
	if (time_before(jiffies,
	    sdev->last_queue_ramp_up + sdev->queue_ramp_up_period))
		return;

	/* and only after a full period without a QUEUE FULL event */
	if (time_before(jiffies,
	    sdev->last_queue_full_time + sdev->queue_ramp_up_period))
		return;

	/*
	 * Walk all devices of the same target and ramp up any that are
	 * not already at the maximum depth.
	 */
	shost_for_each_device(tmp_sdev, sdev->host) {
		if (tmp_sdev->channel != sdev->channel ||
		    tmp_sdev->id != sdev->id ||
		    tmp_sdev->queue_depth == sdev->max_queue_depth)
			continue;
		/*
		 * The LLD may ignore or clamp the request; we only record
		 * that a ramp-up was attempted now.
		 */
		sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1,
					SCSI_QDEPTH_RAMP_UP);
		sdev->last_queue_ramp_up = jiffies;
	}
}
429
430static void scsi_handle_queue_full(struct scsi_device *sdev)
431{
432 struct scsi_host_template *sht = sdev->host->hostt;
433 struct scsi_device *tmp_sdev;
434
435 if (!sht->change_queue_depth)
436 return;
437
438 shost_for_each_device(tmp_sdev, sdev->host) {
439 if (tmp_sdev->channel != sdev->channel ||
440 tmp_sdev->id != sdev->id)
441 continue;
442
443
444
445
446
447 sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth - 1,
448 SCSI_QDEPTH_QFULL);
449 }
450}
451
452
453
454
455
456
457
458
459
460
461
/**
 * scsi_eh_completed_normally - Disposition of an EH-issued command.
 * @scmd:	Command that completed while the error handler was active.
 *
 * Examines host byte, message byte and status byte of the result of a
 * command the error handler sent itself.
 *
 * Return value:
 *	SUCCESS, FAILED, NEEDS_RETRY, ADD_TO_MLQUEUE or TARGET_ERROR.
 */
static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
{
	/*
	 * DID_RESET means the command was cut short by a reset; if there
	 * is sense data it still decides the disposition.
	 */
	if (host_byte(scmd->result) == DID_RESET) {
		return scsi_check_sense(scmd);
	}
	if (host_byte(scmd->result) != DID_OK)
		return FAILED;

	/* anything other than COMMAND COMPLETE is a protocol failure */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/* now interpret the SCSI status byte */
	switch (status_byte(scmd->result)) {
	case GOOD:
		scsi_handle_queue_ramp_up(scmd->device);
		/* fallthrough: GOOD also means SUCCESS */
	case COMMAND_TERMINATED:
		return SUCCESS;
	case CHECK_CONDITION:
		return scsi_check_sense(scmd);
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
		/* these should never happen for an EH command but are benign */
		return SUCCESS;
	case RESERVATION_CONFLICT:
		if (scmd->cmnd[0] == TEST_UNIT_READY)
			/* a reserved device is still "ready" for our purposes */
			return SUCCESS;
		/* otherwise the conflict blocks recovery for this device */
		return FAILED;
	case QUEUE_FULL:
		scsi_handle_queue_full(scmd->device);
		/* fallthrough: treat like BUSY and retry */
	case BUSY:
		return NEEDS_RETRY;
	default:
		return FAILED;
	}
	return FAILED;
}
521
522
523
524
525
526static void scsi_eh_done(struct scsi_cmnd *scmd)
527{
528 struct completion *eh_action;
529
530 SCSI_LOG_ERROR_RECOVERY(3,
531 printk("%s scmd: %p result: %x\n",
532 __func__, scmd, scmd->result));
533
534 eh_action = scmd->device->host->eh_action;
535 if (eh_action)
536 complete(eh_action);
537}
538
539
540
541
542
543static int scsi_try_host_reset(struct scsi_cmnd *scmd)
544{
545 unsigned long flags;
546 int rtn;
547 struct Scsi_Host *host = scmd->device->host;
548 struct scsi_host_template *hostt = host->hostt;
549
550 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
551 __func__));
552
553 if (!hostt->eh_host_reset_handler)
554 return FAILED;
555
556 rtn = hostt->eh_host_reset_handler(scmd);
557
558 if (rtn == SUCCESS) {
559 if (!hostt->skip_settle_delay)
560 ssleep(HOST_RESET_SETTLE_TIME);
561 spin_lock_irqsave(host->host_lock, flags);
562 scsi_report_bus_reset(host, scmd_channel(scmd));
563 spin_unlock_irqrestore(host->host_lock, flags);
564 }
565
566 return rtn;
567}
568
569
570
571
572
573static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
574{
575 unsigned long flags;
576 int rtn;
577 struct Scsi_Host *host = scmd->device->host;
578 struct scsi_host_template *hostt = host->hostt;
579
580 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
581 __func__));
582
583 if (!hostt->eh_bus_reset_handler)
584 return FAILED;
585
586 rtn = hostt->eh_bus_reset_handler(scmd);
587
588 if (rtn == SUCCESS) {
589 if (!hostt->skip_settle_delay)
590 ssleep(BUS_RESET_SETTLE_TIME);
591 spin_lock_irqsave(host->host_lock, flags);
592 scsi_report_bus_reset(host, scmd_channel(scmd));
593 spin_unlock_irqrestore(host->host_lock, flags);
594 }
595
596 return rtn;
597}
598
/*
 * __scsi_report_device_reset - per-device callback run after a reset:
 * mark the device reset and arm it to expect a unit-attention.
 * The @data argument is unused (required by __starget_for_each_device).
 */
static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
{
	sdev->was_reset = 1;
	sdev->expecting_cc_ua = 1;
}
604
605
606
607
608
609
610
611
612
613
614
/**
 * scsi_try_target_reset - ask the LLD to reset the target of @scmd.
 * @scmd:	command on the target to reset.
 *
 * On success every device on the target is marked was_reset /
 * expecting_cc_ua under the host lock.
 */
static int scsi_try_target_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	int rtn;
	struct Scsi_Host *host = scmd->device->host;
	struct scsi_host_template *hostt = host->hostt;

	if (!hostt->eh_target_reset_handler)
		return FAILED;

	rtn = hostt->eh_target_reset_handler(scmd);
	if (rtn == SUCCESS) {
		spin_lock_irqsave(host->host_lock, flags);
		__starget_for_each_device(scsi_target(scmd->device), NULL,
					  __scsi_report_device_reset);
		spin_unlock_irqrestore(host->host_lock, flags);
	}

	return rtn;
}
635
636
637
638
639
640
641
642
643
644
645
/**
 * scsi_try_bus_device_reset - ask the LLD to reset a single device (LU).
 * @scmd:	command on the device to reset.
 *
 * On success the device is marked was_reset / expecting_cc_ua.
 */
static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
{
	int rtn;
	struct scsi_host_template *hostt = scmd->device->host->hostt;

	if (!hostt->eh_device_reset_handler)
		return FAILED;

	rtn = hostt->eh_device_reset_handler(scmd);
	if (rtn == SUCCESS)
		__scsi_report_device_reset(scmd->device, NULL);
	return rtn;
}
659
660static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt, struct scsi_cmnd *scmd)
661{
662 if (!hostt->eh_abort_handler)
663 return FAILED;
664
665 return hostt->eh_abort_handler(scmd);
666}
667
668static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
669{
670 if (scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd) != SUCCESS)
671 if (scsi_try_bus_device_reset(scmd) != SUCCESS)
672 if (scsi_try_target_reset(scmd) != SUCCESS)
673 if (scsi_try_bus_reset(scmd) != SUCCESS)
674 scsi_try_host_reset(scmd);
675}
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
/**
 * scsi_eh_prep_cmnd - save a scsi command, repurpose it for EH use
 * @scmd:	command to be repurposed
 * @ses:	save area for the original command state
 * @cmnd:	CDB to send (NULL to send a REQUEST SENSE when
 *		@sense_bytes is set, or nothing)
 * @cmnd_size:	size in bytes of @cmnd
 * @sense_bytes: number of sense bytes to read back (0 = data command)
 *
 * Saves the fields of @scmd that will be clobbered into @ses so
 * scsi_eh_restore_cmnd() can undo the damage, then rewrites the command
 * either into an autosense REQUEST SENSE (when @sense_bytes != 0) or
 * into the caller-supplied CDB.
 */
void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
			unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
{
	struct scsi_device *sdev = scmd->device;

	/* save everything we are about to overwrite */
	ses->cmd_len = scmd->cmd_len;
	ses->cmnd = scmd->cmnd;
	ses->data_direction = scmd->sc_data_direction;
	ses->sdb = scmd->sdb;
	ses->next_rq = scmd->request->next_rq;
	ses->result = scmd->result;
	ses->underflow = scmd->underflow;
	ses->prot_op = scmd->prot_op;

	/* point the command at the EH scratch CDB and clear the data side */
	scmd->prot_op = SCSI_PROT_NORMAL;
	scmd->cmnd = ses->eh_cmnd;
	memset(scmd->cmnd, 0, BLK_MAX_CDB);
	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
	scmd->request->next_rq = NULL;

	if (sense_bytes) {
		/* build a REQUEST SENSE reading into scmd->sense_buffer */
		scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
					 sense_bytes);
		sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
			    scmd->sdb.length);
		scmd->sdb.table.sgl = &ses->sense_sgl;
		scmd->sc_data_direction = DMA_FROM_DEVICE;
		scmd->sdb.table.nents = 1;
		scmd->cmnd[0] = REQUEST_SENSE;
		scmd->cmnd[4] = scmd->sdb.length;
		scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	} else {
		/* no data phase; copy in the caller's CDB if any */
		scmd->sc_data_direction = DMA_NONE;
		if (cmnd) {
			BUG_ON(cmnd_size > BLK_MAX_CDB);
			memcpy(scmd->cmnd, cmnd, cmnd_size);
			scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
		}
	}

	scmd->underflow = 0;

	/* SCSI-2 and older carry the LUN in the top 3 bits of CDB byte 1 */
	if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN)
		scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
			(sdev->lun << 5 & 0xe0);

	/*
	 * Zero the sense buffer.  The scsi spec mandates that any
	 * untransferred sense data should be interpreted as being zero.
	 */
	memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
}
EXPORT_SYMBOL(scsi_eh_prep_cmnd);
751
752
753
754
755
756
757
758
/**
 * scsi_eh_restore_cmnd - undo scsi_eh_prep_cmnd()
 * @scmd:	command to restore
 * @ses:	save area filled in by scsi_eh_prep_cmnd()
 *
 * Restores every field scsi_eh_prep_cmnd() saved; must be paired with it.
 */
void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
{
	/* restore original command state */
	scmd->cmd_len = ses->cmd_len;
	scmd->cmnd = ses->cmnd;
	scmd->sc_data_direction = ses->data_direction;
	scmd->sdb = ses->sdb;
	scmd->request->next_rq = ses->next_rq;
	scmd->result = ses->result;
	scmd->underflow = ses->underflow;
	scmd->prot_op = ses->prot_op;
}
EXPORT_SYMBOL(scsi_eh_restore_cmnd);
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
/**
 * scsi_send_eh_cmnd - submit a scsi command as part of error recovery
 * @scmd:	command (re-used) to send
 * @cmnd:	CDB to send, or NULL for a REQUEST SENSE
 * @cmnd_size:	size in bytes of @cmnd
 * @timeout:	timeout (jiffies) for this attempt
 * @sense_bytes: sense bytes to read (see scsi_eh_prep_cmnd())
 *
 * Sends the command synchronously through the LLD's queuecommand,
 * stalling and retrying in 100ms steps while the LLD refuses it, then
 * waits on an on-stack completion signalled by scsi_eh_done().
 *
 * Return value:
 *	SUCCESS, FAILED, NEEDS_RETRY or a driver-specific disposition.
 */
static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
			     int cmnd_size, int timeout, unsigned sense_bytes)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long timeleft = timeout;
	struct scsi_eh_save ses;
	const unsigned long stall_for = msecs_to_jiffies(100);
	int rtn;

retry:
	scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
	shost->eh_action = &done;

	scsi_log_send(scmd);
	scmd->scsi_done = scsi_eh_done;
	rtn = shost->hostt->queuecommand(shost, scmd);
	if (rtn) {
		/*
		 * LLD would not accept the command; back off for 100ms
		 * and try again until the attempt budget is used up.
		 */
		if (timeleft > stall_for) {
			scsi_eh_restore_cmnd(scmd, &ses);
			timeleft -= stall_for;
			msleep(jiffies_to_msecs(stall_for));
			goto retry;
		}
		/* budget exhausted without ever being queued */
		timeleft = 0;
		rtn = NEEDS_RETRY;
	} else {
		timeleft = wait_for_completion_timeout(&done, timeout);
	}

	shost->eh_action = NULL;

	scsi_log_completion(scmd, rtn);

	SCSI_LOG_ERROR_RECOVERY(3,
		printk("%s: scmd: %p, timeleft: %ld\n",
			__func__, scmd, timeleft));

	/*
	 * timeleft != 0: the command completed before the timeout —
	 * classify the result.  timeleft == 0 with rtn == 0: it was
	 * queued but never completed, so escalate via abort/reset.
	 */
	if (timeleft) {
		rtn = scsi_eh_completed_normally(scmd);
		SCSI_LOG_ERROR_RECOVERY(3,
			printk("%s: scsi_eh_completed_normally %x\n",
			       __func__, rtn));

		switch (rtn) {
		case SUCCESS:
		case NEEDS_RETRY:
		case FAILED:
		case TARGET_ERROR:
			break;
		case ADD_TO_MLQUEUE:
			rtn = NEEDS_RETRY;
			break;
		default:
			rtn = FAILED;
			break;
		}
	} else if (!rtn) {
		scsi_abort_eh_cmnd(scmd);
		rtn = FAILED;
	}

	scsi_eh_restore_cmnd(scmd, &ses);

	/* give the upper-level driver a chance to amend the disposition */
	if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
		struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
		if (sdrv->eh_action)
			rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn);
	}

	return rtn;
}
872
873
874
875
876
877
878
879
880
881
/**
 * scsi_request_sense - request sense data for a failed command.
 * @scmd:	command whose sense is to be fetched.
 *
 * Issues a REQUEST SENSE via scsi_send_eh_cmnd() (~0 sense bytes is
 * clamped to SCSI_SENSE_BUFFERSIZE by scsi_eh_prep_cmnd()).
 */
static int scsi_request_sense(struct scsi_cmnd *scmd)
{
	return scsi_send_eh_cmnd(scmd, NULL, 0, SENSE_TIMEOUT, ~0);
}
886
887
888
889
890
891
892
893
894
895
896
897
898
/**
 * scsi_eh_finish_cmd - handle a cmd that eh is finished with.
 * @scmd:	command that is done.
 * @done_q:	queue of finished commands.
 *
 * Removes the command from EH accounting (host_failed), clears its EH
 * flags and moves it onto @done_q for final processing.
 */
void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
{
	scmd->device->host->host_failed--;
	scmd->eh_eflags = 0;
	list_move_tail(&scmd->eh_entry, done_q);
}
EXPORT_SYMBOL(scsi_eh_finish_cmd);
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
/**
 * scsi_eh_get_sense - obtain sense for commands that lack it.
 * @work_q:	queue of failed commands to process.
 * @done_q:	queue of commands finished by EH.
 *
 * For every command that is not being cancelled and has no valid sense,
 * issue a REQUEST SENSE.  Commands whose new disposition is SUCCESS or
 * NEEDS_RETRY are finished here; all others stay on @work_q for the
 * heavier recovery steps.
 *
 * Return value:
 *	non-zero when @work_q drained completely.
 */
int scsi_eh_get_sense(struct list_head *work_q,
		      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	int rtn;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		/* skip cancels and commands that already carry sense */
		if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
		    SCSI_SENSE_VALID(scmd))
			continue;

		SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
						  "%s: requesting sense\n",
						  current->comm));
		rtn = scsi_request_sense(scmd);
		if (rtn != SUCCESS)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("sense requested for %p"
						  " result %x\n", scmd,
						  scmd->result));
		SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd));

		rtn = scsi_decide_disposition(scmd);

		/*
		 * SUCCESS here means "hand the result back to the upper
		 * layer"; exhaust the retry counter so the completion
		 * path does not resubmit it.
		 */
		if (rtn == SUCCESS)
			scmd->retries = scmd->allowed;
		else if (rtn != NEEDS_RETRY)
			continue;

		scsi_eh_finish_cmd(scmd, done_q);
	}

	return list_empty(work_q);
}
EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
971
972
973
974
975
976
977
978
/**
 * scsi_eh_tur - Send TEST UNIT READY to check whether a device responds.
 * @scmd:	command on the device to probe.
 *
 * Return value:
 *	0 - device is responding, 1 - device is not.
 */
static int scsi_eh_tur(struct scsi_cmnd *scmd)
{
	static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
	int retry_cnt = 1, rtn;

retry_tur:
	rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);

	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
		__func__, scmd, rtn));

	switch (rtn) {
	case NEEDS_RETRY:
		if (retry_cnt--)
			goto retry_tur;
		/*
		 * FALLTHRU: after the retry budget is spent a NEEDS_RETRY
		 * is treated like SUCCESS (device responded at all).
		 */
	case SUCCESS:
		return 0;
	default:
		return 1;
	}
}
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
/**
 * scsi_eh_test_devices - check whether devices are working properly.
 * @cmd_list:	commands whose devices should be probed.
 * @work_q:	queue receiving commands of devices that still need work.
 * @done_q:	queue receiving commands of devices that recovered.
 * @try_stu:	boolean: also attempt a START UNIT before the TUR.
 *
 * Probes one command's device per iteration and then disposes every
 * command on @cmd_list for that same device accordingly.
 *
 * NOTE(review): when @try_stu is set and the first TUR fails, the
 * short-circuit chain below issues a second TUR for the same device —
 * looks redundant; confirm this is intended before changing it.
 *
 * Return value:
 *	non-zero when @work_q ends up empty.
 */
static int scsi_eh_test_devices(struct list_head *cmd_list,
				struct list_head *work_q,
				struct list_head *done_q, int try_stu)
{
	struct scsi_cmnd *scmd, *next;
	struct scsi_device *sdev;
	int finish_cmds;

	while (!list_empty(cmd_list)) {
		scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
		sdev = scmd->device;

		/* offline devices are finished without probing */
		finish_cmds = !scsi_device_online(scmd->device) ||
			(try_stu && !scsi_eh_try_stu(scmd) &&
			 !scsi_eh_tur(scmd)) ||
			!scsi_eh_tur(scmd);

		/* apply the verdict to every command of this device */
		list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
			if (scmd->device == sdev) {
				if (finish_cmds)
					scsi_eh_finish_cmd(scmd, done_q);
				else
					list_move_tail(&scmd->eh_entry, work_q);
			}
	}
	return list_empty(work_q);
}
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
/**
 * scsi_eh_abort_cmds - abort pending (timed-out) commands.
 * @work_q:	queue of commands to process.
 * @done_q:	queue of commands finished by EH.
 *
 * Tries the LLD abort handler on every SCSI_EH_CANCEL_CMD entry.
 * FAST_IO_FAIL aborts are finished immediately; plain SUCCESS aborts
 * are re-verified with a TUR via scsi_eh_test_devices().
 *
 * Return value:
 *	non-zero when @work_q drained completely.
 */
static int scsi_eh_abort_cmds(struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	LIST_HEAD(check_list);
	int rtn;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
			continue;
		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
						  "0x%p\n", current->comm,
						  scmd));
		rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd);
		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
			scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
			if (rtn == FAST_IO_FAIL)
				scsi_eh_finish_cmd(scmd, done_q);
			else
				list_move_tail(&scmd->eh_entry, &check_list);
		} else
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
							  " cmd failed:"
							  "0x%p\n",
							  current->comm,
							  scmd));
	}

	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
1086
1087
1088
1089
1090
1091
1092
1093
1094static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
1095{
1096 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
1097
1098 if (scmd->device->allow_restart) {
1099 int i, rtn = NEEDS_RETRY;
1100
1101 for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
1102 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
1103
1104 if (rtn == SUCCESS)
1105 return 0;
1106 }
1107
1108 return 1;
1109}
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
/**
 * scsi_eh_stu - send START UNIT to devices whose sense indicates FAILED.
 * @shost:	host being recovered.
 * @work_q:	queue of commands to process.
 * @done_q:	queue of commands finished by EH.
 *
 * For every device with a command whose sense classification is FAILED
 * (typically NOT_READY 04/02, see scsi_check_sense()), try START UNIT
 * and, if the device then answers a TUR, finish all of its commands.
 *
 * Return value:
 *	non-zero when @work_q drained completely.
 */
static int scsi_eh_stu(struct Scsi_Host *shost,
			      struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *stu_scmd, *next;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		stu_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
			    scsi_check_sense(scmd) == FAILED ) {
				stu_scmd = scmd;
				break;
			}

		if (!stu_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending START_UNIT to sdev:"
						  " 0x%p\n", current->comm, sdev));

		if (!scsi_eh_try_stu(stu_scmd)) {
			/* started; finish everything if the device answers */
			if (!scsi_device_online(sdev) ||
			    !scsi_eh_tur(stu_scmd)) {
				list_for_each_entry_safe(scmd, next,
							  work_q, eh_entry) {
					if (scmd->device == sdev)
						scsi_eh_finish_cmd(scmd, done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
						printk("%s: START_UNIT failed to sdev:"
						       " 0x%p\n", current->comm, sdev));
		}
	}

	return list_empty(work_q);
}
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
/**
 * scsi_eh_bus_device_reset - send a bus-device reset (BDR) per device.
 * @shost:	host being recovered.
 * @work_q:	queue of commands to process.
 * @done_q:	queue of commands finished by EH.
 *
 * One BDR per device that still has commands on @work_q; commands of a
 * device that was reset successfully (and answers a TUR, or on
 * FAST_IO_FAIL / offline) are finished.
 *
 * Return value:
 *	non-zero when @work_q drained completely.
 */
static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
				    struct list_head *work_q,
				    struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *bdr_scmd, *next;
	struct scsi_device *sdev;
	int rtn;

	shost_for_each_device(sdev, shost) {
		bdr_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev) {
				bdr_scmd = scmd;
				break;
			}

		if (!bdr_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BDR sdev:"
						  " 0x%p\n", current->comm,
						  sdev));
		rtn = scsi_try_bus_device_reset(bdr_scmd);
		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
			if (!scsi_device_online(sdev) ||
			    rtn == FAST_IO_FAIL ||
			    !scsi_eh_tur(bdr_scmd)) {
				list_for_each_entry_safe(scmd, next,
							 work_q, eh_entry) {
					if (scmd->device == sdev)
						scsi_eh_finish_cmd(scmd,
								   done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BDR"
							  " failed sdev:"
							  "0x%p\n",
							  current->comm,
							   sdev));
		}
	}

	return list_empty(work_q);
}
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
/**
 * scsi_eh_target_reset - send a target reset per failed target.
 * @shost:	host being recovered.
 * @work_q:	queue of commands to process.
 * @done_q:	queue of commands finished by EH.
 *
 * Splices @work_q aside and issues one target reset per distinct target
 * id found; SUCCESS moves the target's commands to a check list for TUR
 * verification, FAST_IO_FAIL finishes them, anything else puts them
 * back on @work_q.
 *
 * Return value:
 *	non-zero when @work_q drained completely.
 */
static int scsi_eh_target_reset(struct Scsi_Host *shost,
				struct list_head *work_q,
				struct list_head *done_q)
{
	LIST_HEAD(tmp_list);
	LIST_HEAD(check_list);

	list_splice_init(work_q, &tmp_list);

	while (!list_empty(&tmp_list)) {
		struct scsi_cmnd *next, *scmd;
		int rtn;
		unsigned int id;

		scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
		id = scmd_id(scmd);

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset "
						  "to target %d\n",
						  current->comm, id));
		rtn = scsi_try_target_reset(scmd);
		if (rtn != SUCCESS && rtn != FAST_IO_FAIL)
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Target reset"
							  " failed target: "
							  "%d\n",
							  current->comm, id));
		list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {
			if (scmd_id(scmd) != id)
				continue;

			if (rtn == SUCCESS)
				list_move_tail(&scmd->eh_entry, &check_list);
			else if (rtn == FAST_IO_FAIL)
				scsi_eh_finish_cmd(scmd, done_q);
			else
				/* push back on work queue for further escalation */
				list_move(&scmd->eh_entry, work_q);
		}
	}

	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
1272
1273
1274
1275
1276
1277
1278
/**
 * scsi_eh_bus_reset - send a bus reset per channel with failed commands.
 * @shost:	host being recovered.
 * @work_q:	queue of commands to process.
 * @done_q:	queue of commands finished by EH.
 *
 * One reset per channel that still owns commands on @work_q; FAST_IO_FAIL
 * finishes that channel's commands, SUCCESS defers them to a TUR check.
 *
 * Return value:
 *	non-zero when @work_q drained completely.
 */
static int scsi_eh_bus_reset(struct Scsi_Host *shost,
			     struct list_head *work_q,
			     struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *chan_scmd, *next;
	LIST_HEAD(check_list);
	unsigned int channel;
	int rtn;

	/*
	 * If we ended up here, device/target resets did not work for at
	 * least one device — reset each channel that has a failed command.
	 */
	for (channel = 0; channel <= shost->max_channel; channel++) {
		chan_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (channel == scmd_channel(scmd)) {
				chan_scmd = scmd;
				break;
				/* the first command on the channel is enough */
			}
		}

		if (!chan_scmd)
			continue;
		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BRST chan:"
						  " %d\n", current->comm,
						  channel));
		rtn = scsi_try_bus_reset(chan_scmd);
		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				if (channel == scmd_channel(scmd)) {
					if (rtn == FAST_IO_FAIL)
						scsi_eh_finish_cmd(scmd,
								   done_q);
					else
						list_move_tail(&scmd->eh_entry,
							       &check_list);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
							  " failed chan: %d\n",
							  current->comm,
							  channel));
		}
	}
	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
1334
1335
1336
1337
1338
1339
/**
 * scsi_eh_host_reset - last resort: reset the whole host.
 * @work_q:	queue of commands to process.
 * @done_q:	queue of commands finished by EH.
 *
 * On SUCCESS every remaining command is re-checked with STU+TUR
 * (try_stu = 1); on FAST_IO_FAIL everything is finished immediately.
 *
 * Return value:
 *	non-zero when @work_q drained completely.
 */
static int scsi_eh_host_reset(struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	LIST_HEAD(check_list);
	int rtn;

	if (!list_empty(work_q)) {
		scmd = list_entry(work_q->next,
				  struct scsi_cmnd, eh_entry);

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending HRST\n"
						  , current->comm));

		rtn = scsi_try_host_reset(scmd);
		if (rtn == SUCCESS) {
			list_splice_init(work_q, &check_list);
		} else if (rtn == FAST_IO_FAIL) {
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
					scsi_eh_finish_cmd(scmd, done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: HRST"
							  " failed\n",
							  current->comm));
		}
	}
	return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
}
1369
1370
1371
1372
1373
1374
/**
 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover.
 * @work_q:	queue of commands that exhausted all recovery steps.
 * @done_q:	queue of commands finished by EH.
 *
 * Every command still on @work_q at this point gets its device taken
 * offline and is then finished.
 */
static void scsi_eh_offline_sdevs(struct list_head *work_q,
				  struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
			    "not ready after error recovery\n");
		scsi_device_set_state(scmd->device, SDEV_OFFLINE);
		if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) {
			/*
			 * FIXME: cancelled commands that were never
			 * successfully aborted are not handled specially
			 * here (lost-command handling is missing).
			 */
		}
		scsi_eh_finish_cmd(scmd, done_q);
	}
	return;
}
1393
1394
1395
1396
1397
/**
 * scsi_noretry_cmd - determine if a command should be fast-failed
 *		      instead of retried.
 * @scmd:	command to check.
 *
 * Return value:
 *	non-zero when the command's failfast flags forbid a retry.
 */
int scsi_noretry_cmd(struct scsi_cmnd *scmd)
{
	switch (host_byte(scmd->result)) {
	case DID_OK:
		break;
	case DID_BUS_BUSY:
		return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
	case DID_PARITY:
		return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
	case DID_ERROR:
		/* reservation conflicts are always retryable */
		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
		    status_byte(scmd->result) == RESERVATION_CONFLICT)
			return 0;
		/* FALLTHRU: other DID_ERROR treated like soft errors */
	case DID_SOFT_ERROR:
		return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
	}

	switch (status_byte(scmd->result)) {
	case CHECK_CONDITION:
		/*
		 * Assume a check condition with FAILFAST_DEV set, or on a
		 * passthrough (BLOCK_PC) request, should not be retried.
		 */
		if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
		    scmd->request->cmd_type == REQ_TYPE_BLOCK_PC)
			return 1;
	}

	return 0;
}
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
/**
 * scsi_decide_disposition - Disposition of a completed command.
 * @scmd:	command to examine.
 *
 * Central classification of a command result into SUCCESS, FAILED,
 * NEEDS_RETRY or ADD_TO_MLQUEUE, inspecting host byte, message byte,
 * status byte and (via scsi_check_sense()) the sense data.
 */
int scsi_decide_disposition(struct scsi_cmnd *scmd)
{
	int rtn;

	/*
	 * An offline device can't do anything useful with a retry —
	 * report the command as successfully "done" so it completes.
	 */
	if (!scsi_device_online(scmd->device)) {
		SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
						  " as SUCCESS\n",
						  __func__));
		return SUCCESS;
	}

	/* first: the host byte */
	switch (host_byte(scmd->result)) {
	case DID_PASSTHROUGH:
		/*
		 * The LLD asked for the result to be passed up untouched;
		 * strip the host byte and report success.
		 */
		scmd->result &= 0xff00ffff;
		return SUCCESS;
	case DID_OK:
		/* host is happy — look at status byte below */
		break;
	case DID_NO_CONNECT:
	case DID_BAD_TARGET:
	case DID_ABORT:
		/*
		 * Nothing is going to answer at this address; the error
		 * is final as far as the midlayer is concerned.
		 */
		return SUCCESS;
	case DID_SOFT_ERROR:
		/* transient — retry within the allowed budget */
		goto maybe_retry;
	case DID_IMM_RETRY:
		return NEEDS_RETRY;

	case DID_REQUEUE:
		return ADD_TO_MLQUEUE;
	case DID_TRANSPORT_DISRUPTED:
		/*
		 * The transport lost the link temporarily; retry like a
		 * soft error (failfast flags still apply via
		 * scsi_noretry_cmd()).
		 */
		goto maybe_retry;
	case DID_TRANSPORT_FAILFAST:
		/*
		 * The transport decided the command must fail fast; the
		 * error is final.
		 */
		return SUCCESS;
	case DID_ERROR:
		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
		    status_byte(scmd->result) == RESERVATION_CONFLICT)
			/*
			 * a reservation conflict: let the status-byte
			 * handling below deal with it
			 */
			break;
		/* FALLTHRU: other DID_ERROR cases retry like bus errors */
	case DID_BUS_BUSY:
	case DID_PARITY:
		goto maybe_retry;
	case DID_TIME_OUT:
		/*
		 * A timed-out probe command (TUR/INQUIRY) is conclusive
		 * enough on its own; anything else failed for real.
		 */
		if ((scmd->cmnd[0] == TEST_UNIT_READY ||
		     scmd->cmnd[0] == INQUIRY)) {
			return SUCCESS;
		} else {
			return FAILED;
		}
	case DID_RESET:
		return SUCCESS;
	default:
		return FAILED;
	}

	/* next: the message byte */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/* finally: the status byte */
	switch (status_byte(scmd->result)) {
	case QUEUE_FULL:
		scsi_handle_queue_full(scmd->device);
		/*
		 * FALLTHRU: QUEUE FULL is handled like BUSY — requeue
		 * through the midlayer queue.
		 */
	case BUSY:
		return ADD_TO_MLQUEUE;
	case GOOD:
		scsi_handle_queue_ramp_up(scmd->device);
		/* FALLTHRU: GOOD also completes successfully */
	case COMMAND_TERMINATED:
		return SUCCESS;
	case TASK_ABORTED:
		goto maybe_retry;
	case CHECK_CONDITION:
		rtn = scsi_check_sense(scmd);
		if (rtn == NEEDS_RETRY)
			goto maybe_retry;
		else if (rtn == TARGET_ERROR) {
			/*
			 * Unrecoverable target error: record it in the
			 * host byte and complete the command.
			 */
			set_host_byte(scmd, DID_TARGET_FAILURE);
			rtn = SUCCESS;
		}
		return rtn;
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
	case ACA_ACTIVE:
		/* nothing actionable; report success */
		return SUCCESS;

	case RESERVATION_CONFLICT:
		sdev_printk(KERN_INFO, scmd->device,
			    "reservation conflict\n");
		set_host_byte(scmd, DID_NEXUS_FAILURE);
		return SUCCESS;
	default:
		return FAILED;
	}
	return FAILED;

 maybe_retry:
	/*
	 * Retry only while the command's retry budget lasts and no
	 * failfast flag forbids it; otherwise complete with the error.
	 */
	if ((++scmd->retries) <= scmd->allowed
	    && !scsi_noretry_cmd(scmd)) {
		return NEEDS_RETRY;
	} else {
		return SUCCESS;
	}
}
1629
/*
 * eh_lock_door_done - completion callback for the asynchronous door-lock
 * request issued by scsi_eh_lock_door(); just releases the request.
 */
static void eh_lock_door_done(struct request *req, int uptodate)
{
	__blk_put_request(req->q, req);
}
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646static void scsi_eh_lock_door(struct scsi_device *sdev)
1647{
1648 struct request *req;
1649
1650
1651
1652
1653
1654 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
1655
1656 req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
1657 req->cmd[1] = 0;
1658 req->cmd[2] = 0;
1659 req->cmd[3] = 0;
1660 req->cmd[4] = SCSI_REMOVAL_PREVENT;
1661 req->cmd[5] = 0;
1662
1663 req->cmd_len = COMMAND_SIZE(req->cmd[0]);
1664
1665 req->cmd_type = REQ_TYPE_BLOCK_PC;
1666 req->cmd_flags |= REQ_QUIET;
1667 req->timeout = 10 * HZ;
1668 req->retries = 5;
1669
1670 blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
1671}
1672
1673
1674
1675
1676
1677
1678
1679
1680
/**
 * scsi_restart_operations - restart io operations to the specified host.
 * @shost:	Host we are restarting.
 *
 * Notes:
 *    When we entered the error handler, we blocked all further i/o to
 *    this device.  we need to 'reverse' this process.
 */
static void scsi_restart_operations(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;

	/*
	 * If the door was locked, we need to insert a door lock request
	 * onto the head of the SCSI request queue for the device.  There
	 * is no point trying to lock the door of an off-line device.
	 */
	shost_for_each_device(sdev, shost) {
		if (scsi_device_online(sdev) && sdev->locked)
			scsi_eh_lock_door(sdev);
	}

	/*
	 * next free up anything directly waiting upon the host.  this
	 * will be requests for character device operations, and also for
	 * ioctls to queued block devices.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
					  __func__));

	/* move back to a running state; fall back to CANCEL, then DEL,
	 * if RUNNING is not a legal transition from the current state */
	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_RUNNING))
		if (scsi_host_set_state(shost, SHOST_CANCEL))
			BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
	spin_unlock_irqrestore(shost->host_lock, flags);

	wake_up(&shost->host_wait);

	/*
	 * finally we need to re-initiate requests that may be pending.  we will
	 * have had everything blocked while error handling is taking place, and
	 * now that error recovery is done, we will need to ensure that these
	 * requests are started.
	 */
	scsi_run_host_queues(shost);

	/*
	 * if eh is active and host_eh_scheduled is pending we need to re-run
	 * recovery.  we do this check after scsi_run_host_queues() to allow
	 * everything pent up since the last eh run a chance to make forward
	 * progress before we sync again.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->host_eh_scheduled)
		if (scsi_host_set_state(shost, SHOST_RECOVERY))
			WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
	spin_unlock_irqrestore(shost->host_lock, flags);
}
1734
1735
1736
1737
1738
1739
1740
/**
 * scsi_eh_ready_devs - check device ready state and recover if not.
 * @shost: 	host to be recovered.
 * @work_q:     &list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 */
void scsi_eh_ready_devs(struct Scsi_Host *shost,
			struct list_head *work_q,
			struct list_head *done_q)
{
	/*
	 * Escalate through increasingly drastic recovery steps; each
	 * helper returns nonzero once the work queue has been drained,
	 * which short-circuits the rest of the chain.  If even a host
	 * reset does not help, take the remaining devices offline.
	 */
	if (!scsi_eh_stu(shost, work_q, done_q) &&
	    !scsi_eh_bus_device_reset(shost, work_q, done_q) &&
	    !scsi_eh_target_reset(shost, work_q, done_q) &&
	    !scsi_eh_bus_reset(shost, work_q, done_q) &&
	    !scsi_eh_host_reset(work_q, done_q))
		scsi_eh_offline_sdevs(work_q, done_q);
}
1753EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
1754
1755
1756
1757
1758
1759void scsi_eh_flush_done_q(struct list_head *done_q)
1760{
1761 struct scsi_cmnd *scmd, *next;
1762
1763 list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
1764 list_del_init(&scmd->eh_entry);
1765 if (scsi_device_online(scmd->device) &&
1766 !scsi_noretry_cmd(scmd) &&
1767 (++scmd->retries <= scmd->allowed)) {
1768 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
1769 " retry cmd: %p\n",
1770 current->comm,
1771 scmd));
1772 scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
1773 } else {
1774
1775
1776
1777
1778
1779 if (!scmd->result)
1780 scmd->result |= (DRIVER_TIMEOUT << 24);
1781 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush finish"
1782 " cmd: %p\n",
1783 current->comm, scmd));
1784 scsi_finish_command(scmd);
1785 }
1786 }
1787}
1788EXPORT_SYMBOL(scsi_eh_flush_done_q);
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813static void scsi_unjam_host(struct Scsi_Host *shost)
1814{
1815 unsigned long flags;
1816 LIST_HEAD(eh_work_q);
1817 LIST_HEAD(eh_done_q);
1818
1819 spin_lock_irqsave(shost->host_lock, flags);
1820 list_splice_init(&shost->eh_cmd_q, &eh_work_q);
1821 spin_unlock_irqrestore(shost->host_lock, flags);
1822
1823 SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));
1824
1825 if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
1826 if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
1827 scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
1828
1829 scsi_eh_flush_done_q(&eh_done_q);
1830}
1831
1832
1833
1834
1835
1836
1837
1838
1839
/**
 * scsi_error_handler - SCSI error handler thread
 * @data:	Host for which we are running.
 *
 * Notes:
 *    This is the main error handling loop.  This is run as a kernel thread
 *    for every SCSI host and handles all error handling activity.
 */
int scsi_error_handler(void *data)
{
	struct Scsi_Host *shost = data;

	/*
	 * We use TASK_INTERRUPTIBLE so that the thread is not
	 * counted against the load average as a running process.
	 * We never actually get interrupted because kthread_run
	 * disables signal delivery for the created thread.
	 */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * Sleep until either an explicit run was scheduled via
		 * scsi_schedule_eh() or every outstanding command on the
		 * host has failed (host_failed == host_busy).
		 */
		if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
		    shost->host_failed != shost->host_busy) {
			SCSI_LOG_ERROR_RECOVERY(1,
				printk("Error handler scsi_eh_%d sleeping\n",
					shost->host_no));
			schedule();
			continue;
		}

		__set_current_state(TASK_RUNNING);
		SCSI_LOG_ERROR_RECOVERY(1,
			printk("Error handler scsi_eh_%d waking up\n",
				shost->host_no));

		/*
		 * We have a host that is failing for some reason.  Figure out
		 * what we need to do to get it up and online again (if we can).
		 * If we fail, we end up taking the thing offline.
		 */
		if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) {
			SCSI_LOG_ERROR_RECOVERY(1,
				printk(KERN_ERR "Error handler scsi_eh_%d "
						"unable to autoresume\n",
					shost->host_no));
			continue;
		}

		/* a transport-supplied strategy overrides the generic one */
		if (shost->transportt->eh_strategy_handler)
			shost->transportt->eh_strategy_handler(shost);
		else
			scsi_unjam_host(shost);

		/*
		 * Note - if the above fails completely, the action is to take
		 * individual devices offline and flush the queue of any
		 * outstanding requests that may have been pending.  When we
		 * restart, we restart any I/O to any other devices on the bus
		 * which are still online.
		 */
		scsi_restart_operations(shost);
		if (!shost->eh_noresume)
			scsi_autopm_put_host(shost);
	}
	__set_current_state(TASK_RUNNING);

	SCSI_LOG_ERROR_RECOVERY(1,
		printk("Error handler scsi_eh_%d exiting\n", shost->host_no));
	shost->ehandler = NULL;
	return 0;
}
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
1925{
1926 struct scsi_device *sdev;
1927
1928 __shost_for_each_device(sdev, shost) {
1929 if (channel == sdev_channel(sdev))
1930 __scsi_report_device_reset(sdev, NULL);
1931 }
1932}
1933EXPORT_SYMBOL(scsi_report_bus_reset);
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
1958{
1959 struct scsi_device *sdev;
1960
1961 __shost_for_each_device(sdev, shost) {
1962 if (channel == sdev_channel(sdev) &&
1963 target == sdev_id(sdev))
1964 __scsi_report_device_reset(sdev, NULL);
1965 }
1966}
1967EXPORT_SYMBOL(scsi_report_device_reset);
1968
/*
 * Dummy scsi_done() routine for the synthetic reset commands built by
 * scsi_reset_provider().  Those commands never travel the normal
 * completion path, so this callback intentionally does nothing.
 */
static void
scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
{
}
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987int
1988scsi_reset_provider(struct scsi_device *dev, int flag)
1989{
1990 struct scsi_cmnd *scmd;
1991 struct Scsi_Host *shost = dev->host;
1992 struct request req;
1993 unsigned long flags;
1994 int rtn;
1995
1996 if (scsi_autopm_get_host(shost) < 0)
1997 return FAILED;
1998
1999 scmd = scsi_get_command(dev, GFP_KERNEL);
2000 blk_rq_init(NULL, &req);
2001 scmd->request = &req;
2002
2003 scmd->cmnd = req.cmd;
2004
2005 scmd->scsi_done = scsi_reset_provider_done_command;
2006 memset(&scmd->sdb, 0, sizeof(scmd->sdb));
2007
2008 scmd->cmd_len = 0;
2009
2010 scmd->sc_data_direction = DMA_BIDIRECTIONAL;
2011
2012 spin_lock_irqsave(shost->host_lock, flags);
2013 shost->tmf_in_progress = 1;
2014 spin_unlock_irqrestore(shost->host_lock, flags);
2015
2016 switch (flag) {
2017 case SCSI_TRY_RESET_DEVICE:
2018 rtn = scsi_try_bus_device_reset(scmd);
2019 if (rtn == SUCCESS)
2020 break;
2021
2022 case SCSI_TRY_RESET_TARGET:
2023 rtn = scsi_try_target_reset(scmd);
2024 if (rtn == SUCCESS)
2025 break;
2026
2027 case SCSI_TRY_RESET_BUS:
2028 rtn = scsi_try_bus_reset(scmd);
2029 if (rtn == SUCCESS)
2030 break;
2031
2032 case SCSI_TRY_RESET_HOST:
2033 rtn = scsi_try_host_reset(scmd);
2034 break;
2035 default:
2036 rtn = FAILED;
2037 }
2038
2039 spin_lock_irqsave(shost->host_lock, flags);
2040 shost->tmf_in_progress = 0;
2041 spin_unlock_irqrestore(shost->host_lock, flags);
2042
2043
2044
2045
2046
2047 SCSI_LOG_ERROR_RECOVERY(3,
2048 printk("%s: waking up host to restart after TMF\n",
2049 __func__));
2050
2051 wake_up(&shost->host_wait);
2052
2053 scsi_run_host_queues(shost);
2054
2055 scsi_next_command(scmd);
2056 scsi_autopm_put_host(shost);
2057 return rtn;
2058}
2059EXPORT_SYMBOL(scsi_reset_provider);
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
2081 struct scsi_sense_hdr *sshdr)
2082{
2083 if (!sense_buffer || !sb_len)
2084 return 0;
2085
2086 memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
2087
2088 sshdr->response_code = (sense_buffer[0] & 0x7f);
2089
2090 if (!scsi_sense_valid(sshdr))
2091 return 0;
2092
2093 if (sshdr->response_code >= 0x72) {
2094
2095
2096
2097 if (sb_len > 1)
2098 sshdr->sense_key = (sense_buffer[1] & 0xf);
2099 if (sb_len > 2)
2100 sshdr->asc = sense_buffer[2];
2101 if (sb_len > 3)
2102 sshdr->ascq = sense_buffer[3];
2103 if (sb_len > 7)
2104 sshdr->additional_length = sense_buffer[7];
2105 } else {
2106
2107
2108
2109 if (sb_len > 2)
2110 sshdr->sense_key = (sense_buffer[2] & 0xf);
2111 if (sb_len > 7) {
2112 sb_len = (sb_len < (sense_buffer[7] + 8)) ?
2113 sb_len : (sense_buffer[7] + 8);
2114 if (sb_len > 12)
2115 sshdr->asc = sense_buffer[12];
2116 if (sb_len > 13)
2117 sshdr->ascq = sense_buffer[13];
2118 }
2119 }
2120
2121 return 1;
2122}
2123EXPORT_SYMBOL(scsi_normalize_sense);
2124
/**
 * scsi_command_normalize_sense - normalize main elements of sense data
 * @cmd:	pointer to the command whose sense buffer should be decoded
 * @sshdr:	place to write the normalized output
 *
 * Convenience wrapper around scsi_normalize_sense() using the command's
 * full sense buffer.  Returns 1 if valid sense data was found, else 0.
 */
int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
				 struct scsi_sense_hdr *sshdr)
{
	return scsi_normalize_sense(cmd->sense_buffer,
			SCSI_SENSE_BUFFERSIZE, sshdr);
}
2131EXPORT_SYMBOL(scsi_command_normalize_sense);
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
/**
 * scsi_sense_desc_find - search for a given descriptor type in
 *			descriptor sense data format.
 *
 * @sense_buffer:	byte array of descriptor format sense data
 * @sb_len:		number of valid bytes in sense_buffer
 * @desc_type:		value of descriptor type to find
 *			(e.g. 0 -> information)
 *
 * Notes:
 *	only valid when sense data is in descriptor format
 *
 * Return value:
 *	pointer to start of (first) descriptor if found else NULL
 */
const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
				int desc_type)
{
	const u8 *desc;
	int remaining, offset, extra;

	/* need at least the header plus a nonzero additional length */
	if (sb_len < 8)
		return NULL;
	remaining = sense_buffer[8 - 1];
	if (remaining == 0)
		return NULL;
	/* descriptor-format response codes only */
	if (sense_buffer[0] != 0x72 && sense_buffer[0] != 0x73)
		return NULL;
	/* clamp to the bytes actually present in the buffer */
	if (remaining > sb_len - 8)
		remaining = sb_len - 8;

	offset = 0;
	while (offset < remaining) {
		desc = &sense_buffer[8 + offset];
		/* length byte exists only if at least two bytes remain */
		extra = (offset < remaining - 1) ? desc[1] : -1;
		if (desc[0] == desc_type)
			return desc;
		if (extra < 0)
			break;
		offset += extra + 2;
	}
	return NULL;
}
2170EXPORT_SYMBOL(scsi_sense_desc_find);
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
2183 u64 * info_out)
2184{
2185 int j;
2186 const u8 * ucp;
2187 u64 ull;
2188
2189 if (sb_len < 7)
2190 return 0;
2191 switch (sense_buffer[0] & 0x7f) {
2192 case 0x70:
2193 case 0x71:
2194 if (sense_buffer[0] & 0x80) {
2195 *info_out = (sense_buffer[3] << 24) +
2196 (sense_buffer[4] << 16) +
2197 (sense_buffer[5] << 8) + sense_buffer[6];
2198 return 1;
2199 } else
2200 return 0;
2201 case 0x72:
2202 case 0x73:
2203 ucp = scsi_sense_desc_find(sense_buffer, sb_len,
2204 0 );
2205 if (ucp && (0xa == ucp[1])) {
2206 ull = 0;
2207 for (j = 0; j < 8; ++j) {
2208 if (j > 0)
2209 ull <<= 8;
2210 ull |= ucp[4 + j];
2211 }
2212 *info_out = ull;
2213 return 1;
2214 } else
2215 return 0;
2216 default:
2217 return 0;
2218 }
2219}
2220EXPORT_SYMBOL(scsi_get_sense_info_fld);
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
/**
 * scsi_build_sense_buffer - build sense data in a buffer
 * @desc:	Sense format (non zero == descriptor format,
 * 		0 == fixed format)
 * @buf:	Where to build sense data
 * @key:	Sense key
 * @asc:	Additional sense code
 * @ascq:	Additional sense code qualifier
 *
 * Only the bytes shown are written; the caller is responsible for any
 * prior initialization of @buf.
 */
void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
{
	if (!desc) {
		/* fixed format (0x70): asc/ascq at bytes 12/13 */
		buf[0] = 0x70;
		buf[2] = key;
		buf[7] = 0xa;
		buf[12] = asc;
		buf[13] = ascq;
		return;
	}
	/* descriptor format (0x72): everything in the header */
	buf[0] = 0x72;
	buf[1] = key;
	buf[2] = asc;
	buf[3] = ascq;
	buf[7] = 0;
}
2248EXPORT_SYMBOL(scsi_build_sense_buffer);
2249