/*
 * scsi_error.c - SCSI midlayer error, timeout and recovery handling.
 *
 * Implements the SCSI error handler (EH): command aborts, device,
 * target, bus and host resets, sense-data interpretation and the
 * per-host error handler thread.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/jiffies.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>

#include "scsi_priv.h"
#include "scsi_logging.h"
#include "scsi_transport_api.h"

#include <trace/events/scsi.h>

static void scsi_eh_done(struct scsi_cmnd *scmd);

/* Seconds to let the bus/host settle after a reset. */
#define BUS_RESET_SETTLE_TIME	(10)
#define HOST_RESET_SETTLE_TIME	(10)

static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
static int scsi_try_to_abort_cmd(struct scsi_host_template *,
				 struct scsi_cmnd *);

/* called with shost->host_lock held */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
	if (atomic_read(&shost->host_busy) == shost->host_failed) {
		trace_scsi_eh_wakeup(shost);
		wake_up_process(shost->ehandler);
		SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
			"Waking error handler thread\n"));
	}
}
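/**
 * scsi_schedule_eh - schedule EH for SCSI host
 * @shost:	SCSI host to invoke error handling on.
 *
 * Schedule SCSI EH without a command.
 */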
void scsi_schedule_eh(struct Scsi_Host *shost)
{
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);

	if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
	    scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
		shost->host_eh_scheduled++;
		scsi_eh_wakeup(shost);
	}

	spin_unlock_irqrestore(shost->host_lock, flags);
}
EXPORT_SYMBOL_GPL(scsi_schedule_eh);

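/*
 * scsi_host_eh_past_deadline - check whether the EH deadline has passed
 * @shost:	SCSI host to check.
 *
 * Returns 1 once error handling has been running for longer than
 * shost->eh_deadline allows; the remaining recovery steps are then
 * skipped in favour of taking the devices offline quickly.
 */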
static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
{
	if (!shost->last_reset || shost->eh_deadline == -1)
		return 0;

	/*
	 * eh_deadline may be switched off concurrently; since 32-bit
	 * reads are atomic on all supported architectures we can
	 * re-check it here without taking the host lock.
	 */
	if (time_before(jiffies, shost->last_reset + shost->eh_deadline) &&
	    shost->eh_deadline > -1)
		return 0;

	return 1;
}

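/**
 * scmd_eh_abort_handler - Handle command aborts
 * @work:	command to be aborted.
 */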
void
scmd_eh_abort_handler(struct work_struct *work)
{
	struct scsi_cmnd *scmd =
		container_of(work, struct scsi_cmnd, abort_work.work);
	struct scsi_device *sdev = scmd->device;
	int rtn;

	if (scsi_host_eh_past_deadline(sdev->host)) {
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "scmd %p eh timeout, not aborting\n",
				    scmd));
	} else {
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "aborting command %p\n", scmd));
		rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
		if (rtn == SUCCESS) {
			set_host_byte(scmd, DID_TIME_OUT);
			if (scsi_host_eh_past_deadline(sdev->host)) {
				SCSI_LOG_ERROR_RECOVERY(3,
					scmd_printk(KERN_INFO, scmd,
						    "scmd %p eh timeout, "
						    "not retrying aborted "
						    "command\n", scmd));
			} else if (!scsi_noretry_cmd(scmd) &&
				   (++scmd->retries <= scmd->allowed)) {
				SCSI_LOG_ERROR_RECOVERY(3,
					scmd_printk(KERN_WARNING, scmd,
						    "scmd %p retry "
						    "aborted command\n", scmd));
				scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
				return;
			} else {
				SCSI_LOG_ERROR_RECOVERY(3,
					scmd_printk(KERN_WARNING, scmd,
						    "scmd %p finish "
						    "aborted command\n", scmd));
				scsi_finish_command(scmd);
				return;
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					    "scmd %p abort %s\n", scmd,
					    (rtn == FAST_IO_FAIL) ?
					    "not send" : "failed"));
		}
	}

	if (!scsi_eh_scmd_add(scmd, 0)) {
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_WARNING, scmd,
				    "scmd %p terminate "
				    "aborted command\n", scmd));
		set_host_byte(scmd, DID_TIME_OUT);
		scsi_finish_command(scmd);
	}
}

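/**
 * scsi_abort_command - schedule a command abort
 * @scmd:	scmd to abort.
 *
 * We only need to abort commands after a command timeout
 */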
static int
scsi_abort_command(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
		/*
		 * Retry after abort failed, escalate to next level.
		 */
		scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED;
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "scmd %p previous abort failed\n", scmd));
		BUG_ON(delayed_work_pending(&scmd->abort_work));
		return FAILED;
	}

	/*
	 * Do not try a command abort if
	 * SCSI EH has already started.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_in_recovery(shost)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "scmd %p not aborting, host in recovery\n",
				    scmd));
		return FAILED;
	}

	if (shost->eh_deadline != -1 && !shost->last_reset)
		shost->last_reset = jiffies;
	spin_unlock_irqrestore(shost->host_lock, flags);

	scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
	SCSI_LOG_ERROR_RECOVERY(3,
		scmd_printk(KERN_INFO, scmd,
			    "scmd %p abort scheduled\n", scmd));
	queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100);
	return SUCCESS;
}

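/**
 * scsi_eh_scmd_add - add scsi cmd to error handling.
 * @scmd:	scmd to run eh on.
 * @eh_flag:	optional SCSI_EH flag.
 *
 * Return value:
 *	0 on failure.
 */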
int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
{
	struct Scsi_Host *shost = scmd->device->host;
	unsigned long flags;
	int ret = 0;

	if (!shost->ehandler)
		return 0;

	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_RECOVERY))
		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
			goto out_unlock;

	if (shost->eh_deadline != -1 && !shost->last_reset)
		shost->last_reset = jiffies;

	ret = 1;
	if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED)
		eh_flag &= ~SCSI_EH_CANCEL_CMD;
	scmd->eh_eflags |= eh_flag;
	list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
	shost->host_failed++;
	scsi_eh_wakeup(shost);
 out_unlock:
	spin_unlock_irqrestore(shost->host_lock, flags);
	return ret;
}

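/**
 * scsi_times_out - Timeout function for normal scsi commands.
 * @req:	request that is timing out.
 *
 * Called by the block layer when a command's timer fires.  Gives the
 * transport class and the LLD a chance to handle the timeout before
 * the command is handed to the error handler.
 */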
enum blk_eh_timer_return scsi_times_out(struct request *req)
{
	struct scsi_cmnd *scmd = req->special;
	enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
	struct Scsi_Host *host = scmd->device->host;

	trace_scsi_dispatch_cmd_timeout(scmd);
	scsi_log_completion(scmd, TIMEOUT_ERROR);

	if (host->eh_deadline != -1 && !host->last_reset)
		host->last_reset = jiffies;

	if (host->transportt->eh_timed_out)
		rtn = host->transportt->eh_timed_out(scmd);
	else if (host->hostt->eh_timed_out)
		rtn = host->hostt->eh_timed_out(scmd);

	if (rtn == BLK_EH_NOT_HANDLED) {
		if (!host->hostt->no_async_abort &&
		    scsi_abort_command(scmd) == SUCCESS)
			return BLK_EH_NOT_HANDLED;

		set_host_byte(scmd, DID_TIME_OUT);
		if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))
			rtn = BLK_EH_HANDLED;
	}

	return rtn;
}

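/**
 * scsi_block_when_processing_errors - Prevent cmds from being queued.
 * @sdev:	Device on which we are performing recovery.
 *
 * Description:
 *     We block until the host is out of error recovery, and then check to
 *     see whether the host or the device is offline.
 *
 * Return value:
 *     0 when dev was taken offline by error recovery. 1 OK to proceed.
 */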
int scsi_block_when_processing_errors(struct scsi_device *sdev)
{
	int online;

	wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));

	online = scsi_device_online(sdev);

	SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev,
		"%s: rtn: %d\n", __func__, online));

	return online;
}
EXPORT_SYMBOL(scsi_block_when_processing_errors);

#ifdef CONFIG_SCSI_LOGGING
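/**
 * scsi_eh_prt_fail_stats - Log info on failures.
 * @shost:	scsi host being recovered.
 * @work_q:	Queue of scsi cmds to process.
 */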
static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
					  struct list_head *work_q)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	int total_failures = 0;
	int cmd_failed = 0;
	int cmd_cancel = 0;
	int devices_failed = 0;

	shost_for_each_device(sdev, shost) {
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (scmd->device == sdev) {
				++total_failures;
				if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
					++cmd_cancel;
				else
					++cmd_failed;
			}
		}

		if (cmd_cancel || cmd_failed) {
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					    "%s: cmds failed: %d, cancel: %d\n",
					    __func__, cmd_failed,
					    cmd_cancel));
			cmd_cancel = 0;
			cmd_failed = 0;
			++devices_failed;
		}
	}

	SCSI_LOG_ERROR_RECOVERY(2, shost_printk(KERN_INFO, shost,
				   "Total of %d commands on %d"
				   " devices require eh work\n",
				   total_failures, devices_failed));
}
#endif

/*
 * The target reported REPORTED LUNS DATA HAS CHANGED; flag it so that
 * the same unit attention from sibling LUNs on this target can be
 * retried rather than failed.
 */
static void scsi_report_lun_change(struct scsi_device *sdev)
{
	sdev->sdev_target->expecting_lun_change = 1;
}

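/**
 * scsi_report_sense - check sense data for reportable events
 * @sdev:	Device reporting the sense data
 * @sshdr:	Normalized sense data
 *
 * Logs a warning and queues a scsi_device event for unit attentions
 * the upper layers may want to act on (inquiry data, LUN map, mode
 * parameter or capacity changes, thin provisioning thresholds).
 */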
static void scsi_report_sense(struct scsi_device *sdev,
			      struct scsi_sense_hdr *sshdr)
{
	enum scsi_device_event evt_type = SDEV_EVT_MAXBITS;

	if (sshdr->sense_key == UNIT_ATTENTION) {
		if (sshdr->asc == 0x3f && sshdr->ascq == 0x03) {
			evt_type = SDEV_EVT_INQUIRY_CHANGE_REPORTED;
			sdev_printk(KERN_WARNING, sdev,
				    "Inquiry data has changed");
		} else if (sshdr->asc == 0x3f && sshdr->ascq == 0x0e) {
			evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
			scsi_report_lun_change(sdev);
			sdev_printk(KERN_WARNING, sdev,
				    "Warning! Received an indication that the "
				    "LUN assignments on this target have "
				    "changed. The Linux SCSI layer does not "
				    "automatically remap LUN assignments.\n");
		} else if (sshdr->asc == 0x3f)
			sdev_printk(KERN_WARNING, sdev,
				    "Warning! Received an indication that the "
				    "operating parameters on this target have "
				    "changed. The Linux SCSI layer does not "
				    "automatically adjust these parameters.\n");

		if (sshdr->asc == 0x38 && sshdr->ascq == 0x07) {
			evt_type = SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED;
			sdev_printk(KERN_WARNING, sdev,
				    "Warning! Received an indication that the "
				    "LUN reached a thin provisioning soft "
				    "threshold.\n");
		}

		if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
			evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
			sdev_printk(KERN_WARNING, sdev,
				    "Mode parameters changed");
		} else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
			evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
			sdev_printk(KERN_WARNING, sdev,
				    "Capacity data has changed");
		} else if (sshdr->asc == 0x2a)
			sdev_printk(KERN_WARNING, sdev,
				    "Parameters changed");
	}

	if (evt_type != SDEV_EVT_MAXBITS) {
		set_bit(evt_type, sdev->pending_events);
		schedule_work(&sdev->event_work);
	}
}

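/**
 * scsi_check_sense - Examine scsi cmd sense
 * @scmd:	Cmd to have sense checked.
 *
 * Return value:
 *	SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
 *
 * Notes:
 *	When a deferred error is detected the current command has
 *	not been executed and needs retrying.
 */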
static int scsi_check_sense(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct scsi_sense_hdr sshdr;

	if (!scsi_command_normalize_sense(scmd, &sshdr))
		return FAILED;	/* no valid sense data */

	scsi_report_sense(sdev, &sshdr);

	if (scsi_sense_is_deferred(&sshdr))
		return NEEDS_RETRY;

	if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
	    sdev->scsi_dh_data->scsi_dh->check_sense) {
		int rc;

		rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
		if (rc != SCSI_RETURN_NOT_HANDLED)
			return rc;
		/* handler does not care; drop down to default handling */
	}

	if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
		/*
		 * For TURs issued by the mid-layer (rather than by EH
		 * itself) return the actual sense data without any
		 * recovery attempt; EH-issued TURs are interpreted below.
		 */
		return SUCCESS;

	/*
	 * Sense with FILEMARK, EOM or ILI set is mainly tape-related;
	 * treat it as success.
	 */
	if (sshdr.response_code == 0x70) {
		/* fixed format */
		if (scmd->sense_buffer[2] & 0xe0)
			return SUCCESS;
	} else {
		/*
		 * descriptor format: look for a stream commands sense data
		 * descriptor (see SSC-3), assuming a single descriptor.
		 * Ignore ILI from SBC-2 READ LONG and WRITE LONG.
		 */
		if ((sshdr.additional_length > 3) &&
		    (scmd->sense_buffer[8] == 0x4) &&
		    (scmd->sense_buffer[11] & 0xe0))
			return SUCCESS;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
		return SUCCESS;
	case RECOVERED_ERROR:
		return /* soft_error */ SUCCESS;

	case ABORTED_COMMAND:
		if (sshdr.asc == 0x10) /* DIF */
			return SUCCESS;

		return NEEDS_RETRY;
	case NOT_READY:
	case UNIT_ATTENTION:
		/*
		 * If we are expecting a cc/ua because of a reset that we
		 * performed, treat this just as a retry.  Otherwise this is
		 * information that we should pass up to the upper-level
		 * driver so that we can deal with it there.
		 */
		if (scmd->device->expecting_cc_ua) {
			/*
			 * Because some devices do not queue unit
			 * attentions correctly, check the additional
			 * sense code and qualifier so as not to squash
			 * a media change unit attention.
			 */
			if (sshdr.asc != 0x28 || sshdr.ascq != 0x00) {
				scmd->device->expecting_cc_ua = 0;
				return NEEDS_RETRY;
			}
		}
		/*
		 * We might also expect a cc/ua if another LUN on the target
		 * reported a UA with an ASC/ASCQ of 3F 0E -
		 * REPORTED LUNS DATA HAS CHANGED.
		 */
		if (scmd->device->sdev_target->expecting_lun_change &&
		    sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
			return NEEDS_RETRY;
		/*
		 * If the device is in the process of becoming ready, we
		 * should retry.
		 */
		if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
			return NEEDS_RETRY;
		/*
		 * If the device is not started, we need to wake
		 * the error handler to start the motor.
		 */
		if (scmd->device->allow_restart &&
		    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
			return FAILED;
		/*
		 * Pass the UA upwards for a determination in the
		 * completion functions.
		 */
		return SUCCESS;

	case DATA_PROTECT:
		if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
			/* Thin provisioning hard threshold reached */
			set_host_byte(scmd, DID_ALLOC_FAILURE);
			return SUCCESS;
		}
		/* fall through */
	case COPY_ABORTED:
	case VOLUME_OVERFLOW:
	case MISCOMPARE:
	case BLANK_CHECK:
		set_host_byte(scmd, DID_TARGET_FAILURE);
		return SUCCESS;

	case MEDIUM_ERROR:
		if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
		    sshdr.asc == 0x13 || /* AMNF DATA FIELD */
		    sshdr.asc == 0x14) { /* RECORD NOT FOUND */
			set_host_byte(scmd, DID_MEDIUM_ERROR);
			return SUCCESS;
		}
		return NEEDS_RETRY;

	case HARDWARE_ERROR:
		if (scmd->device->retry_hwerror)
			return ADD_TO_MLQUEUE;
		else
			set_host_byte(scmd, DID_TARGET_FAILURE);
		/* fall through */
	case ILLEGAL_REQUEST:
		if (sshdr.asc == 0x20 || /* Invalid command operation code */
		    sshdr.asc == 0x21 || /* Logical block address out of range */
		    sshdr.asc == 0x24 || /* Invalid field in cdb */
		    sshdr.asc == 0x26) { /* Parameter value invalid */
			set_host_byte(scmd, DID_TARGET_FAILURE);
		}
		return SUCCESS;

	default:
		return SUCCESS;
	}
}

static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
{
	struct scsi_host_template *sht = sdev->host->hostt;
	struct scsi_device *tmp_sdev;

	if (!sht->track_queue_depth ||
	    sdev->queue_depth >= sdev->max_queue_depth)
		return;

	if (time_before(jiffies,
	    sdev->last_queue_ramp_up + sdev->queue_ramp_up_period))
		return;

	if (time_before(jiffies,
	    sdev->last_queue_full_time + sdev->queue_ramp_up_period))
		return;

	/*
	 * Walk all devices of a target and do
	 * ramp up on them.
	 */
	shost_for_each_device(tmp_sdev, sdev->host) {
		if (tmp_sdev->channel != sdev->channel ||
		    tmp_sdev->id != sdev->id ||
		    tmp_sdev->queue_depth == sdev->max_queue_depth)
			continue;

		scsi_change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1);
		sdev->last_queue_ramp_up = jiffies;
	}
}

static void scsi_handle_queue_full(struct scsi_device *sdev)
{
	struct scsi_host_template *sht = sdev->host->hostt;
	struct scsi_device *tmp_sdev;

	if (!sht->track_queue_depth)
		return;

	shost_for_each_device(tmp_sdev, sdev->host) {
		if (tmp_sdev->channel != sdev->channel ||
		    tmp_sdev->id != sdev->id)
			continue;
		/*
		 * We do not know the number of commands that were at
		 * the device when we got the queue full so we start
		 * from the highest possible value and work our way down.
		 */
		scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
	}
}

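/**
 * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD.
 * @scmd:	SCSI cmd to examine.
 *
 * Notes:
 *    This is *only* called when we are examining the status of commands
 *    queued during error recovery.  The main difference here is that we
 *    don't allow for the possibility of retries here, and we are a lot
 *    more restrictive about what we consider acceptable.
 */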
static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
{
	/*
	 * First check the host byte, to see if there is anything in there
	 * that would indicate what we need to do.
	 */
	if (host_byte(scmd->result) == DID_RESET) {
		/*
		 * We are already in the error handler, so if the sense
		 * is valid we have a pretty good idea of what to do.
		 * If not, we mark it as FAILED.
		 */
		return scsi_check_sense(scmd);
	}
	if (host_byte(scmd->result) != DID_OK)
		return FAILED;

	/*
	 * Next, check the message byte.
	 */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/*
	 * Now, check the status byte to see if this indicates
	 * anything special.
	 */
	switch (status_byte(scmd->result)) {
	case GOOD:
		scsi_handle_queue_ramp_up(scmd->device);
		/* fall through */
	case COMMAND_TERMINATED:
		return SUCCESS;
	case CHECK_CONDITION:
		return scsi_check_sense(scmd);
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
		return SUCCESS;
	case RESERVATION_CONFLICT:
		if (scmd->cmnd[0] == TEST_UNIT_READY)
			/* it is a success, we probed the device and
			 * found it */
			return SUCCESS;
		/* otherwise, we failed to send the command */
		return FAILED;
	case QUEUE_FULL:
		scsi_handle_queue_full(scmd->device);
		/* fall through */
	case BUSY:
		return NEEDS_RETRY;
	default:
		return FAILED;
	}
	return FAILED;
}

/**
 * scsi_eh_done - Completion function for error-handling commands.
 * @scmd:	Cmd that is done.
 */
static void scsi_eh_done(struct scsi_cmnd *scmd)
{
	struct completion *eh_action;

	SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
			"%s scmd: %p result: %x\n",
			__func__, scmd, scmd->result));

	eh_action = scmd->device->host->eh_action;
	if (eh_action)
		complete(eh_action);
}

/**
 * scsi_try_host_reset - ask host adapter to reset itself
 * @scmd:	SCSI cmd to send host reset.
 */
static int scsi_try_host_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	int rtn;
	struct Scsi_Host *host = scmd->device->host;
	struct scsi_host_template *hostt = host->hostt;

	SCSI_LOG_ERROR_RECOVERY(3,
		shost_printk(KERN_INFO, host, "Snd Host RST\n"));

	if (!hostt->eh_host_reset_handler)
		return FAILED;

	rtn = hostt->eh_host_reset_handler(scmd);

	if (rtn == SUCCESS) {
		if (!hostt->skip_settle_delay)
			ssleep(HOST_RESET_SETTLE_TIME);
		spin_lock_irqsave(host->host_lock, flags);
		scsi_report_bus_reset(host, scmd_channel(scmd));
		spin_unlock_irqrestore(host->host_lock, flags);
	}

	return rtn;
}

/**
 * scsi_try_bus_reset - ask host to perform a bus reset
 * @scmd:	SCSI cmd to send bus reset.
 */
static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	int rtn;
	struct Scsi_Host *host = scmd->device->host;
	struct scsi_host_template *hostt = host->hostt;

	SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
		"%s: Snd Bus RST\n", __func__));

	if (!hostt->eh_bus_reset_handler)
		return FAILED;

	rtn = hostt->eh_bus_reset_handler(scmd);

	if (rtn == SUCCESS) {
		if (!hostt->skip_settle_delay)
			ssleep(BUS_RESET_SETTLE_TIME);
		spin_lock_irqsave(host->host_lock, flags);
		scsi_report_bus_reset(host, scmd_channel(scmd));
		spin_unlock_irqrestore(host->host_lock, flags);
	}

	return rtn;
}

static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
{
	sdev->was_reset = 1;
	sdev->expecting_cc_ua = 1;
}

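/**
 * scsi_try_target_reset - Ask host to perform a target reset
 * @scmd:	SCSI cmd used to send a target reset
 *
 * Notes:
 *    There is no timeout for this operation.  If this operation is
 *    unreliable for a given host, then the host itself needs to put a
 *    timer on it, and set the host back to a consistent state prior to
 *    returning.
 */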
static int scsi_try_target_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	int rtn;
	struct Scsi_Host *host = scmd->device->host;
	struct scsi_host_template *hostt = host->hostt;

	if (!hostt->eh_target_reset_handler)
		return FAILED;

	rtn = hostt->eh_target_reset_handler(scmd);
	if (rtn == SUCCESS) {
		spin_lock_irqsave(host->host_lock, flags);
		__starget_for_each_device(scsi_target(scmd->device), NULL,
					  __scsi_report_device_reset);
		spin_unlock_irqrestore(host->host_lock, flags);
	}

	return rtn;
}

/**
 * scsi_try_bus_device_reset - Ask host to perform a device reset
 * @scmd:	SCSI cmd used to send the device reset
 *
 * Notes:
 *    There is no timeout for this operation.  If this operation is
 *    unreliable for a given host, then the host itself needs to put a
 *    timer on it, and set the host back to a consistent state prior to
 *    returning.
 */
static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
{
	int rtn;
	struct scsi_host_template *hostt = scmd->device->host->hostt;

	if (!hostt->eh_device_reset_handler)
		return FAILED;

	rtn = hostt->eh_device_reset_handler(scmd);
	if (rtn == SUCCESS)
		__scsi_report_device_reset(scmd->device, NULL);
	return rtn;
}

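/**
 * scsi_try_to_abort_cmd - Ask host to abort a SCSI command
 * @hostt:	SCSI driver host template
 * @scmd:	SCSI cmd to abort
 *
 * Return value:
 *	SUCCESS, FAILED, or FAST_IO_FAIL
 *
 * Notes:
 *    SUCCESS does not necessarily indicate that the command
 *    has been aborted; it only indicates that the LLDD
 *    has cleared all references to that command.
 *    LLDDs should return FAILED only if an abort was required
 *    but could not be executed.  LLDDs should return FAST_IO_FAIL
 *    if the device is temporarily unavailable (eg due to a
 *    link down on FibreChannel).
 */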
static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt,
				 struct scsi_cmnd *scmd)
{
	if (!hostt->eh_abort_handler)
		return FAILED;

	return hostt->eh_abort_handler(scmd);
}

/* Escalate: abort, then device, target, bus and finally host reset. */
static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
{
	if (scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd) != SUCCESS)
		if (scsi_try_bus_device_reset(scmd) != SUCCESS)
			if (scsi_try_target_reset(scmd) != SUCCESS)
				if (scsi_try_bus_reset(scmd) != SUCCESS)
					scsi_try_host_reset(scmd);
}

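/**
 * scsi_eh_prep_cmnd  - Save a scsi command info as part of error recovery
 * @scmd:       SCSI command structure to hijack
 * @ses:        structure to save restore information
 * @cmnd:       CDB to send. Can be NULL if no new cmnd is needed
 * @cmnd_size:  size in bytes of @cmnd (must be <= BLK_MAX_CDB)
 * @sense_bytes: size of sense data to copy, or 0 (if != 0 @cmnd is ignored)
 *
 * This function is used to save a scsi command information before
 * re-execution as part of the error recovery process.  If @sense_bytes
 * is 0 the command sent must be one that does not transfer any data.
 * If @sense_bytes != 0 @cmnd is ignored and this function sets up a
 * REQUEST_SENSE command and cmnd buffers to read @sense_bytes into
 * @scmd->sense_buffer.
 */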
void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
			unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
{
	struct scsi_device *sdev = scmd->device;

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	ses->cmd_len = scmd->cmd_len;
	ses->cmnd = scmd->cmnd;
	ses->data_direction = scmd->sc_data_direction;
	ses->sdb = scmd->sdb;
	ses->next_rq = scmd->request->next_rq;
	ses->result = scmd->result;
	ses->underflow = scmd->underflow;
	ses->prot_op = scmd->prot_op;

	scmd->prot_op = SCSI_PROT_NORMAL;
	scmd->eh_eflags = 0;
	scmd->cmnd = ses->eh_cmnd;
	memset(scmd->cmnd, 0, BLK_MAX_CDB);
	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
	scmd->request->next_rq = NULL;
	scmd->result = 0;

	if (sense_bytes) {
		scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
					 sense_bytes);
		sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
			    scmd->sdb.length);
		scmd->sdb.table.sgl = &ses->sense_sgl;
		scmd->sc_data_direction = DMA_FROM_DEVICE;
		scmd->sdb.table.nents = 1;
		scmd->cmnd[0] = REQUEST_SENSE;
		scmd->cmnd[4] = scmd->sdb.length;
		scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	} else {
		scmd->sc_data_direction = DMA_NONE;
		if (cmnd) {
			BUG_ON(cmnd_size > BLK_MAX_CDB);
			memcpy(scmd->cmnd, cmnd, cmnd_size);
			scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
		}
	}

	scmd->underflow = 0;

	if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN)
		scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
			(sdev->lun << 5 & 0xe0);

	/*
	 * Zero the sense buffer.  The scsi spec mandates that any
	 * untransferred sense data should be interpreted as being zero.
	 */
	memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
}
EXPORT_SYMBOL(scsi_eh_prep_cmnd);

/**
 * scsi_eh_restore_cmnd  - Restore a scsi command info as part of error recovery
 * @scmd:       SCSI command structure to restore
 * @ses:        saved information from a corresponding call to scsi_eh_prep_cmnd
 *
 * Undo the changes made by scsi_eh_prep_cmnd().
 */
void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
{
	/*
	 * Restore original data
	 */
	scmd->cmd_len = ses->cmd_len;
	scmd->cmnd = ses->cmnd;
	scmd->sc_data_direction = ses->data_direction;
	scmd->sdb = ses->sdb;
	scmd->request->next_rq = ses->next_rq;
	scmd->result = ses->result;
	scmd->underflow = ses->underflow;
	scmd->prot_op = ses->prot_op;
}
EXPORT_SYMBOL(scsi_eh_restore_cmnd);

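/**
 * scsi_send_eh_cmnd  - submit a scsi command as part of error recovery
 * @scmd:       SCSI command structure to hijack
 * @cmnd:       CDB to send
 * @cmnd_size:  size in bytes of @cmnd
 * @timeout:    timeout for this request
 * @sense_bytes: size of sense data to copy or 0
 *
 * This function is used to send a scsi command down to a target device
 * as part of the error recovery process.  See also scsi_eh_prep_cmnd()
 * above.
 *
 * Return value:
 *    SUCCESS or FAILED or NEEDS_RETRY
 */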
static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
			     int cmnd_size, int timeout, unsigned sense_bytes)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long timeleft = timeout;
	struct scsi_eh_save ses;
	const unsigned long stall_for = msecs_to_jiffies(100);
	int rtn;

retry:
	scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
	shost->eh_action = &done;

	scsi_log_send(scmd);
	scmd->scsi_done = scsi_eh_done;
	rtn = shost->hostt->queuecommand(shost, scmd);
	if (rtn) {
		if (timeleft > stall_for) {
			scsi_eh_restore_cmnd(scmd, &ses);
			timeleft -= stall_for;
			msleep(jiffies_to_msecs(stall_for));
			goto retry;
		}
		/* signal not to enter either branch of the if () below */
		timeleft = 0;
		rtn = FAILED;
	} else {
		timeleft = wait_for_completion_timeout(&done, timeout);
		rtn = SUCCESS;
	}

	shost->eh_action = NULL;

	scsi_log_completion(scmd, rtn);

	SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
			"%s: scmd: %p, timeleft: %ld\n",
			__func__, scmd, timeleft));

	/*
	 * If there is time left scsi_eh_done got called, and we will examine
	 * the actual status codes to see whether the command actually did
	 * complete normally, else if we have a zero return and no time left,
	 * the command must still be pending, so abort it and return FAILED.
	 * If we never actually managed to issue the command, because
	 * ->queuecommand() kept failing, hand it over to the error handler.
	 */
	if (timeleft) {
		rtn = scsi_eh_completed_normally(scmd);
		SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
			"%s: scsi_eh_completed_normally %x\n", __func__, rtn));

		switch (rtn) {
		case SUCCESS:
		case NEEDS_RETRY:
		case FAILED:
			break;
		case ADD_TO_MLQUEUE:
			rtn = NEEDS_RETRY;
			break;
		default:
			rtn = FAILED;
			break;
		}
	} else if (rtn != FAILED) {
		scsi_abort_eh_cmnd(scmd);
		rtn = FAILED;
	}

	scsi_eh_restore_cmnd(scmd, &ses);

	return rtn;
}

/**
 * scsi_request_sense - Request sense data from a particular target.
 * @scmd:	SCSI cmd for request sense.
 *
 * Notes:
 *    Some hosts automatically obtain this information, others require
 *    that we obtain it on our own.  This function will *not* return until
 *    the command either times out, or it completes.
 */
static int scsi_request_sense(struct scsi_cmnd *scmd)
{
	return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0);
}

/* Give the upper-level driver (e.g. sd, sr) a chance to adjust the EH
 * disposition for its own commands. */
static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
{
	if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
		struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
		if (sdrv->eh_action)
			rtn = sdrv->eh_action(scmd, rtn);
	}
	return rtn;
}

/**
 * scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
 * @scmd:	Original SCSI cmd that eh has finished.
 * @done_q:	Queue for processed commands.
 *
 * Notes:
 *    We don't want to use the normal command completion while we are still
 *    handling errors - it may cause other commands to be queued, and that
 *    would disturb what we are doing.  Thus we really want to keep a list
 *    of pending commands for final completion, and once we are ready to
 *    leave error handling we handle completion for real.
 */
void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
{
	scmd->device->host->host_failed--;
	scmd->eh_eflags = 0;
	list_move_tail(&scmd->eh_entry, done_q);
}
EXPORT_SYMBOL(scsi_eh_finish_cmd);

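/**
 * scsi_eh_get_sense - Get device sense data.
 * @work_q:	Queue of commands to process.
 * @done_q:	Queue of processed commands.
 *
 * Description:
 *    See if we need to request sense information.  If so, then get it
 *    now, so we have a better idea of what to do.
 *
 * Notes:
 *    This has the unfortunate side effect that if a shost adapter does
 *    not automatically request sense information, we end up shutting
 *    it down before we request it.
 *
 *    All drivers should request sense information in order to avoid
 *    having two-level EH.
 */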
int scsi_eh_get_sense(struct list_head *work_q,
		      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	struct Scsi_Host *shost;
	int rtn;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
		    SCSI_SENSE_VALID(scmd))
			continue;

		shost = scmd->device->host;
		if (scsi_host_eh_past_deadline(shost)) {
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					    "%s: skip request sense, past eh deadline\n",
					    current->comm));
			break;
		}
		if (status_byte(scmd->result) != CHECK_CONDITION)
			/*
			 * don't request sense if there's no check condition
			 * status because the error we're processing isn't one
			 * that has a sense code (and some devices get
			 * confused by sense requests out of the blue)
			 */
			continue;

		SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
						  "%s: requesting sense\n",
						  current->comm));
		rtn = scsi_request_sense(scmd);
		if (rtn != SUCCESS)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
			"sense requested for %p result %x\n",
			scmd, scmd->result));
		SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd));

		rtn = scsi_decide_disposition(scmd);

		/*
		 * if the result was normal, then just pass it along to the
		 * upper level.
		 */
		if (rtn == SUCCESS)
			/* we don't want this command reissued, just
			 * finished with the sense data, so set
			 * retries to the max allowed to ensure it
			 * won't get reissued */
			scmd->retries = scmd->allowed;
		else if (rtn != NEEDS_RETRY)
			continue;

		scsi_eh_finish_cmd(scmd, done_q);
	}

	return list_empty(work_q);
}
EXPORT_SYMBOL_GPL(scsi_eh_get_sense);

/**
 * scsi_eh_tur - Send TUR to device.
 * @scmd:	&scsi_cmnd to send TUR
 *
 * Return value:
 *    0 - Device is ready. 1 - Device NOT ready.
 */
static int scsi_eh_tur(struct scsi_cmnd *scmd)
{
	static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
	int retry_cnt = 1, rtn;

retry_tur:
	rtn = scsi_send_eh_cmnd(scmd, tur_command, 6,
				scmd->device->eh_timeout, 0);

	SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
		"%s: scmd %p rtn %x\n", __func__, scmd, rtn));

	switch (rtn) {
	case NEEDS_RETRY:
		if (retry_cnt--)
			goto retry_tur;
		/* fall through */
	case SUCCESS:
		return 0;
	default:
		return 1;
	}
}

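/**
 * scsi_eh_test_devices - check if devices are responding from error recovery.
 * @cmd_list:	scsi commands in error recovery.
 * @work_q:	queue for commands which still need more error recovery
 * @done_q:	queue for commands which are finished
 * @try_stu:	boolean on if a STU command should be tried in addition to TUR.
 *
 * Description:
 *    Tests if devices are in a working state.  Commands to devices now in
 *    a working state are sent to the done_q while commands to devices
 *    which are still failing to respond are returned to the work_q for
 *    more processing.
 */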
static int scsi_eh_test_devices(struct list_head *cmd_list,
				struct list_head *work_q,
				struct list_head *done_q, int try_stu)
{
	struct scsi_cmnd *scmd, *next;
	struct scsi_device *sdev;
	int finish_cmds;

	while (!list_empty(cmd_list)) {
		scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
		sdev = scmd->device;

		if (!try_stu) {
			if (scsi_host_eh_past_deadline(sdev->host)) {
				/* Push items back onto work_q */
				list_splice_init(cmd_list, work_q);
				SCSI_LOG_ERROR_RECOVERY(3,
					sdev_printk(KERN_INFO, sdev,
						    "%s: skip test device, past eh deadline",
						    current->comm));
				break;
			}
		}

		finish_cmds = !scsi_device_online(scmd->device) ||
			(try_stu && !scsi_eh_try_stu(scmd) &&
			 !scsi_eh_tur(scmd)) ||
			!scsi_eh_tur(scmd);

		list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
			if (scmd->device == sdev) {
				if (finish_cmds &&
				    (try_stu ||
				     scsi_eh_action(scmd, SUCCESS) == SUCCESS))
					scsi_eh_finish_cmd(scmd, done_q);
				else
					list_move_tail(&scmd->eh_entry, work_q);
			}
	}
	return list_empty(work_q);
}

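/**
 * scsi_eh_abort_cmds - abort pending commands.
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 *
 * Description:
 *    Try and see whether or not it makes sense to try and abort the
 *    running command.  This only works out to be the case if we have one
 *    command that has timed out.  If the command simply failed, it makes
 *    no sense to try and abort the command, since as far as the shost
 *    adapter is concerned, it isn't running.
 */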
static int scsi_eh_abort_cmds(struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	LIST_HEAD(check_list);
	int rtn;
	struct Scsi_Host *shost;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
			continue;
		shost = scmd->device->host;
		if (scsi_host_eh_past_deadline(shost)) {
			list_splice_init(&check_list, work_q);
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					    "%s: skip aborting cmd, past eh deadline\n",
					    current->comm));
			return list_empty(work_q);
		}
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "%s: aborting cmd\n", current->comm));
		rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
		if (rtn == FAILED) {
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					    "%s: aborting cmd failed\n",
					    current->comm));
			list_splice_init(&check_list, work_q);
			return list_empty(work_q);
		}
		scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
		if (rtn == FAST_IO_FAIL)
			scsi_eh_finish_cmd(scmd, done_q);
		else
			list_move_tail(&scmd->eh_entry, &check_list);
	}

	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}

/**
 * scsi_eh_try_stu - Send START_UNIT to device.
 * @scmd:	&scsi_cmnd to send START_UNIT
 *
 * Return value:
 *    0 - Device is ready. 1 - Device NOT ready.
 */
static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
{
	static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};

	if (scmd->device->allow_restart) {
		int i, rtn = NEEDS_RETRY;

		for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
			rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
					scmd->device->request_queue->rq_timeout, 0);

		if (rtn == SUCCESS)
			return 0;
	}

	return 1;
}

/**
 * scsi_eh_stu - send START_UNIT if needed
 * @shost:	&scsi host being recovered.
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 *
 * Notes:
 *    If commands are failing due to not ready, initializing command required,
 *    try revalidating the device, which will end up sending a start unit.
 */
static int scsi_eh_stu(struct Scsi_Host *shost,
		       struct list_head *work_q,
		       struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *stu_scmd, *next;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		if (scsi_host_eh_past_deadline(shost)) {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					    "%s: skip START_UNIT, past eh deadline\n",
					    current->comm));
			break;
		}
		stu_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
			    scsi_check_sense(scmd) == FAILED) {
				stu_scmd = scmd;
				break;
			}

		if (!stu_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3,
			sdev_printk(KERN_INFO, sdev,
				    "%s: Sending START_UNIT\n",
				    current->comm));

		if (!scsi_eh_try_stu(stu_scmd)) {
			if (!scsi_device_online(sdev) ||
			    !scsi_eh_tur(stu_scmd)) {
				list_for_each_entry_safe(scmd, next,
							 work_q, eh_entry) {
					if (scmd->device == sdev &&
					    scsi_eh_action(scmd, SUCCESS) == SUCCESS)
						scsi_eh_finish_cmd(scmd, done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					    "%s: START_UNIT failed\n",
					    current->comm));
		}
	}

	return list_empty(work_q);
}

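/**
 * scsi_eh_bus_device_reset - send bdr if needed
 * @shost:	scsi host being recovered.
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 *
 * Notes:
 *    Try a bus device reset.  Still, look to see whether we have multiple
 *    devices that are jammed or not - if we have multiple devices, it
 *    makes no sense to try bus_device_reset - we really would need to try
 *    a bus_reset instead.
 */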
static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
				    struct list_head *work_q,
				    struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *bdr_scmd, *next;
	struct scsi_device *sdev;
	int rtn;

	shost_for_each_device(sdev, shost) {
		if (scsi_host_eh_past_deadline(shost)) {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					    "%s: skip BDR, past eh deadline\n",
					    current->comm));
			break;
		}
		bdr_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev) {
				bdr_scmd = scmd;
				break;
			}

		if (!bdr_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3,
			sdev_printk(KERN_INFO, sdev,
				    "%s: Sending BDR\n", current->comm));
		rtn = scsi_try_bus_device_reset(bdr_scmd);
		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
			if (!scsi_device_online(sdev) ||
			    rtn == FAST_IO_FAIL ||
			    !scsi_eh_tur(bdr_scmd)) {
				list_for_each_entry_safe(scmd, next,
							 work_q, eh_entry) {
					if (scmd->device == sdev &&
					    scsi_eh_action(scmd, rtn) != FAILED)
						scsi_eh_finish_cmd(scmd,
								   done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					    "%s: BDR failed\n", current->comm));
		}
	}

	return list_empty(work_q);
}

/**
 * scsi_eh_target_reset - send target reset if needed
 * @shost:	scsi host being recovered.
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 *
 * Notes:
 *    Try a target reset.
 */
static int scsi_eh_target_reset(struct Scsi_Host *shost,
				struct list_head *work_q,
				struct list_head *done_q)
{
	LIST_HEAD(tmp_list);
	LIST_HEAD(check_list);

	list_splice_init(work_q, &tmp_list);

	while (!list_empty(&tmp_list)) {
		struct scsi_cmnd *next, *scmd;
		int rtn;
		unsigned int id;

		if (scsi_host_eh_past_deadline(shost)) {
			/* push back on work queue for further processing */
			list_splice_init(&check_list, work_q);
			list_splice_init(&tmp_list, work_q);
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					    "%s: Skip target reset, past eh deadline\n",
					    current->comm));
			return list_empty(work_q);
		}

		scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
		id = scmd_id(scmd);

		SCSI_LOG_ERROR_RECOVERY(3,
			shost_printk(KERN_INFO, shost,
				     "%s: Sending target reset to target %d\n",
				     current->comm, id));
		rtn = scsi_try_target_reset(scmd);
		if (rtn != SUCCESS && rtn != FAST_IO_FAIL)
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					     "%s: Target reset failed"
					     " target: %d\n",
					     current->comm, id));
		list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {
			if (scmd_id(scmd) != id)
				continue;

			if (rtn == SUCCESS)
				list_move_tail(&scmd->eh_entry, &check_list);
			else if (rtn == FAST_IO_FAIL)
				scsi_eh_finish_cmd(scmd, done_q);
			else
				/* push back on work queue for further processing */
				list_move(&scmd->eh_entry, work_q);
		}
	}

	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}

/**
 * scsi_eh_bus_reset - send a bus reset
 * @shost:	&scsi host being recovered.
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 */
static int scsi_eh_bus_reset(struct Scsi_Host *shost,
			     struct list_head *work_q,
			     struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *chan_scmd, *next;
	LIST_HEAD(check_list);
	unsigned int channel;
	int rtn;

	/*
	 * We really want to loop over the various channels, and do this on
	 * a channel by channel basis.  We should also check to see if any
	 * of the failed commands are on soft_reset devices, and if so, skip
	 * the reset.
	 */

	for (channel = 0; channel <= shost->max_channel; channel++) {
		if (scsi_host_eh_past_deadline(shost)) {
			list_splice_init(&check_list, work_q);
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					    "%s: skip BRST, past eh deadline\n",
					    current->comm));
			return list_empty(work_q);
		}

		chan_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (channel == scmd_channel(scmd)) {
				chan_scmd = scmd;
				break;
			}
		}

		if (!chan_scmd)
			continue;
		SCSI_LOG_ERROR_RECOVERY(3,
			shost_printk(KERN_INFO, shost,
				     "%s: Sending BRST chan: %d\n",
				     current->comm, channel));
		rtn = scsi_try_bus_reset(chan_scmd);
		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				if (channel == scmd_channel(scmd)) {
					if (rtn == FAST_IO_FAIL)
						scsi_eh_finish_cmd(scmd,
								   done_q);
					else
						list_move_tail(&scmd->eh_entry,
							       &check_list);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					     "%s: BRST failed chan: %d\n",
					     current->comm, channel));
		}
	}
	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}

/**
 * scsi_eh_host_reset - send a host reset
 * @shost:	host to be reset.
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 */
static int scsi_eh_host_reset(struct Scsi_Host *shost,
			      struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	LIST_HEAD(check_list);
	int rtn;

	if (!list_empty(work_q)) {
		scmd = list_entry(work_q->next,
				  struct scsi_cmnd, eh_entry);

		SCSI_LOG_ERROR_RECOVERY(3,
			shost_printk(KERN_INFO, shost,
				     "%s: Sending HRST\n",
				     current->comm));

		rtn = scsi_try_host_reset(scmd);
		if (rtn == SUCCESS) {
			list_splice_init(work_q, &check_list);
		} else if (rtn == FAST_IO_FAIL) {
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				scsi_eh_finish_cmd(scmd, done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					     "%s: HRST failed\n",
					     current->comm));
		}
	}
	return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
}

/**
 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 */
static void scsi_eh_offline_sdevs(struct list_head *work_q,
				  struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
			    "not ready after error recovery\n");
		scsi_device_set_state(scmd->device, SDEV_OFFLINE);
		if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) {
			/*
			 * FIXME: Handle lost cmds.
			 */
		}
		scsi_eh_finish_cmd(scmd, done_q);
	}
	return;
}

/**
 * scsi_noretry_cmd - determine if command should be failed fast
 * @scmd:	SCSI cmd to examine.
 */
int scsi_noretry_cmd(struct scsi_cmnd *scmd)
{
	switch (host_byte(scmd->result)) {
	case DID_OK:
		break;
	case DID_TIME_OUT:
		goto check_type;
	case DID_BUS_BUSY:
		return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
	case DID_PARITY:
		return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
	case DID_ERROR:
		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
		    status_byte(scmd->result) == RESERVATION_CONFLICT)
			return 0;
		/* fall through */
	case DID_SOFT_ERROR:
		return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
	}

	if (status_byte(scmd->result) != CHECK_CONDITION)
		return 0;

check_type:
	/*
	 * assume caller has checked sense and determined
	 * the check condition was retryable.
	 */
	if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
	    scmd->request->cmd_type == REQ_TYPE_BLOCK_PC)
		return 1;
	else
		return 0;
}

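/**
 * scsi_decide_disposition - Disposition a cmd on return from LLD.
 * @scmd:	SCSI cmd to examine.
 *
 * Notes:
 *    This is *only* called when we are examining the status after sending
 *    out the actual data command.  Any commands that are queued for error
 *    recovery (e.g. test_unit_ready) do *not* come through here.
 *
 *    When this routine returns FAILED, it means the error handler thread
 *    is woken.  In cases where the error code indicates an error that
 *    doesn't require the error handler (i.e. we don't need to
 *    abort/reset), this function should return SUCCESS.
 */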
int scsi_decide_disposition(struct scsi_cmnd *scmd)
{
	int rtn;

	/*
	 * if the device is offline, then we clearly just pass the result back
	 * up to the top level.
	 */
	if (!scsi_device_online(scmd->device)) {
		SCSI_LOG_ERROR_RECOVERY(5, scmd_printk(KERN_INFO, scmd,
			"%s: device offline - report as SUCCESS\n", __func__));
		return SUCCESS;
	}

	/*
	 * first check the host byte, to see if there is anything in there
	 * that would indicate what we need to do.
	 */
	switch (host_byte(scmd->result)) {
	case DID_PASSTHROUGH:
		/*
		 * no matter what, pass this through to the upper layer.
		 * nuke this special code so that it looks like we are saying
		 * did_ok.
		 */
		scmd->result &= 0xff00ffff;
		return SUCCESS;
	case DID_OK:
		/*
		 * looks good.  drop through, and check the next byte.
		 */
		break;
	case DID_ABORT:
		if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
			set_host_byte(scmd, DID_TIME_OUT);
			return SUCCESS;
		}
		/* fall through */
	case DID_NO_CONNECT:
	case DID_BAD_TARGET:
		/*
		 * note - this means that we just report the status back
		 * to the top level driver, not that we actually think
		 * that it indicates SUCCESS.
		 */
		return SUCCESS;
	case DID_SOFT_ERROR:
		/*
		 * when the low level driver returns did_soft_error,
		 * it is responsible for keeping an internal retry counter
		 * in order to avoid endless loops
		 */
		goto maybe_retry;
	case DID_IMM_RETRY:
		return NEEDS_RETRY;

	case DID_REQUEUE:
		return ADD_TO_MLQUEUE;
	case DID_TRANSPORT_DISRUPTED:
		/*
		 * LLD/transport was disrupted during processing of the IO.
		 * The transport class is now blocked/blocking,
		 * and the transport will decide what to do with the IO
		 * based on its timers and recovery capabilities if
		 * there are enough retries.
		 */
		goto maybe_retry;
	case DID_TRANSPORT_FAILFAST:
		/*
		 * The transport decided to failfast the IO (most likely
		 * the fast io fail tmo fired), so send IO directly upwards.
		 */
		return SUCCESS;
	case DID_ERROR:
		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
		    status_byte(scmd->result) == RESERVATION_CONFLICT)
			/*
			 * execute reservation conflict processing code
			 * lower down
			 */
			break;
		/* fall through */
	case DID_BUS_BUSY:
	case DID_PARITY:
		goto maybe_retry;
	case DID_TIME_OUT:
		/*
		 * when we scan the bus, we get timeout messages for
		 * these commands if there is no device available.
		 * other hosts report did_no_connect for the same thing.
		 */
		if ((scmd->cmnd[0] == TEST_UNIT_READY ||
		     scmd->cmnd[0] == INQUIRY)) {
			return SUCCESS;
		} else {
			return FAILED;
		}
	case DID_RESET:
		return SUCCESS;
	default:
		return FAILED;
	}

	/*
	 * next, check the message byte.
	 */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/*
	 * check the status byte to see if this indicates anything special.
	 */
	switch (status_byte(scmd->result)) {
	case QUEUE_FULL:
		scsi_handle_queue_full(scmd->device);
		/*
		 * the case of trying to send too many commands to a
		 * tagged queueing device.
		 */
		/* fall through */
	case BUSY:
		/*
		 * device can't talk to us at the moment.  Should only
		 * occur (SAM-3) when the task queue is empty, so will cause
		 * the empty queue handling to trigger a stall in the
		 * device.
		 */
		return ADD_TO_MLQUEUE;
	case GOOD:
		if (scmd->cmnd[0] == REPORT_LUNS)
			scmd->device->sdev_target->expecting_lun_change = 0;
		scsi_handle_queue_ramp_up(scmd->device);
		/* fall through */
	case COMMAND_TERMINATED:
		return SUCCESS;
	case TASK_ABORTED:
		goto maybe_retry;
	case CHECK_CONDITION:
		rtn = scsi_check_sense(scmd);
		if (rtn == NEEDS_RETRY)
			goto maybe_retry;
		/* if rtn == FAILED, we have no sense information;
		 * returning FAILED will wake the error handler thread
		 * to collect the sense and redo the decide
		 * disposition */
		return rtn;
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
	case ACA_ACTIVE:
		return SUCCESS;

	case RESERVATION_CONFLICT:
		sdev_printk(KERN_INFO, scmd->device,
			    "reservation conflict\n");
		set_host_byte(scmd, DID_NEXUS_FAILURE);
		return SUCCESS; /* causes immediate i/o error */
	default:
		return FAILED;
	}
	return FAILED;

maybe_retry:

	/* we requeue for retry because the error was retryable, and
	 * the request was not marked fast fail.  Note that above,
	 * even if the request is marked fast fail, we still requeue
	 * for queue congestion conditions (QUEUE_FULL or BUSY) */
	if ((++scmd->retries) <= scmd->allowed
	    && !scsi_noretry_cmd(scmd)) {
		return NEEDS_RETRY;
	} else {
		/*
		 * no more retries - report this one back to upper level.
		 */
		return SUCCESS;
	}
}

/* Completion callback for the door-lock request: just free the request. */
static void eh_lock_door_done(struct request *req, int uptodate)
{
	__blk_put_request(req->q, req);
}

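/**
 * scsi_eh_lock_door - Prevent medium removal for the specified device
 * @sdev:	SCSI device to prevent medium removal
 *
 * Locking:
 *	We must be called from process context.
 *
 * Notes:
 *	We queue up an asynchronous PREVENT ALLOW MEDIUM REMOVAL request
 *	on the device's request queue, and continue.
 */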
static void scsi_eh_lock_door(struct scsi_device *sdev)
{
	struct request *req;

	/*
	 * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
	 * request becomes available
	 */
	req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
	if (IS_ERR(req))
		return;

	blk_rq_set_block_pc(req);

	req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
	req->cmd[1] = 0;
	req->cmd[2] = 0;
	req->cmd[3] = 0;
	req->cmd[4] = SCSI_REMOVAL_PREVENT;
	req->cmd[5] = 0;

	req->cmd_len = COMMAND_SIZE(req->cmd[0]);

	req->cmd_flags |= REQ_QUIET;
	req->timeout = 10 * HZ;
	req->retries = 5;

	blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
}

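/**
 * scsi_restart_operations - restart io operations to the specified host.
 * @shost:	Host we are restarting.
 *
 * Notes:
 *    When we entered the error handler, we blocked all further i/o to
 *    this device.  We need to 'reverse' this process.
 */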
static void scsi_restart_operations(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;

	/*
	 * If the door was locked, we need to insert a door lock request
	 * onto the head of the SCSI request queue for the device.  There
	 * is no point trying to lock the door of an off-line device.
	 */
	shost_for_each_device(sdev, shost) {
		if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
			scsi_eh_lock_door(sdev);
			sdev->was_reset = 0;
		}
	}

	/*
	 * next free up anything directly waiting upon the host.  this
	 * will be requests for character device operations, and also for
	 * ioctls to queued block devices.
	 */
	SCSI_LOG_ERROR_RECOVERY(3,
		shost_printk(KERN_INFO, shost, "waking up host to restart\n"));

	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_RUNNING))
		if (scsi_host_set_state(shost, SHOST_CANCEL))
			BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
	spin_unlock_irqrestore(shost->host_lock, flags);

	wake_up(&shost->host_wait);

	/*
	 * finally we need to re-initiate requests that may be pending.  we will
	 * have had everything blocked while error handling is taking place, and
	 * now that error recovery is done, we will need to ensure that these
	 * requests are started.
	 */
	scsi_run_host_queues(shost);

	/*
	 * if eh is active and host_eh_scheduled is pending we need to re-run
	 * recovery.  we do this check after scsi_run_host_queues() to allow
	 * everything pent up since the last eh run a chance to make forward
	 * progress before we sync again.  Either we'll immediately re-run
	 * recovery or scsi_device_unbusy() will wake up the error handler thread
	 * when it sees a request on one of the error-handling queues.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->host_eh_scheduled)
		if (scsi_host_set_state(shost, SHOST_RECOVERY))
			WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * scsi_eh_ready_devs - check device ready state and recover if not.
 * @shost:	host to be recovered.
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 */
void scsi_eh_ready_devs(struct Scsi_Host *shost,
			struct list_head *work_q,
			struct list_head *done_q)
{
	if (!scsi_eh_stu(shost, work_q, done_q))
		if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
			if (!scsi_eh_target_reset(shost, work_q, done_q))
				if (!scsi_eh_bus_reset(shost, work_q, done_q))
					if (!scsi_eh_host_reset(shost, work_q, done_q))
						scsi_eh_offline_sdevs(work_q,
								      done_q);
}
EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);

/**
 * scsi_eh_flush_done_q - finish processed commands or retry them.
 * @done_q:	list_head of processed commands.
 */
void scsi_eh_flush_done_q(struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;

	list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
		list_del_init(&scmd->eh_entry);
		if (scsi_device_online(scmd->device) &&
		    !scsi_noretry_cmd(scmd) &&
		    (++scmd->retries <= scmd->allowed)) {
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					    "%s: flush retry cmd: %p\n",
					    current->comm, scmd));
			scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
		} else {
			/*
			 * If just we got sense for the device (called
			 * scsi_eh_get_sense), scmd->result is already
			 * set, do not set DRIVER_TIMEOUT.
			 */
			if (!scmd->result)
				scmd->result |= (DRIVER_TIMEOUT << 24);
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					    "%s: flush finish cmd: %p\n",
					    current->comm, scmd));
			scsi_finish_command(scmd);
		}
	}
}
EXPORT_SYMBOL(scsi_eh_flush_done_q);

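/**
 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
 * @shost:	Host to unjam.
 *
 * Notes:
 *    When we come in here, we *know* that all commands on the bus have
 *    either completed, failed or timed out.  We also know that no further
 *    commands are being sent to the host, so things are relatively quiet
 *    and we have freedom to fiddle with things as we wish.
 *
 *    This is only the *default* implementation.  It is possible for
 *    individual drivers to supply their own version of this function
 *    via the transport class eh_strategy_handler hook.
 *
 *    First, we need to determine which commands are still pending on
 *    the bus.
 */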
static void scsi_unjam_host(struct Scsi_Host *shost)
{
	unsigned long flags;
	LIST_HEAD(eh_work_q);
	LIST_HEAD(eh_done_q);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(shost->host_lock, flags);

	SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));

	if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
		if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
			scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);

	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->eh_deadline != -1)
		shost->last_reset = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);
	scsi_eh_flush_done_q(&eh_done_q);
}

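/**
 * scsi_error_handler - SCSI error handler thread
 * @data:	Host for which we are running.
 *
 * Notes:
 *    This is the main error handling loop.  This is run as a kernel thread
 *    for every SCSI host and handles all error handling activity.
 */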
int scsi_error_handler(void *data)
{
	struct Scsi_Host *shost = data;

	/*
	 * We use TASK_INTERRUPTIBLE so that the thread is not
	 * counted against the load average as a running process.
	 * We never actually get interrupted because kthread_run
	 * disables signal delivery for the created thread.
	 */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
		    shost->host_failed != atomic_read(&shost->host_busy)) {
			SCSI_LOG_ERROR_RECOVERY(1,
				shost_printk(KERN_INFO, shost,
					     "scsi_eh_%d: sleeping\n",
					     shost->host_no));
			schedule();
			continue;
		}

		__set_current_state(TASK_RUNNING);
		SCSI_LOG_ERROR_RECOVERY(1,
			shost_printk(KERN_INFO, shost,
				     "scsi_eh_%d: waking up %d/%d/%d\n",
				     shost->host_no, shost->host_eh_scheduled,
				     shost->host_failed,
				     atomic_read(&shost->host_busy)));

		/*
		 * We have a host that is failing for some reason.  Figure out
		 * what we need to do to get it up and online again (if we can).
		 * If we fail, we end up taking the thing offline.
		 */
		if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) {
			SCSI_LOG_ERROR_RECOVERY(1,
				shost_printk(KERN_ERR, shost,
					     "scsi_eh_%d: unable to autoresume\n",
					     shost->host_no));
			continue;
		}

		if (shost->transportt->eh_strategy_handler)
			shost->transportt->eh_strategy_handler(shost);
		else
			scsi_unjam_host(shost);

		/*
		 * Note - if the above fails completely, the action is to take
		 * individual devices offline and flush the queue of any
		 * outstanding requests that may have been pending.  When we
		 * restart, we restart any I/O to any other devices on the bus
		 * which are still online.
		 */
		scsi_restart_operations(shost);
		if (!shost->eh_noresume)
			scsi_autopm_put_host(shost);
	}
	__set_current_state(TASK_RUNNING);

	SCSI_LOG_ERROR_RECOVERY(1,
		shost_printk(KERN_INFO, shost,
			     "Error handler scsi_eh_%d exiting\n",
			     shost->host_no));
	shost->ehandler = NULL;
	return 0;
}

/*
 * Function:	scsi_report_bus_reset()
 *
 * Purpose:	Utility function used by low-level drivers to report that
 *		they have observed a bus reset on the bus being handled.
 *
 * Arguments:	shost       - Host in question
 *		channel     - channel on which reset was observed.
 *
 * Returns:	Nothing
 *
 * Lock status:	Host lock must be held.
 *
 * Notes:	This only needs to be called if the reset is one which
 *		originates from an unknown location.  Resets originated
 *		by the mid-level itself don't need to call this, but there
 *		should be no harm.
 *
 *		The main purpose of this is to make sure that a
 *		CHECK_CONDITION is properly treated.
 */
void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
{
	struct scsi_device *sdev;

	__shost_for_each_device(sdev, shost) {
		if (channel == sdev_channel(sdev))
			__scsi_report_device_reset(sdev, NULL);
	}
}
EXPORT_SYMBOL(scsi_report_bus_reset);

/*
 * Function:	scsi_report_device_reset()
 *
 * Purpose:	Utility function used by low-level drivers to report that
 *		they have observed a device reset on the device being handled.
 *
 * Arguments:	shost       - Host in question
 *		channel     - channel on which reset was observed
 *		target	    - target on which reset was observed
 *
 * Returns:	Nothing
 *
 * Lock status:	Host lock must be held
 *
 * Notes:	This only needs to be called if the reset is one which
 *		originates from an unknown location.  Resets originated
 *		by the mid-level itself don't need to call this, but there
 *		should be no harm.
 *
 *		The main purpose of this is to make sure that a
 *		CHECK_CONDITION is properly treated.
 */
void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
{
	struct scsi_device *sdev;

	__shost_for_each_device(sdev, shost) {
		if (channel == sdev_channel(sdev) &&
		    target == sdev_id(sdev))
			__scsi_report_device_reset(sdev, NULL);
	}
}
EXPORT_SYMBOL(scsi_report_device_reset);

/* Completion callback for ioctl-issued resets; nothing to do. */
static void
scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
{
}

/**
 * scsi_ioctl_reset: explicitly reset a host/bus/target/device
 * @dev:	scsi_device to operate on
 * @arg:	reset type (see sg.h)
 */
int
scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
{
	struct scsi_cmnd *scmd;
	struct Scsi_Host *shost = dev->host;
	struct request req;
	unsigned long flags;
	int error = 0, rtn, val;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	error = get_user(val, arg);
	if (error)
		return error;

	if (scsi_autopm_get_host(shost) < 0)
		return -EIO;

	error = -EIO;
	scmd = scsi_get_command(dev, GFP_KERNEL);
	if (!scmd)
		goto out_put_autopm_host;

	blk_rq_init(NULL, &req);
	scmd->request = &req;

	scmd->cmnd = req.cmd;

	scmd->scsi_done = scsi_reset_provider_done_command;
	memset(&scmd->sdb, 0, sizeof(scmd->sdb));

	scmd->cmd_len = 0;

	scmd->sc_data_direction = DMA_BIDIRECTIONAL;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->tmf_in_progress = 1;
	spin_unlock_irqrestore(shost->host_lock, flags);

	switch (val & ~SG_SCSI_RESET_NO_ESCALATE) {
	case SG_SCSI_RESET_NOTHING:
		rtn = SUCCESS;
		break;
	case SG_SCSI_RESET_DEVICE:
		rtn = scsi_try_bus_device_reset(scmd);
		if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
			break;
		/* FALLTHROUGH */
	case SG_SCSI_RESET_TARGET:
		rtn = scsi_try_target_reset(scmd);
		if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
			break;
		/* FALLTHROUGH */
	case SG_SCSI_RESET_BUS:
		rtn = scsi_try_bus_reset(scmd);
		if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
			break;
		/* FALLTHROUGH */
	case SG_SCSI_RESET_HOST:
		rtn = scsi_try_host_reset(scmd);
		if (rtn == SUCCESS)
			break;
		/* FALLTHROUGH */
	default:
		rtn = FAILED;
		break;
	}

	error = (rtn == SUCCESS) ? 0 : -EIO;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->tmf_in_progress = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * be sure to wake up anyone who was sleeping or had their queue
	 * suspended while we performed the TMF.
	 */
	SCSI_LOG_ERROR_RECOVERY(3,
		shost_printk(KERN_INFO, shost,
			     "waking up host to restart after TMF\n"));

	wake_up(&shost->host_wait);
	scsi_run_host_queues(shost);

	scsi_put_command(scmd);

out_put_autopm_host:
	scsi_autopm_put_host(shost);
	return error;
}
EXPORT_SYMBOL(scsi_ioctl_reset);

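/**
 * scsi_normalize_sense - normalize main elements from either fixed or
 *			descriptor sense data format into a common format.
 * @sense_buffer:	byte array containing sense data returned by device
 * @sb_len:		number of valid bytes in sense_buffer
 * @sshdr:		pointer to instance of structure that common
 *			elements are written to.
 *
 * Notes:
 *	The "main elements" from sense data are: response_code, sense_key,
 *	asc, ascq and additional_length (only for descriptor format).
 *
 *	Typically this function can be called after a device has
 *	responded to a SCSI command with the CHECK_CONDITION status.
 *
 * Return value:
 *	true if valid sense data information found, else false.
 */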
bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
			  struct scsi_sense_hdr *sshdr)
{
	if (!sense_buffer || !sb_len)
		return false;

	memset(sshdr, 0, sizeof(struct scsi_sense_hdr));

	sshdr->response_code = (sense_buffer[0] & 0x7f);

	if (!scsi_sense_valid(sshdr))
		return false;

	if (sshdr->response_code >= 0x72) {
		/*
		 * descriptor format
		 */
		if (sb_len > 1)
			sshdr->sense_key = (sense_buffer[1] & 0xf);
		if (sb_len > 2)
			sshdr->asc = sense_buffer[2];
		if (sb_len > 3)
			sshdr->ascq = sense_buffer[3];
		if (sb_len > 7)
			sshdr->additional_length = sense_buffer[7];
	} else {
		/*
		 * fixed format
		 */
		if (sb_len > 2)
			sshdr->sense_key = (sense_buffer[2] & 0xf);
		if (sb_len > 7) {
			sb_len = (sb_len < (sense_buffer[7] + 8)) ?
					 sb_len : (sense_buffer[7] + 8);
			if (sb_len > 12)
				sshdr->asc = sense_buffer[12];
			if (sb_len > 13)
				sshdr->ascq = sense_buffer[13];
		}
	}

	return true;
}
EXPORT_SYMBOL(scsi_normalize_sense);

bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
				  struct scsi_sense_hdr *sshdr)
{
	return scsi_normalize_sense(cmd->sense_buffer,
				    SCSI_SENSE_BUFFERSIZE, sshdr);
}
EXPORT_SYMBOL(scsi_command_normalize_sense);

/**
 * scsi_sense_desc_find - search for a given descriptor type in descriptor sense data format.
 * @sense_buffer:	byte array of descriptor format sense data
 * @sb_len:		number of valid bytes in sense_buffer
 * @desc_type:		value of descriptor type to find
 *			(e.g. 0 -> information)
 *
 * Notes:
 *	only valid when sense data is in descriptor format
 *
 * Return value:
 *	pointer to start of (first) descriptor if found else NULL
 */
const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
				int desc_type)
{
	int add_sen_len, add_len, desc_len, k;
	const u8 * descp;

	if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
		return NULL;
	if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
		return NULL;
	add_sen_len = (add_sen_len < (sb_len - 8)) ?
			add_sen_len : (sb_len - 8);
	descp = &sense_buffer[8];
	for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
		descp += desc_len;
		add_len = (k < (add_sen_len - 1)) ? descp[1] : -1;
		desc_len = add_len + 2;
		if (descp[0] == desc_type)
			return descp;
		if (add_len < 0) /* short descriptor ?? */
			break;
	}
	return NULL;
}
EXPORT_SYMBOL(scsi_sense_desc_find);

/**
 * scsi_get_sense_info_fld - get information field from sense data (either fixed or descriptor format)
 * @sense_buffer:	byte array of sense data
 * @sb_len:		number of valid bytes in sense_buffer
 * @info_out:		pointer to 64 bit integer where 8 or 4 byte
 *			information field will be placed if found.
 *
 * Return value:
 *	1 if information field found, 0 if not found.
 */
int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
			    u64 * info_out)
{
	int j;
	const u8 * ucp;
	u64 ull;

	if (sb_len < 7)
		return 0;
	switch (sense_buffer[0] & 0x7f) {
	case 0x70:
	case 0x71: /* fixed format */
		if (sense_buffer[0] & 0x80) { /* Valid bit set */
			*info_out = (sense_buffer[3] << 24) +
				    (sense_buffer[4] << 16) +
				    (sense_buffer[5] << 8) + sense_buffer[6];
			return 1;
		} else
			return 0;
	case 0x72:
	case 0x73: /* descriptor format */
		ucp = scsi_sense_desc_find(sense_buffer, sb_len,
					   0 /* information descriptor */);
		if (ucp && (0xa == ucp[1])) {
			ull = 0;
			for (j = 0; j < 8; ++j) {
				if (j > 0)
					ull <<= 8;
				ull |= ucp[4 + j];
			}
			*info_out = ull;
			return 1;
		} else
			return 0;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(scsi_get_sense_info_fld);

/**
 * scsi_build_sense_buffer - build sense data in a buffer
 * @desc:	Sense format (non zero == descriptor format,
 *		0 == fixed format)
 * @buf:	Where to build sense data
 * @key:	Sense key
 * @asc:	Additional sense code
 * @ascq:	Additional sense code qualifier
 */
void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
{
	if (desc) {
		buf[0] = 0x72;	/* descriptor, current */
		buf[1] = key;
		buf[2] = asc;
		buf[3] = ascq;
		buf[7] = 0;
	} else {
		buf[0] = 0x70;	/* fixed, current */
		buf[2] = key;
		buf[7] = 0xa;
		buf[12] = asc;
		buf[13] = ascq;
	}
}
EXPORT_SYMBOL(scsi_build_sense_buffer);