linux/drivers/scsi/scsi_error.c
   1/*
   2 *  scsi_error.c Copyright (C) 1997 Eric Youngdale
   3 *
   4 *  SCSI error/timeout handling
   5 *      Initial versions: Eric Youngdale.  Based upon conversations with
   6 *                        Leonard Zubkoff and David Miller at Linux Expo,
   7 *                        ideas originating from all over the place.
   8 *
   9 *      Restructured scsi_unjam_host and associated functions.
  10 *      September 04, 2002 Mike Anderson (andmike@us.ibm.com)
  11 *
  12 *      Forward port of Russell King's (rmk@arm.linux.org.uk) changes and
  13 *      minor cleanups.
  14 *      September 30, 2002 Mike Anderson (andmike@us.ibm.com)
  15 */
  16
  17#include <linux/module.h>
  18#include <linux/sched.h>
  19#include <linux/gfp.h>
  20#include <linux/timer.h>
  21#include <linux/string.h>
  22#include <linux/kernel.h>
  23#include <linux/freezer.h>
  24#include <linux/kthread.h>
  25#include <linux/interrupt.h>
  26#include <linux/blkdev.h>
  27#include <linux/delay.h>
  28#include <linux/jiffies.h>
  29
  30#include <scsi/scsi.h>
  31#include <scsi/scsi_cmnd.h>
  32#include <scsi/scsi_dbg.h>
  33#include <scsi/scsi_device.h>
  34#include <scsi/scsi_driver.h>
  35#include <scsi/scsi_eh.h>
  36#include <scsi/scsi_common.h>
  37#include <scsi/scsi_transport.h>
  38#include <scsi/scsi_host.h>
  39#include <scsi/scsi_ioctl.h>
  40#include <scsi/scsi_dh.h>
  41#include <scsi/scsi_devinfo.h>
  42#include <scsi/sg.h>
  43
  44#include "scsi_priv.h"
  45#include "scsi_logging.h"
  46#include "scsi_transport_api.h"
  47
  48#include <trace/events/scsi.h>
  49
  50#include <asm/unaligned.h>
  51
  52static void scsi_eh_done(struct scsi_cmnd *scmd);
  53
  54/*
  55 * These should *probably* be handled by the host itself.
  56 * Since it is allowed to sleep, it probably should.
  57 */
  58#define BUS_RESET_SETTLE_TIME   (10)
  59#define HOST_RESET_SETTLE_TIME  (10)
  60
  61static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
  62static int scsi_try_to_abort_cmd(struct scsi_host_template *,
  63                                 struct scsi_cmnd *);
  64
  65void scsi_eh_wakeup(struct Scsi_Host *shost)
  66{
  67        lockdep_assert_held(shost->host_lock);
  68
  69        if (scsi_host_busy(shost) == shost->host_failed) {
  70                trace_scsi_eh_wakeup(shost);
  71                wake_up_process(shost->ehandler);
  72                SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
  73                        "Waking error handler thread\n"));
  74        }
  75}
  76
  77/**
  78 * scsi_schedule_eh - schedule EH for SCSI host
  79 * @shost:      SCSI host to invoke error handling on.
  80 *
  81 * Schedule SCSI EH without scmd.
  82 */
  83void scsi_schedule_eh(struct Scsi_Host *shost)
  84{
  85        unsigned long flags;
  86
  87        spin_lock_irqsave(shost->host_lock, flags);
  88
  89        if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
  90            scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
  91                shost->host_eh_scheduled++;
  92                scsi_eh_wakeup(shost);
  93        }
  94
  95        spin_unlock_irqrestore(shost->host_lock, flags);
  96}
  97EXPORT_SYMBOL_GPL(scsi_schedule_eh);
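
/*
 * Typical use: a transport class or LLD that detects a host-wide fault
 * without an associated scmd can kick the error handler directly.  A rough
 * sketch (the "foo" driver and foo_fatal_event() are made up):
 *
 *      static irqreturn_t foo_isr(int irq, void *data)
 *      {
 *              struct Scsi_Host *shost = data;
 *
 *              if (foo_fatal_event(shost)) {
 *                      scsi_schedule_eh(shost);
 *                      return IRQ_HANDLED;
 *              }
 *              return IRQ_NONE;
 *      }
 *
 * The error handler thread then runs once all outstanding commands have
 * completed or been marked failed.
 */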
  98
  99static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
 100{
 101        if (!shost->last_reset || shost->eh_deadline == -1)
 102                return 0;
 103
 104        /*
 105         * 32bit accesses are guaranteed to be atomic
 106         * (on all supported architectures), so instead
 107         * of using a spinlock we can as well double check
 108         * if eh_deadline has been set to 'off' during the
 109         * time_before call.
 110         */
 111        if (time_before(jiffies, shost->last_reset + shost->eh_deadline) &&
 112            shost->eh_deadline > -1)
 113                return 0;
 114
 115        return 1;
 116}
 117
 118/**
 119 * scmd_eh_abort_handler - Handle command aborts
 120 * @work:       command to be aborted.
 121 *
 122 * Note: this function must be called only for a command that has timed out.
 123 * Because the block layer marks a request as complete before it calls
 124 * scsi_times_out(), a .scsi_done() call from the LLD for a command that has
  125 * timed out does not have any effect. Hence it is safe to call
 126 * scsi_finish_command() from this function.
 127 */
 128void
 129scmd_eh_abort_handler(struct work_struct *work)
 130{
 131        struct scsi_cmnd *scmd =
 132                container_of(work, struct scsi_cmnd, abort_work.work);
 133        struct scsi_device *sdev = scmd->device;
 134        int rtn;
 135
 136        if (scsi_host_eh_past_deadline(sdev->host)) {
 137                SCSI_LOG_ERROR_RECOVERY(3,
 138                        scmd_printk(KERN_INFO, scmd,
 139                                    "eh timeout, not aborting\n"));
 140        } else {
 141                SCSI_LOG_ERROR_RECOVERY(3,
 142                        scmd_printk(KERN_INFO, scmd,
 143                                    "aborting command\n"));
 144                rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
 145                if (rtn == SUCCESS) {
 146                        set_host_byte(scmd, DID_TIME_OUT);
 147                        if (scsi_host_eh_past_deadline(sdev->host)) {
 148                                SCSI_LOG_ERROR_RECOVERY(3,
 149                                        scmd_printk(KERN_INFO, scmd,
 150                                                    "eh timeout, not retrying "
 151                                                    "aborted command\n"));
 152                        } else if (!scsi_noretry_cmd(scmd) &&
 153                            (++scmd->retries <= scmd->allowed)) {
 154                                SCSI_LOG_ERROR_RECOVERY(3,
 155                                        scmd_printk(KERN_WARNING, scmd,
 156                                                    "retry aborted command\n"));
 157                                scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
 158                                return;
 159                        } else {
 160                                SCSI_LOG_ERROR_RECOVERY(3,
 161                                        scmd_printk(KERN_WARNING, scmd,
 162                                                    "finish aborted command\n"));
 163                                scsi_finish_command(scmd);
 164                                return;
 165                        }
 166                } else {
 167                        SCSI_LOG_ERROR_RECOVERY(3,
 168                                scmd_printk(KERN_INFO, scmd,
 169                                            "cmd abort %s\n",
 170                                            (rtn == FAST_IO_FAIL) ?
  171                                            "not sent" : "failed"));
 172                }
 173        }
 174
 175        scsi_eh_scmd_add(scmd);
 176}
 177
 178/**
 179 * scsi_abort_command - schedule a command abort
 180 * @scmd:       scmd to abort.
 181 *
 182 * We only need to abort commands after a command timeout
 183 */
 184static int
 185scsi_abort_command(struct scsi_cmnd *scmd)
 186{
 187        struct scsi_device *sdev = scmd->device;
 188        struct Scsi_Host *shost = sdev->host;
 189        unsigned long flags;
 190
 191        if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
 192                /*
 193                 * Retry after abort failed, escalate to next level.
 194                 */
 195                SCSI_LOG_ERROR_RECOVERY(3,
 196                        scmd_printk(KERN_INFO, scmd,
 197                                    "previous abort failed\n"));
 198                BUG_ON(delayed_work_pending(&scmd->abort_work));
 199                return FAILED;
 200        }
 201
 202        spin_lock_irqsave(shost->host_lock, flags);
 203        if (shost->eh_deadline != -1 && !shost->last_reset)
 204                shost->last_reset = jiffies;
 205        spin_unlock_irqrestore(shost->host_lock, flags);
 206
 207        scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
 208        SCSI_LOG_ERROR_RECOVERY(3,
 209                scmd_printk(KERN_INFO, scmd, "abort scheduled\n"));
 210        queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100);
 211        return SUCCESS;
 212}
 213
 214/**
 215 * scsi_eh_reset - call into ->eh_action to reset internal counters
 216 * @scmd:       scmd to run eh on.
 217 *
 218 * The scsi driver might be carrying internal state about the
 219 * devices, so we need to call into the driver to reset the
 220 * internal state once the error handler is started.
 221 */
 222static void scsi_eh_reset(struct scsi_cmnd *scmd)
 223{
 224        if (!blk_rq_is_passthrough(scmd->request)) {
 225                struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
 226                if (sdrv->eh_reset)
 227                        sdrv->eh_reset(scmd);
 228        }
 229}
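
/*
 * Upper-level drivers opt into this callback through their struct
 * scsi_driver; sd, for instance, uses it to reset its medium-access error
 * bookkeeping.  A minimal sketch (the "foo" ULD and its counter are made up):
 *
 *      static void foo_eh_reset(struct scsi_cmnd *scmd)
 *      {
 *              struct foo_disk *fdkp = foo_disk(scmd);
 *
 *              atomic_set(&fdkp->eh_error_count, 0);
 *      }
 *
 *      static struct scsi_driver foo_template = {
 *              ...
 *              .eh_reset       = foo_eh_reset,
 *      };
 */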
 230
 231static void scsi_eh_inc_host_failed(struct rcu_head *head)
 232{
 233        struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
 234        struct Scsi_Host *shost = scmd->device->host;
 235        unsigned long flags;
 236
 237        spin_lock_irqsave(shost->host_lock, flags);
 238        shost->host_failed++;
 239        scsi_eh_wakeup(shost);
 240        spin_unlock_irqrestore(shost->host_lock, flags);
 241}
 242
 243/**
 244 * scsi_eh_scmd_add - add scsi cmd to error handling.
 245 * @scmd:       scmd to run eh on.
 246 */
 247void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
 248{
 249        struct Scsi_Host *shost = scmd->device->host;
 250        unsigned long flags;
 251        int ret;
 252
 253        WARN_ON_ONCE(!shost->ehandler);
 254
 255        spin_lock_irqsave(shost->host_lock, flags);
 256        if (scsi_host_set_state(shost, SHOST_RECOVERY)) {
 257                ret = scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY);
 258                WARN_ON_ONCE(ret);
 259        }
 260        if (shost->eh_deadline != -1 && !shost->last_reset)
 261                shost->last_reset = jiffies;
 262
 263        scsi_eh_reset(scmd);
 264        list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
 265        spin_unlock_irqrestore(shost->host_lock, flags);
 266        /*
 267         * Ensure that all tasks observe the host state change before the
 268         * host_failed change.
 269         */
 270        call_rcu(&scmd->rcu, scsi_eh_inc_host_failed);
 271}
 272
 273/**
 274 * scsi_times_out - Timeout function for normal scsi commands.
 275 * @req:        request that is timing out.
 276 *
 277 * Notes:
 278 *     We do not need to lock this.  There is the potential for a race
 279 *     only in that the normal completion handling might run, but if the
 280 *     normal completion function determines that the timer has already
 281 *     fired, then it mustn't do anything.
 282 */
 283enum blk_eh_timer_return scsi_times_out(struct request *req)
 284{
 285        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
 286        enum blk_eh_timer_return rtn = BLK_EH_DONE;
 287        struct Scsi_Host *host = scmd->device->host;
 288
 289        trace_scsi_dispatch_cmd_timeout(scmd);
 290        scsi_log_completion(scmd, TIMEOUT_ERROR);
 291
 292        if (host->eh_deadline != -1 && !host->last_reset)
 293                host->last_reset = jiffies;
 294
 295        if (host->hostt->eh_timed_out)
 296                rtn = host->hostt->eh_timed_out(scmd);
 297
 298        if (rtn == BLK_EH_DONE) {
 299                /*
 300                 * For blk-mq, we must set the request state to complete now
 301                 * before sending the request to the scsi error handler. This
 302                 * will prevent a use-after-free in the event the LLD manages
 303                 * to complete the request before the error handler finishes
 304                 * processing this timed out request.
 305                 *
 306                 * If the request was already completed, then the LLD beat the
 307                 * time out handler from transferring the request to the scsi
 308                 * error handler. In that case we can return immediately as no
 309                 * further action is required.
 310                 */
 311                if (req->q->mq_ops && !blk_mq_mark_complete(req))
 312                        return rtn;
 313                if (scsi_abort_command(scmd) != SUCCESS) {
 314                        set_host_byte(scmd, DID_TIME_OUT);
 315                        scsi_eh_scmd_add(scmd);
 316                }
 317        }
 318
 319        return rtn;
 320}
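
/*
 * An LLD can intercept the timeout before the midlayer schedules an abort by
 * supplying eh_timed_out in its scsi_host_template.  A rough sketch
 * (foo_cmd_still_in_flight() is a hypothetical driver helper):
 *
 *      static enum blk_eh_timer_return foo_eh_timed_out(struct scsi_cmnd *scmd)
 *      {
 *              if (foo_cmd_still_in_flight(scmd))
 *                      return BLK_EH_RESET_TIMER;
 *              return BLK_EH_DONE;
 *      }
 *
 * BLK_EH_RESET_TIMER restarts the request timer; BLK_EH_DONE lets the code
 * above try scsi_abort_command() or add the command to the eh queue.
 */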
 321
 322/**
 323 * scsi_block_when_processing_errors - Prevent cmds from being queued.
 324 * @sdev:       Device on which we are performing recovery.
 325 *
 326 * Description:
 327 *     We block until the host is out of error recovery, and then check to
 328 *     see whether the host or the device is offline.
 329 *
 330 * Return value:
 331 *     0 when dev was taken offline by error recovery. 1 OK to proceed.
 332 */
 333int scsi_block_when_processing_errors(struct scsi_device *sdev)
 334{
 335        int online;
 336
 337        wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
 338
 339        online = scsi_device_online(sdev);
 340
 341        SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev,
 342                "%s: rtn: %d\n", __func__, online));
 343
 344        return online;
 345}
 346EXPORT_SYMBOL(scsi_block_when_processing_errors);
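
/*
 * Upper-level drivers typically gate their open() and ioctl paths on this
 * helper, along the lines of (sketch of a hypothetical ULD open routine):
 *
 *      static int foo_open(struct block_device *bdev, fmode_t mode)
 *      {
 *              struct scsi_device *sdev = foo_to_sdev(bdev);
 *
 *              if (!scsi_block_when_processing_errors(sdev))
 *                      return -ENODEV;
 *              ...
 *      }
 */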
 347
 348#ifdef CONFIG_SCSI_LOGGING
 349/**
 350 * scsi_eh_prt_fail_stats - Log info on failures.
 351 * @shost:      scsi host being recovered.
 352 * @work_q:     Queue of scsi cmds to process.
 353 */
 354static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
 355                                          struct list_head *work_q)
 356{
 357        struct scsi_cmnd *scmd;
 358        struct scsi_device *sdev;
 359        int total_failures = 0;
 360        int cmd_failed = 0;
 361        int cmd_cancel = 0;
 362        int devices_failed = 0;
 363
 364        shost_for_each_device(sdev, shost) {
 365                list_for_each_entry(scmd, work_q, eh_entry) {
 366                        if (scmd->device == sdev) {
 367                                ++total_failures;
 368                                if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED)
 369                                        ++cmd_cancel;
 370                                else
 371                                        ++cmd_failed;
 372                        }
 373                }
 374
 375                if (cmd_cancel || cmd_failed) {
 376                        SCSI_LOG_ERROR_RECOVERY(3,
 377                                shost_printk(KERN_INFO, shost,
 378                                            "%s: cmds failed: %d, cancel: %d\n",
 379                                            __func__, cmd_failed,
 380                                            cmd_cancel));
 381                        cmd_cancel = 0;
 382                        cmd_failed = 0;
 383                        ++devices_failed;
 384                }
 385        }
 386
 387        SCSI_LOG_ERROR_RECOVERY(2, shost_printk(KERN_INFO, shost,
 388                                   "Total of %d commands on %d"
 389                                   " devices require eh work\n",
 390                                   total_failures, devices_failed));
 391}
 392#endif
 393
  394/**
 395 * scsi_report_lun_change - Set flag on all *other* devices on the same target
 396 *                          to indicate that a UNIT ATTENTION is expected.
 397 * @sdev:       Device reporting the UNIT ATTENTION
 398 */
 399static void scsi_report_lun_change(struct scsi_device *sdev)
 400{
 401        sdev->sdev_target->expecting_lun_change = 1;
 402}
 403
 404/**
 405 * scsi_report_sense - Examine scsi sense information and log messages for
 406 *                     certain conditions, also issue uevents for some of them.
 407 * @sdev:       Device reporting the sense code
 408 * @sshdr:      sshdr to be examined
 409 */
 410static void scsi_report_sense(struct scsi_device *sdev,
 411                              struct scsi_sense_hdr *sshdr)
 412{
 413        enum scsi_device_event evt_type = SDEV_EVT_MAXBITS;     /* i.e. none */
 414
 415        if (sshdr->sense_key == UNIT_ATTENTION) {
 416                if (sshdr->asc == 0x3f && sshdr->ascq == 0x03) {
 417                        evt_type = SDEV_EVT_INQUIRY_CHANGE_REPORTED;
 418                        sdev_printk(KERN_WARNING, sdev,
 419                                    "Inquiry data has changed");
 420                } else if (sshdr->asc == 0x3f && sshdr->ascq == 0x0e) {
 421                        evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
 422                        scsi_report_lun_change(sdev);
 423                        sdev_printk(KERN_WARNING, sdev,
 424                                    "Warning! Received an indication that the "
 425                                    "LUN assignments on this target have "
 426                                    "changed. The Linux SCSI layer does not "
 427                                    "automatically remap LUN assignments.\n");
 428                } else if (sshdr->asc == 0x3f)
 429                        sdev_printk(KERN_WARNING, sdev,
 430                                    "Warning! Received an indication that the "
 431                                    "operating parameters on this target have "
 432                                    "changed. The Linux SCSI layer does not "
 433                                    "automatically adjust these parameters.\n");
 434
 435                if (sshdr->asc == 0x38 && sshdr->ascq == 0x07) {
 436                        evt_type = SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED;
 437                        sdev_printk(KERN_WARNING, sdev,
 438                                    "Warning! Received an indication that the "
 439                                    "LUN reached a thin provisioning soft "
 440                                    "threshold.\n");
 441                }
 442
 443                if (sshdr->asc == 0x29) {
 444                        evt_type = SDEV_EVT_POWER_ON_RESET_OCCURRED;
 445                        sdev_printk(KERN_WARNING, sdev,
 446                                    "Power-on or device reset occurred\n");
 447                }
 448
 449                if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
 450                        evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
 451                        sdev_printk(KERN_WARNING, sdev,
 452                                    "Mode parameters changed");
 453                } else if (sshdr->asc == 0x2a && sshdr->ascq == 0x06) {
 454                        evt_type = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED;
 455                        sdev_printk(KERN_WARNING, sdev,
 456                                    "Asymmetric access state changed");
 457                } else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
 458                        evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
 459                        sdev_printk(KERN_WARNING, sdev,
 460                                    "Capacity data has changed");
 461                } else if (sshdr->asc == 0x2a)
 462                        sdev_printk(KERN_WARNING, sdev,
 463                                    "Parameters changed");
 464        }
 465
 466        if (evt_type != SDEV_EVT_MAXBITS) {
 467                set_bit(evt_type, sdev->pending_events);
 468                schedule_work(&sdev->event_work);
 469        }
 470}
 471
 472/**
 473 * scsi_check_sense - Examine scsi cmd sense
 474 * @scmd:       Cmd to have sense checked.
 475 *
 476 * Return value:
 477 *      SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
 478 *
 479 * Notes:
 480 *      When a deferred error is detected the current command has
 481 *      not been executed and needs retrying.
 482 */
 483int scsi_check_sense(struct scsi_cmnd *scmd)
 484{
 485        struct scsi_device *sdev = scmd->device;
 486        struct scsi_sense_hdr sshdr;
 487
  488        if (!scsi_command_normalize_sense(scmd, &sshdr))
 489                return FAILED;  /* no valid sense data */
 490
 491        scsi_report_sense(sdev, &sshdr);
 492
 493        if (scsi_sense_is_deferred(&sshdr))
 494                return NEEDS_RETRY;
 495
 496        if (sdev->handler && sdev->handler->check_sense) {
 497                int rc;
 498
 499                rc = sdev->handler->check_sense(sdev, &sshdr);
 500                if (rc != SCSI_RETURN_NOT_HANDLED)
 501                        return rc;
 502                /* handler does not care. Drop down to default handling */
 503        }
 504
 505        if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
 506                /*
 507                 * nasty: for mid-layer issued TURs, we need to return the
 508                 * actual sense data without any recovery attempt.  For eh
 509                 * issued ones, we need to try to recover and interpret
 510                 */
 511                return SUCCESS;
 512
 513        /*
 514         * Previous logic looked for FILEMARK, EOM or ILI which are
 515         * mainly associated with tapes and returned SUCCESS.
 516         */
 517        if (sshdr.response_code == 0x70) {
 518                /* fixed format */
 519                if (scmd->sense_buffer[2] & 0xe0)
 520                        return SUCCESS;
 521        } else {
 522                /*
 523                 * descriptor format: look for "stream commands sense data
 524                 * descriptor" (see SSC-3). Assume single sense data
 525                 * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
 526                 */
 527                if ((sshdr.additional_length > 3) &&
 528                    (scmd->sense_buffer[8] == 0x4) &&
 529                    (scmd->sense_buffer[11] & 0xe0))
 530                        return SUCCESS;
 531        }
 532
 533        switch (sshdr.sense_key) {
 534        case NO_SENSE:
 535                return SUCCESS;
 536        case RECOVERED_ERROR:
 537                return /* soft_error */ SUCCESS;
 538
 539        case ABORTED_COMMAND:
 540                if (sshdr.asc == 0x10) /* DIF */
 541                        return SUCCESS;
 542
 543                if (sshdr.asc == 0x44 && sdev->sdev_bflags & BLIST_RETRY_ITF)
 544                        return ADD_TO_MLQUEUE;
 545                if (sshdr.asc == 0xc1 && sshdr.ascq == 0x01 &&
 546                    sdev->sdev_bflags & BLIST_RETRY_ASC_C1)
 547                        return ADD_TO_MLQUEUE;
 548
 549                return NEEDS_RETRY;
 550        case NOT_READY:
 551        case UNIT_ATTENTION:
 552                /*
 553                 * if we are expecting a cc/ua because of a bus reset that we
 554                 * performed, treat this just as a retry.  otherwise this is
 555                 * information that we should pass up to the upper-level driver
 556                 * so that we can deal with it there.
 557                 */
 558                if (scmd->device->expecting_cc_ua) {
 559                        /*
  560                         * Because some devices do not queue unit
 561                         * attentions correctly, we carefully check
 562                         * additional sense code and qualifier so as
 563                         * not to squash media change unit attention.
 564                         */
 565                        if (sshdr.asc != 0x28 || sshdr.ascq != 0x00) {
 566                                scmd->device->expecting_cc_ua = 0;
 567                                return NEEDS_RETRY;
 568                        }
 569                }
 570                /*
 571                 * we might also expect a cc/ua if another LUN on the target
 572                 * reported a UA with an ASC/ASCQ of 3F 0E -
 573                 * REPORTED LUNS DATA HAS CHANGED.
 574                 */
 575                if (scmd->device->sdev_target->expecting_lun_change &&
 576                    sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
 577                        return NEEDS_RETRY;
 578                /*
 579                 * if the device is in the process of becoming ready, we
 580                 * should retry.
 581                 */
 582                if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
 583                        return NEEDS_RETRY;
 584                /*
 585                 * if the device is not started, we need to wake
 586                 * the error handler to start the motor
 587                 */
 588                if (scmd->device->allow_restart &&
 589                    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
 590                        return FAILED;
 591                /*
 592                 * Pass the UA upwards for a determination in the completion
 593                 * functions.
 594                 */
 595                return SUCCESS;
 596
 597                /* these are not supported */
 598        case DATA_PROTECT:
 599                if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
 600                        /* Thin provisioning hard threshold reached */
 601                        set_host_byte(scmd, DID_ALLOC_FAILURE);
 602                        return SUCCESS;
 603                }
 604                /* FALLTHROUGH */
 605        case COPY_ABORTED:
 606        case VOLUME_OVERFLOW:
 607        case MISCOMPARE:
 608        case BLANK_CHECK:
 609                set_host_byte(scmd, DID_TARGET_FAILURE);
 610                return SUCCESS;
 611
 612        case MEDIUM_ERROR:
 613                if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
 614                    sshdr.asc == 0x13 || /* AMNF DATA FIELD */
 615                    sshdr.asc == 0x14) { /* RECORD NOT FOUND */
 616                        set_host_byte(scmd, DID_MEDIUM_ERROR);
 617                        return SUCCESS;
 618                }
 619                return NEEDS_RETRY;
 620
 621        case HARDWARE_ERROR:
 622                if (scmd->device->retry_hwerror)
 623                        return ADD_TO_MLQUEUE;
 624                else
 625                        set_host_byte(scmd, DID_TARGET_FAILURE);
 626                /* FALLTHROUGH */
 627
 628        case ILLEGAL_REQUEST:
 629                if (sshdr.asc == 0x20 || /* Invalid command operation code */
 630                    sshdr.asc == 0x21 || /* Logical block address out of range */
 631                    sshdr.asc == 0x22 || /* Invalid function */
 632                    sshdr.asc == 0x24 || /* Invalid field in cdb */
 633                    sshdr.asc == 0x26 || /* Parameter value invalid */
 634                    sshdr.asc == 0x27) { /* Write protected */
 635                        set_host_byte(scmd, DID_TARGET_FAILURE);
 636                }
 637                return SUCCESS;
 638
 639        default:
 640                return SUCCESS;
 641        }
 642}
 643EXPORT_SYMBOL_GPL(scsi_check_sense);
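
/*
 * Device handlers (scsi_dh) can override the generic interpretation above
 * through the ->check_sense hook used earlier in this function.  A minimal
 * sketch in the spirit of the ALUA handler (the "foo" handler is made up;
 * the ASC/ASCQ pair shown is "asymmetric access state changed"):
 *
 *      static int foo_dh_check_sense(struct scsi_device *sdev,
 *                                    struct scsi_sense_hdr *sshdr)
 *      {
 *              if (sshdr->sense_key == UNIT_ATTENTION &&
 *                  sshdr->asc == 0x2a && sshdr->ascq == 0x06)
 *                      return ADD_TO_MLQUEUE;
 *              return SCSI_RETURN_NOT_HANDLED;
 *      }
 *
 * Returning SCSI_RETURN_NOT_HANDLED falls back to the handling above.
 */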
 644
 645static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
 646{
 647        struct scsi_host_template *sht = sdev->host->hostt;
 648        struct scsi_device *tmp_sdev;
 649
 650        if (!sht->track_queue_depth ||
 651            sdev->queue_depth >= sdev->max_queue_depth)
 652                return;
 653
 654        if (time_before(jiffies,
 655            sdev->last_queue_ramp_up + sdev->queue_ramp_up_period))
 656                return;
 657
 658        if (time_before(jiffies,
 659            sdev->last_queue_full_time + sdev->queue_ramp_up_period))
 660                return;
 661
 662        /*
 663         * Walk all devices of a target and do
 664         * ramp up on them.
 665         */
 666        shost_for_each_device(tmp_sdev, sdev->host) {
 667                if (tmp_sdev->channel != sdev->channel ||
 668                    tmp_sdev->id != sdev->id ||
 669                    tmp_sdev->queue_depth == sdev->max_queue_depth)
 670                        continue;
 671
 672                scsi_change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1);
 673                sdev->last_queue_ramp_up = jiffies;
 674        }
 675}
 676
 677static void scsi_handle_queue_full(struct scsi_device *sdev)
 678{
 679        struct scsi_host_template *sht = sdev->host->hostt;
 680        struct scsi_device *tmp_sdev;
 681
 682        if (!sht->track_queue_depth)
 683                return;
 684
 685        shost_for_each_device(tmp_sdev, sdev->host) {
 686                if (tmp_sdev->channel != sdev->channel ||
 687                    tmp_sdev->id != sdev->id)
 688                        continue;
 689                /*
 690                 * We do not know the number of commands that were at
 691                 * the device when we got the queue full so we start
 692                 * from the highest possible value and work our way down.
 693                 */
 694                scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
 695        }
 696}
 697
 698/**
  699 * scsi_eh_completed_normally - Disposition an eh cmd on return from LLD.
 700 * @scmd:       SCSI cmd to examine.
 701 *
 702 * Notes:
 703 *    This is *only* called when we are examining the status of commands
 704 *    queued during error recovery.  the main difference here is that we
 705 *    don't allow for the possibility of retries here, and we are a lot
 706 *    more restrictive about what we consider acceptable.
 707 */
 708static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
 709{
 710        /*
 711         * first check the host byte, to see if there is anything in there
 712         * that would indicate what we need to do.
 713         */
 714        if (host_byte(scmd->result) == DID_RESET) {
 715                /*
 716                 * rats.  we are already in the error handler, so we now
 717                 * get to try and figure out what to do next.  if the sense
 718                 * is valid, we have a pretty good idea of what to do.
 719                 * if not, we mark it as FAILED.
 720                 */
 721                return scsi_check_sense(scmd);
 722        }
 723        if (host_byte(scmd->result) != DID_OK)
 724                return FAILED;
 725
 726        /*
 727         * next, check the message byte.
 728         */
 729        if (msg_byte(scmd->result) != COMMAND_COMPLETE)
 730                return FAILED;
 731
 732        /*
 733         * now, check the status byte to see if this indicates
 734         * anything special.
 735         */
 736        switch (status_byte(scmd->result)) {
 737        case GOOD:
 738                scsi_handle_queue_ramp_up(scmd->device);
 739                /* FALLTHROUGH */
 740        case COMMAND_TERMINATED:
 741                return SUCCESS;
 742        case CHECK_CONDITION:
 743                return scsi_check_sense(scmd);
 744        case CONDITION_GOOD:
 745        case INTERMEDIATE_GOOD:
 746        case INTERMEDIATE_C_GOOD:
 747                /*
 748                 * who knows?  FIXME(eric)
 749                 */
 750                return SUCCESS;
 751        case RESERVATION_CONFLICT:
 752                if (scmd->cmnd[0] == TEST_UNIT_READY)
 753                        /* it is a success, we probed the device and
 754                         * found it */
 755                        return SUCCESS;
 756                /* otherwise, we failed to send the command */
 757                return FAILED;
 758        case QUEUE_FULL:
 759                scsi_handle_queue_full(scmd->device);
 760                /* fall through */
 761        case BUSY:
 762                return NEEDS_RETRY;
 763        default:
 764                return FAILED;
 765        }
 766        return FAILED;
 767}
 768
 769/**
 770 * scsi_eh_done - Completion function for error handling.
 771 * @scmd:       Cmd that is done.
 772 */
 773static void scsi_eh_done(struct scsi_cmnd *scmd)
 774{
 775        struct completion *eh_action;
 776
 777        SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
 778                        "%s result: %x\n", __func__, scmd->result));
 779
 780        eh_action = scmd->device->host->eh_action;
 781        if (eh_action)
 782                complete(eh_action);
 783}
 784
 785/**
 786 * scsi_try_host_reset - ask host adapter to reset itself
 787 * @scmd:       SCSI cmd to send host reset.
 788 */
 789static int scsi_try_host_reset(struct scsi_cmnd *scmd)
 790{
 791        unsigned long flags;
 792        int rtn;
 793        struct Scsi_Host *host = scmd->device->host;
 794        struct scsi_host_template *hostt = host->hostt;
 795
 796        SCSI_LOG_ERROR_RECOVERY(3,
 797                shost_printk(KERN_INFO, host, "Snd Host RST\n"));
 798
 799        if (!hostt->eh_host_reset_handler)
 800                return FAILED;
 801
 802        rtn = hostt->eh_host_reset_handler(scmd);
 803
 804        if (rtn == SUCCESS) {
 805                if (!hostt->skip_settle_delay)
 806                        ssleep(HOST_RESET_SETTLE_TIME);
 807                spin_lock_irqsave(host->host_lock, flags);
 808                scsi_report_bus_reset(host, scmd_channel(scmd));
 809                spin_unlock_irqrestore(host->host_lock, flags);
 810        }
 811
 812        return rtn;
 813}
 814
 815/**
 816 * scsi_try_bus_reset - ask host to perform a bus reset
 817 * @scmd:       SCSI cmd to send bus reset.
 818 */
 819static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
 820{
 821        unsigned long flags;
 822        int rtn;
 823        struct Scsi_Host *host = scmd->device->host;
 824        struct scsi_host_template *hostt = host->hostt;
 825
 826        SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
 827                "%s: Snd Bus RST\n", __func__));
 828
 829        if (!hostt->eh_bus_reset_handler)
 830                return FAILED;
 831
 832        rtn = hostt->eh_bus_reset_handler(scmd);
 833
 834        if (rtn == SUCCESS) {
 835                if (!hostt->skip_settle_delay)
 836                        ssleep(BUS_RESET_SETTLE_TIME);
 837                spin_lock_irqsave(host->host_lock, flags);
 838                scsi_report_bus_reset(host, scmd_channel(scmd));
 839                spin_unlock_irqrestore(host->host_lock, flags);
 840        }
 841
 842        return rtn;
 843}
 844
 845static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
 846{
 847        sdev->was_reset = 1;
 848        sdev->expecting_cc_ua = 1;
 849}
 850
 851/**
 852 * scsi_try_target_reset - Ask host to perform a target reset
 853 * @scmd:       SCSI cmd used to send a target reset
 854 *
 855 * Notes:
 856 *    There is no timeout for this operation.  if this operation is
 857 *    unreliable for a given host, then the host itself needs to put a
 858 *    timer on it, and set the host back to a consistent state prior to
 859 *    returning.
 860 */
 861static int scsi_try_target_reset(struct scsi_cmnd *scmd)
 862{
 863        unsigned long flags;
 864        int rtn;
 865        struct Scsi_Host *host = scmd->device->host;
 866        struct scsi_host_template *hostt = host->hostt;
 867
 868        if (!hostt->eh_target_reset_handler)
 869                return FAILED;
 870
 871        rtn = hostt->eh_target_reset_handler(scmd);
 872        if (rtn == SUCCESS) {
 873                spin_lock_irqsave(host->host_lock, flags);
 874                __starget_for_each_device(scsi_target(scmd->device), NULL,
 875                                          __scsi_report_device_reset);
 876                spin_unlock_irqrestore(host->host_lock, flags);
 877        }
 878
 879        return rtn;
 880}
 881
 882/**
 883 * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
 884 * @scmd:       SCSI cmd used to send BDR
 885 *
 886 * Notes:
 887 *    There is no timeout for this operation.  if this operation is
 888 *    unreliable for a given host, then the host itself needs to put a
 889 *    timer on it, and set the host back to a consistent state prior to
 890 *    returning.
 891 */
 892static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
 893{
 894        int rtn;
 895        struct scsi_host_template *hostt = scmd->device->host->hostt;
 896
 897        if (!hostt->eh_device_reset_handler)
 898                return FAILED;
 899
 900        rtn = hostt->eh_device_reset_handler(scmd);
 901        if (rtn == SUCCESS)
 902                __scsi_report_device_reset(scmd->device, NULL);
 903        return rtn;
 904}
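
/*
 * The LLD side of this is eh_device_reset_handler in the host template,
 * which typically issues a LUN RESET task management function.  Sketch
 * (foo_send_tmf() and FOO_TMF_LUN_RESET are made up):
 *
 *      static int foo_eh_device_reset_handler(struct scsi_cmnd *scmd)
 *      {
 *              struct scsi_device *sdev = scmd->device;
 *
 *              if (foo_send_tmf(sdev->host, sdev->lun, FOO_TMF_LUN_RESET))
 *                      return FAILED;
 *              return SUCCESS;
 *      }
 */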
 905
 906/**
 907 * scsi_try_to_abort_cmd - Ask host to abort a SCSI command
 908 * @hostt:      SCSI driver host template
  909 * @scmd:       SCSI cmd to abort
 910 *
 911 * Return value:
 912 *      SUCCESS, FAILED, or FAST_IO_FAIL
 913 *
 914 * Notes:
 915 *    SUCCESS does not necessarily indicate that the command
  916 *    has been aborted; it only indicates that the LLDD
 917 *    has cleared all references to that command.
 918 *    LLDDs should return FAILED only if an abort was required
 919 *    but could not be executed. LLDDs should return FAST_IO_FAIL
  920 *    if the device is temporarily unavailable (e.g. due to a
 921 *    link down on FibreChannel)
 922 */
 923static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt,
 924                                 struct scsi_cmnd *scmd)
 925{
 926        if (!hostt->eh_abort_handler)
 927                return FAILED;
 928
 929        return hostt->eh_abort_handler(scmd);
 930}
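
/*
 * A matching LLD abort hook might look like this ("foo" and its helpers are
 * made up; the return values follow the rules documented above):
 *
 *      static int foo_eh_abort_handler(struct scsi_cmnd *scmd)
 *      {
 *              struct foo_port *port = foo_port(scmd->device->host);
 *
 *              if (foo_link_down(port))
 *                      return FAST_IO_FAIL;
 *              if (foo_hw_abort(port, scmd))
 *                      return FAILED;
 *              return SUCCESS;
 *      }
 */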
 931
 932static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
 933{
 934        if (scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd) != SUCCESS)
 935                if (scsi_try_bus_device_reset(scmd) != SUCCESS)
 936                        if (scsi_try_target_reset(scmd) != SUCCESS)
 937                                if (scsi_try_bus_reset(scmd) != SUCCESS)
 938                                        scsi_try_host_reset(scmd);
 939}
 940
 941/**
 942 * scsi_eh_prep_cmnd  - Save a scsi command info as part of error recovery
 943 * @scmd:       SCSI command structure to hijack
 944 * @ses:        structure to save restore information
 945 * @cmnd:       CDB to send. Can be NULL if no new cmnd is needed
 946 * @cmnd_size:  size in bytes of @cmnd (must be <= BLK_MAX_CDB)
 947 * @sense_bytes: size of sense data to copy. or 0 (if != 0 @cmnd is ignored)
 948 *
  949 * This function is used to save scsi command information before re-execution
 950 * as part of the error recovery process.  If @sense_bytes is 0 the command
 951 * sent must be one that does not transfer any data.  If @sense_bytes != 0
  952 * @cmnd is ignored and this function sets up a REQUEST_SENSE command
 953 * and cmnd buffers to read @sense_bytes into @scmd->sense_buffer.
 954 */
 955void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
 956                        unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
 957{
 958        struct scsi_device *sdev = scmd->device;
 959
 960        /*
 961         * We need saved copies of a number of fields - this is because
 962         * error handling may need to overwrite these with different values
 963         * to run different commands, and once error handling is complete,
 964         * we will need to restore these values prior to running the actual
 965         * command.
 966         */
 967        ses->cmd_len = scmd->cmd_len;
 968        ses->cmnd = scmd->cmnd;
 969        ses->data_direction = scmd->sc_data_direction;
 970        ses->sdb = scmd->sdb;
 971        ses->next_rq = scmd->request->next_rq;
 972        ses->result = scmd->result;
 973        ses->underflow = scmd->underflow;
 974        ses->prot_op = scmd->prot_op;
 975        ses->eh_eflags = scmd->eh_eflags;
 976
 977        scmd->prot_op = SCSI_PROT_NORMAL;
 978        scmd->eh_eflags = 0;
 979        scmd->cmnd = ses->eh_cmnd;
 980        memset(scmd->cmnd, 0, BLK_MAX_CDB);
 981        memset(&scmd->sdb, 0, sizeof(scmd->sdb));
 982        scmd->request->next_rq = NULL;
 983        scmd->result = 0;
 984
 985        if (sense_bytes) {
 986                scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
 987                                         sense_bytes);
 988                sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
 989                            scmd->sdb.length);
 990                scmd->sdb.table.sgl = &ses->sense_sgl;
 991                scmd->sc_data_direction = DMA_FROM_DEVICE;
 992                scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
 993                scmd->cmnd[0] = REQUEST_SENSE;
 994                scmd->cmnd[4] = scmd->sdb.length;
 995                scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
 996        } else {
 997                scmd->sc_data_direction = DMA_NONE;
 998                if (cmnd) {
 999                        BUG_ON(cmnd_size > BLK_MAX_CDB);
1000                        memcpy(scmd->cmnd, cmnd, cmnd_size);
1001                        scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
1002                }
1003        }
1004
1005        scmd->underflow = 0;
1006
1007        if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN)
1008                scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
1009                        (sdev->lun << 5 & 0xe0);
1010
1011        /*
1012         * Zero the sense buffer.  The scsi spec mandates that any
1013         * untransferred sense data should be interpreted as being zero.
1014         */
1015        memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1016}
1017EXPORT_SYMBOL(scsi_eh_prep_cmnd);
1018
1019/**
1020 * scsi_eh_restore_cmnd  - Restore a scsi command info as part of error recovery
1021 * @scmd:       SCSI command structure to restore
 1022 * @ses:        saved information from a corresponding call to scsi_eh_prep_cmnd
1023 *
1024 * Undo any damage done by above scsi_eh_prep_cmnd().
1025 */
1026void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
1027{
1028        /*
1029         * Restore original data
1030         */
1031        scmd->cmd_len = ses->cmd_len;
1032        scmd->cmnd = ses->cmnd;
1033        scmd->sc_data_direction = ses->data_direction;
1034        scmd->sdb = ses->sdb;
1035        scmd->request->next_rq = ses->next_rq;
1036        scmd->result = ses->result;
1037        scmd->underflow = ses->underflow;
1038        scmd->prot_op = ses->prot_op;
1039        scmd->eh_eflags = ses->eh_eflags;
1040}
1041EXPORT_SYMBOL(scsi_eh_restore_cmnd);
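
/*
 * LLDs with their own recovery paths use the prep/restore pair around a
 * temporary CDB.  Rough sketch of fetching sense data by hand
 * (foo_issue_and_wait() stands in for however the driver submits internal
 * commands):
 *
 *      struct scsi_eh_save ses;
 *
 *      scsi_eh_prep_cmnd(scmd, &ses, NULL, 0, SCSI_SENSE_BUFFERSIZE);
 *      foo_issue_and_wait(scmd);
 *      scsi_eh_restore_cmnd(scmd, &ses);
 *
 * Everything saved in ses (CDB, data buffer, direction, result, ...) is put
 * back by scsi_eh_restore_cmnd(), so the original command can still be
 * retried or completed normally afterwards.
 */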
1042
1043/**
1044 * scsi_send_eh_cmnd  - submit a scsi command as part of error recovery
1045 * @scmd:       SCSI command structure to hijack
1046 * @cmnd:       CDB to send
1047 * @cmnd_size:  size in bytes of @cmnd
1048 * @timeout:    timeout for this request
1049 * @sense_bytes: size of sense data to copy or 0
1050 *
1051 * This function is used to send a scsi command down to a target device
1052 * as part of the error recovery process. See also scsi_eh_prep_cmnd() above.
1053 *
1054 * Return value:
1055 *    SUCCESS or FAILED or NEEDS_RETRY
1056 */
1057static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
1058                             int cmnd_size, int timeout, unsigned sense_bytes)
1059{
1060        struct scsi_device *sdev = scmd->device;
1061        struct Scsi_Host *shost = sdev->host;
1062        DECLARE_COMPLETION_ONSTACK(done);
1063        unsigned long timeleft = timeout;
1064        struct scsi_eh_save ses;
1065        const unsigned long stall_for = msecs_to_jiffies(100);
1066        int rtn;
1067
1068retry:
1069        scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
1070        shost->eh_action = &done;
1071
1072        scsi_log_send(scmd);
1073        scmd->scsi_done = scsi_eh_done;
1074        rtn = shost->hostt->queuecommand(shost, scmd);
1075        if (rtn) {
1076                if (timeleft > stall_for) {
1077                        scsi_eh_restore_cmnd(scmd, &ses);
1078                        timeleft -= stall_for;
1079                        msleep(jiffies_to_msecs(stall_for));
1080                        goto retry;
1081                }
1082                /* signal not to enter either branch of the if () below */
1083                timeleft = 0;
1084                rtn = FAILED;
1085        } else {
1086                timeleft = wait_for_completion_timeout(&done, timeout);
1087                rtn = SUCCESS;
1088        }
1089
1090        shost->eh_action = NULL;
1091
1092        scsi_log_completion(scmd, rtn);
1093
1094        SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1095                        "%s timeleft: %ld\n",
1096                        __func__, timeleft));
1097
1098        /*
1099         * If there is time left scsi_eh_done got called, and we will examine
1100         * the actual status codes to see whether the command actually did
1101         * complete normally, else if we have a zero return and no time left,
1102         * the command must still be pending, so abort it and return FAILED.
1103         * If we never actually managed to issue the command, because
1104         * ->queuecommand() kept returning non zero, use the rtn = FAILED
1105         * value above (so don't execute either branch of the if)
1106         */
1107        if (timeleft) {
1108                rtn = scsi_eh_completed_normally(scmd);
1109                SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1110                        "%s: scsi_eh_completed_normally %x\n", __func__, rtn));
1111
1112                switch (rtn) {
1113                case SUCCESS:
1114                case NEEDS_RETRY:
1115                case FAILED:
1116                        break;
1117                case ADD_TO_MLQUEUE:
1118                        rtn = NEEDS_RETRY;
1119                        break;
1120                default:
1121                        rtn = FAILED;
1122                        break;
1123                }
1124        } else if (rtn != FAILED) {
1125                scsi_abort_eh_cmnd(scmd);
1126                rtn = FAILED;
1127        }
1128
1129        scsi_eh_restore_cmnd(scmd, &ses);
1130
1131        return rtn;
1132}
1133
1134/**
1135 * scsi_request_sense - Request sense data from a particular target.
1136 * @scmd:       SCSI cmd for request sense.
1137 *
1138 * Notes:
1139 *    Some hosts automatically obtain this information, others require
1140 *    that we obtain it on our own. This function will *not* return until
1141 *    the command either times out, or it completes.
1142 */
1143static int scsi_request_sense(struct scsi_cmnd *scmd)
1144{
1145        return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0);
1146}
1147
1148static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
1149{
1150        if (!blk_rq_is_passthrough(scmd->request)) {
1151                struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
1152                if (sdrv->eh_action)
1153                        rtn = sdrv->eh_action(scmd, rtn);
1154        }
1155        return rtn;
1156}
1157
1158/**
1159 * scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
1160 * @scmd:       Original SCSI cmd that eh has finished.
1161 * @done_q:     Queue for processed commands.
1162 *
1163 * Notes:
 1164 *    We don't want to use the normal command completion while we are
1165 *    still handling errors - it may cause other commands to be queued,
1166 *    and that would disturb what we are doing.  Thus we really want to
1167 *    keep a list of pending commands for final completion, and once we
1168 *    are ready to leave error handling we handle completion for real.
1169 */
1170void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
1171{
1172        list_move_tail(&scmd->eh_entry, done_q);
1173}
1174EXPORT_SYMBOL(scsi_eh_finish_cmd);
1175
1176/**
1177 * scsi_eh_get_sense - Get device sense data.
1178 * @work_q:     Queue of commands to process.
1179 * @done_q:     Queue of processed commands.
1180 *
1181 * Description:
1182 *    See if we need to request sense information.  if so, then get it
1183 *    now, so we have a better idea of what to do.
1184 *
1185 * Notes:
1186 *    This has the unfortunate side effect that if a shost adapter does
1187 *    not automatically request sense information, we end up shutting
1188 *    it down before we request it.
1189 *
1190 *    All drivers should request sense information internally these days,
1191 *    so for now all I have to say is tough noogies if you end up in here.
1192 *
1193 *    XXX: Long term this code should go away, but that needs an audit of
1194 *         all LLDDs first.
1195 */
1196int scsi_eh_get_sense(struct list_head *work_q,
1197                      struct list_head *done_q)
1198{
1199        struct scsi_cmnd *scmd, *next;
1200        struct Scsi_Host *shost;
1201        int rtn;
1202
1203        /*
 1204         * If SCSI_EH_ABORT_SCHEDULED has been set, the command has already
 1205         * timed out, so don't request sense for it.
1206         */
1207        list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1208                if ((scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) ||
1209                    SCSI_SENSE_VALID(scmd))
1210                        continue;
1211
1212                shost = scmd->device->host;
1213                if (scsi_host_eh_past_deadline(shost)) {
1214                        SCSI_LOG_ERROR_RECOVERY(3,
1215                                scmd_printk(KERN_INFO, scmd,
1216                                            "%s: skip request sense, past eh deadline\n",
1217                                             current->comm));
1218                        break;
1219                }
1220                if (status_byte(scmd->result) != CHECK_CONDITION)
1221                        /*
1222                         * don't request sense if there's no check condition
1223                         * status because the error we're processing isn't one
1224                         * that has a sense code (and some devices get
1225                         * confused by sense requests out of the blue)
1226                         */
1227                        continue;
1228
1229                SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
1230                                                  "%s: requesting sense\n",
1231                                                  current->comm));
1232                rtn = scsi_request_sense(scmd);
1233                if (rtn != SUCCESS)
1234                        continue;
1235
1236                SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1237                        "sense requested, result %x\n", scmd->result));
1238                SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd));
1239
1240                rtn = scsi_decide_disposition(scmd);
1241
1242                /*
1243                 * if the result was normal, then just pass it along to the
1244                 * upper level.
1245                 */
1246                if (rtn == SUCCESS)
1247                        /* we don't want this command reissued, just
1248                         * finished with the sense data, so set
1249                         * retries to the max allowed to ensure it
1250                         * won't get reissued */
1251                        scmd->retries = scmd->allowed;
1252                else if (rtn != NEEDS_RETRY)
1253                        continue;
1254
1255                scsi_eh_finish_cmd(scmd, done_q);
1256        }
1257
1258        return list_empty(work_q);
1259}
1260EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
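
/*
 * Transports or LLDs that provide their own eh_strategy_handler typically
 * reuse this helper as the first recovery stage.  Sketch (foo_eh_reset_ports()
 * is made up; scsi_eh_flush_done_q() is the usual final step):
 *
 *      static void foo_eh_strategy_handler(struct Scsi_Host *shost)
 *      {
 *              LIST_HEAD(eh_work_q);
 *              LIST_HEAD(eh_done_q);
 *              unsigned long flags;
 *
 *              spin_lock_irqsave(shost->host_lock, flags);
 *              list_splice_init(&shost->eh_cmd_q, &eh_work_q);
 *              spin_unlock_irqrestore(shost->host_lock, flags);
 *
 *              if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
 *                      foo_eh_reset_ports(shost, &eh_work_q, &eh_done_q);
 *              scsi_eh_flush_done_q(&eh_done_q);
 *      }
 */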
1261
1262/**
1263 * scsi_eh_tur - Send TUR to device.
1264 * @scmd:       &scsi_cmnd to send TUR
1265 *
1266 * Return value:
1267 *    0 - Device is ready. 1 - Device NOT ready.
1268 */
1269static int scsi_eh_tur(struct scsi_cmnd *scmd)
1270{
1271        static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
1272        int retry_cnt = 1, rtn;
1273
1274retry_tur:
1275        rtn = scsi_send_eh_cmnd(scmd, tur_command, 6,
1276                                scmd->device->eh_timeout, 0);
1277
1278        SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1279                "%s return: %x\n", __func__, rtn));
1280
1281        switch (rtn) {
1282        case NEEDS_RETRY:
1283                if (retry_cnt--)
1284                        goto retry_tur;
1285                /*FALLTHRU*/
1286        case SUCCESS:
1287                return 0;
1288        default:
1289                return 1;
1290        }
1291}
1292
1293/**
1294 * scsi_eh_test_devices - check if devices are responding from error recovery.
1295 * @cmd_list:   scsi commands in error recovery.
1296 * @work_q:     queue for commands which still need more error recovery
1297 * @done_q:     queue for commands which are finished
1298 * @try_stu:    boolean on if a STU command should be tried in addition to TUR.
1299 *
 1300 * Description:
1301 *    Tests if devices are in a working state.  Commands to devices now in
1302 *    a working state are sent to the done_q while commands to devices which
1303 *    are still failing to respond are returned to the work_q for more
1304 *    processing.
1305 **/
1306static int scsi_eh_test_devices(struct list_head *cmd_list,
1307                                struct list_head *work_q,
1308                                struct list_head *done_q, int try_stu)
1309{
1310        struct scsi_cmnd *scmd, *next;
1311        struct scsi_device *sdev;
1312        int finish_cmds;
1313
1314        while (!list_empty(cmd_list)) {
1315                scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
1316                sdev = scmd->device;
1317
1318                if (!try_stu) {
1319                        if (scsi_host_eh_past_deadline(sdev->host)) {
1320                                /* Push items back onto work_q */
1321                                list_splice_init(cmd_list, work_q);
1322                                SCSI_LOG_ERROR_RECOVERY(3,
1323                                        sdev_printk(KERN_INFO, sdev,
 1324                                                    "%s: skip test device, past eh deadline\n",
1325                                                    current->comm));
1326                                break;
1327                        }
1328                }
1329
1330                finish_cmds = !scsi_device_online(scmd->device) ||
1331                        (try_stu && !scsi_eh_try_stu(scmd) &&
1332                         !scsi_eh_tur(scmd)) ||
1333                        !scsi_eh_tur(scmd);
1334
1335                list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
1336                        if (scmd->device == sdev) {
1337                                if (finish_cmds &&
1338                                    (try_stu ||
1339                                     scsi_eh_action(scmd, SUCCESS) == SUCCESS))
1340                                        scsi_eh_finish_cmd(scmd, done_q);
1341                                else
1342                                        list_move_tail(&scmd->eh_entry, work_q);
1343                        }
1344        }
1345        return list_empty(work_q);
1346}
1347
1348/**
1349 * scsi_eh_try_stu - Send START_UNIT to device.
1350 * @scmd:       &scsi_cmnd to send START_UNIT
1351 *
1352 * Return value:
1353 *    0 - Device is ready. 1 - Device NOT ready.
1354 */
1355static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
1356{
1357        static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
1358
1359        if (scmd->device->allow_restart) {
1360                int i, rtn = NEEDS_RETRY;
1361
1362                for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
1363                        rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
1364
1365                if (rtn == SUCCESS)
1366                        return 0;
1367        }
1368
1369        return 1;
1370}
1371
 1372/**
1373 * scsi_eh_stu - send START_UNIT if needed
1374 * @shost:      &scsi host being recovered.
1375 * @work_q:     &list_head for pending commands.
1376 * @done_q:     &list_head for processed commands.
1377 *
1378 * Notes:
 1379 *    If commands are failing with NOT READY, INITIALIZING COMMAND REQUIRED
 1380 *    sense, try revalidating the device, which will end up sending a START UNIT.
1381 */
1382static int scsi_eh_stu(struct Scsi_Host *shost,
1383                              struct list_head *work_q,
1384                              struct list_head *done_q)
1385{
1386        struct scsi_cmnd *scmd, *stu_scmd, *next;
1387        struct scsi_device *sdev;
1388
1389        shost_for_each_device(sdev, shost) {
1390                if (scsi_host_eh_past_deadline(shost)) {
1391                        SCSI_LOG_ERROR_RECOVERY(3,
1392                                sdev_printk(KERN_INFO, sdev,
1393                                            "%s: skip START_UNIT, past eh deadline\n",
1394                                            current->comm));
1395                        break;
1396                }
1397                stu_scmd = NULL;
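                    /*
                     * Pick the first failed command on this device whose
                     * sense data scsi_check_sense() was unable to resolve.
                     */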
1398                list_for_each_entry(scmd, work_q, eh_entry)
1399                        if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
1400                            scsi_check_sense(scmd) == FAILED) {
1401                                stu_scmd = scmd;
1402                                break;
1403                        }
1404
1405                if (!stu_scmd)
1406                        continue;
1407
1408                SCSI_LOG_ERROR_RECOVERY(3,
1409                        sdev_printk(KERN_INFO, sdev,
1410                                     "%s: Sending START_UNIT\n",
1411                                    current->comm));
1412
1413                if (!scsi_eh_try_stu(stu_scmd)) {
1414                        if (!scsi_device_online(sdev) ||
1415                            !scsi_eh_tur(stu_scmd)) {
1416                                list_for_each_entry_safe(scmd, next,
1417                                                          work_q, eh_entry) {
1418                                        if (scmd->device == sdev &&
1419                                            scsi_eh_action(scmd, SUCCESS) == SUCCESS)
1420                                                scsi_eh_finish_cmd(scmd, done_q);
1421                                }
1422                        }
1423                } else {
1424                        SCSI_LOG_ERROR_RECOVERY(3,
1425                                sdev_printk(KERN_INFO, sdev,
1426                                            "%s: START_UNIT failed\n",
1427                                            current->comm));
1428                }
1429        }
1430
1431        return list_empty(work_q);
1432}
1433
1434
1435/**
1436 * scsi_eh_bus_device_reset - send bdr if needed
1437 * @shost:      scsi host being recovered.
1438 * @work_q:     &list_head for pending commands.
1439 * @done_q:     &list_head for processed commands.
1440 *
1441 * Notes:
1442 *    Try a bus device reset.  Still, look to see whether we have multiple
1443 *    devices that are jammed or not - if we have multiple devices, it
1444 *    makes no sense to try bus_device_reset - we really would need to try
1445 *    a bus_reset instead.
1446 */
1447static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
1448                                    struct list_head *work_q,
1449                                    struct list_head *done_q)
1450{
1451        struct scsi_cmnd *scmd, *bdr_scmd, *next;
1452        struct scsi_device *sdev;
1453        int rtn;
1454
1455        shost_for_each_device(sdev, shost) {
1456                if (scsi_host_eh_past_deadline(shost)) {
1457                        SCSI_LOG_ERROR_RECOVERY(3,
1458                                sdev_printk(KERN_INFO, sdev,
1459                                            "%s: skip BDR, past eh deadline\n",
1460                                             current->comm));
1461                        break;
1462                }
1463                bdr_scmd = NULL;
1464                list_for_each_entry(scmd, work_q, eh_entry)
1465                        if (scmd->device == sdev) {
1466                                bdr_scmd = scmd;
1467                                break;
1468                        }
1469
1470                if (!bdr_scmd)
1471                        continue;
1472
1473                SCSI_LOG_ERROR_RECOVERY(3,
1474                        sdev_printk(KERN_INFO, sdev,
1475                                     "%s: Sending BDR\n", current->comm));
1476                rtn = scsi_try_bus_device_reset(bdr_scmd);
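            /*
             * Commands for this device are finished if the reset worked and
             * the device answers TEST UNIT READY again, if it has gone
             * offline, or on FAST_IO_FAIL; otherwise they stay on work_q
             * for further escalation.
             */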
1477                if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1478                        if (!scsi_device_online(sdev) ||
1479                            rtn == FAST_IO_FAIL ||
1480                            !scsi_eh_tur(bdr_scmd)) {
1481                                list_for_each_entry_safe(scmd, next,
1482                                                         work_q, eh_entry) {
1483                                        if (scmd->device == sdev &&
1484                                            scsi_eh_action(scmd, rtn) != FAILED)
1485                                                scsi_eh_finish_cmd(scmd,
1486                                                                   done_q);
1487                                }
1488                        }
1489                } else {
1490                        SCSI_LOG_ERROR_RECOVERY(3,
1491                                sdev_printk(KERN_INFO, sdev,
1492                                            "%s: BDR failed\n", current->comm));
1493                }
1494        }
1495
1496        return list_empty(work_q);
1497}
1498
1499/**
1500 * scsi_eh_target_reset - send target reset if needed
1501 * @shost:      scsi host being recovered.
1502 * @work_q:     &list_head for pending commands.
1503 * @done_q:     &list_head for processed commands.
1504 *
1505 * Notes:
1506 *    Try a target reset.
1507 */
1508static int scsi_eh_target_reset(struct Scsi_Host *shost,
1509                                struct list_head *work_q,
1510                                struct list_head *done_q)
1511{
1512        LIST_HEAD(tmp_list);
1513        LIST_HEAD(check_list);
1514
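            /*
             * Move all failed commands onto a private list so that a single
             * target reset is sent per target id.  Commands whose reset
             * succeeded go to check_list for a later TEST UNIT READY check,
             * FAST_IO_FAIL commands are finished, and everything else goes
             * back onto work_q.
             */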
1515        list_splice_init(work_q, &tmp_list);
1516
1517        while (!list_empty(&tmp_list)) {
1518                struct scsi_cmnd *next, *scmd;
1519                int rtn;
1520                unsigned int id;
1521
1522                if (scsi_host_eh_past_deadline(shost)) {
1523                        /* push back on work queue for further processing */
1524                        list_splice_init(&check_list, work_q);
1525                        list_splice_init(&tmp_list, work_q);
1526                        SCSI_LOG_ERROR_RECOVERY(3,
1527                                shost_printk(KERN_INFO, shost,
1528                                            "%s: Skip target reset, past eh deadline\n",
1529                                             current->comm));
1530                        return list_empty(work_q);
1531                }
1532
1533                scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
1534                id = scmd_id(scmd);
1535
1536                SCSI_LOG_ERROR_RECOVERY(3,
1537                        shost_printk(KERN_INFO, shost,
1538                                     "%s: Sending target reset to target %d\n",
1539                                     current->comm, id));
1540                rtn = scsi_try_target_reset(scmd);
1541                if (rtn != SUCCESS && rtn != FAST_IO_FAIL)
1542                        SCSI_LOG_ERROR_RECOVERY(3,
1543                                shost_printk(KERN_INFO, shost,
1544                                             "%s: Target reset failed"
1545                                             " target: %d\n",
1546                                             current->comm, id));
1547                list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {
1548                        if (scmd_id(scmd) != id)
1549                                continue;
1550
1551                        if (rtn == SUCCESS)
1552                                list_move_tail(&scmd->eh_entry, &check_list);
1553                        else if (rtn == FAST_IO_FAIL)
1554                                scsi_eh_finish_cmd(scmd, done_q);
1555                        else
1556                                /* push back on work queue for further processing */
1557                                list_move(&scmd->eh_entry, work_q);
1558                }
1559        }
1560
1561        return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
1562}
1563
1564/**
1565 * scsi_eh_bus_reset - send a bus reset
1566 * @shost:      &scsi host being recovered.
1567 * @work_q:     &list_head for pending commands.
1568 * @done_q:     &list_head for processed commands.
1569 */
1570static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1571                             struct list_head *work_q,
1572                             struct list_head *done_q)
1573{
1574        struct scsi_cmnd *scmd, *chan_scmd, *next;
1575        LIST_HEAD(check_list);
1576        unsigned int channel;
1577        int rtn;
1578
1579        /*
1580         * we really want to loop over the various channels, and do this on
1581         * a channel by channel basis.  we should also check to see if any
1582         * of the failed commands are on soft_reset devices, and if so, skip
1583         * the reset.
1584         */
1585
1586        for (channel = 0; channel <= shost->max_channel; channel++) {
1587                if (scsi_host_eh_past_deadline(shost)) {
1588                        list_splice_init(&check_list, work_q);
1589                        SCSI_LOG_ERROR_RECOVERY(3,
1590                                shost_printk(KERN_INFO, shost,
1591                                            "%s: skip BRST, past eh deadline\n",
1592                                             current->comm));
1593                        return list_empty(work_q);
1594                }
1595
1596                chan_scmd = NULL;
1597                list_for_each_entry(scmd, work_q, eh_entry) {
1598                        if (channel == scmd_channel(scmd)) {
1599                                chan_scmd = scmd;
1600                                break;
1601                                /*
1602                                 * FIXME add back in some support for
1603                                 * soft_reset devices.
1604                                 */
1605                        }
1606                }
1607
1608                if (!chan_scmd)
1609                        continue;
1610                SCSI_LOG_ERROR_RECOVERY(3,
1611                        shost_printk(KERN_INFO, shost,
1612                                     "%s: Sending BRST chan: %d\n",
1613                                     current->comm, channel));
1614                rtn = scsi_try_bus_reset(chan_scmd);
1615                if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1616                        list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1617                                if (channel == scmd_channel(scmd)) {
1618                                        if (rtn == FAST_IO_FAIL)
1619                                                scsi_eh_finish_cmd(scmd,
1620                                                                   done_q);
1621                                        else
1622                                                list_move_tail(&scmd->eh_entry,
1623                                                               &check_list);
1624                                }
1625                        }
1626                } else {
1627                        SCSI_LOG_ERROR_RECOVERY(3,
1628                                shost_printk(KERN_INFO, shost,
1629                                             "%s: BRST failed chan: %d\n",
1630                                             current->comm, channel));
1631                }
1632        }
1633        return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
1634}
1635
1636/**
1637 * scsi_eh_host_reset - send a host reset
1638 * @shost:      host to be reset.
1639 * @work_q:     &list_head for pending commands.
1640 * @done_q:     &list_head for processed commands.
1641 */
1642static int scsi_eh_host_reset(struct Scsi_Host *shost,
1643                              struct list_head *work_q,
1644                              struct list_head *done_q)
1645{
1646        struct scsi_cmnd *scmd, *next;
1647        LIST_HEAD(check_list);
1648        int rtn;
1649
1650        if (!list_empty(work_q)) {
1651                scmd = list_entry(work_q->next,
1652                                  struct scsi_cmnd, eh_entry);
1653
1654                SCSI_LOG_ERROR_RECOVERY(3,
1655                        shost_printk(KERN_INFO, shost,
1656                                     "%s: Sending HRST\n",
1657                                     current->comm));
1658
1659                rtn = scsi_try_host_reset(scmd);
1660                if (rtn == SUCCESS) {
1661                        list_splice_init(work_q, &check_list);
1662                } else if (rtn == FAST_IO_FAIL) {
1663                        list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1664                                scsi_eh_finish_cmd(scmd, done_q);
1665                        }
1666                } else {
1667                        SCSI_LOG_ERROR_RECOVERY(3,
1668                                shost_printk(KERN_INFO, shost,
1669                                             "%s: HRST failed\n",
1670                                             current->comm));
1671                }
1672        }
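            /*
             * Pass try_stu = 1: after a host reset devices may need a
             * START UNIT before they answer TEST UNIT READY.
             */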
1673        return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
1674}
1675
1676/**
1677 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
1678 * @work_q:     &list_head for pending commands.
1679 * @done_q:     &list_head for processed commands.
1680 */
1681static void scsi_eh_offline_sdevs(struct list_head *work_q,
1682                                  struct list_head *done_q)
1683{
1684        struct scsi_cmnd *scmd, *next;
1685        struct scsi_device *sdev;
1686
1687        list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1688                sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
1689                            "not ready after error recovery\n");
1690                sdev = scmd->device;
1691
1692                mutex_lock(&sdev->state_mutex);
1693                scsi_device_set_state(sdev, SDEV_OFFLINE);
1694                mutex_unlock(&sdev->state_mutex);
1695
1696                scsi_eh_finish_cmd(scmd, done_q);
1697        }
1698        return;
1699}
1700
1701/**
1702 * scsi_noretry_cmd - determine if command should be failed fast
1703 * @scmd:       SCSI cmd to examine.
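     *
     * Return value:
     *    0 - the command may be retried, nonzero - fail the command without
     *    further retries.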
1704 */
1705int scsi_noretry_cmd(struct scsi_cmnd *scmd)
1706{
1707        switch (host_byte(scmd->result)) {
1708        case DID_OK:
1709                break;
1710        case DID_TIME_OUT:
1711                goto check_type;
1712        case DID_BUS_BUSY:
1713                return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
1714        case DID_PARITY:
1715                return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
1716        case DID_ERROR:
1717                if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
1718                    status_byte(scmd->result) == RESERVATION_CONFLICT)
1719                        return 0;
1720                /* fall through */
1721        case DID_SOFT_ERROR:
1722                return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
1723        }
1724
1725        if (status_byte(scmd->result) != CHECK_CONDITION)
1726                return 0;
1727
1728check_type:
1729        /*
1730         * assume caller has checked sense and determined
1731         * the check condition was retryable.
1732         */
1733        if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
1734            blk_rq_is_passthrough(scmd->request))
1735                return 1;
1736        else
1737                return 0;
1738}
1739
1740/**
1741 * scsi_decide_disposition - Disposition a cmd on return from LLD.
1742 * @scmd:       SCSI cmd to examine.
1743 *
1744 * Notes:
1745 *    This is *only* called when we are examining the status after sending
1746 *    out the actual data command.  any commands that are queued for error
1747 *    recovery (e.g. test_unit_ready) do *not* come through here.
1748 *
1749 *    When this routine returns FAILED, it means the error handler thread
1750 *    is woken.  In cases where the error code indicates an error that
1751 *    doesn't require the error handler's intervention (i.e. we don't need
1752 *    to abort/reset), this function should return SUCCESS.
1753 */
1754int scsi_decide_disposition(struct scsi_cmnd *scmd)
1755{
1756        int rtn;
1757
1758        /*
1759         * if the device is offline, then we clearly just pass the result back
1760         * up to the top level.
1761         */
1762        if (!scsi_device_online(scmd->device)) {
1763                SCSI_LOG_ERROR_RECOVERY(5, scmd_printk(KERN_INFO, scmd,
1764                        "%s: device offline - report as SUCCESS\n", __func__));
1765                return SUCCESS;
1766        }
1767
1768        /*
1769         * first check the host byte, to see if there is anything in there
1770         * that would indicate what we need to do.
1771         */
1772        switch (host_byte(scmd->result)) {
1773        case DID_PASSTHROUGH:
1774                /*
1775                 * no matter what, pass this through to the upper layer.
1776                 * nuke this special code so that it looks like we are saying
1777                 * did_ok.
1778                 */
1779                scmd->result &= 0xff00ffff;
1780                return SUCCESS;
1781        case DID_OK:
1782                /*
1783                 * looks good.  drop through, and check the next byte.
1784                 */
1785                break;
1786        case DID_ABORT:
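                    /*
                     * The midlayer scheduled this abort itself after a command
                     * timeout, so report the failure upwards as a timeout.
                     */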
1787                if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
1788                        set_host_byte(scmd, DID_TIME_OUT);
1789                        return SUCCESS;
1790                }
1791                /* FALLTHROUGH */
1792        case DID_NO_CONNECT:
1793        case DID_BAD_TARGET:
1794                /*
1795                 * note - this means that we just report the status back
1796                 * to the top level driver, not that we actually think
1797                 * that it indicates SUCCESS.
1798                 */
1799                return SUCCESS;
1800        case DID_SOFT_ERROR:
1801                /*
1802                 * when the low level driver returns did_soft_error,
1803                 * it is responsible for keeping an internal retry counter
1804                 * in order to avoid endless loops (db)
1805                 */
1806                goto maybe_retry;
1807        case DID_IMM_RETRY:
1808                return NEEDS_RETRY;
1809
1810        case DID_REQUEUE:
1811                return ADD_TO_MLQUEUE;
1812        case DID_TRANSPORT_DISRUPTED:
1813                /*
1814                 * LLD/transport was disrupted during processing of the IO.
1815                 * The transport class is now blocked/blocking,
1816                 * and the transport will decide what to do with the IO
1817                 * based on its timers and recovery capabilities if
1818                 * there are enough retries.
1819                 */
1820                goto maybe_retry;
1821        case DID_TRANSPORT_FAILFAST:
1822                /*
1823                 * The transport decided to failfast the IO (most likely
1824                 * the fast io fail tmo fired), so send IO directly upwards.
1825                 */
1826                return SUCCESS;
1827        case DID_ERROR:
1828                if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
1829                    status_byte(scmd->result) == RESERVATION_CONFLICT)
1830                        /*
1831                         * execute reservation conflict processing code
1832                         * lower down
1833                         */
1834                        break;
1835                /* fallthrough */
1836        case DID_BUS_BUSY:
1837        case DID_PARITY:
1838                goto maybe_retry;
1839        case DID_TIME_OUT:
1840                /*
1841                 * when we scan the bus, we get timeout messages for
1842                 * these commands if there is no device available.
1843                 * other hosts report did_no_connect for the same thing.
1844                 */
1845                if ((scmd->cmnd[0] == TEST_UNIT_READY ||
1846                     scmd->cmnd[0] == INQUIRY)) {
1847                        return SUCCESS;
1848                } else {
1849                        return FAILED;
1850                }
1851        case DID_RESET:
1852                return SUCCESS;
1853        default:
1854                return FAILED;
1855        }
1856
1857        /*
1858         * next, check the message byte.
1859         */
1860        if (msg_byte(scmd->result) != COMMAND_COMPLETE)
1861                return FAILED;
1862
1863        /*
1864         * check the status byte to see if this indicates anything special.
1865         */
1866        switch (status_byte(scmd->result)) {
1867        case QUEUE_FULL:
1868                scsi_handle_queue_full(scmd->device);
1869                /*
1870                 * the case of trying to send too many commands to a
1871                 * tagged queueing device.
1872                 */
1873                /* FALLTHROUGH */
1874        case BUSY:
1875                /*
1876                 * device can't talk to us at the moment.  Should only
1877                 * occur (SAM-3) when the task queue is empty, so will cause
1878                 * the empty queue handling to trigger a stall in the
1879                 * device.
1880                 */
1881                return ADD_TO_MLQUEUE;
1882        case GOOD:
1883                if (scmd->cmnd[0] == REPORT_LUNS)
1884                        scmd->device->sdev_target->expecting_lun_change = 0;
1885                scsi_handle_queue_ramp_up(scmd->device);
1886                /* FALLTHROUGH */
1887        case COMMAND_TERMINATED:
1888                return SUCCESS;
1889        case TASK_ABORTED:
1890                goto maybe_retry;
1891        case CHECK_CONDITION:
1892                rtn = scsi_check_sense(scmd);
1893                if (rtn == NEEDS_RETRY)
1894                        goto maybe_retry;
1895                /* if rtn == FAILED, we have no sense information;
1896                 * returning FAILED will wake the error handler thread
1897                 * to collect the sense and redo the decide
1898                 * disposition */
1899                return rtn;
1900        case CONDITION_GOOD:
1901        case INTERMEDIATE_GOOD:
1902        case INTERMEDIATE_C_GOOD:
1903        case ACA_ACTIVE:
1904                /*
1905                 * who knows?  FIXME(eric)
1906                 */
1907                return SUCCESS;
1908
1909        case RESERVATION_CONFLICT:
1910                sdev_printk(KERN_INFO, scmd->device,
1911                            "reservation conflict\n");
1912                set_host_byte(scmd, DID_NEXUS_FAILURE);
1913                return SUCCESS; /* causes immediate i/o error */
1914        default:
1915                return FAILED;
1916        }
1917        return FAILED;
1918
1919maybe_retry:
1920
1921        /* we requeue for retry because the error was retryable, and
1922         * the request was not marked fast fail.  Note that above,
1923         * even if the request is marked fast fail, we still requeue
1924         * for queue congestion conditions (QUEUE_FULL or BUSY) */
1925        if ((++scmd->retries) <= scmd->allowed
1926            && !scsi_noretry_cmd(scmd)) {
1927                return NEEDS_RETRY;
1928        } else {
1929                /*
1930                 * no more retries - report this one back to upper level.
1931                 */
1932                return SUCCESS;
1933        }
1934}
1935
1936static void eh_lock_door_done(struct request *req, blk_status_t status)
1937{
1938        __blk_put_request(req->q, req);
1939}
1940
1941/**
1942 * scsi_eh_lock_door - Prevent medium removal for the specified device
1943 * @sdev:       SCSI device to prevent medium removal
1944 *
1945 * Locking:
1946 *      We must be called from process context.
1947 *
1948 * Notes:
1949 *      We queue up an asynchronous "PREVENT ALLOW MEDIUM REMOVAL" request
1950 *      on the head of the device's request queue, and continue.
1951 */
1952static void scsi_eh_lock_door(struct scsi_device *sdev)
1953{
1954        struct request *req;
1955        struct scsi_request *rq;
1956
1957        req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, 0);
1958        if (IS_ERR(req))
1959                return;
1960        rq = scsi_req(req);
1961
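            /*
             * ALLOW_MEDIUM_REMOVAL is the PREVENT ALLOW MEDIUM REMOVAL opcode;
             * byte 4 set to SCSI_REMOVAL_PREVENT locks the door.
             */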
1962        rq->cmd[0] = ALLOW_MEDIUM_REMOVAL;
1963        rq->cmd[1] = 0;
1964        rq->cmd[2] = 0;
1965        rq->cmd[3] = 0;
1966        rq->cmd[4] = SCSI_REMOVAL_PREVENT;
1967        rq->cmd[5] = 0;
1968        rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
1969
1970        req->rq_flags |= RQF_QUIET;
1971        req->timeout = 10 * HZ;
1972        rq->retries = 5;
1973
1974        blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
1975}
1976
1977/**
1978 * scsi_restart_operations - restart io operations to the specified host.
1979 * @shost:      Host we are restarting.
1980 *
1981 * Notes:
1982 *    When we entered the error handler, we blocked all further i/o to
1983 *    this device.  we need to 'reverse' this process.
1984 */
1985static void scsi_restart_operations(struct Scsi_Host *shost)
1986{
1987        struct scsi_device *sdev;
1988        unsigned long flags;
1989
1990        /*
1991         * If the door was locked, we need to insert a door lock request
1992         * onto the head of the SCSI request queue for the device.  There
1993         * is no point trying to lock the door of an off-line device.
1994         */
1995        shost_for_each_device(sdev, shost) {
1996                if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
1997                        scsi_eh_lock_door(sdev);
1998                        sdev->was_reset = 0;
1999                }
2000        }
2001
2002        /*
2003         * next free up anything directly waiting upon the host.  this
2004         * will be requests for character device operations, and also for
2005         * ioctls to queued block devices.
2006         */
2007        SCSI_LOG_ERROR_RECOVERY(3,
2008                shost_printk(KERN_INFO, shost, "waking up host to restart\n"));
2009
2010        spin_lock_irqsave(shost->host_lock, flags);
2011        if (scsi_host_set_state(shost, SHOST_RUNNING))
2012                if (scsi_host_set_state(shost, SHOST_CANCEL))
2013                        BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
2014        spin_unlock_irqrestore(shost->host_lock, flags);
2015
2016        wake_up(&shost->host_wait);
2017
2018        /*
2019         * finally we need to re-initiate requests that may be pending.  we will
2020         * have had everything blocked while error handling is taking place, and
2021         * now that error recovery is done, we will need to ensure that these
2022         * requests are started.
2023         */
2024        scsi_run_host_queues(shost);
2025
2026        /*
2027         * if eh is active and host_eh_scheduled is pending we need to re-run
2028         * recovery.  we do this check after scsi_run_host_queues() to allow
2029         * everything pent up since the last eh run a chance to make forward
2030         * progress before we sync again.  Either we'll immediately re-run
2031         * recovery or scsi_device_unbusy() will wake us again when these
2032         * pending commands complete.
2033         */
2034        spin_lock_irqsave(shost->host_lock, flags);
2035        if (shost->host_eh_scheduled)
2036                if (scsi_host_set_state(shost, SHOST_RECOVERY))
2037                        WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
2038        spin_unlock_irqrestore(shost->host_lock, flags);
2039}
2040
2041/**
2042 * scsi_eh_ready_devs - check device ready state and recover if not.
2043 * @shost:      host to be recovered.
2044 * @work_q:     &list_head for pending commands.
2045 * @done_q:     &list_head for processed commands.
2046 */
2047void scsi_eh_ready_devs(struct Scsi_Host *shost,
2048                        struct list_head *work_q,
2049                        struct list_head *done_q)
2050{
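            /*
             * Escalate one step at a time: START UNIT, then device, target,
             * bus and host reset, and finally offline any devices that still
             * do not respond.
             */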
2051        if (!scsi_eh_stu(shost, work_q, done_q))
2052                if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
2053                        if (!scsi_eh_target_reset(shost, work_q, done_q))
2054                                if (!scsi_eh_bus_reset(shost, work_q, done_q))
2055                                        if (!scsi_eh_host_reset(shost, work_q, done_q))
2056                                                scsi_eh_offline_sdevs(work_q,
2057                                                                      done_q);
2058}
2059EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
2060
2061/**
2062 * scsi_eh_flush_done_q - finish processed commands or retry them.
2063 * @done_q:     list_head of processed commands.
2064 */
2065void scsi_eh_flush_done_q(struct list_head *done_q)
2066{
2067        struct scsi_cmnd *scmd, *next;
2068
2069        list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
2070                list_del_init(&scmd->eh_entry);
2071                if (scsi_device_online(scmd->device) &&
2072                    !scsi_noretry_cmd(scmd) &&
2073                    (++scmd->retries <= scmd->allowed)) {
2074                        SCSI_LOG_ERROR_RECOVERY(3,
2075                                scmd_printk(KERN_INFO, scmd,
2076                                             "%s: flush retry cmd\n",
2077                                             current->comm));
2078                        scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
2079                } else {
2080                        /*
2081                         * If we just got sense for the device (called
2082                         * scsi_eh_get_sense), scmd->result is already
2083                         * set, do not set DRIVER_TIMEOUT.
2084                         */
2085                        if (!scmd->result)
2086                                scmd->result |= (DRIVER_TIMEOUT << 24);
2087                        SCSI_LOG_ERROR_RECOVERY(3,
2088                                scmd_printk(KERN_INFO, scmd,
2089                                             "%s: flush finish cmd\n",
2090                                             current->comm));
2091                        scsi_finish_command(scmd);
2092                }
2093        }
2094}
2095EXPORT_SYMBOL(scsi_eh_flush_done_q);
2096
2097/**
2098 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
2099 * @shost:      Host to unjam.
2100 *
2101 * Notes:
2102 *    When we come in here, we *know* that all commands on the bus have
2103 *    either completed, failed or timed out.  we also know that no further
2104 *    commands are being sent to the host, so things are relatively quiet
2105 *    and we have freedom to fiddle with things as we wish.
2106 *
2107 *    This is only the *default* implementation.  it is possible for
2108 *    individual drivers to supply their own version of this function, and
2109 *    if the maintainer wishes to do this, it is strongly suggested that
2110 *    this function be taken as a template and modified.  this function
2111 *    was designed to correctly handle problems for about 95% of the
2112 *    different cases out there, and it should always provide at least a
2113 *    reasonable amount of error recovery.
2114 *
2115 *    Any command marked 'failed' or 'timeout' must eventually have
2116 *    scsi_finish_command() called for it.  we do all of the retry stuff
2117 *    here, so when we restart the host after we return it should have an
2118 *    empty queue.
2119 */
2120static void scsi_unjam_host(struct Scsi_Host *shost)
2121{
2122        unsigned long flags;
2123        LIST_HEAD(eh_work_q);
2124        LIST_HEAD(eh_done_q);
2125
2126        spin_lock_irqsave(shost->host_lock, flags);
2127        list_splice_init(&shost->eh_cmd_q, &eh_work_q);
2128        spin_unlock_irqrestore(shost->host_lock, flags);
2129
2130        SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));
2131
2132        if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
2133                scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
2134
2135        spin_lock_irqsave(shost->host_lock, flags);
2136        if (shost->eh_deadline != -1)
2137                shost->last_reset = 0;
2138        spin_unlock_irqrestore(shost->host_lock, flags);
2139        scsi_eh_flush_done_q(&eh_done_q);
2140}
2141
2142/**
2143 * scsi_error_handler - SCSI error handler thread
2144 * @data:       Host for which we are running.
2145 *
2146 * Notes:
2147 *    This is the main error handling loop.  This is run as a kernel thread
2148 *    for every SCSI host and handles all error handling activity.
2149 */
2150int scsi_error_handler(void *data)
2151{
2152        struct Scsi_Host *shost = data;
2153
2154        /*
2155         * We use TASK_INTERRUPTIBLE so that the thread is not
2156         * counted against the load average as a running process.
2157         * We never actually get interrupted because kthread_run
2158         * disables signal delivery for the created thread.
2159         */
2160        while (true) {
2161                /*
2162                 * The sequence in kthread_stop() sets the stop flag first
2163                 * then wakes the process.  To avoid missed wakeups, the task
2164                 * should always be in a non running state before the stop
2165                 * flag is checked
2166                 */
2167                set_current_state(TASK_INTERRUPTIBLE);
2168                if (kthread_should_stop())
2169                        break;
2170
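                    /*
                     * Sleep until error handling has been requested (a failed
                     * command or an explicit scsi_schedule_eh() call) and every
                     * outstanding command on this host has completed or failed.
                     */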
2171                if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
2172                    shost->host_failed != scsi_host_busy(shost)) {
2173                        SCSI_LOG_ERROR_RECOVERY(1,
2174                                shost_printk(KERN_INFO, shost,
2175                                             "scsi_eh_%d: sleeping\n",
2176                                             shost->host_no));
2177                        schedule();
2178                        continue;
2179                }
2180
2181                __set_current_state(TASK_RUNNING);
2182                SCSI_LOG_ERROR_RECOVERY(1,
2183                        shost_printk(KERN_INFO, shost,
2184                                     "scsi_eh_%d: waking up %d/%d/%d\n",
2185                                     shost->host_no, shost->host_eh_scheduled,
2186                                     shost->host_failed,
2187                                     scsi_host_busy(shost)));
2188
2189                /*
2190                 * We have a host that is failing for some reason.  Figure out
2191                 * what we need to do to get it up and online again (if we can).
2192                 * If we fail, we end up taking the thing offline.
2193                 */
2194                if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) {
2195                        SCSI_LOG_ERROR_RECOVERY(1,
2196                                shost_printk(KERN_ERR, shost,
2197                                             "scsi_eh_%d: unable to autoresume\n",
2198                                             shost->host_no));
2199                        continue;
2200                }
2201
2202                if (shost->transportt->eh_strategy_handler)
2203                        shost->transportt->eh_strategy_handler(shost);
2204                else
2205                        scsi_unjam_host(shost);
2206
2207                /* All scmds have been handled */
2208                shost->host_failed = 0;
2209
2210                /*
2211                 * Note - if the above fails completely, the action is to take
2212                 * individual devices offline and flush the queue of any
2213                 * outstanding requests that may have been pending.  When we
2214                 * restart, we restart any I/O to any other devices on the bus
2215                 * which are still online.
2216                 */
2217                scsi_restart_operations(shost);
2218                if (!shost->eh_noresume)
2219                        scsi_autopm_put_host(shost);
2220        }
2221        __set_current_state(TASK_RUNNING);
2222
2223        SCSI_LOG_ERROR_RECOVERY(1,
2224                shost_printk(KERN_INFO, shost,
2225                             "Error handler scsi_eh_%d exiting\n",
2226                             shost->host_no));
2227        shost->ehandler = NULL;
2228        return 0;
2229}
2230
2231/*
2232 * Function:    scsi_report_bus_reset()
2233 *
2234 * Purpose:     Utility function used by low-level drivers to report that
2235 *              they have observed a bus reset on the bus being handled.
2236 *
2237 * Arguments:   shost       - Host in question
2238 *              channel     - channel on which reset was observed.
2239 *
2240 * Returns:     Nothing
2241 *
2242 * Lock status: Host lock must be held.
2243 *
2244 * Notes:       This only needs to be called if the reset is one which
2245 *              originates from an unknown location.  Resets originated
2246 *              by the mid-level itself don't need to call this, but there
2247 *              should be no harm.
2248 *
2249 *              The main purpose of this is to make sure that a CHECK_CONDITION
2250 *              is properly treated.
2251 */
2252void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
2253{
2254        struct scsi_device *sdev;
2255
2256        __shost_for_each_device(sdev, shost) {
2257                if (channel == sdev_channel(sdev))
2258                        __scsi_report_device_reset(sdev, NULL);
2259        }
2260}
2261EXPORT_SYMBOL(scsi_report_bus_reset);
2262
2263/*
2264 * Function:    scsi_report_device_reset()
2265 *
2266 * Purpose:     Utility function used by low-level drivers to report that
2267 *              they have observed a device reset on the device being handled.
2268 *
2269 * Arguments:   shost       - Host in question
2270 *              channel     - channel on which reset was observed
2271 *              target      - target on which reset was observed
2272 *
2273 * Returns:     Nothing
2274 *
2275 * Lock status: Host lock must be held
2276 *
2277 * Notes:       This only needs to be called if the reset is one which
2278 *              originates from an unknown location.  Resets originated
2279 *              by the mid-level itself don't need to call this, but there
2280 *              should be no harm.
2281 *
2282 *              The main purpose of this is to make sure that a CHECK_CONDITION
2283 *              is properly treated.
2284 */
2285void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
2286{
2287        struct scsi_device *sdev;
2288
2289        __shost_for_each_device(sdev, shost) {
2290                if (channel == sdev_channel(sdev) &&
2291                    target == sdev_id(sdev))
2292                        __scsi_report_device_reset(sdev, NULL);
2293        }
2294}
2295EXPORT_SYMBOL(scsi_report_device_reset);
2296
2297static void
2298scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
2299{
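            /*
             * Intentionally empty: the command built by scsi_ioctl_reset() is
             * only a vehicle for the reset TMFs and never completes through
             * the normal ->scsi_done() path.
             */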
2300}
2301
2302/**
2303 * scsi_ioctl_reset: explicitly reset a host/bus/target/device
2304 * @dev:        scsi_device to operate on
2305 * @arg:        reset type (see sg.h)
2306 */
2307int
2308scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
2309{
2310        struct scsi_cmnd *scmd;
2311        struct Scsi_Host *shost = dev->host;
2312        struct request *rq;
2313        unsigned long flags;
2314        int error = 0, rtn, val;
2315
2316        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2317                return -EACCES;
2318
2319        error = get_user(val, arg);
2320        if (error)
2321                return error;
2322
2323        if (scsi_autopm_get_host(shost) < 0)
2324                return -EIO;
2325
2326        error = -EIO;
2327        rq = kzalloc(sizeof(struct request) + sizeof(struct scsi_cmnd) +
2328                        shost->hostt->cmd_size, GFP_KERNEL);
2329        if (!rq)
2330                goto out_put_autopm_host;
2331        blk_rq_init(NULL, rq);
2332
2333        scmd = (struct scsi_cmnd *)(rq + 1);
2334        scsi_init_command(dev, scmd);
2335        scmd->request = rq;
2336        scmd->cmnd = scsi_req(rq)->cmd;
2337
2338        scmd->scsi_done         = scsi_reset_provider_done_command;
2339        memset(&scmd->sdb, 0, sizeof(scmd->sdb));
2340
2341        scmd->cmd_len                   = 0;
2342
2343        scmd->sc_data_direction         = DMA_BIDIRECTIONAL;
2344
2345        spin_lock_irqsave(shost->host_lock, flags);
2346        shost->tmf_in_progress = 1;
2347        spin_unlock_irqrestore(shost->host_lock, flags);
2348
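            /*
             * Unless SG_SCSI_RESET_NO_ESCALATE was requested, fall through to
             * the next, more drastic reset type whenever the current one fails.
             */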
2349        switch (val & ~SG_SCSI_RESET_NO_ESCALATE) {
2350        case SG_SCSI_RESET_NOTHING:
2351                rtn = SUCCESS;
2352                break;
2353        case SG_SCSI_RESET_DEVICE:
2354                rtn = scsi_try_bus_device_reset(scmd);
2355                if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
2356                        break;
2357                /* FALLTHROUGH */
2358        case SG_SCSI_RESET_TARGET:
2359                rtn = scsi_try_target_reset(scmd);
2360                if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
2361                        break;
2362                /* FALLTHROUGH */
2363        case SG_SCSI_RESET_BUS:
2364                rtn = scsi_try_bus_reset(scmd);
2365                if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
2366                        break;
2367                /* FALLTHROUGH */
2368        case SG_SCSI_RESET_HOST:
2369                rtn = scsi_try_host_reset(scmd);
2370                if (rtn == SUCCESS)
2371                        break;
2372                /* FALLTHROUGH */
2373        default:
2374                rtn = FAILED;
2375                break;
2376        }
2377
2378        error = (rtn == SUCCESS) ? 0 : -EIO;
2379
2380        spin_lock_irqsave(shost->host_lock, flags);
2381        shost->tmf_in_progress = 0;
2382        spin_unlock_irqrestore(shost->host_lock, flags);
2383
2384        /*
2385         * be sure to wake up anyone who was sleeping or had their queue
2386         * suspended while we performed the TMF.
2387         */
2388        SCSI_LOG_ERROR_RECOVERY(3,
2389                shost_printk(KERN_INFO, shost,
2390                             "waking up host to restart after TMF\n"));
2391
2392        wake_up(&shost->host_wait);
2393        scsi_run_host_queues(shost);
2394
2395        scsi_put_command(scmd);
2396        kfree(rq);
2397
2398out_put_autopm_host:
2399        scsi_autopm_put_host(shost);
2400        return error;
2401}
2402EXPORT_SYMBOL(scsi_ioctl_reset);
2403
2404bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
2405                                  struct scsi_sense_hdr *sshdr)
2406{
2407        return scsi_normalize_sense(cmd->sense_buffer,
2408                        SCSI_SENSE_BUFFERSIZE, sshdr);
2409}
2410EXPORT_SYMBOL(scsi_command_normalize_sense);
2411
2412/**
2413 * scsi_get_sense_info_fld - get information field from sense data (either fixed or descriptor format)
2414 * @sense_buffer:       byte array of sense data
2415 * @sb_len:             number of valid bytes in sense_buffer
2416 * @info_out:           pointer to a 64 bit integer where the 8 or 4 byte
2417 *                      information field will be placed if found.
2418 *
2419 * Return value:
2420 *      true if information field found, false if not found.
2421 */
2422bool scsi_get_sense_info_fld(const u8 *sense_buffer, int sb_len,
2423                             u64 *info_out)
2424{
2425        const u8 *ucp;
2426
2427        if (sb_len < 7)
2428                return false;
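            /*
             * 0x70/0x71: fixed format sense data, where the information field
             * is valid only when bit 7 of byte 0 is set.  0x72/0x73:
             * descriptor format sense data.
             */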
2429        switch (sense_buffer[0] & 0x7f) {
2430        case 0x70:
2431        case 0x71:
2432                if (sense_buffer[0] & 0x80) {
2433                        *info_out = get_unaligned_be32(&sense_buffer[3]);
2434                        return true;
2435                }
2436                return false;
2437        case 0x72:
2438        case 0x73:
2439                ucp = scsi_sense_desc_find(sense_buffer, sb_len,
2440                                           0 /* info desc */);
2441                if (ucp && (0xa == ucp[1])) {
2442                        *info_out = get_unaligned_be64(&ucp[4]);
2443                        return true;
2444                }
2445                return false;
2446        default:
2447                return false;
2448        }
2449}
2450EXPORT_SYMBOL(scsi_get_sense_info_fld);
2451