linux/drivers/scsi/scsi_error.c
   1/*
   2 *  scsi_error.c Copyright (C) 1997 Eric Youngdale
   3 *
   4 *  SCSI error/timeout handling
   5 *      Initial versions: Eric Youngdale.  Based upon conversations with
   6 *                        Leonard Zubkoff and David Miller at Linux Expo,
   7 *                        ideas originating from all over the place.
   8 *
   9 *      Restructured scsi_unjam_host and associated functions.
  10 *      September 04, 2002 Mike Anderson (andmike@us.ibm.com)
  11 *
  12 *      Forward port of Russell King's (rmk@arm.linux.org.uk) changes and
  13 *      minor cleanups.
  14 *      September 30, 2002 Mike Anderson (andmike@us.ibm.com)
  15 */
  16
  17#include <linux/module.h>
  18#include <linux/sched.h>
  19#include <linux/gfp.h>
  20#include <linux/timer.h>
  21#include <linux/string.h>
  22#include <linux/kernel.h>
  23#include <linux/freezer.h>
  24#include <linux/kthread.h>
  25#include <linux/interrupt.h>
  26#include <linux/blkdev.h>
  27#include <linux/delay.h>
  28#include <linux/jiffies.h>
  29
  30#include <scsi/scsi.h>
  31#include <scsi/scsi_cmnd.h>
  32#include <scsi/scsi_dbg.h>
  33#include <scsi/scsi_device.h>
  34#include <scsi/scsi_driver.h>
  35#include <scsi/scsi_eh.h>
  36#include <scsi/scsi_transport.h>
  37#include <scsi/scsi_host.h>
  38#include <scsi/scsi_ioctl.h>
  39#include <scsi/sg.h>
  40
  41#include "scsi_priv.h"
  42#include "scsi_logging.h"
  43#include "scsi_transport_api.h"
  44
  45#include <trace/events/scsi.h>
  46
  47static void scsi_eh_done(struct scsi_cmnd *scmd);
  48
  49/*
  50 * These should *probably* be handled by the host itself.
  51 * Since it is allowed to sleep, it probably should.
  52 */
  53#define BUS_RESET_SETTLE_TIME   (10)
  54#define HOST_RESET_SETTLE_TIME  (10)
  55
  56static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
  57static int scsi_try_to_abort_cmd(struct scsi_host_template *,
  58                                 struct scsi_cmnd *);
  59
  60/* called with shost->host_lock held */
  61void scsi_eh_wakeup(struct Scsi_Host *shost)
  62{
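             /*
              * Only wake the handler once every outstanding command has
              * failed; until then the LLD may still complete some of them.
              */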
  63        if (atomic_read(&shost->host_busy) == shost->host_failed) {
  64                trace_scsi_eh_wakeup(shost);
  65                wake_up_process(shost->ehandler);
  66                SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
  67                        "Waking error handler thread\n"));
  68        }
  69}
  70
  71/**
  72 * scsi_schedule_eh - schedule EH for SCSI host
  73 * @shost:      SCSI host to invoke error handling on.
  74 *
  75 * Schedule SCSI EH without scmd.
  76 */
  77void scsi_schedule_eh(struct Scsi_Host *shost)
  78{
  79        unsigned long flags;
  80
  81        spin_lock_irqsave(shost->host_lock, flags);
  82
  83        if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
  84            scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
  85                shost->host_eh_scheduled++;
  86                scsi_eh_wakeup(shost);
  87        }
  88
  89        spin_unlock_irqrestore(shost->host_lock, flags);
  90}
  91EXPORT_SYMBOL_GPL(scsi_schedule_eh);
  92
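     /*
      * Returns 1 if the host's error-handling deadline has expired,
      * 0 otherwise (including when eh_deadline is disabled).
      */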
  93static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
  94{
  95        if (!shost->last_reset || shost->eh_deadline == -1)
  96                return 0;
  97
  98        /*
  99         * 32bit accesses are guaranteed to be atomic
 100         * (on all supported architectures), so instead
  101         * of using a spinlock we might as well double-check
  102         * whether eh_deadline has been set to 'off' during the
 103         * time_before call.
 104         */
 105        if (time_before(jiffies, shost->last_reset + shost->eh_deadline) &&
 106            shost->eh_deadline > -1)
 107                return 0;
 108
 109        return 1;
 110}
 111
 112/**
 113 * scmd_eh_abort_handler - Handle command aborts
  114 * @work:       delayed work item embedded in the scmd to be aborted.
 115 */
 116void
 117scmd_eh_abort_handler(struct work_struct *work)
 118{
 119        struct scsi_cmnd *scmd =
 120                container_of(work, struct scsi_cmnd, abort_work.work);
 121        struct scsi_device *sdev = scmd->device;
 122        int rtn;
 123
 124        if (scsi_host_eh_past_deadline(sdev->host)) {
 125                SCSI_LOG_ERROR_RECOVERY(3,
 126                        scmd_printk(KERN_INFO, scmd,
 127                                    "scmd %p eh timeout, not aborting\n",
 128                                    scmd));
 129        } else {
 130                SCSI_LOG_ERROR_RECOVERY(3,
 131                        scmd_printk(KERN_INFO, scmd,
 132                                    "aborting command %p\n", scmd));
 133                rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
 134                if (rtn == SUCCESS) {
 135                        set_host_byte(scmd, DID_TIME_OUT);
 136                        if (scsi_host_eh_past_deadline(sdev->host)) {
 137                                SCSI_LOG_ERROR_RECOVERY(3,
 138                                        scmd_printk(KERN_INFO, scmd,
 139                                                    "scmd %p eh timeout, "
 140                                                    "not retrying aborted "
 141                                                    "command\n", scmd));
 142                        } else if (!scsi_noretry_cmd(scmd) &&
 143                            (++scmd->retries <= scmd->allowed)) {
 144                                SCSI_LOG_ERROR_RECOVERY(3,
 145                                        scmd_printk(KERN_WARNING, scmd,
 146                                                    "scmd %p retry "
 147                                                    "aborted command\n", scmd));
 148                                scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
 149                                return;
 150                        } else {
 151                                SCSI_LOG_ERROR_RECOVERY(3,
 152                                        scmd_printk(KERN_WARNING, scmd,
 153                                                    "scmd %p finish "
 154                                                    "aborted command\n", scmd));
 155                                scsi_finish_command(scmd);
 156                                return;
 157                        }
 158                } else {
 159                        SCSI_LOG_ERROR_RECOVERY(3,
 160                                scmd_printk(KERN_INFO, scmd,
 161                                            "scmd %p abort %s\n", scmd,
 162                                            (rtn == FAST_IO_FAIL) ?
  163                                            "not sent" : "failed"));
 164                }
 165        }
 166
 167        if (!scsi_eh_scmd_add(scmd, 0)) {
 168                SCSI_LOG_ERROR_RECOVERY(3,
 169                        scmd_printk(KERN_WARNING, scmd,
 170                                    "scmd %p terminate "
 171                                    "aborted command\n", scmd));
 172                set_host_byte(scmd, DID_TIME_OUT);
 173                scsi_finish_command(scmd);
 174        }
 175}
 176
 177/**
 178 * scsi_abort_command - schedule a command abort
 179 * @scmd:       scmd to abort.
 180 *
  181 * We only need to abort commands after a command timeout.
 182 */
 183static int
 184scsi_abort_command(struct scsi_cmnd *scmd)
 185{
 186        struct scsi_device *sdev = scmd->device;
 187        struct Scsi_Host *shost = sdev->host;
 188        unsigned long flags;
 189
 190        if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
 191                /*
 192                 * Retry after abort failed, escalate to next level.
 193                 */
 194                scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED;
 195                SCSI_LOG_ERROR_RECOVERY(3,
 196                        scmd_printk(KERN_INFO, scmd,
 197                                    "scmd %p previous abort failed\n", scmd));
 198                BUG_ON(delayed_work_pending(&scmd->abort_work));
 199                return FAILED;
 200        }
 201
 202        /*
 203         * Do not try a command abort if
 204         * SCSI EH has already started.
 205         */
 206        spin_lock_irqsave(shost->host_lock, flags);
 207        if (scsi_host_in_recovery(shost)) {
 208                spin_unlock_irqrestore(shost->host_lock, flags);
 209                SCSI_LOG_ERROR_RECOVERY(3,
 210                        scmd_printk(KERN_INFO, scmd,
 211                                    "scmd %p not aborting, host in recovery\n",
 212                                    scmd));
 213                return FAILED;
 214        }
 215
 216        if (shost->eh_deadline != -1 && !shost->last_reset)
 217                shost->last_reset = jiffies;
 218        spin_unlock_irqrestore(shost->host_lock, flags);
 219
 220        scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
 221        SCSI_LOG_ERROR_RECOVERY(3,
 222                scmd_printk(KERN_INFO, scmd,
 223                            "scmd %p abort scheduled\n", scmd));
 224        queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100);
 225        return SUCCESS;
 226}
 227
 228/**
 229 * scsi_eh_scmd_add - add scsi cmd to error handling.
 230 * @scmd:       scmd to run eh on.
 231 * @eh_flag:    optional SCSI_EH flag.
 232 *
 233 * Return value:
  234 *      1 on success, 0 on failure.
 235 */
 236int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
 237{
 238        struct Scsi_Host *shost = scmd->device->host;
 239        unsigned long flags;
 240        int ret = 0;
 241
 242        if (!shost->ehandler)
 243                return 0;
 244
 245        spin_lock_irqsave(shost->host_lock, flags);
 246        if (scsi_host_set_state(shost, SHOST_RECOVERY))
 247                if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
 248                        goto out_unlock;
 249
 250        if (shost->eh_deadline != -1 && !shost->last_reset)
 251                shost->last_reset = jiffies;
 252
 253        ret = 1;
 254        if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED)
 255                eh_flag &= ~SCSI_EH_CANCEL_CMD;
 256        scmd->eh_eflags |= eh_flag;
 257        list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
 258        shost->host_failed++;
 259        scsi_eh_wakeup(shost);
 260 out_unlock:
 261        spin_unlock_irqrestore(shost->host_lock, flags);
 262        return ret;
 263}
 264
 265/**
 266 * scsi_times_out - Timeout function for normal scsi commands.
 267 * @req:        request that is timing out.
 268 *
 269 * Notes:
 270 *     We do not need to lock this.  There is the potential for a race
 271 *     only in that the normal completion handling might run, but if the
 272 *     normal completion function determines that the timer has already
 273 *     fired, then it mustn't do anything.
 274 */
 275enum blk_eh_timer_return scsi_times_out(struct request *req)
 276{
 277        struct scsi_cmnd *scmd = req->special;
 278        enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
 279        struct Scsi_Host *host = scmd->device->host;
 280
 281        trace_scsi_dispatch_cmd_timeout(scmd);
 282        scsi_log_completion(scmd, TIMEOUT_ERROR);
 283
 284        if (host->eh_deadline != -1 && !host->last_reset)
 285                host->last_reset = jiffies;
 286
 287        if (host->transportt->eh_timed_out)
 288                rtn = host->transportt->eh_timed_out(scmd);
 289        else if (host->hostt->eh_timed_out)
 290                rtn = host->hostt->eh_timed_out(scmd);
 291
 292        if (rtn == BLK_EH_NOT_HANDLED) {
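                     /*
                      * Neither the transport class nor the LLD handled the
                      * timeout: prefer an asynchronous abort.  If it could be
                      * scheduled, scmd_eh_abort_handler() will finish, retry
                      * or escalate the command later.
                      */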
 293                if (!host->hostt->no_async_abort &&
 294                    scsi_abort_command(scmd) == SUCCESS)
 295                        return BLK_EH_NOT_HANDLED;
 296
 297                set_host_byte(scmd, DID_TIME_OUT);
 298                if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))
 299                        rtn = BLK_EH_HANDLED;
 300        }
 301
 302        return rtn;
 303}
 304
 305/**
 306 * scsi_block_when_processing_errors - Prevent cmds from being queued.
 307 * @sdev:       Device on which we are performing recovery.
 308 *
 309 * Description:
 310 *     We block until the host is out of error recovery, and then check to
 311 *     see whether the host or the device is offline.
 312 *
 313 * Return value:
  314 *     0 if the device was taken offline by error recovery, 1 if it is OK to proceed.
 315 */
 316int scsi_block_when_processing_errors(struct scsi_device *sdev)
 317{
 318        int online;
 319
 320        wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
 321
 322        online = scsi_device_online(sdev);
 323
 324        SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev,
 325                "%s: rtn: %d\n", __func__, online));
 326
 327        return online;
 328}
 329EXPORT_SYMBOL(scsi_block_when_processing_errors);
 330
 331#ifdef CONFIG_SCSI_LOGGING
 332/**
 333 * scsi_eh_prt_fail_stats - Log info on failures.
 334 * @shost:      scsi host being recovered.
 335 * @work_q:     Queue of scsi cmds to process.
 336 */
 337static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
 338                                          struct list_head *work_q)
 339{
 340        struct scsi_cmnd *scmd;
 341        struct scsi_device *sdev;
 342        int total_failures = 0;
 343        int cmd_failed = 0;
 344        int cmd_cancel = 0;
 345        int devices_failed = 0;
 346
 347        shost_for_each_device(sdev, shost) {
 348                list_for_each_entry(scmd, work_q, eh_entry) {
 349                        if (scmd->device == sdev) {
 350                                ++total_failures;
 351                                if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
 352                                        ++cmd_cancel;
 353                                else
 354                                        ++cmd_failed;
 355                        }
 356                }
 357
 358                if (cmd_cancel || cmd_failed) {
 359                        SCSI_LOG_ERROR_RECOVERY(3,
 360                                shost_printk(KERN_INFO, shost,
 361                                            "%s: cmds failed: %d, cancel: %d\n",
 362                                            __func__, cmd_failed,
 363                                            cmd_cancel));
 364                        cmd_cancel = 0;
 365                        cmd_failed = 0;
 366                        ++devices_failed;
 367                }
 368        }
 369
 370        SCSI_LOG_ERROR_RECOVERY(2, shost_printk(KERN_INFO, shost,
 371                                   "Total of %d commands on %d"
 372                                   " devices require eh work\n",
 373                                   total_failures, devices_failed));
 374}
 375#endif
 376
  377/**
 378 * scsi_report_lun_change - Set flag on all *other* devices on the same target
 379 *                          to indicate that a UNIT ATTENTION is expected.
 380 * @sdev:       Device reporting the UNIT ATTENTION
 381 */
 382static void scsi_report_lun_change(struct scsi_device *sdev)
 383{
 384        sdev->sdev_target->expecting_lun_change = 1;
 385}
 386
 387/**
 388 * scsi_report_sense - Examine scsi sense information and log messages for
 389 *                     certain conditions, also issue uevents for some of them.
 390 * @sdev:       Device reporting the sense code
 391 * @sshdr:      sshdr to be examined
 392 */
 393static void scsi_report_sense(struct scsi_device *sdev,
 394                              struct scsi_sense_hdr *sshdr)
 395{
 396        enum scsi_device_event evt_type = SDEV_EVT_MAXBITS;     /* i.e. none */
 397
 398        if (sshdr->sense_key == UNIT_ATTENTION) {
 399                if (sshdr->asc == 0x3f && sshdr->ascq == 0x03) {
 400                        evt_type = SDEV_EVT_INQUIRY_CHANGE_REPORTED;
 401                        sdev_printk(KERN_WARNING, sdev,
  402                                    "Inquiry data has changed\n");
 403                } else if (sshdr->asc == 0x3f && sshdr->ascq == 0x0e) {
 404                        evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
 405                        scsi_report_lun_change(sdev);
 406                        sdev_printk(KERN_WARNING, sdev,
 407                                    "Warning! Received an indication that the "
 408                                    "LUN assignments on this target have "
 409                                    "changed. The Linux SCSI layer does not "
 410                                    "automatically remap LUN assignments.\n");
 411                } else if (sshdr->asc == 0x3f)
 412                        sdev_printk(KERN_WARNING, sdev,
 413                                    "Warning! Received an indication that the "
 414                                    "operating parameters on this target have "
 415                                    "changed. The Linux SCSI layer does not "
 416                                    "automatically adjust these parameters.\n");
 417
 418                if (sshdr->asc == 0x38 && sshdr->ascq == 0x07) {
 419                        evt_type = SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED;
 420                        sdev_printk(KERN_WARNING, sdev,
 421                                    "Warning! Received an indication that the "
 422                                    "LUN reached a thin provisioning soft "
 423                                    "threshold.\n");
 424                }
 425
 426                if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
 427                        evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
 428                        sdev_printk(KERN_WARNING, sdev,
  429                                    "Mode parameters changed\n");
 430                } else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
 431                        evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
 432                        sdev_printk(KERN_WARNING, sdev,
  433                                    "Capacity data has changed\n");
 434                } else if (sshdr->asc == 0x2a)
 435                        sdev_printk(KERN_WARNING, sdev,
  436                                    "Parameters changed\n");
 437        }
 438
 439        if (evt_type != SDEV_EVT_MAXBITS) {
 440                set_bit(evt_type, sdev->pending_events);
 441                schedule_work(&sdev->event_work);
 442        }
 443}
 444
 445/**
 446 * scsi_check_sense - Examine scsi cmd sense
 447 * @scmd:       Cmd to have sense checked.
 448 *
 449 * Return value:
 450 *      SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
 451 *
 452 * Notes:
 453 *      When a deferred error is detected the current command has
 454 *      not been executed and needs retrying.
 455 */
 456static int scsi_check_sense(struct scsi_cmnd *scmd)
 457{
 458        struct scsi_device *sdev = scmd->device;
 459        struct scsi_sense_hdr sshdr;
 460
  461        if (!scsi_command_normalize_sense(scmd, &sshdr))
 462                return FAILED;  /* no valid sense data */
 463
 464        scsi_report_sense(sdev, &sshdr);
 465
 466        if (scsi_sense_is_deferred(&sshdr))
 467                return NEEDS_RETRY;
 468
 469        if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
 470                        sdev->scsi_dh_data->scsi_dh->check_sense) {
 471                int rc;
 472
 473                rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
 474                if (rc != SCSI_RETURN_NOT_HANDLED)
 475                        return rc;
 476                /* handler does not care. Drop down to default handling */
 477        }
 478
 479        if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
 480                /*
 481                 * nasty: for mid-layer issued TURs, we need to return the
 482                 * actual sense data without any recovery attempt.  For eh
 483                 * issued ones, we need to try to recover and interpret
 484                 */
 485                return SUCCESS;
 486
 487        /*
 488         * Previous logic looked for FILEMARK, EOM or ILI which are
 489         * mainly associated with tapes and returned SUCCESS.
 490         */
 491        if (sshdr.response_code == 0x70) {
 492                /* fixed format */
 493                if (scmd->sense_buffer[2] & 0xe0)
 494                        return SUCCESS;
 495        } else {
 496                /*
 497                 * descriptor format: look for "stream commands sense data
 498                 * descriptor" (see SSC-3). Assume single sense data
 499                 * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
 500                 */
 501                if ((sshdr.additional_length > 3) &&
 502                    (scmd->sense_buffer[8] == 0x4) &&
 503                    (scmd->sense_buffer[11] & 0xe0))
 504                        return SUCCESS;
 505        }
 506
 507        switch (sshdr.sense_key) {
 508        case NO_SENSE:
 509                return SUCCESS;
 510        case RECOVERED_ERROR:
 511                return /* soft_error */ SUCCESS;
 512
 513        case ABORTED_COMMAND:
 514                if (sshdr.asc == 0x10) /* DIF */
 515                        return SUCCESS;
 516
 517                return NEEDS_RETRY;
 518        case NOT_READY:
 519        case UNIT_ATTENTION:
 520                /*
 521                 * if we are expecting a cc/ua because of a bus reset that we
 522                 * performed, treat this just as a retry.  otherwise this is
 523                 * information that we should pass up to the upper-level driver
 524                 * so that we can deal with it there.
 525                 */
 526                if (scmd->device->expecting_cc_ua) {
 527                        /*
  528                         * Because some devices do not queue unit
 529                         * attentions correctly, we carefully check
 530                         * additional sense code and qualifier so as
 531                         * not to squash media change unit attention.
 532                         */
 533                        if (sshdr.asc != 0x28 || sshdr.ascq != 0x00) {
 534                                scmd->device->expecting_cc_ua = 0;
 535                                return NEEDS_RETRY;
 536                        }
 537                }
 538                /*
 539                 * we might also expect a cc/ua if another LUN on the target
 540                 * reported a UA with an ASC/ASCQ of 3F 0E -
 541                 * REPORTED LUNS DATA HAS CHANGED.
 542                 */
 543                if (scmd->device->sdev_target->expecting_lun_change &&
 544                    sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
 545                        return NEEDS_RETRY;
 546                /*
 547                 * if the device is in the process of becoming ready, we
 548                 * should retry.
 549                 */
 550                if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
 551                        return NEEDS_RETRY;
 552                /*
 553                 * if the device is not started, we need to wake
 554                 * the error handler to start the motor
 555                 */
 556                if (scmd->device->allow_restart &&
 557                    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
 558                        return FAILED;
 559                /*
 560                 * Pass the UA upwards for a determination in the completion
 561                 * functions.
 562                 */
 563                return SUCCESS;
 564
 565                /* these are not supported */
 566        case DATA_PROTECT:
 567                if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
 568                        /* Thin provisioning hard threshold reached */
 569                        set_host_byte(scmd, DID_ALLOC_FAILURE);
 570                        return SUCCESS;
 571                }
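                     /* fall through */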
 572        case COPY_ABORTED:
 573        case VOLUME_OVERFLOW:
 574        case MISCOMPARE:
 575        case BLANK_CHECK:
 576                set_host_byte(scmd, DID_TARGET_FAILURE);
 577                return SUCCESS;
 578
 579        case MEDIUM_ERROR:
 580                if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
 581                    sshdr.asc == 0x13 || /* AMNF DATA FIELD */
 582                    sshdr.asc == 0x14) { /* RECORD NOT FOUND */
 583                        set_host_byte(scmd, DID_MEDIUM_ERROR);
 584                        return SUCCESS;
 585                }
 586                return NEEDS_RETRY;
 587
 588        case HARDWARE_ERROR:
 589                if (scmd->device->retry_hwerror)
 590                        return ADD_TO_MLQUEUE;
 591                else
 592                        set_host_byte(scmd, DID_TARGET_FAILURE);
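                     /* fall through */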
 593
 594        case ILLEGAL_REQUEST:
 595                if (sshdr.asc == 0x20 || /* Invalid command operation code */
 596                    sshdr.asc == 0x21 || /* Logical block address out of range */
 597                    sshdr.asc == 0x24 || /* Invalid field in cdb */
 598                    sshdr.asc == 0x26) { /* Parameter value invalid */
 599                        set_host_byte(scmd, DID_TARGET_FAILURE);
 600                }
 601                return SUCCESS;
 602
 603        default:
 604                return SUCCESS;
 605        }
 606}
 607
 608static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
 609{
 610        struct scsi_host_template *sht = sdev->host->hostt;
 611        struct scsi_device *tmp_sdev;
 612
 613        if (!sht->track_queue_depth ||
 614            sdev->queue_depth >= sdev->max_queue_depth)
 615                return;
 616
 617        if (time_before(jiffies,
 618            sdev->last_queue_ramp_up + sdev->queue_ramp_up_period))
 619                return;
 620
 621        if (time_before(jiffies,
 622            sdev->last_queue_full_time + sdev->queue_ramp_up_period))
 623                return;
 624
 625        /*
 626         * Walk all devices of a target and do
 627         * ramp up on them.
 628         */
 629        shost_for_each_device(tmp_sdev, sdev->host) {
 630                if (tmp_sdev->channel != sdev->channel ||
 631                    tmp_sdev->id != sdev->id ||
 632                    tmp_sdev->queue_depth == sdev->max_queue_depth)
 633                        continue;
 634
 635                scsi_change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1);
 636                sdev->last_queue_ramp_up = jiffies;
 637        }
 638}
 639
 640static void scsi_handle_queue_full(struct scsi_device *sdev)
 641{
 642        struct scsi_host_template *sht = sdev->host->hostt;
 643        struct scsi_device *tmp_sdev;
 644
 645        if (!sht->track_queue_depth)
 646                return;
 647
 648        shost_for_each_device(tmp_sdev, sdev->host) {
 649                if (tmp_sdev->channel != sdev->channel ||
 650                    tmp_sdev->id != sdev->id)
 651                        continue;
 652                /*
 653                 * We do not know the number of commands that were at
 654                 * the device when we got the queue full so we start
 655                 * from the highest possible value and work our way down.
 656                 */
 657                scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
 658        }
 659}
 660
 661/**
  662 * scsi_eh_completed_normally - Disposition an eh cmd on return from LLD.
 663 * @scmd:       SCSI cmd to examine.
 664 *
 665 * Notes:
 666 *    This is *only* called when we are examining the status of commands
 667 *    queued during error recovery.  the main difference here is that we
 668 *    don't allow for the possibility of retries here, and we are a lot
 669 *    more restrictive about what we consider acceptable.
 670 */
 671static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
 672{
 673        /*
 674         * first check the host byte, to see if there is anything in there
 675         * that would indicate what we need to do.
 676         */
 677        if (host_byte(scmd->result) == DID_RESET) {
 678                /*
 679                 * rats.  we are already in the error handler, so we now
 680                 * get to try and figure out what to do next.  if the sense
 681                 * is valid, we have a pretty good idea of what to do.
 682                 * if not, we mark it as FAILED.
 683                 */
 684                return scsi_check_sense(scmd);
 685        }
 686        if (host_byte(scmd->result) != DID_OK)
 687                return FAILED;
 688
 689        /*
 690         * next, check the message byte.
 691         */
 692        if (msg_byte(scmd->result) != COMMAND_COMPLETE)
 693                return FAILED;
 694
 695        /*
 696         * now, check the status byte to see if this indicates
 697         * anything special.
 698         */
 699        switch (status_byte(scmd->result)) {
 700        case GOOD:
 701                scsi_handle_queue_ramp_up(scmd->device);
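                     /* fall through */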
 702        case COMMAND_TERMINATED:
 703                return SUCCESS;
 704        case CHECK_CONDITION:
 705                return scsi_check_sense(scmd);
 706        case CONDITION_GOOD:
 707        case INTERMEDIATE_GOOD:
 708        case INTERMEDIATE_C_GOOD:
 709                /*
 710                 * who knows?  FIXME(eric)
 711                 */
 712                return SUCCESS;
 713        case RESERVATION_CONFLICT:
 714                if (scmd->cmnd[0] == TEST_UNIT_READY)
 715                        /* it is a success, we probed the device and
 716                         * found it */
 717                        return SUCCESS;
 718                /* otherwise, we failed to send the command */
 719                return FAILED;
 720        case QUEUE_FULL:
 721                scsi_handle_queue_full(scmd->device);
 722                /* fall through */
 723        case BUSY:
 724                return NEEDS_RETRY;
 725        default:
 726                return FAILED;
 727        }
 728        return FAILED;
 729}
 730
 731/**
 732 * scsi_eh_done - Completion function for error handling.
 733 * @scmd:       Cmd that is done.
 734 */
 735static void scsi_eh_done(struct scsi_cmnd *scmd)
 736{
 737        struct completion *eh_action;
 738
 739        SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
 740                        "%s scmd: %p result: %x\n",
 741                        __func__, scmd, scmd->result));
 742
 743        eh_action = scmd->device->host->eh_action;
 744        if (eh_action)
 745                complete(eh_action);
 746}
 747
 748/**
 749 * scsi_try_host_reset - ask host adapter to reset itself
 750 * @scmd:       SCSI cmd to send host reset.
 751 */
 752static int scsi_try_host_reset(struct scsi_cmnd *scmd)
 753{
 754        unsigned long flags;
 755        int rtn;
 756        struct Scsi_Host *host = scmd->device->host;
 757        struct scsi_host_template *hostt = host->hostt;
 758
 759        SCSI_LOG_ERROR_RECOVERY(3,
 760                shost_printk(KERN_INFO, host, "Snd Host RST\n"));
 761
 762        if (!hostt->eh_host_reset_handler)
 763                return FAILED;
 764
 765        rtn = hostt->eh_host_reset_handler(scmd);
 766
 767        if (rtn == SUCCESS) {
 768                if (!hostt->skip_settle_delay)
 769                        ssleep(HOST_RESET_SETTLE_TIME);
 770                spin_lock_irqsave(host->host_lock, flags);
 771                scsi_report_bus_reset(host, scmd_channel(scmd));
 772                spin_unlock_irqrestore(host->host_lock, flags);
 773        }
 774
 775        return rtn;
 776}
 777
 778/**
 779 * scsi_try_bus_reset - ask host to perform a bus reset
 780 * @scmd:       SCSI cmd to send bus reset.
 781 */
 782static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
 783{
 784        unsigned long flags;
 785        int rtn;
 786        struct Scsi_Host *host = scmd->device->host;
 787        struct scsi_host_template *hostt = host->hostt;
 788
 789        SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
 790                "%s: Snd Bus RST\n", __func__));
 791
 792        if (!hostt->eh_bus_reset_handler)
 793                return FAILED;
 794
 795        rtn = hostt->eh_bus_reset_handler(scmd);
 796
 797        if (rtn == SUCCESS) {
 798                if (!hostt->skip_settle_delay)
 799                        ssleep(BUS_RESET_SETTLE_TIME);
 800                spin_lock_irqsave(host->host_lock, flags);
 801                scsi_report_bus_reset(host, scmd_channel(scmd));
 802                spin_unlock_irqrestore(host->host_lock, flags);
 803        }
 804
 805        return rtn;
 806}
 807
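     /*
      * Mark @sdev as having been reset so that a following CHECK CONDITION
      * with a UNIT ATTENTION sense key is treated as expected.
      */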
 808static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
 809{
 810        sdev->was_reset = 1;
 811        sdev->expecting_cc_ua = 1;
 812}
 813
 814/**
 815 * scsi_try_target_reset - Ask host to perform a target reset
 816 * @scmd:       SCSI cmd used to send a target reset
 817 *
 818 * Notes:
 819 *    There is no timeout for this operation.  if this operation is
 820 *    unreliable for a given host, then the host itself needs to put a
 821 *    timer on it, and set the host back to a consistent state prior to
 822 *    returning.
 823 */
 824static int scsi_try_target_reset(struct scsi_cmnd *scmd)
 825{
 826        unsigned long flags;
 827        int rtn;
 828        struct Scsi_Host *host = scmd->device->host;
 829        struct scsi_host_template *hostt = host->hostt;
 830
 831        if (!hostt->eh_target_reset_handler)
 832                return FAILED;
 833
 834        rtn = hostt->eh_target_reset_handler(scmd);
 835        if (rtn == SUCCESS) {
 836                spin_lock_irqsave(host->host_lock, flags);
 837                __starget_for_each_device(scsi_target(scmd->device), NULL,
 838                                          __scsi_report_device_reset);
 839                spin_unlock_irqrestore(host->host_lock, flags);
 840        }
 841
 842        return rtn;
 843}
 844
 845/**
 846 * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
 847 * @scmd:       SCSI cmd used to send BDR
 848 *
 849 * Notes:
 850 *    There is no timeout for this operation.  if this operation is
 851 *    unreliable for a given host, then the host itself needs to put a
 852 *    timer on it, and set the host back to a consistent state prior to
 853 *    returning.
 854 */
 855static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
 856{
 857        int rtn;
 858        struct scsi_host_template *hostt = scmd->device->host->hostt;
 859
 860        if (!hostt->eh_device_reset_handler)
 861                return FAILED;
 862
 863        rtn = hostt->eh_device_reset_handler(scmd);
 864        if (rtn == SUCCESS)
 865                __scsi_report_device_reset(scmd->device, NULL);
 866        return rtn;
 867}
 868
 869/**
 870 * scsi_try_to_abort_cmd - Ask host to abort a SCSI command
  871 * @scmd:       SCSI cmd to abort
 872 *
 873 * Return value:
 874 *      SUCCESS, FAILED, or FAST_IO_FAIL
 875 *
 876 * Notes:
 877 *    SUCCESS does not necessarily indicate that the command
  878 *    has been aborted; it only indicates that the LLDD
 879 *    has cleared all references to that command.
 880 *    LLDDs should return FAILED only if an abort was required
 881 *    but could not be executed. LLDDs should return FAST_IO_FAIL
 882 *    if the device is temporarily unavailable (eg due to a
 883 *    link down on FibreChannel)
 884 */
 885static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt,
 886                                 struct scsi_cmnd *scmd)
 887{
 888        if (!hostt->eh_abort_handler)
 889                return FAILED;
 890
 891        return hostt->eh_abort_handler(scmd);
 892}
 893
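     /*
      * Escalate recovery for a single command: try an abort first, then a
      * bus device reset, a target reset, a bus reset and finally a host
      * reset, stopping at the first one that succeeds.
      */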
 894static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
 895{
 896        if (scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd) != SUCCESS)
 897                if (scsi_try_bus_device_reset(scmd) != SUCCESS)
 898                        if (scsi_try_target_reset(scmd) != SUCCESS)
 899                                if (scsi_try_bus_reset(scmd) != SUCCESS)
 900                                        scsi_try_host_reset(scmd);
 901}
 902
 903/**
 904 * scsi_eh_prep_cmnd  - Save a scsi command info as part of error recovery
 905 * @scmd:       SCSI command structure to hijack
 906 * @ses:        structure to save restore information
 907 * @cmnd:       CDB to send. Can be NULL if no new cmnd is needed
 908 * @cmnd_size:  size in bytes of @cmnd (must be <= BLK_MAX_CDB)
  909 * @sense_bytes: size of sense data to copy, or 0 (if != 0, @cmnd is ignored)
 910 *
  911 * This function is used to save a scsi command's information before re-execution
 912 * as part of the error recovery process.  If @sense_bytes is 0 the command
 913 * sent must be one that does not transfer any data.  If @sense_bytes != 0
  914 * @cmnd is ignored and this function sets up a REQUEST_SENSE command
 915 * and cmnd buffers to read @sense_bytes into @scmd->sense_buffer.
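      *
      * A rough sketch of the intended call sequence (mirroring what
      * scsi_send_eh_cmnd() below does):
      *
      *      struct scsi_eh_save ses;
      *
      *      scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
      *      rtn = shost->hostt->queuecommand(shost, scmd);
      *      ... wait for scsi_eh_done() or a timeout ...
      *      scsi_eh_restore_cmnd(scmd, &ses);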
 916 */
 917void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
 918                        unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
 919{
 920        struct scsi_device *sdev = scmd->device;
 921
 922        /*
 923         * We need saved copies of a number of fields - this is because
 924         * error handling may need to overwrite these with different values
 925         * to run different commands, and once error handling is complete,
 926         * we will need to restore these values prior to running the actual
 927         * command.
 928         */
 929        ses->cmd_len = scmd->cmd_len;
 930        ses->cmnd = scmd->cmnd;
 931        ses->data_direction = scmd->sc_data_direction;
 932        ses->sdb = scmd->sdb;
 933        ses->next_rq = scmd->request->next_rq;
 934        ses->result = scmd->result;
 935        ses->underflow = scmd->underflow;
 936        ses->prot_op = scmd->prot_op;
 937
 938        scmd->prot_op = SCSI_PROT_NORMAL;
 939        scmd->eh_eflags = 0;
 940        scmd->cmnd = ses->eh_cmnd;
 941        memset(scmd->cmnd, 0, BLK_MAX_CDB);
 942        memset(&scmd->sdb, 0, sizeof(scmd->sdb));
 943        scmd->request->next_rq = NULL;
 944        scmd->result = 0;
 945
 946        if (sense_bytes) {
 947                scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
 948                                         sense_bytes);
 949                sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
 950                            scmd->sdb.length);
 951                scmd->sdb.table.sgl = &ses->sense_sgl;
 952                scmd->sc_data_direction = DMA_FROM_DEVICE;
 953                scmd->sdb.table.nents = 1;
 954                scmd->cmnd[0] = REQUEST_SENSE;
 955                scmd->cmnd[4] = scmd->sdb.length;
 956                scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
 957        } else {
 958                scmd->sc_data_direction = DMA_NONE;
 959                if (cmnd) {
 960                        BUG_ON(cmnd_size > BLK_MAX_CDB);
 961                        memcpy(scmd->cmnd, cmnd, cmnd_size);
 962                        scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
 963                }
 964        }
 965
 966        scmd->underflow = 0;
 967
 968        if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN)
 969                scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
 970                        (sdev->lun << 5 & 0xe0);
 971
 972        /*
 973         * Zero the sense buffer.  The scsi spec mandates that any
 974         * untransferred sense data should be interpreted as being zero.
 975         */
 976        memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 977}
 978EXPORT_SYMBOL(scsi_eh_prep_cmnd);
 979
 980/**
 981 * scsi_eh_restore_cmnd  - Restore a scsi command info as part of error recovery
 982 * @scmd:       SCSI command structure to restore
  983 * @ses:        saved information from a corresponding call to scsi_eh_prep_cmnd
 984 *
 985 * Undo any damage done by above scsi_eh_prep_cmnd().
 986 */
 987void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
 988{
 989        /*
 990         * Restore original data
 991         */
 992        scmd->cmd_len = ses->cmd_len;
 993        scmd->cmnd = ses->cmnd;
 994        scmd->sc_data_direction = ses->data_direction;
 995        scmd->sdb = ses->sdb;
 996        scmd->request->next_rq = ses->next_rq;
 997        scmd->result = ses->result;
 998        scmd->underflow = ses->underflow;
 999        scmd->prot_op = ses->prot_op;
1000}
1001EXPORT_SYMBOL(scsi_eh_restore_cmnd);
1002
1003/**
1004 * scsi_send_eh_cmnd  - submit a scsi command as part of error recovery
1005 * @scmd:       SCSI command structure to hijack
1006 * @cmnd:       CDB to send
1007 * @cmnd_size:  size in bytes of @cmnd
1008 * @timeout:    timeout for this request
1009 * @sense_bytes: size of sense data to copy or 0
1010 *
1011 * This function is used to send a scsi command down to a target device
1012 * as part of the error recovery process. See also scsi_eh_prep_cmnd() above.
1013 *
1014 * Return value:
1015 *    SUCCESS or FAILED or NEEDS_RETRY
1016 */
1017static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
1018                             int cmnd_size, int timeout, unsigned sense_bytes)
1019{
1020        struct scsi_device *sdev = scmd->device;
1021        struct Scsi_Host *shost = sdev->host;
1022        DECLARE_COMPLETION_ONSTACK(done);
1023        unsigned long timeleft = timeout;
1024        struct scsi_eh_save ses;
1025        const unsigned long stall_for = msecs_to_jiffies(100);
1026        int rtn;
1027
1028retry:
1029        scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
1030        shost->eh_action = &done;
1031
1032        scsi_log_send(scmd);
1033        scmd->scsi_done = scsi_eh_done;
1034        rtn = shost->hostt->queuecommand(shost, scmd);
1035        if (rtn) {
1036                if (timeleft > stall_for) {
1037                        scsi_eh_restore_cmnd(scmd, &ses);
1038                        timeleft -= stall_for;
1039                        msleep(jiffies_to_msecs(stall_for));
1040                        goto retry;
1041                }
1042                /* signal not to enter either branch of the if () below */
1043                timeleft = 0;
1044                rtn = FAILED;
1045        } else {
1046                timeleft = wait_for_completion_timeout(&done, timeout);
1047                rtn = SUCCESS;
1048        }
1049
1050        shost->eh_action = NULL;
1051
1052        scsi_log_completion(scmd, rtn);
1053
1054        SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1055                        "%s: scmd: %p, timeleft: %ld\n",
1056                        __func__, scmd, timeleft));
1057
1058        /*
1059         * If there is time left scsi_eh_done got called, and we will examine
1060         * the actual status codes to see whether the command actually did
1061         * complete normally, else if we have a zero return and no time left,
1062         * the command must still be pending, so abort it and return FAILED.
1063         * If we never actually managed to issue the command, because
1064         * ->queuecommand() kept returning non zero, use the rtn = FAILED
1065         * value above (so don't execute either branch of the if)
1066         */
1067        if (timeleft) {
1068                rtn = scsi_eh_completed_normally(scmd);
1069                SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1070                        "%s: scsi_eh_completed_normally %x\n", __func__, rtn));
1071
1072                switch (rtn) {
1073                case SUCCESS:
1074                case NEEDS_RETRY:
1075                case FAILED:
1076                        break;
1077                case ADD_TO_MLQUEUE:
1078                        rtn = NEEDS_RETRY;
1079                        break;
1080                default:
1081                        rtn = FAILED;
1082                        break;
1083                }
1084        } else if (rtn != FAILED) {
1085                scsi_abort_eh_cmnd(scmd);
1086                rtn = FAILED;
1087        }
1088
1089        scsi_eh_restore_cmnd(scmd, &ses);
1090
1091        return rtn;
1092}
1093
1094/**
1095 * scsi_request_sense - Request sense data from a particular target.
1096 * @scmd:       SCSI cmd for request sense.
1097 *
1098 * Notes:
1099 *    Some hosts automatically obtain this information, others require
1100 *    that we obtain it on our own. This function will *not* return until
1101 *    the command either times out, or it completes.
1102 */
1103static int scsi_request_sense(struct scsi_cmnd *scmd)
1104{
1105        return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0);
1106}
1107
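     /*
      * Give the upper level driver a chance to adjust the disposition
      * returned by EH for commands that did not originate as BLOCK_PC
      * (passthrough) requests.
      */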
1108static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
1109{
1110        if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
1111                struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
1112                if (sdrv->eh_action)
1113                        rtn = sdrv->eh_action(scmd, rtn);
1114        }
1115        return rtn;
1116}
1117
1118/**
1119 * scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
1120 * @scmd:       Original SCSI cmd that eh has finished.
1121 * @done_q:     Queue for processed commands.
1122 *
1123 * Notes:
 1124 *    We don't want to use the normal command completion while we are
1125 *    still handling errors - it may cause other commands to be queued,
1126 *    and that would disturb what we are doing.  Thus we really want to
1127 *    keep a list of pending commands for final completion, and once we
1128 *    are ready to leave error handling we handle completion for real.
1129 */
1130void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
1131{
1132        scmd->device->host->host_failed--;
1133        scmd->eh_eflags = 0;
1134        list_move_tail(&scmd->eh_entry, done_q);
1135}
1136EXPORT_SYMBOL(scsi_eh_finish_cmd);
1137
1138/**
1139 * scsi_eh_get_sense - Get device sense data.
1140 * @work_q:     Queue of commands to process.
1141 * @done_q:     Queue of processed commands.
1142 *
1143 * Description:
1144 *    See if we need to request sense information.  if so, then get it
1145 *    now, so we have a better idea of what to do.
1146 *
1147 * Notes:
1148 *    This has the unfortunate side effect that if a shost adapter does
1149 *    not automatically request sense information, we end up shutting
1150 *    it down before we request it.
1151 *
1152 *    All drivers should request sense information internally these days,
1153 *    so for now all I have to say is tough noogies if you end up in here.
1154 *
1155 *    XXX: Long term this code should go away, but that needs an audit of
1156 *         all LLDDs first.
1157 */
1158int scsi_eh_get_sense(struct list_head *work_q,
1159                      struct list_head *done_q)
1160{
1161        struct scsi_cmnd *scmd, *next;
1162        struct Scsi_Host *shost;
1163        int rtn;
1164
1165        list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1166                if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
1167                    SCSI_SENSE_VALID(scmd))
1168                        continue;
1169
1170                shost = scmd->device->host;
1171                if (scsi_host_eh_past_deadline(shost)) {
1172                        SCSI_LOG_ERROR_RECOVERY(3,
1173                                scmd_printk(KERN_INFO, scmd,
1174                                            "%s: skip request sense, past eh deadline\n",
1175                                             current->comm));
1176                        break;
1177                }
1178                if (status_byte(scmd->result) != CHECK_CONDITION)
1179                        /*
1180                         * don't request sense if there's no check condition
1181                         * status because the error we're processing isn't one
1182                         * that has a sense code (and some devices get
1183                         * confused by sense requests out of the blue)
1184                         */
1185                        continue;
1186
1187                SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
1188                                                  "%s: requesting sense\n",
1189                                                  current->comm));
1190                rtn = scsi_request_sense(scmd);
1191                if (rtn != SUCCESS)
1192                        continue;
1193
1194                SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1195                        "sense requested for %p result %x\n",
1196                        scmd, scmd->result));
1197                SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd));
1198
1199                rtn = scsi_decide_disposition(scmd);
1200
1201                /*
1202                 * if the result was normal, then just pass it along to the
1203                 * upper level.
1204                 */
1205                if (rtn == SUCCESS)
1206                        /* we don't want this command reissued, just
1207                         * finished with the sense data, so set
1208                         * retries to the max allowed to ensure it
1209                         * won't get reissued */
1210                        scmd->retries = scmd->allowed;
1211                else if (rtn != NEEDS_RETRY)
1212                        continue;
1213
1214                scsi_eh_finish_cmd(scmd, done_q);
1215        }
1216
1217        return list_empty(work_q);
1218}
1219EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
1220
1221/**
1222 * scsi_eh_tur - Send TUR to device.
1223 * @scmd:       &scsi_cmnd to send TUR
1224 *
1225 * Return value:
1226 *    0 - Device is ready. 1 - Device NOT ready.
1227 */
1228static int scsi_eh_tur(struct scsi_cmnd *scmd)
1229{
1230        static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
1231        int retry_cnt = 1, rtn;
1232
1233retry_tur:
1234        rtn = scsi_send_eh_cmnd(scmd, tur_command, 6,
1235                                scmd->device->eh_timeout, 0);
1236
1237        SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1238                "%s: scmd %p rtn %x\n", __func__, scmd, rtn));
1239
1240        switch (rtn) {
1241        case NEEDS_RETRY:
1242                if (retry_cnt--)
1243                        goto retry_tur;
1244                /*FALLTHRU*/
1245        case SUCCESS:
1246                return 0;
1247        default:
1248                return 1;
1249        }
1250}
1251
1252/**
1253 * scsi_eh_test_devices - check if devices are responding from error recovery.
1254 * @cmd_list:   scsi commands in error recovery.
1255 * @work_q:     queue for commands which still need more error recovery
1256 * @done_q:     queue for commands which are finished
 1257 * @try_stu:    boolean indicating whether a STU command should be tried in addition to TUR.
1258 *
 1259 * Description:
1260 *    Tests if devices are in a working state.  Commands to devices now in
1261 *    a working state are sent to the done_q while commands to devices which
1262 *    are still failing to respond are returned to the work_q for more
1263 *    processing.
1264 **/
1265static int scsi_eh_test_devices(struct list_head *cmd_list,
1266                                struct list_head *work_q,
1267                                struct list_head *done_q, int try_stu)
1268{
1269        struct scsi_cmnd *scmd, *next;
1270        struct scsi_device *sdev;
1271        int finish_cmds;
1272
1273        while (!list_empty(cmd_list)) {
1274                scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
1275                sdev = scmd->device;
1276
1277                if (!try_stu) {
1278                        if (scsi_host_eh_past_deadline(sdev->host)) {
1279                                /* Push items back onto work_q */
1280                                list_splice_init(cmd_list, work_q);
1281                                SCSI_LOG_ERROR_RECOVERY(3,
1282                                        sdev_printk(KERN_INFO, sdev,
 1283                                                    "%s: skip test device, past eh deadline\n",
1284                                                    current->comm));
1285                                break;
1286                        }
1287                }
1288
1289                finish_cmds = !scsi_device_online(scmd->device) ||
1290                        (try_stu && !scsi_eh_try_stu(scmd) &&
1291                         !scsi_eh_tur(scmd)) ||
1292                        !scsi_eh_tur(scmd);
1293
1294                list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
1295                        if (scmd->device == sdev) {
1296                                if (finish_cmds &&
1297                                    (try_stu ||
1298                                     scsi_eh_action(scmd, SUCCESS) == SUCCESS))
1299                                        scsi_eh_finish_cmd(scmd, done_q);
1300                                else
1301                                        list_move_tail(&scmd->eh_entry, work_q);
1302                        }
1303        }
1304        return list_empty(work_q);
1305}
1306
1307
1308/**
1309 * scsi_eh_abort_cmds - abort pending commands.
1310 * @work_q:     &list_head for pending commands.
1311 * @done_q:     &list_head for processed commands.
1312 *
 1313 * Description:
1314 *    Try and see whether or not it makes sense to try and abort the
1315 *    running command.  This only works out to be the case if we have one
1316 *    command that has timed out.  If the command simply failed, it makes
1317 *    no sense to try and abort the command, since as far as the shost
1318 *    adapter is concerned, it isn't running.
1319 */
1320static int scsi_eh_abort_cmds(struct list_head *work_q,
1321                              struct list_head *done_q)
1322{
1323        struct scsi_cmnd *scmd, *next;
1324        LIST_HEAD(check_list);
1325        int rtn;
1326        struct Scsi_Host *shost;
1327
1328        list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1329                if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
1330                        continue;
1331                shost = scmd->device->host;
1332                if (scsi_host_eh_past_deadline(shost)) {
1333                        list_splice_init(&check_list, work_q);
1334                        SCSI_LOG_ERROR_RECOVERY(3,
1335                                scmd_printk(KERN_INFO, scmd,
1336                                            "%s: skip aborting cmd, past eh deadline\n",
1337                                            current->comm));
1338                        return list_empty(work_q);
1339                }
1340                SCSI_LOG_ERROR_RECOVERY(3,
1341                        scmd_printk(KERN_INFO, scmd,
1342                                     "%s: aborting cmd\n", current->comm));
1343                rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
1344                if (rtn == FAILED) {
1345                        SCSI_LOG_ERROR_RECOVERY(3,
1346                                scmd_printk(KERN_INFO, scmd,
1347                                            "%s: aborting cmd failed\n",
1348                                             current->comm));
1349                        list_splice_init(&check_list, work_q);
1350                        return list_empty(work_q);
1351                }
1352                scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
1353                if (rtn == FAST_IO_FAIL)
1354                        scsi_eh_finish_cmd(scmd, done_q);
1355                else
1356                        list_move_tail(&scmd->eh_entry, &check_list);
1357        }
1358
1359        return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
1360}
1361
1362/**
1363 * scsi_eh_try_stu - Send START_UNIT to device.
1364 * @scmd:       &scsi_cmnd to send START_UNIT
1365 *
1366 * Return value:
1367 *    0 - Device is ready. 1 - Device NOT ready.
1368 */
1369static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
1370{
1371        static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
1372
1373        if (scmd->device->allow_restart) {
1374                int i, rtn = NEEDS_RETRY;
1375
1376                for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
1377                        rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
1378
1379                if (rtn == SUCCESS)
1380                        return 0;
1381        }
1382
1383        return 1;
1384}
1385
 1386/**
1387 * scsi_eh_stu - send START_UNIT if needed
1388 * @shost:      &scsi host being recovered.
1389 * @work_q:     &list_head for pending commands.
1390 * @done_q:     &list_head for processed commands.
1391 *
1392 * Notes:
1393 *    If commands are failing because the device reports "not ready,
1394 *      initializing command required", try sending it a START_UNIT.
1395 */
1396static int scsi_eh_stu(struct Scsi_Host *shost,
1397                              struct list_head *work_q,
1398                              struct list_head *done_q)
1399{
1400        struct scsi_cmnd *scmd, *stu_scmd, *next;
1401        struct scsi_device *sdev;
1402
1403        shost_for_each_device(sdev, shost) {
1404                if (scsi_host_eh_past_deadline(shost)) {
1405                        SCSI_LOG_ERROR_RECOVERY(3,
1406                                sdev_printk(KERN_INFO, sdev,
1407                                            "%s: skip START_UNIT, past eh deadline\n",
1408                                            current->comm));
1409                        break;
1410                }
1411                stu_scmd = NULL;
1412                list_for_each_entry(scmd, work_q, eh_entry)
1413                        if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
1414                            scsi_check_sense(scmd) == FAILED ) {
1415                                stu_scmd = scmd;
1416                                break;
1417                        }
1418
1419                if (!stu_scmd)
1420                        continue;
1421
1422                SCSI_LOG_ERROR_RECOVERY(3,
1423                        sdev_printk(KERN_INFO, sdev,
1424                                     "%s: Sending START_UNIT\n",
1425                                    current->comm));
1426
1427                if (!scsi_eh_try_stu(stu_scmd)) {
1428                        if (!scsi_device_online(sdev) ||
1429                            !scsi_eh_tur(stu_scmd)) {
1430                                list_for_each_entry_safe(scmd, next,
1431                                                          work_q, eh_entry) {
1432                                        if (scmd->device == sdev &&
1433                                            scsi_eh_action(scmd, SUCCESS) == SUCCESS)
1434                                                scsi_eh_finish_cmd(scmd, done_q);
1435                                }
1436                        }
1437                } else {
1438                        SCSI_LOG_ERROR_RECOVERY(3,
1439                                sdev_printk(KERN_INFO, sdev,
1440                                            "%s: START_UNIT failed\n",
1441                                            current->comm));
1442                }
1443        }
1444
1445        return list_empty(work_q);
1446}
1447
1448
1449/**
1450 * scsi_eh_bus_device_reset - send bdr if needed
1451 * @shost:      scsi host being recovered.
1452 * @work_q:     &list_head for pending commands.
1453 * @done_q:     &list_head for processed commands.
1454 *
1455 * Notes:
1456 *    Try a bus device reset.  Note that if multiple devices on the bus
1457 *    are jammed, a bus device reset against a single one of them is
1458 *    unlikely to help; recovery would then have to escalate to a bus
1459 *    reset instead.
1460 */
1461static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
1462                                    struct list_head *work_q,
1463                                    struct list_head *done_q)
1464{
1465        struct scsi_cmnd *scmd, *bdr_scmd, *next;
1466        struct scsi_device *sdev;
1467        int rtn;
1468
1469        shost_for_each_device(sdev, shost) {
1470                if (scsi_host_eh_past_deadline(shost)) {
1471                        SCSI_LOG_ERROR_RECOVERY(3,
1472                                sdev_printk(KERN_INFO, sdev,
1473                                            "%s: skip BDR, past eh deadline\n",
1474                                             current->comm));
1475                        break;
1476                }
1477                bdr_scmd = NULL;
1478                list_for_each_entry(scmd, work_q, eh_entry)
1479                        if (scmd->device == sdev) {
1480                                bdr_scmd = scmd;
1481                                break;
1482                        }
1483
1484                if (!bdr_scmd)
1485                        continue;
1486
1487                SCSI_LOG_ERROR_RECOVERY(3,
1488                        sdev_printk(KERN_INFO, sdev,
1489                                     "%s: Sending BDR\n", current->comm));
1490                rtn = scsi_try_bus_device_reset(bdr_scmd);
1491                if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1492                        if (!scsi_device_online(sdev) ||
1493                            rtn == FAST_IO_FAIL ||
1494                            !scsi_eh_tur(bdr_scmd)) {
1495                                list_for_each_entry_safe(scmd, next,
1496                                                         work_q, eh_entry) {
1497                                        if (scmd->device == sdev &&
1498                                            scsi_eh_action(scmd, rtn) != FAILED)
1499                                                scsi_eh_finish_cmd(scmd,
1500                                                                   done_q);
1501                                }
1502                        }
1503                } else {
1504                        SCSI_LOG_ERROR_RECOVERY(3,
1505                                sdev_printk(KERN_INFO, sdev,
1506                                            "%s: BDR failed\n", current->comm));
1507                }
1508        }
1509
1510        return list_empty(work_q);
1511}
1512
1513/**
1514 * scsi_eh_target_reset - send target reset if needed
1515 * @shost:      scsi host being recovered.
1516 * @work_q:     &list_head for pending commands.
1517 * @done_q:     &list_head for processed commands.
1518 *
1519 * Notes:
1520 *    Try a target reset.
1521 */
1522static int scsi_eh_target_reset(struct Scsi_Host *shost,
1523                                struct list_head *work_q,
1524                                struct list_head *done_q)
1525{
1526        LIST_HEAD(tmp_list);
1527        LIST_HEAD(check_list);
1528
1529        list_splice_init(work_q, &tmp_list);
1530
1531        while (!list_empty(&tmp_list)) {
1532                struct scsi_cmnd *next, *scmd;
1533                int rtn;
1534                unsigned int id;
1535
1536                if (scsi_host_eh_past_deadline(shost)) {
1537                        /* push back on work queue for further processing */
1538                        list_splice_init(&check_list, work_q);
1539                        list_splice_init(&tmp_list, work_q);
1540                        SCSI_LOG_ERROR_RECOVERY(3,
1541                                shost_printk(KERN_INFO, shost,
1542                                            "%s: Skip target reset, past eh deadline\n",
1543                                             current->comm));
1544                        return list_empty(work_q);
1545                }
1546
1547                scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
1548                id = scmd_id(scmd);
1549
1550                SCSI_LOG_ERROR_RECOVERY(3,
1551                        shost_printk(KERN_INFO, shost,
1552                                     "%s: Sending target reset to target %d\n",
1553                                     current->comm, id));
1554                rtn = scsi_try_target_reset(scmd);
1555                if (rtn != SUCCESS && rtn != FAST_IO_FAIL)
1556                        SCSI_LOG_ERROR_RECOVERY(3,
1557                                shost_printk(KERN_INFO, shost,
1558                                             "%s: Target reset failed"
1559                                             " target: %d\n",
1560                                             current->comm, id));
1561                list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {
1562                        if (scmd_id(scmd) != id)
1563                                continue;
1564
1565                        if (rtn == SUCCESS)
1566                                list_move_tail(&scmd->eh_entry, &check_list);
1567                        else if (rtn == FAST_IO_FAIL)
1568                                scsi_eh_finish_cmd(scmd, done_q);
1569                        else
1570                                /* push back on work queue for further processing */
1571                                list_move(&scmd->eh_entry, work_q);
1572                }
1573        }
1574
1575        return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
1576}
1577
1578/**
1579 * scsi_eh_bus_reset - send a bus reset
1580 * @shost:      &scsi host being recovered.
1581 * @work_q:     &list_head for pending commands.
1582 * @done_q:     &list_head for processed commands.
1583 */
1584static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1585                             struct list_head *work_q,
1586                             struct list_head *done_q)
1587{
1588        struct scsi_cmnd *scmd, *chan_scmd, *next;
1589        LIST_HEAD(check_list);
1590        unsigned int channel;
1591        int rtn;
1592
1593        /*
1594         * we really want to loop over the various channels, and do this on
1595         * a channel by channel basis.  we should also check to see if any
1596         * of the failed commands are on soft_reset devices, and if so, skip
1597         * the reset.
1598         */
1599
1600        for (channel = 0; channel <= shost->max_channel; channel++) {
1601                if (scsi_host_eh_past_deadline(shost)) {
1602                        list_splice_init(&check_list, work_q);
1603                        SCSI_LOG_ERROR_RECOVERY(3,
1604                                shost_printk(KERN_INFO, shost,
1605                                            "%s: skip BRST, past eh deadline\n",
1606                                             current->comm));
1607                        return list_empty(work_q);
1608                }
1609
1610                chan_scmd = NULL;
1611                list_for_each_entry(scmd, work_q, eh_entry) {
1612                        if (channel == scmd_channel(scmd)) {
1613                                chan_scmd = scmd;
1614                                break;
1615                                /*
1616                                 * FIXME add back in some support for
1617                                 * soft_reset devices.
1618                                 */
1619                        }
1620                }
1621
1622                if (!chan_scmd)
1623                        continue;
1624                SCSI_LOG_ERROR_RECOVERY(3,
1625                        shost_printk(KERN_INFO, shost,
1626                                     "%s: Sending BRST chan: %d\n",
1627                                     current->comm, channel));
1628                rtn = scsi_try_bus_reset(chan_scmd);
1629                if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1630                        list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1631                                if (channel == scmd_channel(scmd)) {
1632                                        if (rtn == FAST_IO_FAIL)
1633                                                scsi_eh_finish_cmd(scmd,
1634                                                                   done_q);
1635                                        else
1636                                                list_move_tail(&scmd->eh_entry,
1637                                                               &check_list);
1638                                }
1639                        }
1640                } else {
1641                        SCSI_LOG_ERROR_RECOVERY(3,
1642                                shost_printk(KERN_INFO, shost,
1643                                             "%s: BRST failed chan: %d\n",
1644                                             current->comm, channel));
1645                }
1646        }
1647        return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
1648}
1649
1650/**
1651 * scsi_eh_host_reset - send a host reset
1652 * @shost:      host to be reset.
1653 * @work_q:     &list_head for pending commands.
1654 * @done_q:     &list_head for processed commands.
1655 */
1656static int scsi_eh_host_reset(struct Scsi_Host *shost,
1657                              struct list_head *work_q,
1658                              struct list_head *done_q)
1659{
1660        struct scsi_cmnd *scmd, *next;
1661        LIST_HEAD(check_list);
1662        int rtn;
1663
1664        if (!list_empty(work_q)) {
1665                scmd = list_entry(work_q->next,
1666                                  struct scsi_cmnd, eh_entry);
1667
1668                SCSI_LOG_ERROR_RECOVERY(3,
1669                        shost_printk(KERN_INFO, shost,
1670                                     "%s: Sending HRST\n",
1671                                     current->comm));
1672
1673                rtn = scsi_try_host_reset(scmd);
1674                if (rtn == SUCCESS) {
1675                        list_splice_init(work_q, &check_list);
1676                } else if (rtn == FAST_IO_FAIL) {
1677                        list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1678                                scsi_eh_finish_cmd(scmd, done_q);
1679                        }
1680                } else {
1681                        SCSI_LOG_ERROR_RECOVERY(3,
1682                                shost_printk(KERN_INFO, shost,
1683                                             "%s: HRST failed\n",
1684                                             current->comm));
1685                }
1686        }
1687        return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
1688}
1689
1690/**
1691 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
1692 * @work_q:     &list_head for pending commands.
1693 * @done_q:     &list_head for processed commands.
1694 */
1695static void scsi_eh_offline_sdevs(struct list_head *work_q,
1696                                  struct list_head *done_q)
1697{
1698        struct scsi_cmnd *scmd, *next;
1699
1700        list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1701                sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
1702                            "not ready after error recovery\n");
1703                scsi_device_set_state(scmd->device, SDEV_OFFLINE);
1704                if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) {
1705                        /*
1706                         * FIXME: Handle lost cmds.
1707                         */
1708                }
1709                scsi_eh_finish_cmd(scmd, done_q);
1710        }
1711        return;
1712}
1713
1714/**
1715 * scsi_noretry_cmd - determine if command should be failed fast
1716 * @scmd:       SCSI cmd to examine.
1717 */
1718int scsi_noretry_cmd(struct scsi_cmnd *scmd)
1719{
1720        switch (host_byte(scmd->result)) {
1721        case DID_OK:
1722                break;
1723        case DID_TIME_OUT:
1724                goto check_type;
1725        case DID_BUS_BUSY:
1726                return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
1727        case DID_PARITY:
1728                return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
1729        case DID_ERROR:
1730                if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
1731                    status_byte(scmd->result) == RESERVATION_CONFLICT)
1732                        return 0;
1733                /* fall through */
1734        case DID_SOFT_ERROR:
1735                return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
1736        }
1737
1738        if (status_byte(scmd->result) != CHECK_CONDITION)
1739                return 0;
1740
1741check_type:
1742        /*
1743         * assume caller has checked sense and determined
1744         * the check condition was retryable.
1745         */
1746        if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
1747            scmd->request->cmd_type == REQ_TYPE_BLOCK_PC)
1748                return 1;
1749        else
1750                return 0;
1751}
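
/*
 * Example: a request issued with REQ_FAILFAST_TRANSPORT that completes
 * with host byte DID_BUS_BUSY makes scsi_noretry_cmd() return non-zero,
 * so scsi_decide_disposition() and scsi_eh_flush_done_q() report the
 * error upwards instead of retrying the command.
 */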
1752
1753/**
1754 * scsi_decide_disposition - Disposition a cmd on return from LLD.
1755 * @scmd:       SCSI cmd to examine.
1756 *
1757 * Notes:
1758 *    This is *only* called when we are examining the status after sending
1759 *    out the actual data command.  any commands that are queued for error
1760 *    recovery (e.g. test_unit_ready) do *not* come through here.
1761 *
1762 *    When this routine returns FAILED, the error handler thread is
1763 *    woken.  In cases where the error code indicates an error that
1764 *    doesn't require the error handler (i.e. we don't need to
1765 *    abort/reset), this function should return SUCCESS.
1766 */
1767int scsi_decide_disposition(struct scsi_cmnd *scmd)
1768{
1769        int rtn;
1770
1771        /*
1772         * if the device is offline, then we clearly just pass the result back
1773         * up to the top level.
1774         */
1775        if (!scsi_device_online(scmd->device)) {
1776                SCSI_LOG_ERROR_RECOVERY(5, scmd_printk(KERN_INFO, scmd,
1777                        "%s: device offline - report as SUCCESS\n", __func__));
1778                return SUCCESS;
1779        }
1780
1781        /*
1782         * first check the host byte, to see if there is anything in there
1783         * that would indicate what we need to do.
1784         */
1785        switch (host_byte(scmd->result)) {
1786        case DID_PASSTHROUGH:
1787                /*
1788                 * no matter what, pass this through to the upper layer.
1789                 * nuke this special code so that it looks like we are saying
1790                 * did_ok.
1791                 */
1792                scmd->result &= 0xff00ffff;
1793                return SUCCESS;
1794        case DID_OK:
1795                /*
1796                 * looks good.  drop through, and check the next byte.
1797                 */
1798                break;
1799        case DID_ABORT:
1800                if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
1801                        set_host_byte(scmd, DID_TIME_OUT);
1802                        return SUCCESS;
1803                }
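                /* FALLTHROUGH */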
1804        case DID_NO_CONNECT:
1805        case DID_BAD_TARGET:
1806                /*
1807                 * note - this means that we just report the status back
1808                 * to the top level driver, not that we actually think
1809                 * that it indicates SUCCESS.
1810                 */
1811                return SUCCESS;
1812                /*
1813                 * when the low level driver returns did_soft_error,
1814                 * it is responsible for keeping an internal retry counter
1815                 * in order to avoid endless loops (db)
1816                 *
1817                 * actually this is a bug in this function here.  we should
1818                 * be mindful of the maximum number of retries specified
1819                 * and not get stuck in a loop.
1820                 */
1821        case DID_SOFT_ERROR:
1822                goto maybe_retry;
1823        case DID_IMM_RETRY:
1824                return NEEDS_RETRY;
1825
1826        case DID_REQUEUE:
1827                return ADD_TO_MLQUEUE;
1828        case DID_TRANSPORT_DISRUPTED:
1829                /*
1830                 * LLD/transport was disrupted during processing of the IO.
1831                 * The transport class is now blocked/blocking,
1832                 * and the transport will decide what to do with the IO
1833                 * based on its timers and recovery capabilities if
1834                 * there are enough retries.
1835                 */
1836                goto maybe_retry;
1837        case DID_TRANSPORT_FAILFAST:
1838                /*
1839                 * The transport decided to failfast the IO (most likely
1840                 * the fast io fail tmo fired), so send IO directly upwards.
1841                 */
1842                return SUCCESS;
1843        case DID_ERROR:
1844                if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
1845                    status_byte(scmd->result) == RESERVATION_CONFLICT)
1846                        /*
1847                         * execute reservation conflict processing code
1848                         * lower down
1849                         */
1850                        break;
1851                /* fallthrough */
1852        case DID_BUS_BUSY:
1853        case DID_PARITY:
1854                goto maybe_retry;
1855        case DID_TIME_OUT:
1856                /*
1857                 * when we scan the bus, we get timeout messages for
1858                 * these commands if there is no device available.
1859                 * other hosts report did_no_connect for the same thing.
1860                 */
1861                if ((scmd->cmnd[0] == TEST_UNIT_READY ||
1862                     scmd->cmnd[0] == INQUIRY)) {
1863                        return SUCCESS;
1864                } else {
1865                        return FAILED;
1866                }
1867        case DID_RESET:
1868                return SUCCESS;
1869        default:
1870                return FAILED;
1871        }
1872
1873        /*
1874         * next, check the message byte.
1875         */
1876        if (msg_byte(scmd->result) != COMMAND_COMPLETE)
1877                return FAILED;
1878
1879        /*
1880         * check the status byte to see if this indicates anything special.
1881         */
1882        switch (status_byte(scmd->result)) {
1883        case QUEUE_FULL:
1884                scsi_handle_queue_full(scmd->device);
1885                /*
1886                 * the case of trying to send too many commands to a
1887                 * tagged queueing device.
1888                 */
1889        case BUSY:
1890                /*
1891                 * device can't talk to us at the moment.  Should only
1892                 * occur (SAM-3) when the task queue is empty, so will cause
1893                 * the empty queue handling to trigger a stall in the
1894                 * device.
1895                 */
1896                return ADD_TO_MLQUEUE;
1897        case GOOD:
1898                if (scmd->cmnd[0] == REPORT_LUNS)
1899                        scmd->device->sdev_target->expecting_lun_change = 0;
1900                scsi_handle_queue_ramp_up(scmd->device);
1901        case COMMAND_TERMINATED:
1902                return SUCCESS;
1903        case TASK_ABORTED:
1904                goto maybe_retry;
1905        case CHECK_CONDITION:
1906                rtn = scsi_check_sense(scmd);
1907                if (rtn == NEEDS_RETRY)
1908                        goto maybe_retry;
1909                /* if rtn == FAILED, we have no sense information;
1910                 * returning FAILED will wake the error handler thread
1911                 * to collect the sense and redo the decide
1912                 * disposition */
1913                return rtn;
1914        case CONDITION_GOOD:
1915        case INTERMEDIATE_GOOD:
1916        case INTERMEDIATE_C_GOOD:
1917        case ACA_ACTIVE:
1918                /*
1919                 * who knows?  FIXME(eric)
1920                 */
1921                return SUCCESS;
1922
1923        case RESERVATION_CONFLICT:
1924                sdev_printk(KERN_INFO, scmd->device,
1925                            "reservation conflict\n");
1926                set_host_byte(scmd, DID_NEXUS_FAILURE);
1927                return SUCCESS; /* causes immediate i/o error */
1928        default:
1929                return FAILED;
1930        }
1931        return FAILED;
1932
1933      maybe_retry:
1934
1935        /* we requeue for retry because the error was retryable, and
1936         * the request was not marked fast fail.  Note that above,
1937         * even if the request is marked fast fail, we still requeue
1938         * for queue congestion conditions (QUEUE_FULL or BUSY) */
1939        if ((++scmd->retries) <= scmd->allowed
1940            && !scsi_noretry_cmd(scmd)) {
1941                return NEEDS_RETRY;
1942        } else {
1943                /*
1944                 * no more retries - report this one back to upper level.
1945                 */
1946                return SUCCESS;
1947        }
1948}
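
/*
 * Example: a command that completes with status byte BUSY is requeued
 * via ADD_TO_MLQUEUE above regardless of any fast-fail flags, while a
 * retryable CHECK_CONDITION goes through maybe_retry and is bounded by
 * scmd->allowed and scsi_noretry_cmd().
 */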
1949
1950static void eh_lock_door_done(struct request *req, int uptodate)
1951{
1952        __blk_put_request(req->q, req);
1953}
1954
1955/**
1956 * scsi_eh_lock_door - Prevent medium removal for the specified device
1957 * @sdev:       SCSI device to prevent medium removal
1958 *
1959 * Locking:
1960 *      We must be called from process context.
1961 *
1962 * Notes:
1963 *      We queue up an asynchronous PREVENT ALLOW MEDIUM REMOVAL request
1964 *      at the head of the device's request queue, and continue.
1965 */
1966static void scsi_eh_lock_door(struct scsi_device *sdev)
1967{
1968        struct request *req;
1969
1970        /*
1971         * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
1972         * request becomes available
1973         */
1974        req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
1975        if (IS_ERR(req))
1976                return;
1977
1978        blk_rq_set_block_pc(req);
1979
1980        req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
1981        req->cmd[1] = 0;
1982        req->cmd[2] = 0;
1983        req->cmd[3] = 0;
1984        req->cmd[4] = SCSI_REMOVAL_PREVENT;
1985        req->cmd[5] = 0;
1986
1987        req->cmd_len = COMMAND_SIZE(req->cmd[0]);
1988
1989        req->cmd_flags |= REQ_QUIET;
1990        req->timeout = 10 * HZ;
1991        req->retries = 5;
1992
1993        blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
1994}
1995
1996/**
1997 * scsi_restart_operations - restart io operations to the specified host.
1998 * @shost:      Host we are restarting.
1999 *
2000 * Notes:
2001 *    When we entered the error handler, we blocked all further i/o to
2002 *    this host.  We need to 'reverse' this process.
2003 */
2004static void scsi_restart_operations(struct Scsi_Host *shost)
2005{
2006        struct scsi_device *sdev;
2007        unsigned long flags;
2008
2009        /*
2010         * If the door was locked, we need to insert a door lock request
2011         * onto the head of the SCSI request queue for the device.  There
2012         * is no point trying to lock the door of an off-line device.
2013         */
2014        shost_for_each_device(sdev, shost) {
2015                if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
2016                        scsi_eh_lock_door(sdev);
2017                        sdev->was_reset = 0;
2018                }
2019        }
2020
2021        /*
2022         * next free up anything directly waiting upon the host.  this
2023         * will be requests for character device operations, and also for
2024         * ioctls to queued block devices.
2025         */
2026        SCSI_LOG_ERROR_RECOVERY(3,
2027                shost_printk(KERN_INFO, shost, "waking up host to restart\n"));
2028
2029        spin_lock_irqsave(shost->host_lock, flags);
2030        if (scsi_host_set_state(shost, SHOST_RUNNING))
2031                if (scsi_host_set_state(shost, SHOST_CANCEL))
2032                        BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
2033        spin_unlock_irqrestore(shost->host_lock, flags);
2034
2035        wake_up(&shost->host_wait);
2036
2037        /*
2038         * finally we need to re-initiate requests that may be pending.  we will
2039         * have had everything blocked while error handling is taking place, and
2040         * now that error recovery is done, we will need to ensure that these
2041         * requests are started.
2042         */
2043        scsi_run_host_queues(shost);
2044
2045        /*
2046         * if eh is active and host_eh_scheduled is pending we need to re-run
2047         * recovery.  we do this check after scsi_run_host_queues() to allow
2048         * everything pent up since the last eh run a chance to make forward
2049         * progress before we sync again.  Either we'll immediately re-run
2050         * recovery or scsi_device_unbusy() will wake us again when these
2051         * pending commands complete.
2052         */
2053        spin_lock_irqsave(shost->host_lock, flags);
2054        if (shost->host_eh_scheduled)
2055                if (scsi_host_set_state(shost, SHOST_RECOVERY))
2056                        WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
2057        spin_unlock_irqrestore(shost->host_lock, flags);
2058}
2059
2060/**
2061 * scsi_eh_ready_devs - check device ready state and recover if not.
2062 * @shost:      host to be recovered.
2063 * @work_q:     &list_head for pending commands.
2064 * @done_q:     &list_head for processed commands.
2065 */
2066void scsi_eh_ready_devs(struct Scsi_Host *shost,
2067                        struct list_head *work_q,
2068                        struct list_head *done_q)
2069{
2070        if (!scsi_eh_stu(shost, work_q, done_q))
2071                if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
2072                        if (!scsi_eh_target_reset(shost, work_q, done_q))
2073                                if (!scsi_eh_bus_reset(shost, work_q, done_q))
2074                                        if (!scsi_eh_host_reset(shost, work_q, done_q))
2075                                                scsi_eh_offline_sdevs(work_q,
2076                                                                      done_q);
2077}
2078EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
2079
2080/**
2081 * scsi_eh_flush_done_q - finish processed commands or retry them.
2082 * @done_q:     list_head of processed commands.
2083 */
2084void scsi_eh_flush_done_q(struct list_head *done_q)
2085{
2086        struct scsi_cmnd *scmd, *next;
2087
2088        list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
2089                list_del_init(&scmd->eh_entry);
2090                if (scsi_device_online(scmd->device) &&
2091                    !scsi_noretry_cmd(scmd) &&
2092                    (++scmd->retries <= scmd->allowed)) {
2093                        SCSI_LOG_ERROR_RECOVERY(3,
2094                                scmd_printk(KERN_INFO, scmd,
2095                                             "%s: flush retry cmd: %p\n",
2096                                             current->comm, scmd));
2097                        scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
2098                } else {
2099                        /*
2100                         * If we just got sense for the device (called
2101                         * scsi_eh_get_sense), scmd->result is already
2102                         * set; do not set DRIVER_TIMEOUT.
2103                         */
2104                        if (!scmd->result)
2105                                scmd->result |= (DRIVER_TIMEOUT << 24);
2106                        SCSI_LOG_ERROR_RECOVERY(3,
2107                                scmd_printk(KERN_INFO, scmd,
2108                                             "%s: flush finish cmd: %p\n",
2109                                             current->comm, scmd));
2110                        scsi_finish_command(scmd);
2111                }
2112        }
2113}
2114EXPORT_SYMBOL(scsi_eh_flush_done_q);
2115
2116/**
2117 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
2118 * @shost:      Host to unjam.
2119 *
2120 * Notes:
2121 *    When we come in here, we *know* that all commands on the bus have
2122 *    either completed, failed or timed out.  we also know that no further
2123 *    commands are being sent to the host, so things are relatively quiet
2124 *    and we have freedom to fiddle with things as we wish.
2125 *
2126 *    This is only the *default* implementation.  it is possible for
2127 *    individual drivers to supply their own version of this function, and
2128 *    if the maintainer wishes to do this, it is strongly suggested that
2129 *    this function be taken as a template and modified.  this function
2130 *    was designed to correctly handle problems for about 95% of the
2131 *    different cases out there, and it should always provide at least a
2132 *    reasonable amount of error recovery.
2133 *
2134 *    Any command marked 'failed' or 'timeout' must eventually have
2135 *    scsi_finish_command() called for it.  We do all of the retry stuff
2136 *    here, so when we restart the host after we return it should have an
2137 *    empty queue.
2138 */
2139static void scsi_unjam_host(struct Scsi_Host *shost)
2140{
2141        unsigned long flags;
2142        LIST_HEAD(eh_work_q);
2143        LIST_HEAD(eh_done_q);
2144
2145        spin_lock_irqsave(shost->host_lock, flags);
2146        list_splice_init(&shost->eh_cmd_q, &eh_work_q);
2147        spin_unlock_irqrestore(shost->host_lock, flags);
2148
2149        SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));
2150
2151        if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
2152                if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
2153                        scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
2154
2155        spin_lock_irqsave(shost->host_lock, flags);
2156        if (shost->eh_deadline != -1)
2157                shost->last_reset = 0;
2158        spin_unlock_irqrestore(shost->host_lock, flags);
2159        scsi_eh_flush_done_q(&eh_done_q);
2160}
2161
2162/**
2163 * scsi_error_handler - SCSI error handler thread
2164 * @data:       Host for which we are running.
2165 *
2166 * Notes:
2167 *    This is the main error handling loop.  This is run as a kernel thread
2168 *    for every SCSI host and handles all error handling activity.
2169 */
2170int scsi_error_handler(void *data)
2171{
2172        struct Scsi_Host *shost = data;
2173
2174        /*
2175         * We use TASK_INTERRUPTIBLE so that the thread is not
2176         * counted against the load average as a running process.
2177         * We never actually get interrupted because kthread_run
2178         * disables signal delivery for the created thread.
2179         */
2180        while (!kthread_should_stop()) {
2181                set_current_state(TASK_INTERRUPTIBLE);
2182                if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
2183                    shost->host_failed != atomic_read(&shost->host_busy)) {
2184                        SCSI_LOG_ERROR_RECOVERY(1,
2185                                shost_printk(KERN_INFO, shost,
2186                                             "scsi_eh_%d: sleeping\n",
2187                                             shost->host_no));
2188                        schedule();
2189                        continue;
2190                }
2191
2192                __set_current_state(TASK_RUNNING);
2193                SCSI_LOG_ERROR_RECOVERY(1,
2194                        shost_printk(KERN_INFO, shost,
2195                                     "scsi_eh_%d: waking up %d/%d/%d\n",
2196                                     shost->host_no, shost->host_eh_scheduled,
2197                                     shost->host_failed,
2198                                     atomic_read(&shost->host_busy)));
2199
2200                /*
2201                 * We have a host that is failing for some reason.  Figure out
2202                 * what we need to do to get it up and online again (if we can).
2203                 * If we fail, we end up taking the thing offline.
2204                 */
2205                if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) {
2206                        SCSI_LOG_ERROR_RECOVERY(1,
2207                                shost_printk(KERN_ERR, shost,
2208                                             "scsi_eh_%d: unable to autoresume\n",
2209                                             shost->host_no));
2210                        continue;
2211                }
2212
2213                if (shost->transportt->eh_strategy_handler)
2214                        shost->transportt->eh_strategy_handler(shost);
2215                else
2216                        scsi_unjam_host(shost);
2217
2218                /*
2219                 * Note - if the above fails completely, the action is to take
2220                 * individual devices offline and flush the queue of any
2221                 * outstanding requests that may have been pending.  When we
2222                 * restart, we restart any I/O to any other devices on the bus
2223                 * which are still online.
2224                 */
2225                scsi_restart_operations(shost);
2226                if (!shost->eh_noresume)
2227                        scsi_autopm_put_host(shost);
2228        }
2229        __set_current_state(TASK_RUNNING);
2230
2231        SCSI_LOG_ERROR_RECOVERY(1,
2232                shost_printk(KERN_INFO, shost,
2233                             "Error handler scsi_eh_%d exiting\n",
2234                             shost->host_no));
2235        shost->ehandler = NULL;
2236        return 0;
2237}
2238
2239/*
2240 * Function:    scsi_report_bus_reset()
2241 *
2242 * Purpose:     Utility function used by low-level drivers to report that
2243 *              they have observed a bus reset on the bus being handled.
2244 *
2245 * Arguments:   shost       - Host in question
2246 *              channel     - channel on which reset was observed.
2247 *
2248 * Returns:     Nothing
2249 *
2250 * Lock status: Host lock must be held.
2251 *
2252 * Notes:       This only needs to be called if the reset is one which
2253 *              originates from an unknown location.  Resets originated
2254 *              by the mid-level itself don't need to call this, but there
2255 *              should be no harm.
2256 *
2257 *              The main purpose of this is to make sure that a CHECK_CONDITION
2258 *              is properly treated.
2259 */
2260void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
2261{
2262        struct scsi_device *sdev;
2263
2264        __shost_for_each_device(sdev, shost) {
2265                if (channel == sdev_channel(sdev))
2266                        __scsi_report_device_reset(sdev, NULL);
2267        }
2268}
2269EXPORT_SYMBOL(scsi_report_bus_reset);
2270
2271/*
2272 * Function:    scsi_report_device_reset()
2273 *
2274 * Purpose:     Utility function used by low-level drivers to report that
2275 *              they have observed a device reset on the device being handled.
2276 *
2277 * Arguments:   shost       - Host in question
2278 *              channel     - channel on which reset was observed
2279 *              target      - target on which reset was observed
2280 *
2281 * Returns:     Nothing
2282 *
2283 * Lock status: Host lock must be held
2284 *
2285 * Notes:       This only needs to be called if the reset is one which
2286 *              originates from an unknown location.  Resets originated
2287 *              by the mid-level itself don't need to call this, but there
2288 *              should be no harm.
2289 *
2290 *              The main purpose of this is to make sure that a CHECK_CONDITION
2291 *              is properly treated.
2292 */
2293void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
2294{
2295        struct scsi_device *sdev;
2296
2297        __shost_for_each_device(sdev, shost) {
2298                if (channel == sdev_channel(sdev) &&
2299                    target == sdev_id(sdev))
2300                        __scsi_report_device_reset(sdev, NULL);
2301        }
2302}
2303EXPORT_SYMBOL(scsi_report_device_reset);
2304
2305static void
2306scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
2307{
2308}
2309
2310/**
2311 * scsi_ioctl_reset - explicitly reset a host/bus/target/device
2312 * @dev:        scsi_device to operate on
2313 * @arg:        reset type (see sg.h)
2314 */
2315int
2316scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
2317{
2318        struct scsi_cmnd *scmd;
2319        struct Scsi_Host *shost = dev->host;
2320        struct request req;
2321        unsigned long flags;
2322        int error = 0, rtn, val;
2323
2324        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2325                return -EACCES;
2326
2327        error = get_user(val, arg);
2328        if (error)
2329                return error;
2330
2331        if (scsi_autopm_get_host(shost) < 0)
2332                return -EIO;
2333
2334        error = -EIO;
2335        scmd = scsi_get_command(dev, GFP_KERNEL);
2336        if (!scmd)
2337                goto out_put_autopm_host;
2338
2339        blk_rq_init(NULL, &req);
2340        scmd->request = &req;
2341
2342        scmd->cmnd = req.cmd;
2343
2344        scmd->scsi_done         = scsi_reset_provider_done_command;
2345        memset(&scmd->sdb, 0, sizeof(scmd->sdb));
2346
2347        scmd->cmd_len                   = 0;
2348
2349        scmd->sc_data_direction         = DMA_BIDIRECTIONAL;
2350
2351        spin_lock_irqsave(shost->host_lock, flags);
2352        shost->tmf_in_progress = 1;
2353        spin_unlock_irqrestore(shost->host_lock, flags);
2354
2355        switch (val & ~SG_SCSI_RESET_NO_ESCALATE) {
2356        case SG_SCSI_RESET_NOTHING:
2357                rtn = SUCCESS;
2358                break;
2359        case SG_SCSI_RESET_DEVICE:
2360                rtn = scsi_try_bus_device_reset(scmd);
2361                if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
2362                        break;
2363                /* FALLTHROUGH */
2364        case SG_SCSI_RESET_TARGET:
2365                rtn = scsi_try_target_reset(scmd);
2366                if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
2367                        break;
2368                /* FALLTHROUGH */
2369        case SG_SCSI_RESET_BUS:
2370                rtn = scsi_try_bus_reset(scmd);
2371                if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
2372                        break;
2373                /* FALLTHROUGH */
2374        case SG_SCSI_RESET_HOST:
2375                rtn = scsi_try_host_reset(scmd);
2376                if (rtn == SUCCESS)
2377                        break;
2378                /* FALLTHROUGH */
2379        default:
2380                rtn = FAILED;
2381                break;
2382        }
2383
2384        error = (rtn == SUCCESS) ? 0 : -EIO;
2385
2386        spin_lock_irqsave(shost->host_lock, flags);
2387        shost->tmf_in_progress = 0;
2388        spin_unlock_irqrestore(shost->host_lock, flags);
2389
2390        /*
2391         * be sure to wake up anyone who was sleeping or had their queue
2392         * suspended while we performed the TMF.
2393         */
2394        SCSI_LOG_ERROR_RECOVERY(3,
2395                shost_printk(KERN_INFO, shost,
2396                             "waking up host to restart after TMF\n"));
2397
2398        wake_up(&shost->host_wait);
2399        scsi_run_host_queues(shost);
2400
2401        scsi_put_command(scmd);
2402
2403out_put_autopm_host:
2404        scsi_autopm_put_host(shost);
2405        return error;
2406}
2407EXPORT_SYMBOL(scsi_ioctl_reset);
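
/*
 * Illustrative userspace sketch (device node and access mode are only
 * examples): scsi_ioctl_reset() is reached through the SG_SCSI_RESET
 * ioctl from <scsi/sg.h>, and needs CAP_SYS_ADMIN plus CAP_SYS_RAWIO:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *
 *	int fd = open("/dev/sg0", O_RDWR);
 *	int val = SG_SCSI_RESET_DEVICE;
 *	int ret = ioctl(fd, SG_SCSI_RESET, &val);
 *
 * ret is 0 when the reset succeeded.  OR-ing SG_SCSI_RESET_NO_ESCALATE
 * into val asks the kernel not to escalate a failed device reset to a
 * target, bus or host reset.
 */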
2408
2409/**
2410 * scsi_normalize_sense - normalize main elements from either fixed or
2411 *                      descriptor sense data format into a common format.
2412 *
2413 * @sense_buffer:       byte array containing sense data returned by device
2414 * @sb_len:             number of valid bytes in sense_buffer
2415 * @sshdr:              pointer to instance of structure that common
2416 *                      elements are written to.
2417 *
2418 * Notes:
2419 *      The "main elements" from sense data are: response_code, sense_key,
2420 *      asc, ascq and additional_length (only for descriptor format).
2421 *
2422 *      Typically this function can be called after a device has
2423 *      responded to a SCSI command with the CHECK_CONDITION status.
2424 *
2425 * Return value:
2426 *      true if valid sense data information found, else false;
2427 */
2428bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
2429                          struct scsi_sense_hdr *sshdr)
2430{
2431        if (!sense_buffer || !sb_len)
2432                return false;
2433
2434        memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
2435
2436        sshdr->response_code = (sense_buffer[0] & 0x7f);
2437
2438        if (!scsi_sense_valid(sshdr))
2439                return false;
2440
2441        if (sshdr->response_code >= 0x72) {
2442                /*
2443                 * descriptor format
2444                 */
2445                if (sb_len > 1)
2446                        sshdr->sense_key = (sense_buffer[1] & 0xf);
2447                if (sb_len > 2)
2448                        sshdr->asc = sense_buffer[2];
2449                if (sb_len > 3)
2450                        sshdr->ascq = sense_buffer[3];
2451                if (sb_len > 7)
2452                        sshdr->additional_length = sense_buffer[7];
2453        } else {
2454                /*
2455                 * fixed format
2456                 */
2457                if (sb_len > 2)
2458                        sshdr->sense_key = (sense_buffer[2] & 0xf);
2459                if (sb_len > 7) {
2460                        sb_len = (sb_len < (sense_buffer[7] + 8)) ?
2461                                         sb_len : (sense_buffer[7] + 8);
2462                        if (sb_len > 12)
2463                                sshdr->asc = sense_buffer[12];
2464                        if (sb_len > 13)
2465                                sshdr->ascq = sense_buffer[13];
2466                }
2467        }
2468
2469        return true;
2470}
2471EXPORT_SYMBOL(scsi_normalize_sense);
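
/*
 * Illustrative example: a fixed-format sense buffer reporting NOT READY,
 * "initializing command required" (sense key 02h, asc/ascq 04h/02h)
 * normalizes as follows:
 *
 *	const u8 sense[18] = { 0x70, 0, 0x02, 0, 0, 0, 0, 0x0a,
 *			       0, 0, 0, 0, 0x04, 0x02, 0, 0, 0, 0 };
 *	struct scsi_sense_hdr sshdr;
 *
 *	scsi_normalize_sense(sense, sizeof(sense), &sshdr);
 *
 * leaves sshdr.response_code == 0x70, sshdr.sense_key == 0x02,
 * sshdr.asc == 0x04 and sshdr.ascq == 0x02.
 */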
2472
2473bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
2474                                  struct scsi_sense_hdr *sshdr)
2475{
2476        return scsi_normalize_sense(cmd->sense_buffer,
2477                        SCSI_SENSE_BUFFERSIZE, sshdr);
2478}
2479EXPORT_SYMBOL(scsi_command_normalize_sense);
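
/*
 * Typical caller pattern (sketch only): after a command completes with
 * CHECK_CONDITION, decode the sense buffer before reacting to it:
 *
 *	struct scsi_sense_hdr sshdr;
 *
 *	if (scsi_command_normalize_sense(cmd, &sshdr) &&
 *	    sshdr.sense_key == UNIT_ATTENTION)
 *		(react to the unit attention, e.g. rescan or retry)
 */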
2480
2481/**
2482 * scsi_sense_desc_find - search for a given descriptor type in descriptor sense data format.
2483 * @sense_buffer:       byte array of descriptor format sense data
2484 * @sb_len:             number of valid bytes in sense_buffer
2485 * @desc_type:          value of descriptor type to find
2486 *                      (e.g. 0 -> information)
2487 *
2488 * Notes:
2489 *      only valid when sense data is in descriptor format
2490 *
2491 * Return value:
2492 *      pointer to start of (first) descriptor if found else NULL
2493 */
2494const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
2495                                int desc_type)
2496{
2497        int add_sen_len, add_len, desc_len, k;
2498        const u8 * descp;
2499
2500        if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
2501                return NULL;
2502        if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
2503                return NULL;
2504        add_sen_len = (add_sen_len < (sb_len - 8)) ?
2505                        add_sen_len : (sb_len - 8);
2506        descp = &sense_buffer[8];
2507        for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
2508                descp += desc_len;
2509                add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
2510                desc_len = add_len + 2;
2511                if (descp[0] == desc_type)
2512                        return descp;
2513                if (add_len < 0) /* short descriptor?? */
2514                        break;
2515        }
2516        return NULL;
2517}
2518EXPORT_SYMBOL(scsi_sense_desc_find);
2519
2520/**
2521 * scsi_get_sense_info_fld - get information field from sense data (either fixed or descriptor format)
2522 * @sense_buffer:       byte array of sense data
2523 * @sb_len:             number of valid bytes in sense_buffer
2524 * @info_out:           pointer to a 64-bit integer where the 8 or 4 byte
2525 *                      information field will be placed if found.
2526 *
2527 * Return value:
2528 *      1 if information field found, 0 if not found.
2529 */
2530int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
2531                            u64 * info_out)
2532{
2533        int j;
2534        const u8 * ucp;
2535        u64 ull;
2536
2537        if (sb_len < 7)
2538                return 0;
2539        switch (sense_buffer[0] & 0x7f) {
2540        case 0x70:
2541        case 0x71:
2542                if (sense_buffer[0] & 0x80) {
2543                        *info_out = (sense_buffer[3] << 24) +
2544                                    (sense_buffer[4] << 16) +
2545                                    (sense_buffer[5] << 8) + sense_buffer[6];
2546                        return 1;
2547                } else
2548                        return 0;
2549        case 0x72:
2550        case 0x73:
2551                ucp = scsi_sense_desc_find(sense_buffer, sb_len,
2552                                           0 /* info desc */);
2553                if (ucp && (0xa == ucp[1])) {
2554                        ull = 0;
2555                        for (j = 0; j < 8; ++j) {
2556                                if (j > 0)
2557                                        ull <<= 8;
2558                                ull |= ucp[4 + j];
2559                        }
2560                        *info_out = ull;
2561                        return 1;
2562                } else
2563                        return 0;
2564        default:
2565                return 0;
2566        }
2567}
2568EXPORT_SYMBOL(scsi_get_sense_info_fld);
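
/*
 * Illustrative example: in descriptor-format sense data the information
 * field lives in an "information" descriptor (type 00h, twelve bytes
 * long).  For a MEDIUM ERROR / unrecovered read at LBA 0x1234:
 *
 *	const u8 sense[20] = { 0x72, 0x03, 0x11, 0x00, 0, 0, 0, 0x0c,
 *			       0x00, 0x0a, 0x80, 0, 0, 0, 0, 0,
 *			       0, 0, 0x12, 0x34 };
 *	u64 info;
 *
 *	scsi_get_sense_info_fld(sense, sizeof(sense), &info);
 *
 * locates the descriptor via scsi_sense_desc_find() and sets info to
 * 0x1234, returning 1.
 */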
2569
2570/**
2571 * scsi_build_sense_buffer - build sense data in a buffer
2572 * @desc:       Sense format (non zero == descriptor format,
2573 *              0 == fixed format)
2574 * @buf:        Where to build sense data
2575 * @key:        Sense key
2576 * @asc:        Additional sense code
2577 * @ascq:       Additional sense code qualifier
2578 *
2579 */
2580void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
2581{
2582        if (desc) {
2583                buf[0] = 0x72;  /* descriptor, current */
2584                buf[1] = key;
2585                buf[2] = asc;
2586                buf[3] = ascq;
2587                buf[7] = 0;
2588        } else {
2589                buf[0] = 0x70;  /* fixed, current */
2590                buf[2] = key;
2591                buf[7] = 0xa;
2592                buf[12] = asc;
2593                buf[13] = ascq;
2594        }
2595}
2596EXPORT_SYMBOL(scsi_build_sense_buffer);
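
/*
 * Illustrative example: building fixed-format sense that reports
 * ILLEGAL REQUEST, "invalid field in CDB" (asc/ascq 24h/00h):
 *
 *	u8 buf[SCSI_SENSE_BUFFERSIZE] = { 0 };
 *
 *	scsi_build_sense_buffer(0, buf, ILLEGAL_REQUEST, 0x24, 0x00);
 *
 * leaves buf[0] == 0x70, buf[2] == 0x05 (the sense key), buf[7] == 0x0a,
 * buf[12] == 0x24 and buf[13] == 0x00.
 */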
2597