linux/drivers/scsi/scsi_lib.c
/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR           ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE         2

struct scsi_host_sg_pool {
        size_t          size;
        char            *name;
        struct kmem_cache       *slab;
        mempool_t       *pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
        SP(8),
        SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
        SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
        SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
        SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
        SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>

static bool acpi_scsi_bus_match(struct device *dev)
{
        return dev->bus == &scsi_bus_type;
}

int scsi_register_acpi_bus_type(struct acpi_bus_type *bus)
{
        bus->match = acpi_scsi_bus_match;
        return register_acpi_bus_type(bus);
}
EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type);

void scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus)
{
        unregister_acpi_bus_type(bus);
}
EXPORT_SYMBOL_GPL(scsi_unregister_acpi_bus_type);
#endif
/*
 * When to reinvoke queueing after a resource shortage. We use 3 msecs so
 * as not to change behaviour from the previous unplug mechanism;
 * experimentation may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY        3

/*
 * Function:    scsi_unprep_request()
 *
 * Purpose:     Remove all preparation done for a request, including its
 *              associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:   req     - request to unprepare
 *
 * Lock status: Assumed that no locks are held upon entry.
 *
 * Returns:     Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
        struct scsi_cmnd *cmd = req->special;

        blk_unprep_request(req);
        req->special = NULL;

        scsi_put_command(cmd);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason:  The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct scsi_target *starget = scsi_target(device);
        struct request_queue *q = device->request_queue;
        unsigned long flags;

        SCSI_LOG_MLQUEUE(1,
                 printk("Inserting command %p into mlqueue\n", cmd));

        /*
         * Set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken.  The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        switch (reason) {
        case SCSI_MLQUEUE_HOST_BUSY:
                host->host_blocked = host->max_host_blocked;
                break;
        case SCSI_MLQUEUE_DEVICE_BUSY:
        case SCSI_MLQUEUE_EH_RETRY:
                device->device_blocked = device->max_device_blocked;
                break;
        case SCSI_MLQUEUE_TARGET_BUSY:
                starget->target_blocked = starget->max_target_blocked;
                break;
        }

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        if (unbusy)
                scsi_device_unbusy(device);

        /*
         * Requeue this command.  It will go before all other commands
         * that are already in the queue. Schedule requeue work under
         * lock such that the kblockd_schedule_work() call happens
         * before blk_cleanup_queue() finishes.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        kblockd_schedule_work(q, &device->requeue_work);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        __scsi_queue_insert(cmd, reason, 1);
}
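
/*
 * Example (hypothetical, not part of this file): the @reason codes above
 * typically originate from a low-level driver's queuecommand handler.
 * A minimal sketch, assuming an illustrative driver that relies on the
 * generic host fields for its resource check; example_queuecommand() is
 * an invented name.  When it returns SCSI_MLQUEUE_HOST_BUSY, the
 * midlayer requeues the command via scsi_queue_insert() with that
 * reason.
 */
static int example_queuecommand(struct Scsi_Host *shost,
                                struct scsi_cmnd *cmd)
{
        /* Illustrative resource check only. */
        if (shost->can_queue > 0 && shost->host_busy >= shost->can_queue)
                return SCSI_MLQUEUE_HOST_BUSY;

        /* ... hand the command to the hardware, then return 0 ... */
        return 0;
}
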
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:       scsi device
 * @cmd:        scsi command
 * @data_direction: data direction
 * @buffer:     data buffer
 * @bufflen:    length of buffer
 * @sense:      optional sense buffer
 * @timeout:    request timeout in jiffies
 * @retries:    number of times to retry request
 * @flags:      flags ORed into the request's cmd_flags
 * @resid:      optional residual length
 *
 * Returns the req->errors value, which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
                 unsigned char *sense, int timeout, int retries, int flags,
                 int *resid)
{
        struct request *req;
        int write = (data_direction == DMA_TO_DEVICE);
        int ret = DRIVER_ERROR << 24;

        req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
        if (!req)
                return ret;

        if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
                                        buffer, bufflen, __GFP_WAIT))
                goto out;

        req->cmd_len = COMMAND_SIZE(cmd[0]);
        memcpy(req->cmd, cmd, req->cmd_len);
        req->sense = sense;
        req->sense_len = 0;
        req->retries = retries;
        req->timeout = timeout;
        req->cmd_type = REQ_TYPE_BLOCK_PC;
        req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        blk_execute_rq(req->q, NULL, req, 1);

        /*
         * Some devices (USB mass-storage in particular) may transfer
         * garbage data together with a residue indicating that the data
         * is invalid.  Prevent the garbage from being misinterpreted
         * and prevent security leaks by zeroing out the excess data.
         */
        if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
                memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

        if (resid)
                *resid = req->resid_len;
        ret = req->errors;
 out:
        blk_put_request(req);

        return ret;
}
EXPORT_SYMBOL(scsi_execute);
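
/*
 * Example (hypothetical, not part of this file): a minimal sketch of
 * issuing a non-data command through scsi_execute().  The function name
 * example_test_unit_ready() is invented for illustration; the timeout
 * and retry values are arbitrary.
 */
static int example_test_unit_ready(struct scsi_device *sdev)
{
        unsigned char cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };

        /* No data transfer: DMA_NONE with a NULL, zero-length buffer. */
        return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL,
                            30 * HZ, 3, 0, NULL);
}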

int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
                     int data_direction, void *buffer, unsigned bufflen,
                     struct scsi_sense_hdr *sshdr, int timeout, int retries,
                     int *resid, int flags)
{
        char *sense = NULL;
        int result;

        if (sshdr) {
                sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
                if (!sense)
                        return DRIVER_ERROR << 24;
        }
        result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
                              sense, timeout, retries, flags, resid);
        if (sshdr)
                scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

        kfree(sense);
        return result;
}
EXPORT_SYMBOL(scsi_execute_req_flags);
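
/*
 * Example (hypothetical, not part of this file): a sketch of using
 * scsi_execute_req_flags() and then inspecting the decoded sense data.
 * example_request_sense_check() and its error-handling policy are
 * invented for illustration; timeout and retry values are arbitrary.
 */
static int example_request_sense_check(struct scsi_device *sdev,
                                       const unsigned char *cdb,
                                       void *buf, unsigned int len)
{
        struct scsi_sense_hdr sshdr;
        int result;

        result = scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE, buf,
                                        len, &sshdr, 30 * HZ, 3, NULL, 0);
        if (result && scsi_sense_valid(&sshdr) &&
            sshdr.sense_key == UNIT_ATTENTION)
                return -EAGAIN; /* e.g. retry after a unit attention */

        return result ? -EIO : 0;
}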

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd     - command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
        cmd->serial_number = 0;
        scsi_set_resid(cmd, 0);
        memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
        if (cmd->cmd_len == 0)
                cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        struct scsi_target *starget = scsi_target(sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        shost->host_busy--;
        starget->target_busy--;
        if (unlikely(scsi_host_in_recovery(shost) &&
                     (shost->host_failed || shost->host_eh_scheduled)))
                scsi_eh_wakeup(shost);
        spin_unlock(shost->host_lock);
        spin_lock(sdev->request_queue->queue_lock);
        sdev->device_busy--;
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /*
         * Call blk_run_queue for all LUNs on the target, starting with
         * current_sdev. We race with others (to set starget_sdev_user),
         * but in most cases, we will be first. Ideally, each LU on the
         * target would get some limited time or requests on the target.
         */
        blk_run_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                        same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                blk_run_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
 out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
        if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
                return 1;

        return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
        return ((starget->can_queue > 0 &&
                 starget->target_busy >= starget->can_queue) ||
                 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
        if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
            shost->host_blocked || shost->host_self_blocked)
                return 1;

        return 0;
}

/*
 * Function:    scsi_run_queue()
 *
 * Purpose:     Select a proper request queue to serve next
 *
 * Arguments:   q       - last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:       The previous command was completely finished, start
 *              a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
        LIST_HEAD(starved_list);
        unsigned long flags;

        shost = sdev->host;
        if (scsi_target(sdev)->single_lun)
                scsi_single_lun_run(sdev);

        spin_lock_irqsave(shost->host_lock, flags);
        list_splice_init(&shost->starved_list, &starved_list);

        while (!list_empty(&starved_list)) {
                struct request_queue *slq;

                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
                 * drops the queue_lock and can add us back to the
                 * starved_list.
                 *
                 * host_lock protects the starved_list and starved_entry.
                 * scsi_request_fn must get the host_lock before checking
                 * or modifying starved_list or starved_entry.
                 */
                if (scsi_host_is_busy(shost))
                        break;

                sdev = list_entry(starved_list.next,
                                  struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                if (scsi_target_is_busy(scsi_target(sdev))) {
                        list_move_tail(&sdev->starved_entry,
                                       &shost->starved_list);
                        continue;
                }

                /*
                 * Once we drop the host lock, a racing scsi_remove_device()
                 * call may remove the sdev from the starved list and destroy
                 * it and the queue.  Mitigate by taking a reference to the
                 * queue and never touching the sdev again after we drop the
                 * host lock.  Note: if __scsi_remove_device() invokes
                 * blk_cleanup_queue() before the queue is run from this
                 * function then blk_run_queue() will return immediately since
                 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
                 */
                slq = sdev->request_queue;
                if (!blk_get_queue(slq))
                        continue;
                spin_unlock_irqrestore(shost->host_lock, flags);

                blk_run_queue(slq);
                blk_put_queue(slq);

                spin_lock_irqsave(shost->host_lock, flags);
        }
        /* put any unprocessed entries back */
        list_splice(&starved_list, &shost->starved_list);
        spin_unlock_irqrestore(shost->host_lock, flags);

        blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
        struct scsi_device *sdev;
        struct request_queue *q;

        sdev = container_of(work, struct scsi_device, requeue_work);
        q = sdev->request_queue;
        scsi_run_queue(q);
}

/*
 * Function:    scsi_requeue_command()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q       - queue to operate on
 *              cmd     - command that may need to be requeued.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command;
 *              this can happen for a number of reasons - the main one
 *              being I/O errors in the middle of the request, in which
 *              case we need to request the blocks that come after the
 *              bad sector.
 * Notes:       Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct request *req = cmd->request;
        unsigned long flags;

        /*
         * We need to hold a reference on the device to avoid the queue being
         * killed after the unlock and before scsi_run_queue is invoked which
         * may happen because scsi_unprep_request() puts the command which
         * releases its reference on the device.
         */
        get_device(&sdev->sdev_gendev);

        spin_lock_irqsave(q->queue_lock, flags);
        scsi_unprep_request(req);
        blk_requeue_request(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);

        put_device(&sdev->sdev_gendev);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct request_queue *q = sdev->request_queue;

        /* need to hold a reference on the device before we let go of the cmd */
        get_device(&sdev->sdev_gendev);

        scsi_put_command(cmd);
        scsi_run_queue(q);

        /* ok to remove device now */
        put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *              of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd      - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *              requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *              We are guaranteeing that the request queue will be goosed
 *              at some point during this call.
 * Notes:       If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
                                          int bytes, int requeue)
{
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;

        /*
         * If there are blocks left over at the end, set up the command
         * to queue the remainder of them.
         */
        if (blk_end_request(req, error, bytes)) {
                /* kill remainder if no retries */
                if (error && scsi_noretry_cmd(cmd))
                        blk_end_request_all(req, error);
                else {
                        if (requeue) {
                                /*
                                 * Bleah.  Leftovers again.  Stick the
                                 * leftovers in the front of the
                                 * queue, and goose the queue again.
                                 */
                                scsi_release_buffers(cmd);
                                scsi_requeue_command(q, cmd);
                                cmd = NULL;
                        }
                        return cmd;
                }
        }

        /*
         * This will goose the queue request function at the end, so we don't
         * need to worry about launching another command.
         */
        __scsi_release_buffers(cmd, 0);
        scsi_next_command(cmd);
        return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
        unsigned int index;

        BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

        if (nents <= 8)
                index = 0;
        else
                index = get_count_order(nents) - 3;

        return index;
}
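
/*
 * Worked example of the mapping above (added commentary, not in the
 * original source): nents 1..8 map to index 0 (sgpool-8); for larger
 * counts, get_count_order() rounds up to the next power of two, so
 * nents 9..16 -> index 1 (sgpool-16), 17..32 -> index 2 (sgpool-32),
 * 33..64 -> index 3 (sgpool-64), 65..128 -> index 4 (sgpool-128).
 */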

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
        struct scsi_host_sg_pool *sgp;

        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
        mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
        struct scsi_host_sg_pool *sgp;

        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
        return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
                              gfp_t gfp_mask)
{
        int ret;

        BUG_ON(!nents);

        ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
                               gfp_mask, scsi_sg_alloc);
        if (unlikely(ret))
                __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
                                scsi_sg_free);

        return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
        __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{
        if (cmd->sdb.table.nents)
                scsi_free_sgtable(&cmd->sdb);

        memset(&cmd->sdb, 0, sizeof(cmd->sdb));

        if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
                struct scsi_data_buffer *bidi_sdb =
                        cmd->request->next_rq->special;
                scsi_free_sgtable(bidi_sdb);
                kmem_cache_free(scsi_sdb_cache, bidi_sdb);
                cmd->request->next_rq->special = NULL;
        }

        if (scsi_prot_sg_count(cmd))
                scsi_free_sgtable(cmd->prot_sdb);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd     - command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *              command, we must release resources allocated during
 *              the __init_io() function.  Primarily this would involve
 *              the scatter-gather table, and potentially any bounce
 *              buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
        __scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);

static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
        int error = 0;

        switch (host_byte(result)) {
        case DID_TRANSPORT_FAILFAST:
                error = -ENOLINK;
                break;
        case DID_TARGET_FAILURE:
                set_host_byte(cmd, DID_OK);
                error = -EREMOTEIO;
                break;
        case DID_NEXUS_FAILURE:
                set_host_byte(cmd, DID_OK);
                error = -EBADE;
                break;
        default:
                error = -EIO;
                break;
        }

        return error;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *              We must call scsi_end_request().  This will finish off
 *              the specified number of sectors.  If we are done, the
 *              command block will be released and the queue function
 *              will be goosed.  If we are not done then we have to
 *              figure out what to do next:
 *
 *              a) We can call scsi_requeue_command().  The request
 *                 will be unprepared and put back on the queue.  Then
 *                 a new command will be created for it.  This should
 *                 be used if we made forward progress, or if we want
 *                 to switch from READ(10) to READ(6) for example.
 *
 *              b) We can call scsi_queue_insert().  The request will
 *                 be put back on the queue and retried using the same
 *                 command as before, possibly after a delay.
 *
 *              c) We can call blk_end_request() with -EIO to fail
 *                 the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
        int result = cmd->result;
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int error = 0;
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int sense_deferred = 0;
        enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
              ACTION_DELAYED_RETRY} action;
        char *description = NULL;

        if (result) {
                sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
                if (sense_valid)
                        sense_deferred = scsi_sense_is_deferred(&sshdr);
        }

        if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
                if (result) {
                        if (sense_valid && req->sense) {
                                /*
                                 * SG_IO wants current and deferred errors
                                 */
                                int len = 8 + cmd->sense_buffer[7];

                                if (len > SCSI_SENSE_BUFFERSIZE)
                                        len = SCSI_SENSE_BUFFERSIZE;
                                memcpy(req->sense, cmd->sense_buffer, len);
                                req->sense_len = len;
                        }
                        if (!sense_deferred)
                                error = __scsi_error_from_host_byte(cmd, result);
                }
                /*
                 * __scsi_error_from_host_byte may have reset the host_byte
                 */
                req->errors = cmd->result;

                req->resid_len = scsi_get_resid(cmd);

                if (scsi_bidi_cmnd(cmd)) {
                        /*
                         * Bidi commands must be completed as a whole,
                         * both sides at once.
                         */
                        req->next_rq->resid_len = scsi_in(cmd)->resid;

                        scsi_release_buffers(cmd);
                        blk_end_request_all(req, 0);

                        scsi_next_command(cmd);
                        return;
                }
        }

        /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
        BUG_ON(blk_bidi_rq(req));

        /*
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
        SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
                                      "%d bytes done.\n",
                                      blk_rq_sectors(req), good_bytes));

        /*
         * Recovered errors need reporting, but they're always treated
         * as success, so fiddle the result code here.  For BLOCK_PC
         * we already took a copy of the original into rq->errors, which
         * is what gets returned to the user.
         */
        if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
                /* If ATA PASS-THROUGH INFORMATION AVAILABLE, skip the
                 * print since the caller wants the ATA registers. This
                 * only occurs on SCSI ATA PASS-THROUGH commands when
                 * CK_COND=1.
                 */
                if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
                        ;
                else if (!(req->cmd_flags & REQ_QUIET))
                        scsi_print_sense("", cmd);
                result = 0;
                /* BLOCK_PC may have set error */
                error = 0;
        }

        /*
         * A number of bytes were successfully read.  If there
         * are leftovers and there is some kind of error
         * (result != 0), retry the rest.
         */
        if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
                return;

        error = __scsi_error_from_host_byte(cmd, result);

        if (host_byte(result) == DID_RESET) {
                /* Third party bus reset or reset for error recovery
                 * reasons.  Just retry the command and see what
                 * happens.
                 */
                action = ACTION_RETRY;
        } else if (sense_valid && !sense_deferred) {
                switch (sshdr.sense_key) {
                case UNIT_ATTENTION:
                        if (cmd->device->removable) {
                                /* Detected disc change.  Set a bit
                                 * and quietly refuse further access.
                                 */
                                cmd->device->changed = 1;
                                description = "Media Changed";
                                action = ACTION_FAIL;
                        } else {
                                /* Must have been a power glitch, or a
                                 * bus reset.  Could not have been a
                                 * media change, so we just retry the
                                 * command and see what happens.
                                 */
                                action = ACTION_RETRY;
                        }
                        break;
                case ILLEGAL_REQUEST:
                        /* If we had an ILLEGAL REQUEST returned, then
                         * we may have performed an unsupported
                         * command.  The only thing this should be
                         * would be a ten byte read where only a six
                         * byte read was supported.  Also, on a system
                         * where READ CAPACITY failed, we may have
                         * read past the end of the disk.
                         */
                        if ((cmd->device->use_10_for_rw &&
                            sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
                            (cmd->cmnd[0] == READ_10 ||
                             cmd->cmnd[0] == WRITE_10)) {
                                /* This will issue a new 6-byte command. */
                                cmd->device->use_10_for_rw = 0;
                                action = ACTION_REPREP;
                        } else if (sshdr.asc == 0x10) /* DIX */ {
                                description = "Host Data Integrity Failure";
                                action = ACTION_FAIL;
                                error = -EILSEQ;
                        /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
                        } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
                                switch (cmd->cmnd[0]) {
                                case UNMAP:
                                        description = "Discard failure";
                                        break;
                                case WRITE_SAME:
                                case WRITE_SAME_16:
                                        if (cmd->cmnd[1] & 0x8)
                                                description = "Discard failure";
                                        else
                                                description =
                                                        "Write same failure";
                                        break;
                                default:
                                        description = "Invalid command failure";
                                        break;
                                }
                                action = ACTION_FAIL;
                                error = -EREMOTEIO;
                        } else
                                action = ACTION_FAIL;
                        break;
                case ABORTED_COMMAND:
                        action = ACTION_FAIL;
                        if (sshdr.asc == 0x10) { /* DIF */
                                description = "Target Data Integrity Failure";
                                error = -EILSEQ;
                        }
                        break;
                case NOT_READY:
                        /* If the device is in the process of becoming
                         * ready, or has a temporary blockage, retry.
                         */
                        if (sshdr.asc == 0x04) {
                                switch (sshdr.ascq) {
                                case 0x01: /* becoming ready */
                                case 0x04: /* format in progress */
                                case 0x05: /* rebuild in progress */
                                case 0x06: /* recalculation in progress */
                                case 0x07: /* operation in progress */
                                case 0x08: /* Long write in progress */
                                case 0x09: /* self test in progress */
                                case 0x14: /* space allocation in progress */
                                        action = ACTION_DELAYED_RETRY;
                                        break;
                                default:
                                        description = "Device not ready";
                                        action = ACTION_FAIL;
                                        break;
                                }
                        } else {
                                description = "Device not ready";
                                action = ACTION_FAIL;
                        }
                        break;
                case VOLUME_OVERFLOW:
                        /* See SSC3rXX or current. */
                        action = ACTION_FAIL;
                        break;
                default:
                        description = "Unhandled sense code";
                        action = ACTION_FAIL;
                        break;
                }
        } else {
                description = "Unhandled error code";
                action = ACTION_FAIL;
        }

        switch (action) {
        case ACTION_FAIL:
                /* Give up and fail the remainder of the request */
                scsi_release_buffers(cmd);
                if (!(req->cmd_flags & REQ_QUIET)) {
                        if (description)
                                scmd_printk(KERN_INFO, cmd, "%s\n",
                                            description);
                        scsi_print_result(cmd);
                        if (driver_byte(result) & DRIVER_SENSE)
                                scsi_print_sense("", cmd);
                        scsi_print_command(cmd);
                }
                if (blk_end_request_err(req, error))
                        scsi_requeue_command(q, cmd);
                else
                        scsi_next_command(cmd);
                break;
        case ACTION_REPREP:
                /* Unprep the request and put it back at the head of the queue.
                 * A new command will be prepared and issued.
                 */
                scsi_release_buffers(cmd);
                scsi_requeue_command(q, cmd);
                break;
        case ACTION_RETRY:
                /* Retry the same command immediately */
                __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
                break;
        case ACTION_DELAYED_RETRY:
                /* Retry the same command after a delay */
                __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
                break;
        }
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
                             gfp_t gfp_mask)
{
        int count;

        /*
         * If sg table allocation fails, requeue request later.
         */
        if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
                                        gfp_mask))) {
                return BLKPREP_DEFER;
        }

        req->buffer = NULL;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
        BUG_ON(count > sdb->table.nents);
        sdb->table.nents = count;
        sdb->length = blk_rq_bytes(req);
        return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *              BLKPREP_DEFER if the failure is retryable
 *              BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
        struct request *rq = cmd->request;

        int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
        if (error)
                goto err_exit;

        if (blk_bidi_rq(rq)) {
                struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
                        scsi_sdb_cache, GFP_ATOMIC);
                if (!bidi_sdb) {
                        error = BLKPREP_DEFER;
                        goto err_exit;
                }

                rq->next_rq->special = bidi_sdb;
                error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
                if (error)
                        goto err_exit;
        }

        if (blk_integrity_rq(rq)) {
                struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
                int ivecs, count;

                BUG_ON(prot_sdb == NULL);
                ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

                if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
                        error = BLKPREP_DEFER;
                        goto err_exit;
                }

                count = blk_rq_map_integrity_sg(rq->q, rq->bio,
                                                prot_sdb->table.sgl);
                BUG_ON(unlikely(count > ivecs));
                BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

                cmd->prot_sdb = prot_sdb;
                cmd->prot_sdb->table.nents = count;
        }

        return BLKPREP_OK;

err_exit:
        scsi_release_buffers(cmd);
        cmd->request->special = NULL;
        scsi_put_command(cmd);
        return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
                struct request *req)
{
        struct scsi_cmnd *cmd;

        if (!req->special) {
                cmd = scsi_get_command(sdev, GFP_ATOMIC);
                if (unlikely(!cmd))
                        return NULL;
                req->special = cmd;
        } else {
                cmd = req->special;
        }

        /* pull a tag out of the request if we have one */
        cmd->tag = req->tag;
        cmd->request = req;

        cmd->cmnd = req->cmd;
        cmd->prot_op = SCSI_PROT_NORMAL;

        return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
        struct scsi_cmnd *cmd;
        int ret = scsi_prep_state_check(sdev, req);

        if (ret != BLKPREP_OK)
                return ret;

        cmd = scsi_get_cmd_from_req(sdev, req);
        if (unlikely(!cmd))
                return BLKPREP_DEFER;

        /*
         * BLOCK_PC requests may transfer data, in which case they must
         * have a bio attached to them.  Or they might contain a SCSI
         * command that does not transfer data, in which case they may
         * optionally submit a request without an attached bio.
         */
        if (req->bio) {
                int ret;

                BUG_ON(!req->nr_phys_segments);

                ret = scsi_init_io(cmd, GFP_ATOMIC);
                if (unlikely(ret))
                        return ret;
        } else {
                BUG_ON(blk_rq_bytes(req));

                memset(&cmd->sdb, 0, sizeof(cmd->sdb));
                req->buffer = NULL;
        }

        cmd->cmd_len = req->cmd_len;
        if (!blk_rq_bytes(req))
                cmd->sc_data_direction = DMA_NONE;
        else if (rq_data_dir(req) == WRITE)
                cmd->sc_data_direction = DMA_TO_DEVICE;
        else
                cmd->sc_data_direction = DMA_FROM_DEVICE;

        cmd->transfersize = blk_rq_bytes(req);
        cmd->allowed = req->retries;
        return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs by
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
        struct scsi_cmnd *cmd;
        int ret = scsi_prep_state_check(sdev, req);

        if (ret != BLKPREP_OK)
                return ret;

        if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
                         && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
                ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
                if (ret != BLKPREP_OK)
                        return ret;
        }

        /*
         * Filesystem requests must transfer data.
         */
        BUG_ON(!req->nr_phys_segments);

        cmd = scsi_get_cmd_from_req(sdev, req);
        if (unlikely(!cmd))
                return BLKPREP_DEFER;

        memset(cmd->cmnd, 0, BLK_MAX_CDB);
        return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
        int ret = BLKPREP_OK;

        /*
         * If the device is not in running state we will reject some
         * or all commands.
         */
        if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
                switch (sdev->sdev_state) {
                case SDEV_OFFLINE:
                case SDEV_TRANSPORT_OFFLINE:
                        /*
                         * If the device is offline we refuse to process any
                         * commands.  The device must be brought online
                         * before trying any recovery commands.
                         */
                        sdev_printk(KERN_ERR, sdev,
                                    "rejecting I/O to offline device\n");
                        ret = BLKPREP_KILL;
                        break;
                case SDEV_DEL:
                        /*
                         * If the device is fully deleted, we refuse to
                         * process any commands as well.
                         */
                        sdev_printk(KERN_ERR, sdev,
                                    "rejecting I/O to dead device\n");
                        ret = BLKPREP_KILL;
                        break;
                case SDEV_QUIESCE:
                case SDEV_BLOCK:
                case SDEV_CREATED_BLOCK:
                        /*
                         * If the device is blocked we defer normal commands.
                         */
                        if (!(req->cmd_flags & REQ_PREEMPT))
                                ret = BLKPREP_DEFER;
                        break;
                default:
                        /*
                         * For any other not fully online state we only allow
                         * special commands.  In particular any user initiated
                         * command is not allowed.
                         */
                        if (!(req->cmd_flags & REQ_PREEMPT))
                                ret = BLKPREP_KILL;
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
        struct scsi_device *sdev = q->queuedata;

        switch (ret) {
        case BLKPREP_KILL:
                req->errors = DID_NO_CONNECT << 16;
                /* release the command and kill it */
                if (req->special) {
                        struct scsi_cmnd *cmd = req->special;
                        scsi_release_buffers(cmd);
                        scsi_put_command(cmd);
                        req->special = NULL;
                }
                break;
        case BLKPREP_DEFER:
                /*
                 * If we defer, blk_peek_request() returns NULL, but the
                 * queue must be restarted, so we schedule a callback to
                 * happen shortly.
                 */
                if (sdev->device_busy == 0)
                        blk_delay_queue(q, SCSI_QUEUE_DELAY);
                break;
        default:
                req->cmd_flags |= REQ_DONTPREP;
        }

        return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
        struct scsi_device *sdev = q->queuedata;
        int ret = BLKPREP_KILL;

        if (req->cmd_type == REQ_TYPE_BLOCK_PC)
                ret = scsi_setup_blk_pc_cmnd(sdev, req);
        return scsi_prep_return(q, req, ret);
}
EXPORT_SYMBOL(scsi_prep_fn);
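
/*
 * Example (hypothetical, not part of this file): a sketch of how an
 * upper-level driver composes the exported helpers above in its own
 * prep_fn.  example_uld_prep_fn() and the CDB-construction step are
 * invented for illustration; a real ULD (e.g. sd) fills in cmd->cmnd
 * from the request's sector and length after scsi_setup_fs_cmnd()
 * succeeds.
 */
static int example_uld_prep_fn(struct request_queue *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        int ret;

        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                ret = scsi_setup_blk_pc_cmnd(sdev, rq);
        } else if (rq->cmd_type == REQ_TYPE_FS) {
                ret = scsi_setup_fs_cmnd(sdev, rq);
                /* ... on BLKPREP_OK, build the CDB in cmd->cmnd here ... */
        } else {
                ret = BLKPREP_KILL;
        }

        return scsi_prep_return(q, rq, ret);
}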

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
                                  struct scsi_device *sdev)
{
        if (sdev->device_busy == 0 && sdev->device_blocked) {
                /*
                 * unblock after device_blocked iterates to zero
                 */
                if (--sdev->device_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                   sdev_printk(KERN_INFO, sdev,
                                   "unblocking device at zero depth\n"));
                } else {
                        blk_delay_queue(q, SCSI_QUEUE_DELAY);
                        return 0;
                }
        }
        if (scsi_device_is_busy(sdev))
                return 0;

        return 1;
}

/*
 * scsi_target_queue_ready: checks whether we can send commands to the
 * given target.
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
                                           struct scsi_device *sdev)
{
        struct scsi_target *starget = scsi_target(sdev);

        if (starget->single_lun) {
                if (starget->starget_sdev_user &&
                    starget->starget_sdev_user != sdev)
                        return 0;
                starget->starget_sdev_user = sdev;
        }

        if (starget->target_busy == 0 && starget->target_blocked) {
                /*
                 * unblock after target_blocked iterates to zero
                 */
                if (--starget->target_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
                                         "unblocking target at zero depth\n"));
                } else
                        return 0;
        }

        if (scsi_target_is_busy(starget)) {
                list_move_tail(&sdev->starved_entry, &shost->starved_list);
                return 0;
        }

        return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
                                   struct Scsi_Host *shost,
                                   struct scsi_device *sdev)
{
        if (scsi_host_in_recovery(shost))
                return 0;
        if (shost->host_busy == 0 && shost->host_blocked) {
                /*
                 * unblock after host_blocked iterates to zero
                 */
                if (--shost->host_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                printk("scsi%d unblocking host at zero depth\n",
                                        shost->host_no));
                } else {
                        return 0;
                }
        }
        if (scsi_host_is_busy(shost)) {
                if (list_empty(&sdev->starved_entry))
                        list_add_tail(&sdev->starved_entry, &shost->starved_list);
                return 0;
        }

        /* We're OK to process the command, so we can't be starved */
        if (!list_empty(&sdev->starved_entry))
                list_del_init(&sdev->starved_entry);

        return 1;
}

1424/*
1425 * Busy state exporting function for request stacking drivers.
1426 *
1427 * For efficiency, no lock is taken to check the busy state of
1428 * shost/starget/sdev, since the returned value is not guaranteed and
1429 * may be changed after request stacking drivers call the function,
1430 * regardless of taking lock or not.
1431 *
1432 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
1433 * needs to return 'not busy'. Otherwise, request stacking drivers
1434 * may hold requests forever.
1435 */
1436static int scsi_lld_busy(struct request_queue *q)
1437{
1438        struct scsi_device *sdev = q->queuedata;
1439        struct Scsi_Host *shost;
1440
1441        if (blk_queue_dying(q))
1442                return 0;
1443
1444        shost = sdev->host;
1445
1446        /*
1447         * Ignore host/starget busy state.
1448         * Since block layer does not have a concept of fairness across
1449         * multiple queues, congestion of host/starget needs to be handled
1450         * in SCSI layer.
1451         */
1452        if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1453                return 1;
1454
1455        return 0;
1456}
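
/*
 * Example (editorial sketch, not part of this file): a request stacking
 * driver such as dm-multipath reaches the hook above through
 * blk_lld_busy(), which calls the ->lld_busy_fn registered with
 * blk_queue_lld_busy() below.  example_dispatch() and the clone-and-issue
 * step are hypothetical.
 */
static int example_dispatch(struct request_queue *scsi_q, struct request *rq)
{
        if (blk_lld_busy(scsi_q))
                return 1;       /* underlying device busy: keep rq queued */

        /* ...clone rq and issue it on scsi_q... */
        return 0;
}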
1457
1458/*
1459 * Kill a request for a dead device
1460 */
1461static void scsi_kill_request(struct request *req, struct request_queue *q)
1462{
1463        struct scsi_cmnd *cmd = req->special;
1464        struct scsi_device *sdev;
1465        struct scsi_target *starget;
1466        struct Scsi_Host *shost;
1467
1468        blk_start_request(req);
1469
1470        scmd_printk(KERN_INFO, cmd, "killing request\n");
1471
1472        sdev = cmd->device;
1473        starget = scsi_target(sdev);
1474        shost = sdev->host;
1475        scsi_init_cmd_errh(cmd);
1476        cmd->result = DID_NO_CONNECT << 16;
1477        atomic_inc(&cmd->device->iorequest_cnt);
1478
1479        /*
1480         * The SCSI completion path will call scsi_device_unbusy(), which
1481         * decrements the busy counts, so bump them here first.  To bump
1482         * them we must take the same locks the normal issue path does.
1483         */
1484        sdev->device_busy++;
1485        spin_unlock(sdev->request_queue->queue_lock);
1486        spin_lock(shost->host_lock);
1487        shost->host_busy++;
1488        starget->target_busy++;
1489        spin_unlock(shost->host_lock);
1490        spin_lock(sdev->request_queue->queue_lock);
1491
1492        blk_complete_request(req);
1493}
1494
1495static void scsi_softirq_done(struct request *rq)
1496{
1497        struct scsi_cmnd *cmd = rq->special;
1498        unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1499        int disposition;
1500
1501        INIT_LIST_HEAD(&cmd->eh_entry);
1502
1503        atomic_inc(&cmd->device->iodone_cnt);
1504        if (cmd->result)
1505                atomic_inc(&cmd->device->ioerr_cnt);
1506
1507        disposition = scsi_decide_disposition(cmd);
1508        if (disposition != SUCCESS &&
1509            time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1510                sdev_printk(KERN_ERR, cmd->device,
1511                            "timing out command, waited %lus\n",
1512                            wait_for/HZ);
1513                disposition = SUCCESS;
1514        }
1515
1516        scsi_log_completion(cmd, disposition);
1517
1518        switch (disposition) {
1519        case SUCCESS:
1520                scsi_finish_command(cmd);
1521                break;
1522        case NEEDS_RETRY:
1523                scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1524                break;
1525        case ADD_TO_MLQUEUE:
1526                scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1527                break;
1528        default:
1529                if (!scsi_eh_scmd_add(cmd, 0))
1530                        scsi_finish_command(cmd);
1531        }
1532}
1533
1534/*
1535 * Function:    scsi_request_fn()
1536 *
1537 * Purpose:     Main strategy routine for SCSI.
1538 *
1539 * Arguments:   q       - Pointer to actual queue.
1540 *
1541 * Returns:     Nothing
1542 *
1543 * Lock status: IO request lock assumed to be held when called.
1544 */
1545static void scsi_request_fn(struct request_queue *q)
1546{
1547        struct scsi_device *sdev = q->queuedata;
1548        struct Scsi_Host *shost;
1549        struct scsi_cmnd *cmd;
1550        struct request *req;
1551
1552        if (!get_device(&sdev->sdev_gendev))
1553                /* We must be tearing the block queue down already */
1554                return;
1555
1556        /*
1557         * To start with, we keep looping until the queue is empty, or until
1558         * the host is no longer able to accept any more requests.
1559         */
1560        shost = sdev->host;
1561        for (;;) {
1562                int rtn;
1563                /*
1564                 * get next queueable request.  We do this early to make sure
1565                 * that the request is fully prepared even if we cannot 
1566                 * accept it.
1567                 */
1568                req = blk_peek_request(q);
1569                if (!req || !scsi_dev_queue_ready(q, sdev))
1570                        break;
1571
1572                if (unlikely(!scsi_device_online(sdev))) {
1573                        sdev_printk(KERN_ERR, sdev,
1574                                    "rejecting I/O to offline device\n");
1575                        scsi_kill_request(req, q);
1576                        continue;
1577                }
1578
1579
1580                /*
1581                 * Remove the request from the request list.
1582                 */
1583                if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1584                        blk_start_request(req);
1585                sdev->device_busy++;
1586
1587                spin_unlock(q->queue_lock);
1588                cmd = req->special;
1589                if (unlikely(cmd == NULL)) {
1590                        printk(KERN_CRIT "impossible request in %s.\n"
1591                                         "please mail a stack trace to "
1592                                         "linux-scsi@vger.kernel.org\n",
1593                                         __func__);
1594                        blk_dump_rq_flags(req, "foo");
1595                        BUG();
1596                }
1597                spin_lock(shost->host_lock);
1598
1599                /*
1600                 * We hit this when the driver is using a host wide
1601                 * tag map. For device level tag maps the queue_depth check
1602                 * in the device ready fn would prevent us from trying
1603                 * to allocate a tag. Since the map is a shared host resource
1604                 * we add the dev to the starved list so it eventually gets
1605                 * a run when a tag is freed.
1606                 */
1607                if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
1608                        if (list_empty(&sdev->starved_entry))
1609                                list_add_tail(&sdev->starved_entry,
1610                                              &shost->starved_list);
1611                        goto not_ready;
1612                }
1613
1614                if (!scsi_target_queue_ready(shost, sdev))
1615                        goto not_ready;
1616
1617                if (!scsi_host_queue_ready(q, shost, sdev))
1618                        goto not_ready;
1619
1620                scsi_target(sdev)->target_busy++;
1621                shost->host_busy++;
1622
1623                /*
1624                 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1625                 *              take the lock again.
1626                 */
1627                spin_unlock_irq(shost->host_lock);
1628
1629                /*
1630                 * Finally, initialize any error handling parameters, and set up
1631                 * the timers for timeouts.
1632                 */
1633                scsi_init_cmd_errh(cmd);
1634
1635                /*
1636                 * Dispatch the command to the low-level driver.
1637                 */
1638                rtn = scsi_dispatch_cmd(cmd);
1639                spin_lock_irq(q->queue_lock);
1640                if (rtn)
1641                        goto out_delay;
1642        }
1643
1644        goto out;
1645
1646 not_ready:
1647        spin_unlock_irq(shost->host_lock);
1648
1649        /*
1650         * lock q, handle tag, requeue req, and decrement device_busy. We
1651         * must return with queue_lock held.
1652         *
1653         * Decrementing device_busy without checking it is OK, as all such
1654         * cases (host limits or settings) should run the queue at some
1655         * later time.
1656         */
1657        spin_lock_irq(q->queue_lock);
1658        blk_requeue_request(q, req);
1659        sdev->device_busy--;
1660out_delay:
1661        if (sdev->device_busy == 0)
1662                blk_delay_queue(q, SCSI_QUEUE_DELAY);
1663out:
1664        /* must be careful here...if we trigger the ->remove() function
1665         * we cannot be holding the q lock */
1666        spin_unlock_irq(q->queue_lock);
1667        put_device(&sdev->sdev_gendev);
1668        spin_lock_irq(q->queue_lock);
1669}
1670
1671u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1672{
1673        struct device *host_dev;
1674        u64 bounce_limit = 0xffffffff;
1675
1676        if (shost->unchecked_isa_dma)
1677                return BLK_BOUNCE_ISA;
1678        /*
1679         * Platforms with virtual-DMA translation
1680         * hardware have no practical limit.
1681         */
1682        if (!PCI_DMA_BUS_IS_PHYS)
1683                return BLK_BOUNCE_ANY;
1684
1685        host_dev = scsi_get_device(shost);
1686        if (host_dev && host_dev->dma_mask)
1687                bounce_limit = *host_dev->dma_mask;
1688
1689        return bounce_limit;
1690}
1691EXPORT_SYMBOL(scsi_calculate_bounce_limit);
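
/*
 * Example (editorial sketch): a transport class that allocates its own
 * request_queue (e.g. for BSG pass-through) can reuse the host's DMA
 * addressing limit computed above.  example_setup_bsg_queue() is
 * hypothetical.
 */
static void example_setup_bsg_queue(struct Scsi_Host *shost,
                                    struct request_queue *q)
{
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
}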
1692
1693struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1694                                         request_fn_proc *request_fn)
1695{
1696        struct request_queue *q;
1697        struct device *dev = shost->dma_dev;
1698
1699        q = blk_init_queue(request_fn, NULL);
1700        if (!q)
1701                return NULL;
1702
1703        /*
1704         * this limit is imposed by hardware restrictions
1705         */
1706        blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1707                                        SCSI_MAX_SG_CHAIN_SEGMENTS));
1708
1709        if (scsi_host_prot_dma(shost)) {
1710                shost->sg_prot_tablesize =
1711                        min_not_zero(shost->sg_prot_tablesize,
1712                                     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
1713                BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1714                blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1715        }
1716
1717        blk_queue_max_hw_sectors(q, shost->max_sectors);
1718        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1719        blk_queue_segment_boundary(q, shost->dma_boundary);
1720        dma_set_seg_boundary(dev, shost->dma_boundary);
1721
1722        blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1723
1724        if (!shost->use_clustering)
1725                q->limits.cluster = 0;
1726
1727        /*
1728         * set a reasonable default alignment on word boundaries: the
1729         * host and device may alter it using
1730         * blk_queue_update_dma_alignment() later.
1731         */
1732        blk_queue_dma_alignment(q, 0x03);
1733
1734        return q;
1735}
1736EXPORT_SYMBOL(__scsi_alloc_queue);
1737
1738struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1739{
1740        struct request_queue *q;
1741
1742        q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1743        if (!q)
1744                return NULL;
1745
1746        blk_queue_prep_rq(q, scsi_prep_fn);
1747        blk_queue_softirq_done(q, scsi_softirq_done);
1748        blk_queue_rq_timed_out(q, scsi_times_out);
1749        blk_queue_lld_busy(q, scsi_lld_busy);
1750        return q;
1751}
1752
1753/*
1754 * Function:    scsi_block_requests()
1755 *
1756 * Purpose:     Utility function used by low-level drivers to prevent further
1757 *              commands from being queued to the host.
1758 *
1759 * Arguments:   shost       - Host in question
1760 *
1761 * Returns:     Nothing
1762 *
1763 * Lock status: No locks are assumed held.
1764 *
1765 * Notes:       There is no timer nor any other means by which the requests
1766 *              get unblocked other than the low-level driver calling
1767 *              scsi_unblock_requests().
1768 */
1769void scsi_block_requests(struct Scsi_Host *shost)
1770{
1771        shost->host_self_blocked = 1;
1772}
1773EXPORT_SYMBOL(scsi_block_requests);
1774
1775/*
1776 * Function:    scsi_unblock_requests()
1777 *
1778 * Purpose:     Utility function used by low-level drivers to allow further
1779 *              commands to be queued to the host.
1780 *
1781 * Arguments:   shost       - Host in question
1782 *
1783 * Returns:     Nothing
1784 *
1785 * Lock status: No locks are assumed held.
1786 *
1787 * Notes:       There is no timer nor any other means by which the requests
1788 *              get unblocked other than the low-level driver calling
1789 *              scsi_unblock_requests().
1790 *
1791 *              This is done as an API function so that changes to the
1792 *              internals of the scsi mid-layer won't require wholesale
1793 *              changes to drivers that use this feature.
1794 */
1795void scsi_unblock_requests(struct Scsi_Host *shost)
1796{
1797        shost->host_self_blocked = 0;
1798        scsi_run_host_queues(shost);
1799}
1800EXPORT_SYMBOL(scsi_unblock_requests);
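
/*
 * Example (editorial sketch): a low-level driver pausing command
 * submission around a controller reset.  example_reset_adapter() and the
 * hardware re-init step are hypothetical.
 */
static int example_reset_adapter(struct Scsi_Host *shost)
{
        scsi_block_requests(shost);
        /* no new commands reach ->queuecommand() past this point */

        /* ...re-initialize the hardware... */

        scsi_unblock_requests(shost);   /* also reruns the queues */
        return 0;
}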
1801
1802int __init scsi_init_queue(void)
1803{
1804        int i;
1805
1806        scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1807                                           sizeof(struct scsi_data_buffer),
1808                                           0, 0, NULL);
1809        if (!scsi_sdb_cache) {
1810                printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1811                return -ENOMEM;
1812        }
1813
1814        for (i = 0; i < SG_MEMPOOL_NR; i++) {
1815                struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1816                int size = sgp->size * sizeof(struct scatterlist);
1817
1818                sgp->slab = kmem_cache_create(sgp->name, size, 0,
1819                                SLAB_HWCACHE_ALIGN, NULL);
1820                if (!sgp->slab) {
1821                        printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1822                                        sgp->name);
1823                        goto cleanup_sdb;
1824                }
1825
1826                sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1827                                                     sgp->slab);
1828                if (!sgp->pool) {
1829                        printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1830                                        sgp->name);
1831                        goto cleanup_sdb;
1832                }
1833        }
1834
1835        return 0;
1836
1837cleanup_sdb:
1838        for (i = 0; i < SG_MEMPOOL_NR; i++) {
1839                struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1840                if (sgp->pool)
1841                        mempool_destroy(sgp->pool);
1842                if (sgp->slab)
1843                        kmem_cache_destroy(sgp->slab);
1844        }
1845        kmem_cache_destroy(scsi_sdb_cache);
1846
1847        return -ENOMEM;
1848}
1849
1850void scsi_exit_queue(void)
1851{
1852        int i;
1853
1854        kmem_cache_destroy(scsi_sdb_cache);
1855
1856        for (i = 0; i < SG_MEMPOOL_NR; i++) {
1857                struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1858                mempool_destroy(sgp->pool);
1859                kmem_cache_destroy(sgp->slab);
1860        }
1861}
1862
1863/**
1864 *      scsi_mode_select - issue a mode select
1865 *      @sdev:  SCSI device to be queried
1866 *      @pf:    Page format bit (1 == standard, 0 == vendor specific)
1867 *      @sp:    Save page bit (0 == don't save, 1 == save)
1868 *      @modepage: mode page being requested
1869 *      @buffer: request buffer (may not be smaller than eight bytes)
1870 *      @len:   length of request buffer.
1871 *      @timeout: command timeout
1872 *      @retries: number of retries before failing
1873 *      @data: returns a structure abstracting the mode header data
1874 *      @sshdr: place to put sense data (or NULL if no sense to be collected).
1875 *              must be SCSI_SENSE_BUFFERSIZE big.
1876 *
1877 *      Returns zero if successful; negative error number or scsi
1878 *      status on error
1879 *
1880 */
1881int
1882scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1883                 unsigned char *buffer, int len, int timeout, int retries,
1884                 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1885{
1886        unsigned char cmd[10];
1887        unsigned char *real_buffer;
1888        int ret;
1889
1890        memset(cmd, 0, sizeof(cmd));
1891        cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
1892
1893        if (sdev->use_10_for_ms) {
1894                if (len > 65535)
1895                        return -EINVAL;
1896                real_buffer = kmalloc(8 + len, GFP_KERNEL);
1897                if (!real_buffer)
1898                        return -ENOMEM;
1899                memcpy(real_buffer + 8, buffer, len);
1900                len += 8;
1901                real_buffer[0] = 0;
1902                real_buffer[1] = 0;
1903                real_buffer[2] = data->medium_type;
1904                real_buffer[3] = data->device_specific;
1905                real_buffer[4] = data->longlba ? 0x01 : 0;
1906                real_buffer[5] = 0;
1907                real_buffer[6] = data->block_descriptor_length >> 8;
1908                real_buffer[7] = data->block_descriptor_length;
1909
1910                cmd[0] = MODE_SELECT_10;
1911                cmd[7] = len >> 8;
1912                cmd[8] = len;
1913        } else {
1914                if (len > 255 || data->block_descriptor_length > 255 ||
1915                    data->longlba)
1916                        return -EINVAL;
1917
1918                real_buffer = kmalloc(4 + len, GFP_KERNEL);
1919                if (!real_buffer)
1920                        return -ENOMEM;
1921                memcpy(real_buffer + 4, buffer, len);
1922                len += 4;
1923                real_buffer[0] = 0;
1924                real_buffer[1] = data->medium_type;
1925                real_buffer[2] = data->device_specific;
1926                real_buffer[3] = data->block_descriptor_length;
1927
1928
1929                cmd[0] = MODE_SELECT;
1930                cmd[4] = len;
1931        }
1932
1933        ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
1934                               sshdr, timeout, retries, NULL);
1935        kfree(real_buffer);
1936        return ret;
1937}
1938EXPORT_SYMBOL_GPL(scsi_mode_select);
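
/*
 * Example (editorial sketch, loosely modeled on sd.c's cache handling):
 * fetch the caching mode page with scsi_mode_sense(), flip the WCE bit,
 * and write the page back with scsi_mode_select().  The buffer size,
 * timeout, retry count and example_set_wce() are hypothetical, and the
 * sketch omits bounds checking on the returned lengths.
 */
static int example_set_wce(struct scsi_device *sdev, int enable)
{
        unsigned char buf[128];
        unsigned char *page;
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
        int len, res;

        res = scsi_mode_sense(sdev, 0, 0x08 /* caching page */, buf,
                              sizeof(buf), 10 * HZ, 3, &data, NULL);
        if (!scsi_status_is_good(res))
                return -EIO;

        /* the page proper follows the mode header and block descriptors */
        page = buf + data.header_length + data.block_descriptor_length;
        len = data.length - data.header_length - data.block_descriptor_length;
        if (enable)
                page[2] |= 0x04;        /* WCE */
        else
                page[2] &= ~0x04;

        data.device_specific = 0;       /* don't echo back e.g. a WP bit */
        res = scsi_mode_select(sdev, 1 /* PF */, 1 /* SP */, 0x08, page,
                               len, 10 * HZ, 3, &data, &sshdr);
        return scsi_status_is_good(res) ? 0 : -EIO;
}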
1939
1940/**
1941 *      scsi_mode_sense - issue a mode sense, falling back from ten to six bytes if necessary.
1942 *      @sdev:  SCSI device to be queried
1943 *      @dbd:   set if mode sense will allow block descriptors to be returned
1944 *      @modepage: mode page being requested
1945 *      @buffer: request buffer (may not be smaller than eight bytes)
1946 *      @len:   length of request buffer.
1947 *      @timeout: command timeout
1948 *      @retries: number of retries before failing
1949 *      @data: returns a structure abstracting the mode header data
1950 *      @sshdr: place to put sense data (or NULL if no sense to be collected).
1951 *              must be SCSI_SENSE_BUFFERSIZE big.
1952 *
1953 *      Returns zero if successful, or a non-zero SCSI result on
1954 *      failure.  On success, @data->header_length holds the header
1955 *      offset (4 or 8, for a six or ten byte command respectively).
1956 */
1957int
1958scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1959                  unsigned char *buffer, int len, int timeout, int retries,
1960                  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1961{
1962        unsigned char cmd[12];
1963        int use_10_for_ms;
1964        int header_length;
1965        int result;
1966        struct scsi_sense_hdr my_sshdr;
1967
1968        memset(data, 0, sizeof(*data));
1969        memset(&cmd[0], 0, 12);
1970        cmd[1] = dbd & 0x18;    /* allows DBD and LLBA bits */
1971        cmd[2] = modepage;
1972
1973        /* caller might not be interested in sense, but we need it */
1974        if (!sshdr)
1975                sshdr = &my_sshdr;
1976
1977 retry:
1978        use_10_for_ms = sdev->use_10_for_ms;
1979
1980        if (use_10_for_ms) {
1981                if (len < 8)
1982                        len = 8;
1983
1984                cmd[0] = MODE_SENSE_10;
1985                cmd[8] = len;
1986                header_length = 8;
1987        } else {
1988                if (len < 4)
1989                        len = 4;
1990
1991                cmd[0] = MODE_SENSE;
1992                cmd[4] = len;
1993                header_length = 4;
1994        }
1995
1996        memset(buffer, 0, len);
1997
1998        result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1999                                  sshdr, timeout, retries, NULL);
2000
2001        /* This code ensures that an ILLEGAL REQUEST sense return
2002         * identifies the command opcode itself as the problem; a device
2003         * can also return ILLEGAL REQUEST when the mode page isn't
2004         * supported, so check the ASC to tell the two cases apart */
2005
2006        if (use_10_for_ms && !scsi_status_is_good(result) &&
2007            (driver_byte(result) & DRIVER_SENSE)) {
2008                if (scsi_sense_valid(sshdr)) {
2009                        if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
2010                            (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
2011                                /* 
2012                                 * Invalid command operation code
2013                                 */
2014                                sdev->use_10_for_ms = 0;
2015                                goto retry;
2016                        }
2017                }
2018        }
2019
2020        if (scsi_status_is_good(result)) {
2021                if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2022                             (modepage == 6 || modepage == 8))) {
2023                        /* Initio breakage? */
2024                        header_length = 0;
2025                        data->length = 13;
2026                        data->medium_type = 0;
2027                        data->device_specific = 0;
2028                        data->longlba = 0;
2029                        data->block_descriptor_length = 0;
2030                } else if (use_10_for_ms) {
2031                        data->length = buffer[0]*256 + buffer[1] + 2;
2032                        data->medium_type = buffer[2];
2033                        data->device_specific = buffer[3];
2034                        data->longlba = buffer[4] & 0x01;
2035                        data->block_descriptor_length = buffer[6]*256
2036                                + buffer[7];
2037                } else {
2038                        data->length = buffer[0] + 1;
2039                        data->medium_type = buffer[1];
2040                        data->device_specific = buffer[2];
2041                        data->block_descriptor_length = buffer[3];
2042                }
2043                data->header_length = header_length;
2044        }
2045
2046        return result;
2047}
2048EXPORT_SYMBOL(scsi_mode_sense);
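
/*
 * Example (editorial sketch, after the pattern used by sd.c): read just
 * the mode parameter header to test the write-protect bit.  The timeout
 * and retry values and example_write_protected() are hypothetical.
 */
static int example_write_protected(struct scsi_device *sdev)
{
        unsigned char buf[8];
        struct scsi_mode_data data;
        int res;

        /* 0x08 sets DBD (no block descriptors); page 0x3F = all pages */
        res = scsi_mode_sense(sdev, 0x08, 0x3F, buf, sizeof(buf),
                              10 * HZ, 3, &data, NULL);
        return scsi_status_is_good(res) && (data.device_specific & 0x80);
}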
2049
2050/**
2051 *      scsi_test_unit_ready - test if unit is ready
2052 *      @sdev:  scsi device to change the state of.
2053 *      @timeout: command timeout
2054 *      @retries: number of retries before failing
2055 *      @sshdr_external: Optional pointer to struct scsi_sense_hdr for
2056 *              returning sense. Make sure that this is cleared before passing
2057 *              in.
2058 *
2059 *      Returns zero if the unit is ready, or an error code if the TUR
2060 *      failed.  For removable media, UNIT_ATTENTION sets ->changed flag.
2061 **/
2062int
2063scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2064                     struct scsi_sense_hdr *sshdr_external)
2065{
2066        char cmd[] = {
2067                TEST_UNIT_READY, 0, 0, 0, 0, 0,
2068        };
2069        struct scsi_sense_hdr *sshdr;
2070        int result;
2071
2072        if (!sshdr_external)
2073                sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
2074        else
2075                sshdr = sshdr_external;
2076
2077        /* try to eat the UNIT_ATTENTION if there are enough retries */
2078        do {
2079                result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2080                                          timeout, retries, NULL);
2081                if (sdev->removable && scsi_sense_valid(sshdr) &&
2082                    sshdr->sense_key == UNIT_ATTENTION)
2083                        sdev->changed = 1;
2084        } while (scsi_sense_valid(sshdr) &&
2085                 sshdr->sense_key == UNIT_ATTENTION && --retries);
2086
2087        if (!sshdr_external)
2088                kfree(sshdr);
2089        return result;
2090}
2091EXPORT_SYMBOL(scsi_test_unit_ready);
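
/*
 * Example (editorial sketch): poll for media presence.  The timeout and
 * retry counts are arbitrary and example_media_ready() is hypothetical.
 */
static int example_media_ready(struct scsi_device *sdev)
{
        struct scsi_sense_hdr sshdr;

        /* zero means the unit reported ready; ->changed is set for us */
        return scsi_test_unit_ready(sdev, 10 * HZ, 3, &sshdr) == 0;
}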
2092
2093/**
2094 *      scsi_device_set_state - Take the given device through the device state model.
2095 *      @sdev:  scsi device to change the state of.
2096 *      @state: state to change to.
2097 *
2098 *      Returns zero if successful, or an error if the requested
2099 *      transition is illegal.
2100 */
2101int
2102scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2103{
2104        enum scsi_device_state oldstate = sdev->sdev_state;
2105
2106        if (state == oldstate)
2107                return 0;
2108
2109        switch (state) {
2110        case SDEV_CREATED:
2111                switch (oldstate) {
2112                case SDEV_CREATED_BLOCK:
2113                        break;
2114                default:
2115                        goto illegal;
2116                }
2117                break;
2118
2119        case SDEV_RUNNING:
2120                switch (oldstate) {
2121                case SDEV_CREATED:
2122                case SDEV_OFFLINE:
2123                case SDEV_TRANSPORT_OFFLINE:
2124                case SDEV_QUIESCE:
2125                case SDEV_BLOCK:
2126                        break;
2127                default:
2128                        goto illegal;
2129                }
2130                break;
2131
2132        case SDEV_QUIESCE:
2133                switch (oldstate) {
2134                case SDEV_RUNNING:
2135                case SDEV_OFFLINE:
2136                case SDEV_TRANSPORT_OFFLINE:
2137                        break;
2138                default:
2139                        goto illegal;
2140                }
2141                break;
2142
2143        case SDEV_OFFLINE:
2144        case SDEV_TRANSPORT_OFFLINE:
2145                switch (oldstate) {
2146                case SDEV_CREATED:
2147                case SDEV_RUNNING:
2148                case SDEV_QUIESCE:
2149                case SDEV_BLOCK:
2150                        break;
2151                default:
2152                        goto illegal;
2153                }
2154                break;
2155
2156        case SDEV_BLOCK:
2157                switch (oldstate) {
2158                case SDEV_RUNNING:
2159                case SDEV_CREATED_BLOCK:
2160                        break;
2161                default:
2162                        goto illegal;
2163                }
2164                break;
2165
2166        case SDEV_CREATED_BLOCK:
2167                switch (oldstate) {
2168                case SDEV_CREATED:
2169                        break;
2170                default:
2171                        goto illegal;
2172                }
2173                break;
2174
2175        case SDEV_CANCEL:
2176                switch (oldstate) {
2177                case SDEV_CREATED:
2178                case SDEV_RUNNING:
2179                case SDEV_QUIESCE:
2180                case SDEV_OFFLINE:
2181                case SDEV_TRANSPORT_OFFLINE:
2182                case SDEV_BLOCK:
2183                        break;
2184                default:
2185                        goto illegal;
2186                }
2187                break;
2188
2189        case SDEV_DEL:
2190                switch (oldstate) {
2191                case SDEV_CREATED:
2192                case SDEV_RUNNING:
2193                case SDEV_OFFLINE:
2194                case SDEV_TRANSPORT_OFFLINE:
2195                case SDEV_CANCEL:
2196                case SDEV_CREATED_BLOCK:
2197                        break;
2198                default:
2199                        goto illegal;
2200                }
2201                break;
2202
2203        }
2204        sdev->sdev_state = state;
2205        return 0;
2206
2207 illegal:
2208        SCSI_LOG_ERROR_RECOVERY(1, 
2209                                sdev_printk(KERN_ERR, sdev,
2210                                            "Illegal state transition %s->%s\n",
2211                                            scsi_device_state_name(oldstate),
2212                                            scsi_device_state_name(state))
2213                                );
2214        return -EINVAL;
2215}
2216EXPORT_SYMBOL(scsi_device_set_state);
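
/*
 * Example (editorial sketch): taking a device offline and handling an
 * illegal transition.  Whether additional locking is needed depends on
 * the caller's context; example_take_offline() is hypothetical.
 */
static int example_take_offline(struct scsi_device *sdev)
{
        int err = scsi_device_set_state(sdev, SDEV_OFFLINE);

        if (err)        /* -EINVAL: transition not legal from this state */
                sdev_printk(KERN_WARNING, sdev, "cannot go offline\n");
        return err;
}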
2217
2218/**
2219 *      scsi_evt_emit - emit a single SCSI device uevent
2220 *      @sdev: associated SCSI device
2221 *      @evt: event to emit
2222 *
2223 *      Send a single uevent (scsi_event) to the associated scsi_device.
2224 */
2225static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2226{
2227        int idx = 0;
2228        char *envp[3];
2229
2230        switch (evt->evt_type) {
2231        case SDEV_EVT_MEDIA_CHANGE:
2232                envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2233                break;
2234
2235        default:
2236                /* do nothing */
2237                break;
2238        }
2239
2240        envp[idx++] = NULL;
2241
2242        kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2243}
2244
2245/**
2246 *      scsi_evt_thread - send a uevent for each scsi event
2247 *      @work: work struct for scsi_device
2248 *
2249 *      Dispatch queued events to their associated scsi_device kobjects
2250 *      as uevents.
2251 */
2252void scsi_evt_thread(struct work_struct *work)
2253{
2254        struct scsi_device *sdev;
2255        LIST_HEAD(event_list);
2256
2257        sdev = container_of(work, struct scsi_device, event_work);
2258
2259        while (1) {
2260                struct scsi_event *evt;
2261                struct list_head *this, *tmp;
2262                unsigned long flags;
2263
2264                spin_lock_irqsave(&sdev->list_lock, flags);
2265                list_splice_init(&sdev->event_list, &event_list);
2266                spin_unlock_irqrestore(&sdev->list_lock, flags);
2267
2268                if (list_empty(&event_list))
2269                        break;
2270
2271                list_for_each_safe(this, tmp, &event_list) {
2272                        evt = list_entry(this, struct scsi_event, node);
2273                        list_del(&evt->node);
2274                        scsi_evt_emit(sdev, evt);
2275                        kfree(evt);
2276                }
2277        }
2278}
2279
2280/**
2281 *      sdev_evt_send - send asserted event to uevent thread
2282 *      @sdev: scsi_device event occurred on
2283 *      @evt: event to send
2284 *
2285 *      Assert scsi device event asynchronously.
2286 */
2287void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2288{
2289        unsigned long flags;
2290
2291#if 0
2292        /* FIXME: currently this check eliminates all media change events
2293         * for polled devices.  Need to update to discriminate between AN
2294         * and polled events */
2295        if (!test_bit(evt->evt_type, sdev->supported_events)) {
2296                kfree(evt);
2297                return;
2298        }
2299#endif
2300
2301        spin_lock_irqsave(&sdev->list_lock, flags);
2302        list_add_tail(&evt->node, &sdev->event_list);
2303        schedule_work(&sdev->event_work);
2304        spin_unlock_irqrestore(&sdev->list_lock, flags);
2305}
2306EXPORT_SYMBOL_GPL(sdev_evt_send);
2307
2308/**
2309 *      sdev_evt_alloc - allocate a new scsi event
2310 *      @evt_type: type of event to allocate
2311 *      @gfpflags: GFP flags for allocation
2312 *
2313 *      Allocates and returns a new scsi_event.
2314 */
2315struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2316                                  gfp_t gfpflags)
2317{
2318        struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2319        if (!evt)
2320                return NULL;
2321
2322        evt->evt_type = evt_type;
2323        INIT_LIST_HEAD(&evt->node);
2324
2325        /* evt_type-specific initialization, if any */
2326        switch (evt_type) {
2327        case SDEV_EVT_MEDIA_CHANGE:
2328        default:
2329                /* do nothing */
2330                break;
2331        }
2332
2333        return evt;
2334}
2335EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2336
2337/**
2338 *      sdev_evt_send_simple - send asserted event to uevent thread
2339 *      @sdev: scsi_device event occurred on
2340 *      @evt_type: type of event to send
2341 *      @gfpflags: GFP flags for allocation
2342 *
2343 *      Assert scsi device event asynchronously, given an event type.
2344 */
2345void sdev_evt_send_simple(struct scsi_device *sdev,
2346                          enum scsi_device_event evt_type, gfp_t gfpflags)
2347{
2348        struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2349        if (!evt) {
2350                sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2351                            evt_type);
2352                return;
2353        }
2354
2355        sdev_evt_send(sdev, evt);
2356}
2357EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
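
/*
 * Example (editorial sketch): an LLD that detects a media change from an
 * async event handler, possibly in interrupt context, reports it with
 * GFP_ATOMIC.  example_async_event() is hypothetical.
 */
static void example_async_event(struct scsi_device *sdev)
{
        sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
}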
2358
2359/**
2360 *      scsi_device_quiesce - Block user issued commands.
2361 *      @sdev:  scsi device to quiesce.
2362 *
2363 *      This works by trying to transition to the SDEV_QUIESCE state
2364 *      (which must be a legal transition).  When the device is in this
2365 *      state, only special requests will be accepted, all others will
2366 *      be deferred.  Since special requests may also be requeued requests,
2367 *      a successful return doesn't guarantee the device will be 
2368 *      totally quiescent.
2369 *
2370 *      Must be called with user context, may sleep.
2371 *
2372 *      Returns zero if successful, or an error if not.
2373 */
2374int
2375scsi_device_quiesce(struct scsi_device *sdev)
2376{
2377        int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2378        if (err)
2379                return err;
2380
2381        scsi_run_queue(sdev->request_queue);
2382        while (sdev->device_busy) {
2383                msleep_interruptible(200);
2384                scsi_run_queue(sdev->request_queue);
2385        }
2386        return 0;
2387}
2388EXPORT_SYMBOL(scsi_device_quiesce);
2389
2390/**
2391 *      scsi_device_resume - Restart user issued commands to a quiesced device.
2392 *      @sdev:  scsi device to resume.
2393 *
2394 *      Moves the device from quiesced back to running and restarts the
2395 *      queues.
2396 *
2397 *      Must be called with user context, may sleep.
2398 */
2399void scsi_device_resume(struct scsi_device *sdev)
2400{
2401        /* check if the device state was mutated prior to resume, and if
2402         * so assume the state is being managed elsewhere (for example
2403         * device deleted during suspend)
2404         */
2405        if (sdev->sdev_state != SDEV_QUIESCE ||
2406            scsi_device_set_state(sdev, SDEV_RUNNING))
2407                return;
2408        scsi_run_queue(sdev->request_queue);
2409}
2410EXPORT_SYMBOL(scsi_device_resume);
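
/*
 * Example (editorial sketch): the power-management path pairs the two
 * calls above around a suspend/resume cycle.  example_suspend() and
 * example_resume() are hypothetical stand-ins for dev_pm_ops callbacks.
 */
static int example_suspend(struct scsi_device *sdev)
{
        return scsi_device_quiesce(sdev);       /* may sleep */
}

static void example_resume(struct scsi_device *sdev)
{
        scsi_device_resume(sdev);               /* reruns the queue */
}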
2411
2412static void
2413device_quiesce_fn(struct scsi_device *sdev, void *data)
2414{
2415        scsi_device_quiesce(sdev);
2416}
2417
2418void
2419scsi_target_quiesce(struct scsi_target *starget)
2420{
2421        starget_for_each_device(starget, NULL, device_quiesce_fn);
2422}
2423EXPORT_SYMBOL(scsi_target_quiesce);
2424
2425static void
2426device_resume_fn(struct scsi_device *sdev, void *data)
2427{
2428        scsi_device_resume(sdev);
2429}
2430
2431void
2432scsi_target_resume(struct scsi_target *starget)
2433{
2434        starget_for_each_device(starget, NULL, device_resume_fn);
2435}
2436EXPORT_SYMBOL(scsi_target_resume);
2437
2438/**
2439 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2440 * @sdev:       device to block
2441 *
2442 * Block request made by scsi lld's to temporarily stop all
2443 * scsi commands on the specified device.  Called from interrupt
2444 * or normal process context.
2445 *
2446 * Returns zero if successful or error if not
2447 *
2448 * Notes:       
2449 *      This routine transitions the device to the SDEV_BLOCK state
2450 *      (which must be a legal transition).  When the device is in this
2451 *      state, all commands are deferred until the scsi lld reenables
2452 *      the device with scsi_internal_device_unblock() or device_block_tmo fires.
2453 */
2454int
2455scsi_internal_device_block(struct scsi_device *sdev)
2456{
2457        struct request_queue *q = sdev->request_queue;
2458        unsigned long flags;
2459        int err = 0;
2460
2461        err = scsi_device_set_state(sdev, SDEV_BLOCK);
2462        if (err) {
2463                err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2464
2465                if (err)
2466                        return err;
2467        }
2468
2469        /* 
2470         * The device has transitioned to SDEV_BLOCK.  Stop the
2471         * block layer from calling the midlayer with this device's
2472         * request queue. 
2473         */
2474        spin_lock_irqsave(q->queue_lock, flags);
2475        blk_stop_queue(q);
2476        spin_unlock_irqrestore(q->queue_lock, flags);
2477
2478        return 0;
2479}
2480EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2481 
2482/**
2483 * scsi_internal_device_unblock - resume a device after a block request
2484 * @sdev:       device to resume
2485 * @new_state:  state to set devices to after unblocking
2486 *
2487 * Called by scsi lld's or the midlayer to restart the device queue
2488 * for the previously suspended scsi device.  Called from interrupt or
2489 * normal process context.
2490 *
2491 * Returns zero if successful or error if not.
2492 *
2493 * Notes:       
2494 *      This routine transitions the device to the SDEV_RUNNING state
2495 *      or to one of the offline states (which must be a legal transition)
2496 *      allowing the midlayer to goose the queue for this device.
2497 */
2498int
2499scsi_internal_device_unblock(struct scsi_device *sdev,
2500                             enum scsi_device_state new_state)
2501{
2502        struct request_queue *q = sdev->request_queue; 
2503        unsigned long flags;
2504
2505        /*
2506         * Try to transition the scsi device to SDEV_RUNNING or one of the
2507         * offlined states and goose the device queue if successful.
2508         */
2509        if ((sdev->sdev_state == SDEV_BLOCK) ||
2510            (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
2511                sdev->sdev_state = new_state;
2512        else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
2513                if (new_state == SDEV_TRANSPORT_OFFLINE ||
2514                    new_state == SDEV_OFFLINE)
2515                        sdev->sdev_state = new_state;
2516                else
2517                        sdev->sdev_state = SDEV_CREATED;
2518        } else if (sdev->sdev_state != SDEV_CANCEL &&
2519                 sdev->sdev_state != SDEV_OFFLINE)
2520                return -EINVAL;
2521
2522        spin_lock_irqsave(q->queue_lock, flags);
2523        blk_start_queue(q);
2524        spin_unlock_irqrestore(q->queue_lock, flags);
2525
2526        return 0;
2527}
2528EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2529
2530static void
2531device_block(struct scsi_device *sdev, void *data)
2532{
2533        scsi_internal_device_block(sdev);
2534}
2535
2536static int
2537target_block(struct device *dev, void *data)
2538{
2539        if (scsi_is_target_device(dev))
2540                starget_for_each_device(to_scsi_target(dev), NULL,
2541                                        device_block);
2542        return 0;
2543}
2544
2545void
2546scsi_target_block(struct device *dev)
2547{
2548        if (scsi_is_target_device(dev))
2549                starget_for_each_device(to_scsi_target(dev), NULL,
2550                                        device_block);
2551        else
2552                device_for_each_child(dev, NULL, target_block);
2553}
2554EXPORT_SYMBOL_GPL(scsi_target_block);
2555
2556static void
2557device_unblock(struct scsi_device *sdev, void *data)
2558{
2559        scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
2560}
2561
2562static int
2563target_unblock(struct device *dev, void *data)
2564{
2565        if (scsi_is_target_device(dev))
2566                starget_for_each_device(to_scsi_target(dev), data,
2567                                        device_unblock);
2568        return 0;
2569}
2570
2571void
2572scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
2573{
2574        if (scsi_is_target_device(dev))
2575                starget_for_each_device(to_scsi_target(dev), &new_state,
2576                                        device_unblock);
2577        else
2578                device_for_each_child(dev, &new_state, target_unblock);
2579}
2580EXPORT_SYMBOL_GPL(scsi_target_unblock);
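
/*
 * Example (editorial sketch, modeled on how transport classes treat a
 * temporary link loss): block the target while the link is down, then
 * unblock into SDEV_RUNNING when it returns, or into
 * SDEV_TRANSPORT_OFFLINE if it never does.  example_link_down() and
 * example_link_up() are hypothetical.
 */
static void example_link_down(struct device *target_dev)
{
        scsi_target_block(target_dev);
}

static void example_link_up(struct device *target_dev, bool recovered)
{
        scsi_target_unblock(target_dev,
                            recovered ? SDEV_RUNNING
                                      : SDEV_TRANSPORT_OFFLINE);
}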
2581
2582/**
2583 * scsi_kmap_atomic_sg - find and atomically map an sg-element
2584 * @sgl:        scatter-gather list
2585 * @sg_count:   number of segments in sg
2586 * @offset:     offset in bytes into sg, on return offset into the mapped area
2587 * @len:        bytes to map, on return number of bytes mapped
2588 *
2589 * Returns virtual address of the start of the mapped page
2590 */
2591void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2592                          size_t *offset, size_t *len)
2593{
2594        int i;
2595        size_t sg_len = 0, len_complete = 0;
2596        struct scatterlist *sg;
2597        struct page *page;
2598
2599        WARN_ON(!irqs_disabled());
2600
2601        for_each_sg(sgl, sg, sg_count, i) {
2602                len_complete = sg_len; /* Complete sg-entries */
2603                sg_len += sg->length;
2604                if (sg_len > *offset)
2605                        break;
2606        }
2607
2608        if (unlikely(i == sg_count)) {
2609                printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2610                        "elements %d\n",
2611                       __func__, sg_len, *offset, sg_count);
2612                WARN_ON(1);
2613                return NULL;
2614        }
2615
2616        /* Offset starting from the beginning of first page in this sg-entry */
2617        *offset = *offset - len_complete + sg->offset;
2618
2619        /* Assumption: contiguous pages can be accessed as "page + i" */
2620        page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2621        *offset &= ~PAGE_MASK;
2622
2623        /* Bytes in this sg-entry from *offset to the end of the page */
2624        sg_len = PAGE_SIZE - *offset;
2625        if (*len > sg_len)
2626                *len = sg_len;
2627
2628        return kmap_atomic(page);
2629}
2630EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2631
2632/**
2633 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2634 * @virt:       virtual address to be unmapped
2635 */
2636void scsi_kunmap_atomic_sg(void *virt)
2637{
2638        kunmap_atomic(virt);
2639}
2640EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
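
/*
 * Example (editorial sketch): copy data out of a scatter-gather list one
 * mapped page at a time.  Note that @offset and @len are in/out
 * parameters, and that the map/unmap pair must run with interrupts
 * disabled.  example_copy_from_sg() is hypothetical.
 */
static size_t example_copy_from_sg(struct scatterlist *sgl, int sg_count,
                                   char *dst, size_t total)
{
        size_t copied = 0;

        while (copied < total) {
                size_t offset = copied;         /* in: offset into the list */
                size_t len = total - copied;    /* in: bytes still wanted */
                unsigned long flags;
                void *vaddr;

                local_irq_save(flags);
                vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &offset, &len);
                if (vaddr) {    /* out: offset/len now refer to this page */
                        memcpy(dst + copied, vaddr + offset, len);
                        scsi_kunmap_atomic_sg(vaddr);
                }
                local_irq_restore(flags);

                if (!vaddr)
                        break;
                copied += len;
        }
        return copied;
}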
2641
2642void sdev_disable_disk_events(struct scsi_device *sdev)
2643{
2644        atomic_inc(&sdev->disk_events_disable_depth);
2645}
2646EXPORT_SYMBOL(sdev_disable_disk_events);
2647
2648void sdev_enable_disk_events(struct scsi_device *sdev)
2649{
2650        if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
2651                return;
2652        atomic_dec(&sdev->disk_events_disable_depth);
2653}
2654EXPORT_SYMBOL(sdev_enable_disk_events);
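
/*
 * Example (editorial sketch): suppress media-change polling across an
 * operation that could raise spurious events.  The calls nest, so each
 * disable needs a matching enable; example_quiet_operation() is
 * hypothetical.
 */
static void example_quiet_operation(struct scsi_device *sdev)
{
        sdev_disable_disk_events(sdev);
        /* ...issue commands that may provoke unit attentions... */
        sdev_enable_disk_events(sdev);
}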
2655