linux/drivers/scsi/scsi.c
<<
>>
Prefs
   1/*
   2 *  scsi.c Copyright (C) 1992 Drew Eckhardt
   3 *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
   4 *         Copyright (C) 2002, 2003 Christoph Hellwig
   5 *
   6 *  generic mid-level SCSI driver
   7 *      Initial versions: Drew Eckhardt
   8 *      Subsequent revisions: Eric Youngdale
   9 *
  10 *  <drew@colorado.edu>
  11 *
  12 *  Bug correction thanks go to :
  13 *      Rik Faith <faith@cs.unc.edu>
  14 *      Tommy Thorn <tthorn>
  15 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
  16 *
  17 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
  18 *  add scatter-gather, multiple outstanding request, and other
  19 *  enhancements.
  20 *
  21 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
  22 *  support added by Michael Neuffer <mike@i-connect.net>
  23 *
  24 *  Added request_module("scsi_hostadapter") for kerneld:
  25 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
  26 *  Bjorn Ekwall  <bj0rn@blox.se>
  27 *  (changed to kmod)
  28 *
  29 *  Major improvements to the timeout, abort, and reset processing,
  30 *  as well as performance modifications for large queue depths by
  31 *  Leonard N. Zubkoff <lnz@dandelion.com>
  32 *
  33 *  Converted cli() code to spinlocks, Ingo Molnar
  34 *
  35 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
  36 *
  37 *  out_of_space hacks, D. Gilbert (dpg) 990608
  38 */
  39
  40#include <linux/module.h>
  41#include <linux/moduleparam.h>
  42#include <linux/kernel.h>
  43#include <linux/timer.h>
  44#include <linux/string.h>
  45#include <linux/slab.h>
  46#include <linux/blkdev.h>
  47#include <linux/delay.h>
  48#include <linux/init.h>
  49#include <linux/completion.h>
  50#include <linux/unistd.h>
  51#include <linux/spinlock.h>
  52#include <linux/kmod.h>
  53#include <linux/interrupt.h>
  54#include <linux/notifier.h>
  55#include <linux/cpu.h>
  56#include <linux/mutex.h>
  57#include <linux/async.h>
  58#include <asm/unaligned.h>
  59
  60#include <scsi/scsi.h>
  61#include <scsi/scsi_cmnd.h>
  62#include <scsi/scsi_dbg.h>
  63#include <scsi/scsi_device.h>
  64#include <scsi/scsi_driver.h>
  65#include <scsi/scsi_eh.h>
  66#include <scsi/scsi_host.h>
  67#include <scsi/scsi_tcq.h>
  68
  69#include "scsi_priv.h"
  70#include "scsi_logging.h"
  71
  72#define CREATE_TRACE_POINTS
  73#include <trace/events/scsi.h>
  74
  75/*
  76 * Definitions and constants.
  77 */
  78
/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
/* Only exported when logging support is compiled in. */
EXPORT_SYMBOL(scsi_logging_level);
#endif

/* sd, scsi core and power management need to coordinate flushing async actions */
ASYNC_DOMAIN(scsi_sd_probe_domain);
EXPORT_SYMBOL(scsi_sd_probe_domain);
  91
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
 * You may not alter any existing entry (although adding new ones is
 * encouraged once assigned by ANSI/INCITS T10).
 */
/*
 * Fixed-width (17 character) names, indexed by SCSI peripheral device
 * type code (0x00 - 0x12); scsi_device_type() indexes directly into this
 * table.  NOTE(review): two consecutive entries both read "ASC IT8" —
 * presumably two adjacent T10 type codes share the name; do not "fix"
 * by deduplicating, the index mapping is part of the /proc ABI.
 */
static const char *const scsi_device_types[] = {
	"Direct-Access    ",
	"Sequential-Access",
	"Printer          ",
	"Processor        ",
	"WORM             ",
	"CD-ROM           ",
	"Scanner          ",
	"Optical Device   ",
	"Medium Changer   ",
	"Communications   ",
	"ASC IT8          ",
	"ASC IT8          ",
	"RAID             ",
	"Enclosure        ",
	"Direct-Access-RBC",
	"Optical card     ",
	"Bridge controller",
	"Object storage   ",
	"Automation/Drive ",
};
 117
 118/**
 119 * scsi_device_type - Return 17 char string indicating device type.
 120 * @type: type number to look up
 121 */
 122
 123const char * scsi_device_type(unsigned type)
 124{
 125        if (type == 0x1e)
 126                return "Well-known LUN   ";
 127        if (type == 0x1f)
 128                return "No Device        ";
 129        if (type >= ARRAY_SIZE(scsi_device_types))
 130                return "Unknown          ";
 131        return scsi_device_types[type];
 132}
 133
 134EXPORT_SYMBOL(scsi_device_type);
 135
/*
 * A pair of slab caches from which struct scsi_cmnd and the matching
 * sense buffers are carved, plus the bookkeeping needed to share a pool
 * between hosts or tear down a driver-private one.
 */
struct scsi_host_cmd_pool {
	struct kmem_cache	*cmd_slab;	/* struct scsi_cmnd (+ per-driver cmd_size) */
	struct kmem_cache	*sense_slab;	/* SCSI_SENSE_BUFFERSIZE sense buffers */
	unsigned int		users;		/* get/put reference count of hosts */
	char			*cmd_name;	/* cmd_slab cache name */
	char			*sense_name;	/* sense_slab cache name */
	unsigned int		slab_flags;	/* flags for kmem_cache_create() */
	gfp_t			gfp_mask;	/* extra allocation flags (e.g. __GFP_DMA) */
};

/* Shared pool for ordinary hosts without per-command driver data. */
static struct scsi_host_cmd_pool scsi_cmd_pool = {
	.cmd_name	= "scsi_cmd_cache",
	.sense_name	= "scsi_sense_cache",
	.slab_flags	= SLAB_HWCACHE_ALIGN,
};

/* Shared pool for hosts restricted to ISA DMA addressing. */
static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
	.cmd_name	= "scsi_cmd_cache(DMA)",
	.sense_name	= "scsi_sense_cache(DMA)",
	.slab_flags	= SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
	.gfp_mask	= __GFP_DMA,
};

/* Serializes pool lookup, slab creation and destruction. */
static DEFINE_MUTEX(host_cmd_pool_mutex);
 160
 161/**
 162 * scsi_host_free_command - internal function to release a command
 163 * @shost:      host to free the command for
 164 * @cmd:        command to release
 165 *
 166 * the command must previously have been allocated by
 167 * scsi_host_alloc_command.
 168 */
 169static void
 170scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 171{
 172        struct scsi_host_cmd_pool *pool = shost->cmd_pool;
 173
 174        if (cmd->prot_sdb)
 175                kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
 176        kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
 177        kmem_cache_free(pool->cmd_slab, cmd);
 178}
 179
 180/**
 181 * scsi_host_alloc_command - internal function to allocate command
 182 * @shost:      SCSI host whose pool to allocate from
 183 * @gfp_mask:   mask for the allocation
 184 *
 185 * Returns a fully allocated command with sense buffer and protection
 186 * data buffer (where applicable) or NULL on failure
 187 */
 188static struct scsi_cmnd *
 189scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
 190{
 191        struct scsi_host_cmd_pool *pool = shost->cmd_pool;
 192        struct scsi_cmnd *cmd;
 193
 194        cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
 195        if (!cmd)
 196                goto fail;
 197
 198        cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
 199                                             gfp_mask | pool->gfp_mask);
 200        if (!cmd->sense_buffer)
 201                goto fail_free_cmd;
 202
 203        if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
 204                cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
 205                if (!cmd->prot_sdb)
 206                        goto fail_free_sense;
 207        }
 208
 209        return cmd;
 210
 211fail_free_sense:
 212        kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
 213fail_free_cmd:
 214        kmem_cache_free(pool->cmd_slab, cmd);
 215fail:
 216        return NULL;
 217}
 218
 219/**
 220 * __scsi_get_command - Allocate a struct scsi_cmnd
 221 * @shost: host to transmit command
 222 * @gfp_mask: allocation mask
 223 *
 224 * Description: allocate a struct scsi_cmd from host's slab, recycling from the
 225 *              host's free_list if necessary.
 226 */
 227struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
 228{
 229        struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);
 230
 231        if (unlikely(!cmd)) {
 232                unsigned long flags;
 233
 234                spin_lock_irqsave(&shost->free_list_lock, flags);
 235                if (likely(!list_empty(&shost->free_list))) {
 236                        cmd = list_entry(shost->free_list.next,
 237                                         struct scsi_cmnd, list);
 238                        list_del_init(&cmd->list);
 239                }
 240                spin_unlock_irqrestore(&shost->free_list_lock, flags);
 241
 242                if (cmd) {
 243                        void *buf, *prot;
 244
 245                        buf = cmd->sense_buffer;
 246                        prot = cmd->prot_sdb;
 247
 248                        memset(cmd, 0, sizeof(*cmd));
 249
 250                        cmd->sense_buffer = buf;
 251                        cmd->prot_sdb = prot;
 252                }
 253        }
 254
 255        return cmd;
 256}
 257EXPORT_SYMBOL_GPL(__scsi_get_command);
 258
 259/**
 260 * scsi_get_command - Allocate and setup a scsi command block
 261 * @dev: parent scsi device
 262 * @gfp_mask: allocator flags
 263 *
 264 * Returns:     The allocated scsi command structure.
 265 */
 266struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
 267{
 268        struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
 269        unsigned long flags;
 270
 271        if (unlikely(cmd == NULL))
 272                return NULL;
 273
 274        cmd->device = dev;
 275        INIT_LIST_HEAD(&cmd->list);
 276        INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
 277        spin_lock_irqsave(&dev->list_lock, flags);
 278        list_add_tail(&cmd->list, &dev->cmd_list);
 279        spin_unlock_irqrestore(&dev->list_lock, flags);
 280        cmd->jiffies_at_alloc = jiffies;
 281        return cmd;
 282}
 283EXPORT_SYMBOL(scsi_get_command);
 284
 285/**
 286 * __scsi_put_command - Free a struct scsi_cmnd
 287 * @shost: dev->host
 288 * @cmd: Command to free
 289 */
 290void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 291{
 292        unsigned long flags;
 293
 294        if (unlikely(list_empty(&shost->free_list))) {
 295                spin_lock_irqsave(&shost->free_list_lock, flags);
 296                if (list_empty(&shost->free_list)) {
 297                        list_add(&cmd->list, &shost->free_list);
 298                        cmd = NULL;
 299                }
 300                spin_unlock_irqrestore(&shost->free_list_lock, flags);
 301        }
 302
 303        if (likely(cmd != NULL))
 304                scsi_host_free_command(shost, cmd);
 305}
 306EXPORT_SYMBOL(__scsi_put_command);
 307
 308/**
 309 * scsi_put_command - Free a scsi command block
 310 * @cmd: command block to free
 311 *
 312 * Returns:     Nothing.
 313 *
 314 * Notes:       The command must not belong to any lists.
 315 */
 316void scsi_put_command(struct scsi_cmnd *cmd)
 317{
 318        unsigned long flags;
 319
 320        /* serious error if the command hasn't come from a device list */
 321        spin_lock_irqsave(&cmd->device->list_lock, flags);
 322        BUG_ON(list_empty(&cmd->list));
 323        list_del_init(&cmd->list);
 324        spin_unlock_irqrestore(&cmd->device->list_lock, flags);
 325
 326        cancel_delayed_work(&cmd->abort_work);
 327
 328        __scsi_put_command(cmd->device->host, cmd);
 329}
 330EXPORT_SYMBOL(scsi_put_command);
 331
 332static struct scsi_host_cmd_pool *
 333scsi_find_host_cmd_pool(struct Scsi_Host *shost)
 334{
 335        if (shost->hostt->cmd_size)
 336                return shost->hostt->cmd_pool;
 337        if (shost->unchecked_isa_dma)
 338                return &scsi_cmd_dma_pool;
 339        return &scsi_cmd_pool;
 340}
 341
 342static void
 343scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
 344{
 345        kfree(pool->sense_name);
 346        kfree(pool->cmd_name);
 347        kfree(pool);
 348}
 349
 350static struct scsi_host_cmd_pool *
 351scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
 352{
 353        struct scsi_host_template *hostt = shost->hostt;
 354        struct scsi_host_cmd_pool *pool;
 355
 356        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 357        if (!pool)
 358                return NULL;
 359
 360        pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->proc_name);
 361        pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->proc_name);
 362        if (!pool->cmd_name || !pool->sense_name) {
 363                scsi_free_host_cmd_pool(pool);
 364                return NULL;
 365        }
 366
 367        pool->slab_flags = SLAB_HWCACHE_ALIGN;
 368        if (shost->unchecked_isa_dma) {
 369                pool->slab_flags |= SLAB_CACHE_DMA;
 370                pool->gfp_mask = __GFP_DMA;
 371        }
 372
 373        if (hostt->cmd_size)
 374                hostt->cmd_pool = pool;
 375
 376        return pool;
 377}
 378
/*
 * scsi_get_host_cmd_pool - look up (or create) the pool for @shost and
 * take a reference on it.
 *
 * The slab caches are created lazily on the first user.  Returns the
 * pool on success or NULL on failure.  On failure only driver-private
 * pools are freed; the static shared pools are left in place (their
 * slab pointers are simply recreated on the next attempt).
 * host_cmd_pool_mutex protects the pool list, users count and slab
 * creation against concurrent hosts.
 */
static struct scsi_host_cmd_pool *
scsi_get_host_cmd_pool(struct Scsi_Host *shost)
{
	struct scsi_host_template *hostt = shost->hostt;
	struct scsi_host_cmd_pool *retval = NULL, *pool;
	/* Commands are oversized by the driver's per-command payload. */
	size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;

	/*
	 * Select a command slab for this host and create it if not
	 * yet existent.
	 */
	mutex_lock(&host_cmd_pool_mutex);
	pool = scsi_find_host_cmd_pool(shost);
	if (!pool) {
		pool = scsi_alloc_host_cmd_pool(shost);
		if (!pool)
			goto out;
	}

	/* First user creates the backing slab caches. */
	if (!pool->users) {
		pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
						   pool->slab_flags, NULL);
		if (!pool->cmd_slab)
			goto out_free_pool;

		pool->sense_slab = kmem_cache_create(pool->sense_name,
						     SCSI_SENSE_BUFFERSIZE, 0,
						     pool->slab_flags, NULL);
		if (!pool->sense_slab)
			goto out_free_slab;
	}

	pool->users++;
	retval = pool;
out:
	mutex_unlock(&host_cmd_pool_mutex);
	return retval;

out_free_slab:
	kmem_cache_destroy(pool->cmd_slab);
out_free_pool:
	if (hostt->cmd_size) {
		scsi_free_host_cmd_pool(pool);
		hostt->cmd_pool = NULL;
	}
	goto out;
}
 426
 427static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
 428{
 429        struct scsi_host_template *hostt = shost->hostt;
 430        struct scsi_host_cmd_pool *pool;
 431
 432        mutex_lock(&host_cmd_pool_mutex);
 433        pool = scsi_find_host_cmd_pool(shost);
 434
 435        /*
 436         * This may happen if a driver has a mismatched get and put
 437         * of the command pool; the driver should be implicated in
 438         * the stack trace
 439         */
 440        BUG_ON(pool->users == 0);
 441
 442        if (!--pool->users) {
 443                kmem_cache_destroy(pool->cmd_slab);
 444                kmem_cache_destroy(pool->sense_slab);
 445                if (hostt->cmd_size) {
 446                        scsi_free_host_cmd_pool(pool);
 447                        hostt->cmd_pool = NULL;
 448                }
 449        }
 450        mutex_unlock(&host_cmd_pool_mutex);
 451}
 452
 453/**
 454 * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
 455 * @shost: host to allocate the freelist for.
 456 *
 457 * Description: The command freelist protects against system-wide out of memory
 458 * deadlock by preallocating one SCSI command structure for each host, so the
 459 * system can always write to a swap file on a device associated with that host.
 460 *
 461 * Returns:     Nothing.
 462 */
 463int scsi_setup_command_freelist(struct Scsi_Host *shost)
 464{
 465        const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
 466        struct scsi_cmnd *cmd;
 467
 468        spin_lock_init(&shost->free_list_lock);
 469        INIT_LIST_HEAD(&shost->free_list);
 470
 471        shost->cmd_pool = scsi_get_host_cmd_pool(shost);
 472        if (!shost->cmd_pool)
 473                return -ENOMEM;
 474
 475        /*
 476         * Get one backup command for this host.
 477         */
 478        cmd = scsi_host_alloc_command(shost, gfp_mask);
 479        if (!cmd) {
 480                scsi_put_host_cmd_pool(shost);
 481                shost->cmd_pool = NULL;
 482                return -ENOMEM;
 483        }
 484        list_add(&cmd->list, &shost->free_list);
 485        return 0;
 486}
 487
 488/**
 489 * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
 490 * @shost: host whose freelist is going to be destroyed
 491 */
 492void scsi_destroy_command_freelist(struct Scsi_Host *shost)
 493{
 494        /*
 495         * If cmd_pool is NULL the free list was not initialized, so
 496         * do not attempt to release resources.
 497         */
 498        if (!shost->cmd_pool)
 499                return;
 500
 501        while (!list_empty(&shost->free_list)) {
 502                struct scsi_cmnd *cmd;
 503
 504                cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
 505                list_del_init(&cmd->list);
 506                scsi_host_free_command(shost, cmd);
 507        }
 508        shost->cmd_pool = NULL;
 509        scsi_put_host_cmd_pool(shost);
 510}
 511
 512#ifdef CONFIG_SCSI_LOGGING
 513void scsi_log_send(struct scsi_cmnd *cmd)
 514{
 515        unsigned int level;
 516
 517        /*
 518         * If ML QUEUE log level is greater than or equal to:
 519         *
 520         * 1: nothing (match completion)
 521         *
 522         * 2: log opcode + command of all commands + cmd address
 523         *
 524         * 3: same as 2
 525         *
 526         * 4: same as 3
 527         */
 528        if (unlikely(scsi_logging_level)) {
 529                level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
 530                                       SCSI_LOG_MLQUEUE_BITS);
 531                if (level > 1) {
 532                        scmd_printk(KERN_INFO, cmd,
 533                                    "Send: scmd 0x%p\n", cmd);
 534                        scsi_print_command(cmd);
 535                }
 536        }
 537}
 538
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		/* level 1 logs only failures; level 2+ logs everything */
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			scsi_print_result(cmd, "Done", disposition);
			scsi_print_command(cmd);
			/* dump sense when the shifted status has the
			 * CHECK_CONDITION bit set */
			if (status_byte(cmd->result) & CHECK_CONDITION)
				scsi_print_sense(cmd);
			if (level > 3)
				scmd_printk(KERN_INFO, cmd,
					    "scsi host busy %d failed %d\n",
					    atomic_read(&cmd->device->host->host_busy),
					    cmd->device->host->host_failed);
		}
	}
}
 572#endif
 573
 574/**
 575 * scsi_cmd_get_serial - Assign a serial number to a command
 576 * @host: the scsi host
 577 * @cmd: command to assign serial number to
 578 *
 579 * Description: a serial number identifies a request for error recovery
 580 * and debugging purposes.  Protected by the Host_Lock of host.
 581 */
 582void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 583{
 584        cmd->serial_number = host->cmd_serial_number++;
 585        if (cmd->serial_number == 0) 
 586                cmd->serial_number = host->cmd_serial_number++;
 587}
 588EXPORT_SYMBOL(scsi_cmd_get_serial);
 589
/**
 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero return request was rejected and device's queue needs to be
 * plugged.
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	/* A host being torn down cannot accept new work; error upwards. */
	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;

	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		/* normalize unexpected rejection codes to "host busy" */
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	cmd->scsi_done(cmd);
	return 0;
}
 670
/**
 * scsi_finish_command - cleanup and pass command back to upper layer
 * @cmd: the command
 *
 * Description: Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = sdev->host;
	struct scsi_driver *drv;
	unsigned int good_bytes;

	scsi_device_unbusy(sdev);

	/*
	 * Clear the flags that say that the device/target/host is no longer
	 * capable of accepting new commands.
	 */
	if (atomic_read(&shost->host_blocked))
		atomic_set(&shost->host_blocked, 0);
	if (atomic_read(&starget->target_blocked))
		atomic_set(&starget->target_blocked, 0);
	if (atomic_read(&sdev->device_blocked))
		atomic_set(&sdev->device_blocked, 0);

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	/* Assume full transfer; upper drivers may revise this below. */
	good_bytes = scsi_bufflen(cmd);
	if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
		int old_good_bytes = good_bytes;
		drv = scsi_cmd_to_driver(cmd);
		if (drv->done)
			good_bytes = drv->done(cmd);
		/*
		 * USB may not give sense identifying bad sector and
		 * simply return a residue instead, so subtract off the
		 * residue if drv->done() error processing indicates no
		 * change to the completion length.
		 */
		if (good_bytes == old_good_bytes)
			good_bytes -= scsi_get_resid(cmd);
	}
	scsi_io_completion(cmd, good_bytes);
}
EXPORT_SYMBOL(scsi_finish_command);
 729
/**
 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
 * @sdev: SCSI Device in question
 * @tagged: Do we use tagged queueing (non-0) or do we treat
 *          this device as an untagged device (0)
 * @tags: Number of tags allowed if tagged queueing enabled,
 *        or number of commands the low level driver can
 *        queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:	Nothing
 *
 * Lock Status: None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		the right thing depending on whether or not the device is
 *		currently active and whether or not it even has the
 *		command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
	unsigned long flags;

	/*
	 * refuse to set tagged depth to an unworkable size
	 */
	if (tags <= 0)
		return;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

	/*
	 * Check to see if the queue is managed by the block layer.
	 * If it is, and we fail to adjust the depth, exit.
	 *
	 * Do not resize the tag map if it is a host wide share bqt,
	 * because the size should be the hosts's can_queue. If there
	 * is more IO than the LLD's can_queue (so there are not enuogh
	 * tags) request_fn's host queue ready check will handle it.
	 */
	if (!shost_use_blk_mq(sdev->host) && !sdev->host->bqt) {
		if (blk_queue_tagged(sdev->request_queue) &&
		    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
			goto out;
	}

	sdev->queue_depth = tags;
	blk_set_queue_depth(sdev->request_queue, sdev->queue_depth);
	/* record which tag messages (if any) the device should use */
	switch (tagged) {
		case 0:
			sdev->ordered_tags = 0;
			sdev->simple_tags = 0;
			break;
		case MSG_ORDERED_TAG:
			sdev->ordered_tags = 1;
			sdev->simple_tags = 1;
			break;
		case MSG_SIMPLE_TAG:
			sdev->ordered_tags = 0;
			sdev->simple_tags = 1;
			break;
		default:
			/* unknown tag type: fall back to untagged and warn */
			sdev->ordered_tags = 0;
			sdev->simple_tags = 0;
			sdev_printk(KERN_WARNING, sdev,
				    "scsi_adjust_queue_depth, bad queue type, "
				    "disabled\n");
	}
 out:
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);
 801
 802/**
 803 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
 804 * @sdev: SCSI Device in question
 805 * @depth: Current number of outstanding SCSI commands on this device,
 806 *         not counting the one returned as QUEUE_FULL.
 807 *
 808 * Description: This function will track successive QUEUE_FULL events on a
 809 *              specific SCSI device to determine if and when there is a
 810 *              need to adjust the queue depth on the device.
 811 *
 812 * Returns:     0 - No change needed, >0 - Adjust queue depth to this new depth,
 813 *              -1 - Drop back to untagged operation using host->cmd_per_lun
 814 *                      as the untagged command depth
 815 *
 816 * Lock Status: None held on entry
 817 *
 818 * Notes:       Low level drivers may call this at any time and we will do
 819 *              "The Right Thing."  We are interrupt context safe.
 820 */
 821int scsi_track_queue_full(struct scsi_device *sdev, int depth)
 822{
 823
 824        /*
 825         * Don't let QUEUE_FULLs on the same
 826         * jiffies count, they could all be from
 827         * same event.
 828         */
 829        if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
 830                return 0;
 831
 832        sdev->last_queue_full_time = jiffies;
 833        if (sdev->last_queue_full_depth != depth) {
 834                sdev->last_queue_full_count = 1;
 835                sdev->last_queue_full_depth = depth;
 836        } else {
 837                sdev->last_queue_full_count++;
 838        }
 839
 840        if (sdev->last_queue_full_count <= 10)
 841                return 0;
 842        if (sdev->last_queue_full_depth < 8) {
 843                /* Drop back to untagged */
 844                scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
 845                return -1;
 846        }
 847        
 848        if (sdev->ordered_tags)
 849                scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
 850        else
 851                scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
 852        return depth;
 853}
 854EXPORT_SYMBOL(scsi_track_queue_full);
 855
 856/**
 857 * scsi_vpd_inquiry - Request a device provide us with a VPD page
 858 * @sdev: The device to ask
 859 * @buffer: Where to put the result
 860 * @page: Which Vital Product Data to return
 861 * @len: The length of the buffer
 862 *
 863 * This is an internal helper function.  You probably want to use
 864 * scsi_get_vpd_page instead.
 865 *
 866 * Returns size of the vpd page on success or a negative error number.
 867 */
static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
						u8 page, unsigned len)
{
	int result;
	unsigned char cmd[16];

	/* Need at least the 4-byte VPD header to validate the response. */
	if (len < 4)
		return -EINVAL;

	/* Build a 6-byte INQUIRY CDB requesting the given VPD page. */
	cmd[0] = INQUIRY;
	cmd[1] = 1;		/* EVPD */
	cmd[2] = page;
	cmd[3] = len >> 8;	/* allocation length, 16-bit big-endian */
	cmd[4] = len & 0xff;
	cmd[5] = 0;		/* Control byte */

	/*
	 * I'm not convinced we need to try quite this hard to get VPD, but
	 * all the existing users tried this hard.
	 */
	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
				  len, NULL, 30 * HZ, 3, NULL);
	if (result)
		return -EIO;

	/* Sanity check that we got the page back that we asked for */
	if (buffer[1] != page)
		return -EIO;

	/* Page length field (bytes 2-3) plus the 4-byte page header. */
	return get_unaligned_be16(&buffer[2]) + 4;
}
 899
 900/**
 901 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
 902 * @sdev: The device to ask
 903 * @page: Which Vital Product Data to return
 904 * @buf: where to store the VPD
 905 * @buf_len: number of bytes in the VPD buffer area
 906 *
 907 * SCSI devices may optionally supply Vital Product Data.  Each 'page'
 908 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
 * If the device supports this VPD page, this routine fills @buf with the
 * page data and returns 0.  If the page cannot be retrieved, or the
 * device does not support it, this routine returns a negative error
 * code (-EINVAL).
 913 */
 914int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
 915                      int buf_len)
 916{
 917        int i, result;
 918
 919        if (sdev->skip_vpd_pages)
 920                goto fail;
 921
 922        /* Ask for all the pages supported by this device */
 923        result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
 924        if (result < 4)
 925                goto fail;
 926
 927        /* If the user actually wanted this page, we can skip the rest */
 928        if (page == 0)
 929                return 0;
 930
 931        for (i = 4; i < min(result, buf_len); i++)
 932                if (buf[i] == page)
 933                        goto found;
 934
 935        if (i < result && i >= buf_len)
 936                /* ran off the end of the buffer, give us benefit of doubt */
 937                goto found;
 938        /* The device claims it doesn't support the requested page */
 939        goto fail;
 940
 941 found:
 942        result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
 943        if (result < 0)
 944                goto fail;
 945
 946        return 0;
 947
 948 fail:
 949        return -EINVAL;
 950}
 951EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
 952
 953/**
 954 * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
 955 * @sdev: The device to ask
 956 *
 957 * Attach the 'Device Identification' VPD page (0x83) and the
 958 * 'Unit Serial Number' VPD page (0x80) to a SCSI device
 959 * structure. This information can be used to identify the device
 960 * uniquely.
 961 */
 962void scsi_attach_vpd(struct scsi_device *sdev)
 963{
 964        int result, i;
 965        int vpd_len = SCSI_VPD_PG_LEN;
 966        int pg80_supported = 0;
 967        int pg83_supported = 0;
 968        unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
 969
 970        if (!scsi_device_supports_vpd(sdev))
 971                return;
 972
 973retry_pg0:
 974        vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
 975        if (!vpd_buf)
 976                return;
 977
 978        /* Ask for all the pages supported by this device */
 979        result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
 980        if (result < 0) {
 981                kfree(vpd_buf);
 982                return;
 983        }
 984        if (result > vpd_len) {
 985                vpd_len = result;
 986                kfree(vpd_buf);
 987                goto retry_pg0;
 988        }
 989
 990        for (i = 4; i < result; i++) {
 991                if (vpd_buf[i] == 0x80)
 992                        pg80_supported = 1;
 993                if (vpd_buf[i] == 0x83)
 994                        pg83_supported = 1;
 995        }
 996        kfree(vpd_buf);
 997        vpd_len = SCSI_VPD_PG_LEN;
 998
 999        if (pg80_supported) {
1000retry_pg80:
1001                vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1002                if (!vpd_buf)
1003                        return;
1004
1005                result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
1006                if (result < 0) {
1007                        kfree(vpd_buf);
1008                        return;
1009                }
1010                if (result > vpd_len) {
1011                        vpd_len = result;
1012                        kfree(vpd_buf);
1013                        goto retry_pg80;
1014                }
1015                spin_lock(&sdev->inquiry_lock);
1016                orig_vpd_buf = sdev->vpd_pg80;
1017                sdev->vpd_pg80_len = result;
1018                rcu_assign_pointer(sdev->vpd_pg80, vpd_buf);
1019                spin_unlock(&sdev->inquiry_lock);
1020                synchronize_rcu();
1021                if (orig_vpd_buf) {
1022                        kfree(orig_vpd_buf);
1023                        orig_vpd_buf = NULL;
1024                }
1025                vpd_len = SCSI_VPD_PG_LEN;
1026        }
1027
1028        if (pg83_supported) {
1029retry_pg83:
1030                vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1031                if (!vpd_buf)
1032                        return;
1033
1034                result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
1035                if (result < 0) {
1036                        kfree(vpd_buf);
1037                        return;
1038                }
1039                if (result > vpd_len) {
1040                        vpd_len = result;
1041                        kfree(vpd_buf);
1042                        goto retry_pg83;
1043                }
1044                spin_lock(&sdev->inquiry_lock);
1045                orig_vpd_buf = sdev->vpd_pg83;
1046                sdev->vpd_pg83_len = result;
1047                rcu_assign_pointer(sdev->vpd_pg83, vpd_buf);
1048                spin_unlock(&sdev->inquiry_lock);
1049                synchronize_rcu();
1050                if (orig_vpd_buf)
1051                        kfree(orig_vpd_buf);
1052        }
1053}
1054
1055/**
1056 * scsi_report_opcode - Find out if a given command opcode is supported
1057 * @sdev:       scsi device to query
1058 * @buffer:     scratch buffer (must be at least 20 bytes long)
1059 * @len:        length of buffer
1060 * @opcode:     opcode for command to look up
1061 *
1062 * Uses the REPORT SUPPORTED OPERATION CODES to look up the given
1063 * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is
1064 * unsupported and 1 if the device claims to support the command.
1065 */
1066int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
1067                       unsigned int len, unsigned char opcode)
1068{
1069        unsigned char cmd[16];
1070        struct scsi_sense_hdr sshdr;
1071        int result;
1072
1073        if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
1074                return -EINVAL;
1075
1076        memset(cmd, 0, 16);
1077        cmd[0] = MAINTENANCE_IN;
1078        cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
1079        cmd[2] = 1;             /* One command format */
1080        cmd[3] = opcode;
1081        put_unaligned_be32(len, &cmd[6]);
1082        memset(buffer, 0, len);
1083
1084        result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1085                                  &sshdr, 30 * HZ, 3, NULL);
1086
1087        if (result && scsi_sense_valid(&sshdr) &&
1088            sshdr.sense_key == ILLEGAL_REQUEST &&
1089            (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
1090                return -EINVAL;
1091
1092        if ((buffer[1] & 3) == 3) /* Command supported */
1093                return 1;
1094
1095        return 0;
1096}
1097EXPORT_SYMBOL(scsi_report_opcode);
1098
1099/**
1100 * scsi_device_get  -  get an additional reference to a scsi_device
1101 * @sdev:       device to get a reference to
1102 *
1103 * Description: Gets a reference to the scsi_device and increments the use count
1104 * of the underlying LLDD module.  You must hold host_lock of the
1105 * parent Scsi_Host or already have a reference when calling this.
1106 *
1107 * This will fail if a device is deleted or cancelled, or when the LLD module
1108 * is in the process of being unloaded.
1109 */
1110int scsi_device_get(struct scsi_device *sdev)
1111{
1112        if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
1113                goto fail;
1114        if (!get_device(&sdev->sdev_gendev))
1115                goto fail;
1116        if (!try_module_get(sdev->host->hostt->module))
1117                goto fail_put_device;
1118        return 0;
1119
1120fail_put_device:
1121        put_device(&sdev->sdev_gendev);
1122fail:
1123        return -ENXIO;
1124}
1125EXPORT_SYMBOL(scsi_device_get);
1126
1127/**
1128 * scsi_device_put  -  release a reference to a scsi_device
1129 * @sdev:       device to release a reference on.
1130 *
1131 * Description: Release a reference to the scsi_device and decrements the use
1132 * count of the underlying LLDD module.  The device is freed once the last
1133 * user vanishes.
1134 */
1135void scsi_device_put(struct scsi_device *sdev)
1136{
1137        module_put(sdev->host->hostt->module);
1138        put_device(&sdev->sdev_gendev);
1139}
1140EXPORT_SYMBOL(scsi_device_put);
1141
1142/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
                                           struct scsi_device *prev)
{
        /* Resume after @prev, or start at the list head on first call. */
        struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
        struct scsi_device *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        while (list->next != &shost->__devices) {
                next = list_entry(list->next, struct scsi_device, siblings);
                /* skip devices that we can't get a reference to */
                if (!scsi_device_get(next))
                        break;  /* got a reference: return this device */
                next = NULL;    /* reference refused: keep walking */
                list = list->next;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        /* Drop the reference taken for the previous iteration step. */
        if (prev)
                scsi_device_put(prev);
        return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
1166
1167/**
1168 * starget_for_each_device  -  helper to walk all devices of a target
1169 * @starget:    target whose devices we want to iterate over.
1170 * @data:       Opaque passed to each function call.
1171 * @fn:         Function to call on each device
1172 *
 * This traverses over each device of @starget.  The devices have
 * a reference that must be released by scsi_device_put when breaking
 * out of the loop.
1176 */
1177void starget_for_each_device(struct scsi_target *starget, void *data,
1178                     void (*fn)(struct scsi_device *, void *))
1179{
1180        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1181        struct scsi_device *sdev;
1182
1183        shost_for_each_device(sdev, shost) {
1184                if ((sdev->channel == starget->channel) &&
1185                    (sdev->id == starget->id))
1186                        fn(sdev, data);
1187        }
1188}
1189EXPORT_SYMBOL(starget_for_each_device);
1190
1191/**
1192 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
1193 * @starget:    target whose devices we want to iterate over.
1194 * @data:       parameter for callback @fn()
1195 * @fn:         callback function that is invoked for each device
1196 *
1197 * This traverses over each device of @starget.  It does _not_
1198 * take a reference on the scsi_device, so the whole loop must be
1199 * protected by shost->host_lock.
1200 *
1201 * Note:  The only reason why drivers would want to use this is because
1202 * they need to access the device list in irq context.  Otherwise you
1203 * really want to use starget_for_each_device instead.
1204 **/
1205void __starget_for_each_device(struct scsi_target *starget, void *data,
1206                               void (*fn)(struct scsi_device *, void *))
1207{
1208        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1209        struct scsi_device *sdev;
1210
1211        __shost_for_each_device(sdev, shost) {
1212                if ((sdev->channel == starget->channel) &&
1213                    (sdev->id == starget->id))
1214                        fn(sdev, data);
1215        }
1216}
1217EXPORT_SYMBOL(__starget_for_each_device);
1218
1219/**
1220 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
1221 * @starget:    SCSI target pointer
1222 * @lun:        SCSI Logical Unit Number
1223 *
1224 * Description: Looks up the scsi_device with the specified @lun for a given
1225 * @starget.  The returned scsi_device does not have an additional
1226 * reference.  You must hold the host's host_lock over this call and
1227 * any access to the returned scsi_device. A scsi_device in state
1228 * SDEV_DEL is skipped.
1229 *
1230 * Note:  The only reason why drivers should use this is because
1231 * they need to access the device list in irq context.  Otherwise you
1232 * really want to use scsi_device_lookup_by_target instead.
1233 **/
1234struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
1235                                                   uint lun)
1236{
1237        struct scsi_device *sdev;
1238
1239        list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
1240                if (sdev->sdev_state == SDEV_DEL)
1241                        continue;
1242                if (sdev->lun ==lun)
1243                        return sdev;
1244        }
1245
1246        return NULL;
1247}
1248EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1249
1250/**
1251 * scsi_device_lookup_by_target - find a device given the target
1252 * @starget:    SCSI target pointer
1253 * @lun:        SCSI Logical Unit Number
1254 *
1255 * Description: Looks up the scsi_device with the specified @lun for a given
1256 * @starget.  The returned scsi_device has an additional reference that
1257 * needs to be released with scsi_device_put once you're done with it.
1258 **/
1259struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
1260                                                 uint lun)
1261{
1262        struct scsi_device *sdev;
1263        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1264        unsigned long flags;
1265
1266        spin_lock_irqsave(shost->host_lock, flags);
1267        sdev = __scsi_device_lookup_by_target(starget, lun);
1268        if (sdev && scsi_device_get(sdev))
1269                sdev = NULL;
1270        spin_unlock_irqrestore(shost->host_lock, flags);
1271
1272        return sdev;
1273}
1274EXPORT_SYMBOL(scsi_device_lookup_by_target);
1275
1276/**
1277 * __scsi_device_lookup - find a device given the host (UNLOCKED)
1278 * @shost:      SCSI host pointer
1279 * @channel:    SCSI channel (zero if only one channel)
1280 * @id:         SCSI target number (physical unit number)
1281 * @lun:        SCSI Logical Unit Number
1282 *
1283 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1284 * for a given host. The returned scsi_device does not have an additional
1285 * reference.  You must hold the host's host_lock over this call and any access
1286 * to the returned scsi_device.
1287 *
1288 * Note:  The only reason why drivers would want to use this is because
1289 * they need to access the device list in irq context.  Otherwise you
1290 * really want to use scsi_device_lookup instead.
1291 **/
1292struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
1293                uint channel, uint id, uint lun)
1294{
1295        struct scsi_device *sdev;
1296
1297        list_for_each_entry(sdev, &shost->__devices, siblings) {
1298                if (sdev->channel == channel && sdev->id == id &&
1299                                sdev->lun ==lun)
1300                        return sdev;
1301        }
1302
1303        return NULL;
1304}
1305EXPORT_SYMBOL(__scsi_device_lookup);
1306
1307/**
1308 * scsi_device_lookup - find a device given the host
1309 * @shost:      SCSI host pointer
1310 * @channel:    SCSI channel (zero if only one channel)
1311 * @id:         SCSI target number (physical unit number)
1312 * @lun:        SCSI Logical Unit Number
1313 *
1314 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1315 * for a given host.  The returned scsi_device has an additional reference that
1316 * needs to be released with scsi_device_put once you're done with it.
1317 **/
1318struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
1319                uint channel, uint id, uint lun)
1320{
1321        struct scsi_device *sdev;
1322        unsigned long flags;
1323
1324        spin_lock_irqsave(shost->host_lock, flags);
1325        sdev = __scsi_device_lookup(shost, channel, id, lun);
1326        if (sdev && scsi_device_get(sdev))
1327                sdev = NULL;
1328        spin_unlock_irqrestore(shost->host_lock, flags);
1329
1330        return sdev;
1331}
1332EXPORT_SYMBOL(scsi_device_lookup);
1333
MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

/* Logging bit mask; readable by all, writable by root via sysfs. */
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

/* Opt-in switch for the blk-mq based I/O path (scsi-mq); off by default. */
bool scsi_use_blk_mq = false;
module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
1342
/*
 * init_scsi - bring up the SCSI subsystem pieces in dependency order.
 * On any failure, unwind everything initialized so far in reverse order.
 */
static int __init init_scsi(void)
{
        int error;

        /* scsi-mq is opt-in here; flag it as a tech preview when enabled. */
        if (scsi_use_blk_mq)
                mark_tech_preview("scsi-mq", THIS_MODULE);

        error = scsi_init_queue();
        if (error)
                return error;
        error = scsi_init_procfs();
        if (error)
                goto cleanup_queue;
        error = scsi_init_devinfo();
        if (error)
                goto cleanup_procfs;
        error = scsi_init_hosts();
        if (error)
                goto cleanup_devlist;
        error = scsi_init_sysctl();
        if (error)
                goto cleanup_hosts;
        error = scsi_sysfs_register();
        if (error)
                goto cleanup_sysctl;

        scsi_netlink_init();

        printk(KERN_NOTICE "SCSI subsystem initialized\n");
        return 0;

        /* Error unwind: each label tears down one more init step. */
cleanup_sysctl:
        scsi_exit_sysctl();
cleanup_hosts:
        scsi_exit_hosts();
cleanup_devlist:
        scsi_exit_devinfo();
cleanup_procfs:
        scsi_exit_procfs();
cleanup_queue:
        scsi_exit_queue();
        printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
               -error);
        return error;
}
1388
static void __exit exit_scsi(void)
{
        /* Tear down in the reverse order of init_scsi(). */
        scsi_netlink_exit();
        scsi_sysfs_unregister();
        scsi_exit_sysctl();
        scsi_exit_hosts();
        scsi_exit_devinfo();
        scsi_exit_procfs();
        scsi_exit_queue();
        /* Unregister the async domain used for sd probing. */
        async_unregister_domain(&scsi_sd_probe_domain);
}

/* Initialize early (subsys level) so host drivers can register. */
subsys_initcall(init_scsi);
module_exit(exit_scsi);
1403