linux/drivers/scsi/scsi.c
/*
 *  scsi.c Copyright (C) 1992 Drew Eckhardt
 *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *         Copyright (C) 2002, 2003 Christoph Hellwig
 *
 *  generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Bug correction thanks go to :
 *      Rik Faith <faith@cs.unc.edu>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 *  add scatter-gather, multiple outstanding request, and other
 *  enhancements.
 *
 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
 *  support added by Michael Neuffer <mike@i-connect.net>
 *
 *  Added request_module("scsi_hostadapter") for kerneld:
 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
 *  Bjorn Ekwall  <bj0rn@blox.se>
 *  (changed to kmod)
 *
 *  Major improvements to the timeout, abort, and reset processing,
 *  as well as performance modifications for large queue depths by
 *  Leonard N. Zubkoff <lnz@dandelion.com>
 *
 *  Converted cli() code to spinlocks, Ingo Molnar
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 *  out_of_space hacks, D. Gilbert (dpg) 990608
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/async.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scsi.h>

/*
 * Definitions and constants.
 */

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif

/* sd, scsi core and power management need to coordinate flushing async actions */
ASYNC_DOMAIN(scsi_sd_probe_domain);
EXPORT_SYMBOL(scsi_sd_probe_domain);

/*
 * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of
 * asynchronous system resume operations.  It is marked 'exclusive' to avoid
 * being included in the async_synchronize_full() that is invoked by
 * dpm_resume()
 */
ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
EXPORT_SYMBOL(scsi_sd_pm_domain);

/**
 * scsi_put_command - Free a scsi command block
 * @cmd: command block to free
 *
 * Returns:     Nothing.
 *
 * Notes:       The command must not belong to any lists.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
        unsigned long flags;

        /* serious error if the command hasn't come from a device list */
        spin_lock_irqsave(&cmd->device->list_lock, flags);
        BUG_ON(list_empty(&cmd->list));
        list_del_init(&cmd->list);
        spin_unlock_irqrestore(&cmd->device->list_lock, flags);

        BUG_ON(delayed_work_pending(&cmd->abort_work));
}

#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
        unsigned int level;

        /*
         * If ML QUEUE log level is greater than or equal to:
         *
         * 1: nothing (match completion)
         *
         * 2: log opcode + command of all commands + cmd address
         *
         * 3: same as 2
         *
         * 4: same as 3
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
                                       SCSI_LOG_MLQUEUE_BITS);
                if (level > 1) {
                        scmd_printk(KERN_INFO, cmd,
                                    "Send: scmd 0x%p\n", cmd);
                        scsi_print_command(cmd);
                }
        }
}

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
        unsigned int level;

        /*
         * If ML COMPLETE log level is greater than or equal to:
         *
         * 1: log disposition, result, opcode + command, and conditionally
         * sense data for failures or non SUCCESS dispositions.
         *
         * 2: same as 1 but for all command completions.
         *
         * 3: same as 2
         *
         * 4: same as 3 plus dump extra junk
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
                                       SCSI_LOG_MLCOMPLETE_BITS);
                if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
                    (level > 1)) {
                        scsi_print_result(cmd, "Done", disposition);
                        scsi_print_command(cmd);
                        if (status_byte(cmd->result) & CHECK_CONDITION)
                                scsi_print_sense(cmd);
                        if (level > 3)
                                scmd_printk(KERN_INFO, cmd,
                                            "scsi host busy %d failed %d\n",
                                            atomic_read(&cmd->device->host->host_busy),
                                            cmd->device->host->host_failed);
                }
        }
}
#endif
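
/*
 * Illustrative sketch: the ML QUEUE and ML COMPLETE levels interpreted above
 * live in 3-bit fields of scsi_logging_level.  The hypothetical helper below
 * packs a value the way SCSI_LOG_LEVEL() reads it back; only the
 * SCSI_LOG_*_SHIFT macros from scsi_logging.h are assumed.
 */
#if 0
static unsigned int example_pack_scsi_logging_level(void)
{
        unsigned int level = 0;

        /* level 2 on MLQUEUE: scsi_log_send() prints every submitted command */
        level |= 2 << SCSI_LOG_MLQUEUE_SHIFT;
        /* level 1 on MLCOMPLETE: scsi_log_completion() prints failures only */
        level |= 1 << SCSI_LOG_MLCOMPLETE_SHIFT;
        return level;
}
#endif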

/**
 * scsi_cmd_get_serial - Assign a serial number to a command
 * @host: the scsi host
 * @cmd: command to assign serial number to
 *
 * Description: a serial number identifies a request for error recovery
 * and debugging purposes.  Protected by the Host_Lock of host.
 */
void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
        cmd->serial_number = host->cmd_serial_number++;
        if (cmd->serial_number == 0)
                cmd->serial_number = host->cmd_serial_number++;
}
EXPORT_SYMBOL(scsi_cmd_get_serial);
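
/*
 * Illustrative sketch (hypothetical caller): a low-level driver that wants
 * serial numbers would call the helper above with the host lock held, for
 * example from its queuecommand path.
 */
#if 0
static void example_assign_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
        unsigned long flags;

        spin_lock_irqsave(host->host_lock, flags);
        scsi_cmd_get_serial(host, cmd);         /* caller must hold host_lock */
        spin_unlock_irqrestore(host->host_lock, flags);
}
#endif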

/**
 * scsi_finish_command - cleanup and pass command back to upper layer
 * @cmd: the command
 *
 * Description: Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct scsi_target *starget = scsi_target(sdev);
        struct Scsi_Host *shost = sdev->host;
        struct scsi_driver *drv;
        unsigned int good_bytes;

        scsi_device_unbusy(sdev);

        /*
         * Clear the flags that say that the device/target/host is no longer
         * capable of accepting new commands.
         */
        if (atomic_read(&shost->host_blocked))
                atomic_set(&shost->host_blocked, 0);
        if (atomic_read(&starget->target_blocked))
                atomic_set(&starget->target_blocked, 0);
        if (atomic_read(&sdev->device_blocked))
                atomic_set(&sdev->device_blocked, 0);

        /*
         * If we have valid sense information, then some kind of recovery
         * must have taken place.  Make a note of this.
         */
        if (SCSI_SENSE_VALID(cmd))
                cmd->result |= (DRIVER_SENSE << 24);

        SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
                                "Notifying upper driver of completion "
                                "(result %x)\n", cmd->result));

        good_bytes = scsi_bufflen(cmd);
        if (!blk_rq_is_passthrough(cmd->request)) {
                int old_good_bytes = good_bytes;
                drv = scsi_cmd_to_driver(cmd);
                if (drv->done)
                        good_bytes = drv->done(cmd);
                /*
                 * USB may not give sense identifying bad sector and
                 * simply return a residue instead, so subtract off the
                 * residue if drv->done() error processing indicates no
                 * change to the completion length.
                 */
                if (good_bytes == old_good_bytes)
                        good_bytes -= scsi_get_resid(cmd);
        }
        scsi_io_completion(cmd, good_bytes);
}

/**
 * scsi_change_queue_depth - change a device's queue depth
 * @sdev: SCSI Device in question
 * @depth: number of commands allowed to be queued to the driver
 *
 * Sets the device queue depth and returns the new value.
 */
int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
        if (depth > 0) {
                sdev->queue_depth = depth;
                wmb();
        }

        if (sdev->request_queue)
                blk_set_queue_depth(sdev->request_queue, depth);

        return sdev->queue_depth;
}
EXPORT_SYMBOL(scsi_change_queue_depth);
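
/*
 * Illustrative sketch (hypothetical driver code): low-level drivers usually
 * either point their scsi_host_template ->change_queue_depth at
 * scsi_change_queue_depth directly, or call it from ->slave_configure to
 * apply a driver-specific default, roughly as below.  The depth of 64 and
 * the helper name are invented for the sketch.
 */
#if 0
static int example_slave_configure(struct scsi_device *sdev)
{
        /* clamp the depth to what this (imaginary) adapter can queue */
        scsi_change_queue_depth(sdev, min_t(int, 64, sdev->host->can_queue));
        return 0;
}
#endif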

/**
 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
 * @sdev: SCSI Device in question
 * @depth: Current number of outstanding SCSI commands on this device,
 *         not counting the one returned as QUEUE_FULL.
 *
 * Description: This function will track successive QUEUE_FULL events on a
 *              specific SCSI device to determine if and when there is a
 *              need to adjust the queue depth on the device.
 *
 * Returns:     0 - No change needed, >0 - Adjust queue depth to this new depth,
 *              -1 - Drop back to untagged operation using host->cmd_per_lun
 *                      as the untagged command depth
 *
 * Lock Status: None held on entry
 *
 * Notes:       Low level drivers may call this at any time and we will do
 *              "The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{

        /*
         * Don't let QUEUE_FULLs on the same
         * jiffies count, they could all be from
         * same event.
         */
        if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
                return 0;

        sdev->last_queue_full_time = jiffies;
        if (sdev->last_queue_full_depth != depth) {
                sdev->last_queue_full_count = 1;
                sdev->last_queue_full_depth = depth;
        } else {
                sdev->last_queue_full_count++;
        }

        if (sdev->last_queue_full_count <= 10)
                return 0;

        return scsi_change_queue_depth(sdev, depth);
}
EXPORT_SYMBOL(scsi_track_queue_full);
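
/*
 * Illustrative sketch (hypothetical driver code): a typical caller is a
 * low-level driver's completion path that notices a QUEUE FULL (TASK SET
 * FULL) status.  'outstanding' stands in for whatever per-LUN count of
 * still-active commands the driver keeps, excluding the command that failed.
 */
#if 0
static void example_handle_queue_full(struct scsi_cmnd *cmd, int outstanding)
{
        /* QUEUE_FULL is the old-style (shifted) SAM TASK SET FULL status */
        if (status_byte(cmd->result) == QUEUE_FULL)
                scsi_track_queue_full(cmd->device, outstanding);
}
#endif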

/**
 * scsi_vpd_inquiry - Request a device provide us with a VPD page
 * @sdev: The device to ask
 * @buffer: Where to put the result
 * @page: Which Vital Product Data to return
 * @len: The length of the buffer
 *
 * This is an internal helper function.  You probably want to use
 * scsi_get_vpd_page instead.
 *
 * Returns size of the vpd page on success or a negative error number.
 */
static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
                                                        u8 page, unsigned len)
{
        int result;
        unsigned char cmd[16];

        if (len < 4)
                return -EINVAL;

        cmd[0] = INQUIRY;
        cmd[1] = 1;             /* EVPD */
        cmd[2] = page;
        cmd[3] = len >> 8;
        cmd[4] = len & 0xff;
        cmd[5] = 0;             /* Control byte */

        /*
         * I'm not convinced we need to try quite this hard to get VPD, but
         * all the existing users tried this hard.
         */
        result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
                                  len, NULL, 30 * HZ, 3, NULL);
        if (result)
                return -EIO;

        /* Sanity check that we got the page back that we asked for */
        if (buffer[1] != page)
                return -EIO;

        return get_unaligned_be16(&buffer[2]) + 4;
}

/**
 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
 * @sdev: The device to ask
 * @page: Which Vital Product Data to return
 * @buf: where to store the VPD
 * @buf_len: number of bytes in the VPD buffer area
 *
 * SCSI devices may optionally supply Vital Product Data.  Each 'page'
 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
 * If the device supports this VPD page, this routine fills @buf with
 * the data from that page and returns 0.  If the page cannot be
 * retrieved, or @sdev is configured to skip VPD pages, it returns
 * -EINVAL.
 */
int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
                      int buf_len)
{
        int i, result;

        if (sdev->skip_vpd_pages)
                goto fail;

        /* Ask for all the pages supported by this device */
        result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
        if (result < 4)
                goto fail;

        /* If the user actually wanted this page, we can skip the rest */
        if (page == 0)
                return 0;

        for (i = 4; i < min(result, buf_len); i++)
                if (buf[i] == page)
                        goto found;

        if (i < result && i >= buf_len)
                /* ran off the end of the buffer, give us benefit of doubt */
                goto found;
        /* The device claims it doesn't support the requested page */
        goto fail;

 found:
        result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
        if (result < 0)
                goto fail;

        return 0;

 fail:
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
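
/*
 * Illustrative sketch (hypothetical caller): fetching the Unit Serial Number
 * page (0x80) into a locally allocated buffer.  The 255-byte size and the
 * helper name are arbitrary choices for the sketch.
 */
#if 0
static void example_read_unit_serial(struct scsi_device *sdev)
{
        unsigned char *buf = kzalloc(255, GFP_KERNEL);

        if (!buf)
                return;

        if (!scsi_get_vpd_page(sdev, 0x80, buf, 255))
                sdev_printk(KERN_INFO, sdev, "VPD page 0x80, length %d\n",
                            get_unaligned_be16(&buf[2]) + 4);

        kfree(buf);
}
#endif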

/**
 * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
 * @sdev: The device to ask
 *
 * Attach the 'Device Identification' VPD page (0x83) and the
 * 'Unit Serial Number' VPD page (0x80) to a SCSI device
 * structure. This information can be used to identify the device
 * uniquely.
 */
void scsi_attach_vpd(struct scsi_device *sdev)
{
        int result, i;
        int vpd_len = SCSI_VPD_PG_LEN;
        int pg80_supported = 0;
        int pg83_supported = 0;
        unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;

        if (!scsi_device_supports_vpd(sdev))
                return;

retry_pg0:
        vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
        if (!vpd_buf)
                return;

        /* Ask for all the pages supported by this device */
        result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
        if (result < 0) {
                kfree(vpd_buf);
                return;
        }
        if (result > vpd_len) {
                vpd_len = result;
                kfree(vpd_buf);
                goto retry_pg0;
        }

        for (i = 4; i < result; i++) {
                if (vpd_buf[i] == 0x80)
                        pg80_supported = 1;
                if (vpd_buf[i] == 0x83)
                        pg83_supported = 1;
        }
        kfree(vpd_buf);
        vpd_len = SCSI_VPD_PG_LEN;

        if (pg80_supported) {
retry_pg80:
                vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
                if (!vpd_buf)
                        return;

                result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
                if (result < 0) {
                        kfree(vpd_buf);
                        return;
                }
                if (result > vpd_len) {
                        vpd_len = result;
                        kfree(vpd_buf);
                        goto retry_pg80;
                }
                mutex_lock(&sdev->inquiry_mutex);
                orig_vpd_buf = sdev->vpd_pg80;
                sdev->vpd_pg80_len = result;
                rcu_assign_pointer(sdev->vpd_pg80, vpd_buf);
                mutex_unlock(&sdev->inquiry_mutex);
                synchronize_rcu();
                if (orig_vpd_buf) {
                        kfree(orig_vpd_buf);
                        orig_vpd_buf = NULL;
                }
                vpd_len = SCSI_VPD_PG_LEN;
        }

        if (pg83_supported) {
retry_pg83:
                vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
                if (!vpd_buf)
                        return;

                result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
                if (result < 0) {
                        kfree(vpd_buf);
                        return;
                }
                if (result > vpd_len) {
                        vpd_len = result;
                        kfree(vpd_buf);
                        goto retry_pg83;
                }
                mutex_lock(&sdev->inquiry_mutex);
                orig_vpd_buf = sdev->vpd_pg83;
                sdev->vpd_pg83_len = result;
                rcu_assign_pointer(sdev->vpd_pg83, vpd_buf);
                mutex_unlock(&sdev->inquiry_mutex);
                synchronize_rcu();
                if (orig_vpd_buf)
                        kfree(orig_vpd_buf);
        }
}
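
/*
 * Illustrative sketch (hypothetical reader): the pages attached above are
 * published with rcu_assign_pointer(), so consumers dereference them inside
 * an RCU read-side critical section, as shown here for the Device
 * Identification page.
 */
#if 0
static int example_vpd_pg83_len(struct scsi_device *sdev)
{
        const unsigned char *vpd;
        int len = 0;

        rcu_read_lock();
        vpd = rcu_dereference(sdev->vpd_pg83);
        if (vpd)
                len = sdev->vpd_pg83_len;
        rcu_read_unlock();

        return len;
}
#endif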

/**
 * scsi_report_opcode - Find out if a given command opcode is supported
 * @sdev:       scsi device to query
 * @buffer:     scratch buffer (must be at least 20 bytes long)
 * @len:        length of buffer
 * @opcode:     opcode for command to look up
 *
 * Uses the REPORT SUPPORTED OPERATION CODES command to look up the given
 * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is
 * unsupported and 1 if the device claims to support the command.
 */
int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
                       unsigned int len, unsigned char opcode)
{
        unsigned char cmd[16];
        struct scsi_sense_hdr sshdr;
        int result;

        if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
                return -EINVAL;

        memset(cmd, 0, 16);
        cmd[0] = MAINTENANCE_IN;
        cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
        cmd[2] = 1;             /* One command format */
        cmd[3] = opcode;
        put_unaligned_be32(len, &cmd[6]);
        memset(buffer, 0, len);

        result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
                                  &sshdr, 30 * HZ, 3, NULL);

        if (result && scsi_sense_valid(&sshdr) &&
            sshdr.sense_key == ILLEGAL_REQUEST &&
            (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
                return -EINVAL;

        if ((buffer[1] & 3) == 3) /* Command supported */
                return 1;

        return 0;
}
EXPORT_SYMBOL(scsi_report_opcode);
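
/*
 * Illustrative sketch (hypothetical caller): probing for WRITE SAME (16)
 * support, similar in spirit to what sd.c does for a few opcodes.  The
 * 64-byte scratch size is arbitrary but above the 20-byte minimum noted
 * above.
 */
#if 0
static bool example_supports_write_same_16(struct scsi_device *sdev,
                                           unsigned char *buf)
{
        return scsi_report_opcode(sdev, buf, 64, WRITE_SAME_16) == 1;
}
#endif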

/**
 * scsi_device_get  -  get an additional reference to a scsi_device
 * @sdev:       device to get a reference to
 *
 * Description: Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 *
 * This will fail if a device is deleted or cancelled, or when the LLD module
 * is in the process of being unloaded.
 */
int scsi_device_get(struct scsi_device *sdev)
{
        if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
                goto fail;
        if (!get_device(&sdev->sdev_gendev))
                goto fail;
        if (!try_module_get(sdev->host->hostt->module))
                goto fail_put_device;
        return 0;

fail_put_device:
        put_device(&sdev->sdev_gendev);
fail:
        return -ENXIO;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put  -  release a reference to a scsi_device
 * @sdev:       device to release a reference on.
 *
 * Description: Releases a reference to the scsi_device and decrements the use
 * count of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
        module_put(sdev->host->hostt->module);
        put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);
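
/*
 * Illustrative sketch (hypothetical caller): pin a device for use outside
 * the host lock, then drop both references again.
 */
#if 0
static void example_use_device(struct scsi_device *sdev)
{
        if (scsi_device_get(sdev))
                return;         /* being deleted, or LLDD module unloading */

        /* ... sdev may be used safely here ... */

        scsi_device_put(sdev);
}
#endif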

/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
                                           struct scsi_device *prev)
{
        struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
        struct scsi_device *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        while (list->next != &shost->__devices) {
                next = list_entry(list->next, struct scsi_device, siblings);
                /* skip devices that we can't get a reference to */
                if (!scsi_device_get(next))
                        break;
                next = NULL;
                list = list->next;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        if (prev)
                scsi_device_put(prev);
        return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
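
/*
 * Illustrative sketch (hypothetical caller): the shost_for_each_device()
 * iterator is built on the helper above.  Each iteration holds its own
 * device reference; breaking out of the loop early would require an
 * explicit scsi_device_put().
 */
#if 0
static void example_count_devices(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;
        int count = 0;

        shost_for_each_device(sdev, shost)
                count++;

        shost_printk(KERN_INFO, shost, "%d devices\n", count);
}
#endif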

/**
 * starget_for_each_device  -  helper to walk all devices of a target
 * @starget:    target whose devices we want to iterate over.
 * @data:       Opaque passed to each function call.
 * @fn:         Function to call on each device
 *
 * This traverses over each device of @starget.  The devices have
 * a reference that must be released by scsi_device_put when breaking
 * out of the loop.
 */
void starget_for_each_device(struct scsi_target *starget, void *data,
                     void (*fn)(struct scsi_device *, void *))
{
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost) {
                if ((sdev->channel == starget->channel) &&
                    (sdev->id == starget->id))
                        fn(sdev, data);
        }
}
EXPORT_SYMBOL(starget_for_each_device);
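
/*
 * Illustrative sketch (hypothetical caller): callback-style iteration over
 * one target's LUNs; @data is simply forwarded to the callback.  Both
 * helpers are invented for the sketch.
 */
#if 0
static void example_bump_lun_count(struct scsi_device *sdev, void *data)
{
        (*(unsigned int *)data)++;
}

static unsigned int example_count_luns(struct scsi_target *starget)
{
        unsigned int count = 0;

        starget_for_each_device(starget, &count, example_bump_lun_count);
        return count;
}
#endif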

/**
 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
 * @starget:    target whose devices we want to iterate over.
 * @data:       parameter for callback @fn()
 * @fn:         callback function that is invoked for each device
 *
 * This traverses over each device of @starget.  It does _not_
 * take a reference on the scsi_device, so the whole loop must be
 * protected by shost->host_lock.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use starget_for_each_device instead.
 **/
void __starget_for_each_device(struct scsi_target *starget, void *data,
                               void (*fn)(struct scsi_device *, void *))
{
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        struct scsi_device *sdev;

        __shost_for_each_device(sdev, shost) {
                if ((sdev->channel == starget->channel) &&
                    (sdev->id == starget->id))
                        fn(sdev, data);
        }
}
EXPORT_SYMBOL(__starget_for_each_device);

/**
 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
 * @starget:    SCSI target pointer
 * @lun:        SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and
 * any access to the returned scsi_device. A scsi_device in state
 * SDEV_DEL is skipped.
 *
 * Note:  The only reason why drivers should use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup_by_target instead.
 **/
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
                                                   u64 lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
                if (sdev->sdev_state == SDEV_DEL)
                        continue;
                if (sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup_by_target);

/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget:    SCSI target pointer
 * @lun:        SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
                                                 u64 lun)
{
        struct scsi_device *sdev;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup_by_target(starget, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);

/**
 * __scsi_device_lookup - find a device given the host (UNLOCKED)
 * @shost:      SCSI host pointer
 * @channel:    SCSI channel (zero if only one channel)
 * @id:         SCSI target number (physical unit number)
 * @lun:        SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
 * for a given host. The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and any access
 * to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup instead.
 **/
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
                uint channel, uint id, u64 lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &shost->__devices, siblings) {
                if (sdev->sdev_state == SDEV_DEL)
                        continue;
                if (sdev->channel == channel && sdev->id == id &&
                                sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup);

/**
 * scsi_device_lookup - find a device given the host
 * @shost:      SCSI host pointer
 * @channel:    SCSI channel (zero if only one channel)
 * @id:         SCSI target number (physical unit number)
 * @lun:        SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
 * for a given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
                uint channel, uint id, u64 lun)
{
        struct scsi_device *sdev;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup(shost, channel, id, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);
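
/*
 * Illustrative sketch (hypothetical caller): look up channel 0, target 1,
 * LUN 0 on a host and report whether it exists.  The reference returned by
 * the lookup must always be dropped.
 */
#if 0
static bool example_device_present(struct Scsi_Host *shost)
{
        struct scsi_device *sdev = scsi_device_lookup(shost, 0, 1, 0);

        if (!sdev)
                return false;

        scsi_device_put(sdev);
        return true;
}
#endif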

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

#ifdef CONFIG_SCSI_MQ_DEFAULT
bool scsi_use_blk_mq = true;
#else
bool scsi_use_blk_mq = false;
#endif
module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);

static int __init init_scsi(void)
{
        int error;

        error = scsi_init_queue();
        if (error)
                return error;
        error = scsi_init_procfs();
        if (error)
                goto cleanup_queue;
        error = scsi_init_devinfo();
        if (error)
                goto cleanup_procfs;
        error = scsi_init_hosts();
        if (error)
                goto cleanup_devlist;
        error = scsi_init_sysctl();
        if (error)
                goto cleanup_hosts;
        error = scsi_sysfs_register();
        if (error)
                goto cleanup_sysctl;

        scsi_netlink_init();

        printk(KERN_NOTICE "SCSI subsystem initialized\n");
        return 0;

cleanup_sysctl:
        scsi_exit_sysctl();
cleanup_hosts:
        scsi_exit_hosts();
cleanup_devlist:
        scsi_exit_devinfo();
cleanup_procfs:
        scsi_exit_procfs();
cleanup_queue:
        scsi_exit_queue();
        printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
               -error);
        return error;
}

static void __exit exit_scsi(void)
{
        scsi_netlink_exit();
        scsi_sysfs_unregister();
        scsi_exit_sysctl();
        scsi_exit_hosts();
        scsi_exit_devinfo();
        scsi_exit_procfs();
        scsi_exit_queue();
        async_unregister_domain(&scsi_sd_probe_domain);
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);