linux/drivers/scsi/scsi_lib.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h> /* __scsi_init_queue() */
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_debugfs.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

/*
 * Size of integrity metadata is usually small, 1 inline sg should
 * cover normal cases.
 */
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  SCSI_INLINE_PROT_SG_CNT  0
#define  SCSI_INLINE_SG_CNT  0
#else
#define  SCSI_INLINE_PROT_SG_CNT  1
#define  SCSI_INLINE_SG_CNT  2
#endif

static struct kmem_cache *scsi_sense_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);

int scsi_init_sense_cache(struct Scsi_Host *shost)
{
        int ret = 0;

        mutex_lock(&scsi_sense_cache_mutex);
        if (!scsi_sense_cache) {
                scsi_sense_cache =
                        kmem_cache_create_usercopy("scsi_sense_cache",
                                SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
                                0, SCSI_SENSE_BUFFERSIZE, NULL);
                if (!scsi_sense_cache)
                        ret = -ENOMEM;
        }
        mutex_unlock(&scsi_sense_cache_mutex);
        return ret;
}

/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs so as
 * not to change behaviour from the previous unplug mechanism; experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY        3

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct scsi_target *starget = scsi_target(device);

        /*
         * Set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken.  The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        switch (reason) {
        case SCSI_MLQUEUE_HOST_BUSY:
                atomic_set(&host->host_blocked, host->max_host_blocked);
                break;
        case SCSI_MLQUEUE_DEVICE_BUSY:
        case SCSI_MLQUEUE_EH_RETRY:
                atomic_set(&device->device_blocked,
                           device->max_device_blocked);
                break;
        case SCSI_MLQUEUE_TARGET_BUSY:
                atomic_set(&starget->target_blocked,
                           starget->max_target_blocked);
                break;
        }
}
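
/*
 * Worked example (editor's illustration, not part of the original file):
 * if an LLD returns SCSI_MLQUEUE_HOST_BUSY from ->queuecommand(), the code
 * above sets host_blocked to shost->max_host_blocked; later,
 * scsi_host_queue_ready() lets that counter iterate back down to zero
 * before commands are dispatched to the host again.
 */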

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
        if (cmd->request->rq_flags & RQF_DONTPREP) {
                cmd->request->rq_flags &= ~RQF_DONTPREP;
                scsi_mq_uninit_cmd(cmd);
        } else {
                WARN_ON_ONCE(true);
        }
        blk_mq_requeue_request(cmd->request, true);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason:  The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
        struct scsi_device *device = cmd->device;

        SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
                "Inserting command %p into mlqueue\n", cmd));

        scsi_set_blocked(cmd, reason);

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        if (unbusy)
                scsi_device_unbusy(device, cmd);

        /*
         * Requeue this command.  It will go before all other commands
         * that are already in the queue. Schedule requeue work under
         * lock such that the kblockd_schedule_work() call happens
         * before blk_cleanup_queue() finishes.
         */
        cmd->result = 0;

        blk_mq_requeue_request(cmd->request, true);
}

/**
 * scsi_queue_insert - Reinsert a command in the queue.
 * @cmd:    command that we are adding to queue.
 * @reason: why we are inserting command to queue.
 *
 * We do this for one of two cases. Either the host is busy and it cannot accept
 * any more commands for the time being, or the device returned QUEUE_FULL and
 * can accept no more commands.
 *
 * Context: This could be called either from an interrupt context or a normal
 * process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        __scsi_queue_insert(cmd, reason, true);
}


/**
 * __scsi_execute - insert request and wait for the result
 * @sdev:       scsi device
 * @cmd:        scsi command
 * @data_direction: data direction
 * @buffer:     data buffer
 * @bufflen:    len of buffer
 * @sense:      optional sense buffer
 * @sshdr:      optional decoded sense header
 * @timeout:    request timeout in HZ
 * @retries:    number of times to retry request
 * @flags:      flags for ->cmd_flags
 * @rq_flags:   flags for ->rq_flags
 * @resid:      optional residual length
 *
 * Returns the scsi_cmnd result field if a command was executed, or a negative
 * Linux error code if we didn't get that far.
 */
int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
                 unsigned char *sense, struct scsi_sense_hdr *sshdr,
                 int timeout, int retries, u64 flags, req_flags_t rq_flags,
                 int *resid)
{
        struct request *req;
        struct scsi_request *rq;
        int ret;

        req = blk_get_request(sdev->request_queue,
                        data_direction == DMA_TO_DEVICE ?
                        REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
                        rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        rq = scsi_req(req);

        if (bufflen) {
                ret = blk_rq_map_kern(sdev->request_queue, req,
                                      buffer, bufflen, GFP_NOIO);
                if (ret)
                        goto out;
        }
        rq->cmd_len = COMMAND_SIZE(cmd[0]);
        memcpy(rq->cmd, cmd, rq->cmd_len);
        rq->retries = retries;
        req->timeout = timeout;
        req->cmd_flags |= flags;
        req->rq_flags |= rq_flags | RQF_QUIET;

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        blk_execute_rq(NULL, req, 1);

        /*
         * Some devices (USB mass-storage in particular) may transfer
         * garbage data together with a residue indicating that the data
         * is invalid.  Prevent the garbage from being misinterpreted
         * and prevent security leaks by zeroing out the excess data.
         */
        if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
                memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);

        if (resid)
                *resid = rq->resid_len;
        if (sense && rq->sense_len)
                memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
        if (sshdr)
                scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
        ret = rq->result;
 out:
        blk_put_request(req);

        return ret;
}
EXPORT_SYMBOL(__scsi_execute);
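
/*
 * Example (editor's illustration, not part of the original file): a minimal
 * sketch of how a caller might issue a TEST UNIT READY through
 * __scsi_execute().  The helper name example_test_unit_ready() and the
 * timeout/retry values are hypothetical; the argument order follows the
 * prototype above.
 */
#if 0   /* illustrative only */
static int example_test_unit_ready(struct scsi_device *sdev)
{
        unsigned char cmd[6] = { TEST_UNIT_READY }; /* 6-byte CDB, opcode 0 */
        struct scsi_sense_hdr sshdr;

        /* No data phase: DMA_NONE with a NULL buffer and zero length. */
        return __scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
                              10 * HZ, 3, 0, 0, NULL);
}
#endif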

/*
 * Wake up the error handler if necessary.  To avoid that a wakeup is missed
 * when the number of host in-flight requests equals shost->host_failed,
 * call_rcu() is used in scsi_eh_scmd_add() in combination with an RCU read
 * lock in this function, ensuring that this function in its entirety either
 * finishes before scsi_eh_scmd_add() increases the host_failed counter or
 * notices the shost state change made by scsi_eh_scmd_add().
 */
static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
        unsigned long flags;

        rcu_read_lock();
        __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
        if (unlikely(scsi_host_in_recovery(shost))) {
                spin_lock_irqsave(shost->host_lock, flags);
                if (shost->host_failed || shost->host_eh_scheduled)
                        scsi_eh_wakeup(shost);
                spin_unlock_irqrestore(shost->host_lock, flags);
        }
        rcu_read_unlock();
}

void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
{
        struct Scsi_Host *shost = sdev->host;
        struct scsi_target *starget = scsi_target(sdev);

        scsi_dec_host_busy(shost, cmd);

        if (starget->can_queue > 0)
                atomic_dec(&starget->target_busy);

        sbitmap_put(&sdev->budget_map, cmd->budget_token);
        cmd->budget_token = -1;
}

static void scsi_kick_queue(struct request_queue *q)
{
        blk_mq_run_hw_queues(q, false);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /*
         * Call blk_run_queue for all LUNs on the target, starting with
         * current_sdev. We race with others (to set starget_sdev_user),
         * but in most cases, we will be first. Ideally, each LU on the
         * target would get some limited time or requests on the target.
         */
        scsi_kick_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                        same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                scsi_kick_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
 out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
        if (scsi_device_busy(sdev) >= sdev->queue_depth)
                return true;
        if (atomic_read(&sdev->device_blocked) > 0)
                return true;
        return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
        if (starget->can_queue > 0) {
                if (atomic_read(&starget->target_busy) >= starget->can_queue)
                        return true;
                if (atomic_read(&starget->target_blocked) > 0)
                        return true;
        }
        return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
        if (atomic_read(&shost->host_blocked) > 0)
                return true;
        if (shost->host_self_blocked)
                return true;
        return false;
}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
        LIST_HEAD(starved_list);
        struct scsi_device *sdev;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        list_splice_init(&shost->starved_list, &starved_list);

        while (!list_empty(&starved_list)) {
                struct request_queue *slq;

                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
                 * drops the queue_lock and can add us back to the
                 * starved_list.
                 *
                 * host_lock protects the starved_list and starved_entry.
                 * scsi_request_fn must get the host_lock before checking
                 * or modifying starved_list or starved_entry.
                 */
                if (scsi_host_is_busy(shost))
                        break;

                sdev = list_entry(starved_list.next,
                                  struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                if (scsi_target_is_busy(scsi_target(sdev))) {
                        list_move_tail(&sdev->starved_entry,
                                       &shost->starved_list);
                        continue;
                }

                /*
                 * Once we drop the host lock, a racing scsi_remove_device()
                 * call may remove the sdev from the starved list and destroy
                 * it and the queue.  Mitigate by taking a reference to the
                 * queue and never touching the sdev again after we drop the
                 * host lock.  Note: if __scsi_remove_device() invokes
                 * blk_cleanup_queue() before the queue is run from this
                 * function then blk_run_queue() will return immediately since
                 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
                 */
                slq = sdev->request_queue;
                if (!blk_get_queue(slq))
                        continue;
                spin_unlock_irqrestore(shost->host_lock, flags);

                scsi_kick_queue(slq);
                blk_put_queue(slq);

                spin_lock_irqsave(shost->host_lock, flags);
        }
        /* put any unprocessed entries back */
        list_splice(&starved_list, &shost->starved_list);
        spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * scsi_run_queue - Select a proper request queue to serve next.
 * @q:  last request's queue
 *
 * The previous command was completely finished, start a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;

        if (scsi_target(sdev)->single_lun)
                scsi_single_lun_run(sdev);
        if (!list_empty(&sdev->host->starved_list))
                scsi_starved_list_run(sdev->host);

        blk_mq_run_hw_queues(q, false);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
        struct scsi_device *sdev;
        struct request_queue *q;

        sdev = container_of(work, struct scsi_device, requeue_work);
        q = sdev->request_queue;
        scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
        if (!blk_rq_is_passthrough(cmd->request)) {
                struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

                if (drv->uninit_command)
                        drv->uninit_command(cmd);
        }
}

void scsi_free_sgtables(struct scsi_cmnd *cmd)
{
        if (cmd->sdb.table.nents)
                sg_free_table_chained(&cmd->sdb.table,
                                SCSI_INLINE_SG_CNT);
        if (scsi_prot_sg_count(cmd))
                sg_free_table_chained(&cmd->prot_sdb->table,
                                SCSI_INLINE_PROT_SG_CNT);
}
EXPORT_SYMBOL_GPL(scsi_free_sgtables);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
        scsi_free_sgtables(cmd);
        scsi_uninit_cmd(cmd);
}

static void scsi_run_queue_async(struct scsi_device *sdev)
{
        if (scsi_target(sdev)->single_lun ||
            !list_empty(&sdev->host->starved_list)) {
                kblockd_schedule_work(&sdev->requeue_work);
        } else {
                /*
                 * smp_mb() present in sbitmap_queue_clear() or implied in
                 * .end_io is for ordering writing .device_busy in
                 * scsi_device_unbusy() and reading sdev->restarts.
                 */
                int old = atomic_read(&sdev->restarts);

                /*
                 * ->restarts has to be kept as non-zero if new budget
                 *  contention occurs.
                 *
                 *  No need to run queue when either another re-run
                 *  queue wins in updating ->restarts or a new budget
                 *  contention occurs.
                 */
                if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
                        blk_mq_run_hw_queues(sdev->request_queue, true);
        }
}

/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
                unsigned int bytes)
{
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
        struct scsi_device *sdev = cmd->device;
        struct request_queue *q = sdev->request_queue;

        if (blk_update_request(req, error, bytes))
                return true;

        if (blk_queue_add_random(q))
                add_disk_randomness(req->rq_disk);

        if (!blk_rq_is_passthrough(req)) {
                WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
                cmd->flags &= ~SCMD_INITIALIZED;
        }

        /*
         * Calling rcu_barrier() is not necessary here because the
         * SCSI error handler guarantees that the function called by
         * call_rcu() has been called before scsi_end_request() is
         * called.
         */
        destroy_rcu_head(&cmd->rcu);

        /*
         * In the MQ case the command gets freed by __blk_mq_end_request,
         * so we have to do all cleanup that depends on it earlier.
         *
         * We also can't kick the queues from irq context, so we
         * will have to defer it to a workqueue.
         */
        scsi_mq_uninit_cmd(cmd);

        /*
         * The queue is still alive, so grab a reference to prevent it
         * from being cleaned up while we run the queue.
         */
        percpu_ref_get(&q->q_usage_counter);

        __blk_mq_end_request(req, error);

        scsi_run_queue_async(sdev);

        percpu_ref_put(&q->q_usage_counter);
        return false;
}

/**
 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 * @cmd:        SCSI command
 * @result:     scsi error code
 *
 * Translate a SCSI result code into a blk_status_t value. May reset the host
 * byte of @cmd->result.
 */
static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
{
        switch (host_byte(result)) {
        case DID_OK:
                if (scsi_status_is_good(result))
                        return BLK_STS_OK;
                return BLK_STS_IOERR;
        case DID_TRANSPORT_FAILFAST:
        case DID_TRANSPORT_MARGINAL:
                return BLK_STS_TRANSPORT;
        case DID_TARGET_FAILURE:
                set_host_byte(cmd, DID_OK);
                return BLK_STS_TARGET;
        case DID_NEXUS_FAILURE:
                set_host_byte(cmd, DID_OK);
                return BLK_STS_NEXUS;
        case DID_ALLOC_FAILURE:
                set_host_byte(cmd, DID_OK);
                return BLK_STS_NOSPC;
        case DID_MEDIUM_ERROR:
                set_host_byte(cmd, DID_OK);
                return BLK_STS_MEDIUM;
        default:
                return BLK_STS_IOERR;
        }
}

/* Helper for scsi_io_completion() when "reprep" action required. */
static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
                                      struct request_queue *q)
{
        /* A new command will be prepared and issued. */
        scsi_mq_requeue_cmd(cmd);
}

static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;
        unsigned long wait_for;

        if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
                return false;

        wait_for = (cmd->allowed + 1) * req->timeout;
        if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
                scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n",
                            wait_for/HZ);
                return true;
        }
        return false;
}
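
/*
 * Worked example (editor's illustration, not part of the original file):
 * with req->timeout = 30 * HZ and cmd->allowed = 3, wait_for is
 * (3 + 1) * 30s; once 120 seconds have passed since cmd->jiffies_at_alloc
 * the command is failed instead of being retried again.
 */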

/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int level = 0;
        enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
              ACTION_DELAYED_RETRY} action;
        struct scsi_sense_hdr sshdr;
        bool sense_valid;
        bool sense_current = true;      /* false implies "deferred sense" */
        blk_status_t blk_stat;

        sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
        if (sense_valid)
                sense_current = !scsi_sense_is_deferred(&sshdr);

        blk_stat = scsi_result_to_blk_status(cmd, result);

        if (host_byte(result) == DID_RESET) {
                /* Third party bus reset or reset for error recovery
                 * reasons.  Just retry the command and see what
                 * happens.
                 */
                action = ACTION_RETRY;
        } else if (sense_valid && sense_current) {
                switch (sshdr.sense_key) {
                case UNIT_ATTENTION:
                        if (cmd->device->removable) {
                                /* Detected disc change.  Set a bit
                                 * and quietly refuse further access.
                                 */
                                cmd->device->changed = 1;
                                action = ACTION_FAIL;
                        } else {
                                /* Must have been a power glitch, or a
                                 * bus reset.  Could not have been a
                                 * media change, so we just retry the
                                 * command and see what happens.
                                 */
                                action = ACTION_RETRY;
                        }
                        break;
                case ILLEGAL_REQUEST:
                        /* If we had an ILLEGAL REQUEST returned, then
                         * we may have performed an unsupported
                         * command.  The only thing this should be
                         * would be a ten byte read where only a six
                         * byte read was supported.  Also, on a system
                         * where READ CAPACITY failed, we may have
                         * read past the end of the disk.
                         */
                        if ((cmd->device->use_10_for_rw &&
                            sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
                            (cmd->cmnd[0] == READ_10 ||
                             cmd->cmnd[0] == WRITE_10)) {
                                /* This will issue a new 6-byte command. */
                                cmd->device->use_10_for_rw = 0;
                                action = ACTION_REPREP;
                        } else if (sshdr.asc == 0x10) /* DIX */ {
                                action = ACTION_FAIL;
                                blk_stat = BLK_STS_PROTECTION;
                        /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
                        } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
                                action = ACTION_FAIL;
                                blk_stat = BLK_STS_TARGET;
                        } else
                                action = ACTION_FAIL;
                        break;
                case ABORTED_COMMAND:
                        action = ACTION_FAIL;
                        if (sshdr.asc == 0x10) /* DIF */
                                blk_stat = BLK_STS_PROTECTION;
                        break;
                case NOT_READY:
                        /* If the device is in the process of becoming
                         * ready, or has a temporary blockage, retry.
                         */
                        if (sshdr.asc == 0x04) {
                                switch (sshdr.ascq) {
                                case 0x01: /* becoming ready */
                                case 0x04: /* format in progress */
                                case 0x05: /* rebuild in progress */
                                case 0x06: /* recalculation in progress */
                                case 0x07: /* operation in progress */
                                case 0x08: /* Long write in progress */
                                case 0x09: /* self test in progress */
                                case 0x11: /* notify (enable spinup) required */
                                case 0x14: /* space allocation in progress */
                                case 0x1a: /* start stop unit in progress */
                                case 0x1b: /* sanitize in progress */
                                case 0x1d: /* configuration in progress */
                                case 0x24: /* depopulation in progress */
                                        action = ACTION_DELAYED_RETRY;
                                        break;
                                case 0x0a: /* ALUA state transition */
                                        blk_stat = BLK_STS_AGAIN;
                                        fallthrough;
                                default:
                                        action = ACTION_FAIL;
                                        break;
                                }
                        } else
                                action = ACTION_FAIL;
                        break;
                case VOLUME_OVERFLOW:
                        /* See SSC3rXX or current. */
                        action = ACTION_FAIL;
                        break;
                case DATA_PROTECT:
                        action = ACTION_FAIL;
                        if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) ||
                            (sshdr.asc == 0x55 &&
                             (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) {
                                /* Insufficient zone resources */
                                blk_stat = BLK_STS_ZONE_OPEN_RESOURCE;
                        }
                        break;
                default:
                        action = ACTION_FAIL;
                        break;
                }
        } else
                action = ACTION_FAIL;

        if (action != ACTION_FAIL && scsi_cmd_runtime_exceeced(cmd))
                action = ACTION_FAIL;

        switch (action) {
        case ACTION_FAIL:
                /* Give up and fail the remainder of the request */
                if (!(req->rq_flags & RQF_QUIET)) {
                        static DEFINE_RATELIMIT_STATE(_rs,
                                        DEFAULT_RATELIMIT_INTERVAL,
                                        DEFAULT_RATELIMIT_BURST);

                        if (unlikely(scsi_logging_level))
                                level =
                                     SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
                                                    SCSI_LOG_MLCOMPLETE_BITS);

                        /*
                         * if logging is enabled the failure will be printed
                         * in scsi_log_completion(), so avoid duplicate messages
                         */
                        if (!level && __ratelimit(&_rs)) {
                                scsi_print_result(cmd, NULL, FAILED);
                                if (sense_valid)
                                        scsi_print_sense(cmd);
                                scsi_print_command(cmd);
                        }
                }
                if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
                        return;
                fallthrough;
        case ACTION_REPREP:
                scsi_io_completion_reprep(cmd, q);
                break;
        case ACTION_RETRY:
                /* Retry the same command immediately */
                __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
                break;
        case ACTION_DELAYED_RETRY:
                /* Retry the same command after a delay */
                __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
                break;
        }
}

/*
 * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a
 * new result that may suppress further error checking. Also modifies
 * *blk_statp in some cases.
 */
static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
                                        blk_status_t *blk_statp)
{
        bool sense_valid;
        bool sense_current = true;      /* false implies "deferred sense" */
        struct request *req = cmd->request;
        struct scsi_sense_hdr sshdr;

        sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
        if (sense_valid)
                sense_current = !scsi_sense_is_deferred(&sshdr);

        if (blk_rq_is_passthrough(req)) {
                if (sense_valid) {
                        /*
                         * SG_IO wants current and deferred errors
                         */
                        scsi_req(req)->sense_len =
                                min(8 + cmd->sense_buffer[7],
                                    SCSI_SENSE_BUFFERSIZE);
                }
                if (sense_current)
                        *blk_statp = scsi_result_to_blk_status(cmd, result);
        } else if (blk_rq_bytes(req) == 0 && sense_current) {
                /*
                 * Flush commands do not transfer any data, and thus cannot use
                 * good_bytes != blk_rq_bytes(req) as the signal for an error.
                 * This sets *blk_statp explicitly for the problem case.
                 */
                *blk_statp = scsi_result_to_blk_status(cmd, result);
        }
        /*
         * Recovered errors need reporting, but they're always treated as
         * success, so fiddle the result code here.  For passthrough requests
         * we already took a copy of the original into sreq->result which
         * is what gets returned to the user
         */
        if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
                bool do_print = true;
                /*
                 * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d]
                 * skip print since caller wants ATA registers. Only occurs
                 * on SCSI ATA PASS_THROUGH commands when CK_COND=1
                 */
                if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
                        do_print = false;
                else if (req->rq_flags & RQF_QUIET)
                        do_print = false;
                if (do_print)
                        scsi_print_sense(cmd);
                result = 0;
                /* for passthrough, *blk_statp may be set */
                *blk_statp = BLK_STS_OK;
        }
        /*
         * Another corner case: the SCSI status byte is non-zero but 'good'.
         * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
         * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
         * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
         * intermediate statuses (both obsolete in SAM-4) as good.
         */
        if ((result & 0xff) && scsi_status_is_good(result)) {
                result = 0;
                *blk_statp = BLK_STS_OK;
        }
        return result;
}

/**
 * scsi_io_completion - Completion processing for SCSI commands.
 * @cmd:        command that is finished.
 * @good_bytes: number of processed bytes.
 *
 * We will finish off the specified number of sectors. If we are done, the
 * command block will be released and the queue function will be goosed. If we
 * are not done then we have to figure out what to do next:
 *
 *   a) We can call scsi_io_completion_reprep().  The request will be
 *      unprepared and put back on the queue.  Then a new command will
 *      be created for it.  This should be used if we made forward
 *      progress, or if we want to switch from READ(10) to READ(6) for
 *      example.
 *
 *   b) We can call scsi_io_completion_action().  The request will be
 *      put back on the queue and retried using the same command as
 *      before, possibly after a delay.
 *
 *   c) We can call scsi_end_request() with blk_stat other than
 *      BLK_STS_OK, to fail the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
        int result = cmd->result;
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        blk_status_t blk_stat = BLK_STS_OK;

        if (unlikely(result))   /* a nz result may or may not be an error */
                result = scsi_io_completion_nz_result(cmd, result, &blk_stat);

        if (unlikely(blk_rq_is_passthrough(req))) {
                /*
                 * scsi_result_to_blk_status may have reset the host_byte
                 */
                scsi_req(req)->result = cmd->result;
        }

        /*
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
        SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
                "%u sectors total, %d bytes done.\n",
                blk_rq_sectors(req), good_bytes));

        /*
         * Failed, zero length commands always need to drop down
         * to retry code. Fast path should return in this block.
         */
        if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
                if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
                        return; /* no bytes remaining */
        }

        /* Kill remainder if no retries. */
        if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
                if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
                        WARN_ONCE(true,
                            "Bytes remaining after failed, no-retry command");
                return;
        }

        /*
         * If there had been no error, but we have leftover bytes in the
         * request, just queue the command up again.
         */
        if (likely(result == 0))
                scsi_io_completion_reprep(cmd, q);
        else
                scsi_io_completion_action(cmd, result);
}

static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
                struct request *rq)
{
        return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
               !op_is_write(req_op(rq)) &&
               sdev->host->hostt->dma_need_drain(rq);
}

/**
 * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists
 * @cmd: SCSI command data structure to initialize.
 *
 * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled
 * for @cmd.
 *
 * Returns:
 * * BLK_STS_OK       - on success
 * * BLK_STS_RESOURCE - if the failure is retryable
 * * BLK_STS_IOERR    - if the failure is fatal
 */
blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct request *rq = cmd->request;
        unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
        struct scatterlist *last_sg = NULL;
        blk_status_t ret;
        bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
        int count;

        if (WARN_ON_ONCE(!nr_segs))
                return BLK_STS_IOERR;

        /*
         * Make sure there is space for the drain.  The driver must adjust
         * max_hw_segments to be prepared for this.
         */
        if (need_drain)
                nr_segs++;

        /*
         * If sg table allocation fails, requeue request later.
         */
        if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
                        cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT)))
                return BLK_STS_RESOURCE;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);

        if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
                unsigned int pad_len =
                        (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                last_sg->length += pad_len;
                cmd->extra_len += pad_len;
        }

        if (need_drain) {
                sg_unmark_end(last_sg);
                last_sg = sg_next(last_sg);
                sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
                sg_mark_end(last_sg);

                cmd->extra_len += sdev->dma_drain_len;
                count++;
        }

        BUG_ON(count > cmd->sdb.table.nents);
        cmd->sdb.table.nents = count;
        cmd->sdb.length = blk_rq_payload_bytes(rq);

        if (blk_integrity_rq(rq)) {
                struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
                int ivecs;

                if (WARN_ON_ONCE(!prot_sdb)) {
                        /*
                         * This can happen if someone (e.g. multipath)
                         * queues a command to a device on an adapter
                         * that does not support DIX.
                         */
                        ret = BLK_STS_IOERR;
                        goto out_free_sgtables;
                }

                ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

                if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
                                prot_sdb->table.sgl,
                                SCSI_INLINE_PROT_SG_CNT)) {
                        ret = BLK_STS_RESOURCE;
                        goto out_free_sgtables;
                }

                count = blk_rq_map_integrity_sg(rq->q, rq->bio,
                                                prot_sdb->table.sgl);
                BUG_ON(count > ivecs);
                BUG_ON(count > queue_max_integrity_segments(rq->q));

                cmd->prot_sdb = prot_sdb;
                cmd->prot_sdb->table.nents = count;
        }

        return BLK_STS_OK;
out_free_sgtables:
        scsi_free_sgtables(cmd);
        return ret;
}
EXPORT_SYMBOL(scsi_alloc_sgtables);
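
/*
 * Example (editor's illustration, not part of the original file): a minimal
 * sketch of how an upper-level driver's command-preparation path might pair
 * scsi_alloc_sgtables() with scsi_free_sgtables().  The function name and
 * the setup step below are hypothetical.
 */
#if 0   /* illustrative only */
static blk_status_t example_init_command(struct scsi_cmnd *cmd)
{
        blk_status_t ret;

        ret = scsi_alloc_sgtables(cmd);
        if (ret != BLK_STS_OK)
                return ret;     /* RESOURCE retries later, IOERR fails */

        /* ... fill in cmd->cmnd here; undo the allocation on failure ... */
        if (0 /* hypothetical setup error */) {
                scsi_free_sgtables(cmd);
                return BLK_STS_IOERR;
        }
        return BLK_STS_OK;
}
#endif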

/**
 * scsi_initialize_rq - initialize struct scsi_cmnd partially
 * @rq: Request associated with the SCSI command to be initialized.
 *
 * This function initializes the members of struct scsi_cmnd that must be
 * initialized before request processing starts and that won't be
 * reinitialized if a SCSI command is requeued.
 *
 * Called from inside blk_get_request() for pass-through requests and from
 * inside scsi_init_command() for filesystem requests.
 */
static void scsi_initialize_rq(struct request *rq)
{
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

        scsi_req_init(&cmd->req);
        init_rcu_head(&cmd->rcu);
        cmd->jiffies_at_alloc = jiffies;
        cmd->retries = 0;
}

/*
 * Only called when the request isn't completed by SCSI, and not freed by
 * SCSI
 */
static void scsi_cleanup_rq(struct request *rq)
{
        if (rq->rq_flags & RQF_DONTPREP) {
                scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
                rq->rq_flags &= ~RQF_DONTPREP;
        }
}

/* Called before a request is prepared. See also scsi_mq_prep_fn(). */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
        void *buf = cmd->sense_buffer;
        void *prot = cmd->prot_sdb;
        struct request *rq = blk_mq_rq_from_pdu(cmd);
        unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
        unsigned long jiffies_at_alloc;
        int retries, to_clear;
        bool in_flight;
        int budget_token = cmd->budget_token;

        if (!blk_rq_is_passthrough(rq) && !(flags & SCMD_INITIALIZED)) {
                flags |= SCMD_INITIALIZED;
                scsi_initialize_rq(rq);
        }

        jiffies_at_alloc = cmd->jiffies_at_alloc;
        retries = cmd->retries;
        in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
        /*
         * Zero out the cmd, except for the embedded scsi_request. Only clear
         * the driver-private command data if the LLD does not supply a
         * function to initialize that data.
         */
        to_clear = sizeof(*cmd) - sizeof(cmd->req);
        if (!dev->host->hostt->init_cmd_priv)
                to_clear += dev->host->hostt->cmd_size;
        memset((char *)cmd + sizeof(cmd->req), 0, to_clear);

        cmd->device = dev;
        cmd->sense_buffer = buf;
        cmd->prot_sdb = prot;
        cmd->flags = flags;
        INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
        cmd->jiffies_at_alloc = jiffies_at_alloc;
        cmd->retries = retries;
        if (in_flight)
                __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
        cmd->budget_token = budget_token;

}

static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
                struct request *req)
{
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

        /*
         * Passthrough requests may transfer data, in which case they must
         * have a bio attached to them.  Or they might contain a SCSI command
         * that does not transfer data, in which case they may optionally
         * submit a request without an attached bio.
         */
        if (req->bio) {
                blk_status_t ret = scsi_alloc_sgtables(cmd);
                if (unlikely(ret != BLK_STS_OK))
                        return ret;
        } else {
                BUG_ON(blk_rq_bytes(req));

                memset(&cmd->sdb, 0, sizeof(cmd->sdb));
        }

        cmd->cmd_len = scsi_req(req)->cmd_len;
        if (cmd->cmd_len == 0)
                cmd->cmd_len = scsi_command_size(cmd->cmnd);
        cmd->cmnd = scsi_req(req)->cmd;
        cmd->transfersize = blk_rq_bytes(req);
        cmd->allowed = scsi_req(req)->retries;
        return BLK_STS_OK;
}

static blk_status_t
scsi_device_state_check(struct scsi_device *sdev, struct request *req)
{
        switch (sdev->sdev_state) {
        case SDEV_CREATED:
                return BLK_STS_OK;
        case SDEV_OFFLINE:
        case SDEV_TRANSPORT_OFFLINE:
                /*
                 * If the device is offline we refuse to process any
                 * commands.  The device must be brought online
                 * before trying any recovery commands.
                 */
                if (!sdev->offline_already) {
                        sdev->offline_already = true;
                        sdev_printk(KERN_ERR, sdev,
                                    "rejecting I/O to offline device\n");
                }
                return BLK_STS_IOERR;
        case SDEV_DEL:
                /*
                 * If the device is fully deleted, we refuse to
                 * process any commands as well.
                 */
                sdev_printk(KERN_ERR, sdev,
                            "rejecting I/O to dead device\n");
                return BLK_STS_IOERR;
        case SDEV_BLOCK:
        case SDEV_CREATED_BLOCK:
                return BLK_STS_RESOURCE;
        case SDEV_QUIESCE:
                /*
                 * If the device is blocked we only accept power management
                 * commands.
                 */
                if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
                        return BLK_STS_RESOURCE;
                return BLK_STS_OK;
        default:
                /*
                 * For any other not fully online state we only allow
                 * power management commands.
                 */
                if (req && !(req->rq_flags & RQF_PM))
                        return BLK_STS_IOERR;
                return BLK_STS_OK;
        }
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, assign one token
 * and return the token else return -1.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
                                  struct scsi_device *sdev)
{
        int token;

        token = sbitmap_get(&sdev->budget_map);
        if (atomic_read(&sdev->device_blocked)) {
                if (token < 0)
                        goto out;

                if (scsi_device_busy(sdev) > 1)
                        goto out_dec;

                /*
                 * unblock after device_blocked iterates to zero
                 */
                if (atomic_dec_return(&sdev->device_blocked) > 0)
                        goto out_dec;
                SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
                                   "unblocking device at zero depth\n"));
        }

        return token;
out_dec:
        if (token >= 0)
                sbitmap_put(&sdev->budget_map, token);
out:
        return -1;
}

/*
 * scsi_target_queue_ready: checks whether we can send commands to the target
 * @sdev: scsi device on starget to check.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
                                           struct scsi_device *sdev)
{
        struct scsi_target *starget = scsi_target(sdev);
        unsigned int busy;

        if (starget->single_lun) {
                spin_lock_irq(shost->host_lock);
                if (starget->starget_sdev_user &&
                    starget->starget_sdev_user != sdev) {
                        spin_unlock_irq(shost->host_lock);
                        return 0;
                }
                starget->starget_sdev_user = sdev;
                spin_unlock_irq(shost->host_lock);
        }

        if (starget->can_queue <= 0)
                return 1;

        busy = atomic_inc_return(&starget->target_busy) - 1;
        if (atomic_read(&starget->target_blocked) > 0) {
                if (busy)
                        goto starved;

                /*
                 * unblock after target_blocked iterates to zero
                 */
                if (atomic_dec_return(&starget->target_blocked) > 0)
                        goto out_dec;

                SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
                                 "unblocking target at zero depth\n"));
        }

        if (busy >= starget->can_queue)
                goto starved;

        return 1;

starved:
        spin_lock_irq(shost->host_lock);
        list_move_tail(&sdev->starved_entry, &shost->starved_list);
        spin_unlock_irq(shost->host_lock);
out_dec:
        if (starget->can_queue > 0)
                atomic_dec(&starget->target_busy);
        return 0;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
                                   struct Scsi_Host *shost,
                                   struct scsi_device *sdev,
                                   struct scsi_cmnd *cmd)
{
        if (scsi_host_in_recovery(shost))
                return 0;

        if (atomic_read(&shost->host_blocked) > 0) {
                if (scsi_host_busy(shost) > 0)
                        goto starved;

                /*
                 * unblock after host_blocked iterates to zero
                 */
                if (atomic_dec_return(&shost->host_blocked) > 0)
                        goto out_dec;

                SCSI_LOG_MLQUEUE(3,
                        shost_printk(KERN_INFO, shost,
                                     "unblocking host at zero depth\n"));
        }

        if (shost->host_self_blocked)
                goto starved;

        /* We're OK to process the command, so we can't be starved */
        if (!list_empty(&sdev->starved_entry)) {
                spin_lock_irq(shost->host_lock);
                if (!list_empty(&sdev->starved_entry))
                        list_del_init(&sdev->starved_entry);
                spin_unlock_irq(shost->host_lock);
        }

        __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);

        return 1;

starved:
        spin_lock_irq(shost->host_lock);
        if (list_empty(&sdev->starved_entry))
                list_add_tail(&sdev->starved_entry, &shost->starved_list);
        spin_unlock_irq(shost->host_lock);
out_dec:
        scsi_dec_host_busy(shost, cmd);
        return 0;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When SCSI can't dispatch I/Os anymore and needs to kill I/Os, it
 * must return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */
static bool scsi_mq_lld_busy(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;

        if (blk_queue_dying(q))
                return false;

        shost = sdev->host;

        /*
         * Ignore host/starget busy state.
         * Since block layer does not have a concept of fairness across
         * multiple queues, congestion of host/starget needs to be handled
         * in SCSI layer.
         */
        if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
                return true;

        return false;
}

/*
 * Block layer request completion callback. May be called from interrupt
 * context.
 */
static void scsi_complete(struct request *rq)
{
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
        enum scsi_disposition disposition;

        INIT_LIST_HEAD(&cmd->eh_entry);

        atomic_inc(&cmd->device->iodone_cnt);
        if (cmd->result)
                atomic_inc(&cmd->device->ioerr_cnt);

        disposition = scsi_decide_disposition(cmd);
        if (disposition != SUCCESS && scsi_cmd_runtime_exceeced(cmd))
                disposition = SUCCESS;

        scsi_log_completion(cmd, disposition);

        switch (disposition) {
        case SUCCESS:
                scsi_finish_command(cmd);
                break;
        case NEEDS_RETRY:
                scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
                break;
        case ADD_TO_MLQUEUE:
                scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
                break;
        default:
                scsi_eh_scmd_add(cmd);
                break;
        }
}

1439/**
1440 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
1441 * @cmd: command block we are dispatching.
1442 *
1443 * Return: nonzero if the request was rejected and the device's queue needs
1444 * to be plugged.
1445 */
1446static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
1447{
1448        struct Scsi_Host *host = cmd->device->host;
1449        int rtn = 0;
1450
1451        atomic_inc(&cmd->device->iorequest_cnt);
1452
1453        /* check if the device is still usable */
1454        if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
1455                /* in SDEV_DEL we error all commands. DID_NO_CONNECT
1456                 * returns an immediate error upwards, and signals
1457                 * that the device is no longer present */
1458                cmd->result = DID_NO_CONNECT << 16;
1459                goto done;
1460        }
1461
1462        /* Check to see if the scsi lld made this device blocked. */
1463        if (unlikely(scsi_device_blocked(cmd->device))) {
1464                /*
1465                 * in blocked state, the command is just put back on
1466                 * the device queue.  The suspend state has already
1467                 * blocked the queue so future requests should not
1468                 * occur until the device transitions out of the
1469                 * suspend state.
1470                 */
1471                SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1472                        "queuecommand : device blocked\n"));
1473                return SCSI_MLQUEUE_DEVICE_BUSY;
1474        }
1475
1476        /* Store the LUN value in cmnd, if needed. */
1477        if (cmd->device->lun_in_cdb)
1478                cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
1479                               (cmd->device->lun << 5 & 0xe0);
1480
1481        scsi_log_send(cmd);
1482
1483        /*
1484         * Before we queue this command, check if the command
1485         * length exceeds what the host adapter can handle.
1486         */
1487        if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
1488                SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1489                               "queuecommand : command too long. "
1490                               "cdb_size=%d host->max_cmd_len=%d\n",
1491                               cmd->cmd_len, cmd->device->host->max_cmd_len));
1492                cmd->result = (DID_ABORT << 16);
1493                goto done;
1494        }
1495
1496        if (unlikely(host->shost_state == SHOST_DEL)) {
1497                cmd->result = (DID_NO_CONNECT << 16);
1498                goto done;
1500        }
1501
1502        trace_scsi_dispatch_cmd_start(cmd);
1503        rtn = host->hostt->queuecommand(host, cmd);
1504        if (rtn) {
1505                trace_scsi_dispatch_cmd_error(cmd, rtn);
1506                if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
1507                    rtn != SCSI_MLQUEUE_TARGET_BUSY)
1508                        rtn = SCSI_MLQUEUE_HOST_BUSY;
1509
1510                SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1511                        "queuecommand : request rejected\n"));
1512        }
1513
1514        return rtn;
1515 done:
1516        cmd->scsi_done(cmd);
1517        return 0;
1518}
1519
1520/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
1521static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
1522{
1523        return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
1524                sizeof(struct scatterlist);
1525}
1526
1527static blk_status_t scsi_prepare_cmd(struct request *req)
1528{
1529        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1530        struct scsi_device *sdev = req->q->queuedata;
1531        struct Scsi_Host *shost = sdev->host;
1532        struct scatterlist *sg;
1533
1534        scsi_init_command(sdev, cmd);
1535
1536        cmd->request = req;
1537        cmd->tag = req->tag;
1538        cmd->prot_op = SCSI_PROT_NORMAL;
1539        if (blk_rq_bytes(req))
1540                cmd->sc_data_direction = rq_dma_dir(req);
1541        else
1542                cmd->sc_data_direction = DMA_NONE;
1543
1544        sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
1545        cmd->sdb.table.sgl = sg;
1546
1547        if (scsi_host_get_prot(shost)) {
1548                memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
1549
1550                cmd->prot_sdb->table.sgl =
1551                        (struct scatterlist *)(cmd->prot_sdb + 1);
1552        }
1553
1554        /*
1555         * Special handling for passthrough commands, which don't go to the ULP
1556         * at all:
1557         */
1558        if (blk_rq_is_passthrough(req))
1559                return scsi_setup_scsi_cmnd(sdev, req);
1560
1561        if (sdev->handler && sdev->handler->prep_fn) {
1562                blk_status_t ret = sdev->handler->prep_fn(sdev, req);
1563
1564                if (ret != BLK_STS_OK)
1565                        return ret;
1566        }
1567
1568        cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
1569        memset(cmd->cmnd, 0, BLK_MAX_CDB);
1570        return scsi_cmd_to_driver(cmd)->init_command(cmd);
1571}
1572
1573static void scsi_mq_done(struct scsi_cmnd *cmd)
1574{
1575        if (unlikely(blk_should_fake_timeout(cmd->request->q)))
1576                return;
1577        if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
1578                return;
1579        trace_scsi_dispatch_cmd_done(cmd);
1580        blk_mq_complete_request(cmd->request);
1581}
1582
1583static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
1584{
1585        struct scsi_device *sdev = q->queuedata;
1586
1587        sbitmap_put(&sdev->budget_map, budget_token);
1588}
1589
1590static int scsi_mq_get_budget(struct request_queue *q)
1591{
1592        struct scsi_device *sdev = q->queuedata;
1593        int token = scsi_dev_queue_ready(q, sdev);
1594
1595        if (token >= 0)
1596                return token;
1597
1598        atomic_inc(&sdev->restarts);
1599
1600        /*
1601         * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy).
1602         * .restarts must be incremented before .device_busy is read because the
1603         * code in scsi_run_queue_async() depends on the order of these operations.
1604         */
1605        smp_mb__after_atomic();
1606
1607        /*
1608         * If all in-flight requests originated from this LUN are completed
1609         * before reading .device_busy, sdev->device_busy will be observed as
1610         * zero, then blk_mq_delay_run_hw_queues() will dispatch this request
1611         * soon. Otherwise, completion of one of these requests will observe
1612         * the .restarts flag, and the request queue will be run for handling
1613         * this request, see scsi_end_request().
1614         */
1615        if (unlikely(scsi_device_busy(sdev) == 0 &&
1616                                !scsi_device_blocked(sdev)))
1617                blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
1618        return -1;
1619}
1620
1621static void scsi_mq_set_rq_budget_token(struct request *req, int token)
1622{
1623        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1624
1625        cmd->budget_token = token;
1626}
1627
1628static int scsi_mq_get_rq_budget_token(struct request *req)
1629{
1630        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1631
1632        return cmd->budget_token;
1633}
1634
1635static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
1636                         const struct blk_mq_queue_data *bd)
1637{
1638        struct request *req = bd->rq;
1639        struct request_queue *q = req->q;
1640        struct scsi_device *sdev = q->queuedata;
1641        struct Scsi_Host *shost = sdev->host;
1642        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1643        blk_status_t ret;
1644        int reason;
1645
1646        WARN_ON_ONCE(cmd->budget_token < 0);
1647
1648        /*
1649         * If the device is not in running state we will reject some or all
1650         * commands.
1651         */
1652        if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1653                ret = scsi_device_state_check(sdev, req);
1654                if (ret != BLK_STS_OK)
1655                        goto out_put_budget;
1656        }
1657
1658        ret = BLK_STS_RESOURCE;
1659        if (!scsi_target_queue_ready(shost, sdev))
1660                goto out_put_budget;
1661        if (!scsi_host_queue_ready(q, shost, sdev, cmd))
1662                goto out_dec_target_busy;
1663
1664        if (!(req->rq_flags & RQF_DONTPREP)) {
1665                ret = scsi_prepare_cmd(req);
1666                if (ret != BLK_STS_OK)
1667                        goto out_dec_host_busy;
1668                req->rq_flags |= RQF_DONTPREP;
1669        } else {
1670                clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
1671        }
1672
1673        cmd->flags &= SCMD_PRESERVED_FLAGS;
1674        if (sdev->simple_tags)
1675                cmd->flags |= SCMD_TAGGED;
1676        if (bd->last)
1677                cmd->flags |= SCMD_LAST;
1678
1679        scsi_set_resid(cmd, 0);
1680        memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1681        cmd->scsi_done = scsi_mq_done;
1682
1683        blk_mq_start_request(req);
1684        reason = scsi_dispatch_cmd(cmd);
1685        if (reason) {
1686                scsi_set_blocked(cmd, reason);
1687                ret = BLK_STS_RESOURCE;
1688                goto out_dec_host_busy;
1689        }
1690
1691        return BLK_STS_OK;
1692
1693out_dec_host_busy:
1694        scsi_dec_host_busy(shost, cmd);
1695out_dec_target_busy:
1696        if (scsi_target(sdev)->can_queue > 0)
1697                atomic_dec(&scsi_target(sdev)->target_busy);
1698out_put_budget:
1699        scsi_mq_put_budget(q, cmd->budget_token);
1700        cmd->budget_token = -1;
1701        switch (ret) {
1702        case BLK_STS_OK:
1703                break;
1704        case BLK_STS_RESOURCE:
1705        case BLK_STS_ZONE_RESOURCE:
1706                if (scsi_device_blocked(sdev))
1707                        ret = BLK_STS_DEV_RESOURCE;
1708                break;
1709        case BLK_STS_AGAIN:
1710                scsi_req(req)->result = DID_BUS_BUSY << 16;
1711                if (req->rq_flags & RQF_DONTPREP)
1712                        scsi_mq_uninit_cmd(cmd);
1713                break;
1714        default:
1715                if (unlikely(!scsi_device_online(sdev)))
1716                        scsi_req(req)->result = DID_NO_CONNECT << 16;
1717                else
1718                        scsi_req(req)->result = DID_ERROR << 16;
1719                /*
1720                 * Make sure to release all allocated resources when
1721                 * we hit an error, as we will never see this command
1722                 * again.
1723                 */
1724                if (req->rq_flags & RQF_DONTPREP)
1725                        scsi_mq_uninit_cmd(cmd);
1726                scsi_run_queue_async(sdev);
1727                break;
1728        }
1729        return ret;
1730}
1731
1732static enum blk_eh_timer_return scsi_timeout(struct request *req,
1733                bool reserved)
1734{
1735        if (reserved)
1736                return BLK_EH_RESET_TIMER;
1737        return scsi_times_out(req);
1738}
1739
1740static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
1741                                unsigned int hctx_idx, unsigned int numa_node)
1742{
1743        struct Scsi_Host *shost = set->driver_data;
1744        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1745        struct scatterlist *sg;
1746        int ret = 0;
1747
1748        cmd->sense_buffer =
1749                kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
1750        if (!cmd->sense_buffer)
1751                return -ENOMEM;
1752        cmd->req.sense = cmd->sense_buffer;
1753
1754        if (scsi_host_get_prot(shost)) {
1755                sg = (void *)cmd + sizeof(struct scsi_cmnd) +
1756                        shost->hostt->cmd_size;
1757                cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
1758        }
1759
1760        if (shost->hostt->init_cmd_priv) {
1761                ret = shost->hostt->init_cmd_priv(shost, cmd);
1762                if (ret < 0)
1763                        kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
1764        }
1765
1766        return ret;
1767}
1768
1769static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1770                                 unsigned int hctx_idx)
1771{
1772        struct Scsi_Host *shost = set->driver_data;
1773        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1774
1775        if (shost->hostt->exit_cmd_priv)
1776                shost->hostt->exit_cmd_priv(shost, cmd);
1777        kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
1778}
1779
1780
1781static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx)
1782{
1783        struct Scsi_Host *shost = hctx->driver_data;
1784
1785        if (shost->hostt->mq_poll)
1786                return shost->hostt->mq_poll(shost, hctx->queue_num);
1787
1788        return 0;
1789}
1790
1791static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1792                          unsigned int hctx_idx)
1793{
1794        struct Scsi_Host *shost = data;
1795
1796        hctx->driver_data = shost;
1797        return 0;
1798}
1799
1800static int scsi_map_queues(struct blk_mq_tag_set *set)
1801{
1802        struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
1803
1804        if (shost->hostt->map_queues)
1805                return shost->hostt->map_queues(shost);
1806        return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
1807}
1808
1809void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
1810{
1811        struct device *dev = shost->dma_dev;
1812
1813        /*
1814         * this limit is imposed by hardware restrictions
1815         */
1816        blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1817                                        SG_MAX_SEGMENTS));
1818
1819        if (scsi_host_prot_dma(shost)) {
1820                shost->sg_prot_tablesize =
1821                        min_not_zero(shost->sg_prot_tablesize,
1822                                     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
1823                BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1824                blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1825        }
1826
1827        if (dev->dma_mask) {
1828                shost->max_sectors = min_t(unsigned int, shost->max_sectors,
1829                                dma_max_mapping_size(dev) >> SECTOR_SHIFT);
1830        }
1831        blk_queue_max_hw_sectors(q, shost->max_sectors);
1832        blk_queue_segment_boundary(q, shost->dma_boundary);
1833        dma_set_seg_boundary(dev, shost->dma_boundary);
1834
1835        blk_queue_max_segment_size(q, shost->max_segment_size);
1836        blk_queue_virt_boundary(q, shost->virt_boundary_mask);
1837        dma_set_max_seg_size(dev, queue_max_segment_size(q));
1838
1839        /*
1840         * Set a reasonable default alignment: the larger of a 4-byte
1841         * (32-bit dword) boundary, which is a common minimum for HBAs,
1842         * and the minimum DMA alignment, which is set by the platform.
1843         *
1844         * Devices that require a bigger alignment can increase it later.
1845         */
1846        blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
1847}
1848EXPORT_SYMBOL_GPL(__scsi_init_queue);
1849
1850static const struct blk_mq_ops scsi_mq_ops_no_commit = {
1851        .get_budget     = scsi_mq_get_budget,
1852        .put_budget     = scsi_mq_put_budget,
1853        .queue_rq       = scsi_queue_rq,
1854        .complete       = scsi_complete,
1855        .timeout        = scsi_timeout,
1856#ifdef CONFIG_BLK_DEBUG_FS
1857        .show_rq        = scsi_show_rq,
1858#endif
1859        .init_request   = scsi_mq_init_request,
1860        .exit_request   = scsi_mq_exit_request,
1861        .initialize_rq_fn = scsi_initialize_rq,
1862        .cleanup_rq     = scsi_cleanup_rq,
1863        .busy           = scsi_mq_lld_busy,
1864        .map_queues     = scsi_map_queues,
1865        .init_hctx      = scsi_init_hctx,
1866        .poll           = scsi_mq_poll,
1867        .set_rq_budget_token = scsi_mq_set_rq_budget_token,
1868        .get_rq_budget_token = scsi_mq_get_rq_budget_token,
1869};
1870
1871
1872static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
1873{
1874        struct Scsi_Host *shost = hctx->driver_data;
1875
1876        shost->hostt->commit_rqs(shost, hctx->queue_num);
1877}
1878
1879static const struct blk_mq_ops scsi_mq_ops = {
1880        .get_budget     = scsi_mq_get_budget,
1881        .put_budget     = scsi_mq_put_budget,
1882        .queue_rq       = scsi_queue_rq,
1883        .commit_rqs     = scsi_commit_rqs,
1884        .complete       = scsi_complete,
1885        .timeout        = scsi_timeout,
1886#ifdef CONFIG_BLK_DEBUG_FS
1887        .show_rq        = scsi_show_rq,
1888#endif
1889        .init_request   = scsi_mq_init_request,
1890        .exit_request   = scsi_mq_exit_request,
1891        .initialize_rq_fn = scsi_initialize_rq,
1892        .cleanup_rq     = scsi_cleanup_rq,
1893        .busy           = scsi_mq_lld_busy,
1894        .map_queues     = scsi_map_queues,
1895        .init_hctx      = scsi_init_hctx,
1896        .poll           = scsi_mq_poll,
1897        .set_rq_budget_token = scsi_mq_set_rq_budget_token,
1898        .get_rq_budget_token = scsi_mq_get_rq_budget_token,
1899};
1900
1901int scsi_mq_setup_tags(struct Scsi_Host *shost)
1902{
1903        unsigned int cmd_size, sgl_size;
1904        struct blk_mq_tag_set *tag_set = &shost->tag_set;
1905
1906        sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
1907                                scsi_mq_inline_sgl_size(shost));
1908        cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
1909        if (scsi_host_get_prot(shost))
1910                cmd_size += sizeof(struct scsi_data_buffer) +
1911                        sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
1912
1913        memset(tag_set, 0, sizeof(*tag_set));
1914        if (shost->hostt->commit_rqs)
1915                tag_set->ops = &scsi_mq_ops;
1916        else
1917                tag_set->ops = &scsi_mq_ops_no_commit;
1918        tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
1919        tag_set->nr_maps = shost->nr_maps ? : 1;
1920        tag_set->queue_depth = shost->can_queue;
1921        tag_set->cmd_size = cmd_size;
1922        tag_set->numa_node = NUMA_NO_NODE;
1923        tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
1924        tag_set->flags |=
1925                BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
1926        tag_set->driver_data = shost;
1927        if (shost->host_tagset)
1928                tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
1929
1930        return blk_mq_alloc_tag_set(tag_set);
1931}
1932
1933void scsi_mq_destroy_tags(struct Scsi_Host *shost)
1934{
1935        blk_mq_free_tag_set(&shost->tag_set);
1936}
1937
1938/**
1939 * scsi_device_from_queue - return sdev associated with a request_queue
1940 * @q: The request queue to return the sdev from
1941 *
1942 * Return the sdev associated with a request queue or NULL if the
1943 * request_queue does not reference a SCSI device.
1944 */
1945struct scsi_device *scsi_device_from_queue(struct request_queue *q)
1946{
1947        struct scsi_device *sdev = NULL;
1948
1949        if (q->mq_ops == &scsi_mq_ops_no_commit ||
1950            q->mq_ops == &scsi_mq_ops)
1951                sdev = q->queuedata;
1952        if (!sdev || !get_device(&sdev->sdev_gendev))
1953                sdev = NULL;
1954
1955        return sdev;
1956}
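
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a hypothetical caller using scsi_device_from_queue() and dropping the
 * reference it takes via get_device(). The helper name is an assumption.
 */
static inline bool example_queue_is_scsi(struct request_queue *q)
{
        struct scsi_device *sdev = scsi_device_from_queue(q);

        if (!sdev)
                return false;
        /* scsi_device_from_queue() took a reference; release it here */
        put_device(&sdev->sdev_gendev);
        return true;
}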
1957
1958/**
1959 * scsi_block_requests - Utility function used by low-level drivers to prevent
1960 * further commands from being queued to the device.
1961 * @shost:  host in question
1962 *
1963 * There is no timer nor any other means by which the requests get unblocked
1964 * other than the low-level driver calling scsi_unblock_requests().
1965 */
1966void scsi_block_requests(struct Scsi_Host *shost)
1967{
1968        shost->host_self_blocked = 1;
1969}
1970EXPORT_SYMBOL(scsi_block_requests);
1971
1972/**
1973 * scsi_unblock_requests - Utility function used by low-level drivers to allow
1974 * further commands to be queued to the device.
1975 * @shost:  host in question
1976 *
1977 * There is no timer nor any other means by which the requests get unblocked
1978 * other than the low-level driver calling scsi_unblock_requests(). This is done
1979 * as an API function so that changes to the internals of the scsi mid-layer
1980 * won't require wholesale changes to drivers that use this feature.
1981 */
1982void scsi_unblock_requests(struct Scsi_Host *shost)
1983{
1984        shost->host_self_blocked = 0;
1985        scsi_run_host_queues(shost);
1986}
1987EXPORT_SYMBOL(scsi_unblock_requests);
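
/*
 * Illustrative sketch (editorial addition): how a low-level driver might
 * bracket a controller reset with scsi_block_requests() and
 * scsi_unblock_requests(). The function name and the reset step are
 * hypothetical.
 */
static inline void example_lld_reset_hba(struct Scsi_Host *shost)
{
        scsi_block_requests(shost);
        /* ... hypothetical hardware reset; no new commands are queued ... */
        scsi_unblock_requests(shost);   /* also reruns the host's queues */
}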
1988
1989void scsi_exit_queue(void)
1990{
1991        kmem_cache_destroy(scsi_sense_cache);
1992}
1993
1994/**
1995 *      scsi_mode_select - issue a mode select
1996 *      @sdev:  SCSI device to be queried
1997 *      @pf:    Page format bit (1 == standard, 0 == vendor specific)
1998 *      @sp:    Save page bit (0 == don't save, 1 == save)
1999 *      @modepage: mode page being requested
2000 *      @buffer: request buffer (may not be smaller than eight bytes)
2001 *      @len:   length of request buffer.
2002 *      @timeout: command timeout
2003 *      @retries: number of retries before failing
2004 *      @data: returns a structure abstracting the mode header data
2005 *      @sshdr: place to put sense data (or NULL if no sense to be collected).
2006 *              must be SCSI_SENSE_BUFFERSIZE big.
2007 *
2008 *      Returns zero if successful; negative error number or scsi
2009 *      status on error
2010 *
2011 */
2012int
2013scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
2014                 unsigned char *buffer, int len, int timeout, int retries,
2015                 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2016{
2017        unsigned char cmd[10];
2018        unsigned char *real_buffer;
2019        int ret;
2020
2021        memset(cmd, 0, sizeof(cmd));
2022        cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
2023
2024        if (sdev->use_10_for_ms) {
2025                if (len > 65535)
2026                        return -EINVAL;
2027                real_buffer = kmalloc(8 + len, GFP_KERNEL);
2028                if (!real_buffer)
2029                        return -ENOMEM;
2030                memcpy(real_buffer + 8, buffer, len);
2031                len += 8;
2032                real_buffer[0] = 0;
2033                real_buffer[1] = 0;
2034                real_buffer[2] = data->medium_type;
2035                real_buffer[3] = data->device_specific;
2036                real_buffer[4] = data->longlba ? 0x01 : 0;
2037                real_buffer[5] = 0;
2038                real_buffer[6] = data->block_descriptor_length >> 8;
2039                real_buffer[7] = data->block_descriptor_length;
2040
2041                cmd[0] = MODE_SELECT_10;
2042                cmd[7] = len >> 8;
2043                cmd[8] = len;
2044        } else {
2045                if (len > 255 || data->block_descriptor_length > 255 ||
2046                    data->longlba)
2047                        return -EINVAL;
2048
2049                real_buffer = kmalloc(4 + len, GFP_KERNEL);
2050                if (!real_buffer)
2051                        return -ENOMEM;
2052                memcpy(real_buffer + 4, buffer, len);
2053                len += 4;
2054                real_buffer[0] = 0;
2055                real_buffer[1] = data->medium_type;
2056                real_buffer[2] = data->device_specific;
2057                real_buffer[3] = data->block_descriptor_length;
2058
2059                cmd[0] = MODE_SELECT;
2060                cmd[4] = len;
2061        }
2062
2063        ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
2064                               sshdr, timeout, retries, NULL);
2065        kfree(real_buffer);
2066        return ret;
2067}
2068EXPORT_SYMBOL_GPL(scsi_mode_select);
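
/*
 * Illustrative sketch (editorial addition): writing back a mode page that
 * was previously fetched with scsi_mode_sense(). PF=1 selects the standard
 * page format, SP=0 avoids saving to nonvolatile storage. The helper name,
 * timeout and retry counts are assumptions for the example.
 */
static inline int example_write_mode_page(struct scsi_device *sdev,
                                          unsigned char *page, int page_len,
                                          struct scsi_mode_data *data)
{
        struct scsi_sense_hdr sshdr;

        return scsi_mode_select(sdev, 1, 0, page[0] & 0x3f, page, page_len,
                                30 * HZ, 3, data, &sshdr);
}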
2069
2070/**
2071 *      scsi_mode_sense - issue a mode sense, falling back from a 10-byte to a 6-byte command if necessary.
2072 *      @sdev:  SCSI device to be queried
2073 *      @dbd:   set the DBD bit to prevent block descriptors from being returned
2074 *      @modepage: mode page being requested
2075 *      @buffer: request buffer (may not be smaller than eight bytes)
2076 *      @len:   length of request buffer.
2077 *      @timeout: command timeout
2078 *      @retries: number of retries before failing
2079 *      @data: returns a structure abstracting the mode header data
2080 *      @sshdr: place to put sense data (or NULL if no sense to be collected).
2081 *              must be SCSI_SENSE_BUFFERSIZE big.
2082 *
2083 *      Returns zero if successful, or a negative error number on failure
2084 */
2085int
2086scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
2087                  unsigned char *buffer, int len, int timeout, int retries,
2088                  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2089{
2090        unsigned char cmd[12];
2091        int use_10_for_ms;
2092        int header_length;
2093        int result, retry_count = retries;
2094        struct scsi_sense_hdr my_sshdr;
2095
2096        memset(data, 0, sizeof(*data));
2097        memset(&cmd[0], 0, 12);
2098
2099        dbd = sdev->set_dbd_for_ms ? 8 : dbd;
2100        cmd[1] = dbd & 0x18;    /* allows DBD and LLBA bits */
2101        cmd[2] = modepage;
2102
2103        /* caller might not be interested in sense, but we need it */
2104        if (!sshdr)
2105                sshdr = &my_sshdr;
2106
2107 retry:
2108        use_10_for_ms = sdev->use_10_for_ms;
2109
2110        if (use_10_for_ms) {
2111                if (len < 8)
2112                        len = 8;
2113
2114                cmd[0] = MODE_SENSE_10;
2115                cmd[8] = len;
2116                header_length = 8;
2117        } else {
2118                if (len < 4)
2119                        len = 4;
2120
2121                cmd[0] = MODE_SENSE;
2122                cmd[4] = len;
2123                header_length = 4;
2124        }
2125
2126        memset(buffer, 0, len);
2127
2128        result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
2129                                  sshdr, timeout, retries, NULL);
2130        if (result < 0)
2131                return result;
2132
2133        /* This code looks awful: what it's doing is making sure an
2134         * ILLEGAL REQUEST sense return identifies the actual command
2135         * byte as the problem.  MODE_SENSE commands can return
2136         * ILLEGAL REQUEST if the mode page isn't supported */
2137
2138        if (!scsi_status_is_good(result)) {
2139                if (scsi_sense_valid(sshdr)) {
2140                        if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
2141                            (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
2142                                /*
2143                                 * Invalid command operation code
2144                                 */
2145                                if (use_10_for_ms) {
2146                                        sdev->use_10_for_ms = 0;
2147                                        goto retry;
2148                                }
2149                        }
2150                        if (scsi_status_is_check_condition(result) &&
2151                            sshdr->sense_key == UNIT_ATTENTION &&
2152                            retry_count) {
2153                                retry_count--;
2154                                goto retry;
2155                        }
2156                }
2157                return -EIO;
2158        }
2159        if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2160                     (modepage == 6 || modepage == 8))) {
2161                /* Initio breakage? */
2162                header_length = 0;
2163                data->length = 13;
2164                data->medium_type = 0;
2165                data->device_specific = 0;
2166                data->longlba = 0;
2167                data->block_descriptor_length = 0;
2168        } else if (use_10_for_ms) {
2169                data->length = buffer[0]*256 + buffer[1] + 2;
2170                data->medium_type = buffer[2];
2171                data->device_specific = buffer[3];
2172                data->longlba = buffer[4] & 0x01;
2173                data->block_descriptor_length = buffer[6]*256
2174                        + buffer[7];
2175        } else {
2176                data->length = buffer[0] + 1;
2177                data->medium_type = buffer[1];
2178                data->device_specific = buffer[2];
2179                data->block_descriptor_length = buffer[3];
2180        }
2181        data->header_length = header_length;
2182
2183        return 0;
2184}
2185EXPORT_SYMBOL(scsi_mode_sense);
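
/*
 * Illustrative sketch (editorial addition): fetching the caching mode page
 * (0x08) with block descriptors suppressed (DBD bit, 0x08, in byte 1 of
 * the CDB). The helper name, timeout and retry counts are assumptions.
 */
static inline int example_read_caching_page(struct scsi_device *sdev,
                                            unsigned char *buf, int len)
{
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;

        return scsi_mode_sense(sdev, 0x08, 0x08, buf, len, 30 * HZ, 3,
                               &data, &sshdr);
}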
2186
2187/**
2188 *      scsi_test_unit_ready - test if unit is ready
2189 *      @sdev:  scsi device to change the state of.
2190 *      @timeout: command timeout
2191 *      @retries: number of retries before failing
2192 *      @sshdr: output pointer for decoded sense information.
2193 *
2194 *      Returns zero if successful, or an error if the TUR failed.  For
2195 *      removable media, UNIT_ATTENTION sets the ->changed flag.
2196 **/
2197int
2198scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2199                     struct scsi_sense_hdr *sshdr)
2200{
2201        char cmd[] = {
2202                TEST_UNIT_READY, 0, 0, 0, 0, 0,
2203        };
2204        int result;
2205
2206        /* try to eat the UNIT_ATTENTION if there are enough retries */
2207        do {
2208                result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2209                                          timeout, 1, NULL);
2210                if (sdev->removable && scsi_sense_valid(sshdr) &&
2211                    sshdr->sense_key == UNIT_ATTENTION)
2212                        sdev->changed = 1;
2213        } while (scsi_sense_valid(sshdr) &&
2214                 sshdr->sense_key == UNIT_ATTENTION && --retries);
2215
2216        return result;
2217}
2218EXPORT_SYMBOL(scsi_test_unit_ready);
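
/*
 * Illustrative sketch (editorial addition): polling a device for readiness.
 * The 30 second timeout and three retries (which also absorb pending
 * UNIT ATTENTION conditions, as described above) are assumptions.
 */
static inline int example_check_unit_ready(struct scsi_device *sdev)
{
        struct scsi_sense_hdr sshdr;

        return scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);
}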
2219
2220/**
2221 *      scsi_device_set_state - Take the given device through the device state model.
2222 *      @sdev:  scsi device to change the state of.
2223 *      @state: state to change to.
2224 *
2225 *      Returns zero if successful or an error if the requested
2226 *      transition is illegal.
2227 */
2228int
2229scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2230{
2231        enum scsi_device_state oldstate = sdev->sdev_state;
2232
2233        if (state == oldstate)
2234                return 0;
2235
2236        switch (state) {
2237        case SDEV_CREATED:
2238                switch (oldstate) {
2239                case SDEV_CREATED_BLOCK:
2240                        break;
2241                default:
2242                        goto illegal;
2243                }
2244                break;
2245
2246        case SDEV_RUNNING:
2247                switch (oldstate) {
2248                case SDEV_CREATED:
2249                case SDEV_OFFLINE:
2250                case SDEV_TRANSPORT_OFFLINE:
2251                case SDEV_QUIESCE:
2252                case SDEV_BLOCK:
2253                        break;
2254                default:
2255                        goto illegal;
2256                }
2257                break;
2258
2259        case SDEV_QUIESCE:
2260                switch (oldstate) {
2261                case SDEV_RUNNING:
2262                case SDEV_OFFLINE:
2263                case SDEV_TRANSPORT_OFFLINE:
2264                        break;
2265                default:
2266                        goto illegal;
2267                }
2268                break;
2269
2270        case SDEV_OFFLINE:
2271        case SDEV_TRANSPORT_OFFLINE:
2272                switch (oldstate) {
2273                case SDEV_CREATED:
2274                case SDEV_RUNNING:
2275                case SDEV_QUIESCE:
2276                case SDEV_BLOCK:
2277                        break;
2278                default:
2279                        goto illegal;
2280                }
2281                break;
2282
2283        case SDEV_BLOCK:
2284                switch (oldstate) {
2285                case SDEV_RUNNING:
2286                case SDEV_CREATED_BLOCK:
2287                case SDEV_QUIESCE:
2288                case SDEV_OFFLINE:
2289                        break;
2290                default:
2291                        goto illegal;
2292                }
2293                break;
2294
2295        case SDEV_CREATED_BLOCK:
2296                switch (oldstate) {
2297                case SDEV_CREATED:
2298                        break;
2299                default:
2300                        goto illegal;
2301                }
2302                break;
2303
2304        case SDEV_CANCEL:
2305                switch (oldstate) {
2306                case SDEV_CREATED:
2307                case SDEV_RUNNING:
2308                case SDEV_QUIESCE:
2309                case SDEV_OFFLINE:
2310                case SDEV_TRANSPORT_OFFLINE:
2311                        break;
2312                default:
2313                        goto illegal;
2314                }
2315                break;
2316
2317        case SDEV_DEL:
2318                switch (oldstate) {
2319                case SDEV_CREATED:
2320                case SDEV_RUNNING:
2321                case SDEV_OFFLINE:
2322                case SDEV_TRANSPORT_OFFLINE:
2323                case SDEV_CANCEL:
2324                case SDEV_BLOCK:
2325                case SDEV_CREATED_BLOCK:
2326                        break;
2327                default:
2328                        goto illegal;
2329                }
2330                break;
2331
2332        }
2333        sdev->offline_already = false;
2334        sdev->sdev_state = state;
2335        return 0;
2336
2337 illegal:
2338        SCSI_LOG_ERROR_RECOVERY(1,
2339                                sdev_printk(KERN_ERR, sdev,
2340                                            "Illegal state transition %s->%s",
2341                                            scsi_device_state_name(oldstate),
2342                                            scsi_device_state_name(state))
2343                                );
2344        return -EINVAL;
2345}
2346EXPORT_SYMBOL(scsi_device_set_state);
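
/*
 * Illustrative sketch (editorial addition): requesting a state transition
 * under sdev->state_mutex, following the locking pattern used elsewhere in
 * this file. The helper name is hypothetical.
 */
static inline int example_take_device_offline(struct scsi_device *sdev)
{
        int err;

        mutex_lock(&sdev->state_mutex);
        err = scsi_device_set_state(sdev, SDEV_OFFLINE); /* may be illegal */
        mutex_unlock(&sdev->state_mutex);
        return err;
}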
2347
2348/**
2349 *      scsi_evt_emit - emit a single SCSI device uevent
2350 *      @sdev: associated SCSI device
2351 *      @evt: event to emit
2352 *
2353 *      Send a single uevent (scsi_event) to the associated scsi_device.
2354 */
2355static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2356{
2357        int idx = 0;
2358        char *envp[3];
2359
2360        switch (evt->evt_type) {
2361        case SDEV_EVT_MEDIA_CHANGE:
2362                envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2363                break;
2364        case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2365                scsi_rescan_device(&sdev->sdev_gendev);
2366                envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
2367                break;
2368        case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2369                envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
2370                break;
2371        case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2372               envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
2373                break;
2374        case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2375                envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
2376                break;
2377        case SDEV_EVT_LUN_CHANGE_REPORTED:
2378                envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
2379                break;
2380        case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
2381                envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
2382                break;
2383        case SDEV_EVT_POWER_ON_RESET_OCCURRED:
2384                envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
2385                break;
2386        default:
2387                /* do nothing */
2388                break;
2389        }
2390
2391        envp[idx++] = NULL;
2392
2393        kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2394}
2395
2396/**
2397 *      scsi_evt_thread - send a uevent for each scsi event
2398 *      @work: work struct for scsi_device
2399 *
2400 *      Dispatch queued events to their associated scsi_device kobjects
2401 *      as uevents.
2402 */
2403void scsi_evt_thread(struct work_struct *work)
2404{
2405        struct scsi_device *sdev;
2406        enum scsi_device_event evt_type;
2407        LIST_HEAD(event_list);
2408
2409        sdev = container_of(work, struct scsi_device, event_work);
2410
2411        for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
2412                if (test_and_clear_bit(evt_type, sdev->pending_events))
2413                        sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
2414
2415        while (1) {
2416                struct scsi_event *evt;
2417                struct list_head *this, *tmp;
2418                unsigned long flags;
2419
2420                spin_lock_irqsave(&sdev->list_lock, flags);
2421                list_splice_init(&sdev->event_list, &event_list);
2422                spin_unlock_irqrestore(&sdev->list_lock, flags);
2423
2424                if (list_empty(&event_list))
2425                        break;
2426
2427                list_for_each_safe(this, tmp, &event_list) {
2428                        evt = list_entry(this, struct scsi_event, node);
2429                        list_del(&evt->node);
2430                        scsi_evt_emit(sdev, evt);
2431                        kfree(evt);
2432                }
2433        }
2434}
2435
2436/**
2437 *      sdev_evt_send - send asserted event to uevent thread
2438 *      @sdev: scsi_device event occurred on
2439 *      @evt: event to send
2440 *
2441 *      Assert scsi device event asynchronously.
2442 */
2443void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2444{
2445        unsigned long flags;
2446
2447#if 0
2448        /* FIXME: currently this check eliminates all media change events
2449         * for polled devices.  Need to update to discriminate between AN
2450         * and polled events */
2451        if (!test_bit(evt->evt_type, sdev->supported_events)) {
2452                kfree(evt);
2453                return;
2454        }
2455#endif
2456
2457        spin_lock_irqsave(&sdev->list_lock, flags);
2458        list_add_tail(&evt->node, &sdev->event_list);
2459        schedule_work(&sdev->event_work);
2460        spin_unlock_irqrestore(&sdev->list_lock, flags);
2461}
2462EXPORT_SYMBOL_GPL(sdev_evt_send);
2463
2464/**
2465 *      sdev_evt_alloc - allocate a new scsi event
2466 *      @evt_type: type of event to allocate
2467 *      @gfpflags: GFP flags for allocation
2468 *
2469 *      Allocates and returns a new scsi_event.
2470 */
2471struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2472                                  gfp_t gfpflags)
2473{
2474        struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2475        if (!evt)
2476                return NULL;
2477
2478        evt->evt_type = evt_type;
2479        INIT_LIST_HEAD(&evt->node);
2480
2481        /* evt_type-specific initialization, if any */
2482        switch (evt_type) {
2483        case SDEV_EVT_MEDIA_CHANGE:
2484        case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2485        case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2486        case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2487        case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2488        case SDEV_EVT_LUN_CHANGE_REPORTED:
2489        case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
2490        case SDEV_EVT_POWER_ON_RESET_OCCURRED:
2491        default:
2492                /* do nothing */
2493                break;
2494        }
2495
2496        return evt;
2497}
2498EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2499
2500/**
2501 *      sdev_evt_send_simple - send asserted event to uevent thread
2502 *      @sdev: scsi_device event occurred on
2503 *      @evt_type: type of event to send
2504 *      @gfpflags: GFP flags for allocation
2505 *
2506 *      Assert scsi device event asynchronously, given an event type.
2507 */
2508void sdev_evt_send_simple(struct scsi_device *sdev,
2509                          enum scsi_device_event evt_type, gfp_t gfpflags)
2510{
2511        struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2512        if (!evt) {
2513                sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2514                            evt_type);
2515                return;
2516        }
2517
2518        sdev_evt_send(sdev, evt);
2519}
2520EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
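
/*
 * Illustrative sketch (editorial addition): asserting a media change event
 * from a context that cannot sleep, hence GFP_ATOMIC. The helper name is
 * hypothetical.
 */
static inline void example_report_media_change(struct scsi_device *sdev)
{
        sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
}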
2521
2522/**
2523 *      scsi_device_quiesce - Block all commands except power management.
2524 *      @sdev:  scsi device to quiesce.
2525 *
2526 *      This works by trying to transition to the SDEV_QUIESCE state
2527 *      (which must be a legal transition).  When the device is in this
2528 *      state, only power management requests will be accepted; all others will
2529 *      be deferred.
2530 *
2531 *      Must be called with user context, may sleep.
2532 *
2533 *      Returns zero if successful or an error if not.
2534 */
2535int
2536scsi_device_quiesce(struct scsi_device *sdev)
2537{
2538        struct request_queue *q = sdev->request_queue;
2539        int err;
2540
2541        /*
2542         * It is allowed to call scsi_device_quiesce() multiple times from
2543         * the same context but concurrent scsi_device_quiesce() calls are
2544         * not allowed.
2545         */
2546        WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
2547
2548        if (sdev->quiesced_by == current)
2549                return 0;
2550
2551        blk_set_pm_only(q);
2552
2553        blk_mq_freeze_queue(q);
2554        /*
2555         * Ensure that the effect of blk_set_pm_only() will be visible
2556         * for percpu_ref_tryget() callers that occur after the queue
2557         * unfreeze even if the queue was already frozen before this function
2558         * was called. See also https://lwn.net/Articles/573497/.
2559         */
2560        synchronize_rcu();
2561        blk_mq_unfreeze_queue(q);
2562
2563        mutex_lock(&sdev->state_mutex);
2564        err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2565        if (err == 0)
2566                sdev->quiesced_by = current;
2567        else
2568                blk_clear_pm_only(q);
2569        mutex_unlock(&sdev->state_mutex);
2570
2571        return err;
2572}
2573EXPORT_SYMBOL(scsi_device_quiesce);
2574
2575/**
2576 *      scsi_device_resume - Restart user issued commands to a quiesced device.
2577 *      @sdev:  scsi device to resume.
2578 *
2579 *      Moves the device from quiesced back to running and restarts the
2580 *      queues.
2581 *
2582 *      Must be called with user context, may sleep.
2583 */
2584void scsi_device_resume(struct scsi_device *sdev)
2585{
2586        /* check if the device state was mutated prior to resume, and if
2587         * so assume the state is being managed elsewhere (for example
2588         * device deleted during suspend)
2589         */
2590        mutex_lock(&sdev->state_mutex);
2591        if (sdev->sdev_state == SDEV_QUIESCE)
2592                scsi_device_set_state(sdev, SDEV_RUNNING);
2593        if (sdev->quiesced_by) {
2594                sdev->quiesced_by = NULL;
2595                blk_clear_pm_only(sdev->request_queue);
2596        }
2597        mutex_unlock(&sdev->state_mutex);
2598}
2599EXPORT_SYMBOL(scsi_device_resume);
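
/*
 * Illustrative sketch (editorial addition): the expected pairing of
 * scsi_device_quiesce() and scsi_device_resume(). The maintenance step in
 * the middle is hypothetical.
 */
static inline int example_quiesce_for_maintenance(struct scsi_device *sdev)
{
        int err = scsi_device_quiesce(sdev);

        if (err)
                return err;
        /* ... only power management requests are processed here ... */
        scsi_device_resume(sdev);
        return 0;
}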
2600
2601static void
2602device_quiesce_fn(struct scsi_device *sdev, void *data)
2603{
2604        scsi_device_quiesce(sdev);
2605}
2606
2607void
2608scsi_target_quiesce(struct scsi_target *starget)
2609{
2610        starget_for_each_device(starget, NULL, device_quiesce_fn);
2611}
2612EXPORT_SYMBOL(scsi_target_quiesce);
2613
2614static void
2615device_resume_fn(struct scsi_device *sdev, void *data)
2616{
2617        scsi_device_resume(sdev);
2618}
2619
2620void
2621scsi_target_resume(struct scsi_target *starget)
2622{
2623        starget_for_each_device(starget, NULL, device_resume_fn);
2624}
2625EXPORT_SYMBOL(scsi_target_resume);
2626
2627/**
2628 * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
2629 * @sdev: device to block
2630 *
2631 * Pause SCSI command processing on the specified device. Does not sleep.
2632 *
2633 * Returns zero if successful or a negative error code upon failure.
2634 *
2635 * Notes:
2636 * This routine transitions the device to the SDEV_BLOCK state (which must be
2637 * a legal transition). When the device is in this state, command processing
2638 * is paused until the device leaves the SDEV_BLOCK state. See also
2639 * scsi_internal_device_unblock_nowait().
2640 */
2641int scsi_internal_device_block_nowait(struct scsi_device *sdev)
2642{
2643        struct request_queue *q = sdev->request_queue;
2644        int err = 0;
2645
2646        err = scsi_device_set_state(sdev, SDEV_BLOCK);
2647        if (err) {
2648                err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2649
2650                if (err)
2651                        return err;
2652        }
2653
2654        /*
2655         * The device has transitioned to SDEV_BLOCK.  Stop the
2656         * block layer from calling the midlayer with this device's
2657         * request queue.
2658         */
2659        blk_mq_quiesce_queue_nowait(q);
2660        return 0;
2661}
2662EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
2663
2664/**
2665 * scsi_internal_device_block - try to transition to the SDEV_BLOCK state
2666 * @sdev: device to block
2667 *
2668 * Pause SCSI command processing on the specified device and wait until all
2669 * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep.
2670 *
2671 * Returns zero if successful or a negative error code upon failure.
2672 *
2673 * Note:
2674 * This routine transitions the device to the SDEV_BLOCK state (which must be
2675 * a legal transition). When the device is in this state, command processing
2676 * is paused until the device leaves the SDEV_BLOCK state. See also
2677 * scsi_internal_device_unblock().
2678 */
2679static int scsi_internal_device_block(struct scsi_device *sdev)
2680{
2681        struct request_queue *q = sdev->request_queue;
2682        int err;
2683
2684        mutex_lock(&sdev->state_mutex);
2685        err = scsi_internal_device_block_nowait(sdev);
2686        if (err == 0)
2687                blk_mq_quiesce_queue(q);
2688        mutex_unlock(&sdev->state_mutex);
2689
2690        return err;
2691}
2692
2693void scsi_start_queue(struct scsi_device *sdev)
2694{
2695        struct request_queue *q = sdev->request_queue;
2696
2697        blk_mq_unquiesce_queue(q);
2698}
2699
2700/**
2701 * scsi_internal_device_unblock_nowait - resume a device after a block request
2702 * @sdev:       device to resume
2703 * @new_state:  state to set the device to after unblocking
2704 *
2705 * Restart the device queue for a previously suspended SCSI device. Does not
2706 * sleep.
2707 *
2708 * Returns zero if successful or a negative error code upon failure.
2709 *
2710 * Notes:
2711 * This routine transitions the device to the SDEV_RUNNING state or to one of
2712 * the offline states (which must be a legal transition) allowing the midlayer
2713 * to goose the queue for this device.
2714 */
2715int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
2716                                        enum scsi_device_state new_state)
2717{
2718        switch (new_state) {
2719        case SDEV_RUNNING:
2720        case SDEV_TRANSPORT_OFFLINE:
2721                break;
2722        default:
2723                return -EINVAL;
2724        }
2725
2726        /*
2727         * Try to transition the scsi device to SDEV_RUNNING or one of the
2728         * offlined states and goose the device queue if successful.
2729         */
2730        switch (sdev->sdev_state) {
2731        case SDEV_BLOCK:
2732        case SDEV_TRANSPORT_OFFLINE:
2733                sdev->sdev_state = new_state;
2734                break;
2735        case SDEV_CREATED_BLOCK:
2736                if (new_state == SDEV_TRANSPORT_OFFLINE ||
2737                    new_state == SDEV_OFFLINE)
2738                        sdev->sdev_state = new_state;
2739                else
2740                        sdev->sdev_state = SDEV_CREATED;
2741                break;
2742        case SDEV_CANCEL:
2743        case SDEV_OFFLINE:
2744                break;
2745        default:
2746                return -EINVAL;
2747        }
2748        scsi_start_queue(sdev);
2749
2750        return 0;
2751}
2752EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);
2753
2754/**
2755 * scsi_internal_device_unblock - resume a device after a block request
2756 * @sdev:       device to resume
2757 * @new_state:  state to set the device to after unblocking
2758 *
2759 * Restart the device queue for a previously suspended SCSI device. May sleep.
2760 *
2761 * Returns zero if successful or a negative error code upon failure.
2762 *
2763 * Notes:
2764 * This routine transitions the device to the SDEV_RUNNING state or to one of
2765 * the offline states (which must be a legal transition) allowing the midlayer
2766 * to goose the queue for this device.
2767 */
2768static int scsi_internal_device_unblock(struct scsi_device *sdev,
2769                                        enum scsi_device_state new_state)
2770{
2771        int ret;
2772
2773        mutex_lock(&sdev->state_mutex);
2774        ret = scsi_internal_device_unblock_nowait(sdev, new_state);
2775        mutex_unlock(&sdev->state_mutex);
2776
2777        return ret;
2778}
2779
2780static void
2781device_block(struct scsi_device *sdev, void *data)
2782{
2783        int ret;
2784
2785        ret = scsi_internal_device_block(sdev);
2786
2787        WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n",
2788                  dev_name(&sdev->sdev_gendev), ret);
2789}
2790
2791static int
2792target_block(struct device *dev, void *data)
2793{
2794        if (scsi_is_target_device(dev))
2795                starget_for_each_device(to_scsi_target(dev), NULL,
2796                                        device_block);
2797        return 0;
2798}
2799
2800void
2801scsi_target_block(struct device *dev)
2802{
2803        if (scsi_is_target_device(dev))
2804                starget_for_each_device(to_scsi_target(dev), NULL,
2805                                        device_block);
2806        else
2807                device_for_each_child(dev, NULL, target_block);
2808}
2809EXPORT_SYMBOL_GPL(scsi_target_block);
2810
2811static void
2812device_unblock(struct scsi_device *sdev, void *data)
2813{
2814        scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
2815}
2816
2817static int
2818target_unblock(struct device *dev, void *data)
2819{
2820        if (scsi_is_target_device(dev))
2821                starget_for_each_device(to_scsi_target(dev), data,
2822                                        device_unblock);
2823        return 0;
2824}
2825
2826void
2827scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
2828{
2829        if (scsi_is_target_device(dev))
2830                starget_for_each_device(to_scsi_target(dev), &new_state,
2831                                        device_unblock);
2832        else
2833                device_for_each_child(dev, &new_state, target_unblock);
2834}
2835EXPORT_SYMBOL_GPL(scsi_target_unblock);
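
/*
 * Illustrative sketch (editorial addition): how a transport class might
 * pause a target during recovery and then resume it. The recovery step is
 * hypothetical.
 */
static inline void example_pause_target(struct device *dev)
{
        scsi_target_block(dev);
        /* ... hypothetical transport recovery work ... */
        scsi_target_unblock(dev, SDEV_RUNNING);
}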
2836
2837int
2838scsi_host_block(struct Scsi_Host *shost)
2839{
2840        struct scsi_device *sdev;
2841        int ret = 0;
2842
2843        /*
2844         * Call scsi_internal_device_block_nowait so we can avoid
2845         * calling synchronize_rcu() for each LUN.
2846         */
2847        shost_for_each_device(sdev, shost) {
2848                mutex_lock(&sdev->state_mutex);
2849                ret = scsi_internal_device_block_nowait(sdev);
2850                mutex_unlock(&sdev->state_mutex);
2851                if (ret) {
2852                        scsi_device_put(sdev);
2853                        break;
2854                }
2855        }
2856
2857        /*
2858         * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag so
2859         * calling synchronize_rcu() once is enough.
2860         */
2861        WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING);
2862
2863        if (!ret)
2864                synchronize_rcu();
2865
2866        return ret;
2867}
2868EXPORT_SYMBOL_GPL(scsi_host_block);
2869
2870int
2871scsi_host_unblock(struct Scsi_Host *shost, int new_state)
2872{
2873        struct scsi_device *sdev;
2874        int ret = 0;
2875
2876        shost_for_each_device(sdev, shost) {
2877                ret = scsi_internal_device_unblock(sdev, new_state);
2878                if (ret) {
2879                        scsi_device_put(sdev);
2880                        break;
2881                }
2882        }
2883        return ret;
2884}
2885EXPORT_SYMBOL_GPL(scsi_host_unblock);
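
/*
 * Illustrative sketch (editorial addition): blocking every LUN on a host
 * and unblocking them again. The helper name is hypothetical.
 */
static inline int example_freeze_and_thaw_host(struct Scsi_Host *shost)
{
        int ret = scsi_host_block(shost);

        if (ret)
                return ret;
        /* ... all LUNs are now in SDEV_BLOCK ... */
        return scsi_host_unblock(shost, SDEV_RUNNING);
}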
2886
2887/**
2888 * scsi_kmap_atomic_sg - find and atomically map an sg-element
2889 * @sgl:        scatter-gather list
2890 * @sg_count:   number of segments in sg
2891 * @offset:     offset in bytes into sg, on return offset into the mapped area
2892 * @len:        bytes to map, on return number of bytes mapped
2893 *
2894 * Returns the virtual address of the start of the mapped page. Interrupts must be disabled by the caller.
2895 */
2896void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2897                          size_t *offset, size_t *len)
2898{
2899        int i;
2900        size_t sg_len = 0, len_complete = 0;
2901        struct scatterlist *sg;
2902        struct page *page;
2903
2904        WARN_ON(!irqs_disabled());
2905
2906        for_each_sg(sgl, sg, sg_count, i) {
2907                len_complete = sg_len; /* Complete sg-entries */
2908                sg_len += sg->length;
2909                if (sg_len > *offset)
2910                        break;
2911        }
2912
2913        if (unlikely(i == sg_count)) {
2914                printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2915                        "elements %d\n",
2916                       __func__, sg_len, *offset, sg_count);
2917                WARN_ON(1);
2918                return NULL;
2919        }
2920
2921        /* Offset starting from the beginning of first page in this sg-entry */
2922        *offset = *offset - len_complete + sg->offset;
2923
2924        /* Assumption: contiguous pages can be accessed as "page + i" */
2925        page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2926        *offset &= ~PAGE_MASK;
2927
2928        /* Bytes in this sg-entry from *offset to the end of the page */
2929        sg_len = PAGE_SIZE - *offset;
2930        if (*len > sg_len)
2931                *len = sg_len;
2932
2933        return kmap_atomic(page);
2934}
2935EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:       virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
        kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
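
/*
 * Hedged usage sketch: copying the first few bytes out of a scatterlist
 * with the atomic-map helpers above. Only one page is mapped per call,
 * the caller must not sleep between map and unmap, and interrupts must
 * be off (scsi_kmap_atomic_sg() warns otherwise). The four-byte peek
 * and the local buffer are arbitrary choices for the example.
 */
static void example_peek_first_bytes(struct scatterlist *sgl, int sg_count)
{
        size_t offset = 0, len = 4;
        unsigned long flags;
        u8 buf[4];
        void *vaddr;

        local_irq_save(flags);          /* helper requires irqs disabled */
        vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &offset, &len);
        if (vaddr) {
                /* offset is now page-relative; len is clamped to the page */
                memcpy(buf, vaddr + offset, len);
                scsi_kunmap_atomic_sg(vaddr);
        }
        local_irq_restore(flags);
}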

void sdev_disable_disk_events(struct scsi_device *sdev)
{
        atomic_inc(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_disable_disk_events);

void sdev_enable_disk_events(struct scsi_device *sdev)
{
        if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
                return;
        atomic_dec(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_enable_disk_events);
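
/*
 * Hedged usage sketch: disk_events_disable_depth is a nesting counter,
 * so every disable needs a matching enable. A caller might bracket an
 * operation that would otherwise be reported as a media change:
 */
static void example_suppress_media_events(struct scsi_device *sdev)
{
        sdev_disable_disk_events(sdev);
        /* ... hypothetical work that briefly toggles the medium ... */
        sdev_enable_disk_events(sdev);
}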

static unsigned char designator_prio(const unsigned char *d)
{
        if (d[1] & 0x30)
                /* not associated with LUN */
                return 0;

        if (d[3] == 0)
                /* invalid length */
                return 0;

        /*
         * Order of preference for lun descriptor:
         * - SCSI name string
         * - NAA IEEE Registered Extended
         * - EUI-64 based 16-byte
         * - EUI-64 based 12-byte
         * - NAA IEEE Registered
         * - NAA IEEE Extended
         * - EUI-64 based 8-byte
         * - SCSI name string (truncated)
         * - T10 Vendor ID
         * as longer descriptors reduce the likelihood
         * of identification clashes.
         */

        switch (d[1] & 0xf) {
        case 8:
                /* SCSI name string, variable-length UTF-8 */
                return 9;
        case 3:
                switch (d[4] >> 4) {
                case 6:
                        /* NAA registered extended */
                        return 8;
                case 5:
                        /* NAA registered */
                        return 5;
                case 4:
                        /* NAA extended */
                        return 4;
                case 3:
                        /* NAA locally assigned */
                        return 1;
                default:
                        break;
                }
                break;
        case 2:
                switch (d[3]) {
                case 16:
                        /* EUI64-based, 16 byte */
                        return 7;
                case 12:
                        /* EUI64-based, 12 byte */
                        return 6;
                case 8:
                        /* EUI64-based, 8 byte */
                        return 3;
                default:
                        break;
                }
                break;
        case 1:
                /* T10 vendor ID */
                return 1;
        default:
                break;
        }

        return 0;
}

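/*
 * Illustrative data (an assumption, not taken from this file): the
 * header of an NAA IEEE Registered Extended designator as it appears in
 * VPD page 0x83. Here d[1] & 0x30 == 0 (LUN association), d[1] & 0xf
 * == 3 (NAA), and d[4] >> 4 == 6 (Registered Extended), so
 * designator_prio() returns 8, second only to a SCSI name string.
 */
static const unsigned char example_naa_ext_designator[4 + 16] = {
        0x01,   /* code set: binary */
        0x03,   /* PIV=0, association=LUN, designator type=NAA */
        0x00,   /* reserved */
        0x10,   /* designator length: 16 bytes */
        0x60,   /* NAA 6 (Registered Extended); identifier bytes follow */
};
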
/**
 * scsi_vpd_lun_id - return a unique device identification
 * @sdev: SCSI device
 * @id:   buffer for the identification
 * @id_len:  length of the buffer
 *
 * Copies a unique device identification into @id based
 * on the information in the VPD page 0x83 of the device.
 * The string will be formatted as a SCSI name string.
 *
 * Returns the length of the identification or error on failure.
 * If the identifier is longer than the supplied buffer, the actual
 * identifier length is returned and the buffer is not zero-padded.
 */
int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{
        u8 cur_id_prio = 0;
        u8 cur_id_size = 0;
        const unsigned char *d, *cur_id_str;
        const struct scsi_vpd *vpd_pg83;
        int id_size = -EINVAL;

        rcu_read_lock();
        vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
        if (!vpd_pg83) {
                rcu_read_unlock();
                return -ENXIO;
        }

        /* The id string must be at least 20 bytes + terminating NULL byte */
        if (id_len < 21) {
                rcu_read_unlock();
                return -EINVAL;
        }

        memset(id, 0, id_len);
        for (d = vpd_pg83->data + 4;
             d < vpd_pg83->data + vpd_pg83->len;
             d += d[3] + 4) {
                u8 prio = designator_prio(d);

                if (prio == 0 || cur_id_prio > prio)
                        continue;

                switch (d[1] & 0xf) {
                case 0x1:
                        /* T10 Vendor ID */
                        if (cur_id_size > d[3])
                                break;
                        cur_id_prio = prio;
                        cur_id_size = d[3];
                        if (cur_id_size + 4 > id_len)
                                cur_id_size = id_len - 4;
                        cur_id_str = d + 4;
                        id_size = snprintf(id, id_len, "t10.%*pE",
                                           cur_id_size, cur_id_str);
                        break;
                case 0x2:
                        /* EUI-64 */
                        cur_id_prio = prio;
                        cur_id_size = d[3];
                        cur_id_str = d + 4;
                        switch (cur_id_size) {
                        case 8:
                                id_size = snprintf(id, id_len,
                                                   "eui.%8phN",
                                                   cur_id_str);
                                break;
                        case 12:
                                id_size = snprintf(id, id_len,
                                                   "eui.%12phN",
                                                   cur_id_str);
                                break;
                        case 16:
                                id_size = snprintf(id, id_len,
                                                   "eui.%16phN",
                                                   cur_id_str);
                                break;
                        default:
                                break;
                        }
                        break;
                case 0x3:
                        /* NAA */
                        cur_id_prio = prio;
                        cur_id_size = d[3];
                        cur_id_str = d + 4;
                        switch (cur_id_size) {
                        case 8:
                                id_size = snprintf(id, id_len,
                                                   "naa.%8phN",
                                                   cur_id_str);
                                break;
                        case 16:
                                id_size = snprintf(id, id_len,
                                                   "naa.%16phN",
                                                   cur_id_str);
                                break;
                        default:
                                break;
                        }
                        break;
                case 0x8:
                        /* SCSI name string */
                        if (cur_id_size > d[3])
                                break;
                        /* Prefer others for truncated descriptor */
                        if (d[3] > id_len) {
                                prio = 2;
                                if (cur_id_prio > prio)
                                        break;
                        }
                        cur_id_prio = prio;
                        cur_id_size = id_size = d[3];
                        cur_id_str = d + 4;
                        if (cur_id_size >= id_len)
                                cur_id_size = id_len - 1;
                        memcpy(id, cur_id_str, cur_id_size);
                        break;
                default:
                        break;
                }
        }
        rcu_read_unlock();

        return id_size;
}
EXPORT_SYMBOL(scsi_vpd_lun_id);
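
/*
 * Hedged usage sketch: fetching the preferred LUN identifier into a
 * local buffer. 21 bytes is the documented minimum; 64 comfortably
 * fits the "naa."/"eui." forms. A return value at or above the buffer
 * size means the identifier was truncated and not zero-padded.
 */
static void example_log_lun_id(struct scsi_device *sdev)
{
        char id[64];
        int len;

        len = scsi_vpd_lun_id(sdev, id, sizeof(id));
        if (len < 0)
                return;                 /* e.g. -ENXIO: no VPD page 0x83 */
        if (len >= (int)sizeof(id))
                return;                 /* identifier did not fit */
        sdev_printk(KERN_INFO, sdev, "lun id: %s\n", id);
}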

/**
 * scsi_vpd_tpg_id - return a target port group identifier
 * @sdev: SCSI device
 * @rel_id: pointer to return the relative target port in, if not %NULL
 *
 * Returns the Target Port Group identifier from the information
 * in VPD page 0x83 of the device.
 *
 * Returns the identifier or error on failure.
 */
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{
        const unsigned char *d;
        const struct scsi_vpd *vpd_pg83;
        int group_id = -EAGAIN, rel_port = -1;

        rcu_read_lock();
        vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
        if (!vpd_pg83) {
                rcu_read_unlock();
                return -ENXIO;
        }

        d = vpd_pg83->data + 4;
        while (d < vpd_pg83->data + vpd_pg83->len) {
                switch (d[1] & 0xf) {
                case 0x4:
                        /* Relative target port */
                        rel_port = get_unaligned_be16(&d[6]);
                        break;
                case 0x5:
                        /* Target port group */
                        group_id = get_unaligned_be16(&d[6]);
                        break;
                default:
                        break;
                }
                d += d[3] + 4;
        }
        rcu_read_unlock();

        if (group_id >= 0 && rel_id && rel_port != -1)
                *rel_id = rel_port;

        return group_id;
}
EXPORT_SYMBOL(scsi_vpd_tpg_id);
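
/*
 * Hedged usage sketch: querying the target port group, as an ALUA-aware
 * consumer might. rel_port is only written when a relative target port
 * descriptor was present and a group identifier was found.
 */
static void example_log_tpg(struct scsi_device *sdev)
{
        int rel_port = -1;
        int group_id = scsi_vpd_tpg_id(sdev, &rel_port);

        if (group_id < 0)
                return;         /* -ENXIO: no VPD; -EAGAIN: no TPG descriptor */
        sdev_printk(KERN_INFO, sdev, "tpg %d, relative port %d\n",
                    group_id, rel_port);
}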

/**
 * scsi_build_sense - build sense data for a command
 * @scmd:       scsi command for which the sense should be formatted
 * @desc:       Sense format (non-zero == descriptor format,
 *              0 == fixed format)
 * @key:        Sense key
 * @asc:        Additional sense code
 * @ascq:       Additional sense code qualifier
 */
void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq)
{
        scsi_build_sense_buffer(desc, scmd->sense_buffer, key, asc, ascq);
        scmd->result = SAM_STAT_CHECK_CONDITION;
}
EXPORT_SYMBOL_GPL(scsi_build_sense);
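
/*
 * Hedged usage sketch: failing a command with ILLEGAL REQUEST /
 * INVALID FIELD IN CDB (ASC 0x24, ASCQ 0x00). The desc=0 argument
 * selects fixed-format sense data; pass non-zero for descriptor
 * format. The specific key/ASC pair is an arbitrary example.
 */
static void example_reject_cdb(struct scsi_cmnd *scmd)
{
        scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0x0);
        /* scmd->result now carries SAM_STAT_CHECK_CONDITION. */
}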