linux/drivers/scsi/scsi_debug.c
<<
>>
Prefs
   1/*
   2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
   3 *  Copyright (C) 1992  Eric Youngdale
   4 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
   5 *  to make sure that we are not getting blocks mixed up, and PANIC if
   6 *  anything out of the ordinary is seen.
   7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
   8 *
   9 * Copyright (C) 2001 - 2016 Douglas Gilbert
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2, or (at your option)
  14 * any later version.
  15 *
  16 *  For documentation see http://sg.danny.cz/sg/sdebug26.html
  17 *
  18 */
  19
  20
  21#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
  22
  23#include <linux/module.h>
  24
  25#include <linux/kernel.h>
  26#include <linux/errno.h>
  27#include <linux/jiffies.h>
  28#include <linux/slab.h>
  29#include <linux/types.h>
  30#include <linux/string.h>
  31#include <linux/genhd.h>
  32#include <linux/fs.h>
  33#include <linux/init.h>
  34#include <linux/proc_fs.h>
  35#include <linux/vmalloc.h>
  36#include <linux/moduleparam.h>
  37#include <linux/scatterlist.h>
  38#include <linux/blkdev.h>
  39#include <linux/crc-t10dif.h>
  40#include <linux/spinlock.h>
  41#include <linux/interrupt.h>
  42#include <linux/atomic.h>
  43#include <linux/hrtimer.h>
  44#include <linux/uuid.h>
  45#include <linux/t10-pi.h>
  46
  47#include <net/checksum.h>
  48
  49#include <asm/unaligned.h>
  50
  51#include <scsi/scsi.h>
  52#include <scsi/scsi_cmnd.h>
  53#include <scsi/scsi_device.h>
  54#include <scsi/scsi_host.h>
  55#include <scsi/scsicam.h>
  56#include <scsi/scsi_eh.h>
  57#include <scsi/scsi_tcq.h>
  58#include <scsi/scsi_dbg.h>
  59
  60#include "sd.h"
  61#include "scsi_logging.h"
  62
  63/* make sure inq_product_rev string corresponds to this version */
  64#define SDEBUG_VERSION "1.86"
  65static const char *sdebug_version_date = "20160430";
  66
  67#define MY_NAME "scsi_debug"
  68
  69/* Additional Sense Code (ASC) */
  70#define NO_ADDITIONAL_SENSE 0x0
  71#define LOGICAL_UNIT_NOT_READY 0x4
  72#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
  73#define UNRECOVERED_READ_ERR 0x11
  74#define PARAMETER_LIST_LENGTH_ERR 0x1a
  75#define INVALID_OPCODE 0x20
  76#define LBA_OUT_OF_RANGE 0x21
  77#define INVALID_FIELD_IN_CDB 0x24
  78#define INVALID_FIELD_IN_PARAM_LIST 0x26
  79#define UA_RESET_ASC 0x29
  80#define UA_CHANGED_ASC 0x2a
  81#define TARGET_CHANGED_ASC 0x3f
  82#define LUNS_CHANGED_ASCQ 0x0e
  83#define INSUFF_RES_ASC 0x55
  84#define INSUFF_RES_ASCQ 0x3
  85#define POWER_ON_RESET_ASCQ 0x0
  86#define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
  87#define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
  88#define CAPACITY_CHANGED_ASCQ 0x9
  89#define SAVING_PARAMS_UNSUP 0x39
  90#define TRANSPORT_PROBLEM 0x4b
  91#define THRESHOLD_EXCEEDED 0x5d
  92#define LOW_POWER_COND_ON 0x5e
  93#define MISCOMPARE_VERIFY_ASC 0x1d
  94#define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
  95#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
  96
  97/* Additional Sense Code Qualifier (ASCQ) */
  98#define ACK_NAK_TO 0x3
  99
 100/* Default values for driver parameters */
 101#define DEF_NUM_HOST   1
 102#define DEF_NUM_TGTS   1
 103#define DEF_MAX_LUNS   1
 104/* With these defaults, this driver will make 1 host with 1 target
 105 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 106 */
 107#define DEF_ATO 1
 108#define DEF_JDELAY   1          /* if > 0 unit is a jiffy */
 109#define DEF_DEV_SIZE_MB   8
 110#define DEF_DIF 0
 111#define DEF_DIX 0
 112#define DEF_D_SENSE   0
 113#define DEF_EVERY_NTH   0
 114#define DEF_FAKE_RW     0
 115#define DEF_GUARD 0
 116#define DEF_HOST_LOCK 0
 117#define DEF_LBPU 0
 118#define DEF_LBPWS 0
 119#define DEF_LBPWS10 0
 120#define DEF_LBPRZ 1
 121#define DEF_LOWEST_ALIGNED 0
 122#define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
 123#define DEF_NO_LUN_0   0
 124#define DEF_NUM_PARTS   0
 125#define DEF_OPTS   0
 126#define DEF_OPT_BLKS 1024
 127#define DEF_PHYSBLK_EXP 0
 128#define DEF_OPT_XFERLEN_EXP 0
 129#define DEF_PTYPE   TYPE_DISK
 130#define DEF_REMOVABLE false
 131#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
 132#define DEF_SECTOR_SIZE 512
 133#define DEF_UNMAP_ALIGNMENT 0
 134#define DEF_UNMAP_GRANULARITY 1
 135#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
 136#define DEF_UNMAP_MAX_DESC 256
 137#define DEF_VIRTUAL_GB   0
 138#define DEF_VPD_USE_HOSTNO 1
 139#define DEF_WRITESAME_LENGTH 0xFFFF
 140#define DEF_STRICT 0
 141#define DEF_STATISTICS false
 142#define DEF_SUBMIT_QUEUES 1
 143#define DEF_UUID_CTL 0
 144#define JDELAY_OVERRIDDEN -9999
 145
 146#define SDEBUG_LUN_0_VAL 0
 147
 148/* bit mask values for sdebug_opts */
 149#define SDEBUG_OPT_NOISE                1
 150#define SDEBUG_OPT_MEDIUM_ERR           2
 151#define SDEBUG_OPT_TIMEOUT              4
 152#define SDEBUG_OPT_RECOVERED_ERR        8
 153#define SDEBUG_OPT_TRANSPORT_ERR        16
 154#define SDEBUG_OPT_DIF_ERR              32
 155#define SDEBUG_OPT_DIX_ERR              64
 156#define SDEBUG_OPT_MAC_TIMEOUT          128
 157#define SDEBUG_OPT_SHORT_TRANSFER       0x100
 158#define SDEBUG_OPT_Q_NOISE              0x200
 159#define SDEBUG_OPT_ALL_TSF              0x400
 160#define SDEBUG_OPT_RARE_TSF             0x800
 161#define SDEBUG_OPT_N_WCE                0x1000
 162#define SDEBUG_OPT_RESET_NOISE          0x2000
 163#define SDEBUG_OPT_NO_CDB_NOISE         0x4000
 164#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
 165                              SDEBUG_OPT_RESET_NOISE)
 166#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
 167                                  SDEBUG_OPT_TRANSPORT_ERR | \
 168                                  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
 169                                  SDEBUG_OPT_SHORT_TRANSFER)
 170/* When "every_nth" > 0 then modulo "every_nth" commands:
 171 *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
 172 *   - a RECOVERED_ERROR is simulated on successful read and write
 173 *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
 174 *   - a TRANSPORT_ERROR is simulated on successful read and write
 175 *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 176 *
 177 * When "every_nth" < 0 then after "- every_nth" commands:
 178 *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
 179 *   - a RECOVERED_ERROR is simulated on successful read and write
 180 *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
 181 *   - a TRANSPORT_ERROR is simulated on successful read and write
  182 *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 183 * This will continue on every subsequent command until some other action
  184 * occurs (e.g. the user writing a new value (other than -1 or 1) to
 185 * every_nth via sysfs).
 186 */
 187
 188/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 189 * priority order. In the subset implemented here lower numbers have higher
 190 * priority. The UA numbers should be a sequence starting from 0 with
 191 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
 192#define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
 193#define SDEBUG_UA_BUS_RESET 1
 194#define SDEBUG_UA_MODE_CHANGED 2
 195#define SDEBUG_UA_CAPACITY_CHANGED 3
 196#define SDEBUG_UA_LUNS_CHANGED 4
 197#define SDEBUG_UA_MICROCODE_CHANGED 5   /* simulate firmware change */
 198#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
 199#define SDEBUG_NUM_UAS 7
 200
 201/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 202 * sector on read commands: */
 203#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
 204#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
 205
 206/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 207 * or "peripheral device" addressing (value 0) */
 208#define SAM2_LUN_ADDRESS_METHOD 0
 209
 210/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 211 * (for response) per submit queue at one time. Can be reduced by max_queue
 212 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 213 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 214 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 215 * but cannot exceed SDEBUG_CANQUEUE .
 216 */
 217#define SDEBUG_CANQUEUE_WORDS  3        /* a WORD is bits in a long */
 218#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
 219#define DEF_CMD_PER_LUN  255
 220
 221#define F_D_IN                  1
 222#define F_D_OUT                 2
 223#define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
 224#define F_D_UNKN                8
 225#define F_RL_WLUN_OK            0x10
 226#define F_SKIP_UA               0x20
 227#define F_DELAY_OVERR           0x40
 228#define F_SA_LOW                0x80    /* cdb byte 1, bits 4 to 0 */
 229#define F_SA_HIGH               0x100   /* as used by variable length cdbs */
 230#define F_INV_OP                0x200
 231#define F_FAKE_RW               0x400
 232#define F_M_ACCESS              0x800   /* media access */
 233
 234#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
 235#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
 236#define FF_SA (F_SA_HIGH | F_SA_LOW)
 237
 238#define SDEBUG_MAX_PARTS 4
 239
 240#define SDEBUG_MAX_CMD_LEN 32
 241
 242
/* Per logical unit (LU) state for one simulated device. Instances are
 * linked into the owning sdebug_host_info's dev_info_list.
 */
struct sdebug_dev_info {
        struct list_head dev_list;      /* entry on sdbg_host->dev_info_list */
        unsigned int channel;           /* SCSI channel number */
        unsigned int target;            /* SCSI target id */
        u64 lun;                        /* logical unit number */
        uuid_be lu_name;                /* per-LU UUID; presumably reported per uuid_ctl — callers not in view */
        struct sdebug_host_info *sdbg_host;     /* owning simulated host */
        unsigned long uas_bm[1];        /* pending Unit Attentions, one bit per SDEBUG_UA_* value */
        atomic_t num_in_q;              /* commands currently queued against this LU */
        atomic_t stopped;               /* NOTE(review): looks like the START STOP UNIT stopped state — confirm in users */
        bool used;                      /* slot in use; presumably allows re-use after device removal */
};
 255
/* Per simulated host state: ties a mid-level struct Scsi_Host to the list
 * of simulated devices it owns. Linked into the global sdebug_host_list.
 */
struct sdebug_host_info {
        struct list_head host_list;     /* entry on global sdebug_host_list */
        struct Scsi_Host *shost;        /* the mid-level host this shadows */
        struct device dev;              /* driver-model device (see to_sdebug_host()) */
        struct list_head dev_info_list; /* list of child sdebug_dev_info */
};
 262
 263#define to_sdebug_host(d)       \
 264        container_of(d, struct sdebug_host_info, dev)
 265
/* Bookkeeping for a deferred command response: completion is delivered
 * later via either the hrtimer or the workqueue item.
 */
struct sdebug_defer {
        struct hrtimer hrt;     /* timer-based deferral */
        struct execute_work ew; /* workqueue-based deferral */
        int sqa_idx;    /* index of sdebug_queue array */
        int qc_idx;     /* index of sdebug_queued_cmd array within sqa_idx */
        int issuing_cpu;        /* cpu the command was submitted on; presumably feeds sdebug_miss_cpus */
};
 273
/* One in-flight (queued) command slot within a struct sdebug_queue. */
struct sdebug_queued_cmd {
        /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
         * instance indicates this slot is in use.
         */
        struct sdebug_defer *sd_dp;     /* deferred-response state */
        struct scsi_cmnd *a_cmnd;       /* the mid-level command being serviced */
        unsigned int inj_recovered:1;   /* inject error per SDEBUG_OPT_RECOVERED_ERR */
        unsigned int inj_transport:1;   /* inject error per SDEBUG_OPT_TRANSPORT_ERR */
        unsigned int inj_dif:1;         /* inject error per SDEBUG_OPT_DIF_ERR */
        unsigned int inj_dix:1;         /* inject error per SDEBUG_OPT_DIX_ERR */
        unsigned int inj_short:1;       /* inject per SDEBUG_OPT_SHORT_TRANSFER */
};
 286
/* One submit queue: a fixed array of command slots plus a bitmap of which
 * slots are occupied. There are submit_queues of these (>1 when mq).
 */
struct sdebug_queue {
        struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];       /* command slots */
        unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];         /* occupancy bitmap for qc_arr */
        spinlock_t qc_lock;     /* protects qc_arr and in_use_bm */
        atomic_t blocked;       /* to temporarily stop more being queued */
};
 293
 294static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
 295static atomic_t sdebug_completions;  /* count of deferred completions */
 296static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
 297static atomic_t sdebug_a_tsf;        /* 'almost task set full' counter */
 298
/* Describes one supported SCSI command (or command variant): its opcode,
 * service action, behaviour flags, response handler and a per-byte cdb
 * mask used for strict cdb checking.
 */
struct opcode_info_t {
        u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff */
                                /* for terminating element */
        u8 opcode;              /* if num_attached > 0, preferred */
        u16 sa;                 /* service action */
        u32 flags;              /* OR-ed set of SDEB_F_* */
        int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);       /* response routine */
        const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
        u8 len_mask[16];        /* len=len_mask[0], then mask for cdb[1]... */
                                /* ignore cdb bytes after position 15 */
};
 310
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes.
 * opcode_ind_arr[] maps cdb[0] to one of these values and opcode_info_arr[]
 * is indexed by them; keep the two arrays and this enum in sync.
 */
enum sdeb_opcode_index {
        SDEB_I_INVALID_OPCODE = 0,
        SDEB_I_INQUIRY = 1,
        SDEB_I_REPORT_LUNS = 2,
        SDEB_I_REQUEST_SENSE = 3,
        SDEB_I_TEST_UNIT_READY = 4,
        SDEB_I_MODE_SENSE = 5,          /* 6, 10 */
        SDEB_I_MODE_SELECT = 6,         /* 6, 10 */
        SDEB_I_LOG_SENSE = 7,
        SDEB_I_READ_CAPACITY = 8,       /* 10; 16 is in SA_IN(16) */
        SDEB_I_READ = 9,                /* 6, 10, 12, 16 */
        SDEB_I_WRITE = 10,              /* 6, 10, 12, 16 */
        SDEB_I_START_STOP = 11,
        SDEB_I_SERV_ACT_IN = 12,        /* 12, 16 */
        SDEB_I_SERV_ACT_OUT = 13,       /* 12, 16 */
        SDEB_I_MAINT_IN = 14,
        SDEB_I_MAINT_OUT = 15,
        SDEB_I_VERIFY = 16,             /* 10 only */
        SDEB_I_VARIABLE_LEN = 17,
        SDEB_I_RESERVE = 18,            /* 6, 10 */
        SDEB_I_RELEASE = 19,            /* 6, 10 */
        SDEB_I_ALLOW_REMOVAL = 20,      /* PREVENT ALLOW MEDIUM REMOVAL */
        SDEB_I_REZERO_UNIT = 21,        /* REWIND in SSC */
        SDEB_I_ATA_PT = 22,             /* 12, 16 */
        SDEB_I_SEND_DIAG = 23,
        SDEB_I_UNMAP = 24,
        SDEB_I_XDWRITEREAD = 25,        /* 10 only */
        SDEB_I_WRITE_BUFFER = 26,
        SDEB_I_WRITE_SAME = 27,         /* 10, 16 */
        SDEB_I_SYNC_CACHE = 28,         /* 10 only */
        SDEB_I_COMP_WRITE = 29,
        SDEB_I_LAST_ELEMENT = 30,       /* keep this last */
};
 345
 346
/* Maps cdb[0] (the SCSI opcode byte) to a SDEB_I_* index into
 * opcode_info_arr[]. A 0 entry means SDEB_I_INVALID_OPCODE (unsupported);
 * note that cdb 0x0 itself is TEST UNIT READY via the first element.
 */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
        SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
            0, 0, 0, 0,
        SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
        0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
            SDEB_I_RELEASE,
        0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
            SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
        0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
        SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
        0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
        0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
        0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
        0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
            SDEB_I_RELEASE,
        0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
        0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
        SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
        0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
        SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
             SDEB_I_MAINT_OUT, 0, 0, 0,
        SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
             0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
 389
 390static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
 391static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
 392static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
 393static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
 394static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
 395static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
 396static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
 397static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
 398static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
 399static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
 400static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
 401static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
 402static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
 403static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
 404static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
 405static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
 406static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 407static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
 408static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 409static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
 410static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
 411
/* MODE SENSE(6) variant, attached to the MODE SENSE(10) leader. */
static const struct opcode_info_t msense_iarr[1] = {
        {0, 0x1a, 0, F_D_IN, NULL, NULL,
            {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
 416
/* MODE SELECT(6) variant, attached to the MODE SELECT(10) leader. */
static const struct opcode_info_t mselect_iarr[1] = {
        {0, 0x15, 0, F_D_OUT, NULL, NULL,
            {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
 421
/* READ(10), READ(6) and READ(12) variants, attached to the READ(16)
 * leader in opcode_info_arr; all share resp_read_dt0(). */
static const struct opcode_info_t read_iarr[3] = {
        {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
            {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
             0, 0, 0, 0} },
        {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
            {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
            {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
             0xc7, 0, 0, 0, 0} },
};
 432
/* WRITE(10), WRITE(6) and WRITE(12) variants, attached to the WRITE(16)
 * leader in opcode_info_arr; all share resp_write_dt0(). */
static const struct opcode_info_t write_iarr[3] = {
        {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
            {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
             0, 0, 0, 0} },
        {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
            {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
            {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
             0xc7, 0, 0, 0, 0} },
};
 443
/* GET LBA STATUS: service action 0x12 of SERVICE ACTION IN(16),
 * attached to the READ CAPACITY(16) leader. */
static const struct opcode_info_t sa_in_iarr[1] = {
        {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
            {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
             0xff, 0xff, 0xff, 0, 0xc7} },
};
 449
/* WRITE(32) variant of the VARIABLE LENGTH cdb leader (service action
 * in the high cdb bytes, hence F_SA_HIGH). */
static const struct opcode_info_t vl_iarr[1] = {        /* VARIABLE LENGTH */
        {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
            NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
                   0, 0xff, 0xff, 0xff, 0xff} },        /* WRITE(32) */
};
 455
/* MAINTENANCE IN variants: REPORT SUPPORTED OPERATION CODES (sa 0xc) and
 * REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS (sa 0xd), attached to the
 * REPORT TARGET PORT GROUPS leader. */
static const struct opcode_info_t maint_in_iarr[2] = {
        {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
            {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
             0xc7, 0, 0, 0, 0} },
        {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
            {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
             0, 0} },
};
 464
/* WRITE SAME(16) variant, attached to the WRITE SAME(10) leader. */
static const struct opcode_info_t write_same_iarr[1] = {
        {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
            {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
             0xff, 0xff, 0xff, 0x1f, 0xc7} },
};
 470
/* RESERVE(6) variant, attached to the RESERVE(10) leader. */
static const struct opcode_info_t reserve_iarr[1] = {
        {0, 0x16, 0, F_D_OUT, NULL, NULL,       /* RESERVE(6) */
            {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
 475
/* RELEASE(6) variant, attached to the RELEASE(10) leader. */
static const struct opcode_info_t release_iarr[1] = {
        {0, 0x17, 0, F_D_OUT, NULL, NULL,       /* RELEASE(6) */
            {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
 480
 481
 482/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 483 * plus the terminating elements for logic that scans this table such as
 484 * REPORT SUPPORTED OPERATION CODES. */
 485static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
 486/* 0 */
 487        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
 488            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 489        {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
 490            {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 491        {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
 492            {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
 493             0, 0} },
 494        {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
 495            {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 496        {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
 497            {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 498        {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
 499            {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 500             0} },
 501        {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
 502            {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 503        {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
 504            {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
 505             0, 0, 0} },
 506        {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
 507            {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
 508             0, 0} },
 509        {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
 510            {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 511             0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* READ(16) */
 512/* 10 */
 513        {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
 514            {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 515             0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* WRITE(16) */
 516        {0, 0x1b, 0, 0, resp_start_stop, NULL,          /* START STOP UNIT */
 517            {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 518        {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
 519            {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 520             0xff, 0xff, 0xff, 0x1, 0xc7} },    /* READ CAPACITY(16) */
 521        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
 522            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 523        {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
 524            {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
 525             0} },
 526        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
 527            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 528        {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
 529            {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
 530             0, 0, 0, 0, 0, 0} },
 531        {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
 532            vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
 533                      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
 534        {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
 535            {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 536             0} },
 537        {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
 538            {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 539             0} },
 540/* 20 */
 541        {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
 542            {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 543        {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
 544            {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 545        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
 546            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 547        {0, 0x1d, F_D_OUT, 0, NULL, NULL,       /* SEND DIAGNOSTIC */
 548            {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 549        {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
 550            {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 551        {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
 552            NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
 553                   0, 0, 0, 0, 0, 0} },
 554        {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
 555            {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
 556             0, 0, 0, 0} },                     /* WRITE_BUFFER */
 557        {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
 558            write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
 559                              0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 560        {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
 561            {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
 562             0, 0, 0, 0} },
 563        {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
 564            {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
 565             0, 0xff, 0x1f, 0xc7} },            /* COMPARE AND WRITE */
 566
 567/* 30 */
 568        {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
 569            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 570};
 571
 572static int sdebug_add_host = DEF_NUM_HOST;
 573static int sdebug_ato = DEF_ATO;
 574static int sdebug_jdelay = DEF_JDELAY;  /* if > 0 then unit is jiffies */
 575static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
 576static int sdebug_dif = DEF_DIF;
 577static int sdebug_dix = DEF_DIX;
 578static int sdebug_dsense = DEF_D_SENSE;
 579static int sdebug_every_nth = DEF_EVERY_NTH;
 580static int sdebug_fake_rw = DEF_FAKE_RW;
 581static unsigned int sdebug_guard = DEF_GUARD;
 582static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
 583static int sdebug_max_luns = DEF_MAX_LUNS;
 584static int sdebug_max_queue = SDEBUG_CANQUEUE;  /* per submit queue */
 585static atomic_t retired_max_queue;      /* if > 0 then was prior max_queue */
 586static int sdebug_ndelay = DEF_NDELAY;  /* if > 0 then unit is nanoseconds */
 587static int sdebug_no_lun_0 = DEF_NO_LUN_0;
 588static int sdebug_no_uld;
 589static int sdebug_num_parts = DEF_NUM_PARTS;
 590static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
 591static int sdebug_opt_blks = DEF_OPT_BLKS;
 592static int sdebug_opts = DEF_OPTS;
 593static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
 594static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
 595static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
 596static int sdebug_scsi_level = DEF_SCSI_LEVEL;
 597static int sdebug_sector_size = DEF_SECTOR_SIZE;
 598static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
 599static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
 600static unsigned int sdebug_lbpu = DEF_LBPU;
 601static unsigned int sdebug_lbpws = DEF_LBPWS;
 602static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
 603static unsigned int sdebug_lbprz = DEF_LBPRZ;
 604static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
 605static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
 606static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
 607static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
 608static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
 609static int sdebug_uuid_ctl = DEF_UUID_CTL;
 610static bool sdebug_removable = DEF_REMOVABLE;
 611static bool sdebug_clustering;
 612static bool sdebug_host_lock = DEF_HOST_LOCK;
 613static bool sdebug_strict = DEF_STRICT;
 614static bool sdebug_any_injecting_opt;
 615static bool sdebug_verbose;
 616static bool have_dif_prot;
 617static bool sdebug_statistics = DEF_STATISTICS;
 618static bool sdebug_mq_active;
 619
 620static unsigned int sdebug_store_sectors;
 621static sector_t sdebug_capacity;        /* in sectors */
 622
 623/* old BIOS stuff, kernel may get rid of them but some mode sense pages
 624   may still need them */
 625static int sdebug_heads;                /* heads per disk */
 626static int sdebug_cylinders_per;        /* cylinders per surface */
 627static int sdebug_sectors_per;          /* sectors per cylinder */
 628
 629static LIST_HEAD(sdebug_host_list);
 630static DEFINE_SPINLOCK(sdebug_host_list_lock);
 631
 632static unsigned char *fake_storep;      /* ramdisk storage */
 633static struct t10_pi_tuple *dif_storep; /* protection info */
 634static void *map_storep;                /* provisioning map */
 635
 636static unsigned long map_size;
 637static int num_aborts;
 638static int num_dev_resets;
 639static int num_target_resets;
 640static int num_bus_resets;
 641static int num_host_resets;
 642static int dix_writes;
 643static int dix_reads;
 644static int dif_errors;
 645
 646static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
 647static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
 648
 649static DEFINE_RWLOCK(atomic_rw);
 650
 651static char sdebug_proc_name[] = MY_NAME;
 652static const char *my_name = MY_NAME;
 653
 654static struct bus_type pseudo_lld_bus;
 655
 656static struct device_driver sdebug_driverfs_driver = {
 657        .name           = sdebug_proc_name,
 658        .bus            = &pseudo_lld_bus,
 659};
 660
 661static const int check_condition_result =
 662                (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 663
 664static const int illegal_condition_result =
 665        (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
 666
 667static const int device_qfull_result =
 668        (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
 669
 670
 671/* Only do the extra work involved in logical block provisioning if one or
 672 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 673 * real reads and writes (i.e. not skipping them for speed).
 674 */
 675static inline bool scsi_debug_lbp(void)
 676{
 677        return 0 == sdebug_fake_rw &&
 678                (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
 679}
 680
/* Return the ramdisk (fake_storep) address backing the given LBA. The LBA
 * is wrapped modulo sdebug_store_sectors, so a virtual capacity larger
 * than the backing store re-uses the same storage.
 */
static void *fake_store(unsigned long long lba)
{
        lba = do_div(lba, sdebug_store_sectors);        /* do_div() yields the remainder */

        return fake_storep + lba * sdebug_sector_size;
}
 687
/* Return a pointer to the protection information (DIF) tuple for the
 * given sector; like fake_store(), an out-of-range sector wraps into the
 * store (sector_div() reduces 'sector' to the remainder in place).
 */
static struct t10_pi_tuple *dif_store(sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return dif_storep + sector;
}
 694
/* Refresh the max_id/max_lun limits on every simulated host, e.g. after
 * the num_tgts module parameter has been changed at runtime. Walks the
 * host list under sdebug_host_list_lock.
 */
static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;	/* skip over this_id */
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;	/* include REPORT LUNS wlun */
	}
	spin_unlock(&sdebug_host_list_lock);
}
 713
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
/* Build an ILLEGAL REQUEST, INVALID FIELD sense whose SENSE KEY SPECIFIC
 * (SKS) field points at the offending byte (in_byte) and, when in_bit is
 * non-negative, the offending bit, in either the CDB (c_d==SDEB_IN_CDB)
 * or the data-out parameter list (SDEB_IN_DATA). Handles both fixed and
 * descriptor (sdebug_dsense) sense formats.
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense key specific bytes */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV: field pointer valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		/* append a SKS descriptor (type 0x2, additional length 6) */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format SKS bytes */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
 756
 757static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
 758{
 759        unsigned char *sbuff;
 760
 761        sbuff = scp->sense_buffer;
 762        if (!sbuff) {
 763                sdev_printk(KERN_ERR, scp->device,
 764                            "%s: sense_buffer is NULL\n", __func__);
 765                return;
 766        }
 767        memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
 768
 769        scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
 770
 771        if (sdebug_verbose)
 772                sdev_printk(KERN_INFO, scp->device,
 773                            "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
 774                            my_name, key, asc, asq);
 775}
 776
/* Report an unsupported command: ILLEGAL REQUEST with INVALID OPCODE. */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
 781
 782static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
 783{
 784        if (sdebug_verbose) {
 785                if (0x1261 == cmd)
 786                        sdev_printk(KERN_INFO, dev,
 787                                    "%s: BLKFLSBUF [0x1261]\n", __func__);
 788                else if (0x5331 == cmd)
 789                        sdev_printk(KERN_INFO, dev,
 790                                    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
 791                                    __func__);
 792                else
 793                        sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
 794                                    __func__, cmd);
 795        }
 796        return -EINVAL;
 797        /* return -ENOTTY; // correct return but upsets fdisk */
 798}
 799
/* Clear the LUNS_CHANGED unit attention on every LUN that shares this
 * device's host and target; used for SPC-4 semantics where the UA is
 * reported only once per target.
 */
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
 815
/* Check the device for pending unit attention (UA) conditions. If any
 * is set, build the matching UNIT ATTENTION sense for the lowest-numbered
 * one, clear its bit and return check_condition_result; return 0 when no
 * UA is pending.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	/* lowest set bit is the highest priority pending UA */
	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		/* the UA is consumed by this command */
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
 895
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	if (!sdb->length)
		return 0;	/* no data-in buffer: nothing to fill */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;	/* command has no data-in phase */

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	/* residual = requested transfer length minus what was supplied */
	sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}
 914
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	int act_len, n;
	struct scsi_data_buffer *sdb = scsi_in(scp);
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;	/* offset beyond buffer end: nothing to do */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;	/* command has no data-in phase */

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
	/* keep the smallest residual seen over all partial fills */
	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
	sdb->resid = min(sdb->resid, n);
	return 0;
}
 940
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;	/* no data-out buffer supplied */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
		return -1;	/* command has no data-out phase */

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
 954
 955
/* Fixed INQUIRY identification strings (8/16/4 chars, space padded). */
static const char * inq_vendor_id = "Linux   ";
static const char * inq_product_id = "scsi_debug      ";
static const char *inq_product_rev = "0186";	/* version less '.' */
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;	/* target ports/device */
static const u64 naa3_comp_b = 0x3333333000000000ULL;	/* logical units */
static const u64 naa3_comp_c = 0x3111111000000000ULL;
 963
/* Device identification VPD page. Returns number of bytes placed in arr */
/* Emits, in order: a T10 vendor-ID designator, a logical unit designator
 * (locally assigned UUID when sdebug_uuid_ctl, else NAA-3 from
 * naa3_comp_b) plus a relative target port designator when dev_id_num is
 * non-negative, then NAA-3 target port / port group / target device
 * designators and a "naa." SCSI name string for the target device.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_be *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], inq_vendor_id, 8);
	memcpy(&arr[12], inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length */
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* pad name string to 24 bytes */
	num += 4;
	return num;
}
1051
/* Canned payload for the Software interface identification VPD page
 * (0x84): three 6-byte identifiers, from byte 4 of the page onwards. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
1064
/* Append one network services descriptor: a 4 byte header (association/
 * service type, length) followed by the URL, NUL terminated and zero
 * padded to a multiple of 4 bytes. Returns bytes written at arr.
 */
static int sdebug_add_nw_svc_desc(unsigned char *arr, int assoc_svc,
				  const char *url)
{
	int olen = strlen(url);
	int plen = olen + 1;		/* include NUL terminator */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* pad to 4-byte multiple */
	arr[0] = assoc_svc;
	arr[1] = 0x0;			/* reserved */
	arr[2] = 0x0;
	arr[3] = plen;			/* length, null terminated, padded */
	memcpy(arr + 4, url, olen);
	memset(arr + 4 + olen, 0, plen - olen);
	return 4 + plen;
}

/* Management network addresses VPD page. Returns number of bytes placed
 * in arr (page payload, from byte 4 of the page).
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* lu, storage configuration service */
	num += sdebug_add_nw_svc_desc(arr + num, 0x1,
				      "https://www.kernel.org/config");
	/* lu, logging service */
	num += sdebug_add_nw_svc_desc(arr + num, 0x4,
				      "http://www.kernel.org/log");
	return num;
}
1099
1100/* SCSI ports VPD page */
1101static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1102{
1103        int num = 0;
1104        int port_a, port_b;
1105
1106        port_a = target_dev_id + 1;
1107        port_b = port_a + 1;
1108        arr[num++] = 0x0;       /* reserved */
1109        arr[num++] = 0x0;       /* reserved */
1110        arr[num++] = 0x0;
1111        arr[num++] = 0x1;       /* relative port 1 (primary) */
1112        memset(arr + num, 0, 6);
1113        num += 6;
1114        arr[num++] = 0x0;
1115        arr[num++] = 12;        /* length tp descriptor */
1116        /* naa-5 target port identifier (A) */
1117        arr[num++] = 0x61;      /* proto=sas, binary */
1118        arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1119        arr[num++] = 0x0;       /* reserved */
1120        arr[num++] = 0x8;       /* length */
1121        put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1122        num += 8;
1123        arr[num++] = 0x0;       /* reserved */
1124        arr[num++] = 0x0;       /* reserved */
1125        arr[num++] = 0x0;
1126        arr[num++] = 0x2;       /* relative port 2 (secondary) */
1127        memset(arr + num, 0, 6);
1128        num += 6;
1129        arr[num++] = 0x0;
1130        arr[num++] = 12;        /* length tp descriptor */
1131        /* naa-5 target port identifier (B) */
1132        arr[num++] = 0x61;      /* proto=sas, binary */
1133        arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1134        arr[num++] = 0x0;       /* reserved */
1135        arr[num++] = 0x8;       /* length */
1136        put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1137        num += 8;
1138
1139        return num;
1140}
1141
1142
/* Canned ATA IDENTIFY-style payload for the ATA Information VPD page
 * (0x89), from byte 4 of the page onwards (faked SAT data). */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
1193
1194
/* Block limits VPD page (0xb0) template, from byte 4 of the page onwards;
 * the variable fields are overwritten by inquiry_vpd_b0(). */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1201
1202/* Block limits VPD page (SBC-3) */
1203static int inquiry_vpd_b0(unsigned char *arr)
1204{
1205        unsigned int gran;
1206
1207        memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1208
1209        /* Optimal transfer length granularity */
1210        if (sdebug_opt_xferlen_exp != 0 &&
1211            sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1212                gran = 1 << sdebug_opt_xferlen_exp;
1213        else
1214                gran = 1 << sdebug_physblk_exp;
1215        put_unaligned_be16(gran, arr + 2);
1216
1217        /* Maximum Transfer Length */
1218        if (sdebug_store_sectors > 0x400)
1219                put_unaligned_be32(sdebug_store_sectors, arr + 4);
1220
1221        /* Optimal Transfer Length */
1222        put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1223
1224        if (sdebug_lbpu) {
1225                /* Maximum Unmap LBA Count */
1226                put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1227
1228                /* Maximum Unmap Block Descriptor Count */
1229                put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1230        }
1231
1232        /* Unmap Granularity Alignment */
1233        if (sdebug_unmap_alignment) {
1234                put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1235                arr[28] |= 0x80; /* UGAVALID */
1236        }
1237
1238        /* Optimal Unmap Granularity */
1239        put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1240
1241        /* Maximum WRITE SAME Length */
1242        put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1243
1244        return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1245
1246        return sizeof(vpdb0_data);
1247}
1248
/* Block device characteristics VPD page (SBC-3). Fills arr with the
 * fixed 0x3c byte payload and returns its length.
 */
static int inquiry_vpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	/* medium rotation rate 0x0001: non rotating (e.g. solid state) */
	arr[1] = 1;
	/* nominal form factor 5: less than 1.8" */
	arr[3] = 5;

	return 0x3c;
}
1260
1261/* Logical block provisioning VPD page (SBC-4) */
1262static int inquiry_vpd_b2(unsigned char *arr)
1263{
1264        memset(arr, 0, 0x4);
1265        arr[0] = 0;                     /* threshold exponent */
1266        if (sdebug_lbpu)
1267                arr[1] = 1 << 7;
1268        if (sdebug_lbpws)
1269                arr[1] |= 1 << 6;
1270        if (sdebug_lbpws10)
1271                arr[1] |= 1 << 5;
1272        if (sdebug_lbprz && scsi_debug_lbp())
1273                arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1274        /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1275        /* minimum_percentage=0; provisioning_type=0 (unknown) */
1276        /* threshold_percentage=0 */
1277        return 0x4;
1278}
1279
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/* Respond to the INQUIRY command: either one of the supported Vital
 * Product Data (VPD) pages when the EVPD bit is set, or the standard
 * 96 byte inquiry response. The obsolete CMDDT bit is rejected with
 * INVALID FIELD IN CDB. Returns 0, check_condition_result, or
 * DID_REQUEUE << 16 when the response buffer cannot be allocated.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char * arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		/* synthesize ids from host/channel/target/lun numbers */
		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk) {    /* SBC only */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				arr[n++] = 0xb2;  /* Logical Block Prov */
			}
			arr[3] = n - 4;   /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;	/* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;	/* no protection stuff */
			arr[5] = 0x7;	/* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;   /* protocol specific lu */
			arr[10] = 0x82;  /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];	/*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);	/* 16 bit page length */
		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clip response to the allocation length from the CDB */
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] = 0x10; /* claim: implicit TGPS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], inq_vendor_id, 8);
	memcpy(&arr[16], inq_product_id, 16);
	memcpy(&arr[32], inq_product_rev, 4);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1431
/*
 * Informational Exceptions Control mode page, current values.  Kept as a
 * writable file-scope array because it is shared: resp_iec_m_pg() copies it
 * out for MODE SENSE, and resp_requests() inspects byte 2 (TEST bit 0x4)
 * and byte 3 (MRIE field, low nibble) to decide whether to fabricate a
 * THRESHOLD_EXCEEDED response.
 */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1434
1435static int resp_requests(struct scsi_cmnd * scp,
1436                         struct sdebug_dev_info * devip)
1437{
1438        unsigned char * sbuff;
1439        unsigned char *cmd = scp->cmnd;
1440        unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1441        bool dsense;
1442        int len = 18;
1443
1444        memset(arr, 0, sizeof(arr));
1445        dsense = !!(cmd[1] & 1);
1446        sbuff = scp->sense_buffer;
1447        if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1448                if (dsense) {
1449                        arr[0] = 0x72;
1450                        arr[1] = 0x0;           /* NO_SENSE in sense_key */
1451                        arr[2] = THRESHOLD_EXCEEDED;
1452                        arr[3] = 0xff;          /* TEST set and MRIE==6 */
1453                        len = 8;
1454                } else {
1455                        arr[0] = 0x70;
1456                        arr[2] = 0x0;           /* NO_SENSE in sense_key */
1457                        arr[7] = 0xa;           /* 18 byte sense buffer */
1458                        arr[12] = THRESHOLD_EXCEEDED;
1459                        arr[13] = 0xff;         /* TEST set and MRIE==6 */
1460                }
1461        } else {
1462                memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1463                if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1464                        ;       /* have sense and formats match */
1465                else if (arr[0] <= 0x70) {
1466                        if (dsense) {
1467                                memset(arr, 0, 8);
1468                                arr[0] = 0x72;
1469                                len = 8;
1470                        } else {
1471                                memset(arr, 0, 18);
1472                                arr[0] = 0x70;
1473                                arr[7] = 0xa;
1474                        }
1475                } else if (dsense) {
1476                        memset(arr, 0, 8);
1477                        arr[0] = 0x72;
1478                        arr[1] = sbuff[2];     /* sense key */
1479                        arr[2] = sbuff[12];    /* asc */
1480                        arr[3] = sbuff[13];    /* ascq */
1481                        len = 8;
1482                } else {
1483                        memset(arr, 0, 18);
1484                        arr[0] = 0x70;
1485                        arr[2] = sbuff[1];
1486                        arr[7] = 0xa;
1487                        arr[12] = sbuff[1];
1488                        arr[13] = sbuff[3];
1489                }
1490
1491        }
1492        mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1493        return fill_from_dev_buffer(scp, arr, len);
1494}
1495
1496static int resp_start_stop(struct scsi_cmnd * scp,
1497                           struct sdebug_dev_info * devip)
1498{
1499        unsigned char *cmd = scp->cmnd;
1500        int power_cond, stop;
1501
1502        power_cond = (cmd[4] & 0xf0) >> 4;
1503        if (power_cond) {
1504                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1505                return check_condition_result;
1506        }
1507        stop = !(cmd[4] & 1);
1508        atomic_xchg(&devip->stopped, stop);
1509        return 0;
1510}
1511
1512static sector_t get_sdebug_capacity(void)
1513{
1514        static const unsigned int gibibyte = 1073741824;
1515
1516        if (sdebug_virtual_gb > 0)
1517                return (sector_t)sdebug_virtual_gb *
1518                        (gibibyte / sdebug_sector_size);
1519        else
1520                return sdebug_store_sectors;
1521}
1522
1523#define SDEBUG_READCAP_ARR_SZ 8
1524static int resp_readcap(struct scsi_cmnd * scp,
1525                        struct sdebug_dev_info * devip)
1526{
1527        unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1528        unsigned int capac;
1529
1530        /* following just in case virtual_gb changed */
1531        sdebug_capacity = get_sdebug_capacity();
1532        memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1533        if (sdebug_capacity < 0xffffffff) {
1534                capac = (unsigned int)sdebug_capacity - 1;
1535                put_unaligned_be32(capac, arr + 0);
1536        } else
1537                put_unaligned_be32(0xffffffff, arr + 0);
1538        put_unaligned_be16(sdebug_sector_size, arr + 6);
1539        return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1540}
1541
1542#define SDEBUG_READCAP16_ARR_SZ 32
1543static int resp_readcap16(struct scsi_cmnd * scp,
1544                          struct sdebug_dev_info * devip)
1545{
1546        unsigned char *cmd = scp->cmnd;
1547        unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1548        int alloc_len;
1549
1550        alloc_len = get_unaligned_be32(cmd + 10);
1551        /* following just in case virtual_gb changed */
1552        sdebug_capacity = get_sdebug_capacity();
1553        memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1554        put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1555        put_unaligned_be32(sdebug_sector_size, arr + 8);
1556        arr[13] = sdebug_physblk_exp & 0xf;
1557        arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1558
1559        if (scsi_debug_lbp()) {
1560                arr[14] |= 0x80; /* LBPME */
1561                /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1562                 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1563                 * in the wider field maps to 0 in this field.
1564                 */
1565                if (sdebug_lbprz & 1)   /* precisely what the draft requires */
1566                        arr[14] |= 0x40;
1567        }
1568
1569        arr[15] = sdebug_lowest_aligned & 0xff;
1570
1571        if (have_dif_prot) {
1572                arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1573                arr[12] |= 1; /* PROT_EN */
1574        }
1575
1576        return fill_from_dev_buffer(scp, arr,
1577                                    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1578}
1579
1580#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1581
1582static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1583                              struct sdebug_dev_info * devip)
1584{
1585        unsigned char *cmd = scp->cmnd;
1586        unsigned char * arr;
1587        int host_no = devip->sdbg_host->shost->host_no;
1588        int n, ret, alen, rlen;
1589        int port_group_a, port_group_b, port_a, port_b;
1590
1591        alen = get_unaligned_be32(cmd + 6);
1592        arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1593        if (! arr)
1594                return DID_REQUEUE << 16;
1595        /*
1596         * EVPD page 0x88 states we have two ports, one
1597         * real and a fake port with no device connected.
1598         * So we create two port groups with one port each
1599         * and set the group with port B to unavailable.
1600         */
1601        port_a = 0x1; /* relative port A */
1602        port_b = 0x2; /* relative port B */
1603        port_group_a = (((host_no + 1) & 0x7f) << 8) +
1604                        (devip->channel & 0x7f);
1605        port_group_b = (((host_no + 1) & 0x7f) << 8) +
1606                        (devip->channel & 0x7f) + 0x80;
1607
1608        /*
1609         * The asymmetric access state is cycled according to the host_id.
1610         */
1611        n = 4;
1612        if (sdebug_vpd_use_hostno == 0) {
1613                arr[n++] = host_no % 3; /* Asymm access state */
1614                arr[n++] = 0x0F; /* claim: all states are supported */
1615        } else {
1616                arr[n++] = 0x0; /* Active/Optimized path */
1617                arr[n++] = 0x01; /* only support active/optimized paths */
1618        }
1619        put_unaligned_be16(port_group_a, arr + n);
1620        n += 2;
1621        arr[n++] = 0;    /* Reserved */
1622        arr[n++] = 0;    /* Status code */
1623        arr[n++] = 0;    /* Vendor unique */
1624        arr[n++] = 0x1;  /* One port per group */
1625        arr[n++] = 0;    /* Reserved */
1626        arr[n++] = 0;    /* Reserved */
1627        put_unaligned_be16(port_a, arr + n);
1628        n += 2;
1629        arr[n++] = 3;    /* Port unavailable */
1630        arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1631        put_unaligned_be16(port_group_b, arr + n);
1632        n += 2;
1633        arr[n++] = 0;    /* Reserved */
1634        arr[n++] = 0;    /* Status code */
1635        arr[n++] = 0;    /* Vendor unique */
1636        arr[n++] = 0x1;  /* One port per group */
1637        arr[n++] = 0;    /* Reserved */
1638        arr[n++] = 0;    /* Reserved */
1639        put_unaligned_be16(port_b, arr + n);
1640        n += 2;
1641
1642        rlen = n - 4;
1643        put_unaligned_be32(rlen, arr + 0);
1644
1645        /*
1646         * Return the smallest value of either
1647         * - The allocated length
1648         * - The constructed command length
1649         * - The maximum array size
1650         */
1651        rlen = min(alen,n);
1652        ret = fill_from_dev_buffer(scp, arr,
1653                                   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1654        kfree(arr);
1655        return ret;
1656}
1657
/*
 * REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN, service action 0xc).
 * Builds either the "all commands" list (REPORTING OPTIONS == 0) or a
 * one-command parameter block (REPORTING OPTIONS 1..3) from
 * opcode_info_arr.  Entries flagged F_INV_OP are skipped or reported as
 * unsupported.  When RCTD is set each descriptor grows by a 12-byte
 * command timeouts descriptor.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;		/* RCTD: return command timeouts descriptor */
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;	/* parent entry, saved while walking arrp */
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;	/* clamp the working buffer size */
	else
		a_len = alloc_len;
	/* over-allocate (+64 / min 320) so the descriptor that straddles
	 * the "offset < a_len" loop bound can still be written safely */
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0: /* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* 8 bytes per descriptor, plus 12 for timeouts when RCTD */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;		/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;		/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* emit one descriptor per attached (same-opcode,
			 * different service action) variant */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;	/* resume outer walk from the parent */
			offset += bump;
		}
		break;
	case 1: /* one command: opcode only */
	case 2: /* one command: opcode plus service action */
	case 3: /* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				/* opcode-only query is invalid for commands
				 * that require a service action */
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* sa given for a command without one;
				 * point at the requested sa field */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported */
			else if (0 == (FF_SA & oip->flags)) {
				/* search attached entries by opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search attached entries by service action */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* append the CDB usage bitmap */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;	/* command timeouts descriptor */
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	/* clamp to buffer, then to the requested allocation length */
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1808
1809static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1810                          struct sdebug_dev_info *devip)
1811{
1812        bool repd;
1813        u32 alloc_len, len;
1814        u8 arr[16];
1815        u8 *cmd = scp->cmnd;
1816
1817        memset(arr, 0, sizeof(arr));
1818        repd = !!(cmd[2] & 0x80);
1819        alloc_len = get_unaligned_be32(cmd + 6);
1820        if (alloc_len < 4) {
1821                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1822                return check_condition_result;
1823        }
1824        arr[0] = 0xc8;          /* ATS | ATSS | LURS */
1825        arr[1] = 0x1;           /* ITNRS */
1826        if (repd) {
1827                arr[3] = 0xc;
1828                len = 16;
1829        } else
1830                len = 4;
1831
1832        len = (len < alloc_len) ? len : alloc_len;
1833        return fill_from_dev_buffer(scp, arr, len);
1834}
1835
1836/* <<Following mode page info copied from ST318451LW>> */
1837
static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
	static const unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240,
						     0, 0, 0, 5, 0,
						     0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)	/* changeable values: none reported */
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
1848
static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
	static const unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128,
						      0, 10, 0, 0,
						      0, 0, 0, 0,
						      0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)	/* changeable values: none reported */
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
1859
1860static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1861{       /* Format device page for mode_sense */
1862        unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1863                                     0, 0, 0, 0, 0, 0, 0, 0,
1864                                     0, 0, 0, 0, 0x40, 0, 0, 0};
1865
1866        memcpy(p, format_pg, sizeof(format_pg));
1867        put_unaligned_be16(sdebug_sectors_per, p + 10);
1868        put_unaligned_be16(sdebug_sector_size, p + 12);
1869        if (sdebug_removable)
1870                p[20] |= 0x20; /* should agree with INQUIRY */
1871        if (1 == pcontrol)
1872                memset(p + 2, 0, sizeof(format_pg) - 2);
1873        return sizeof(format_pg);
1874}
1875
/*
 * Caching mode page (0x8), current values.  Kept as a writable file-scope
 * array because resp_caching_pg() clears the WCE bit in byte 2 when the
 * SDEBUG_OPT_N_WCE option is set.
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
1879
1880static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1881{       /* Caching page for mode_sense */
1882        unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1883                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1884        unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1885                0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1886
1887        if (SDEBUG_OPT_N_WCE & sdebug_opts)
1888                caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
1889        memcpy(p, caching_pg, sizeof(caching_pg));
1890        if (1 == pcontrol)
1891                memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1892        else if (2 == pcontrol)
1893                memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1894        return sizeof(caching_pg);
1895}
1896
/*
 * Control mode page (0xa), current values.  Kept writable because
 * resp_ctrl_m_pg() updates byte 2 (D_SENSE, 0x4) from sdebug_dsense and
 * byte 5 (ATO, 0x80) from sdebug_ato before copying it out.
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
1899
1900static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1901{       /* Control mode page for mode_sense */
1902        unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1903                                        0, 0, 0, 0};
1904        unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1905                                     0, 0, 0x2, 0x4b};
1906
1907        if (sdebug_dsense)
1908                ctrl_m_pg[2] |= 0x4;
1909        else
1910                ctrl_m_pg[2] &= ~0x4;
1911
1912        if (sdebug_ato)
1913                ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1914
1915        memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1916        if (1 == pcontrol)
1917                memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1918        else if (2 == pcontrol)
1919                memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1920        return sizeof(ctrl_m_pg);
1921}
1922
1923
1924static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1925{       /* Informational Exceptions control mode page for mode_sense */
1926        unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1927                                       0, 0, 0x0, 0x0};
1928        unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1929                                      0, 0, 0x0, 0x0};
1930
1931        memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1932        if (1 == pcontrol)
1933                memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1934        else if (2 == pcontrol)
1935                memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1936        return sizeof(iec_m_pg);
1937}
1938
static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
{	/* SAS SSP mode page (0x19) - short format for MODE SENSE */
	static const unsigned char sas_sf_m_pg[] = {0x19, 0x6, 0x6, 0x0,
						    0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)	/* changeable values: none reported */
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
1949
1950
1951static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1952                              int target_dev_id)
1953{       /* SAS phy control and discover mode page for mode_sense */
1954        unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1955                    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1956                    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
1957                    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
1958                    0x2, 0, 0, 0, 0, 0, 0, 0,
1959                    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1960                    0, 0, 0, 0, 0, 0, 0, 0,
1961                    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1962                    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
1963                    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
1964                    0x3, 0, 0, 0, 0, 0, 0, 0,
1965                    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1966                    0, 0, 0, 0, 0, 0, 0, 0,
1967                };
1968        int port_a, port_b;
1969
1970        put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
1971        put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
1972        put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
1973        put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
1974        port_a = target_dev_id + 1;
1975        port_b = port_a + 1;
1976        memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1977        put_unaligned_be32(port_a, p + 20);
1978        put_unaligned_be32(port_b, p + 48 + 20);
1979        if (1 == pcontrol)
1980                memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1981        return sizeof(sas_pcd_m_pg);
1982}
1983
static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	static const unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc,
		    0, 0x6, 0x10, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)	/* changeable values: none reported */
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
1995
1996#define SDEBUG_MAX_MSENSE_SZ 256
1997
1998static int resp_mode_sense(struct scsi_cmnd *scp,
1999                           struct sdebug_dev_info *devip)
2000{
2001        int pcontrol, pcode, subpcode, bd_len;
2002        unsigned char dev_spec;
2003        int alloc_len, offset, len, target_dev_id;
2004        int target = scp->device->id;
2005        unsigned char * ap;
2006        unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2007        unsigned char *cmd = scp->cmnd;
2008        bool dbd, llbaa, msense_6, is_disk, bad_pcode;
2009
2010        dbd = !!(cmd[1] & 0x8);         /* disable block descriptors */
2011        pcontrol = (cmd[2] & 0xc0) >> 6;
2012        pcode = cmd[2] & 0x3f;
2013        subpcode = cmd[3];
2014        msense_6 = (MODE_SENSE == cmd[0]);
2015        llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2016        is_disk = (sdebug_ptype == TYPE_DISK);
2017        if (is_disk && !dbd)
2018                bd_len = llbaa ? 16 : 8;
2019        else
2020                bd_len = 0;
2021        alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2022        memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2023        if (0x3 == pcontrol) {  /* Saving values not supported */
2024                mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2025                return check_condition_result;
2026        }
2027        target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2028                        (devip->target * 1000) - 3;
2029        /* for disks set DPOFUA bit and clear write protect (WP) bit */
2030        if (is_disk)
2031                dev_spec = 0x10;        /* =0x90 if WP=1 implies read-only */
2032        else
2033                dev_spec = 0x0;
2034        if (msense_6) {
2035                arr[2] = dev_spec;
2036                arr[3] = bd_len;
2037                offset = 4;
2038        } else {
2039                arr[3] = dev_spec;
2040                if (16 == bd_len)
2041                        arr[4] = 0x1;   /* set LONGLBA bit */
2042                arr[7] = bd_len;        /* assume 255 or less */
2043                offset = 8;
2044        }
2045        ap = arr + offset;
2046        if ((bd_len > 0) && (!sdebug_capacity))
2047                sdebug_capacity = get_sdebug_capacity();
2048
2049        if (8 == bd_len) {
2050                if (sdebug_capacity > 0xfffffffe)
2051                        put_unaligned_be32(0xffffffff, ap + 0);
2052                else
2053                        put_unaligned_be32(sdebug_capacity, ap + 0);
2054                put_unaligned_be16(sdebug_sector_size, ap + 6);
2055                offset += bd_len;
2056                ap = arr + offset;
2057        } else if (16 == bd_len) {
2058                put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2059                put_unaligned_be32(sdebug_sector_size, ap + 12);
2060                offset += bd_len;
2061                ap = arr + offset;
2062        }
2063
2064        if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2065                /* TODO: Control Extension page */
2066                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2067                return check_condition_result;
2068        }
2069        bad_pcode = false;
2070
2071        switch (pcode) {
2072        case 0x1:       /* Read-Write error recovery page, direct access */
2073                len = resp_err_recov_pg(ap, pcontrol, target);
2074                offset += len;
2075                break;
2076        case 0x2:       /* Disconnect-Reconnect page, all devices */
2077                len = resp_disconnect_pg(ap, pcontrol, target);
2078                offset += len;
2079                break;
2080        case 0x3:       /* Format device page, direct access */
2081                if (is_disk) {
2082                        len = resp_format_pg(ap, pcontrol, target);
2083                        offset += len;
2084                } else
2085                        bad_pcode = true;
2086                break;
2087        case 0x8:       /* Caching page, direct access */
2088                if (is_disk) {
2089                        len = resp_caching_pg(ap, pcontrol, target);
2090                        offset += len;
2091                } else
2092                        bad_pcode = true;
2093                break;
2094        case 0xa:       /* Control Mode page, all devices */
2095                len = resp_ctrl_m_pg(ap, pcontrol, target);
2096                offset += len;
2097                break;
2098        case 0x19:      /* if spc==1 then sas phy, control+discover */
2099                if ((subpcode > 0x2) && (subpcode < 0xff)) {
2100                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2101                        return check_condition_result;
2102                }
2103                len = 0;
2104                if ((0x0 == subpcode) || (0xff == subpcode))
2105                        len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2106                if ((0x1 == subpcode) || (0xff == subpcode))
2107                        len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2108                                                  target_dev_id);
2109                if ((0x2 == subpcode) || (0xff == subpcode))
2110                        len += resp_sas_sha_m_spg(ap + len, pcontrol);
2111                offset += len;
2112                break;
2113        case 0x1c:      /* Informational Exceptions Mode page, all devices */
2114                len = resp_iec_m_pg(ap, pcontrol, target);
2115                offset += len;
2116                break;
2117        case 0x3f:      /* Read all Mode pages */
2118                if ((0 == subpcode) || (0xff == subpcode)) {
2119                        len = resp_err_recov_pg(ap, pcontrol, target);
2120                        len += resp_disconnect_pg(ap + len, pcontrol, target);
2121                        if (is_disk) {
2122                                len += resp_format_pg(ap + len, pcontrol,
2123                                                      target);
2124                                len += resp_caching_pg(ap + len, pcontrol,
2125                                                       target);
2126                        }
2127                        len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2128                        len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2129                        if (0xff == subpcode) {
2130                                len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2131                                                  target, target_dev_id);
2132                                len += resp_sas_sha_m_spg(ap + len, pcontrol);
2133                        }
2134                        len += resp_iec_m_pg(ap + len, pcontrol, target);
2135                        offset += len;
2136                } else {
2137                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2138                        return check_condition_result;
2139                }
2140                break;
2141        default:
2142                bad_pcode = true;
2143                break;
2144        }
2145        if (bad_pcode) {
2146                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2147                return check_condition_result;
2148        }
2149        if (msense_6)
2150                arr[0] = offset - 1;
2151        else
2152                put_unaligned_be16((offset - 2), arr + 0);
2153        return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2154}
2155
2156#define SDEBUG_MAX_MSELECT_SZ 512
2157
2158static int resp_mode_select(struct scsi_cmnd *scp,
2159                            struct sdebug_dev_info *devip)
2160{
2161        int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2162        int param_len, res, mpage;
2163        unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2164        unsigned char *cmd = scp->cmnd;
2165        int mselect6 = (MODE_SELECT == cmd[0]);
2166
2167        memset(arr, 0, sizeof(arr));
2168        pf = cmd[1] & 0x10;
2169        sp = cmd[1] & 0x1;
2170        param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2171        if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2172                mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2173                return check_condition_result;
2174        }
2175        res = fetch_to_dev_buffer(scp, arr, param_len);
2176        if (-1 == res)
2177                return DID_ERROR << 16;
2178        else if (sdebug_verbose && (res < param_len))
2179                sdev_printk(KERN_INFO, scp->device,
2180                            "%s: cdb indicated=%d, IO sent=%d bytes\n",
2181                            __func__, param_len, res);
2182        md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2183        bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2184        if (md_len > 2) {
2185                mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2186                return check_condition_result;
2187        }
2188        off = bd_len + (mselect6 ? 4 : 8);
2189        mpage = arr[off] & 0x3f;
2190        ps = !!(arr[off] & 0x80);
2191        if (ps) {
2192                mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2193                return check_condition_result;
2194        }
2195        spf = !!(arr[off] & 0x40);
2196        pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2197                       (arr[off + 1] + 2);
2198        if ((pg_len + off) > param_len) {
2199                mk_sense_buffer(scp, ILLEGAL_REQUEST,
2200                                PARAMETER_LIST_LENGTH_ERR, 0);
2201                return check_condition_result;
2202        }
2203        switch (mpage) {
2204        case 0x8:      /* Caching Mode page */
2205                if (caching_pg[1] == arr[off + 1]) {
2206                        memcpy(caching_pg + 2, arr + off + 2,
2207                               sizeof(caching_pg) - 2);
2208                        goto set_mode_changed_ua;
2209                }
2210                break;
2211        case 0xa:      /* Control Mode page */
2212                if (ctrl_m_pg[1] == arr[off + 1]) {
2213                        memcpy(ctrl_m_pg + 2, arr + off + 2,
2214                               sizeof(ctrl_m_pg) - 2);
2215                        sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2216                        goto set_mode_changed_ua;
2217                }
2218                break;
2219        case 0x1c:      /* Informational Exceptions Mode page */
2220                if (iec_m_pg[1] == arr[off + 1]) {
2221                        memcpy(iec_m_pg + 2, arr + off + 2,
2222                               sizeof(iec_m_pg) - 2);
2223                        goto set_mode_changed_ua;
2224                }
2225                break;
2226        default:
2227                break;
2228        }
2229        mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2230        return check_condition_result;
2231set_mode_changed_ua:
2232        set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2233        return 0;
2234}
2235
/* Fill in the Temperature log page (0xd) body; returns bytes written. */
static int resp_temp_l_pg(unsigned char * arr)
{
	/* Two log parameters, each: 2-byte code, control 0x3, length 2,
	 * then the 2-byte value (parameter 0 = 38, parameter 1 = 65). */
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,
		0x0, 0x1, 0x3, 0x2, 0x0, 65,
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2245
2246static int resp_ie_l_pg(unsigned char * arr)
2247{
2248        unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2249                };
2250
2251        memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2252        if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2253                arr[4] = THRESHOLD_EXCEEDED;
2254                arr[5] = 0xff;
2255        }
2256        return sizeof(ie_l_pg);
2257}
2258
2259#define SDEBUG_MAX_LSENSE_SZ 512
2260
2261static int resp_log_sense(struct scsi_cmnd * scp,
2262                          struct sdebug_dev_info * devip)
2263{
2264        int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2265        unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2266        unsigned char *cmd = scp->cmnd;
2267
2268        memset(arr, 0, sizeof(arr));
2269        ppc = cmd[1] & 0x2;
2270        sp = cmd[1] & 0x1;
2271        if (ppc || sp) {
2272                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2273                return check_condition_result;
2274        }
2275        pcontrol = (cmd[2] & 0xc0) >> 6;
2276        pcode = cmd[2] & 0x3f;
2277        subpcode = cmd[3] & 0xff;
2278        alloc_len = get_unaligned_be16(cmd + 7);
2279        arr[0] = pcode;
2280        if (0 == subpcode) {
2281                switch (pcode) {
2282                case 0x0:       /* Supported log pages log page */
2283                        n = 4;
2284                        arr[n++] = 0x0;         /* this page */
2285                        arr[n++] = 0xd;         /* Temperature */
2286                        arr[n++] = 0x2f;        /* Informational exceptions */
2287                        arr[3] = n - 4;
2288                        break;
2289                case 0xd:       /* Temperature log page */
2290                        arr[3] = resp_temp_l_pg(arr + 4);
2291                        break;
2292                case 0x2f:      /* Informational exceptions log page */
2293                        arr[3] = resp_ie_l_pg(arr + 4);
2294                        break;
2295                default:
2296                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2297                        return check_condition_result;
2298                }
2299        } else if (0xff == subpcode) {
2300                arr[0] |= 0x40;
2301                arr[1] = subpcode;
2302                switch (pcode) {
2303                case 0x0:       /* Supported log pages and subpages log page */
2304                        n = 4;
2305                        arr[n++] = 0x0;
2306                        arr[n++] = 0x0;         /* 0,0 page */
2307                        arr[n++] = 0x0;
2308                        arr[n++] = 0xff;        /* this page */
2309                        arr[n++] = 0xd;
2310                        arr[n++] = 0x0;         /* Temperature */
2311                        arr[n++] = 0x2f;
2312                        arr[n++] = 0x0; /* Informational exceptions */
2313                        arr[3] = n - 4;
2314                        break;
2315                case 0xd:       /* Temperature subpages */
2316                        n = 4;
2317                        arr[n++] = 0xd;
2318                        arr[n++] = 0x0;         /* Temperature */
2319                        arr[3] = n - 4;
2320                        break;
2321                case 0x2f:      /* Informational exceptions subpages */
2322                        n = 4;
2323                        arr[n++] = 0x2f;
2324                        arr[n++] = 0x0;         /* Informational exceptions */
2325                        arr[3] = n - 4;
2326                        break;
2327                default:
2328                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2329                        return check_condition_result;
2330                }
2331        } else {
2332                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2333                return check_condition_result;
2334        }
2335        len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2336        return fill_from_dev_buffer(scp, arr,
2337                    min(len, SDEBUG_MAX_INQ_ARR_SZ));
2338}
2339
2340static int check_device_access_params(struct scsi_cmnd *scp,
2341                                      unsigned long long lba, unsigned int num)
2342{
2343        if (lba + num > sdebug_capacity) {
2344                mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2345                return check_condition_result;
2346        }
2347        /* transfer length excessive (tie in to block limits VPD page) */
2348        if (num > sdebug_store_sectors) {
2349                /* needs work to find which cdb byte 'num' comes from */
2350                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2351                return check_condition_result;
2352        }
2353        return 0;
2354}
2355
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
			    bool do_write)
{
	int ret;
	u64 block, rest = 0;
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;

	/* select the data buffer and the direction this transfer implies */
	if (do_write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;
	/* reject a direction mismatch (bidirectional commands are exempt) */
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;

	/* map lba into the circular fake store; 'rest' is the number of
	 * sectors that wrap past the end of the store */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	/* first (possibly only) part: up to the end of the store */
	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, 0, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* wrap-around part continues at the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    (num - rest) * sdebug_sector_size, do_write);
	}

	return ret;
}
2396
2397/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2398 * arr into fake_store(lba,num) and return true. If comparison fails then
2399 * return false. */
2400static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2401{
2402        bool res;
2403        u64 block, rest = 0;
2404        u32 store_blks = sdebug_store_sectors;
2405        u32 lb_size = sdebug_sector_size;
2406
2407        block = do_div(lba, store_blks);
2408        if (block + num > store_blks)
2409                rest = block + num - store_blks;
2410
2411        res = !memcmp(fake_storep + (block * lb_size), arr,
2412                      (num - rest) * lb_size);
2413        if (!res)
2414                return res;
2415        if (rest)
2416                res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2417                             rest * lb_size);
2418        if (!res)
2419                return res;
2420        arr += num * lb_size;
2421        memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2422        if (rest)
2423                memcpy(fake_storep, arr + ((num - rest) * lb_size),
2424                       rest * lb_size);
2425        return res;
2426}
2427
2428static __be16 dif_compute_csum(const void *buf, int len)
2429{
2430        __be16 csum;
2431
2432        if (sdebug_guard)
2433                csum = (__force __be16)ip_compute_csum(buf, len);
2434        else
2435                csum = cpu_to_be16(crc_t10dif(buf, len));
2436
2437        return csum;
2438}
2439
2440static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2441                      sector_t sector, u32 ei_lba)
2442{
2443        __be16 csum = dif_compute_csum(data, sdebug_sector_size);
2444
2445        if (sdt->guard_tag != csum) {
2446                pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2447                        (unsigned long)sector,
2448                        be16_to_cpu(sdt->guard_tag),
2449                        be16_to_cpu(csum));
2450                return 0x01;
2451        }
2452        if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2453            be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2454                pr_err("REF check failed on sector %lu\n",
2455                        (unsigned long)sector);
2456                return 0x03;
2457        }
2458        if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2459            be32_to_cpu(sdt->ref_tag) != ei_lba) {
2460                pr_err("REF check failed on sector %lu\n",
2461                        (unsigned long)sector);
2462                return 0x03;
2463        }
2464        return 0;
2465}
2466
/* Copy protection information between the command's protection
 * scatter-gather list and the circular dif_storep store.  'read' is the
 * direction: true copies store -> sgl, false copies sgl -> store. */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		/* 'rest' is the tail of this chunk that wraps past the end
		 * of the store and continues at dif_storep[0] */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		/* advance by whole PI tuples consumed from this sg chunk */
		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2509
2510static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2511                            unsigned int sectors, u32 ei_lba)
2512{
2513        unsigned int i;
2514        struct t10_pi_tuple *sdt;
2515        sector_t sector;
2516
2517        for (i = 0; i < sectors; i++, ei_lba++) {
2518                int ret;
2519
2520                sector = start_sec + i;
2521                sdt = dif_store(sector);
2522
2523                if (sdt->app_tag == cpu_to_be16(0xffff))
2524                        continue;
2525
2526                ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2527                if (ret) {
2528                        dif_errors++;
2529                        return ret;
2530                }
2531        }
2532
2533        dif_copy_prot(SCpnt, start_sec, sectors, true);
2534        dix_reads++;
2535
2536        return 0;
2537}
2538
/* Service READ(6/10/12/16/32) and the read half of XDWRITEREAD(10):
 * decode lba/num from the CDB, apply protection checks and configured
 * error injection, then copy data out of the fake store. */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		/* READ(6): 21-bit LBA split across bytes 1..3 */
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* transfer length 0 means 256 blocks for READ(6) */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:        /* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* RDPROTECT must be 0 for type 2 protection */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			/* injected short read: pretend half arrived */
			if (sqcp->inj_short)
				num /= 2;
		}
	} else
		sqcp = NULL;

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* optional fixed unrecoverable-error region near OPT_MEDIUM_ERR_ADDR */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
		     ((lba + num) > OPT_MEDIUM_ERR_ADDR))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;   /* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	/* report any shortfall between requested and copied bytes */
	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	/* injected completion errors, checked after data was transferred */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2679
/* Log a hex/ASCII dump of a buffer, 16 bytes per line, for debugging
 * failed PI verification. */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		/* bound the inner loop by len as well: previously a len
		 * that was not a multiple of 16 read past the buffer */
		for (j = 0, n = 0; j < 16 && i + j < len; j++) {
			unsigned char c = buf[i + j];

			if (c >= 0x20 && c < 0x7e)	/* printable ASCII */
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", c);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", c);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
2701
/* Verify the protection information accompanying a write: walk the
 * protection sgl and data sgl in lockstep, check each PI tuple against
 * its data sector, and on full success store the PI into dif_storep.
 * Returns 0 on success or a dif_verify() error code (0x01/0x03). */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		/* running out of data pages before PI pages is a guard error */
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				/* dump the offending data sector for debug */
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* every tuple verified: persist the PI into the store */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2773
/* Map an LBA to its index in the provisioning bitmap.  Note that
 * sector_div() divides 'lba' in place (its return value, the remainder,
 * is discarded here), so the function returns the quotient. */
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);
	return lba;
}
2781
/* Inverse of lba_to_map_index(): first LBA covered by bitmap 'index'.
 * For index 0 with a non-zero alignment the subtraction wraps in
 * unsigned sector_t arithmetic; callers only use it as a loop cursor. */
static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
2790
2791static unsigned int map_state(sector_t lba, unsigned int *num)
2792{
2793        sector_t end;
2794        unsigned int mapped;
2795        unsigned long index;
2796        unsigned long next;
2797
2798        index = lba_to_map_index(lba);
2799        mapped = test_bit(index, map_storep);
2800
2801        if (mapped)
2802                next = find_next_zero_bit(map_storep, map_size, index);
2803        else
2804                next = find_next_bit(map_storep, map_size, index);
2805
2806        end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2807        *num = end - lba;
2808        return mapped;
2809}
2810
2811static void map_region(sector_t lba, unsigned int len)
2812{
2813        sector_t end = lba + len;
2814
2815        while (lba < end) {
2816                unsigned long index = lba_to_map_index(lba);
2817
2818                if (index < map_size)
2819                        set_bit(index, map_storep);
2820
2821                lba = map_index_to_lba(index + 1);
2822        }
2823}
2824
/* Clear provisioning bits for granules fully contained in
 * [lba, lba+len) and scrub the corresponding data and PI store bytes.
 * Granules only partially covered at either end stay mapped. */
static void unmap_region(sector_t lba, unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only unmap when the whole granule lies inside the range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fake_storep +
				       lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (dif_storep) {
				/* all-0xff PI tuples: prot_verify_read()
				 * skips sectors whose app tag is 0xffff */
				memset(dif_storep + lba, 0xff,
				       sizeof(*dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
2852
/*
 * Respond to the WRITE command family: WRITE(6), WRITE(10), WRITE(12),
 * WRITE(16), WRITE(32) and the write leg of XDWRITEREAD(10).  Decodes the
 * CDB, validates the LBA range against the (simulated) capacity, optionally
 * verifies DIF/DIX protection information, then copies the data-out buffer
 * into the fake store while holding the atomic_rw write lock.
 *
 * Returns 0 on success, check_condition_result (or illegal_condition_result
 * / DID_ERROR << 16) on failure.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;		/* expected initial LBA tag, WRITE(32) only */
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* Per-opcode CDB decode: LBA and transfer length live at
	 * different offsets and widths for each WRITE variant. */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		/* WRITE(6): 21-bit LBA packed into bytes 1..3 */
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* transfer length of 0 means 256 blocks for WRITE(6) */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* type 2 protection forbids the RDPROTECT/WRPROTECT bits */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		/* warn (but allow) unprotected writes to a DIF device */
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, true);
	/* thin provisioning: record the written region as mapped */
	if (unlikely(scsi_debug_lbp()))
		map_region(lba, num);
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* optional fault injection configured via module parameters */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
2973
/*
 * Common worker for WRITE SAME(10/16).  One logical block is fetched from
 * the data-out buffer (or zeroed when @ndob is set) and replicated over
 * @num blocks starting at @lba.  With @unmap set and logical block
 * provisioning enabled, the region is deallocated instead of written.
 * @ei_lba is accepted for signature symmetry but not used here.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;
	u64 lba_off;

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	/* UNMAP path: deallocate and skip the data copy entirely */
	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}

	/* byte offset of the first block in the fake store */
	lba_off = lba * sdebug_sector_size;
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	if (ndob) {
		memset(fake_storep + lba_off, 0, sdebug_sector_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
					  sdebug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, "write same",
			    num * sdebug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
		       fake_storep + lba_off,
		       sdebug_sector_size);

	/* thin provisioning: mark the whole written region as mapped */
	if (scsi_debug_lbp())
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
3024
3025static int resp_write_same_10(struct scsi_cmnd *scp,
3026                              struct sdebug_dev_info *devip)
3027{
3028        u8 *cmd = scp->cmnd;
3029        u32 lba;
3030        u16 num;
3031        u32 ei_lba = 0;
3032        bool unmap = false;
3033
3034        if (cmd[1] & 0x8) {
3035                if (sdebug_lbpws10 == 0) {
3036                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3037                        return check_condition_result;
3038                } else
3039                        unmap = true;
3040        }
3041        lba = get_unaligned_be32(cmd + 2);
3042        num = get_unaligned_be16(cmd + 7);
3043        if (num > sdebug_write_same_length) {
3044                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3045                return check_condition_result;
3046        }
3047        return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3048}
3049
3050static int resp_write_same_16(struct scsi_cmnd *scp,
3051                              struct sdebug_dev_info *devip)
3052{
3053        u8 *cmd = scp->cmnd;
3054        u64 lba;
3055        u32 num;
3056        u32 ei_lba = 0;
3057        bool unmap = false;
3058        bool ndob = false;
3059
3060        if (cmd[1] & 0x8) {     /* UNMAP */
3061                if (sdebug_lbpws == 0) {
3062                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3063                        return check_condition_result;
3064                } else
3065                        unmap = true;
3066        }
3067        if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3068                ndob = true;
3069        lba = get_unaligned_be64(cmd + 2);
3070        num = get_unaligned_be32(cmd + 10);
3071        if (num > sdebug_write_same_length) {
3072                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3073                return check_condition_result;
3074        }
3075        return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3076}
3077
3078/* Note the mode field is in the same position as the (lower) service action
3079 * field. For the Report supported operation codes command, SPC-4 suggests
3080 * each mode of this command should be reported separately; for future. */
3081static int resp_write_buffer(struct scsi_cmnd *scp,
3082                             struct sdebug_dev_info *devip)
3083{
3084        u8 *cmd = scp->cmnd;
3085        struct scsi_device *sdp = scp->device;
3086        struct sdebug_dev_info *dp;
3087        u8 mode;
3088
3089        mode = cmd[1] & 0x1f;
3090        switch (mode) {
3091        case 0x4:       /* download microcode (MC) and activate (ACT) */
3092                /* set UAs on this device only */
3093                set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3094                set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3095                break;
3096        case 0x5:       /* download MC, save and ACT */
3097                set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3098                break;
3099        case 0x6:       /* download MC with offsets and ACT */
3100                /* set UAs on most devices (LUs) in this target */
3101                list_for_each_entry(dp,
3102                                    &devip->sdbg_host->dev_info_list,
3103                                    dev_list)
3104                        if (dp->target == sdp->id) {
3105                                set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3106                                if (devip != dp)
3107                                        set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3108                                                dp->uas_bm);
3109                        }
3110                break;
3111        case 0x7:       /* download MC with offsets, save, and ACT */
3112                /* set UA on all devices (LUs) in this target */
3113                list_for_each_entry(dp,
3114                                    &devip->sdbg_host->dev_info_list,
3115                                    dev_list)
3116                        if (dp->target == sdp->id)
3117                                set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3118                                        dp->uas_bm);
3119                break;
3120        default:
3121                /* do nothing for this command for other mode values */
3122                break;
3123        }
3124        return 0;
3125}
3126
/*
 * Respond to COMPARE AND WRITE (0x89).  The data-out buffer carries two
 * back-to-back block ranges of @num blocks each: a "compare" image and a
 * "write" image.  If the compare image matches the current store contents
 * at @lba, the write image replaces them; otherwise MISCOMPARE sense is
 * returned.  The whole operation is atomic under the atomic_rw write lock.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;		/* temp buffer for both halves of data-out */
	u8 *fake_storep_hold;
	u64 lba;
	u32 dnum;		/* total blocks to fetch (2 * num) */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	/* type 2 protection forbids the WRPROTECT bits in byte 1 */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	/* warn (but allow) unprotected writes to a DIF device */
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	dnum = 2 * num;
	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	/* compare first half of arr against store; on match write 2nd half */
	if (!comp_write_worker(lba, num, arr)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	/* thin provisioning: record the written region as mapped */
	if (scsi_debug_lbp())
		map_region(lba, num);
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3202
/* On-the-wire UNMAP parameter list block descriptor (16 bytes, big endian) */
struct unmap_block_desc {
	__be64	lba;		/* first LBA of the range to unmap */
	__be32	blocks;		/* number of blocks in the range */
	__be32	__reserved;
};
3208
3209static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3210{
3211        unsigned char *buf;
3212        struct unmap_block_desc *desc;
3213        unsigned int i, payload_len, descriptors;
3214        int ret;
3215        unsigned long iflags;
3216
3217
3218        if (!scsi_debug_lbp())
3219                return 0;       /* fib and say its done */
3220        payload_len = get_unaligned_be16(scp->cmnd + 7);
3221        BUG_ON(scsi_bufflen(scp) != payload_len);
3222
3223        descriptors = (payload_len - 8) / 16;
3224        if (descriptors > sdebug_unmap_max_desc) {
3225                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3226                return check_condition_result;
3227        }
3228
3229        buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3230        if (!buf) {
3231                mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3232                                INSUFF_RES_ASCQ);
3233                return check_condition_result;
3234        }
3235
3236        scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3237
3238        BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3239        BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3240
3241        desc = (void *)&buf[8];
3242
3243        write_lock_irqsave(&atomic_rw, iflags);
3244
3245        for (i = 0 ; i < descriptors ; i++) {
3246                unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3247                unsigned int num = get_unaligned_be32(&desc[i].blocks);
3248
3249                ret = check_device_access_params(scp, lba, num);
3250                if (ret)
3251                        goto out;
3252
3253                unmap_region(lba, num);
3254        }
3255
3256        ret = 0;
3257
3258out:
3259        write_unlock_irqrestore(&atomic_rw, iflags);
3260        kfree(buf);
3261
3262        return ret;
3263}
3264
3265#define SDEBUG_GET_LBA_STATUS_LEN 32
3266
3267static int resp_get_lba_status(struct scsi_cmnd *scp,
3268                               struct sdebug_dev_info *devip)
3269{
3270        u8 *cmd = scp->cmnd;
3271        u64 lba;
3272        u32 alloc_len, mapped, num;
3273        u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3274        int ret;
3275
3276        lba = get_unaligned_be64(cmd + 2);
3277        alloc_len = get_unaligned_be32(cmd + 10);
3278
3279        if (alloc_len < 24)
3280                return 0;
3281
3282        ret = check_device_access_params(scp, lba, 1);
3283        if (ret)
3284                return ret;
3285
3286        if (scsi_debug_lbp())
3287                mapped = map_state(lba, &num);
3288        else {
3289                mapped = 1;
3290                /* following just in case virtual_gb changed */
3291                sdebug_capacity = get_sdebug_capacity();
3292                if (sdebug_capacity - lba <= 0xffffffff)
3293                        num = sdebug_capacity - lba;
3294                else
3295                        num = 0xffffffff;
3296        }
3297
3298        memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3299        put_unaligned_be32(20, arr);            /* Parameter Data Length */
3300        put_unaligned_be64(lba, arr + 8);       /* LBA */
3301        put_unaligned_be32(num, arr + 16);      /* Number of blocks */
3302        arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
3303
3304        return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3305}
3306
3307#define RL_BUCKET_ELEMS 8
3308
/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
 * (W-LUN), the normal Linux scanning logic does not associate it with a
 * device (e.g. /dev/sg7). The following magic will make that association:
 *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
 * where <n> is a host number. If there are multiple targets in a host then
 * the above will associate a W-LUN to each target. To only get a W-LUN
 * for target 2, then use "echo '- 2 49409' > scan" .
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	/* response is built RL_BUCKET_ELEMS entries (8 bytes each) at a time */
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;	/* running offset into the response */
	const int sz_lun = sizeof(struct scsi_lun);

	/* a REPORT LUNS clears any pending "reported luns changed" UA */
	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	/* SPC: allocation length below 4 is an error for this command */
	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsidiary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	/* when LUN 0 is suppressed, one fewer normal LUN is reported */
	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket starts with the 8 byte header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		/* a partially filled bucket means we are done with LUNs */
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* append the W-LUN (if requested) after the last normal LUN */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
3407
/*
 * XOR the data-out buffer into the data-in buffer of a bidirectional
 * XDWRITEREAD(10) command.  The data-in side already holds the blocks
 * read by resp_read_dt0(); each of its bytes is XORed in place with the
 * corresponding data-out byte.  @lba, @num and @devip are currently
 * unused here (the read/write legs have already consumed them).
 */
static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
			    unsigned int num, struct sdebug_dev_info *devip)
{
	int j;
	unsigned char *kaddr, *buf;
	unsigned int offset;
	struct scsi_data_buffer *sdb = scsi_in(scp);
	struct sg_mapping_iter miter;

	/* better not to use temporary buffer. */
	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	/* linearize the data-out scatter list into buf */
	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	offset = 0;
	/* ATOMIC: kmap_atomic used; TO_SG: we write back into the sg list */
	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
			SG_MITER_ATOMIC | SG_MITER_TO_SG);

	while (sg_miter_next(&miter)) {
		kaddr = miter.addr;
		for (j = 0; j < miter.length; j++)
			*(kaddr + j) ^= *(buf + offset + j);

		offset += miter.length;
	}
	sg_miter_stop(&miter);
	kfree(buf);

	return 0;
}
3443
3444static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3445                               struct sdebug_dev_info *devip)
3446{
3447        u8 *cmd = scp->cmnd;
3448        u64 lba;
3449        u32 num;
3450        int errsts;
3451
3452        if (!scsi_bidi_cmnd(scp)) {
3453                mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3454                                INSUFF_RES_ASCQ);
3455                return check_condition_result;
3456        }
3457        errsts = resp_read_dt0(scp, devip);
3458        if (errsts)
3459                return errsts;
3460        if (!(cmd[1] & 0x4)) {          /* DISABLE_WRITE is not set */
3461                errsts = resp_write_dt0(scp, devip);
3462                if (errsts)
3463                        return errsts;
3464        }
3465        lba = get_unaligned_be32(cmd + 2);
3466        num = get_unaligned_be16(cmd + 7);
3467        return resp_xdwriteread(scp, lba, num, devip);
3468}
3469
3470static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3471{
3472        struct sdebug_queue *sqp = sdebug_q_arr;
3473
3474        if (sdebug_mq_active) {
3475                u32 tag = blk_mq_unique_tag(cmnd->request);
3476                u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3477
3478                if (unlikely(hwq >= submit_queues)) {
3479                        pr_warn("Unexpected hwq=%d, apply modulo\n", hwq);
3480                        hwq %= submit_queues;
3481                }
3482                pr_debug("tag=%u, hwq=%d\n", tag, hwq);
3483                return sqp + hwq;
3484        } else
3485                return sqp;
3486}
3487
/* Queued (deferred) command completions converge here.  Called from hrtimer
 * or workqueue context; validates the queue slot, releases it under the
 * per-queue lock, handles a user-initiated max_queue reduction, and finally
 * signals the mid level via scsi_done. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* track completions that migrated off the issuing CPU */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	/* sanity check the slot index before touching qc_arr */
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* non-zero retired_max_queue means max_queue was reduced by user */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* once no slot above the new limit is busy, retire is done */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
}
3552
3553/* When high resolution timer goes off this function is called. */
3554static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3555{
3556        struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3557                                                  hrt);
3558        sdebug_q_cmd_complete(sd_dp);
3559        return HRTIMER_NORESTART;
3560}
3561
3562/* When work queue schedules work, it calls this function. */
3563static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3564{
3565        struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3566                                                  ew.work);
3567        sdebug_q_cmd_complete(sd_dp);
3568}
3569
3570static bool got_shared_uuid;
3571static uuid_be shared_uuid;
3572
3573static struct sdebug_dev_info *sdebug_device_create(
3574                        struct sdebug_host_info *sdbg_host, gfp_t flags)
3575{
3576        struct sdebug_dev_info *devip;
3577
3578        devip = kzalloc(sizeof(*devip), flags);
3579        if (devip) {
3580                if (sdebug_uuid_ctl == 1)
3581                        uuid_be_gen(&devip->lu_name);
3582                else if (sdebug_uuid_ctl == 2) {
3583                        if (got_shared_uuid)
3584                                devip->lu_name = shared_uuid;
3585                        else {
3586                                uuid_be_gen(&shared_uuid);
3587                                got_shared_uuid = true;
3588                                devip->lu_name = shared_uuid;
3589                        }
3590                }
3591                devip->sdbg_host = sdbg_host;
3592                list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3593        }
3594        return devip;
3595}
3596
3597static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3598{
3599        struct sdebug_host_info *sdbg_host;
3600        struct sdebug_dev_info *open_devip = NULL;
3601        struct sdebug_dev_info *devip;
3602
3603        sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3604        if (!sdbg_host) {
3605                pr_err("Host info NULL\n");
3606                return NULL;
3607        }
3608        list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3609                if ((devip->used) && (devip->channel == sdev->channel) &&
3610                    (devip->target == sdev->id) &&
3611                    (devip->lun == sdev->lun))
3612                        return devip;
3613                else {
3614                        if ((!devip->used) && (!open_devip))
3615                                open_devip = devip;
3616                }
3617        }
3618        if (!open_devip) { /* try and make a new one */
3619                open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3620                if (!open_devip) {
3621                        pr_err("out of memory at line %d\n", __LINE__);
3622                        return NULL;
3623                }
3624        }
3625
3626        open_devip->channel = sdev->channel;
3627        open_devip->target = sdev->id;
3628        open_devip->lun = sdev->lun;
3629        open_devip->sdbg_host = sdbg_host;
3630        atomic_set(&open_devip->num_in_q, 0);
3631        set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3632        open_devip->used = true;
3633        return open_devip;
3634}
3635
3636static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3637{
3638        if (sdebug_verbose)
3639                pr_info("slave_alloc <%u %u %u %llu>\n",
3640                       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3641        queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3642        return 0;
3643}
3644
3645static int scsi_debug_slave_configure(struct scsi_device *sdp)
3646{
3647        struct sdebug_dev_info *devip =
3648                        (struct sdebug_dev_info *)sdp->hostdata;
3649
3650        if (sdebug_verbose)
3651                pr_info("slave_configure <%u %u %u %llu>\n",
3652                       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3653        if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3654                sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3655        if (devip == NULL) {
3656                devip = find_build_dev_info(sdp);
3657                if (devip == NULL)
3658                        return 1;  /* no resources, will be marked offline */
3659        }
3660        sdp->hostdata = devip;
3661        blk_queue_max_segment_size(sdp->request_queue, -1U);
3662        if (sdebug_no_uld)
3663                sdp->no_uld_attach = 1;
3664        return 0;
3665}
3666
3667static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3668{
3669        struct sdebug_dev_info *devip =
3670                (struct sdebug_dev_info *)sdp->hostdata;
3671
3672        if (sdebug_verbose)
3673                pr_info("slave_destroy <%u %u %u %llu>\n",
3674                       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3675        if (devip) {
3676                /* make this slot available for re-use */
3677                devip->used = false;
3678                sdp->hostdata = NULL;
3679        }
3680}
3681
3682static void stop_qc_helper(struct sdebug_defer *sd_dp)
3683{
3684        if (!sd_dp)
3685                return;
3686        if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
3687                hrtimer_cancel(&sd_dp->hrt);
3688        else if (sdebug_jdelay < 0)
3689                cancel_work_sync(&sd_dp->ew.work);
3690}
3691
/* If @cmnd is found in any submission queue, deletes its timer or work
 * queue item, releases its slot and returns true; else returns false.
 * Takes and releases each queue's qc_lock; must be called without it held.
 */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		/* scan up to the larger of the current and retired queue
		 * limits so commands queued before max_queue was reduced
		 * are still found */
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				/* drop the lock before cancelling: the
				 * cancel may wait for the completion
				 * routine, which presumably takes qc_lock */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
3731
/* Deletes (stops) timers or work queue items of all queued commands and
 * releases their slots. Scans every submission queue over the full
 * SDEBUG_CANQUEUE range (not just sdebug_max_queue).
 */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				/* release qc_lock around the cancel (it may
				 * wait on the completion path), then retake
				 * it to continue the scan */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
3764
3765/* Free queued command memory on heap */
3766static void free_all_queued(void)
3767{
3768        int j, k;
3769        struct sdebug_queue *sqp;
3770        struct sdebug_queued_cmd *sqcp;
3771
3772        for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3773                for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3774                        sqcp = &sqp->qc_arr[k];
3775                        kfree(sqcp->sd_dp);
3776                        sqcp->sd_dp = NULL;
3777                }
3778        }
3779}
3780
3781static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3782{
3783        bool ok;
3784
3785        ++num_aborts;
3786        if (SCpnt) {
3787                ok = stop_queued_cmnd(SCpnt);
3788                if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3789                        sdev_printk(KERN_INFO, SCpnt->device,
3790                                    "%s: command%s found\n", __func__,
3791                                    ok ? "" : " not");
3792        }
3793        return SUCCESS;
3794}
3795
3796static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3797{
3798        ++num_dev_resets;
3799        if (SCpnt && SCpnt->device) {
3800                struct scsi_device *sdp = SCpnt->device;
3801                struct sdebug_dev_info *devip =
3802                                (struct sdebug_dev_info *)sdp->hostdata;
3803
3804                if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3805                        sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3806                if (devip)
3807                        set_bit(SDEBUG_UA_POR, devip->uas_bm);
3808        }
3809        return SUCCESS;
3810}
3811
/* Error-handler target reset: raise a bus-reset unit attention on every
 * device of the command's target. Always reports SUCCESS ("lie") even
 * when nothing could be done.
 */
static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *devip;
	struct scsi_device *sdp;
	struct Scsi_Host *hp;
	int k = 0;	/* count of devices found in the target */

	++num_target_resets;
	if (!SCpnt)
		goto lie;
	sdp = SCpnt->device;
	if (!sdp)
		goto lie;
	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
	hp = sdp->host;
	if (!hp)
		goto lie;
	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
	if (sdbg_host) {
		/* mark every device on this host with a matching target id */
		list_for_each_entry(devip,
				    &sdbg_host->dev_info_list,
				    dev_list)
			if (devip->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
				++k;
			}
	}
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp,
			    "%s: %d device(s) found in target\n", __func__, k);
lie:
	return SUCCESS;
}
3847
/* Error-handler bus reset: raise a bus-reset unit attention on every
 * device of the command's (pseudo) host. Always reports SUCCESS.
 */
static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *devip;
	struct scsi_device * sdp;
	struct Scsi_Host * hp;
	int k = 0;	/* count of devices found on the host */

	++num_bus_resets;
	if (!(SCpnt && SCpnt->device))
		goto lie;
	sdp = SCpnt->device;
	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
	hp = sdp->host;
	if (hp) {
		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
		if (sdbg_host) {
			list_for_each_entry(devip,
					    &sdbg_host->dev_info_list,
					    dev_list) {
				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
				++k;
			}
		}
	}
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp,
			    "%s: %d device(s) found in host\n", __func__, k);
lie:
	return SUCCESS;
}
3880
/* Error-handler host reset: raise a bus-reset unit attention on every
 * device of every scsi_debug host, then cancel all queued commands.
 * Always reports SUCCESS.
 */
static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
{
	struct sdebug_host_info * sdbg_host;
	struct sdebug_dev_info *devip;
	int k = 0;	/* count of devices visited across all hosts */

	++num_host_resets;
	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
	/* walk every pseudo host; list guarded by sdebug_host_list_lock */
	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		list_for_each_entry(devip, &sdbg_host->dev_info_list,
				    dev_list) {
			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
			++k;
		}
	}
	spin_unlock(&sdebug_host_list_lock);
	stop_all_queued();
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, SCpnt->device,
			    "%s: %d device(s) found\n", __func__, k);
	return SUCCESS;
}
3905
/* Write a DOS/MBR partition table with sdebug_num_parts roughly equal
 * Linux (type 0x83) partitions into the first sector of the ram store
 * @ramp. Partition boundaries are aligned to cylinders (CHS geometry
 * from sdebug_heads and sdebug_sectors_per). No-op unless at least one
 * partition is requested and the store is >= 1 MiB.
 */
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition * pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track is reserved for the MBR itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	/* round each start down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel ends loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);	/* MBR entry 1 */
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* convert LBA start/end to cylinder/head/sector triples */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
3955
3956static void block_unblock_all_queues(bool block)
3957{
3958        int j;
3959        struct sdebug_queue *sqp;
3960
3961        for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
3962                atomic_set(&sqp->blocked, (int)block);
3963}
3964
3965/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
3966 * commands will be processed normally before triggers occur.
3967 */
3968static void tweak_cmnd_count(void)
3969{
3970        int count, modulo;
3971
3972        modulo = abs(sdebug_every_nth);
3973        if (modulo < 2)
3974                return;
3975        block_unblock_all_queues(true);
3976        count = atomic_read(&sdebug_cmnd_count);
3977        atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
3978        block_unblock_all_queues(false);
3979}
3980
3981static void clear_queue_stats(void)
3982{
3983        atomic_set(&sdebug_cmnd_count, 0);
3984        atomic_set(&sdebug_completions, 0);
3985        atomic_set(&sdebug_miss_cpus, 0);
3986        atomic_set(&sdebug_a_tsf, 0);
3987}
3988
3989static void setup_inject(struct sdebug_queue *sqp,
3990                         struct sdebug_queued_cmd *sqcp)
3991{
3992        if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
3993                return;
3994        sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
3995        sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
3996        sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
3997        sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
3998        sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
3999}
4000
4001/* Complete the processing of the thread that queued a SCSI command to this
4002 * driver. It either completes the command by calling cmnd_done() or
4003 * schedules a hr timer or work queue then returns 0. Returns
4004 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4005 */
4006static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4007                         int scsi_result, int delta_jiff)
4008{
4009        unsigned long iflags;
4010        int k, num_in_q, qdepth, inject;
4011        struct sdebug_queue *sqp;
4012        struct sdebug_queued_cmd *sqcp;
4013        struct scsi_device *sdp;
4014        struct sdebug_defer *sd_dp;
4015
4016        if (unlikely(devip == NULL)) {
4017                if (scsi_result == 0)
4018                        scsi_result = DID_NO_CONNECT << 16;
4019                goto respond_in_thread;
4020        }
4021        sdp = cmnd->device;
4022
4023        if (unlikely(sdebug_verbose && scsi_result))
4024                sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4025                            __func__, scsi_result);
4026        if (delta_jiff == 0)
4027                goto respond_in_thread;
4028
4029        /* schedule the response at a later time if resources permit */
4030        sqp = get_queue(cmnd);
4031        spin_lock_irqsave(&sqp->qc_lock, iflags);
4032        if (unlikely(atomic_read(&sqp->blocked))) {
4033                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4034                return SCSI_MLQUEUE_HOST_BUSY;
4035        }
4036        num_in_q = atomic_read(&devip->num_in_q);
4037        qdepth = cmnd->device->queue_depth;
4038        inject = 0;
4039        if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4040                if (scsi_result) {
4041                        spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4042                        goto respond_in_thread;
4043                } else
4044                        scsi_result = device_qfull_result;
4045        } else if (unlikely(sdebug_every_nth &&
4046                            (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4047                            (scsi_result == 0))) {
4048                if ((num_in_q == (qdepth - 1)) &&
4049                    (atomic_inc_return(&sdebug_a_tsf) >=
4050                     abs(sdebug_every_nth))) {
4051                        atomic_set(&sdebug_a_tsf, 0);
4052                        inject = 1;
4053                        scsi_result = device_qfull_result;
4054                }
4055        }
4056
4057        k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4058        if (unlikely(k >= sdebug_max_queue)) {
4059                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4060                if (scsi_result)
4061                        goto respond_in_thread;
4062                else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4063                        scsi_result = device_qfull_result;
4064                if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4065                        sdev_printk(KERN_INFO, sdp,
4066                                    "%s: max_queue=%d exceeded, %s\n",
4067                                    __func__, sdebug_max_queue,
4068                                    (scsi_result ?  "status: TASK SET FULL" :
4069                                                    "report: host busy"));
4070                if (scsi_result)
4071                        goto respond_in_thread;
4072                else
4073                        return SCSI_MLQUEUE_HOST_BUSY;
4074        }
4075        __set_bit(k, sqp->in_use_bm);
4076        atomic_inc(&devip->num_in_q);
4077        sqcp = &sqp->qc_arr[k];
4078        sqcp->a_cmnd = cmnd;
4079        cmnd->host_scribble = (unsigned char *)sqcp;
4080        cmnd->result = scsi_result;
4081        sd_dp = sqcp->sd_dp;
4082        spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4083        if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4084                setup_inject(sqp, sqcp);
4085        if (delta_jiff > 0 || sdebug_ndelay > 0) {
4086                ktime_t kt;
4087
4088                if (delta_jiff > 0) {
4089                        struct timespec ts;
4090
4091                        jiffies_to_timespec(delta_jiff, &ts);
4092                        kt = ktime_set(ts.tv_sec, ts.tv_nsec);
4093                } else
4094                        kt = sdebug_ndelay;
4095                if (NULL == sd_dp) {
4096                        sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4097                        if (NULL == sd_dp)
4098                                return SCSI_MLQUEUE_HOST_BUSY;
4099                        sqcp->sd_dp = sd_dp;
4100                        hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4101                                     HRTIMER_MODE_REL_PINNED);
4102                        sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4103                        sd_dp->sqa_idx = sqp - sdebug_q_arr;
4104                        sd_dp->qc_idx = k;
4105                }
4106                if (sdebug_statistics)
4107                        sd_dp->issuing_cpu = raw_smp_processor_id();
4108                hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4109        } else {        /* jdelay < 0, use work queue */
4110                if (NULL == sd_dp) {
4111                        sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
4112                        if (NULL == sd_dp)
4113                                return SCSI_MLQUEUE_HOST_BUSY;
4114                        sqcp->sd_dp = sd_dp;
4115                        sd_dp->sqa_idx = sqp - sdebug_q_arr;
4116                        sd_dp->qc_idx = k;
4117                        INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4118                }
4119                if (sdebug_statistics)
4120                        sd_dp->issuing_cpu = raw_smp_processor_id();
4121                schedule_work(&sd_dp->ew.work);
4122        }
4123        if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4124                     (scsi_result == device_qfull_result)))
4125                sdev_printk(KERN_INFO, sdp,
4126                            "%s: num_in_q=%d +1, %s%s\n", __func__,
4127                            num_in_q, (inject ? "<inject> " : ""),
4128                            "status: TASK SET FULL");
4129        return 0;
4130
4131respond_in_thread:      /* call back to mid-layer using invocation thread */
4132        cmnd->result = scsi_result;
4133        cmnd->scsi_done(cmnd);
4134        return 0;
4135}
4136
/* Note: The following macros create attribute files in the
 * /sys/module/scsi_debug/parameters directory. Unfortunately this
 * driver is unaware of a change and cannot trigger auxiliary actions
 * as it can when the corresponding attribute in the
 * /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 * Parameters carrying S_IWUSR may be rewritten by root at runtime;
 * S_IRUGO-only parameters are fixed at load time.
 */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
4188
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One-line usage descriptions for the module parameters defined above;
 * shown by 'modinfo scsi_debug'. */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz,
	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4239
4240#define SDEBUG_INFO_LEN 256
4241static char sdebug_info[SDEBUG_INFO_LEN];
4242
4243static const char * scsi_debug_info(struct Scsi_Host * shp)
4244{
4245        int k;
4246
4247        k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4248                      my_name, SDEBUG_VERSION, sdebug_version_date);
4249        if (k >= (SDEBUG_INFO_LEN - 1))
4250                return sdebug_info;
4251        scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4252                  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4253                  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4254                  "statistics", (int)sdebug_statistics);
4255        return sdebug_info;
4256}
4257
4258/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4259static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4260                                 int length)
4261{
4262        char arr[16];
4263        int opts;
4264        int minLen = length > 15 ? 15 : length;
4265
4266        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4267                return -EACCES;
4268        memcpy(arr, buffer, minLen);
4269        arr[minLen] = '\0';
4270        if (1 != sscanf(arr, "%d", &opts))
4271                return -EINVAL;
4272        sdebug_opts = opts;
4273        sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4274        sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4275        if (sdebug_every_nth != 0)
4276                tweak_cmnd_count();
4277        return length;
4278}
4279
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system.
 */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
		   TICK_NSEC / 1000, "statistics", sdebug_statistics,
		   sdebug_mq_active);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* per-queue occupancy: report first/last busy slot if any in use */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}
	return 0;
}
4325
/* sysfs 'delay' attribute read: current response delay in jiffies */
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies. On success also resets sdebug_ndelay to 0 so the
 * jiffies-based delay takes effect.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* block submissions while checking/changing delay */
			block_unblock_all_queues(true);
			/* refuse the change if any command is still queued */
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				/* make sure sdebug_defer instances get
				 * re-allocated for new delay variant */
				free_all_queued();
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
4368
4369static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4370{
4371        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4372}
4373/* Returns -EBUSY if ndelay is being changed and commands are queued */
4374/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4375static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4376                            size_t count)
4377{
4378        int ndelay, res;
4379
4380        if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4381            (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4382                res = count;
4383                if (sdebug_ndelay != ndelay) {
4384                        int j, k;
4385                        struct sdebug_queue *sqp;
4386
4387                        block_unblock_all_queues(true);
4388                        for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4389                             ++j, ++sqp) {
4390                                k = find_first_bit(sqp->in_use_bm,
4391                                                   sdebug_max_queue);
4392                                if (k != sdebug_max_queue) {
4393                                        res = -EBUSY;   /* queued commands */
4394                                        break;
4395                                }
4396                        }
4397                        if (res > 0) {
4398                                /* make sure sdebug_defer instances get
4399                                 * re-allocated for new delay variant */
4400                                free_all_queued();
4401                                sdebug_ndelay = ndelay;
4402                                sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4403                                                        : DEF_JDELAY;
4404                        }
4405                        block_unblock_all_queues(false);
4406                }
4407                return res;
4408        }
4409        return -EINVAL;
4410}
4411static DRIVER_ATTR_RW(ndelay);
4412
4413static ssize_t opts_show(struct device_driver *ddp, char *buf)
4414{
4415        return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4416}
4417
4418static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4419                          size_t count)
4420{
4421        int opts;
4422        char work[20];
4423
4424        if (1 == sscanf(buf, "%10s", work)) {
4425                if (0 == strncasecmp(work,"0x", 2)) {
4426                        if (1 == sscanf(&work[2], "%x", &opts))
4427                                goto opts_done;
4428                } else {
4429                        if (1 == sscanf(work, "%d", &opts))
4430                                goto opts_done;
4431                }
4432        }
4433        return -EINVAL;
4434opts_done:
4435        sdebug_opts = opts;
4436        sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4437        sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4438        tweak_cmnd_count();
4439        return count;
4440}
4441static DRIVER_ATTR_RW(opts);
4442
4443static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4444{
4445        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4446}
4447static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4448                           size_t count)
4449{
4450        int n;
4451
4452        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4453                sdebug_ptype = n;
4454                return count;
4455        }
4456        return -EINVAL;
4457}
4458static DRIVER_ATTR_RW(ptype);
4459
4460static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4461{
4462        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4463}
4464static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4465                            size_t count)
4466{
4467        int n;
4468
4469        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4470                sdebug_dsense = n;
4471                return count;
4472        }
4473        return -EINVAL;
4474}
4475static DRIVER_ATTR_RW(dsense);
4476
4477static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4478{
4479        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4480}
4481static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4482                             size_t count)
4483{
4484        int n;
4485
4486        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4487                n = (n > 0);
4488                sdebug_fake_rw = (sdebug_fake_rw > 0);
4489                if (sdebug_fake_rw != n) {
4490                        if ((0 == n) && (NULL == fake_storep)) {
4491                                unsigned long sz =
4492                                        (unsigned long)sdebug_dev_size_mb *
4493                                        1048576;
4494
4495                                fake_storep = vmalloc(sz);
4496                                if (NULL == fake_storep) {
4497                                        pr_err("out of memory, 9\n");
4498                                        return -ENOMEM;
4499                                }
4500                                memset(fake_storep, 0, sz);
4501                        }
4502                        sdebug_fake_rw = n;
4503                }
4504                return count;
4505        }
4506        return -EINVAL;
4507}
4508static DRIVER_ATTR_RW(fake_rw);
4509
4510static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4511{
4512        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4513}
4514static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4515                              size_t count)
4516{
4517        int n;
4518
4519        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4520                sdebug_no_lun_0 = n;
4521                return count;
4522        }
4523        return -EINVAL;
4524}
4525static DRIVER_ATTR_RW(no_lun_0);
4526
4527static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4528{
4529        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4530}
4531static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4532                              size_t count)
4533{
4534        int n;
4535
4536        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4537                sdebug_num_tgts = n;
4538                sdebug_max_tgts_luns();
4539                return count;
4540        }
4541        return -EINVAL;
4542}
4543static DRIVER_ATTR_RW(num_tgts);
4544
/* Read-only sysfs attribute: current value of sdebug_dev_size_mb (MiB). */
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);
4550
/* Read-only sysfs attribute: current value of sdebug_num_parts. */
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
4556
4557static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4558{
4559        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4560}
4561static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4562                               size_t count)
4563{
4564        int nth;
4565
4566        if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4567                sdebug_every_nth = nth;
4568                if (nth && !sdebug_statistics) {
4569                        pr_info("every_nth needs statistics=1, set it\n");
4570                        sdebug_statistics = true;
4571                }
4572                tweak_cmnd_count();
4573                return count;
4574        }
4575        return -EINVAL;
4576}
4577static DRIVER_ATTR_RW(every_nth);
4578
4579static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4580{
4581        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4582}
4583static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4584                              size_t count)
4585{
4586        int n;
4587        bool changed;
4588
4589        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4590                if (n > 256) {
4591                        pr_warn("max_luns can be no more than 256\n");
4592                        return -EINVAL;
4593                }
4594                changed = (sdebug_max_luns != n);
4595                sdebug_max_luns = n;
4596                sdebug_max_tgts_luns();
4597                if (changed && (sdebug_scsi_level >= 5)) {      /* >= SPC-3 */
4598                        struct sdebug_host_info *sdhp;
4599                        struct sdebug_dev_info *dp;
4600
4601                        spin_lock(&sdebug_host_list_lock);
4602                        list_for_each_entry(sdhp, &sdebug_host_list,
4603                                            host_list) {
4604                                list_for_each_entry(dp, &sdhp->dev_info_list,
4605                                                    dev_list) {
4606                                        set_bit(SDEBUG_UA_LUNS_CHANGED,
4607                                                dp->uas_bm);
4608                                }
4609                        }
4610                        spin_unlock(&sdebug_host_list_lock);
4611                }
4612                return count;
4613        }
4614        return -EINVAL;
4615}
4616static DRIVER_ATTR_RW(max_luns);
4617
/* Show the current sdebug_max_queue setting. */
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	/* new value must be in (0, SDEBUG_CANQUEUE] */
	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		/* k ends up as the highest in-use slot index over all
		 * submit queues */
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		/* NOTE(review): find_last_bit() returns SDEBUG_CANQUEUE for
		 * an all-clear bitmap, so any empty queue forces the first
		 * branch and clears retired_max_queue — confirm intended. */
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			/* slots above the new limit are still busy; remember
			 * the highest so they can be retired as they finish */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
4653
/* Read-only sysfs attribute: current value of sdebug_no_uld. */
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);
4659
/* Read-only sysfs attribute: simulated SCSI level (sdebug_scsi_level). */
static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
4665
4666static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4667{
4668        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4669}
4670static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4671                                size_t count)
4672{
4673        int n;
4674        bool changed;
4675
4676        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4677                changed = (sdebug_virtual_gb != n);
4678                sdebug_virtual_gb = n;
4679                sdebug_capacity = get_sdebug_capacity();
4680                if (changed) {
4681                        struct sdebug_host_info *sdhp;
4682                        struct sdebug_dev_info *dp;
4683
4684                        spin_lock(&sdebug_host_list_lock);
4685                        list_for_each_entry(sdhp, &sdebug_host_list,
4686                                            host_list) {
4687                                list_for_each_entry(dp, &sdhp->dev_info_list,
4688                                                    dev_list) {
4689                                        set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4690                                                dp->uas_bm);
4691                                }
4692                        }
4693                        spin_unlock(&sdebug_host_list_lock);
4694                }
4695                return count;
4696        }
4697        return -EINVAL;
4698}
4699static DRIVER_ATTR_RW(virtual_gb);
4700
/* Show the number of currently instantiated hosts (sdebug_add_host). */
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
}
4705
4706static int sdebug_add_adapter(void);
4707static void sdebug_remove_adapter(void);
4708
4709static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4710                              size_t count)
4711{
4712        int delta_hosts;
4713
4714        if (sscanf(buf, "%d", &delta_hosts) != 1)
4715                return -EINVAL;
4716        if (delta_hosts > 0) {
4717                do {
4718                        sdebug_add_adapter();
4719                } while (--delta_hosts);
4720        } else if (delta_hosts < 0) {
4721                do {
4722                        sdebug_remove_adapter();
4723                } while (++delta_hosts);
4724        }
4725        return count;
4726}
4727static DRIVER_ATTR_RW(add_host);
4728
4729static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4730{
4731        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
4732}
4733static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4734                                    size_t count)
4735{
4736        int n;
4737
4738        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4739                sdebug_vpd_use_hostno = n;
4740                return count;
4741        }
4742        return -EINVAL;
4743}
4744static DRIVER_ATTR_RW(vpd_use_hostno);
4745
4746static ssize_t statistics_show(struct device_driver *ddp, char *buf)
4747{
4748        return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
4749}
4750static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
4751                                size_t count)
4752{
4753        int n;
4754
4755        if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
4756                if (n > 0)
4757                        sdebug_statistics = true;
4758                else {
4759                        clear_queue_stats();
4760                        sdebug_statistics = false;
4761                }
4762                return count;
4763        }
4764        return -EINVAL;
4765}
4766static DRIVER_ATTR_RW(statistics);
4767
/* Read-only sysfs attribute: logical sector size (sdebug_sector_size). */
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);
4773
/* Read-only sysfs attribute: number of submit queues. */
static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);
4779
/* Read-only sysfs attribute: current value of sdebug_dix. */
static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);
4785
/* Read-only sysfs attribute: T10 protection type (sdebug_dif). */
static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);
4791
/* Read-only sysfs attribute: current value of sdebug_guard. */
static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);
4797
/* Read-only sysfs attribute: current value of sdebug_ato. */
static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
4803
4804static ssize_t map_show(struct device_driver *ddp, char *buf)
4805{
4806        ssize_t count;
4807
4808        if (!scsi_debug_lbp())
4809                return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4810                                 sdebug_store_sectors);
4811
4812        count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
4813                          (int)map_size, map_storep);
4814        buf[count++] = '\n';
4815        buf[count] = '\0';
4816
4817        return count;
4818}
4819static DRIVER_ATTR_RO(map);
4820
4821static ssize_t removable_show(struct device_driver *ddp, char *buf)
4822{
4823        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
4824}
4825static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4826                               size_t count)
4827{
4828        int n;
4829
4830        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4831                sdebug_removable = (n > 0);
4832                return count;
4833        }
4834        return -EINVAL;
4835}
4836static DRIVER_ATTR_RW(removable);
4837
4838static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4839{
4840        return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
4841}
4842/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
4843static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4844                               size_t count)
4845{
4846        int n;
4847
4848        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4849                sdebug_host_lock = (n > 0);
4850                return count;
4851        }
4852        return -EINVAL;
4853}
4854static DRIVER_ATTR_RW(host_lock);
4855
4856static ssize_t strict_show(struct device_driver *ddp, char *buf)
4857{
4858        return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
4859}
4860static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4861                            size_t count)
4862{
4863        int n;
4864
4865        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4866                sdebug_strict = (n > 0);
4867                return count;
4868        }
4869        return -EINVAL;
4870}
4871static DRIVER_ATTR_RW(strict);
4872
/* Read-only sysfs attribute: current value of sdebug_uuid_ctl. */
static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);
4878
4879
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
4921
4922static struct device *pseudo_primary;
4923
/*
 * Module init: validate module parameters, size the simulated storage,
 * allocate the submit-queue array and the optional backing stores
 * (ramdisk, DIF tuples, provisioning map), register the pseudo bus and
 * driver, then instantiate the initially requested hosts. Errors
 * unwind via the labels at the bottom in reverse order of acquisition.
 */
static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	atomic_set(&retired_max_queue, 0);

	/* an ndelay of one second or more is rejected; a positive ndelay
	 * overrides the jiffies based delay */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}

	/* the ramdisk backs reads/writes unless fake_rw was requested */
	if (sdebug_fake_rw == 0) {
		fake_storep = vmalloc(sz);
		if (NULL == fake_storep) {
			pr_err("out of memory, 1\n");
			ret = -ENOMEM;
			goto free_q_arr;
		}
		memset(fake_storep, 0, sz);
		if (sdebug_num_parts > 0)
			sdebug_build_parts(fake_storep, sz);
	}

	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		dif_storep = vmalloc(dif_size);

		/* NOTE(review): logged at error level even on success —
		 * pr_info would seem more appropriate; confirm intent */
		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);

		if (dif_storep == NULL) {
			pr_err("out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		/* fill with 0xff; presumably marks sectors whose protection
		 * information has not been written yet — confirm */
		memset(dif_storep, 0xff, dif_size);
	}

	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_vm;
		}

		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));

		pr_info("%lu provisioning blocks\n", map_size);

		if (map_storep == NULL) {
			pr_err("out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		bitmap_zero(map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(0, 2);
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	/* reset the host counter first: sdebug_add_adapter() increments
	 * it once per successfully registered host */
	host_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < host_to_add; k++) {
		if (sdebug_add_adapter()) {
			pr_err("sdebug_add_adapter failed k=%d\n", k);
			break;
		}
	}

	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_add_host);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
5132
5133static void __exit scsi_debug_exit(void)
5134{
5135        int k = sdebug_add_host;
5136
5137        stop_all_queued();
5138        free_all_queued();
5139        for (; k; k--)
5140                sdebug_remove_adapter();
5141        driver_unregister(&sdebug_driverfs_driver);
5142        bus_unregister(&pseudo_lld_bus);
5143        root_device_unregister(pseudo_primary);
5144
5145        vfree(map_storep);
5146        vfree(dif_storep);
5147        vfree(fake_storep);
5148        kfree(sdebug_q_arr);
5149}
5150
5151device_initcall(scsi_debug_init);
5152module_exit(scsi_debug_exit);
5153
/* Release callback for an adapter's struct device: free its host info. */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
5161
5162static int sdebug_add_adapter(void)
5163{
5164        int k, devs_per_host;
5165        int error = 0;
5166        struct sdebug_host_info *sdbg_host;
5167        struct sdebug_dev_info *sdbg_devinfo, *tmp;
5168
5169        sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
5170        if (NULL == sdbg_host) {
5171                pr_err("out of memory at line %d\n", __LINE__);
5172                return -ENOMEM;
5173        }
5174
5175        INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5176
5177        devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5178        for (k = 0; k < devs_per_host; k++) {
5179                sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5180                if (!sdbg_devinfo) {
5181                        pr_err("out of memory at line %d\n", __LINE__);
5182                        error = -ENOMEM;
5183                        goto clean;
5184                }
5185        }
5186
5187        spin_lock(&sdebug_host_list_lock);
5188        list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5189        spin_unlock(&sdebug_host_list_lock);
5190
5191        sdbg_host->dev.bus = &pseudo_lld_bus;
5192        sdbg_host->dev.parent = pseudo_primary;
5193        sdbg_host->dev.release = &sdebug_release_adapter;
5194        dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5195
5196        error = device_register(&sdbg_host->dev);
5197
5198        if (error)
5199                goto clean;
5200
5201        ++sdebug_add_host;
5202        return error;
5203
5204clean:
5205        list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5206                                 dev_list) {
5207                list_del(&sdbg_devinfo->dev_list);
5208                kfree(sdbg_devinfo);
5209        }
5210
5211        kfree(sdbg_host);
5212        return error;
5213}
5214
5215static void sdebug_remove_adapter(void)
5216{
5217        struct sdebug_host_info * sdbg_host = NULL;
5218
5219        spin_lock(&sdebug_host_list_lock);
5220        if (!list_empty(&sdebug_host_list)) {
5221                sdbg_host = list_entry(sdebug_host_list.prev,
5222                                       struct sdebug_host_info, host_list);
5223                list_del(&sdbg_host->host_list);
5224        }
5225        spin_unlock(&sdebug_host_list_lock);
5226
5227        if (!sdbg_host)
5228                return;
5229
5230        device_unregister(&sdbg_host->dev);
5231        --sdebug_add_host;
5232}
5233
5234static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5235{
5236        int num_in_q = 0;
5237        struct sdebug_dev_info *devip;
5238
5239        block_unblock_all_queues(true);
5240        devip = (struct sdebug_dev_info *)sdev->hostdata;
5241        if (NULL == devip) {
5242                block_unblock_all_queues(false);
5243                return  -ENODEV;
5244        }
5245        num_in_q = atomic_read(&devip->num_in_q);
5246
5247        if (qdepth < 1)
5248                qdepth = 1;
5249        /* allow to exceed max host qc_arr elements for testing */
5250        if (qdepth > SDEBUG_CANQUEUE + 10)
5251                qdepth = SDEBUG_CANQUEUE + 10;
5252        scsi_change_queue_depth(sdev, qdepth);
5253
5254        if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5255                sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5256                            __func__, qdepth, num_in_q);
5257        }
5258        block_unblock_all_queues(false);
5259        return sdev->queue_depth;
5260}
5261
5262static bool fake_timeout(struct scsi_cmnd *scp)
5263{
5264        if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5265                if (sdebug_every_nth < -1)
5266                        sdebug_every_nth = -1;
5267                if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5268                        return true; /* ignore command causing timeout */
5269                else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5270                         scsi_medium_access_command(scp))
5271                        return true; /* time out reads and writes */
5272        }
5273        return false;
5274}
5275
/*
 * queuecommand entry point for the scsi_debug host. Decodes the CDB via
 * opcode_info_arr (indexed through opcode_ind_arr, with service-action
 * disambiguation), applies optional strict CDB-mask checking, unit
 * attention and not-ready gating, then invokes the matching resp_*
 * handler and hands the result to schedule_resp() (possibly delayed).
 * Returns 0 or a busy indication from schedule_resp().
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* when verbose and CDB noise not suppressed, hex-dump the CDB */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		if (sdebug_mq_active)
			sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n",
				    my_name, blk_mq_unique_tag(scp->request),
				    b);
		else
			sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
				    b);
	}
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	/* only the REPORT LUNS well-known LUN is allowed past max_luns */
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* service action disambiguates: low in cmd[1],
			 * high (16-byte CDBs) at cmd[8..9] */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {	/* no variant matched: fault the SA field */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	/* only a whitelist of opcodes is valid on the report-luns wlun */
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* locate the highest offending bit so the
				 * sense data can point at it precisely */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report any pending unit attention unless the opcode skips UAs */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	/* stopped device rejects medium-access commands with NOT READY */
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		errsts = oip->pfp(scp, devip);	/* calls a resp_* function */
	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
		errsts = r_pfp(scp, devip);

fini:
	return schedule_resp(scp, devip, errsts,
			     ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
check_cond:
	return schedule_resp(scp, devip, check_condition_result, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
}
5422
/*
 * Host template for the simulated adapter. can_queue and use_clustering
 * are overwritten at probe time from module parameters (see
 * sdebug_driver_probe()).
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,	/* no per-command sector limit */
	.use_clustering =	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
5449
/*
 * Bus probe callback, invoked by the driver core for each pseudo adapter
 * device registered by sdebug_add_adapter(). Allocates a Scsi_Host,
 * configures multiqueue, id/lun limits and DIF/DIX protection from the
 * module parameters, then adds and scans the host. Returns 0 on success
 * or -ENODEV on failure.
 */
static int sdebug_driver_probe(struct device * dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (sdebug_clustering)
		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
	/* hostdata holds just a pointer back to sdbg_host (see below) */
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/* Decide whether to tell scsi subsystem that we want mq */
	/* Following should give the same answer for each host */
	sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1);
	if (sdebug_mq_active)
		hpnt->nr_hw_queues = submit_queues;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	/* leave room for the host's own id (this_id) among the targets */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* map the dif module parameter to host DIF/DIX protection bits */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else
		scsi_scan_host(hpnt);

	return error;
}
5547
5548static int sdebug_driver_remove(struct device * dev)
5549{
5550        struct sdebug_host_info *sdbg_host;
5551        struct sdebug_dev_info *sdbg_devinfo, *tmp;
5552
5553        sdbg_host = to_sdebug_host(dev);
5554
5555        if (!sdbg_host) {
5556                pr_err("Unable to locate host info\n");
5557                return -ENODEV;
5558        }
5559
5560        scsi_remove_host(sdbg_host->shost);
5561
5562        list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5563                                 dev_list) {
5564                list_del(&sdbg_devinfo->dev_list);
5565                kfree(sdbg_devinfo);
5566        }
5567
5568        scsi_host_put(sdbg_host->shost);
5569        return 0;
5570}
5571
/*
 * Match callback for the pseudo bus: every device matches every driver,
 * so registering a pseudo adapter device always binds it to this driver.
 */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
5577
/*
 * Virtual bus that ties the simulated adapter devices to this driver:
 * probe/remove route to sdebug_driver_probe()/sdebug_driver_remove().
 */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
5585