linux/drivers/scsi/scsi_debug.c
   1/*
   2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
   3 *  Copyright (C) 1992  Eric Youngdale
   4 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
   5 *  to make sure that we are not getting blocks mixed up, and PANIC if
   6 *  anything out of the ordinary is seen.
   7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
   8 *
   9 * Copyright (C) 2001 - 2018 Douglas Gilbert
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2, or (at your option)
  14 * any later version.
  15 *
  16 *  For documentation see http://sg.danny.cz/sg/sdebug26.html
  17 *
  18 */
  19
  20
  21#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
  22
  23#include <linux/module.h>
  24
  25#include <linux/kernel.h>
  26#include <linux/errno.h>
  27#include <linux/jiffies.h>
  28#include <linux/slab.h>
  29#include <linux/types.h>
  30#include <linux/string.h>
  31#include <linux/genhd.h>
  32#include <linux/fs.h>
  33#include <linux/init.h>
  34#include <linux/proc_fs.h>
  35#include <linux/vmalloc.h>
  36#include <linux/moduleparam.h>
  37#include <linux/scatterlist.h>
  38#include <linux/blkdev.h>
  39#include <linux/crc-t10dif.h>
  40#include <linux/spinlock.h>
  41#include <linux/interrupt.h>
  42#include <linux/atomic.h>
  43#include <linux/hrtimer.h>
  44#include <linux/uuid.h>
  45#include <linux/t10-pi.h>
  46
  47#include <net/checksum.h>
  48
  49#include <asm/unaligned.h>
  50
  51#include <scsi/scsi.h>
  52#include <scsi/scsi_cmnd.h>
  53#include <scsi/scsi_device.h>
  54#include <scsi/scsi_host.h>
  55#include <scsi/scsicam.h>
  56#include <scsi/scsi_eh.h>
  57#include <scsi/scsi_tcq.h>
  58#include <scsi/scsi_dbg.h>
  59
  60#include "sd.h"
  61#include "scsi_logging.h"
  62
  63/* make sure inq_product_rev string corresponds to this version */
  64#define SDEBUG_VERSION "0188"   /* format to fit INQUIRY revision field */
  65static const char *sdebug_version_date = "20180128";
  66
  67#define MY_NAME "scsi_debug"
  68
  69/* Additional Sense Code (ASC) */
  70#define NO_ADDITIONAL_SENSE 0x0
  71#define LOGICAL_UNIT_NOT_READY 0x4
  72#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
  73#define UNRECOVERED_READ_ERR 0x11
  74#define PARAMETER_LIST_LENGTH_ERR 0x1a
  75#define INVALID_OPCODE 0x20
  76#define LBA_OUT_OF_RANGE 0x21
  77#define INVALID_FIELD_IN_CDB 0x24
  78#define INVALID_FIELD_IN_PARAM_LIST 0x26
  79#define UA_RESET_ASC 0x29
  80#define UA_CHANGED_ASC 0x2a
  81#define TARGET_CHANGED_ASC 0x3f
  82#define LUNS_CHANGED_ASCQ 0x0e
  83#define INSUFF_RES_ASC 0x55
  84#define INSUFF_RES_ASCQ 0x3
  85#define POWER_ON_RESET_ASCQ 0x0
  86#define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
  87#define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
  88#define CAPACITY_CHANGED_ASCQ 0x9
  89#define SAVING_PARAMS_UNSUP 0x39
  90#define TRANSPORT_PROBLEM 0x4b
  91#define THRESHOLD_EXCEEDED 0x5d
  92#define LOW_POWER_COND_ON 0x5e
  93#define MISCOMPARE_VERIFY_ASC 0x1d
  94#define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
  95#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
  96#define WRITE_ERROR_ASC 0xc
  97
  98/* Additional Sense Code Qualifier (ASCQ) */
  99#define ACK_NAK_TO 0x3
 100
 101/* Default values for driver parameters */
 102#define DEF_NUM_HOST   1
 103#define DEF_NUM_TGTS   1
 104#define DEF_MAX_LUNS   1
 105/* With these defaults, this driver will make 1 host with 1 target
 106 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 107 */
 108#define DEF_ATO 1
 109#define DEF_CDB_LEN 10
 110#define DEF_JDELAY   1          /* if > 0 unit is a jiffy */
 111#define DEF_DEV_SIZE_MB   8
 112#define DEF_DIF 0
 113#define DEF_DIX 0
 114#define DEF_D_SENSE   0
 115#define DEF_EVERY_NTH   0
 116#define DEF_FAKE_RW     0
 117#define DEF_GUARD 0
 118#define DEF_HOST_LOCK 0
 119#define DEF_LBPU 0
 120#define DEF_LBPWS 0
 121#define DEF_LBPWS10 0
 122#define DEF_LBPRZ 1
 123#define DEF_LOWEST_ALIGNED 0
 124#define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
 125#define DEF_NO_LUN_0   0
 126#define DEF_NUM_PARTS   0
 127#define DEF_OPTS   0
 128#define DEF_OPT_BLKS 1024
 129#define DEF_PHYSBLK_EXP 0
 130#define DEF_OPT_XFERLEN_EXP 0
 131#define DEF_PTYPE   TYPE_DISK
 132#define DEF_REMOVABLE false
 133#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
 134#define DEF_SECTOR_SIZE 512
 135#define DEF_UNMAP_ALIGNMENT 0
 136#define DEF_UNMAP_GRANULARITY 1
 137#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
 138#define DEF_UNMAP_MAX_DESC 256
 139#define DEF_VIRTUAL_GB   0
 140#define DEF_VPD_USE_HOSTNO 1
 141#define DEF_WRITESAME_LENGTH 0xFFFF
 142#define DEF_STRICT 0
 143#define DEF_STATISTICS false
 144#define DEF_SUBMIT_QUEUES 1
 145#define DEF_UUID_CTL 0
 146#define JDELAY_OVERRIDDEN -9999
 147
 148#define SDEBUG_LUN_0_VAL 0
 149
 150/* bit mask values for sdebug_opts */
 151#define SDEBUG_OPT_NOISE                1
 152#define SDEBUG_OPT_MEDIUM_ERR           2
 153#define SDEBUG_OPT_TIMEOUT              4
 154#define SDEBUG_OPT_RECOVERED_ERR        8
 155#define SDEBUG_OPT_TRANSPORT_ERR        16
 156#define SDEBUG_OPT_DIF_ERR              32
 157#define SDEBUG_OPT_DIX_ERR              64
 158#define SDEBUG_OPT_MAC_TIMEOUT          128
 159#define SDEBUG_OPT_SHORT_TRANSFER       0x100
 160#define SDEBUG_OPT_Q_NOISE              0x200
 161#define SDEBUG_OPT_ALL_TSF              0x400
 162#define SDEBUG_OPT_RARE_TSF             0x800
 163#define SDEBUG_OPT_N_WCE                0x1000
 164#define SDEBUG_OPT_RESET_NOISE          0x2000
 165#define SDEBUG_OPT_NO_CDB_NOISE         0x4000
 166#define SDEBUG_OPT_HOST_BUSY            0x8000
 167#define SDEBUG_OPT_CMD_ABORT            0x10000
 168#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
 169                              SDEBUG_OPT_RESET_NOISE)
 170#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
 171                                  SDEBUG_OPT_TRANSPORT_ERR | \
 172                                  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
 173                                  SDEBUG_OPT_SHORT_TRANSFER | \
 174                                  SDEBUG_OPT_HOST_BUSY | \
 175                                  SDEBUG_OPT_CMD_ABORT)
 176/* When "every_nth" > 0 then modulo "every_nth" commands:
 177 *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
 178 *   - a RECOVERED_ERROR is simulated on successful read and write
 179 *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
 180 *   - a TRANSPORT_ERROR is simulated on successful read and write
 181 *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 182 *   - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
 183 *     CMD_ABORT
 184 *
 185 * When "every_nth" < 0 then after "- every_nth" commands the selected
 186 * error will be injected. The error will be injected on every subsequent
 187 * command until some other action occurs; for example, the user writing
 188 * a new value (other than -1 or 1) to every_nth:
 189 *      echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 190 */
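
/*
 * Illustrative sketch only (not part of the driver): one way to express
 * the every_nth policy described above. The helper name and its
 * parameters are hypothetical; in this driver the decision is made in
 * the command dispatch path using sdebug_cmnd_count and sdebug_every_nth.
 */
static inline bool sdebug_every_nth_hit_example(int cmnd_count, int every_nth)
{
        if (every_nth > 0)      /* inject on every "every_nth" command */
                return (cmnd_count % every_nth) == 0;
        if (every_nth < 0)      /* inject on all commands after "-every_nth" */
                return cmnd_count >= -every_nth;
        return false;           /* 0: no injection */
}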
 191
  192/* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
  193 * priority order. In the subset implemented here, lower numbers have higher
  194 * priority. The UA numbers should be a sequence starting from 0 with
  195 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
 196#define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
 197#define SDEBUG_UA_BUS_RESET 1
 198#define SDEBUG_UA_MODE_CHANGED 2
 199#define SDEBUG_UA_CAPACITY_CHANGED 3
 200#define SDEBUG_UA_LUNS_CHANGED 4
 201#define SDEBUG_UA_MICROCODE_CHANGED 5   /* simulate firmware change */
 202#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
 203#define SDEBUG_NUM_UAS 7
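
/*
 * Example of the ordering above: if both SDEBUG_UA_POR (0) and
 * SDEBUG_UA_LUNS_CHANGED (4) are pending in a device's uas_bm, the
 * find_first_bit() scan in make_ua() below reports the power on/reset
 * unit attention first; the LUNS_CHANGED one is reported on a later
 * command, once its bit is the lowest still set.
 */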
 204
  205/* when the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium
  206 * error is simulated at this sector on read commands: */
 207#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
 208#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
 209
 210/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 211 * or "peripheral device" addressing (value 0) */
 212#define SAM2_LUN_ADDRESS_METHOD 0
 213
 214/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 215 * (for response) per submit queue at one time. Can be reduced by max_queue
 216 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 217 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 218 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 219 * but cannot exceed SDEBUG_CANQUEUE .
 220 */
  221#define SDEBUG_CANQUEUE_WORDS  3        /* a WORD is the number of bits in a long */
 222#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
 223#define DEF_CMD_PER_LUN  255
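
/*
 * For example, on a 64 bit build BITS_PER_LONG is 64, so SDEBUG_CANQUEUE
 * is 3 * 64 = 192 queued commands per submit queue (3 * 32 = 96 on a
 * 32 bit build).
 */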
 224
 225#define F_D_IN                  1
 226#define F_D_OUT                 2
 227#define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
 228#define F_D_UNKN                8
 229#define F_RL_WLUN_OK            0x10
 230#define F_SKIP_UA               0x20
 231#define F_DELAY_OVERR           0x40
 232#define F_SA_LOW                0x80    /* cdb byte 1, bits 4 to 0 */
 233#define F_SA_HIGH               0x100   /* as used by variable length cdbs */
 234#define F_INV_OP                0x200
 235#define F_FAKE_RW               0x400
 236#define F_M_ACCESS              0x800   /* media access */
 237#define F_SSU_DELAY             0x1000
 238#define F_SYNC_DELAY            0x2000
 239
 240#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
 241#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
 242#define FF_SA (F_SA_HIGH | F_SA_LOW)
 243#define F_LONG_DELAY            (F_SSU_DELAY | F_SYNC_DELAY)
 244
 245#define SDEBUG_MAX_PARTS 4
 246
 247#define SDEBUG_MAX_CMD_LEN 32
 248
 249
 250struct sdebug_dev_info {
 251        struct list_head dev_list;
 252        unsigned int channel;
 253        unsigned int target;
 254        u64 lun;
 255        uuid_t lu_name;
 256        struct sdebug_host_info *sdbg_host;
 257        unsigned long uas_bm[1];
 258        atomic_t num_in_q;
 259        atomic_t stopped;
 260        bool used;
 261};
 262
 263struct sdebug_host_info {
 264        struct list_head host_list;
 265        struct Scsi_Host *shost;
 266        struct device dev;
 267        struct list_head dev_info_list;
 268};
 269
 270#define to_sdebug_host(d)       \
 271        container_of(d, struct sdebug_host_info, dev)
 272
 273enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
 274                      SDEB_DEFER_WQ = 2};
 275
 276struct sdebug_defer {
 277        struct hrtimer hrt;
 278        struct execute_work ew;
 279        int sqa_idx;    /* index of sdebug_queue array */
 280        int qc_idx;     /* index of sdebug_queued_cmd array within sqa_idx */
 281        int issuing_cpu;
 282        bool init_hrt;
 283        bool init_wq;
 284        bool aborted;   /* true when blk_abort_request() already called */
 285        enum sdeb_defer_type defer_t;
 286};
 287
 288struct sdebug_queued_cmd {
 289        /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
 290         * instance indicates this slot is in use.
 291         */
 292        struct sdebug_defer *sd_dp;
 293        struct scsi_cmnd *a_cmnd;
 294        unsigned int inj_recovered:1;
 295        unsigned int inj_transport:1;
 296        unsigned int inj_dif:1;
 297        unsigned int inj_dix:1;
 298        unsigned int inj_short:1;
 299        unsigned int inj_host_busy:1;
 300        unsigned int inj_cmd_abort:1;
 301};
 302
 303struct sdebug_queue {
 304        struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
 305        unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
 306        spinlock_t qc_lock;
 307        atomic_t blocked;       /* to temporarily stop more being queued */
 308};
 309
 310static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
 311static atomic_t sdebug_completions;  /* count of deferred completions */
 312static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
 313static atomic_t sdebug_a_tsf;        /* 'almost task set full' counter */
 314
 315struct opcode_info_t {
 316        u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff */
 317                                /* for terminating element */
 318        u8 opcode;              /* if num_attached > 0, preferred */
 319        u16 sa;                 /* service action */
 320        u32 flags;              /* OR-ed set of SDEB_F_* */
 321        int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
 322        const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
 323        u8 len_mask[16];        /* len_mask[0]-->cdb_len, then mask for cdb */
 324                                /* 1 to min(cdb_len, 15); ignore cdb[15...] */
 325};
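
/*
 * Illustrative sketch only (not taken from this driver): how a cdb can be
 * checked against len_mask[]. Byte 0 of len_mask holds the cdb length and
 * bytes 1 to 15 hold the permitted-bit masks for the corresponding cdb
 * bytes; a set bit outside the mask suggests an invalid field in the cdb.
 */
static bool sdebug_cdb_mask_ok_example(const struct opcode_info_t *oip,
                                       const unsigned char *cdb)
{
        int k;

        for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
                if (cdb[k] & ~oip->len_mask[k])
                        return false;   /* disallowed bit set in cdb[k] */
        }
        return true;
}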
 326
 327/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
 328enum sdeb_opcode_index {
 329        SDEB_I_INVALID_OPCODE = 0,
 330        SDEB_I_INQUIRY = 1,
 331        SDEB_I_REPORT_LUNS = 2,
 332        SDEB_I_REQUEST_SENSE = 3,
 333        SDEB_I_TEST_UNIT_READY = 4,
 334        SDEB_I_MODE_SENSE = 5,          /* 6, 10 */
 335        SDEB_I_MODE_SELECT = 6,         /* 6, 10 */
 336        SDEB_I_LOG_SENSE = 7,
 337        SDEB_I_READ_CAPACITY = 8,       /* 10; 16 is in SA_IN(16) */
 338        SDEB_I_READ = 9,                /* 6, 10, 12, 16 */
 339        SDEB_I_WRITE = 10,              /* 6, 10, 12, 16 */
 340        SDEB_I_START_STOP = 11,
 341        SDEB_I_SERV_ACT_IN_16 = 12,     /* add ...SERV_ACT_IN_12 if needed */
 342        SDEB_I_SERV_ACT_OUT_16 = 13,    /* add ...SERV_ACT_OUT_12 if needed */
 343        SDEB_I_MAINT_IN = 14,
 344        SDEB_I_MAINT_OUT = 15,
 345        SDEB_I_VERIFY = 16,             /* 10 only */
 346        SDEB_I_VARIABLE_LEN = 17,       /* READ(32), WRITE(32), WR_SCAT(32) */
 347        SDEB_I_RESERVE = 18,            /* 6, 10 */
 348        SDEB_I_RELEASE = 19,            /* 6, 10 */
 349        SDEB_I_ALLOW_REMOVAL = 20,      /* PREVENT ALLOW MEDIUM REMOVAL */
 350        SDEB_I_REZERO_UNIT = 21,        /* REWIND in SSC */
 351        SDEB_I_ATA_PT = 22,             /* 12, 16 */
 352        SDEB_I_SEND_DIAG = 23,
 353        SDEB_I_UNMAP = 24,
 354        SDEB_I_XDWRITEREAD = 25,        /* 10 only */
 355        SDEB_I_WRITE_BUFFER = 26,
 356        SDEB_I_WRITE_SAME = 27,         /* 10, 16 */
 357        SDEB_I_SYNC_CACHE = 28,         /* 10, 16 */
 358        SDEB_I_COMP_WRITE = 29,
 359        SDEB_I_LAST_ELEMENT = 30,       /* keep this last (previous + 1) */
 360};
 361
 362
 363static const unsigned char opcode_ind_arr[256] = {
 364/* 0x0; 0x0->0x1f: 6 byte cdbs */
 365        SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
 366            0, 0, 0, 0,
 367        SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
 368        0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
 369            SDEB_I_RELEASE,
 370        0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
 371            SDEB_I_ALLOW_REMOVAL, 0,
 372/* 0x20; 0x20->0x3f: 10 byte cdbs */
 373        0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
 374        SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
 375        0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
 376        0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
 377/* 0x40; 0x40->0x5f: 10 byte cdbs */
 378        0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
 379        0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
 380        0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
 381            SDEB_I_RELEASE,
 382        0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
 383/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
 384        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 385        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 386        0, SDEB_I_VARIABLE_LEN,
 387/* 0x80; 0x80->0x9f: 16 byte cdbs */
 388        0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
 389        SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
 390        0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
 391        0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
 392/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
 393        SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
 394             SDEB_I_MAINT_OUT, 0, 0, 0,
 395        SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
 396             0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
 397        0, 0, 0, 0, 0, 0, 0, 0,
 398        0, 0, 0, 0, 0, 0, 0, 0,
 399/* 0xc0; 0xc0->0xff: vendor specific */
 400        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 401        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 402        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 403        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 404};
 405
 406/*
 407 * The following "response" functions return the SCSI mid-level's 4 byte
 408 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 409 * command completion, they can mask their return value with
 410 * SDEG_RES_IMMED_MASK .
 411 */
 412#define SDEG_RES_IMMED_MASK 0x40000000
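
/*
 * For example, a response function handling a cdb whose IMMED bit is set
 * may return (SDEG_RES_IMMED_MASK | status) so that the long delay
 * normally applied to that command (see F_SSU_DELAY/F_SYNC_DELAY) is
 * skipped and the completion is reported promptly.
 */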
 413
 414static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
 415static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
 416static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
 417static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
 418static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
 419static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
 420static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
 421static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
 422static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
 423static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
 424static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
 425static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
 426static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
 427static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
 428static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
 429static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
 430static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
 431static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 432static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
 433static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 434static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
 435static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
 436static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
 437
 438/*
 439 * The following are overflow arrays for cdbs that "hit" the same index in
 440 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 441 * should be placed in opcode_info_arr[], the others should be placed here.
 442 */
 443static const struct opcode_info_t msense_iarr[] = {
 444        {0, 0x1a, 0, F_D_IN, NULL, NULL,
 445            {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 446};
 447
 448static const struct opcode_info_t mselect_iarr[] = {
 449        {0, 0x15, 0, F_D_OUT, NULL, NULL,
 450            {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 451};
 452
 453static const struct opcode_info_t read_iarr[] = {
 454        {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
 455            {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
 456             0, 0, 0, 0} },
 457        {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
 458            {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 459        {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
 460            {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
 461             0xc7, 0, 0, 0, 0} },
 462};
 463
 464static const struct opcode_info_t write_iarr[] = {
 465        {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
 466            NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
 467                   0, 0, 0, 0, 0, 0} },
 468        {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
 469            NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
 470                   0, 0, 0} },
 471        {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
 472            NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 473                   0xbf, 0xc7, 0, 0, 0, 0} },
 474};
 475
 476static const struct opcode_info_t sa_in_16_iarr[] = {
 477        {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
 478            {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 479             0xff, 0xff, 0xff, 0, 0xc7} },      /* GET LBA STATUS(16) */
 480};
 481
 482static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
 483        {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
 484            NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
 485                   0, 0xff, 0xff, 0xff, 0xff} },        /* WRITE(32) */
 486        {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
 487            NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
 488                   0, 0xff, 0xff, 0x0, 0x0} },  /* WRITE SCATTERED(32) */
 489};
 490
 491static const struct opcode_info_t maint_in_iarr[] = {   /* MAINT IN */
 492        {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
 493            {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
 494             0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
 495        {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
 496            {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
  497             0, 0} },   /* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
 498};
 499
 500static const struct opcode_info_t write_same_iarr[] = {
 501        {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
 502            {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 503             0xff, 0xff, 0xff, 0x3f, 0xc7} },           /* WRITE SAME(16) */
 504};
 505
 506static const struct opcode_info_t reserve_iarr[] = {
 507        {0, 0x16, 0, F_D_OUT, NULL, NULL,               /* RESERVE(6) */
 508            {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 509};
 510
 511static const struct opcode_info_t release_iarr[] = {
 512        {0, 0x17, 0, F_D_OUT, NULL, NULL,               /* RELEASE(6) */
 513            {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 514};
 515
 516static const struct opcode_info_t sync_cache_iarr[] = {
 517        {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
 518            {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 519             0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },     /* SYNC_CACHE (16) */
 520};
 521
 522
 523/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 524 * plus the terminating elements for logic that scans this table such as
 525 * REPORT SUPPORTED OPERATION CODES. */
 526static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
 527/* 0 */
 528        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,    /* unknown opcodes */
 529            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 530        {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
 531            {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 532        {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
 533            {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
 534             0, 0} },                                   /* REPORT LUNS */
 535        {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
 536            {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 537        {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
 538            {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 539/* 5 */
 540        {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,      /* MODE SENSE(10) */
 541            resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
 542                0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 543        {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,    /* MODE SELECT(10) */
 544            resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
 545                0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 546        {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,      /* LOG SENSE */
 547            {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
 548             0, 0, 0} },
 549        {0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
 550            {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
 551             0, 0} },
 552        {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
 553            resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
 554            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
 555/* 10 */
 556        {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
 557            resp_write_dt0, write_iarr,                 /* WRITE(16) */
 558                {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 559                 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
 560        {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
 561            {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 562        {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
 563            resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
 564                {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 565                 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
 566        {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
 567            NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
 568            0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
 569        {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
 570            resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
 571                maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
 572                                0xff, 0, 0xc7, 0, 0, 0, 0} },
 573/* 15 */
 574        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
 575            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 576        {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
 577            {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
 578             0, 0, 0, 0, 0, 0} },
 579        {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
 580            resp_read_dt0, vl_iarr,     /* VARIABLE LENGTH, READ(32) */
 581            {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
 582             0xff, 0xff} },
 583        {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
 584            NULL, reserve_iarr, /* RESERVE(10) <no response function> */
 585            {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 586             0} },
 587        {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
 588            NULL, release_iarr, /* RELEASE(10) <no response function> */
 589            {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 590             0} },
 591/* 20 */
 592        {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
 593            {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 594        {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
 595            {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 596        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
 597            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
  598        {0, 0x1d, 0, F_D_OUT, NULL, NULL,       /* SEND DIAGNOSTIC */
 599            {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 600        {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
 601            {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 602/* 25 */
 603        {0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
 604            NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
 605                   0, 0, 0, 0, 0, 0} },         /* XDWRITEREAD(10) */
 606        {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
 607            {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
 608             0, 0, 0, 0} },                     /* WRITE_BUFFER */
 609        {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
 610            resp_write_same_10, write_same_iarr,        /* WRITE SAME(10) */
 611                {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
 612                 0, 0, 0, 0, 0} },
 613        {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
 614            resp_sync_cache, sync_cache_iarr,
 615            {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
 616             0, 0, 0, 0} },                     /* SYNC_CACHE (10) */
 617        {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
 618            {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
 619             0, 0xff, 0x3f, 0xc7} },            /* COMPARE AND WRITE */
 620
 621/* 30 */
 622        {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
 623            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 624};
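
/*
 * Illustrative sketch only (the driver does this inline in its command
 * dispatch path): map cdb[0] through opcode_ind_arr[] to an SDEB_I_*
 * index into opcode_info_arr[], then, for entries flagged with FF_SA,
 * scan the attached array (arrp) for a matching service action. Only
 * the common F_SA_LOW case (service action in cdb[1], bits 4:0) is
 * shown; children that differ by opcode instead of service action
 * (e.g. MODE SENSE(6) vs MODE SENSE(10)) are matched on
 * oip->arrp[k].opcode in the same fashion (not shown).
 */
static const struct opcode_info_t *
sdebug_lookup_opcode_example(const unsigned char *cdb)
{
        int k;
        u16 sa = cdb[1] & 0x1f;
        const struct opcode_info_t *oip =
                        &opcode_info_arr[opcode_ind_arr[cdb[0]]];

        if (!(oip->flags & FF_SA) || oip->sa == sa)
                return oip;
        for (k = 0; k < oip->num_attached; ++k) {
                if ((oip->arrp[k].flags & FF_SA) && oip->arrp[k].sa == sa)
                        return &oip->arrp[k];
        }
        return NULL;    /* opcode known but service action not supported */
}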
 625
 626static int sdebug_add_host = DEF_NUM_HOST;
 627static int sdebug_ato = DEF_ATO;
 628static int sdebug_cdb_len = DEF_CDB_LEN;
 629static int sdebug_jdelay = DEF_JDELAY;  /* if > 0 then unit is jiffies */
 630static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
 631static int sdebug_dif = DEF_DIF;
 632static int sdebug_dix = DEF_DIX;
 633static int sdebug_dsense = DEF_D_SENSE;
 634static int sdebug_every_nth = DEF_EVERY_NTH;
 635static int sdebug_fake_rw = DEF_FAKE_RW;
 636static unsigned int sdebug_guard = DEF_GUARD;
 637static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
 638static int sdebug_max_luns = DEF_MAX_LUNS;
 639static int sdebug_max_queue = SDEBUG_CANQUEUE;  /* per submit queue */
 640static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
 641static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
 642static atomic_t retired_max_queue;      /* if > 0 then was prior max_queue */
 643static int sdebug_ndelay = DEF_NDELAY;  /* if > 0 then unit is nanoseconds */
 644static int sdebug_no_lun_0 = DEF_NO_LUN_0;
 645static int sdebug_no_uld;
 646static int sdebug_num_parts = DEF_NUM_PARTS;
 647static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
 648static int sdebug_opt_blks = DEF_OPT_BLKS;
 649static int sdebug_opts = DEF_OPTS;
 650static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
 651static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
 652static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
 653static int sdebug_scsi_level = DEF_SCSI_LEVEL;
 654static int sdebug_sector_size = DEF_SECTOR_SIZE;
 655static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
 656static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
 657static unsigned int sdebug_lbpu = DEF_LBPU;
 658static unsigned int sdebug_lbpws = DEF_LBPWS;
 659static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
 660static unsigned int sdebug_lbprz = DEF_LBPRZ;
 661static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
 662static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
 663static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
 664static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
 665static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
 666static int sdebug_uuid_ctl = DEF_UUID_CTL;
 667static bool sdebug_removable = DEF_REMOVABLE;
 668static bool sdebug_clustering;
 669static bool sdebug_host_lock = DEF_HOST_LOCK;
 670static bool sdebug_strict = DEF_STRICT;
 671static bool sdebug_any_injecting_opt;
 672static bool sdebug_verbose;
 673static bool have_dif_prot;
 674static bool write_since_sync;
 675static bool sdebug_statistics = DEF_STATISTICS;
 676
 677static unsigned int sdebug_store_sectors;
 678static sector_t sdebug_capacity;        /* in sectors */
 679
 680/* old BIOS stuff, kernel may get rid of them but some mode sense pages
 681   may still need them */
 682static int sdebug_heads;                /* heads per disk */
 683static int sdebug_cylinders_per;        /* cylinders per surface */
 684static int sdebug_sectors_per;          /* sectors per cylinder */
 685
 686static LIST_HEAD(sdebug_host_list);
 687static DEFINE_SPINLOCK(sdebug_host_list_lock);
 688
 689static unsigned char *fake_storep;      /* ramdisk storage */
 690static struct t10_pi_tuple *dif_storep; /* protection info */
 691static void *map_storep;                /* provisioning map */
 692
 693static unsigned long map_size;
 694static int num_aborts;
 695static int num_dev_resets;
 696static int num_target_resets;
 697static int num_bus_resets;
 698static int num_host_resets;
 699static int dix_writes;
 700static int dix_reads;
 701static int dif_errors;
 702
 703static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
 704static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
 705
 706static DEFINE_RWLOCK(atomic_rw);
 707
 708static char sdebug_proc_name[] = MY_NAME;
 709static const char *my_name = MY_NAME;
 710
 711static struct bus_type pseudo_lld_bus;
 712
 713static struct device_driver sdebug_driverfs_driver = {
 714        .name           = sdebug_proc_name,
 715        .bus            = &pseudo_lld_bus,
 716};
 717
 718static const int check_condition_result =
 719                (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 720
 721static const int illegal_condition_result =
 722        (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
 723
 724static const int device_qfull_result =
 725        (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
 726
 727
 728/* Only do the extra work involved in logical block provisioning if one or
 729 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 730 * real reads and writes (i.e. not skipping them for speed).
 731 */
 732static inline bool scsi_debug_lbp(void)
 733{
 734        return 0 == sdebug_fake_rw &&
 735                (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
 736}
 737
 738static void *fake_store(unsigned long long lba)
 739{
 740        lba = do_div(lba, sdebug_store_sectors);
 741
 742        return fake_storep + lba * sdebug_sector_size;
 743}
 744
 745static struct t10_pi_tuple *dif_store(sector_t sector)
 746{
 747        sector = sector_div(sector, sdebug_store_sectors);
 748
 749        return dif_storep + sector;
 750}
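
/*
 * Note: when sdebug_virtual_gb is set, sdebug_capacity may exceed
 * sdebug_store_sectors, so the do_div()/sector_div() above makes distinct
 * LBAs alias onto the same backing ram. For example, with the default
 * 8 MiB store of 512 byte sectors (16384 sectors), LBA 0 and LBA 16384
 * resolve to the same fake_storep offset.
 */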
 751
 752static void sdebug_max_tgts_luns(void)
 753{
 754        struct sdebug_host_info *sdbg_host;
 755        struct Scsi_Host *hpnt;
 756
 757        spin_lock(&sdebug_host_list_lock);
 758        list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
 759                hpnt = sdbg_host->shost;
 760                if ((hpnt->this_id >= 0) &&
 761                    (sdebug_num_tgts > hpnt->this_id))
 762                        hpnt->max_id = sdebug_num_tgts + 1;
 763                else
 764                        hpnt->max_id = sdebug_num_tgts;
 765                /* sdebug_max_luns; */
 766                hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
 767        }
 768        spin_unlock(&sdebug_host_list_lock);
 769}
 770
 771enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
 772
 773/* Set in_bit to -1 to indicate no bit position of invalid field */
 774static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
 775                                 enum sdeb_cmd_data c_d,
 776                                 int in_byte, int in_bit)
 777{
 778        unsigned char *sbuff;
 779        u8 sks[4];
 780        int sl, asc;
 781
 782        sbuff = scp->sense_buffer;
 783        if (!sbuff) {
 784                sdev_printk(KERN_ERR, scp->device,
 785                            "%s: sense_buffer is NULL\n", __func__);
 786                return;
 787        }
 788        asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
 789        memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
 790        scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
 791        memset(sks, 0, sizeof(sks));
 792        sks[0] = 0x80;
 793        if (c_d)
 794                sks[0] |= 0x40;
 795        if (in_bit >= 0) {
 796                sks[0] |= 0x8;
 797                sks[0] |= 0x7 & in_bit;
 798        }
 799        put_unaligned_be16(in_byte, sks + 1);
 800        if (sdebug_dsense) {
 801                sl = sbuff[7] + 8;
 802                sbuff[7] = sl;
 803                sbuff[sl] = 0x2;
 804                sbuff[sl + 1] = 0x6;
 805                memcpy(sbuff + sl + 4, sks, 3);
 806        } else
 807                memcpy(sbuff + 15, sks, 3);
 808        if (sdebug_verbose)
 809                sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
 810                            "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
 811                            my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
 812}
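
/*
 * Example: mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4) reports
 * ILLEGAL REQUEST with asc 0x24 (INVALID FIELD IN CDB) and sense key
 * specific bytes 0xcc 0x00 0x01: SKSV=1, C/D=1, BPV=1, bit pointer 4,
 * field pointer 1 (i.e. bit 4 of cdb byte 1 is the offending field).
 */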
 813
 814static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
 815{
 816        unsigned char *sbuff;
 817
 818        sbuff = scp->sense_buffer;
 819        if (!sbuff) {
 820                sdev_printk(KERN_ERR, scp->device,
 821                            "%s: sense_buffer is NULL\n", __func__);
 822                return;
 823        }
 824        memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
 825
 826        scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
 827
 828        if (sdebug_verbose)
 829                sdev_printk(KERN_INFO, scp->device,
 830                            "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
 831                            my_name, key, asc, asq);
 832}
 833
 834static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
 835{
 836        mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
 837}
 838
 839static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
 840{
 841        if (sdebug_verbose) {
 842                if (0x1261 == cmd)
 843                        sdev_printk(KERN_INFO, dev,
 844                                    "%s: BLKFLSBUF [0x1261]\n", __func__);
 845                else if (0x5331 == cmd)
 846                        sdev_printk(KERN_INFO, dev,
 847                                    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
 848                                    __func__);
 849                else
 850                        sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
 851                                    __func__, cmd);
 852        }
 853        return -EINVAL;
 854        /* return -ENOTTY; // correct return but upsets fdisk */
 855}
 856
 857static void config_cdb_len(struct scsi_device *sdev)
 858{
 859        switch (sdebug_cdb_len) {
 860        case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
 861                sdev->use_10_for_rw = false;
 862                sdev->use_16_for_rw = false;
 863                sdev->use_10_for_ms = false;
 864                break;
 865        case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
 866                sdev->use_10_for_rw = true;
 867                sdev->use_16_for_rw = false;
 868                sdev->use_10_for_ms = false;
 869                break;
 870        case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
 871                sdev->use_10_for_rw = true;
 872                sdev->use_16_for_rw = false;
 873                sdev->use_10_for_ms = true;
 874                break;
 875        case 16:
 876                sdev->use_10_for_rw = false;
 877                sdev->use_16_for_rw = true;
 878                sdev->use_10_for_ms = true;
 879                break;
 880        case 32: /* No knobs to suggest this so same as 16 for now */
 881                sdev->use_10_for_rw = false;
 882                sdev->use_16_for_rw = true;
 883                sdev->use_10_for_ms = true;
 884                break;
 885        default:
 886                pr_warn("unexpected cdb_len=%d, force to 10\n",
 887                        sdebug_cdb_len);
 888                sdev->use_10_for_rw = true;
 889                sdev->use_16_for_rw = false;
 890                sdev->use_10_for_ms = false;
 891                sdebug_cdb_len = 10;
 892                break;
 893        }
 894}
 895
 896static void all_config_cdb_len(void)
 897{
 898        struct sdebug_host_info *sdbg_host;
 899        struct Scsi_Host *shost;
 900        struct scsi_device *sdev;
 901
 902        spin_lock(&sdebug_host_list_lock);
 903        list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
 904                shost = sdbg_host->shost;
 905                shost_for_each_device(sdev, shost) {
 906                        config_cdb_len(sdev);
 907                }
 908        }
 909        spin_unlock(&sdebug_host_list_lock);
 910}
 911
 912static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
 913{
 914        struct sdebug_host_info *sdhp;
 915        struct sdebug_dev_info *dp;
 916
 917        spin_lock(&sdebug_host_list_lock);
 918        list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
 919                list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
 920                        if ((devip->sdbg_host == dp->sdbg_host) &&
 921                            (devip->target == dp->target))
 922                                clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
 923                }
 924        }
 925        spin_unlock(&sdebug_host_list_lock);
 926}
 927
 928static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 929{
 930        int k;
 931
 932        k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
 933        if (k != SDEBUG_NUM_UAS) {
 934                const char *cp = NULL;
 935
 936                switch (k) {
 937                case SDEBUG_UA_POR:
 938                        mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
 939                                        POWER_ON_RESET_ASCQ);
 940                        if (sdebug_verbose)
 941                                cp = "power on reset";
 942                        break;
 943                case SDEBUG_UA_BUS_RESET:
 944                        mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
 945                                        BUS_RESET_ASCQ);
 946                        if (sdebug_verbose)
 947                                cp = "bus reset";
 948                        break;
 949                case SDEBUG_UA_MODE_CHANGED:
 950                        mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
 951                                        MODE_CHANGED_ASCQ);
 952                        if (sdebug_verbose)
 953                                cp = "mode parameters changed";
 954                        break;
 955                case SDEBUG_UA_CAPACITY_CHANGED:
 956                        mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
 957                                        CAPACITY_CHANGED_ASCQ);
 958                        if (sdebug_verbose)
 959                                cp = "capacity data changed";
 960                        break;
 961                case SDEBUG_UA_MICROCODE_CHANGED:
 962                        mk_sense_buffer(scp, UNIT_ATTENTION,
 963                                        TARGET_CHANGED_ASC,
 964                                        MICROCODE_CHANGED_ASCQ);
 965                        if (sdebug_verbose)
 966                                cp = "microcode has been changed";
 967                        break;
 968                case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
 969                        mk_sense_buffer(scp, UNIT_ATTENTION,
 970                                        TARGET_CHANGED_ASC,
 971                                        MICROCODE_CHANGED_WO_RESET_ASCQ);
 972                        if (sdebug_verbose)
 973                                cp = "microcode has been changed without reset";
 974                        break;
 975                case SDEBUG_UA_LUNS_CHANGED:
 976                        /*
 977                         * SPC-3 behavior is to report a UNIT ATTENTION with
 978                         * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
 979                         * on the target, until a REPORT LUNS command is
 980                         * received.  SPC-4 behavior is to report it only once.
 981                         * NOTE:  sdebug_scsi_level does not use the same
 982                         * values as struct scsi_device->scsi_level.
 983                         */
 984                        if (sdebug_scsi_level >= 6)     /* SPC-4 and above */
 985                                clear_luns_changed_on_target(devip);
 986                        mk_sense_buffer(scp, UNIT_ATTENTION,
 987                                        TARGET_CHANGED_ASC,
 988                                        LUNS_CHANGED_ASCQ);
 989                        if (sdebug_verbose)
 990                                cp = "reported luns data has changed";
 991                        break;
 992                default:
 993                        pr_warn("unexpected unit attention code=%d\n", k);
 994                        if (sdebug_verbose)
 995                                cp = "unknown";
 996                        break;
 997                }
 998                clear_bit(k, devip->uas_bm);
 999                if (sdebug_verbose)
1000                        sdev_printk(KERN_INFO, scp->device,
1001                                   "%s reports: Unit attention: %s\n",
1002                                   my_name, cp);
1003                return check_condition_result;
1004        }
1005        return 0;
1006}
1007
1008/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1009static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1010                                int arr_len)
1011{
1012        int act_len;
1013        struct scsi_data_buffer *sdb = scsi_in(scp);
1014
1015        if (!sdb->length)
1016                return 0;
1017        if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
1018                return DID_ERROR << 16;
1019
1020        act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1021                                      arr, arr_len);
1022        sdb->resid = scsi_bufflen(scp) - act_len;
1023
1024        return 0;
1025}
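
/*
 * Typical use by the resp_*() handlers in this file: build the response
 * in a local array, then do something like
 *      return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, n));
 * so that no more than the allocation length given in the cdb is copied
 * into the data-in buffer.
 */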
1026
1027/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1028 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1029 * calls, not required to write in ascending offset order. Assumes resid
1030 * set to scsi_bufflen() prior to any calls.
1031 */
1032static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1033                                  int arr_len, unsigned int off_dst)
1034{
1035        int act_len, n;
1036        struct scsi_data_buffer *sdb = scsi_in(scp);
1037        off_t skip = off_dst;
1038
1039        if (sdb->length <= off_dst)
1040                return 0;
1041        if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
1042                return DID_ERROR << 16;
1043
1044        act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1045                                       arr, arr_len, skip);
1046        pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1047                 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
1048        n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
1049        sdb->resid = min(sdb->resid, n);
1050        return 0;
1051}
1052
1053/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1054 * 'arr' or -1 if error.
1055 */
1056static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1057                               int arr_len)
1058{
1059        if (!scsi_bufflen(scp))
1060                return 0;
1061        if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
1062                return -1;
1063
1064        return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1065}
1066
1067
1068static char sdebug_inq_vendor_id[9] = "Linux   ";
1069static char sdebug_inq_product_id[17] = "scsi_debug      ";
1070static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1071/* Use some locally assigned NAAs for SAS addresses. */
1072static const u64 naa3_comp_a = 0x3222222000000000ULL;
1073static const u64 naa3_comp_b = 0x3333333000000000ULL;
1074static const u64 naa3_comp_c = 0x3111111000000000ULL;
1075
1076/* Device identification VPD page. Returns number of bytes placed in arr */
1077static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1078                          int target_dev_id, int dev_id_num,
1079                          const char *dev_id_str, int dev_id_str_len,
1080                          const uuid_t *lu_name)
1081{
1082        int num, port_a;
1083        char b[32];
1084
1085        port_a = target_dev_id + 1;
1086        /* T10 vendor identifier field format (faked) */
1087        arr[0] = 0x2;   /* ASCII */
1088        arr[1] = 0x1;
1089        arr[2] = 0x0;
1090        memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1091        memcpy(&arr[12], sdebug_inq_product_id, 16);
1092        memcpy(&arr[28], dev_id_str, dev_id_str_len);
1093        num = 8 + 16 + dev_id_str_len;
1094        arr[3] = num;
1095        num += 4;
1096        if (dev_id_num >= 0) {
1097                if (sdebug_uuid_ctl) {
1098                        /* Locally assigned UUID */
1099                        arr[num++] = 0x1;  /* binary (not necessarily sas) */
1100                        arr[num++] = 0xa;  /* PIV=0, lu, naa */
1101                        arr[num++] = 0x0;
1102                        arr[num++] = 0x12;
1103                        arr[num++] = 0x10; /* uuid type=1, locally assigned */
1104                        arr[num++] = 0x0;
1105                        memcpy(arr + num, lu_name, 16);
1106                        num += 16;
1107                } else {
1108                        /* NAA-3, Logical unit identifier (binary) */
1109                        arr[num++] = 0x1;  /* binary (not necessarily sas) */
1110                        arr[num++] = 0x3;  /* PIV=0, lu, naa */
1111                        arr[num++] = 0x0;
1112                        arr[num++] = 0x8;
1113                        put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1114                        num += 8;
1115                }
1116                /* Target relative port number */
1117                arr[num++] = 0x61;      /* proto=sas, binary */
1118                arr[num++] = 0x94;      /* PIV=1, target port, rel port */
1119                arr[num++] = 0x0;       /* reserved */
1120                arr[num++] = 0x4;       /* length */
1121                arr[num++] = 0x0;       /* reserved */
1122                arr[num++] = 0x0;       /* reserved */
1123                arr[num++] = 0x0;
1124                arr[num++] = 0x1;       /* relative port A */
1125        }
1126        /* NAA-3, Target port identifier */
1127        arr[num++] = 0x61;      /* proto=sas, binary */
1128        arr[num++] = 0x93;      /* piv=1, target port, naa */
1129        arr[num++] = 0x0;
1130        arr[num++] = 0x8;
1131        put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1132        num += 8;
1133        /* NAA-3, Target port group identifier */
1134        arr[num++] = 0x61;      /* proto=sas, binary */
1135        arr[num++] = 0x95;      /* piv=1, target port group id */
1136        arr[num++] = 0x0;
1137        arr[num++] = 0x4;
1138        arr[num++] = 0;
1139        arr[num++] = 0;
1140        put_unaligned_be16(port_group_id, arr + num);
1141        num += 2;
1142        /* NAA-3, Target device identifier */
1143        arr[num++] = 0x61;      /* proto=sas, binary */
1144        arr[num++] = 0xa3;      /* piv=1, target device, naa */
1145        arr[num++] = 0x0;
1146        arr[num++] = 0x8;
1147        put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1148        num += 8;
1149        /* SCSI name string: Target device identifier */
1150        arr[num++] = 0x63;      /* proto=sas, UTF-8 */
1151        arr[num++] = 0xa8;      /* piv=1, target device, SCSI name string */
1152        arr[num++] = 0x0;
1153        arr[num++] = 24;
1154        memcpy(arr + num, "naa.32222220", 12);
1155        num += 12;
1156        snprintf(b, sizeof(b), "%08X", target_dev_id);
1157        memcpy(arr + num, b, 8);
1158        num += 8;
1159        memset(arr + num, 0, 4);
1160        num += 4;
1161        return num;
1162}
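
/*
 * For example, with sdebug_uuid_ctl off and dev_id_num 0x123, the logical
 * unit designator above becomes the NAA-3 value 0x3333333000000123
 * (naa3_comp_b + dev_id_num), while the target port designator uses
 * naa3_comp_a + target_dev_id + 1.
 */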
1163
1164static unsigned char vpd84_data[] = {
1165/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1166    0x22,0x22,0x22,0x0,0xbb,0x1,
1167    0x22,0x22,0x22,0x0,0xbb,0x2,
1168};
1169
1170/*  Software interface identification VPD page */
1171static int inquiry_vpd_84(unsigned char *arr)
1172{
1173        memcpy(arr, vpd84_data, sizeof(vpd84_data));
1174        return sizeof(vpd84_data);
1175}
1176
1177/* Management network addresses VPD page */
1178static int inquiry_vpd_85(unsigned char *arr)
1179{
1180        int num = 0;
1181        const char *na1 = "https://www.kernel.org/config";
1182        const char *na2 = "http://www.kernel.org/log";
1183        int plen, olen;
1184
1185        arr[num++] = 0x1;       /* lu, storage config */
1186        arr[num++] = 0x0;       /* reserved */
1187        arr[num++] = 0x0;
1188        olen = strlen(na1);
1189        plen = olen + 1;
1190        if (plen % 4)
1191                plen = ((plen / 4) + 1) * 4;
 1192        arr[num++] = plen;      /* length, null terminated, padded */
1193        memcpy(arr + num, na1, olen);
1194        memset(arr + num + olen, 0, plen - olen);
1195        num += plen;
1196
1197        arr[num++] = 0x4;       /* lu, logging */
1198        arr[num++] = 0x0;       /* reserved */
1199        arr[num++] = 0x0;
1200        olen = strlen(na2);
1201        plen = olen + 1;
1202        if (plen % 4)
1203                plen = ((plen / 4) + 1) * 4;
1204        arr[num++] = plen;      /* length, null terminated, padded */
1205        memcpy(arr + num, na2, olen);
1206        memset(arr + num + olen, 0, plen - olen);
1207        num += plen;
1208
1209        return num;
1210}
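
/*
 * Padding example: na1 ("https://www.kernel.org/config") is 29 characters,
 * so plen starts at 30 and is rounded up to 32, giving a null terminated,
 * 4 byte aligned network address descriptor.
 */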
1211
1212/* SCSI ports VPD page */
1213static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1214{
1215        int num = 0;
1216        int port_a, port_b;
1217
1218        port_a = target_dev_id + 1;
1219        port_b = port_a + 1;
1220        arr[num++] = 0x0;       /* reserved */
1221        arr[num++] = 0x0;       /* reserved */
1222        arr[num++] = 0x0;
1223        arr[num++] = 0x1;       /* relative port 1 (primary) */
1224        memset(arr + num, 0, 6);
1225        num += 6;
1226        arr[num++] = 0x0;
1227        arr[num++] = 12;        /* length tp descriptor */
1228        /* naa-5 target port identifier (A) */
1229        arr[num++] = 0x61;      /* proto=sas, binary */
1230        arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1231        arr[num++] = 0x0;       /* reserved */
1232        arr[num++] = 0x8;       /* length */
1233        put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1234        num += 8;
1235        arr[num++] = 0x0;       /* reserved */
1236        arr[num++] = 0x0;       /* reserved */
1237        arr[num++] = 0x0;
1238        arr[num++] = 0x2;       /* relative port 2 (secondary) */
1239        memset(arr + num, 0, 6);
1240        num += 6;
1241        arr[num++] = 0x0;
1242        arr[num++] = 12;        /* length tp descriptor */
1243        /* naa-5 target port identifier (B) */
1244        arr[num++] = 0x61;      /* proto=sas, binary */
1245        arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1246        arr[num++] = 0x0;       /* reserved */
1247        arr[num++] = 0x8;       /* length */
1248        put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1249        num += 8;
1250
1251        return num;
1252}
1253
1254
1255static unsigned char vpd89_data[] = {
1256/* from 4th byte */ 0,0,0,0,
1257'l','i','n','u','x',' ',' ',' ',
1258'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1259'1','2','3','4',
12600x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
12610xec,0,0,0,
12620x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
12630,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
12640x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
12650x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
12660x53,0x41,
12670x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
12680x20,0x20,
12690x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
12700x10,0x80,
12710,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
12720x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
12730x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
12740,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
12750x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
12760x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
12770,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
12780,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12790,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12800,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12810x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
12820,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
12830xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
12840,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
12850,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12870,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12880,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12890,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12900,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12910,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12920,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12930,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12940,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12950,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12960,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1297};
1298
1299/* ATA Information VPD page */
1300static int inquiry_vpd_89(unsigned char *arr)
1301{
1302        memcpy(arr, vpd89_data, sizeof(vpd89_data));
1303        return sizeof(vpd89_data);
1304}
1305
1306
1307static unsigned char vpdb0_data[] = {
1308        /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1309        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1310        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1311        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1312};
1313
1314/* Block limits VPD page (SBC-3) */
1315static int inquiry_vpd_b0(unsigned char *arr)
1316{
1317        unsigned int gran;
1318
1319        memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1320
1321        /* Optimal transfer length granularity */
1322        if (sdebug_opt_xferlen_exp != 0 &&
1323            sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1324                gran = 1 << sdebug_opt_xferlen_exp;
1325        else
1326                gran = 1 << sdebug_physblk_exp;
1327        put_unaligned_be16(gran, arr + 2);
1328
1329        /* Maximum Transfer Length */
1330        if (sdebug_store_sectors > 0x400)
1331                put_unaligned_be32(sdebug_store_sectors, arr + 4);
1332
1333        /* Optimal Transfer Length */
1334        put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1335
1336        if (sdebug_lbpu) {
1337                /* Maximum Unmap LBA Count */
1338                put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1339
1340                /* Maximum Unmap Block Descriptor Count */
1341                put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1342        }
1343
1344        /* Unmap Granularity Alignment */
1345        if (sdebug_unmap_alignment) {
1346                put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1347                arr[28] |= 0x80; /* UGAVALID */
1348        }
1349
1350        /* Optimal Unmap Granularity */
1351        put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1352
1353        /* Maximum WRITE SAME Length */
1354        put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1355
1356        return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1359}
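/*
 * Example of the granularity selection above: with a hypothetical
 * sdebug_physblk_exp=3 and sdebug_opt_xferlen_exp left at 0, the value
 * stored by put_unaligned_be16(gran, arr + 2) is 1 << 3 = 8 logical
 * blocks; a larger sdebug_opt_xferlen_exp of 6 would win instead and
 * report 1 << 6 = 64 blocks.
 */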
1360
1361/* Block device characteristics VPD page (SBC-3) */
1362static int inquiry_vpd_b1(unsigned char *arr)
1363{
1364        memset(arr, 0, 0x3c);
1365        arr[0] = 0;
1366        arr[1] = 1;     /* non rotating medium (e.g. solid state) */
1367        arr[2] = 0;
1368        arr[3] = 5;     /* less than 1.8" */
1369
1370        return 0x3c;
1371}
1372
1373/* Logical block provisioning VPD page (SBC-4) */
1374static int inquiry_vpd_b2(unsigned char *arr)
1375{
1376        memset(arr, 0, 0x4);
1377        arr[0] = 0;                     /* threshold exponent */
1378        if (sdebug_lbpu)
1379                arr[1] = 1 << 7;
1380        if (sdebug_lbpws)
1381                arr[1] |= 1 << 6;
1382        if (sdebug_lbpws10)
1383                arr[1] |= 1 << 5;
1384        if (sdebug_lbprz && scsi_debug_lbp())
1385                arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1386        /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1387        /* minimum_percentage=0; provisioning_type=0 (unknown) */
1388        /* threshold_percentage=0 */
1389        return 0x4;
1390}
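/*
 * Byte 1 built above packs LBPU (bit 7), LBPWS (bit 6), LBPWS10 (bit 5)
 * and the 3-bit LBPRZ field (bits 4:2). For example, with hypothetical
 * settings of sdebug_lbpu=1, sdebug_lbpws=0, sdebug_lbpws10=0 and
 * sdebug_lbprz=1, the byte becomes 0x80 | (1 << 2) = 0x84.
 */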
1391
1392#define SDEBUG_LONG_INQ_SZ 96
1393#define SDEBUG_MAX_INQ_ARR_SZ 584
1394
1395static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1396{
1397        unsigned char pq_pdt;
1398        unsigned char *arr;
1399        unsigned char *cmd = scp->cmnd;
1400        int alloc_len, n, ret;
1401        bool have_wlun, is_disk;
1402
1403        alloc_len = get_unaligned_be16(cmd + 3);
1404        arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1405        if (!arr)
1406                return DID_REQUEUE << 16;
1407        is_disk = (sdebug_ptype == TYPE_DISK);
1408        have_wlun = scsi_is_wlun(scp->device->lun);
1409        if (have_wlun)
1410                pq_pdt = TYPE_WLUN;     /* present, wlun */
1411        else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1412                pq_pdt = 0x7f;  /* not present, PQ=3, PDT=0x1f */
1413        else
1414                pq_pdt = (sdebug_ptype & 0x1f);
1415        arr[0] = pq_pdt;
1416        if (0x2 & cmd[1]) {  /* CMDDT bit set */
1417                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1418                kfree(arr);
1419                return check_condition_result;
1420        } else if (0x1 & cmd[1]) {  /* EVPD bit set */
1421                int lu_id_num, port_group_id, target_dev_id, len;
1422                char lu_id_str[6];
1423                int host_no = devip->sdbg_host->shost->host_no;
1424                
1425                port_group_id = (((host_no + 1) & 0x7f) << 8) +
1426                    (devip->channel & 0x7f);
1427                if (sdebug_vpd_use_hostno == 0)
1428                        host_no = 0;
1429                lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1430                            (devip->target * 1000) + devip->lun);
1431                target_dev_id = ((host_no + 1) * 2000) +
1432                                 (devip->target * 1000) - 3;
1433                len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1434                if (0 == cmd[2]) { /* supported vital product data pages */
1435                        arr[1] = cmd[2];        /*sanity */
1436                        n = 4;
1437                        arr[n++] = 0x0;   /* this page */
1438                        arr[n++] = 0x80;  /* unit serial number */
1439                        arr[n++] = 0x83;  /* device identification */
1440                        arr[n++] = 0x84;  /* software interface ident. */
1441                        arr[n++] = 0x85;  /* management network addresses */
1442                        arr[n++] = 0x86;  /* extended inquiry */
1443                        arr[n++] = 0x87;  /* mode page policy */
1444                        arr[n++] = 0x88;  /* SCSI ports */
1445                        if (is_disk) {    /* SBC only */
1446                                arr[n++] = 0x89;  /* ATA information */
1447                                arr[n++] = 0xb0;  /* Block limits */
1448                                arr[n++] = 0xb1;  /* Block characteristics */
1449                                arr[n++] = 0xb2;  /* Logical Block Prov */
1450                        }
1451                        arr[3] = n - 4;   /* number of supported VPD pages */
1452                } else if (0x80 == cmd[2]) { /* unit serial number */
1453                        arr[1] = cmd[2];        /*sanity */
1454                        arr[3] = len;
1455                        memcpy(&arr[4], lu_id_str, len);
1456                } else if (0x83 == cmd[2]) { /* device identification */
1457                        arr[1] = cmd[2];        /*sanity */
1458                        arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1459                                                target_dev_id, lu_id_num,
1460                                                lu_id_str, len,
1461                                                &devip->lu_name);
1462                } else if (0x84 == cmd[2]) { /* Software interface ident. */
1463                        arr[1] = cmd[2];        /*sanity */
1464                        arr[3] = inquiry_vpd_84(&arr[4]);
1465                } else if (0x85 == cmd[2]) { /* Management network addresses */
1466                        arr[1] = cmd[2];        /*sanity */
1467                        arr[3] = inquiry_vpd_85(&arr[4]);
1468                } else if (0x86 == cmd[2]) { /* extended inquiry */
1469                        arr[1] = cmd[2];        /*sanity */
1470                        arr[3] = 0x3c;  /* number of following entries */
1471                        if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1472                                arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
1473                        else if (have_dif_prot)
1474                                arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1475                        else
1476                                arr[4] = 0x0;   /* no protection stuff */
1477                        arr[5] = 0x7;   /* head of q, ordered + simple q's */
1478                } else if (0x87 == cmd[2]) { /* mode page policy */
1479                        arr[1] = cmd[2];        /*sanity */
1480                        arr[3] = 0x8;   /* number of following entries */
1481                        arr[4] = 0x2;   /* disconnect-reconnect mp */
1482                        arr[6] = 0x80;  /* mlus, shared */
1483                        arr[8] = 0x18;   /* protocol specific lu */
1484                        arr[10] = 0x82;  /* mlus, per initiator port */
1485                } else if (0x88 == cmd[2]) { /* SCSI Ports */
1486                        arr[1] = cmd[2];        /*sanity */
1487                        arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1488                } else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
1489                        arr[1] = cmd[2];        /*sanity */
1490                        n = inquiry_vpd_89(&arr[4]);
1491                        put_unaligned_be16(n, arr + 2);
1492                } else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
1493                        arr[1] = cmd[2];        /*sanity */
1494                        arr[3] = inquiry_vpd_b0(&arr[4]);
1495                } else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
1496                        arr[1] = cmd[2];        /*sanity */
1497                        arr[3] = inquiry_vpd_b1(&arr[4]);
1498                } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1499                        arr[1] = cmd[2];        /*sanity */
1500                        arr[3] = inquiry_vpd_b2(&arr[4]);
1501                } else {
1502                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1503                        kfree(arr);
1504                        return check_condition_result;
1505                }
1506                len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1507                ret = fill_from_dev_buffer(scp, arr,
1508                            min(len, SDEBUG_MAX_INQ_ARR_SZ));
1509                kfree(arr);
1510                return ret;
1511        }
1512        /* drops through here for a standard inquiry */
1513        arr[1] = sdebug_removable ? 0x80 : 0;   /* Removable disk */
1514        arr[2] = sdebug_scsi_level;
1515        arr[3] = 2;    /* response_data_format==2 */
1516        arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1517        arr[5] = (int)have_dif_prot;    /* PROTECT bit */
1518        if (sdebug_vpd_use_hostno == 0)
1519                arr[5] |= 0x10; /* claim: implicit TPGS */
1520        arr[6] = 0x10; /* claim: MultiP */
1521        /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1522        arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1523        memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1524        memcpy(&arr[16], sdebug_inq_product_id, 16);
1525        memcpy(&arr[32], sdebug_inq_product_rev, 4);
1526        /* Use Vendor Specific area to place driver date in ASCII hex */
1527        memcpy(&arr[36], sdebug_version_date, 8);
1528        /* version descriptors (2 bytes each) follow */
1529        put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1530        put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1531        n = 62;
1532        if (is_disk) {          /* SBC-4 no version claimed */
1533                put_unaligned_be16(0x600, arr + n);
1534                n += 2;
1535        } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1536                put_unaligned_be16(0x525, arr + n);
1537                n += 2;
1538        }
1539        put_unaligned_be16(0x2100, arr + n);    /* SPL-4 no version claimed */
1540        ret = fill_from_dev_buffer(scp, arr,
1541                            min(alloc_len, SDEBUG_LONG_INQ_SZ));
1542        kfree(arr);
1543        return ret;
1544}
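/*
 * Allocation length handling above, by way of example: an EVPD request for
 * the Block Limits page (0xb0) sets arr[3] = 0x3c, so the whole page is
 * 0x3c + 4 = 64 bytes; if the initiator's ALLOCATION LENGTH in CDB bytes
 * 3-4 is only 36 (hypothetically), fill_from_dev_buffer() is handed just
 * 36 bytes and the response is truncated, as SPC permits.
 */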
1545
1546static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1547                                   0, 0, 0x0, 0x0};
1548
1549static int resp_requests(struct scsi_cmnd *scp,
1550                         struct sdebug_dev_info *devip)
1551{
1552        unsigned char *sbuff;
1553        unsigned char *cmd = scp->cmnd;
1554        unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1555        bool dsense;
1556        int len = 18;
1557
1558        memset(arr, 0, sizeof(arr));
1559        dsense = !!(cmd[1] & 1);
1560        sbuff = scp->sense_buffer;
1561        if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1562                if (dsense) {
1563                        arr[0] = 0x72;
1564                        arr[1] = 0x0;           /* NO_SENSE in sense_key */
1565                        arr[2] = THRESHOLD_EXCEEDED;
1566                        arr[3] = 0xff;          /* TEST set and MRIE==6 */
1567                        len = 8;
1568                } else {
1569                        arr[0] = 0x70;
1570                        arr[2] = 0x0;           /* NO_SENSE in sense_key */
1571                        arr[7] = 0xa;           /* 18 byte sense buffer */
1572                        arr[12] = THRESHOLD_EXCEEDED;
1573                        arr[13] = 0xff;         /* TEST set and MRIE==6 */
1574                }
1575        } else {
1576                memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1577                if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1578                        ;       /* have sense and formats match */
1579                else if (arr[0] <= 0x70) {
1580                        if (dsense) {
1581                                memset(arr, 0, 8);
1582                                arr[0] = 0x72;
1583                                len = 8;
1584                        } else {
1585                                memset(arr, 0, 18);
1586                                arr[0] = 0x70;
1587                                arr[7] = 0xa;
1588                        }
1589                } else if (dsense) {
1590                        memset(arr, 0, 8);
1591                        arr[0] = 0x72;
1592                        arr[1] = sbuff[2];     /* sense key */
1593                        arr[2] = sbuff[12];    /* asc */
1594                        arr[3] = sbuff[13];    /* ascq */
1595                        len = 8;
1596                } else {
1597                        memset(arr, 0, 18);
1598                        arr[0] = 0x70;
1599                        arr[2] = sbuff[1];     /* sense key */
1600                        arr[7] = 0xa;
1601                        arr[12] = sbuff[2];    /* asc */
1602                        arr[13] = sbuff[3];    /* ascq */
1603                }
1604
1605        }
1606        mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1607        return fill_from_dev_buffer(scp, arr, len);
1608}
1609
1610static int resp_start_stop(struct scsi_cmnd *scp,
1611                           struct sdebug_dev_info *devip)
1612{
1613        unsigned char *cmd = scp->cmnd;
1614        int power_cond, stop;
1615        bool changing;
1616
1617        power_cond = (cmd[4] & 0xf0) >> 4;
1618        if (power_cond) {
1619                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1620                return check_condition_result;
1621        }
1622        stop = !(cmd[4] & 1);
1623        changing = atomic_read(&devip->stopped) == !stop;
1624        atomic_xchg(&devip->stopped, stop);
1625        if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
1626                return SDEG_RES_IMMED_MASK;
1627        else
1628                return 0;
1629}
1630
1631static sector_t get_sdebug_capacity(void)
1632{
1633        static const unsigned int gibibyte = 1073741824;
1634
1635        if (sdebug_virtual_gb > 0)
1636                return (sector_t)sdebug_virtual_gb *
1637                        (gibibyte / sdebug_sector_size);
1638        else
1639                return sdebug_store_sectors;
1640}
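/*
 * For example, with hypothetical module parameters virtual_gb=4 and
 * sector_size=512 this returns 4 * (1073741824 / 512) = 8388608 sectors,
 * i.e. a 4 GiB virtual disk that may be larger than the allocated fake
 * store.
 */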
1641
1642#define SDEBUG_READCAP_ARR_SZ 8
1643static int resp_readcap(struct scsi_cmnd *scp,
1644                        struct sdebug_dev_info *devip)
1645{
1646        unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1647        unsigned int capac;
1648
1649        /* following just in case virtual_gb changed */
1650        sdebug_capacity = get_sdebug_capacity();
1651        memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1652        if (sdebug_capacity < 0xffffffff) {
1653                capac = (unsigned int)sdebug_capacity - 1;
1654                put_unaligned_be32(capac, arr + 0);
1655        } else
1656                put_unaligned_be32(0xffffffff, arr + 0);
1657        put_unaligned_be16(sdebug_sector_size, arr + 6);
1658        return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1659}
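/*
 * READ CAPACITY(10) reports the last logical block address, not the block
 * count: a capacity of 8388608 sectors (hypothetical) is returned as
 * 8388607 (0x007fffff). Capacities of 0xffffffff blocks or more are
 * reported as 0xffffffff, telling the initiator to retry with
 * READ CAPACITY(16).
 */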
1660
1661#define SDEBUG_READCAP16_ARR_SZ 32
1662static int resp_readcap16(struct scsi_cmnd *scp,
1663                          struct sdebug_dev_info *devip)
1664{
1665        unsigned char *cmd = scp->cmnd;
1666        unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1667        int alloc_len;
1668
1669        alloc_len = get_unaligned_be32(cmd + 10);
1670        /* following just in case virtual_gb changed */
1671        sdebug_capacity = get_sdebug_capacity();
1672        memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1673        put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1674        put_unaligned_be32(sdebug_sector_size, arr + 8);
1675        arr[13] = sdebug_physblk_exp & 0xf;
1676        arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1677
1678        if (scsi_debug_lbp()) {
1679                arr[14] |= 0x80; /* LBPME */
1680                /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1681                 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1682                 * in the wider field maps to 0 in this field.
1683                 */
1684                if (sdebug_lbprz & 1)   /* precisely what the draft requires */
1685                        arr[14] |= 0x40;
1686        }
1687
1688        arr[15] = sdebug_lowest_aligned & 0xff;
1689
1690        if (have_dif_prot) {
1691                arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1692                arr[12] |= 1; /* PROT_EN */
1693        }
1694
1695        return fill_from_dev_buffer(scp, arr,
1696                                    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1697}
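/*
 * Protection encoding above: byte 12 carries P_TYPE in bits 3:1 and
 * PROT_EN in bit 0, so sdebug_dif=1 (Type 1) yields 0x01, sdebug_dif=2
 * yields 0x03 and sdebug_dif=3 yields 0x05; without protection the byte
 * stays 0.
 */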
1698
1699#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1700
1701static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1702                              struct sdebug_dev_info *devip)
1703{
1704        unsigned char *cmd = scp->cmnd;
1705        unsigned char *arr;
1706        int host_no = devip->sdbg_host->shost->host_no;
1707        int n, ret, alen, rlen;
1708        int port_group_a, port_group_b, port_a, port_b;
1709
1710        alen = get_unaligned_be32(cmd + 6);
1711        arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1712        if (!arr)
1713                return DID_REQUEUE << 16;
1714        /*
1715         * EVPD page 0x88 states we have two ports, one
1716         * real and a fake port with no device connected.
1717         * So we create two port groups with one port each
1718         * and set the group with port B to unavailable.
1719         */
1720        port_a = 0x1; /* relative port A */
1721        port_b = 0x2; /* relative port B */
1722        port_group_a = (((host_no + 1) & 0x7f) << 8) +
1723                        (devip->channel & 0x7f);
1724        port_group_b = (((host_no + 1) & 0x7f) << 8) +
1725                        (devip->channel & 0x7f) + 0x80;
1726
1727        /*
1728         * The asymmetric access state is cycled according to the host_id.
1729         */
1730        n = 4;
1731        if (sdebug_vpd_use_hostno == 0) {
1732                arr[n++] = host_no % 3; /* Asymm access state */
1733                arr[n++] = 0x0F; /* claim: all states are supported */
1734        } else {
1735                arr[n++] = 0x0; /* Active/Optimized path */
1736                arr[n++] = 0x01; /* only support active/optimized paths */
1737        }
1738        put_unaligned_be16(port_group_a, arr + n);
1739        n += 2;
1740        arr[n++] = 0;    /* Reserved */
1741        arr[n++] = 0;    /* Status code */
1742        arr[n++] = 0;    /* Vendor unique */
1743        arr[n++] = 0x1;  /* One port per group */
1744        arr[n++] = 0;    /* Reserved */
1745        arr[n++] = 0;    /* Reserved */
1746        put_unaligned_be16(port_a, arr + n);
1747        n += 2;
1748        arr[n++] = 3;    /* Port unavailable */
1749        arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1750        put_unaligned_be16(port_group_b, arr + n);
1751        n += 2;
1752        arr[n++] = 0;    /* Reserved */
1753        arr[n++] = 0;    /* Status code */
1754        arr[n++] = 0;    /* Vendor unique */
1755        arr[n++] = 0x1;  /* One port per group */
1756        arr[n++] = 0;    /* Reserved */
1757        arr[n++] = 0;    /* Reserved */
1758        put_unaligned_be16(port_b, arr + n);
1759        n += 2;
1760
1761        rlen = n - 4;
1762        put_unaligned_be32(rlen, arr + 0);
1763
1764        /*
1765         * Return the smallest value of either
1766         * - The allocated length
1767         * - The constructed command length
1768         * - The maximum array size
1769         */
1770        rlen = min(alen, n);
1771        ret = fill_from_dev_buffer(scp, arr,
1772                                   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1773        kfree(arr);
1774        return ret;
1775}
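/*
 * Port group numbering above, with a hypothetical host_no=2 and channel=0:
 * port_group_a = ((2 + 1) & 0x7f) << 8 = 0x0300 and port_group_b =
 * 0x0300 + 0x80 = 0x0380, paired with the relative target ports 1 and 2
 * that the SCSI Ports VPD page (0x88) advertises.
 */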
1776
1777static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1778                             struct sdebug_dev_info *devip)
1779{
1780        bool rctd;
1781        u8 reporting_opts, req_opcode, sdeb_i, supp;
1782        u16 req_sa, u;
1783        u32 alloc_len, a_len;
1784        int k, offset, len, errsts, count, bump, na;
1785        const struct opcode_info_t *oip;
1786        const struct opcode_info_t *r_oip;
1787        u8 *arr;
1788        u8 *cmd = scp->cmnd;
1789
1790        rctd = !!(cmd[2] & 0x80);
1791        reporting_opts = cmd[2] & 0x7;
1792        req_opcode = cmd[3];
1793        req_sa = get_unaligned_be16(cmd + 4);
1794        alloc_len = get_unaligned_be32(cmd + 6);
1795        if (alloc_len < 4 || alloc_len > 0xffff) {
1796                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1797                return check_condition_result;
1798        }
1799        if (alloc_len > 8192)
1800                a_len = 8192;
1801        else
1802                a_len = alloc_len;
1803        arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1804        if (NULL == arr) {
1805                mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1806                                INSUFF_RES_ASCQ);
1807                return check_condition_result;
1808        }
1809        switch (reporting_opts) {
1810        case 0: /* all commands */
1811                /* count number of commands */
1812                for (count = 0, oip = opcode_info_arr;
1813                     oip->num_attached != 0xff; ++oip) {
1814                        if (F_INV_OP & oip->flags)
1815                                continue;
1816                        count += (oip->num_attached + 1);
1817                }
1818                bump = rctd ? 20 : 8;
1819                put_unaligned_be32(count * bump, arr);
1820                for (offset = 4, oip = opcode_info_arr;
1821                     oip->num_attached != 0xff && offset < a_len; ++oip) {
1822                        if (F_INV_OP & oip->flags)
1823                                continue;
1824                        na = oip->num_attached;
1825                        arr[offset] = oip->opcode;
1826                        put_unaligned_be16(oip->sa, arr + offset + 2);
1827                        if (rctd)
1828                                arr[offset + 5] |= 0x2;
1829                        if (FF_SA & oip->flags)
1830                                arr[offset + 5] |= 0x1;
1831                        put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1832                        if (rctd)
1833                                put_unaligned_be16(0xa, arr + offset + 8);
1834                        r_oip = oip;
1835                        for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1836                                if (F_INV_OP & oip->flags)
1837                                        continue;
1838                                offset += bump;
1839                                arr[offset] = oip->opcode;
1840                                put_unaligned_be16(oip->sa, arr + offset + 2);
1841                                if (rctd)
1842                                        arr[offset + 5] |= 0x2;
1843                                if (FF_SA & oip->flags)
1844                                        arr[offset + 5] |= 0x1;
1845                                put_unaligned_be16(oip->len_mask[0],
1846                                                   arr + offset + 6);
1847                                if (rctd)
1848                                        put_unaligned_be16(0xa,
1849                                                           arr + offset + 8);
1850                        }
1851                        oip = r_oip;
1852                        offset += bump;
1853                }
1854                break;
1855        case 1: /* one command: opcode only */
1856        case 2: /* one command: opcode plus service action */
1857        case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1858                sdeb_i = opcode_ind_arr[req_opcode];
1859                oip = &opcode_info_arr[sdeb_i];
1860                if (F_INV_OP & oip->flags) {
1861                        supp = 1;
1862                        offset = 4;
1863                } else {
1864                        if (1 == reporting_opts) {
1865                                if (FF_SA & oip->flags) {
1866                                        mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1867                                                             2, 2);
1868                                        kfree(arr);
1869                                        return check_condition_result;
1870                                }
1871                                req_sa = 0;
1872                        } else if (2 == reporting_opts &&
1873                                   0 == (FF_SA & oip->flags)) {
1874                                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1875                                kfree(arr);     /* point at requested sa */
1876                                return check_condition_result;
1877                        }
1878                        if (0 == (FF_SA & oip->flags) &&
1879                            req_opcode == oip->opcode)
1880                                supp = 3;
1881                        else if (0 == (FF_SA & oip->flags)) {
1882                                na = oip->num_attached;
1883                                for (k = 0, oip = oip->arrp; k < na;
1884                                     ++k, ++oip) {
1885                                        if (req_opcode == oip->opcode)
1886                                                break;
1887                                }
1888                                supp = (k >= na) ? 1 : 3;
1889                        } else if (req_sa != oip->sa) {
1890                                na = oip->num_attached;
1891                                for (k = 0, oip = oip->arrp; k < na;
1892                                     ++k, ++oip) {
1893                                        if (req_sa == oip->sa)
1894                                                break;
1895                                }
1896                                supp = (k >= na) ? 1 : 3;
1897                        } else
1898                                supp = 3;
1899                        if (3 == supp) {
1900                                u = oip->len_mask[0];
1901                                put_unaligned_be16(u, arr + 2);
1902                                arr[4] = oip->opcode;
1903                                for (k = 1; k < u; ++k)
1904                                        arr[4 + k] = (k < 16) ?
1905                                                 oip->len_mask[k] : 0xff;
1906                                offset = 4 + u;
1907                        } else
1908                                offset = 4;
1909                }
1910                arr[1] = (rctd ? 0x80 : 0) | supp;
1911                if (rctd) {
1912                        put_unaligned_be16(0xa, arr + offset);
1913                        offset += 12;
1914                }
1915                break;
1916        default:
1917                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1918                kfree(arr);
1919                return check_condition_result;
1920        }
1921        offset = (offset < a_len) ? offset : a_len;
1922        len = (offset < alloc_len) ? offset : alloc_len;
1923        errsts = fill_from_dev_buffer(scp, arr, len);
1924        kfree(arr);
1925        return errsts;
1926}
1927
1928static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1929                          struct sdebug_dev_info *devip)
1930{
1931        bool repd;
1932        u32 alloc_len, len;
1933        u8 arr[16];
1934        u8 *cmd = scp->cmnd;
1935
1936        memset(arr, 0, sizeof(arr));
1937        repd = !!(cmd[2] & 0x80);
1938        alloc_len = get_unaligned_be32(cmd + 6);
1939        if (alloc_len < 4) {
1940                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1941                return check_condition_result;
1942        }
1943        arr[0] = 0xc8;          /* ATS | ATSS | LURS */
1944        arr[1] = 0x1;           /* ITNRS */
1945        if (repd) {
1946                arr[3] = 0xc;
1947                len = 16;
1948        } else
1949                len = 4;
1950
1951        len = (len < alloc_len) ? len : alloc_len;
1952        return fill_from_dev_buffer(scp, arr, len);
1953}
1954
1955/* <<Following mode page info copied from ST318451LW>> */
1956
1957static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
1958{       /* Read-Write Error Recovery page for mode_sense */
1959        unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1960                                        5, 0, 0xff, 0xff};
1961
1962        memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1963        if (1 == pcontrol)
1964                memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1965        return sizeof(err_recov_pg);
1966}
1967
1968static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
1969{       /* Disconnect-Reconnect page for mode_sense */
1970        unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1971                                         0, 0, 0, 0, 0, 0, 0, 0};
1972
1973        memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1974        if (1 == pcontrol)
1975                memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1976        return sizeof(disconnect_pg);
1977}
1978
1979static int resp_format_pg(unsigned char *p, int pcontrol, int target)
1980{       /* Format device page for mode_sense */
1981        unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1982                                     0, 0, 0, 0, 0, 0, 0, 0,
1983                                     0, 0, 0, 0, 0x40, 0, 0, 0};
1984
1985        memcpy(p, format_pg, sizeof(format_pg));
1986        put_unaligned_be16(sdebug_sectors_per, p + 10);
1987        put_unaligned_be16(sdebug_sector_size, p + 12);
1988        if (sdebug_removable)
1989                p[20] |= 0x20; /* should agree with INQUIRY */
1990        if (1 == pcontrol)
1991                memset(p + 2, 0, sizeof(format_pg) - 2);
1992        return sizeof(format_pg);
1993}
1994
1995static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1996                                     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
1997                                     0, 0, 0, 0};
1998
1999static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2000{       /* Caching page for mode_sense */
2001        unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2002                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2003        unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2004                0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2005
2006        if (SDEBUG_OPT_N_WCE & sdebug_opts)
2007                caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
2008        memcpy(p, caching_pg, sizeof(caching_pg));
2009        if (1 == pcontrol)
2010                memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2011        else if (2 == pcontrol)
2012                memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2013        return sizeof(caching_pg);
2014}
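/*
 * In the default caching page above, byte 2 is 0x14, i.e. WCE (bit 2) and
 * DISC (bit 4) set; when the SDEBUG_OPT_N_WCE flag is set in sdebug_opts
 * the WCE bit is cleared and the byte becomes 0x10, so the device claims
 * write-through caching instead.
 */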
2015
2016static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2017                                    0, 0, 0x2, 0x4b};
2018
2019static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2020{       /* Control mode page for mode_sense */
2021        unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2022                                        0, 0, 0, 0};
2023        unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2024                                     0, 0, 0x2, 0x4b};
2025
2026        if (sdebug_dsense)
2027                ctrl_m_pg[2] |= 0x4;
2028        else
2029                ctrl_m_pg[2] &= ~0x4;
2030
2031        if (sdebug_ato)
2032                ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2033
2034        memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2035        if (1 == pcontrol)
2036                memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2037        else if (2 == pcontrol)
2038                memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2039        return sizeof(ctrl_m_pg);
2040}
2041
2042
2043static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2044{       /* Informational Exceptions control mode page for mode_sense */
2045        unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2046                                       0, 0, 0x0, 0x0};
2047        unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2048                                      0, 0, 0x0, 0x0};
2049
2050        memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2051        if (1 == pcontrol)
2052                memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2053        else if (2 == pcontrol)
2054                memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2055        return sizeof(iec_m_pg);
2056}
2057
2058static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2059{       /* SAS SSP mode page - short format for mode_sense */
2060        unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2061                0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2062
2063        memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2064        if (1 == pcontrol)
2065                memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2066        return sizeof(sas_sf_m_pg);
2067}
2068
2069
2070static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2071                              int target_dev_id)
2072{       /* SAS phy control and discover mode page for mode_sense */
2073        unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2074                    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2075                    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2076                    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2077                    0x2, 0, 0, 0, 0, 0, 0, 0,
2078                    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2079                    0, 0, 0, 0, 0, 0, 0, 0,
2080                    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2081                    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2082                    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2083                    0x3, 0, 0, 0, 0, 0, 0, 0,
2084                    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2085                    0, 0, 0, 0, 0, 0, 0, 0,
2086                };
2087        int port_a, port_b;
2088
2089        put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2090        put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2091        put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2092        put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2093        port_a = target_dev_id + 1;
2094        port_b = port_a + 1;
2095        memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2096        put_unaligned_be32(port_a, p + 20);
2097        put_unaligned_be32(port_b, p + 48 + 20);
2098        if (1 == pcontrol)
2099                memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2100        return sizeof(sas_pcd_m_pg);
2101}
2102
2103static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2104{       /* SAS SSP shared protocol specific port mode subpage */
2105        unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2106                    0, 0, 0, 0, 0, 0, 0, 0,
2107                };
2108
2109        memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2110        if (1 == pcontrol)
2111                memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2112        return sizeof(sas_sha_m_pg);
2113}
2114
2115#define SDEBUG_MAX_MSENSE_SZ 256
2116
2117static int resp_mode_sense(struct scsi_cmnd *scp,
2118                           struct sdebug_dev_info *devip)
2119{
2120        int pcontrol, pcode, subpcode, bd_len;
2121        unsigned char dev_spec;
2122        int alloc_len, offset, len, target_dev_id;
2123        int target = scp->device->id;
2124        unsigned char *ap;
2125        unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2126        unsigned char *cmd = scp->cmnd;
2127        bool dbd, llbaa, msense_6, is_disk, bad_pcode;
2128
2129        dbd = !!(cmd[1] & 0x8);         /* disable block descriptors */
2130        pcontrol = (cmd[2] & 0xc0) >> 6;
2131        pcode = cmd[2] & 0x3f;
2132        subpcode = cmd[3];
2133        msense_6 = (MODE_SENSE == cmd[0]);
2134        llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2135        is_disk = (sdebug_ptype == TYPE_DISK);
2136        if (is_disk && !dbd)
2137                bd_len = llbaa ? 16 : 8;
2138        else
2139                bd_len = 0;
2140        alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2141        memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2142        if (0x3 == pcontrol) {  /* Saving values not supported */
2143                mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2144                return check_condition_result;
2145        }
2146        target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2147                        (devip->target * 1000) - 3;
2148        /* for disks set DPOFUA bit and clear write protect (WP) bit */
2149        if (is_disk)
2150                dev_spec = 0x10;        /* =0x90 if WP=1 implies read-only */
2151        else
2152                dev_spec = 0x0;
2153        if (msense_6) {
2154                arr[2] = dev_spec;
2155                arr[3] = bd_len;
2156                offset = 4;
2157        } else {
2158                arr[3] = dev_spec;
2159                if (16 == bd_len)
2160                        arr[4] = 0x1;   /* set LONGLBA bit */
2161                arr[7] = bd_len;        /* assume 255 or less */
2162                offset = 8;
2163        }
2164        ap = arr + offset;
2165        if ((bd_len > 0) && (!sdebug_capacity))
2166                sdebug_capacity = get_sdebug_capacity();
2167
2168        if (8 == bd_len) {
2169                if (sdebug_capacity > 0xfffffffe)
2170                        put_unaligned_be32(0xffffffff, ap + 0);
2171                else
2172                        put_unaligned_be32(sdebug_capacity, ap + 0);
2173                put_unaligned_be16(sdebug_sector_size, ap + 6);
2174                offset += bd_len;
2175                ap = arr + offset;
2176        } else if (16 == bd_len) {
2177                put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2178                put_unaligned_be32(sdebug_sector_size, ap + 12);
2179                offset += bd_len;
2180                ap = arr + offset;
2181        }
2182
2183        if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2184                /* TODO: Control Extension page */
2185                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2186                return check_condition_result;
2187        }
2188        bad_pcode = false;
2189
2190        switch (pcode) {
2191        case 0x1:       /* Read-Write error recovery page, direct access */
2192                len = resp_err_recov_pg(ap, pcontrol, target);
2193                offset += len;
2194                break;
2195        case 0x2:       /* Disconnect-Reconnect page, all devices */
2196                len = resp_disconnect_pg(ap, pcontrol, target);
2197                offset += len;
2198                break;
2199        case 0x3:       /* Format device page, direct access */
2200                if (is_disk) {
2201                        len = resp_format_pg(ap, pcontrol, target);
2202                        offset += len;
2203                } else
2204                        bad_pcode = true;
2205                break;
2206        case 0x8:       /* Caching page, direct access */
2207                if (is_disk) {
2208                        len = resp_caching_pg(ap, pcontrol, target);
2209                        offset += len;
2210                } else
2211                        bad_pcode = true;
2212                break;
2213        case 0xa:       /* Control Mode page, all devices */
2214                len = resp_ctrl_m_pg(ap, pcontrol, target);
2215                offset += len;
2216                break;
2217        case 0x19:      /* if spc==1 then sas phy, control+discover */
2218                if ((subpcode > 0x2) && (subpcode < 0xff)) {
2219                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2220                        return check_condition_result;
2221                }
2222                len = 0;
2223                if ((0x0 == subpcode) || (0xff == subpcode))
2224                        len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2225                if ((0x1 == subpcode) || (0xff == subpcode))
2226                        len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2227                                                  target_dev_id);
2228                if ((0x2 == subpcode) || (0xff == subpcode))
2229                        len += resp_sas_sha_m_spg(ap + len, pcontrol);
2230                offset += len;
2231                break;
2232        case 0x1c:      /* Informational Exceptions Mode page, all devices */
2233                len = resp_iec_m_pg(ap, pcontrol, target);
2234                offset += len;
2235                break;
2236        case 0x3f:      /* Read all Mode pages */
2237                if ((0 == subpcode) || (0xff == subpcode)) {
2238                        len = resp_err_recov_pg(ap, pcontrol, target);
2239                        len += resp_disconnect_pg(ap + len, pcontrol, target);
2240                        if (is_disk) {
2241                                len += resp_format_pg(ap + len, pcontrol,
2242                                                      target);
2243                                len += resp_caching_pg(ap + len, pcontrol,
2244                                                       target);
2245                        }
2246                        len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2247                        len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2248                        if (0xff == subpcode) {
2249                                len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2250                                                  target, target_dev_id);
2251                                len += resp_sas_sha_m_spg(ap + len, pcontrol);
2252                        }
2253                        len += resp_iec_m_pg(ap + len, pcontrol, target);
2254                        offset += len;
2255                } else {
2256                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2257                        return check_condition_result;
2258                }
2259                break;
2260        default:
2261                bad_pcode = true;
2262                break;
2263        }
2264        if (bad_pcode) {
2265                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2266                return check_condition_result;
2267        }
2268        if (msense_6)
2269                arr[0] = offset - 1;
2270        else
2271                put_unaligned_be16((offset - 2), arr + 0);
2272        return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2273}
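/*
 * Mode data length example for the 6-byte MODE SENSE path above: a disk
 * request for the caching page builds a 4-byte header, an 8-byte block
 * descriptor and a 20-byte page, so offset ends up at 32 and arr[0] is set
 * to 31 (the MODE DATA LENGTH field excludes its own byte). The 10-byte
 * variant stores offset - 2 in its two-byte length field instead.
 */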
2274
2275#define SDEBUG_MAX_MSELECT_SZ 512
2276
2277static int resp_mode_select(struct scsi_cmnd *scp,
2278                            struct sdebug_dev_info *devip)
2279{
2280        int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2281        int param_len, res, mpage;
2282        unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2283        unsigned char *cmd = scp->cmnd;
2284        int mselect6 = (MODE_SELECT == cmd[0]);
2285
2286        memset(arr, 0, sizeof(arr));
2287        pf = cmd[1] & 0x10;
2288        sp = cmd[1] & 0x1;
2289        param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2290        if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2291                mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2292                return check_condition_result;
2293        }
2294        res = fetch_to_dev_buffer(scp, arr, param_len);
2295        if (-1 == res)
2296                return DID_ERROR << 16;
2297        else if (sdebug_verbose && (res < param_len))
2298                sdev_printk(KERN_INFO, scp->device,
2299                            "%s: cdb indicated=%d, IO sent=%d bytes\n",
2300                            __func__, param_len, res);
2301        md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2302        bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2303        if (md_len > 2) {
2304                mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2305                return check_condition_result;
2306        }
2307        off = bd_len + (mselect6 ? 4 : 8);
2308        mpage = arr[off] & 0x3f;
2309        ps = !!(arr[off] & 0x80);
2310        if (ps) {
2311                mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2312                return check_condition_result;
2313        }
2314        spf = !!(arr[off] & 0x40);
2315        pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2316                       (arr[off + 1] + 2);
2317        if ((pg_len + off) > param_len) {
2318                mk_sense_buffer(scp, ILLEGAL_REQUEST,
2319                                PARAMETER_LIST_LENGTH_ERR, 0);
2320                return check_condition_result;
2321        }
2322        switch (mpage) {
2323        case 0x8:      /* Caching Mode page */
2324                if (caching_pg[1] == arr[off + 1]) {
2325                        memcpy(caching_pg + 2, arr + off + 2,
2326                               sizeof(caching_pg) - 2);
2327                        goto set_mode_changed_ua;
2328                }
2329                break;
2330        case 0xa:      /* Control Mode page */
2331                if (ctrl_m_pg[1] == arr[off + 1]) {
2332                        memcpy(ctrl_m_pg + 2, arr + off + 2,
2333                               sizeof(ctrl_m_pg) - 2);
2334                        sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2335                        goto set_mode_changed_ua;
2336                }
2337                break;
2338        case 0x1c:      /* Informational Exceptions Mode page */
2339                if (iec_m_pg[1] == arr[off + 1]) {
2340                        memcpy(iec_m_pg + 2, arr + off + 2,
2341                               sizeof(iec_m_pg) - 2);
2342                        goto set_mode_changed_ua;
2343                }
2344                break;
2345        default:
2346                break;
2347        }
2348        mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2349        return check_condition_result;
2350set_mode_changed_ua:
2351        set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2352        return 0;
2353}
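/*
 * Offset arithmetic above, by way of example: a MODE SELECT(10) parameter
 * list with an 8-byte block descriptor puts the first page at
 * off = 8 + 8 = 16, so the page code is arr[16] & 0x3f and the page length
 * check covers arr[17] + 2 bytes (or the 4-byte subpage header form when
 * the SPF bit is set).
 */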
2354
2355static int resp_temp_l_pg(unsigned char *arr)
2356{
2357        unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2358                                     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2359                };
2360
2361        memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2362        return sizeof(temp_l_pg);
2363}
2364
2365static int resp_ie_l_pg(unsigned char *arr)
2366{
2367        unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2368                };
2369
2370        memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2371        if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2372                arr[4] = THRESHOLD_EXCEEDED;
2373                arr[5] = 0xff;
2374        }
2375        return sizeof(ie_l_pg);
2376}
2377
2378#define SDEBUG_MAX_LSENSE_SZ 512
2379
2380static int resp_log_sense(struct scsi_cmnd *scp,
2381                          struct sdebug_dev_info *devip)
2382{
2383        int ppc, sp, pcode, subpcode, alloc_len, len, n;
2384        unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2385        unsigned char *cmd = scp->cmnd;
2386
2387        memset(arr, 0, sizeof(arr));
2388        ppc = cmd[1] & 0x2;
2389        sp = cmd[1] & 0x1;
2390        if (ppc || sp) {
2391                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2392                return check_condition_result;
2393        }
2394        pcode = cmd[2] & 0x3f;
2395        subpcode = cmd[3] & 0xff;
2396        alloc_len = get_unaligned_be16(cmd + 7);
2397        arr[0] = pcode;
2398        if (0 == subpcode) {
2399                switch (pcode) {
2400                case 0x0:       /* Supported log pages log page */
2401                        n = 4;
2402                        arr[n++] = 0x0;         /* this page */
2403                        arr[n++] = 0xd;         /* Temperature */
2404                        arr[n++] = 0x2f;        /* Informational exceptions */
2405                        arr[3] = n - 4;
2406                        break;
2407                case 0xd:       /* Temperature log page */
2408                        arr[3] = resp_temp_l_pg(arr + 4);
2409                        break;
2410                case 0x2f:      /* Informational exceptions log page */
2411                        arr[3] = resp_ie_l_pg(arr + 4);
2412                        break;
2413                default:
2414                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2415                        return check_condition_result;
2416                }
2417        } else if (0xff == subpcode) {
2418                arr[0] |= 0x40;
2419                arr[1] = subpcode;
2420                switch (pcode) {
2421                case 0x0:       /* Supported log pages and subpages log page */
2422                        n = 4;
2423                        arr[n++] = 0x0;
2424                        arr[n++] = 0x0;         /* 0,0 page */
2425                        arr[n++] = 0x0;
2426                        arr[n++] = 0xff;        /* this page */
2427                        arr[n++] = 0xd;
2428                        arr[n++] = 0x0;         /* Temperature */
2429                        arr[n++] = 0x2f;
2430                        arr[n++] = 0x0; /* Informational exceptions */
2431                        arr[3] = n - 4;
2432                        break;
2433                case 0xd:       /* Temperature subpages */
2434                        n = 4;
2435                        arr[n++] = 0xd;
2436                        arr[n++] = 0x0;         /* Temperature */
2437                        arr[3] = n - 4;
2438                        break;
2439                case 0x2f:      /* Informational exceptions subpages */
2440                        n = 4;
2441                        arr[n++] = 0x2f;
2442                        arr[n++] = 0x0;         /* Informational exceptions */
2443                        arr[3] = n - 4;
2444                        break;
2445                default:
2446                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2447                        return check_condition_result;
2448                }
2449        } else {
2450                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2451                return check_condition_result;
2452        }
2453        len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2454        return fill_from_dev_buffer(scp, arr,
2455                    min(len, SDEBUG_MAX_LSENSE_SZ));
2456}
2457
2458static int check_device_access_params(struct scsi_cmnd *scp,
2459                                      unsigned long long lba, unsigned int num)
2460{
2461        if (lba + num > sdebug_capacity) {
2462                mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2463                return check_condition_result;
2464        }
2465        /* transfer length excessive (tie in to block limits VPD page) */
2466        if (num > sdebug_store_sectors) {
2467                /* needs work to find which cdb byte 'num' comes from */
2468                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2469                return check_condition_result;
2470        }
2471        return 0;
2472}
2473
2474/* Returns number of bytes copied or -1 if error. */
2475static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
2476                            u32 num, bool do_write)
2477{
2478        int ret;
2479        u64 block, rest = 0;
2480        struct scsi_data_buffer *sdb;
2481        enum dma_data_direction dir;
2482
2483        if (do_write) {
2484                sdb = scsi_out(scmd);
2485                dir = DMA_TO_DEVICE;
2486                write_since_sync = true;
2487        } else {
2488                sdb = scsi_in(scmd);
2489                dir = DMA_FROM_DEVICE;
2490        }
2491
2492        if (!sdb->length)
2493                return 0;
2494        if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2495                return -1;
2496
2497        block = do_div(lba, sdebug_store_sectors);
2498        if (block + num > sdebug_store_sectors)
2499                rest = block + num - sdebug_store_sectors;
2500
2501        ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2502                   fake_storep + (block * sdebug_sector_size),
2503                   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2504        if (ret != (num - rest) * sdebug_sector_size)
2505                return ret;
2506
2507        if (rest) {
2508                ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2509                            fake_storep, rest * sdebug_sector_size,
2510                            sg_skip + ((num - rest) * sdebug_sector_size),
2511                            do_write);
2512        }
2513
2514        return ret;
2515}
2516
2517/* If fake_store(lba,num) compares equal to the first half of arr, then copy
2518 * the second (write) half of arr into fake_store(lba,num) and return true. If
2519 * the comparison fails then return false. */
2520static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2521{
2522        bool res;
2523        u64 block, rest = 0;
2524        u32 store_blks = sdebug_store_sectors;
2525        u32 lb_size = sdebug_sector_size;
2526
2527        block = do_div(lba, store_blks);
2528        if (block + num > store_blks)
2529                rest = block + num - store_blks;
2530
2531        res = !memcmp(fake_storep + (block * lb_size), arr,
2532                      (num - rest) * lb_size);
2533        if (!res)
2534                return res;
2535        if (rest)
2536                res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2537                              rest * lb_size);
2538        if (!res)
2539                return res;
2540        arr += num * lb_size;
2541        memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2542        if (rest)
2543                memcpy(fake_storep, arr + ((num - rest) * lb_size),
2544                       rest * lb_size);
2545        return res;
2546}
2547
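    /* Compute the guard for one block: IP checksum when sdebug_guard is set,
     * otherwise the T10 DIF CRC. */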
2548static __be16 dif_compute_csum(const void *buf, int len)
2549{
2550        __be16 csum;
2551
2552        if (sdebug_guard)
2553                csum = (__force __be16)ip_compute_csum(buf, len);
2554        else
2555                csum = cpu_to_be16(crc_t10dif(buf, len));
2556
2557        return csum;
2558}
2559
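    /* Verify one protection tuple against the block data. Returns 0 on
     * success, 0x01 on a guard check failure, 0x03 on a reference tag
     * mismatch. */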
2560static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2561                      sector_t sector, u32 ei_lba)
2562{
2563        __be16 csum = dif_compute_csum(data, sdebug_sector_size);
2564
2565        if (sdt->guard_tag != csum) {
2566                pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2567                        (unsigned long)sector,
2568                        be16_to_cpu(sdt->guard_tag),
2569                        be16_to_cpu(csum));
2570                return 0x01;
2571        }
2572        if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2573            be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2574                pr_err("REF check failed on sector %lu\n",
2575                        (unsigned long)sector);
2576                return 0x03;
2577        }
2578        if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2579            be32_to_cpu(sdt->ref_tag) != ei_lba) {
2580                pr_err("REF check failed on sector %lu\n",
2581                        (unsigned long)sector);
2582                return 0x03;
2583        }
2584        return 0;
2585}
2586
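    /* Copy protection information between dif_storep and the command's
     * protection scatter-gather list, wrapping at the end of the store. */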
2587static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2588                          unsigned int sectors, bool read)
2589{
2590        size_t resid;
2591        void *paddr;
2592        const void *dif_store_end = dif_storep + sdebug_store_sectors;
2593        struct sg_mapping_iter miter;
2594
2595        /* Bytes of protection data to copy into sgl */
2596        resid = sectors * sizeof(*dif_storep);
2597
2598        sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2599                        scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2600                        (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2601
2602        while (sg_miter_next(&miter) && resid > 0) {
2603                size_t len = min(miter.length, resid);
2604                void *start = dif_store(sector);
2605                size_t rest = 0;
2606
2607                if (dif_store_end < start + len)
2608                        rest = start + len - dif_store_end;
2609
2610                paddr = miter.addr;
2611
2612                if (read)
2613                        memcpy(paddr, start, len - rest);
2614                else
2615                        memcpy(start, paddr, len - rest);
2616
2617                if (rest) {
2618                        if (read)
2619                                memcpy(paddr + len - rest, dif_storep, rest);
2620                        else
2621                                memcpy(dif_storep, paddr + len - rest, rest);
2622                }
2623
2624                sector += len / sizeof(*dif_storep);
2625                resid -= len;
2626        }
2627        sg_miter_stop(&miter);
2628}
2629
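    /* Verify the stored protection tuples for a read (skipping the 0xffff
     * app tag escape) and, if all pass, copy them to the protection sgl. */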
2630static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2631                            unsigned int sectors, u32 ei_lba)
2632{
2633        unsigned int i;
2634        struct t10_pi_tuple *sdt;
2635        sector_t sector;
2636
2637        for (i = 0; i < sectors; i++, ei_lba++) {
2638                int ret;
2639
2640                sector = start_sec + i;
2641                sdt = dif_store(sector);
2642
2643                if (sdt->app_tag == cpu_to_be16(0xffff))
2644                        continue;
2645
2646                ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2647                if (ret) {
2648                        dif_errors++;
2649                        return ret;
2650                }
2651        }
2652
2653        dif_copy_prot(SCpnt, start_sec, sectors, true);
2654        dix_reads++;
2655
2656        return 0;
2657}
2658
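    /* Handle READ(6/10/12/16/32) and the read half of XDWRITEREAD(10). */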
2659static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2660{
2661        u8 *cmd = scp->cmnd;
2662        struct sdebug_queued_cmd *sqcp;
2663        u64 lba;
2664        u32 num;
2665        u32 ei_lba;
2666        unsigned long iflags;
2667        int ret;
2668        bool check_prot;
2669
2670        switch (cmd[0]) {
2671        case READ_16:
2672                ei_lba = 0;
2673                lba = get_unaligned_be64(cmd + 2);
2674                num = get_unaligned_be32(cmd + 10);
2675                check_prot = true;
2676                break;
2677        case READ_10:
2678                ei_lba = 0;
2679                lba = get_unaligned_be32(cmd + 2);
2680                num = get_unaligned_be16(cmd + 7);
2681                check_prot = true;
2682                break;
2683        case READ_6:
2684                ei_lba = 0;
2685                lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2686                      (u32)(cmd[1] & 0x1f) << 16;
2687                num = (0 == cmd[4]) ? 256 : cmd[4];
2688                check_prot = true;
2689                break;
2690        case READ_12:
2691                ei_lba = 0;
2692                lba = get_unaligned_be32(cmd + 2);
2693                num = get_unaligned_be32(cmd + 6);
2694                check_prot = true;
2695                break;
2696        case XDWRITEREAD_10:
2697                ei_lba = 0;
2698                lba = get_unaligned_be32(cmd + 2);
2699                num = get_unaligned_be16(cmd + 7);
2700                check_prot = false;
2701                break;
2702        default:        /* assume READ(32) */
2703                lba = get_unaligned_be64(cmd + 12);
2704                ei_lba = get_unaligned_be32(cmd + 20);
2705                num = get_unaligned_be32(cmd + 28);
2706                check_prot = false;
2707                break;
2708        }
2709        if (unlikely(have_dif_prot && check_prot)) {
2710                if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2711                    (cmd[1] & 0xe0)) {
2712                        mk_sense_invalid_opcode(scp);
2713                        return check_condition_result;
2714                }
2715                if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
2716                     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
2717                    (cmd[1] & 0xe0) == 0)
2718                        sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2719                                    "to DIF device\n");
2720        }
2721        if (unlikely(sdebug_any_injecting_opt)) {
2722                sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
2723
2724                if (sqcp) {
2725                        if (sqcp->inj_short)
2726                                num /= 2;
2727                }
2728        } else
2729                sqcp = NULL;
2730
2731        /* inline check_device_access_params() */
2732        if (unlikely(lba + num > sdebug_capacity)) {
2733                mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2734                return check_condition_result;
2735        }
2736        /* transfer length excessive (tie in to block limits VPD page) */
2737        if (unlikely(num > sdebug_store_sectors)) {
2738                /* needs work to find which cdb byte 'num' comes from */
2739                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2740                return check_condition_result;
2741        }
2742
2743        if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
2744                     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
2745                     ((lba + num) > sdebug_medium_error_start))) {
2746                /* claim unrecoverable read error */
2747                mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2748                /* set info field and valid bit for fixed descriptor */
2749                if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2750                        scp->sense_buffer[0] |= 0x80;   /* Valid bit */
2751                        ret = (lba < OPT_MEDIUM_ERR_ADDR)
2752                              ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2753                        put_unaligned_be32(ret, scp->sense_buffer + 3);
2754                }
2755                scsi_set_resid(scp, scsi_bufflen(scp));
2756                return check_condition_result;
2757        }
2758
2759        read_lock_irqsave(&atomic_rw, iflags);
2760
2761        /* DIX + T10 DIF */
2762        if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
2763                int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2764
2765                if (prot_ret) {
2766                        read_unlock_irqrestore(&atomic_rw, iflags);
2767                        mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2768                        return illegal_condition_result;
2769                }
2770        }
2771
2772        ret = do_device_access(scp, 0, lba, num, false);
2773        read_unlock_irqrestore(&atomic_rw, iflags);
2774        if (unlikely(ret == -1))
2775                return DID_ERROR << 16;
2776
2777        scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2778
2779        if (unlikely(sqcp)) {
2780                if (sqcp->inj_recovered) {
2781                        mk_sense_buffer(scp, RECOVERED_ERROR,
2782                                        THRESHOLD_EXCEEDED, 0);
2783                        return check_condition_result;
2784                } else if (sqcp->inj_transport) {
2785                        mk_sense_buffer(scp, ABORTED_COMMAND,
2786                                        TRANSPORT_PROBLEM, ACK_NAK_TO);
2787                        return check_condition_result;
2788                } else if (sqcp->inj_dif) {
2789                        /* Logical block guard check failed */
2790                        mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2791                        return illegal_condition_result;
2792                } else if (sqcp->inj_dix) {
2793                        mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2794                        return illegal_condition_result;
2795                }
2796        }
2797        return 0;
2798}
2799
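    /* Hex/ASCII dump of one logical block, used when a protection check
     * fails. */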
2800static void dump_sector(unsigned char *buf, int len)
2801{
2802        int i, j, n;
2803
2804        pr_err(">>> Sector Dump <<<\n");
2805        for (i = 0 ; i < len ; i += 16) {
2806                char b[128];
2807
2808                for (j = 0, n = 0; j < 16; j++) {
2809                        unsigned char c = buf[i+j];
2810
2811                        if (c >= 0x20 && c < 0x7e)
2812                                n += scnprintf(b + n, sizeof(b) - n,
2813                                               " %c ", buf[i+j]);
2814                        else
2815                                n += scnprintf(b + n, sizeof(b) - n,
2816                                               "%02x ", buf[i+j]);
2817                }
2818                pr_err("%04d: %s\n", i, b);
2819        }
2820}
2821
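    /* Walk the data-out and protection scatter-gather lists in step,
     * verifying each tuple before the protection data is stored. */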
2822static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2823                             unsigned int sectors, u32 ei_lba)
2824{
2825        int ret;
2826        struct t10_pi_tuple *sdt;
2827        void *daddr;
2828        sector_t sector = start_sec;
2829        int ppage_offset;
2830        int dpage_offset;
2831        struct sg_mapping_iter diter;
2832        struct sg_mapping_iter piter;
2833
2834        BUG_ON(scsi_sg_count(SCpnt) == 0);
2835        BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2836
2837        sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2838                        scsi_prot_sg_count(SCpnt),
2839                        SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2840        sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2841                        SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2842
2843        /* For each protection page */
2844        while (sg_miter_next(&piter)) {
2845                dpage_offset = 0;
2846                if (WARN_ON(!sg_miter_next(&diter))) {
2847                        ret = 0x01;
2848                        goto out;
2849                }
2850
2851                for (ppage_offset = 0; ppage_offset < piter.length;
2852                     ppage_offset += sizeof(struct t10_pi_tuple)) {
2853                        /* If we're at the end of the current
2854                         * data page advance to the next one
2855                         */
2856                        if (dpage_offset >= diter.length) {
2857                                if (WARN_ON(!sg_miter_next(&diter))) {
2858                                        ret = 0x01;
2859                                        goto out;
2860                                }
2861                                dpage_offset = 0;
2862                        }
2863
2864                        sdt = piter.addr + ppage_offset;
2865                        daddr = diter.addr + dpage_offset;
2866
2867                        ret = dif_verify(sdt, daddr, sector, ei_lba);
2868                        if (ret) {
2869                                dump_sector(daddr, sdebug_sector_size);
2870                                goto out;
2871                        }
2872
2873                        sector++;
2874                        ei_lba++;
2875                        dpage_offset += sdebug_sector_size;
2876                }
2877                diter.consumed = dpage_offset;
2878                sg_miter_stop(&diter);
2879        }
2880        sg_miter_stop(&piter);
2881
2882        dif_copy_prot(SCpnt, start_sec, sectors, false);
2883        dix_writes++;
2884
2885        return 0;
2886
2887out:
2888        dif_errors++;
2889        sg_miter_stop(&diter);
2890        sg_miter_stop(&piter);
2891        return ret;
2892}
2893
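    /* Translate an LBA to its provisioning bitmap index (and back below),
     * accounting for the unmap granularity and alignment. */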
2894static unsigned long lba_to_map_index(sector_t lba)
2895{
2896        if (sdebug_unmap_alignment)
2897                lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2898        sector_div(lba, sdebug_unmap_granularity);
2899        return lba;
2900}
2901
2902static sector_t map_index_to_lba(unsigned long index)
2903{
2904        sector_t lba = index * sdebug_unmap_granularity;
2905
2906        if (sdebug_unmap_alignment)
2907                lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2908        return lba;
2909}
2910
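    /* Return whether lba is mapped and, via *num, the length of the run of
     * blocks (capped at the store size) sharing that state. */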
2911static unsigned int map_state(sector_t lba, unsigned int *num)
2912{
2913        sector_t end;
2914        unsigned int mapped;
2915        unsigned long index;
2916        unsigned long next;
2917
2918        index = lba_to_map_index(lba);
2919        mapped = test_bit(index, map_storep);
2920
2921        if (mapped)
2922                next = find_next_zero_bit(map_storep, map_size, index);
2923        else
2924                next = find_next_bit(map_storep, map_size, index);
2925
2926        end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2927        *num = end - lba;
2928        return mapped;
2929}
2930
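    /* Mark the provisioning bitmap bits covering lba..lba+len-1 as mapped. */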
2931static void map_region(sector_t lba, unsigned int len)
2932{
2933        sector_t end = lba + len;
2934
2935        while (lba < end) {
2936                unsigned long index = lba_to_map_index(lba);
2937
2938                if (index < map_size)
2939                        set_bit(index, map_storep);
2940
2941                lba = map_index_to_lba(index + 1);
2942        }
2943}
2944
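    /* Clear the bits for fully covered, aligned granules and scrub the
     * corresponding data (per LBPRZ) and protection information. */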
2945static void unmap_region(sector_t lba, unsigned int len)
2946{
2947        sector_t end = lba + len;
2948
2949        while (lba < end) {
2950                unsigned long index = lba_to_map_index(lba);
2951
2952                if (lba == map_index_to_lba(index) &&
2953                    lba + sdebug_unmap_granularity <= end &&
2954                    index < map_size) {
2955                        clear_bit(index, map_storep);
2956                        if (sdebug_lbprz) {  /* LBPRZ=1: zeros, LBPRZ=2: 0xff bytes */
2957                                memset(fake_storep +
2958                                       lba * sdebug_sector_size,
2959                                       (sdebug_lbprz & 1) ? 0 : 0xff,
2960                                       sdebug_sector_size *
2961                                       sdebug_unmap_granularity);
2962                        }
2963                        if (dif_storep) {
2964                                memset(dif_storep + lba, 0xff,
2965                                       sizeof(*dif_storep) *
2966                                       sdebug_unmap_granularity);
2967                        }
2968                }
2969                lba = map_index_to_lba(index + 1);
2970        }
2971}
2972
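    /* Handle WRITE(6/10/12/16/32) and the write half of XDWRITEREAD(10). */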
2973static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2974{
2975        u8 *cmd = scp->cmnd;
2976        u64 lba;
2977        u32 num;
2978        u32 ei_lba;
2979        unsigned long iflags;
2980        int ret;
2981        bool check_prot;
2982
2983        switch (cmd[0]) {
2984        case WRITE_16:
2985                ei_lba = 0;
2986                lba = get_unaligned_be64(cmd + 2);
2987                num = get_unaligned_be32(cmd + 10);
2988                check_prot = true;
2989                break;
2990        case WRITE_10:
2991                ei_lba = 0;
2992                lba = get_unaligned_be32(cmd + 2);
2993                num = get_unaligned_be16(cmd + 7);
2994                check_prot = true;
2995                break;
2996        case WRITE_6:
2997                ei_lba = 0;
2998                lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2999                      (u32)(cmd[1] & 0x1f) << 16;
3000                num = (0 == cmd[4]) ? 256 : cmd[4];
3001                check_prot = true;
3002                break;
3003        case WRITE_12:
3004                ei_lba = 0;
3005                lba = get_unaligned_be32(cmd + 2);
3006                num = get_unaligned_be32(cmd + 6);
3007                check_prot = true;
3008                break;
3009        case 0x53:      /* XDWRITEREAD(10) */
3010                ei_lba = 0;
3011                lba = get_unaligned_be32(cmd + 2);
3012                num = get_unaligned_be16(cmd + 7);
3013                check_prot = false;
3014                break;
3015        default:        /* assume WRITE(32) */
3016                lba = get_unaligned_be64(cmd + 12);
3017                ei_lba = get_unaligned_be32(cmd + 20);
3018                num = get_unaligned_be32(cmd + 28);
3019                check_prot = false;
3020                break;
3021        }
3022        if (unlikely(have_dif_prot && check_prot)) {
3023                if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3024                    (cmd[1] & 0xe0)) {
3025                        mk_sense_invalid_opcode(scp);
3026                        return check_condition_result;
3027                }
3028                if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3029                     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3030                    (cmd[1] & 0xe0) == 0)
3031                        sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3032                                    "to DIF device\n");
3033        }
3034
3035        /* inline check_device_access_params() */
3036        if (unlikely(lba + num > sdebug_capacity)) {
3037                mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3038                return check_condition_result;
3039        }
3040        /* transfer length excessive (tie in to block limits VPD page) */
3041        if (unlikely(num > sdebug_store_sectors)) {
3042                /* needs work to find which cdb byte 'num' comes from */
3043                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3044                return check_condition_result;
3045        }
3046
3047        write_lock_irqsave(&atomic_rw, iflags);
3048
3049        /* DIX + T10 DIF */
3050        if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3051                int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3052
3053                if (prot_ret) {
3054                        write_unlock_irqrestore(&atomic_rw, iflags);
3055                        mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3056                        return illegal_condition_result;
3057                }
3058        }
3059
3060        ret = do_device_access(scp, 0, lba, num, true);
3061        if (unlikely(scsi_debug_lbp()))
3062                map_region(lba, num);
3063        write_unlock_irqrestore(&atomic_rw, iflags);
3064        if (unlikely(-1 == ret))
3065                return DID_ERROR << 16;
3066        else if (unlikely(sdebug_verbose &&
3067                          (ret < (num * sdebug_sector_size))))
3068                sdev_printk(KERN_INFO, scp->device,
3069                            "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3070                            my_name, num * sdebug_sector_size, ret);
3071
3072        if (unlikely(sdebug_any_injecting_opt)) {
3073                struct sdebug_queued_cmd *sqcp =
3074                                (struct sdebug_queued_cmd *)scp->host_scribble;
3075
3076                if (sqcp) {
3077                        if (sqcp->inj_recovered) {
3078                                mk_sense_buffer(scp, RECOVERED_ERROR,
3079                                                THRESHOLD_EXCEEDED, 0);
3080                                return check_condition_result;
3081                        } else if (sqcp->inj_dif) {
3082                                /* Logical block guard check failed */
3083                                mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3084                                return illegal_condition_result;
3085                        } else if (sqcp->inj_dix) {
3086                                mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3087                                return illegal_condition_result;
3088                        }
3089                }
3090        }
3091        return 0;
3092}
3093
3094/*
3095 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3096 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3097 */
3098static int resp_write_scat(struct scsi_cmnd *scp,
3099                           struct sdebug_dev_info *devip)
3100{
3101        u8 *cmd = scp->cmnd;
3102        u8 *lrdp = NULL;
3103        u8 *up;
3104        u8 wrprotect;
3105        u16 lbdof, num_lrd, k;
3106        u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3107        u32 lb_size = sdebug_sector_size;
3108        u32 ei_lba;
3109        u64 lba;
3110        unsigned long iflags;
3111        int ret, res;
3112        bool is_16;
3113        static const u32 lrd_size = 32; /* + parameter list header size */
3114
3115        if (cmd[0] == VARIABLE_LENGTH_CMD) {
3116                is_16 = false;
3117                wrprotect = (cmd[10] >> 5) & 0x7;
3118                lbdof = get_unaligned_be16(cmd + 12);
3119                num_lrd = get_unaligned_be16(cmd + 16);
3120                bt_len = get_unaligned_be32(cmd + 28);
3121        } else {        /* that leaves WRITE SCATTERED(16) */
3122                is_16 = true;
3123                wrprotect = (cmd[2] >> 5) & 0x7;
3124                lbdof = get_unaligned_be16(cmd + 4);
3125                num_lrd = get_unaligned_be16(cmd + 8);
3126                bt_len = get_unaligned_be32(cmd + 10);
3127                if (unlikely(have_dif_prot)) {
3128                        if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3129                            wrprotect) {
3130                                mk_sense_invalid_opcode(scp);
3131                                return illegal_condition_result;
3132                        }
3133                        if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3134                             sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3135                             wrprotect == 0)
3136                                sdev_printk(KERN_ERR, scp->device,
3137                                            "Unprotected WR to DIF device\n");
3138                }
3139        }
3140        if ((num_lrd == 0) || (bt_len == 0))
3141                return 0;       /* T10 says these do-nothings are not errors */
3142        if (lbdof == 0) {
3143                if (sdebug_verbose)
3144                        sdev_printk(KERN_INFO, scp->device,
3145                                "%s: %s: LB Data Offset field bad\n",
3146                                my_name, __func__);
3147                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3148                return illegal_condition_result;
3149        }
3150        lbdof_blen = lbdof * lb_size;
3151        if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3152                if (sdebug_verbose)
3153                        sdev_printk(KERN_INFO, scp->device,
3154                                "%s: %s: LBA range descriptors don't fit\n",
3155                                my_name, __func__);
3156                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3157                return illegal_condition_result;
3158        }
3159        lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3160        if (lrdp == NULL)
3161                return SCSI_MLQUEUE_HOST_BUSY;
3162        if (sdebug_verbose)
3163                sdev_printk(KERN_INFO, scp->device,
3164                        "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3165                        my_name, __func__, lbdof_blen);
3166        res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3167        if (res == -1) {
3168                ret = DID_ERROR << 16;
3169                goto err_out;
3170        }
3171
3172        write_lock_irqsave(&atomic_rw, iflags);
3173        sg_off = lbdof_blen;
3174        /* Spec says the Buffer Transfer Length field counts LBs in the data-out */
3175        cum_lb = 0;
3176        for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3177                lba = get_unaligned_be64(up + 0);
3178                num = get_unaligned_be32(up + 8);
3179                if (sdebug_verbose)
3180                        sdev_printk(KERN_INFO, scp->device,
3181                                "%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3182                                my_name, __func__, k, lba, num, sg_off);
3183                if (num == 0)
3184                        continue;
3185                ret = check_device_access_params(scp, lba, num);
3186                if (ret)
3187                        goto err_out_unlock;
3188                num_by = num * lb_size;
3189                ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3190
3191                if ((cum_lb + num) > bt_len) {
3192                        if (sdebug_verbose)
3193                                sdev_printk(KERN_INFO, scp->device,
3194                                    "%s: %s: sum of blocks > data provided\n",
3195                                    my_name, __func__);
3196                        mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3197                                        0);
3198                        ret = illegal_condition_result;
3199                        goto err_out_unlock;
3200                }
3201
3202                /* DIX + T10 DIF */
3203                if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3204                        int prot_ret = prot_verify_write(scp, lba, num,
3205                                                         ei_lba);
3206
3207                        if (prot_ret) {
3208                                mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3209                                                prot_ret);
3210                                ret = illegal_condition_result;
3211                                goto err_out_unlock;
3212                        }
3213                }
3214
3215                ret = do_device_access(scp, sg_off, lba, num, true);
3216                if (unlikely(scsi_debug_lbp()))
3217                        map_region(lba, num);
3218                if (unlikely(-1 == ret)) {
3219                        ret = DID_ERROR << 16;
3220                        goto err_out_unlock;
3221                } else if (unlikely(sdebug_verbose && (ret < num_by)))
3222                        sdev_printk(KERN_INFO, scp->device,
3223                            "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3224                            my_name, num_by, ret);
3225
3226                if (unlikely(sdebug_any_injecting_opt)) {
3227                        struct sdebug_queued_cmd *sqcp =
3228                                (struct sdebug_queued_cmd *)scp->host_scribble;
3229
3230                        if (sqcp) {
3231                                if (sqcp->inj_recovered) {
3232                                        mk_sense_buffer(scp, RECOVERED_ERROR,
3233                                                        THRESHOLD_EXCEEDED, 0);
3234                                        ret = check_condition_result;
3235                                        goto err_out_unlock;
3236                                } else if (sqcp->inj_dif) {
3237                                        /* Logical block guard check failed */
3238                                        mk_sense_buffer(scp, ABORTED_COMMAND,
3239                                                        0x10, 1);
3240                                        ret = illegal_condition_result;
3241                                        goto err_out_unlock;
3242                                } else if (sqcp->inj_dix) {
3243                                        mk_sense_buffer(scp, ILLEGAL_REQUEST,
3244                                                        0x10, 1);
3245                                        ret = illegal_condition_result;
3246                                        goto err_out_unlock;
3247                                }
3248                        }
3249                }
3250                sg_off += num_by;
3251                cum_lb += num;
3252        }
3253        ret = 0;
3254err_out_unlock:
3255        write_unlock_irqrestore(&atomic_rw, iflags);
3256err_out:
3257        kfree(lrdp);
3258        return ret;
3259}
3260
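    /* Common WRITE SAME handler: either unmap the range or replicate one
     * fetched (or zeroed, when NDOB is set) logical block across it. */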
3261static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3262                           u32 ei_lba, bool unmap, bool ndob)
3263{
3264        unsigned long iflags;
3265        unsigned long long i;
3266        int ret;
3267        u64 lba_off;
3268
3269        ret = check_device_access_params(scp, lba, num);
3270        if (ret)
3271                return ret;
3272
3273        write_lock_irqsave(&atomic_rw, iflags);
3274
3275        if (unmap && scsi_debug_lbp()) {
3276                unmap_region(lba, num);
3277                goto out;
3278        }
3279
3280        lba_off = lba * sdebug_sector_size;
3281        /* if ndob then zero 1 logical block, else fetch 1 logical block */
3282        if (ndob) {
3283                memset(fake_storep + lba_off, 0, sdebug_sector_size);
3284                ret = 0;
3285        } else
3286                ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
3287                                          sdebug_sector_size);
3288
3289        if (-1 == ret) {
3290                write_unlock_irqrestore(&atomic_rw, iflags);
3291                return DID_ERROR << 16;
3292        } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
3293                sdev_printk(KERN_INFO, scp->device,
3294                            "%s: %s: lb size=%u, IO sent=%d bytes\n",
3295                            my_name, "write same",
3296                            sdebug_sector_size, ret);
3297
3298        /* Copy first sector to remaining blocks */
3299        for (i = 1 ; i < num ; i++)
3300                memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
3301                       fake_storep + lba_off,
3302                       sdebug_sector_size);
3303
3304        if (scsi_debug_lbp())
3305                map_region(lba, num);
3306out:
3307        write_unlock_irqrestore(&atomic_rw, iflags);
3308
3309        return 0;
3310}
3311
3312static int resp_write_same_10(struct scsi_cmnd *scp,
3313                              struct sdebug_dev_info *devip)
3314{
3315        u8 *cmd = scp->cmnd;
3316        u32 lba;
3317        u16 num;
3318        u32 ei_lba = 0;
3319        bool unmap = false;
3320
3321        if (cmd[1] & 0x8) {
3322                if (sdebug_lbpws10 == 0) {
3323                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3324                        return check_condition_result;
3325                } else
3326                        unmap = true;
3327        }
3328        lba = get_unaligned_be32(cmd + 2);
3329        num = get_unaligned_be16(cmd + 7);
3330        if (num > sdebug_write_same_length) {
3331                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3332                return check_condition_result;
3333        }
3334        return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3335}
3336
3337static int resp_write_same_16(struct scsi_cmnd *scp,
3338                              struct sdebug_dev_info *devip)
3339{
3340        u8 *cmd = scp->cmnd;
3341        u64 lba;
3342        u32 num;
3343        u32 ei_lba = 0;
3344        bool unmap = false;
3345        bool ndob = false;
3346
3347        if (cmd[1] & 0x8) {     /* UNMAP */
3348                if (sdebug_lbpws == 0) {
3349                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3350                        return check_condition_result;
3351                } else
3352                        unmap = true;
3353        }
3354        if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3355                ndob = true;
3356        lba = get_unaligned_be64(cmd + 2);
3357        num = get_unaligned_be32(cmd + 10);
3358        if (num > sdebug_write_same_length) {
3359                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3360                return check_condition_result;
3361        }
3362        return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3363}
3364
3365/* Note the mode field is in the same position as the (lower) service action
3366 * field. For the Report supported operation codes command, SPC-4 suggests
3367 * each mode of this command should be reported separately; left as future work. */
3368static int resp_write_buffer(struct scsi_cmnd *scp,
3369                             struct sdebug_dev_info *devip)
3370{
3371        u8 *cmd = scp->cmnd;
3372        struct scsi_device *sdp = scp->device;
3373        struct sdebug_dev_info *dp;
3374        u8 mode;
3375
3376        mode = cmd[1] & 0x1f;
3377        switch (mode) {
3378        case 0x4:       /* download microcode (MC) and activate (ACT) */
3379                /* set UAs on this device only */
3380                set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3381                set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3382                break;
3383        case 0x5:       /* download MC, save and ACT */
3384                set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3385                break;
3386        case 0x6:       /* download MC with offsets and ACT */
3387                /* set UAs on most devices (LUs) in this target */
3388                list_for_each_entry(dp,
3389                                    &devip->sdbg_host->dev_info_list,
3390                                    dev_list)
3391                        if (dp->target == sdp->id) {
3392                                set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3393                                if (devip != dp)
3394                                        set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3395                                                dp->uas_bm);
3396                        }
3397                break;
3398        case 0x7:       /* download MC with offsets, save, and ACT */
3399                /* set UA on all devices (LUs) in this target */
3400                list_for_each_entry(dp,
3401                                    &devip->sdbg_host->dev_info_list,
3402                                    dev_list)
3403                        if (dp->target == sdp->id)
3404                                set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3405                                        dp->uas_bm);
3406                break;
3407        default:
3408                /* do nothing for this command for other mode values */
3409                break;
3410        }
3411        return 0;
3412}
3413
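    /* COMPARE AND WRITE: fetch 2*num blocks from data-out, compare the first
     * half with the store and, on a match, write back the second half. */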
3414static int resp_comp_write(struct scsi_cmnd *scp,
3415                           struct sdebug_dev_info *devip)
3416{
3417        u8 *cmd = scp->cmnd;
3418        u8 *arr;
3419        u8 *fake_storep_hold;
3420        u64 lba;
3421        u32 dnum;
3422        u32 lb_size = sdebug_sector_size;
3423        u8 num;
3424        unsigned long iflags;
3425        int ret;
3426        int retval = 0;
3427
3428        lba = get_unaligned_be64(cmd + 2);
3429        num = cmd[13];          /* 1 to a maximum of 255 logical blocks */
3430        if (0 == num)
3431                return 0;       /* degenerate case, not an error */
3432        if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3433            (cmd[1] & 0xe0)) {
3434                mk_sense_invalid_opcode(scp);
3435                return check_condition_result;
3436        }
3437        if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3438             sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3439            (cmd[1] & 0xe0) == 0)
3440                sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3441                            "to DIF device\n");
3442
3443        /* inline check_device_access_params() */
3444        if (lba + num > sdebug_capacity) {
3445                mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3446                return check_condition_result;
3447        }
3448        /* transfer length excessive (tie in to block limits VPD page) */
3449        if (num > sdebug_store_sectors) {
3450                /* needs work to find which cdb byte 'num' comes from */
3451                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3452                return check_condition_result;
3453        }
3454        dnum = 2 * num;
3455        arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3456        if (NULL == arr) {
3457                mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3458                                INSUFF_RES_ASCQ);
3459                return check_condition_result;
3460        }
3461
3462        write_lock_irqsave(&atomic_rw, iflags);
3463
3464        /* trick do_device_access() to fetch both compare and write buffers
3465         * from data-in into arr. Safe (atomic) since write_lock held. */
3466        fake_storep_hold = fake_storep;
3467        fake_storep = arr;
3468        ret = do_device_access(scp, 0, 0, dnum, true);
3469        fake_storep = fake_storep_hold;
3470        if (ret == -1) {
3471                retval = DID_ERROR << 16;
3472                goto cleanup;
3473        } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3474                sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3475                            "indicated=%u, IO sent=%d bytes\n", my_name,
3476                            dnum * lb_size, ret);
3477        if (!comp_write_worker(lba, num, arr)) {
3478                mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3479                retval = check_condition_result;
3480                goto cleanup;
3481        }
3482        if (scsi_debug_lbp())
3483                map_region(lba, num);
3484cleanup:
3485        write_unlock_irqrestore(&atomic_rw, iflags);
3486        kfree(arr);
3487        return retval;
3488}
3489
3490struct unmap_block_desc {
3491        __be64  lba;
3492        __be32  blocks;
3493        __be32  __reserved;
3494};
3495
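    /* UNMAP: parse the block descriptor list and deallocate each range. */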
3496static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3497{
3498        unsigned char *buf;
3499        struct unmap_block_desc *desc;
3500        unsigned int i, payload_len, descriptors;
3501        int ret;
3502        unsigned long iflags;
3503
3504
3505        if (!scsi_debug_lbp())
3506                return 0;       /* fib and say it's done */
3507        payload_len = get_unaligned_be16(scp->cmnd + 7);
3508        BUG_ON(scsi_bufflen(scp) != payload_len);
3509
3510        descriptors = (payload_len - 8) / 16;
3511        if (descriptors > sdebug_unmap_max_desc) {
3512                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3513                return check_condition_result;
3514        }
3515
3516        buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3517        if (!buf) {
3518                mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3519                                INSUFF_RES_ASCQ);
3520                return check_condition_result;
3521        }
3522
3523        scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3524
3525        BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3526        BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3527
3528        desc = (void *)&buf[8];
3529
3530        write_lock_irqsave(&atomic_rw, iflags);
3531
3532        for (i = 0 ; i < descriptors ; i++) {
3533                unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3534                unsigned int num = get_unaligned_be32(&desc[i].blocks);
3535
3536                ret = check_device_access_params(scp, lba, num);
3537                if (ret)
3538                        goto out;
3539
3540                unmap_region(lba, num);
3541        }
3542
3543        ret = 0;
3544
3545out:
3546        write_unlock_irqrestore(&atomic_rw, iflags);
3547        kfree(buf);
3548
3549        return ret;
3550}
3551
3552#define SDEBUG_GET_LBA_STATUS_LEN 32
3553
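    /* GET LBA STATUS: report the mapped/deallocated state of the run of
     * blocks starting at the given LBA. */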
3554static int resp_get_lba_status(struct scsi_cmnd *scp,
3555                               struct sdebug_dev_info *devip)
3556{
3557        u8 *cmd = scp->cmnd;
3558        u64 lba;
3559        u32 alloc_len, mapped, num;
3560        u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3561        int ret;
3562
3563        lba = get_unaligned_be64(cmd + 2);
3564        alloc_len = get_unaligned_be32(cmd + 10);
3565
3566        if (alloc_len < 24)
3567                return 0;
3568
3569        ret = check_device_access_params(scp, lba, 1);
3570        if (ret)
3571                return ret;
3572
3573        if (scsi_debug_lbp())
3574                mapped = map_state(lba, &num);
3575        else {
3576                mapped = 1;
3577                /* following just in case virtual_gb changed */
3578                sdebug_capacity = get_sdebug_capacity();
3579                if (sdebug_capacity - lba <= 0xffffffff)
3580                        num = sdebug_capacity - lba;
3581                else
3582                        num = 0xffffffff;
3583        }
3584
3585        memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3586        put_unaligned_be32(20, arr);            /* Parameter Data Length */
3587        put_unaligned_be64(lba, arr + 8);       /* LBA */
3588        put_unaligned_be32(num, arr + 16);      /* Number of blocks */
3589        arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
3590
3591        return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3592}
3593
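    /* SYNCHRONIZE CACHE(10/16): range check, then flag an immediate response
     * unless data was written since the last sync and IMMED is clear. */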
3594static int resp_sync_cache(struct scsi_cmnd *scp,
3595                           struct sdebug_dev_info *devip)
3596{
3597        int res = 0;
3598        u64 lba;
3599        u32 num_blocks;
3600        u8 *cmd = scp->cmnd;
3601
3602        if (cmd[0] == SYNCHRONIZE_CACHE) {      /* 10 byte cdb */
3603                lba = get_unaligned_be32(cmd + 2);
3604                num_blocks = get_unaligned_be16(cmd + 7);
3605        } else {                                /* SYNCHRONIZE_CACHE(16) */
3606                lba = get_unaligned_be64(cmd + 2);
3607                num_blocks = get_unaligned_be32(cmd + 10);
3608        }
3609        if (lba + num_blocks > sdebug_capacity) {
3610                mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3611                return check_condition_result;
3612        }
3613        if (!write_since_sync || cmd[1] & 0x2)
3614                res = SDEG_RES_IMMED_MASK;
3615        else            /* delay if write_since_sync and IMMED clear */
3616                write_since_sync = false;
3617        return res;
3618}
3619
3620#define RL_BUCKET_ELEMS 8
3621
3622/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3623 * (W-LUN), the normal Linux scanning logic does not associate it with a
3624 * device (e.g. /dev/sg7). The following magic will make that association:
3625 *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3626 * where <n> is a host number. If there are multiple targets in a host then
3627 * the above will associate a W-LUN to each target. To only get a W-LUN
3628 * for target 2, then use "echo '- 2 49409' > scan" .
3629 */
3630static int resp_report_luns(struct scsi_cmnd *scp,
3631                            struct sdebug_dev_info *devip)
3632{
3633        unsigned char *cmd = scp->cmnd;
3634        unsigned int alloc_len;
3635        unsigned char select_report;
3636        u64 lun;
3637        struct scsi_lun *lun_p;
3638        u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
3639        unsigned int lun_cnt;   /* normal LUN count (max: 256) */
3640        unsigned int wlun_cnt;  /* report luns W-LUN count */
3641        unsigned int tlun_cnt;  /* total LUN count */
3642        unsigned int rlen;      /* response length (in bytes) */
3643        int k, j, n, res;
3644        unsigned int off_rsp = 0;
3645        const int sz_lun = sizeof(struct scsi_lun);
3646
3647        clear_luns_changed_on_target(devip);
3648
3649        select_report = cmd[2];
3650        alloc_len = get_unaligned_be32(cmd + 6);
3651
3652        if (alloc_len < 4) {
3653                pr_err("alloc len too small %d\n", alloc_len);
3654                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
3655                return check_condition_result;
3656        }
3657
3658        switch (select_report) {
3659        case 0:         /* all LUNs apart from W-LUNs */
3660                lun_cnt = sdebug_max_luns;
3661                wlun_cnt = 0;
3662                break;
3663        case 1:         /* only W-LUNs */
3664                lun_cnt = 0;
3665                wlun_cnt = 1;
3666                break;
3667        case 2:         /* all LUNs */
3668                lun_cnt = sdebug_max_luns;
3669                wlun_cnt = 1;
3670                break;
3671        case 0x10:      /* only administrative LUs */
3672        case 0x11:      /* see SPC-5 */
3673        case 0x12:      /* only subsidiary LUs owned by referenced LU */
3674        default:
3675                pr_debug("select report invalid %d\n", select_report);
3676                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
3677                return check_condition_result;
3678        }
3679
3680        if (sdebug_no_lun_0 && (lun_cnt > 0))
3681                --lun_cnt;
3682
3683        tlun_cnt = lun_cnt + wlun_cnt;
3684        rlen = tlun_cnt * sz_lun;       /* excluding 8 byte header */
3685        scsi_set_resid(scp, scsi_bufflen(scp));
3686        pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
3687                 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
3688
3689        /* loops rely on response header and LUN entry having the same size (8) */
3690        lun = sdebug_no_lun_0 ? 1 : 0;
3691        for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
3692                memset(arr, 0, sizeof(arr));
3693                lun_p = (struct scsi_lun *)&arr[0];
3694                if (k == 0) {
3695                        put_unaligned_be32(rlen, &arr[0]);
3696                        ++lun_p;
3697                        j = 1;
3698                }
3699                for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
3700                        if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
3701                                break;
3702                        int_to_scsilun(lun++, lun_p);
3703                }
3704                if (j < RL_BUCKET_ELEMS)
3705                        break;
3706                n = j * sz_lun;
3707                res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
3708                if (res)
3709                        return res;
3710                off_rsp += n;
3711        }
3712        if (wlun_cnt) {
3713                int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
3714                ++j;
3715        }
3716        if (j > 0)
3717                res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
3718        return res;
3719}
3720
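    /* Fetch the command's data buffer and XOR it into the data-in
     * scatter-gather list. */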
3721static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3722                            unsigned int num, struct sdebug_dev_info *devip)
3723{
3724        int j;
3725        unsigned char *kaddr, *buf;
3726        unsigned int offset;
3727        struct scsi_data_buffer *sdb = scsi_in(scp);
3728        struct sg_mapping_iter miter;
3729
3730        /* it would be better to avoid this temporary buffer */
3731        buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3732        if (!buf) {
3733                mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3734                                INSUFF_RES_ASCQ);
3735                return check_condition_result;
3736        }
3737
3738        scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3739
3740        offset = 0;
3741        sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3742                        SG_MITER_ATOMIC | SG_MITER_TO_SG);
3743
3744        while (sg_miter_next(&miter)) {
3745                kaddr = miter.addr;
3746                for (j = 0; j < miter.length; j++)
3747                        *(kaddr + j) ^= *(buf + offset + j);
3748
3749                offset += miter.length;
3750        }
3751        sg_miter_stop(&miter);
3752        kfree(buf);
3753
3754        return 0;
3755}
3756
3757static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3758                               struct sdebug_dev_info *devip)
3759{
3760        u8 *cmd = scp->cmnd;
3761        u64 lba;
3762        u32 num;
3763        int errsts;
3764
3765        if (!scsi_bidi_cmnd(scp)) {
3766                mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3767                                INSUFF_RES_ASCQ);
3768                return check_condition_result;
3769        }
3770        errsts = resp_read_dt0(scp, devip);
3771        if (errsts)
3772                return errsts;
3773        if (!(cmd[1] & 0x4)) {          /* DISABLE_WRITE is not set */
3774                errsts = resp_write_dt0(scp, devip);
3775                if (errsts)
3776                        return errsts;
3777        }
3778        lba = get_unaligned_be32(cmd + 2);
3779        num = get_unaligned_be16(cmd + 7);
3780        return resp_xdwriteread(scp, lba, num, devip);
3781}
3782
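    /* Map a command to its submission queue via the block-mq hardware queue
     * index. */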
3783static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3784{
3785        u32 tag = blk_mq_unique_tag(cmnd->request);
3786        u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3787
3788        pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
3789        if (WARN_ON_ONCE(hwq >= submit_queues))
3790                hwq = 0;
3791        return sdebug_q_arr + hwq;
3792}
3793
3794/* Queued (deferred) command completions converge here. */
3795static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
3796{
3797        bool aborted = sd_dp->aborted;
3798        int qc_idx;
3799        int retiring = 0;
3800        unsigned long iflags;
3801        struct sdebug_queue *sqp;
3802        struct sdebug_queued_cmd *sqcp;
3803        struct scsi_cmnd *scp;
3804        struct sdebug_dev_info *devip;
3805
3806        sd_dp->defer_t = SDEB_DEFER_NONE;
3807        if (unlikely(aborted))
3808                sd_dp->aborted = false;
3809        qc_idx = sd_dp->qc_idx;
3810        sqp = sdebug_q_arr + sd_dp->sqa_idx;
3811        if (sdebug_statistics) {
3812                atomic_inc(&sdebug_completions);
3813                if (raw_smp_processor_id() != sd_dp->issuing_cpu)
3814                        atomic_inc(&sdebug_miss_cpus);
3815        }
3816        if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
3817                pr_err("wild qc_idx=%d\n", qc_idx);
3818                return;
3819        }
3820        spin_lock_irqsave(&sqp->qc_lock, iflags);
3821        sqcp = &sqp->qc_arr[qc_idx];
3822        scp = sqcp->a_cmnd;
3823        if (unlikely(scp == NULL)) {
3824                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3825                pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
3826                       sd_dp->sqa_idx, qc_idx);
3827                return;
3828        }
3829        devip = (struct sdebug_dev_info *)scp->device->hostdata;
3830        if (likely(devip))
3831                atomic_dec(&devip->num_in_q);
3832        else
3833                pr_err("devip=NULL\n");
3834        if (unlikely(atomic_read(&retired_max_queue) > 0))
3835                retiring = 1;
3836
3837        sqcp->a_cmnd = NULL;
3838        if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
3839                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3840                pr_err("Unexpected completion\n");
3841                return;
3842        }
3843
3844        if (unlikely(retiring)) {       /* user has reduced max_queue */
3845                int k, retval;
3846
3847                retval = atomic_read(&retired_max_queue);
3848                if (qc_idx >= retval) {
3849                        spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3850                        pr_err("index %d too large\n", retval);
3851                        return;
3852                }
3853                k = find_last_bit(sqp->in_use_bm, retval);
3854                if ((k < sdebug_max_queue) || (k == retval))
3855                        atomic_set(&retired_max_queue, 0);
3856                else
3857                        atomic_set(&retired_max_queue, k + 1);
3858        }
3859        spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3860        if (unlikely(aborted)) {
3861                if (sdebug_verbose)
3862                        pr_info("bypassing scsi_done() due to aborted cmd\n");
3863                return;
3864        }
3865        scp->scsi_done(scp); /* callback to mid level */
3866}
3867
3868/* When high resolution timer goes off this function is called. */
3869static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3870{
3871        struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3872                                                  hrt);
3873        sdebug_q_cmd_complete(sd_dp);
3874        return HRTIMER_NORESTART;
3875}
3876
3877/* When work queue schedules work, it calls this function. */
3878static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3879{
3880        struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3881                                                  ew.work);
3882        sdebug_q_cmd_complete(sd_dp);
3883}
3884
3885static bool got_shared_uuid;
3886static uuid_t shared_uuid;
3887
3888static struct sdebug_dev_info *sdebug_device_create(
3889                        struct sdebug_host_info *sdbg_host, gfp_t flags)
3890{
3891        struct sdebug_dev_info *devip;
3892
3893        devip = kzalloc(sizeof(*devip), flags);
3894        if (devip) {
3895                if (sdebug_uuid_ctl == 1)
3896                        uuid_gen(&devip->lu_name);
3897                else if (sdebug_uuid_ctl == 2) {
3898                        if (got_shared_uuid)
3899                                devip->lu_name = shared_uuid;
3900                        else {
3901                                uuid_gen(&shared_uuid);
3902                                got_shared_uuid = true;
3903                                devip->lu_name = shared_uuid;
3904                        }
3905                }
3906                devip->sdbg_host = sdbg_host;
3907                list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3908        }
3909        return devip;
3910}
3911
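/*
 * uuid_ctl semantics used above (see the uuid_ctl module parameter below):
 * 0 (default) leaves lu_name zeroed, 1 generates a fresh UUID per logical
 * unit, and 2 generates one UUID on first use and shares it across all LUs.
 */
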
3912static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3913{
3914        struct sdebug_host_info *sdbg_host;
3915        struct sdebug_dev_info *open_devip = NULL;
3916        struct sdebug_dev_info *devip;
3917
3918        sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3919        if (!sdbg_host) {
3920                pr_err("Host info NULL\n");
3921                return NULL;
3922        }
3923        list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3924                if ((devip->used) && (devip->channel == sdev->channel) &&
3925                    (devip->target == sdev->id) &&
3926                    (devip->lun == sdev->lun))
3927                        return devip;
3928                else {
3929                        if ((!devip->used) && (!open_devip))
3930                                open_devip = devip;
3931                }
3932        }
3933        if (!open_devip) { /* try and make a new one */
3934                open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3935                if (!open_devip) {
3936                        pr_err("out of memory at line %d\n", __LINE__);
3937                        return NULL;
3938                }
3939        }
3940
3941        open_devip->channel = sdev->channel;
3942        open_devip->target = sdev->id;
3943        open_devip->lun = sdev->lun;
3944        open_devip->sdbg_host = sdbg_host;
3945        atomic_set(&open_devip->num_in_q, 0);
3946        set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3947        open_devip->used = true;
3948        return open_devip;
3949}
3950
3951static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3952{
3953        if (sdebug_verbose)
3954                pr_info("slave_alloc <%u %u %u %llu>\n",
3955                       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3956        blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue);
3957        return 0;
3958}
3959
3960static int scsi_debug_slave_configure(struct scsi_device *sdp)
3961{
3962        struct sdebug_dev_info *devip =
3963                        (struct sdebug_dev_info *)sdp->hostdata;
3964
3965        if (sdebug_verbose)
3966                pr_info("slave_configure <%u %u %u %llu>\n",
3967                       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3968        if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3969                sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3970        if (devip == NULL) {
3971                devip = find_build_dev_info(sdp);
3972                if (devip == NULL)
3973                        return 1;  /* no resources, will be marked offline */
3974        }
3975        sdp->hostdata = devip;
3976        blk_queue_max_segment_size(sdp->request_queue, -1U);
3977        if (sdebug_no_uld)
3978                sdp->no_uld_attach = 1;
3979        config_cdb_len(sdp);
3980        return 0;
3981}
3982
3983static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3984{
3985        struct sdebug_dev_info *devip =
3986                (struct sdebug_dev_info *)sdp->hostdata;
3987
3988        if (sdebug_verbose)
3989                pr_info("slave_destroy <%u %u %u %llu>\n",
3990                       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3991        if (devip) {
3992                /* make this slot available for re-use */
3993                devip->used = false;
3994                sdp->hostdata = NULL;
3995        }
3996}
3997
3998static void stop_qc_helper(struct sdebug_defer *sd_dp,
3999                           enum sdeb_defer_type defer_t)
4000{
4001        if (!sd_dp)
4002                return;
4003        if (defer_t == SDEB_DEFER_HRT)
4004                hrtimer_cancel(&sd_dp->hrt);
4005        else if (defer_t == SDEB_DEFER_WQ)
4006                cancel_work_sync(&sd_dp->ew.work);
4007}
4008
4009/* If @cmnd is found, delete its timer or work queue and return true; else
4010   return false */
4011static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
4012{
4013        unsigned long iflags;
4014        int j, k, qmax, r_qmax;
4015        enum sdeb_defer_type l_defer_t;
4016        struct sdebug_queue *sqp;
4017        struct sdebug_queued_cmd *sqcp;
4018        struct sdebug_dev_info *devip;
4019        struct sdebug_defer *sd_dp;
4020
4021        for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4022                spin_lock_irqsave(&sqp->qc_lock, iflags);
4023                qmax = sdebug_max_queue;
4024                r_qmax = atomic_read(&retired_max_queue);
4025                if (r_qmax > qmax)
4026                        qmax = r_qmax;
4027                for (k = 0; k < qmax; ++k) {
4028                        if (test_bit(k, sqp->in_use_bm)) {
4029                                sqcp = &sqp->qc_arr[k];
4030                                if (cmnd != sqcp->a_cmnd)
4031                                        continue;
4032                                /* found */
4033                                devip = (struct sdebug_dev_info *)
4034                                                cmnd->device->hostdata;
4035                                if (devip)
4036                                        atomic_dec(&devip->num_in_q);
4037                                sqcp->a_cmnd = NULL;
4038                                sd_dp = sqcp->sd_dp;
4039                                if (sd_dp) {
4040                                        l_defer_t = sd_dp->defer_t;
4041                                        sd_dp->defer_t = SDEB_DEFER_NONE;
4042                                } else
4043                                        l_defer_t = SDEB_DEFER_NONE;
4044                                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4045                                stop_qc_helper(sd_dp, l_defer_t);
4046                                clear_bit(k, sqp->in_use_bm);
4047                                return true;
4048                        }
4049                }
4050                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4051        }
4052        return false;
4053}
4054
4055/* Deletes (stops) timers or work queues of all queued commands */
4056static void stop_all_queued(void)
4057{
4058        unsigned long iflags;
4059        int j, k;
4060        enum sdeb_defer_type l_defer_t;
4061        struct sdebug_queue *sqp;
4062        struct sdebug_queued_cmd *sqcp;
4063        struct sdebug_dev_info *devip;
4064        struct sdebug_defer *sd_dp;
4065
4066        for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4067                spin_lock_irqsave(&sqp->qc_lock, iflags);
4068                for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4069                        if (test_bit(k, sqp->in_use_bm)) {
4070                                sqcp = &sqp->qc_arr[k];
4071                                if (sqcp->a_cmnd == NULL)
4072                                        continue;
4073                                devip = (struct sdebug_dev_info *)
4074                                        sqcp->a_cmnd->device->hostdata;
4075                                if (devip)
4076                                        atomic_dec(&devip->num_in_q);
4077                                sqcp->a_cmnd = NULL;
4078                                sd_dp = sqcp->sd_dp;
4079                                if (sd_dp) {
4080                                        l_defer_t = sd_dp->defer_t;
4081                                        sd_dp->defer_t = SDEB_DEFER_NONE;
4082                                } else
4083                                        l_defer_t = SDEB_DEFER_NONE;
4084                                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4085                                stop_qc_helper(sd_dp, l_defer_t);
4086                                clear_bit(k, sqp->in_use_bm);
4087                                spin_lock_irqsave(&sqp->qc_lock, iflags);
4088                        }
4089                }
4090                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4091        }
4092}
4093
4094/* Free queued command memory on heap */
4095static void free_all_queued(void)
4096{
4097        int j, k;
4098        struct sdebug_queue *sqp;
4099        struct sdebug_queued_cmd *sqcp;
4100
4101        for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4102                for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4103                        sqcp = &sqp->qc_arr[k];
4104                        kfree(sqcp->sd_dp);
4105                        sqcp->sd_dp = NULL;
4106                }
4107        }
4108}
4109
4110static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4111{
4112        bool ok;
4113
4114        ++num_aborts;
4115        if (SCpnt) {
4116                ok = stop_queued_cmnd(SCpnt);
4117                if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4118                        sdev_printk(KERN_INFO, SCpnt->device,
4119                                    "%s: command%s found\n", __func__,
4120                                    ok ? "" : " not");
4121        }
4122        return SUCCESS;
4123}
4124
4125static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
4126{
4127        ++num_dev_resets;
4128        if (SCpnt && SCpnt->device) {
4129                struct scsi_device *sdp = SCpnt->device;
4130                struct sdebug_dev_info *devip =
4131                                (struct sdebug_dev_info *)sdp->hostdata;
4132
4133                if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4134                        sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4135                if (devip)
4136                        set_bit(SDEBUG_UA_POR, devip->uas_bm);
4137        }
4138        return SUCCESS;
4139}
4140
4141static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
4142{
4143        struct sdebug_host_info *sdbg_host;
4144        struct sdebug_dev_info *devip;
4145        struct scsi_device *sdp;
4146        struct Scsi_Host *hp;
4147        int k = 0;
4148
4149        ++num_target_resets;
4150        if (!SCpnt)
4151                goto lie;
4152        sdp = SCpnt->device;
4153        if (!sdp)
4154                goto lie;
4155        if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4156                sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4157        hp = sdp->host;
4158        if (!hp)
4159                goto lie;
4160        sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4161        if (sdbg_host) {
4162                list_for_each_entry(devip,
4163                                    &sdbg_host->dev_info_list,
4164                                    dev_list)
4165                        if (devip->target == sdp->id) {
4166                                set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4167                                ++k;
4168                        }
4169        }
4170        if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4171                sdev_printk(KERN_INFO, sdp,
4172                            "%s: %d device(s) found in target\n", __func__, k);
4173lie:
4174        return SUCCESS;
4175}
4176
4177static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
4178{
4179        struct sdebug_host_info *sdbg_host;
4180        struct sdebug_dev_info *devip;
4181        struct scsi_device *sdp;
4182        struct Scsi_Host *hp;
4183        int k = 0;
4184
4185        ++num_bus_resets;
4186        if (!(SCpnt && SCpnt->device))
4187                goto lie;
4188        sdp = SCpnt->device;
4189        if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4190                sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4191        hp = sdp->host;
4192        if (hp) {
4193                sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4194                if (sdbg_host) {
4195                        list_for_each_entry(devip,
4196                                            &sdbg_host->dev_info_list,
4197                                            dev_list) {
4198                                set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4199                                ++k;
4200                        }
4201                }
4202        }
4203        if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4204                sdev_printk(KERN_INFO, sdp,
4205                            "%s: %d device(s) found in host\n", __func__, k);
4206lie:
4207        return SUCCESS;
4208}
4209
4210static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
4211{
4212        struct sdebug_host_info *sdbg_host;
4213        struct sdebug_dev_info *devip;
4214        int k = 0;
4215
4216        ++num_host_resets;
4217        if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4218                sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
4219        spin_lock(&sdebug_host_list_lock);
4220        list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
4221                list_for_each_entry(devip, &sdbg_host->dev_info_list,
4222                                    dev_list) {
4223                        set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4224                        ++k;
4225                }
4226        }
4227        spin_unlock(&sdebug_host_list_lock);
4228        stop_all_queued();
4229        if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4230                sdev_printk(KERN_INFO, SCpnt->device,
4231                            "%s: %d device(s) found\n", __func__, k);
4232        return SUCCESS;
4233}
4234
4235static void __init sdebug_build_parts(unsigned char *ramp,
4236                                      unsigned long store_size)
4237{
4238        struct partition *pp;
4239        int starts[SDEBUG_MAX_PARTS + 2];
4240        int sectors_per_part, num_sectors, k;
4241        int heads_by_sects, start_sec, end_sec;
4242
4243        /* assume partition table already zeroed */
4244        if ((sdebug_num_parts < 1) || (store_size < 1048576))
4245                return;
4246        if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
4247                sdebug_num_parts = SDEBUG_MAX_PARTS;
4248                pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
4249        }
4250        num_sectors = (int)sdebug_store_sectors;
4251        sectors_per_part = (num_sectors - sdebug_sectors_per)
4252                           / sdebug_num_parts;
4253        heads_by_sects = sdebug_heads * sdebug_sectors_per;
4254        starts[0] = sdebug_sectors_per;
4255        for (k = 1; k < sdebug_num_parts; ++k)
4256                starts[k] = ((k * sectors_per_part) / heads_by_sects)
4257                            * heads_by_sects;
4258        starts[sdebug_num_parts] = num_sectors;
4259        starts[sdebug_num_parts + 1] = 0;
4260
4261        ramp[510] = 0x55;       /* magic partition markings */
4262        ramp[511] = 0xAA;
4263        pp = (struct partition *)(ramp + 0x1be);
4264        for (k = 0; starts[k + 1]; ++k, ++pp) {
4265                start_sec = starts[k];
4266                end_sec = starts[k + 1] - 1;
4267                pp->boot_ind = 0;
4268
4269                pp->cyl = start_sec / heads_by_sects;
4270                pp->head = (start_sec - (pp->cyl * heads_by_sects))
4271                           / sdebug_sectors_per;
4272                pp->sector = (start_sec % sdebug_sectors_per) + 1;
4273
4274                pp->end_cyl = end_sec / heads_by_sects;
4275                pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
4276                               / sdebug_sectors_per;
4277                pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
4278
4279                pp->start_sect = cpu_to_le32(start_sec);
4280                pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
4281                pp->sys_ind = 0x83;     /* plain Linux partition */
4282        }
4283}
4284
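/*
 * The table written by sdebug_build_parts() follows the classic MBR layout:
 * the 0xAA55 signature sits in the last two bytes of the first 512-byte
 * block and up to SDEBUG_MAX_PARTS 16-byte entries start at offset 0x1be,
 * each carrying CHS start/end values, a 32-bit start sector and length, and
 * a type byte (0x83 = Linux).
 */
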
4285static void block_unblock_all_queues(bool block)
4286{
4287        int j;
4288        struct sdebug_queue *sqp;
4289
4290        for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4291                atomic_set(&sqp->blocked, (int)block);
4292}
4293
4294/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4295 * commands will be processed normally before triggers occur.
4296 */
4297static void tweak_cmnd_count(void)
4298{
4299        int count, modulo;
4300
4301        modulo = abs(sdebug_every_nth);
4302        if (modulo < 2)
4303                return;
4304        block_unblock_all_queues(true);
4305        count = atomic_read(&sdebug_cmnd_count);
4306        atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4307        block_unblock_all_queues(false);
4308}
4309
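/*
 * Worked example for tweak_cmnd_count(): with every_nth=100 and a current
 * sdebug_cmnd_count of 237, the count is rounded down to 200 so that the
 * counter is again an exact multiple of the trigger interval.
 */
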
4310static void clear_queue_stats(void)
4311{
4312        atomic_set(&sdebug_cmnd_count, 0);
4313        atomic_set(&sdebug_completions, 0);
4314        atomic_set(&sdebug_miss_cpus, 0);
4315        atomic_set(&sdebug_a_tsf, 0);
4316}
4317
4318static void setup_inject(struct sdebug_queue *sqp,
4319                         struct sdebug_queued_cmd *sqcp)
4320{
4321        if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
4322                if (sdebug_every_nth > 0)
4323                        sqcp->inj_recovered = sqcp->inj_transport
4324                                = sqcp->inj_dif
4325                                = sqcp->inj_dix = sqcp->inj_short
4326                                = sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
4327                return;
4328        }
4329        sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4330        sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4331        sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4332        sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4333        sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4334        sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4335        sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
4336}
4337
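/*
 * Illustrative combination (values per the opts and every_nth descriptions
 * further below): every_nth=100 together with opts=8 (recovered error)
 * causes roughly every 100th queued command to have inj_recovered set while
 * the intervening commands get all injection flags cleared.
 */
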
4338/* Complete the processing of the thread that queued a SCSI command to this
4339 * driver. It either completes the command (calling scsi_done()) or
4340 * schedules a hr timer or work queue item, then returns 0. Returns
4341 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4342 */
4343static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4344                         int scsi_result,
4345                         int (*pfp)(struct scsi_cmnd *,
4346                                    struct sdebug_dev_info *),
4347                         int delta_jiff, int ndelay)
4348{
4349        unsigned long iflags;
4350        int k, num_in_q, qdepth, inject;
4351        struct sdebug_queue *sqp;
4352        struct sdebug_queued_cmd *sqcp;
4353        struct scsi_device *sdp;
4354        struct sdebug_defer *sd_dp;
4355
4356        if (unlikely(devip == NULL)) {
4357                if (scsi_result == 0)
4358                        scsi_result = DID_NO_CONNECT << 16;
4359                goto respond_in_thread;
4360        }
4361        sdp = cmnd->device;
4362
4363        if (delta_jiff == 0)
4364                goto respond_in_thread;
4365
4366        /* schedule the response at a later time if resources permit */
4367        sqp = get_queue(cmnd);
4368        spin_lock_irqsave(&sqp->qc_lock, iflags);
4369        if (unlikely(atomic_read(&sqp->blocked))) {
4370                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4371                return SCSI_MLQUEUE_HOST_BUSY;
4372        }
4373        num_in_q = atomic_read(&devip->num_in_q);
4374        qdepth = cmnd->device->queue_depth;
4375        inject = 0;
4376        if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4377                if (scsi_result) {
4378                        spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4379                        goto respond_in_thread;
4380                } else
4381                        scsi_result = device_qfull_result;
4382        } else if (unlikely(sdebug_every_nth &&
4383                            (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4384                            (scsi_result == 0))) {
4385                if ((num_in_q == (qdepth - 1)) &&
4386                    (atomic_inc_return(&sdebug_a_tsf) >=
4387                     abs(sdebug_every_nth))) {
4388                        atomic_set(&sdebug_a_tsf, 0);
4389                        inject = 1;
4390                        scsi_result = device_qfull_result;
4391                }
4392        }
4393
4394        k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4395        if (unlikely(k >= sdebug_max_queue)) {
4396                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4397                if (scsi_result)
4398                        goto respond_in_thread;
4399                else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4400                        scsi_result = device_qfull_result;
4401                if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4402                        sdev_printk(KERN_INFO, sdp,
4403                                    "%s: max_queue=%d exceeded, %s\n",
4404                                    __func__, sdebug_max_queue,
4405                                    (scsi_result ?  "status: TASK SET FULL" :
4406                                                    "report: host busy"));
4407                if (scsi_result)
4408                        goto respond_in_thread;
4409                else
4410                        return SCSI_MLQUEUE_HOST_BUSY;
4411        }
4412        __set_bit(k, sqp->in_use_bm);
4413        atomic_inc(&devip->num_in_q);
4414        sqcp = &sqp->qc_arr[k];
4415        sqcp->a_cmnd = cmnd;
4416        cmnd->host_scribble = (unsigned char *)sqcp;
4417        sd_dp = sqcp->sd_dp;
4418        spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4419        if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4420                setup_inject(sqp, sqcp);
4421        if (sd_dp == NULL) {
4422                sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4423                if (sd_dp == NULL)
4424                        return SCSI_MLQUEUE_HOST_BUSY;
4425        }
4426
4427        cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4428        if (cmnd->result & SDEG_RES_IMMED_MASK) {
4429                /*
4430                 * This is the F_DELAY_OVERR case. No delay.
4431                 */
4432                cmnd->result &= ~SDEG_RES_IMMED_MASK;
4433                delta_jiff = ndelay = 0;
4434        }
4435        if (cmnd->result == 0 && scsi_result != 0)
4436                cmnd->result = scsi_result;
4437
4438        if (unlikely(sdebug_verbose && cmnd->result))
4439                sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4440                            __func__, cmnd->result);
4441
4442        if (delta_jiff > 0 || ndelay > 0) {
4443                ktime_t kt;
4444
4445                if (delta_jiff > 0) {
4446                        kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
4447                } else
4448                        kt = ndelay;
4449                if (!sd_dp->init_hrt) {
4450                        sd_dp->init_hrt = true;
4451                        sqcp->sd_dp = sd_dp;
4452                        hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4453                                     HRTIMER_MODE_REL_PINNED);
4454                        sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4455                        sd_dp->sqa_idx = sqp - sdebug_q_arr;
4456                        sd_dp->qc_idx = k;
4457                }
4458                if (sdebug_statistics)
4459                        sd_dp->issuing_cpu = raw_smp_processor_id();
4460                sd_dp->defer_t = SDEB_DEFER_HRT;
4461                hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4462        } else {        /* jdelay < 0, use work queue */
4463                if (!sd_dp->init_wq) {
4464                        sd_dp->init_wq = true;
4465                        sqcp->sd_dp = sd_dp;
4466                        sd_dp->sqa_idx = sqp - sdebug_q_arr;
4467                        sd_dp->qc_idx = k;
4468                        INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4469                }
4470                if (sdebug_statistics)
4471                        sd_dp->issuing_cpu = raw_smp_processor_id();
4472                sd_dp->defer_t = SDEB_DEFER_WQ;
4473                if (unlikely(sqcp->inj_cmd_abort))
4474                        sd_dp->aborted = true;
4475                schedule_work(&sd_dp->ew.work);
4476                if (unlikely(sqcp->inj_cmd_abort)) {
4477                        sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
4478                                    cmnd->request->tag);
4479                        blk_abort_request(cmnd->request);
4480                }
4481        }
4482        if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4483                     (scsi_result == device_qfull_result)))
4484                sdev_printk(KERN_INFO, sdp,
4485                            "%s: num_in_q=%d +1, %s%s\n", __func__,
4486                            num_in_q, (inject ? "<inject> " : ""),
4487                            "status: TASK SET FULL");
4488        return 0;
4489
4490respond_in_thread:      /* call back to mid-layer using invocation thread */
4491        cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4492        cmnd->result &= ~SDEG_RES_IMMED_MASK;
4493        if (cmnd->result == 0 && scsi_result != 0)
4494                cmnd->result = scsi_result;
4495        cmnd->scsi_done(cmnd);
4496        return 0;
4497}
4498
4499/* Note: The following macros create attribute files in the
4500   /sys/module/scsi_debug/parameters directory. Unfortunately this
4501   driver is not notified when one of them is changed, so it cannot
4502   trigger the auxiliary actions that it performs when the corresponding
4503   attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is
4504   written. */
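/* For example (illustrative; see every_nth_store() further below):
 *   echo 100 > /sys/module/scsi_debug/parameters/every_nth
 * changes only the stored value, while
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 * additionally enables statistics and re-aligns the command counter.
 */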
4505module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
4506module_param_named(ato, sdebug_ato, int, S_IRUGO);
4507module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
4508module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
4509module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
4510module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
4511module_param_named(dif, sdebug_dif, int, S_IRUGO);
4512module_param_named(dix, sdebug_dix, int, S_IRUGO);
4513module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
4514module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
4515module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
4516module_param_named(guard, sdebug_guard, uint, S_IRUGO);
4517module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
4518module_param_string(inq_vendor, sdebug_inq_vendor_id,
4519                    sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
4520module_param_string(inq_product, sdebug_inq_product_id,
4521                    sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
4522module_param_string(inq_rev, sdebug_inq_product_rev,
4523                    sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
4524module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
4525module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
4526module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
4527module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
4528module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
4529module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
4530module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
4531module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
4532module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
4533module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
4534module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
4535module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
4536module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
4537module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
4538module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
4539module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
4540module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
4541module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
4542module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
4543module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
4544module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
4545module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
4546module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
4547module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
4548module_param_named(submit_queues, submit_queues, int, S_IRUGO);
4549module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
4550module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
4551module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
4552module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
4553module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
4554module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
4555module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
4556                   S_IRUGO | S_IWUSR);
4557module_param_named(write_same_length, sdebug_write_same_length, int,
4558                   S_IRUGO | S_IWUSR);
4559
4560MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4561MODULE_DESCRIPTION("SCSI debug adapter driver");
4562MODULE_LICENSE("GPL");
4563MODULE_VERSION(SDEBUG_VERSION);
4564
4565MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4566MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4567MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
4568MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4569MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4570MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4571MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4572MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4573MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4574MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4575MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4576MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4577MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
4578MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
4579MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
4580MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
4581                 SDEBUG_VERSION "\")");
4582MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4583MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4584MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4585MODULE_PARM_DESC(lbprz,
4586        "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
4587MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4588MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
4589MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4590MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
4591MODULE_PARM_DESC(medium_error_count, "count of sectors (from medium_error_start) that return a MEDIUM error");
4592MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4593MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4594MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4595MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
4596MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4597MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
4598MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4599MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4600MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
4601MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4602MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4603MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
4604MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4605MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
4606MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4607MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
4608MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4609MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4610MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4611MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4612MODULE_PARM_DESC(uuid_ctl,
4613                 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
4614MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4615MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4616MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4617
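/*
 * Illustrative invocation using a few of the parameters described above
 * (the values are arbitrary examples, not recommendations):
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 sector_size=4096
 *
 * The current values can be read back from
 * /sys/module/scsi_debug/parameters/<name> once the module is loaded.
 */
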
4618#define SDEBUG_INFO_LEN 256
4619static char sdebug_info[SDEBUG_INFO_LEN];
4620
4621static const char *scsi_debug_info(struct Scsi_Host *shp)
4622{
4623        int k;
4624
4625        k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4626                      my_name, SDEBUG_VERSION, sdebug_version_date);
4627        if (k >= (SDEBUG_INFO_LEN - 1))
4628                return sdebug_info;
4629        scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4630                  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4631                  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4632                  "statistics", (int)sdebug_statistics);
4633        return sdebug_info;
4634}
4635
4636/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4637static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4638                                 int length)
4639{
4640        char arr[16];
4641        int opts;
4642        int minLen = length > 15 ? 15 : length;
4643
4644        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4645                return -EACCES;
4646        memcpy(arr, buffer, minLen);
4647        arr[minLen] = '\0';
4648        if (1 != sscanf(arr, "%d", &opts))
4649                return -EINVAL;
4650        sdebug_opts = opts;
4651        sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4652        sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4653        if (sdebug_every_nth != 0)
4654                tweak_cmnd_count();
4655        return length;
4656}
4657
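/*
 * Example (hypothetical host number): 'echo 1 > /proc/scsi/scsi_debug/0'
 * sets opts to 1 (the "noise" bit, enabling verbose logging). Only a plain
 * decimal value is accepted here since sscanf(..., "%d", ...) is used above.
 */
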
4658/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4659 * same for each scsi_debug host (if there is more than one). Some of the
4660 * counters output are not atomic so they may be inaccurate on a busy system. */
4661static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4662{
4663        int f, j, l;
4664        struct sdebug_queue *sqp;
4665
4666        seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
4667                   SDEBUG_VERSION, sdebug_version_date);
4668        seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4669                   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
4670                   sdebug_opts, sdebug_every_nth);
4671        seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4672                   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
4673                   sdebug_sector_size, "bytes");
4674        seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4675                   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
4676                   num_aborts);
4677        seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4678                   num_dev_resets, num_target_resets, num_bus_resets,
4679                   num_host_resets);
4680        seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4681                   dix_reads, dix_writes, dif_errors);
4682        seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
4683                   sdebug_statistics);
4684        seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4685                   atomic_read(&sdebug_cmnd_count),
4686                   atomic_read(&sdebug_completions),
4687                   "miss_cpus", atomic_read(&sdebug_miss_cpus),
4688                   atomic_read(&sdebug_a_tsf));
4689
4690        seq_printf(m, "submit_queues=%d\n", submit_queues);
4691        for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4692                seq_printf(m, "  queue %d:\n", j);
4693                f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
4694                if (f != sdebug_max_queue) {
4695                        l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
4696                        seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
4697                                   "first,last bits", f, l);
4698                }
4699        }
4700        return 0;
4701}
4702
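/*
 * Hypothetical excerpt of that output with default module parameters (actual
 * numbers vary by system and kernel build):
 *
 *   scsi_debug adapter driver, version <ver> [<date>]
 *   num_tgts=1, shared (ram) size=8 MB, opts=0x0, every_nth=0
 *   delay=1, ndelay=0, max_luns=1, sector_size=512 bytes
 *   ...
 *   submit_queues=1
 *     queue 0:
 */
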
4703static ssize_t delay_show(struct device_driver *ddp, char *buf)
4704{
4705        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4706}
4707/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4708 * of delay is jiffies.
4709 */
4710static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4711                           size_t count)
4712{
4713        int jdelay, res;
4714
4715        if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4716                res = count;
4717                if (sdebug_jdelay != jdelay) {
4718                        int j, k;
4719                        struct sdebug_queue *sqp;
4720
4721                        block_unblock_all_queues(true);
4722                        for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4723                             ++j, ++sqp) {
4724                                k = find_first_bit(sqp->in_use_bm,
4725                                                   sdebug_max_queue);
4726                                if (k != sdebug_max_queue) {
4727                                        res = -EBUSY;   /* queued commands */
4728                                        break;
4729                                }
4730                        }
4731                        if (res > 0) {
4732                                sdebug_jdelay = jdelay;
4733                                sdebug_ndelay = 0;
4734                        }
4735                        block_unblock_all_queues(false);
4736                }
4737                return res;
4738        }
4739        return -EINVAL;
4740}
4741static DRIVER_ATTR_RW(delay);
4742
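/*
 * Example: 'echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay' makes
 * responses complete in the submitting thread (no deferral), while a
 * positive value delays each response by that many jiffies. The write fails
 * with -EBUSY while commands are still queued, as noted above.
 */
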
4743static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4744{
4745        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4746}
4747/* Returns -EBUSY if ndelay is being changed and commands are queued */
4748/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4749static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4750                            size_t count)
4751{
4752        int ndelay, res;
4753
4754        if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4755            (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4756                res = count;
4757                if (sdebug_ndelay != ndelay) {
4758                        int j, k;
4759                        struct sdebug_queue *sqp;
4760
4761                        block_unblock_all_queues(true);
4762                        for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4763                             ++j, ++sqp) {
4764                                k = find_first_bit(sqp->in_use_bm,
4765                                                   sdebug_max_queue);
4766                                if (k != sdebug_max_queue) {
4767                                        res = -EBUSY;   /* queued commands */
4768                                        break;
4769                                }
4770                        }
4771                        if (res > 0) {
4772                                sdebug_ndelay = ndelay;
4773                                sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4774                                                        : DEF_JDELAY;
4775                        }
4776                        block_unblock_all_queues(false);
4777                }
4778                return res;
4779        }
4780        return -EINVAL;
4781}
4782static DRIVER_ATTR_RW(ndelay);
4783
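/*
 * Example: 'echo 1000000 > /sys/bus/pseudo/drivers/scsi_debug/ndelay' gives
 * a 1 millisecond response delay; any accepted non-zero value overrides
 * delay by setting sdebug_jdelay to JDELAY_OVERRIDDEN, and writing 0
 * restores DEF_JDELAY.
 */
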
4784static ssize_t opts_show(struct device_driver *ddp, char *buf)
4785{
4786        return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4787}
4788
4789static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4790                          size_t count)
4791{
4792        int opts;
4793        char work[20];
4794
4795        if (sscanf(buf, "%10s", work) == 1) {
4796                if (strncasecmp(work, "0x", 2) == 0) {
4797                        if (kstrtoint(work + 2, 16, &opts) == 0)
4798                                goto opts_done;
4799                } else {
4800                        if (kstrtoint(work, 10, &opts) == 0)
4801                                goto opts_done;
4802                }
4803        }
4804        return -EINVAL;
4805opts_done:
4806        sdebug_opts = opts;
4807        sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4808        sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4809        tweak_cmnd_count();
4810        return count;
4811}
4812static DRIVER_ATTR_RW(opts);
4813
4814static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4815{
4816        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4817}
4818static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4819                           size_t count)
4820{
4821        int n;
4822
4823        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4824                sdebug_ptype = n;
4825                return count;
4826        }
4827        return -EINVAL;
4828}
4829static DRIVER_ATTR_RW(ptype);
4830
4831static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4832{
4833        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4834}
4835static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4836                            size_t count)
4837{
4838        int n;
4839
4840        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4841                sdebug_dsense = n;
4842                return count;
4843        }
4844        return -EINVAL;
4845}
4846static DRIVER_ATTR_RW(dsense);
4847
4848static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4849{
4850        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4851}
4852static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4853                             size_t count)
4854{
4855        int n;
4856
4857        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4858                n = (n > 0);
4859                sdebug_fake_rw = (sdebug_fake_rw > 0);
4860                if (sdebug_fake_rw != n) {
4861                        if ((0 == n) && (NULL == fake_storep)) {
4862                                unsigned long sz =
4863                                        (unsigned long)sdebug_dev_size_mb *
4864                                        1048576;
4865
4866                                fake_storep = vzalloc(sz);
4867                                if (NULL == fake_storep) {
4868                                        pr_err("out of memory, 9\n");
4869                                        return -ENOMEM;
4870                                }
4871                        }
4872                        sdebug_fake_rw = n;
4873                }
4874                return count;
4875        }
4876        return -EINVAL;
4877}
4878static DRIVER_ATTR_RW(fake_rw);
4879
4880static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4881{
4882        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4883}
4884static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4885                              size_t count)
4886{
4887        int n;
4888
4889        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4890                sdebug_no_lun_0 = n;
4891                return count;
4892        }
4893        return -EINVAL;
4894}
4895static DRIVER_ATTR_RW(no_lun_0);
4896
4897static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4898{
4899        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4900}
4901static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4902                              size_t count)
4903{
4904        int n;
4905
4906        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4907                sdebug_num_tgts = n;
4908                sdebug_max_tgts_luns();
4909                return count;
4910        }
4911        return -EINVAL;
4912}
4913static DRIVER_ATTR_RW(num_tgts);
4914
4915static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4916{
4917        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4918}
4919static DRIVER_ATTR_RO(dev_size_mb);
4920
4921static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4922{
4923        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4924}
4925static DRIVER_ATTR_RO(num_parts);
4926
4927static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4928{
4929        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4930}
4931static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4932                               size_t count)
4933{
4934        int nth;
4935
4936        if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4937                sdebug_every_nth = nth;
4938                if (nth && !sdebug_statistics) {
4939                        pr_info("every_nth needs statistics=1, set it\n");
4940                        sdebug_statistics = true;
4941                }
4942                tweak_cmnd_count();
4943                return count;
4944        }
4945        return -EINVAL;
4946}
4947static DRIVER_ATTR_RW(every_nth);
4948
4949static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4950{
4951        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4952}
4953static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4954                              size_t count)
4955{
4956        int n;
4957        bool changed;
4958
4959        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4960                if (n > 256) {
4961                        pr_warn("max_luns can be no more than 256\n");
4962                        return -EINVAL;
4963                }
4964                changed = (sdebug_max_luns != n);
4965                sdebug_max_luns = n;
4966                sdebug_max_tgts_luns();
4967                if (changed && (sdebug_scsi_level >= 5)) {      /* >= SPC-3 */
4968                        struct sdebug_host_info *sdhp;
4969                        struct sdebug_dev_info *dp;
4970
4971                        spin_lock(&sdebug_host_list_lock);
4972                        list_for_each_entry(sdhp, &sdebug_host_list,
4973                                            host_list) {
4974                                list_for_each_entry(dp, &sdhp->dev_info_list,
4975                                                    dev_list) {
4976                                        set_bit(SDEBUG_UA_LUNS_CHANGED,
4977                                                dp->uas_bm);
4978                                }
4979                        }
4980                        spin_unlock(&sdebug_host_list_lock);
4981                }
4982                return count;
4983        }
4984        return -EINVAL;
4985}
4986static DRIVER_ATTR_RW(max_luns);
4987
4988static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4989{
4990        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4991}
4992/* N.B. max_queue can be changed while there are queued commands. In flight
4993 * commands beyond the new max_queue will be completed. */
4994static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4995                               size_t count)
4996{
4997        int j, n, k, a;
4998        struct sdebug_queue *sqp;
4999
5000        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
5001            (n <= SDEBUG_CANQUEUE)) {
5002                block_unblock_all_queues(true);
5003                k = 0;
5004                for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5005                     ++j, ++sqp) {
5006                        a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
5007                        if (a > k)
5008                                k = a;
5009                }
5010                sdebug_max_queue = n;
5011                if (k == SDEBUG_CANQUEUE)
5012                        atomic_set(&retired_max_queue, 0);
5013                else if (k >= n)
5014                        atomic_set(&retired_max_queue, k + 1);
5015                else
5016                        atomic_set(&retired_max_queue, 0);
5017                block_unblock_all_queues(false);
5018                return count;
5019        }
5020        return -EINVAL;
5021}
5022static DRIVER_ATTR_RW(max_queue);
5023
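/*
 * Example of the note above: lowering max_queue while commands in slots at
 * or above the new limit are still in flight records (highest busy slot + 1)
 * in retired_max_queue; sdebug_q_cmd_complete() resets it to 0 once those
 * commands have completed.
 */
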
5024static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
5025{
5026        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
5027}
5028static DRIVER_ATTR_RO(no_uld);
5029
5030static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
5031{
5032        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
5033}
5034static DRIVER_ATTR_RO(scsi_level);
5035
5036static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
5037{
5038        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
5039}
5040static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
5041                                size_t count)
5042{
5043        int n;
5044        bool changed;
5045
5046        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5047                changed = (sdebug_virtual_gb != n);
5048                sdebug_virtual_gb = n;
5049                sdebug_capacity = get_sdebug_capacity();
5050                if (changed) {
5051                        struct sdebug_host_info *sdhp;
5052                        struct sdebug_dev_info *dp;
5053
5054                        spin_lock(&sdebug_host_list_lock);
5055                        list_for_each_entry(sdhp, &sdebug_host_list,
5056                                            host_list) {
5057                                list_for_each_entry(dp, &sdhp->dev_info_list,
5058                                                    dev_list) {
5059                                        set_bit(SDEBUG_UA_CAPACITY_CHANGED,
5060                                                dp->uas_bm);
5061                                }
5062                        }
5063                        spin_unlock(&sdebug_host_list_lock);
5064                }
5065                return count;
5066        }
5067        return -EINVAL;
5068}
5069static DRIVER_ATTR_RW(virtual_gb);
5070
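/*
 * Example: 'echo 4 > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb' makes
 * each device report a 4 GiB capacity (instead of one based on dev_size_mb)
 * and raises a CAPACITY CHANGED unit attention on every simulated device.
 */
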
5071static ssize_t add_host_show(struct device_driver *ddp, char *buf)
5072{
5073        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
5074}
5075
5076static int sdebug_add_adapter(void);
5077static void sdebug_remove_adapter(void);
5078
5079static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
5080                              size_t count)
5081{
5082        int delta_hosts;
5083
5084        if (sscanf(buf, "%d", &delta_hosts) != 1)
5085                return -EINVAL;
5086        if (delta_hosts > 0) {
5087                do {
5088                        sdebug_add_adapter();
5089                } while (--delta_hosts);
5090        } else if (delta_hosts < 0) {
5091                do {
5092                        sdebug_remove_adapter();
5093                } while (++delta_hosts);
5094        }
5095        return count;
5096}
5097static DRIVER_ATTR_RW(add_host);
5098
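/*
 * Example: 'echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host' creates
 * two more simulated hosts, while a negative value removes that many hosts.
 */
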
5099static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
5100{
5101        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
5102}
5103static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
5104                                    size_t count)
5105{
5106        int n;
5107
5108        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5109                sdebug_vpd_use_hostno = n;
5110                return count;
5111        }
5112        return -EINVAL;
5113}
5114static DRIVER_ATTR_RW(vpd_use_hostno);
5115
5116static ssize_t statistics_show(struct device_driver *ddp, char *buf)
5117{
5118        return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
5119}
5120static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
5121                                size_t count)
5122{
5123        int n;
5124
5125        if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
5126                if (n > 0)
5127                        sdebug_statistics = true;
5128                else {
5129                        clear_queue_stats();
5130                        sdebug_statistics = false;
5131                }
5132                return count;
5133        }
5134        return -EINVAL;
5135}
5136static DRIVER_ATTR_RW(statistics);
5137
5138static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
5139{
5140        return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
5141}
5142static DRIVER_ATTR_RO(sector_size);
5143
5144static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
5145{
5146        return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
5147}
5148static DRIVER_ATTR_RO(submit_queues);
5149
5150static ssize_t dix_show(struct device_driver *ddp, char *buf)
5151{
5152        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
5153}
5154static DRIVER_ATTR_RO(dix);
5155
5156static ssize_t dif_show(struct device_driver *ddp, char *buf)
5157{
5158        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
5159}
5160static DRIVER_ATTR_RO(dif);
5161
5162static ssize_t guard_show(struct device_driver *ddp, char *buf)
5163{
5164        return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
5165}
5166static DRIVER_ATTR_RO(guard);
5167
5168static ssize_t ato_show(struct device_driver *ddp, char *buf)
5169{
5170        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
5171}
5172static DRIVER_ATTR_RO(ato);
5173
5174static ssize_t map_show(struct device_driver *ddp, char *buf)
5175{
5176        ssize_t count;
5177
5178        if (!scsi_debug_lbp())
5179                return scnprintf(buf, PAGE_SIZE, "0-%u\n",
5180                                 sdebug_store_sectors);
5181
5182        count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
5183                          (int)map_size, map_storep);
5184        buf[count++] = '\n';
5185        buf[count] = '\0';
5186
5187        return count;
5188}
5189static DRIVER_ATTR_RO(map);
5190
5191static ssize_t removable_show(struct device_driver *ddp, char *buf)
5192{
5193        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
5194}
5195static ssize_t removable_store(struct device_driver *ddp, const char *buf,
5196                               size_t count)
5197{
5198        int n;
5199
5200        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5201                sdebug_removable = (n > 0);
5202                return count;
5203        }
5204        return -EINVAL;
5205}
5206static DRIVER_ATTR_RW(removable);
5207
5208static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
5209{
5210        return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
5211}
5212/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
5213static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
5214                               size_t count)
5215{
5216        int n;
5217
5218        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5219                sdebug_host_lock = (n > 0);
5220                return count;
5221        }
5222        return -EINVAL;
5223}
5224static DRIVER_ATTR_RW(host_lock);
5225
5226static ssize_t strict_show(struct device_driver *ddp, char *buf)
5227{
5228        return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
5229}
5230static ssize_t strict_store(struct device_driver *ddp, const char *buf,
5231                            size_t count)
5232{
5233        int n;
5234
5235        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5236                sdebug_strict = (n > 0);
5237                return count;
5238        }
5239        return -EINVAL;
5240}
5241static DRIVER_ATTR_RW(strict);
5242
5243static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
5244{
5245        return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
5246}
5247static DRIVER_ATTR_RO(uuid_ctl);
5248
5249static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5250{
5251        return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
5252}
5253static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5254                             size_t count)
5255{
5256        int ret, n;
5257
5258        ret = kstrtoint(buf, 0, &n);
5259        if (ret)
5260                return ret;
5261        sdebug_cdb_len = n;
5262        all_config_cdb_len();
5263        return count;
5264}
5265static DRIVER_ATTR_RW(cdb_len);
5266
5267
5268/*
5269 * Note: The following array creates attribute files in the
5270 * /sys/bus/pseudo/drivers/scsi_debug directory. Their advantage over the
5271 * files in /sys/module/scsi_debug/parameters is that auxiliary actions
5272 * can be triggered when an attribute is changed; see add_host_store().
5273 */
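/*
 * Illustrative example: writing virtual_gb through this directory also
 * raises a CAPACITY CHANGED unit attention on every existing device (see
 * virtual_gb_store() above), which writing the module parameter under
 * /sys/module/scsi_debug/parameters does not do:
 *
 *   echo 8 > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb
 */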
5274
5275static struct attribute *sdebug_drv_attrs[] = {
5276        &driver_attr_delay.attr,
5277        &driver_attr_opts.attr,
5278        &driver_attr_ptype.attr,
5279        &driver_attr_dsense.attr,
5280        &driver_attr_fake_rw.attr,
5281        &driver_attr_no_lun_0.attr,
5282        &driver_attr_num_tgts.attr,
5283        &driver_attr_dev_size_mb.attr,
5284        &driver_attr_num_parts.attr,
5285        &driver_attr_every_nth.attr,
5286        &driver_attr_max_luns.attr,
5287        &driver_attr_max_queue.attr,
5288        &driver_attr_no_uld.attr,
5289        &driver_attr_scsi_level.attr,
5290        &driver_attr_virtual_gb.attr,
5291        &driver_attr_add_host.attr,
5292        &driver_attr_vpd_use_hostno.attr,
5293        &driver_attr_sector_size.attr,
5294        &driver_attr_statistics.attr,
5295        &driver_attr_submit_queues.attr,
5296        &driver_attr_dix.attr,
5297        &driver_attr_dif.attr,
5298        &driver_attr_guard.attr,
5299        &driver_attr_ato.attr,
5300        &driver_attr_map.attr,
5301        &driver_attr_removable.attr,
5302        &driver_attr_host_lock.attr,
5303        &driver_attr_ndelay.attr,
5304        &driver_attr_strict.attr,
5305        &driver_attr_uuid_ctl.attr,
5306        &driver_attr_cdb_len.attr,
5307        NULL,
5308};
5309ATTRIBUTE_GROUPS(sdebug_drv);
5310
5311static struct device *pseudo_primary;
5312
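/*
 * Illustrative usage (assuming module parameters of the same names as the
 * sysfs attributes above, e.g. dev_size_mb, sector_size, dif, dix, guard):
 *
 *   modprobe scsi_debug dev_size_mb=256 sector_size=4096 dif=1 dix=1 guard=1
 *
 * passes the checks below, while e.g. sector_size=520 or dif=4 is rejected
 * with -EINVAL.
 */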
5313static int __init scsi_debug_init(void)
5314{
5315        unsigned long sz;
5316        int host_to_add;
5317        int k;
5318        int ret;
5319
5320        atomic_set(&retired_max_queue, 0);
5321
5322        if (sdebug_ndelay >= 1000 * 1000 * 1000) {
5323                pr_warn("ndelay must be less than 1 second, ignored\n");
5324                sdebug_ndelay = 0;
5325        } else if (sdebug_ndelay > 0)
5326                sdebug_jdelay = JDELAY_OVERRIDDEN;
5327
5328        switch (sdebug_sector_size) {
5329        case  512:
5330        case 1024:
5331        case 2048:
5332        case 4096:
5333                break;
5334        default:
5335                pr_err("invalid sector_size %d\n", sdebug_sector_size);
5336                return -EINVAL;
5337        }
5338
5339        switch (sdebug_dif) {
5340        case T10_PI_TYPE0_PROTECTION:
5341                break;
5342        case T10_PI_TYPE1_PROTECTION:
5343        case T10_PI_TYPE2_PROTECTION:
5344        case T10_PI_TYPE3_PROTECTION:
5345                have_dif_prot = true;
5346                break;
5347
5348        default:
5349                pr_err("dif must be 0, 1, 2 or 3\n");
5350                return -EINVAL;
5351        }
5352
5353        if (sdebug_guard > 1) {
5354                pr_err("guard must be 0 or 1\n");
5355                return -EINVAL;
5356        }
5357
5358        if (sdebug_ato > 1) {
5359                pr_err("ato must be 0 or 1\n");
5360                return -EINVAL;
5361        }
5362
5363        if (sdebug_physblk_exp > 15) {
5364                pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
5365                return -EINVAL;
5366        }
5367        if (sdebug_max_luns > 256) {
5368                pr_warn("max_luns can be no more than 256, using default\n");
5369                sdebug_max_luns = DEF_MAX_LUNS;
5370        }
5371
5372        if (sdebug_lowest_aligned > 0x3fff) {
5373                pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
5374                return -EINVAL;
5375        }
5376
5377        if (submit_queues < 1) {
5378                pr_err("submit_queues must be 1 or more\n");
5379                return -EINVAL;
5380        }
5381        sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
5382                               GFP_KERNEL);
5383        if (sdebug_q_arr == NULL)
5384                return -ENOMEM;
5385        for (k = 0; k < submit_queues; ++k)
5386                spin_lock_init(&sdebug_q_arr[k].qc_lock);
5387
5388        if (sdebug_dev_size_mb < 1)
5389                sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
5390        sz = (unsigned long)sdebug_dev_size_mb * 1048576;
5391        sdebug_store_sectors = sz / sdebug_sector_size;
5392        sdebug_capacity = get_sdebug_capacity();
5393
5394        /* play around with geometry, don't waste too much on track 0 */
5395        sdebug_heads = 8;
5396        sdebug_sectors_per = 32;
5397        if (sdebug_dev_size_mb >= 256)
5398                sdebug_heads = 64;
5399        else if (sdebug_dev_size_mb >= 16)
5400                sdebug_heads = 32;
5401        sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5402                               (sdebug_sectors_per * sdebug_heads);
5403        if (sdebug_cylinders_per >= 1024) {
5404                /* other LLDs do this; implies >= 1GB ram disk ... */
5405                sdebug_heads = 255;
5406                sdebug_sectors_per = 63;
5407                sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5408                               (sdebug_sectors_per * sdebug_heads);
5409        }
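        /*
         * e.g. (illustrative): an 8 MiB store with 512 byte sectors is
         * 16384 blocks, giving sdebug_heads=8, sdebug_sectors_per=32 and
         * sdebug_cylinders_per=64.
         */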
5410
5411        if (sdebug_fake_rw == 0) {
5412                fake_storep = vzalloc(sz);
5413                if (NULL == fake_storep) {
5414                        pr_err("out of memory, 1\n");
5415                        ret = -ENOMEM;
5416                        goto free_q_arr;
5417                }
5418                if (sdebug_num_parts > 0)
5419                        sdebug_build_parts(fake_storep, sz);
5420        }
5421
5422        if (sdebug_dix) {
5423                int dif_size;
5424
5425                dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
5426                dif_storep = vmalloc(dif_size);
5427
5428                pr_info("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
5429
5430                if (dif_storep == NULL) {
5431                        pr_err("out of mem. (DIX)\n");
5432                        ret = -ENOMEM;
5433                        goto free_vm;
5434                }
5435
5436                memset(dif_storep, 0xff, dif_size);
5437        }
5438
5439        /* Logical Block Provisioning */
5440        if (scsi_debug_lbp()) {
5441                sdebug_unmap_max_blocks =
5442                        clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
5443
5444                sdebug_unmap_max_desc =
5445                        clamp(sdebug_unmap_max_desc, 0U, 256U);
5446
5447                sdebug_unmap_granularity =
5448                        clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
5449
5450                if (sdebug_unmap_alignment &&
5451                    sdebug_unmap_granularity <=
5452                    sdebug_unmap_alignment) {
5453                        pr_err("ERR: unmap_granularity <= unmap_alignment\n");
5454                        ret = -EINVAL;
5455                        goto free_vm;
5456                }
5457
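                /*
                 * map_storep is a bitmap with one bit per unmap granule;
                 * with sdebug_unmap_alignment == 0, bit g covers LBAs
                 * [g * granularity, (g + 1) * granularity).
                 */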
5458                map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
5459                map_storep = vmalloc(array_size(sizeof(long),
5460                                                BITS_TO_LONGS(map_size)));
5461
5462                pr_info("%lu provisioning blocks\n", map_size);
5463
5464                if (map_storep == NULL) {
5465                        pr_err("out of mem. (MAP)\n");
5466                        ret = -ENOMEM;
5467                        goto free_vm;
5468                }
5469
5470                bitmap_zero(map_storep, map_size);
5471
5472                /* Map the first two logical blocks (partition table) */
5473                if (sdebug_num_parts)
5474                        map_region(0, 2);
5475        }
5476
5477        pseudo_primary = root_device_register("pseudo_0");
5478        if (IS_ERR(pseudo_primary)) {
5479                pr_warn("root_device_register() error\n");
5480                ret = PTR_ERR(pseudo_primary);
5481                goto free_vm;
5482        }
5483        ret = bus_register(&pseudo_lld_bus);
5484        if (ret < 0) {
5485                pr_warn("bus_register error: %d\n", ret);
5486                goto dev_unreg;
5487        }
5488        ret = driver_register(&sdebug_driverfs_driver);
5489        if (ret < 0) {
5490                pr_warn("driver_register error: %d\n", ret);
5491                goto bus_unreg;
5492        }
5493
5494        host_to_add = sdebug_add_host;
5495        sdebug_add_host = 0;
5496
5497        for (k = 0; k < host_to_add; k++) {
5498                if (sdebug_add_adapter()) {
5499                        pr_err("sdebug_add_adapter failed k=%d\n", k);
5500                        break;
5501                }
5502        }
5503
5504        if (sdebug_verbose)
5505                pr_info("built %d host(s)\n", sdebug_add_host);
5506
5507        return 0;
5508
5509bus_unreg:
5510        bus_unregister(&pseudo_lld_bus);
5511dev_unreg:
5512        root_device_unregister(pseudo_primary);
5513free_vm:
5514        vfree(map_storep);
5515        vfree(dif_storep);
5516        vfree(fake_storep);
5517free_q_arr:
5518        kfree(sdebug_q_arr);
5519        return ret;
5520}
5521
5522static void __exit scsi_debug_exit(void)
5523{
5524        int k = sdebug_add_host;
5525
5526        stop_all_queued();
5527        for (; k; k--)
5528                sdebug_remove_adapter();
5529        free_all_queued();
5530        driver_unregister(&sdebug_driverfs_driver);
5531        bus_unregister(&pseudo_lld_bus);
5532        root_device_unregister(pseudo_primary);
5533
5534        vfree(map_storep);
5535        vfree(dif_storep);
5536        vfree(fake_storep);
5537        kfree(sdebug_q_arr);
5538}
5539
5540device_initcall(scsi_debug_init);
5541module_exit(scsi_debug_exit);
5542
5543static void sdebug_release_adapter(struct device *dev)
5544{
5545        struct sdebug_host_info *sdbg_host;
5546
5547        sdbg_host = to_sdebug_host(dev);
5548        kfree(sdbg_host);
5549}
5550
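/*
 * Allocate one pseudo adapter: create sdebug_num_tgts * sdebug_max_luns
 * device entries, add the host to sdebug_host_list and register its device
 * on pseudo_lld_bus, whose probe routine (sdebug_driver_probe()) then
 * allocates and scans the corresponding Scsi_Host.
 */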
5551static int sdebug_add_adapter(void)
5552{
5553        int k, devs_per_host;
5554        int error = 0;
5555        struct sdebug_host_info *sdbg_host;
5556        struct sdebug_dev_info *sdbg_devinfo, *tmp;
5557
5558        sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5559        if (sdbg_host == NULL) {
5560                pr_err("out of memory at line %d\n", __LINE__);
5561                return -ENOMEM;
5562        }
5563
5564        INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5565
5566        devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5567        for (k = 0; k < devs_per_host; k++) {
5568                sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5569                if (!sdbg_devinfo) {
5570                        pr_err("out of memory at line %d\n", __LINE__);
5571                        error = -ENOMEM;
5572                        goto clean;
5573                }
5574        }
5575
5576        spin_lock(&sdebug_host_list_lock);
5577        list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5578        spin_unlock(&sdebug_host_list_lock);
5579
5580        sdbg_host->dev.bus = &pseudo_lld_bus;
5581        sdbg_host->dev.parent = pseudo_primary;
5582        sdbg_host->dev.release = &sdebug_release_adapter;
5583        dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5584
5585        error = device_register(&sdbg_host->dev);
5586
5587        if (error)
5588                goto clean;
5589
5590        ++sdebug_add_host;
5591        return error;
5592
5593clean:
5594        list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5595                                 dev_list) {
5596                list_del(&sdbg_devinfo->dev_list);
5597                kfree(sdbg_devinfo);
5598        }
5599
5600        kfree(sdbg_host);
5601        return error;
5602}
5603
5604static void sdebug_remove_adapter(void)
5605{
5606        struct sdebug_host_info *sdbg_host = NULL;
5607
5608        spin_lock(&sdebug_host_list_lock);
5609        if (!list_empty(&sdebug_host_list)) {
5610                sdbg_host = list_entry(sdebug_host_list.prev,
5611                                       struct sdebug_host_info, host_list);
5612                list_del(&sdbg_host->host_list);
5613        }
5614        spin_unlock(&sdebug_host_list_lock);
5615
5616        if (!sdbg_host)
5617                return;
5618
5619        device_unregister(&sdbg_host->dev);
5620        --sdebug_add_host;
5621}
5622
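/*
 * change_queue_depth callback: clamp the requested depth to
 * [1, SDEBUG_CANQUEUE + 10] and apply it with scsi_change_queue_depth()
 * while all submission queues are temporarily blocked.
 */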
5623static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5624{
5625        int num_in_q = 0;
5626        struct sdebug_dev_info *devip;
5627
5628        block_unblock_all_queues(true);
5629        devip = (struct sdebug_dev_info *)sdev->hostdata;
5630        if (NULL == devip) {
5631                block_unblock_all_queues(false);
5632                return  -ENODEV;
5633        }
5634        num_in_q = atomic_read(&devip->num_in_q);
5635
5636        if (qdepth < 1)
5637                qdepth = 1;
5638        /* allow qdepth to exceed max host qc_arr elements for testing */
5639        if (qdepth > SDEBUG_CANQUEUE + 10)
5640                qdepth = SDEBUG_CANQUEUE + 10;
5641        scsi_change_queue_depth(sdev, qdepth);
5642
5643        if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5644                sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5645                            __func__, qdepth, num_in_q);
5646        }
5647        block_unblock_all_queues(false);
5648        return sdev->queue_depth;
5649}
5650
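/*
 * Return true when this command should be dropped (never completed) to fake
 * a timeout: every abs(sdebug_every_nth)th command when SDEBUG_OPT_TIMEOUT
 * is set, or only medium access commands when SDEBUG_OPT_MAC_TIMEOUT is set.
 */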
5651static bool fake_timeout(struct scsi_cmnd *scp)
5652{
5653        if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5654                if (sdebug_every_nth < -1)
5655                        sdebug_every_nth = -1;
5656                if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5657                        return true; /* ignore command causing timeout */
5658                else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5659                         scsi_medium_access_command(scp))
5660                        return true; /* time out reads and writes */
5661        }
5662        return false;
5663}
5664
5665static bool fake_host_busy(struct scsi_cmnd *scp)
5666{
5667        return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
5668                (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5669}
5670
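/*
 * queuecommand entry point. The CDB opcode indexes opcode_ind_arr[] to find
 * an opcode_info_t entry; opcodes shared by several commands (differing only
 * in service action) are resolved by walking the entry's ->arrp chain. The
 * matched entry supplies the resp_* handler (pfp) and the flags that drive
 * unit attention, strict CDB-mask checking and response delay handling.
 */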
5671static int scsi_debug_queuecommand(struct Scsi_Host *shost,
5672                                   struct scsi_cmnd *scp)
5673{
5674        u8 sdeb_i;
5675        struct scsi_device *sdp = scp->device;
5676        const struct opcode_info_t *oip;
5677        const struct opcode_info_t *r_oip;
5678        struct sdebug_dev_info *devip;
5679        u8 *cmd = scp->cmnd;
5680        int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5681        int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
5682        int k, na;
5683        int errsts = 0;
5684        u32 flags;
5685        u16 sa;
5686        u8 opcode = cmd[0];
5687        bool has_wlun_rl;
5688
5689        scsi_set_resid(scp, 0);
5690        if (sdebug_statistics)
5691                atomic_inc(&sdebug_cmnd_count);
5692        if (unlikely(sdebug_verbose &&
5693                     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
5694                char b[120];
5695                int n, len, sb;
5696
5697                len = scp->cmd_len;
5698                sb = (int)sizeof(b);
5699                if (len > 32)
5700                        strcpy(b, "too long, over 32 bytes");
5701                else {
5702                        for (k = 0, n = 0; k < len && n < sb; ++k)
5703                                n += scnprintf(b + n, sb - n, "%02x ",
5704                                               (u32)cmd[k]);
5705                }
5706                sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
5707                            blk_mq_unique_tag(scp->request), b);
5708        }
5709        if (fake_host_busy(scp))
5710                return SCSI_MLQUEUE_HOST_BUSY;
5711        has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
5712        if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
5713                goto err_out;
5714
5715        sdeb_i = opcode_ind_arr[opcode];        /* fully mapped */
5716        oip = &opcode_info_arr[sdeb_i];         /* safe if table consistent */
5717        devip = (struct sdebug_dev_info *)sdp->hostdata;
5718        if (unlikely(!devip)) {
5719                devip = find_build_dev_info(sdp);
5720                if (NULL == devip)
5721                        goto err_out;
5722        }
5723        na = oip->num_attached;
5724        r_pfp = oip->pfp;
5725        if (na) {       /* multiple commands with this opcode */
5726                r_oip = oip;
5727                if (FF_SA & r_oip->flags) {
5728                        if (F_SA_LOW & oip->flags)
5729                                sa = 0x1f & cmd[1];
5730                        else
5731                                sa = get_unaligned_be16(cmd + 8);
5732                        for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5733                                if (opcode == oip->opcode && sa == oip->sa)
5734                                        break;
5735                        }
5736                } else {   /* since no service action, only check opcode */
5737                        for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5738                                if (opcode == oip->opcode)
5739                                        break;
5740                        }
5741                }
5742                if (k > na) {
5743                        if (F_SA_LOW & r_oip->flags)
5744                                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5745                        else if (F_SA_HIGH & r_oip->flags)
5746                                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5747                        else
5748                                mk_sense_invalid_opcode(scp);
5749                        goto check_cond;
5750                }
5751        }       /* else (when na==0) we assume the oip is a match */
5752        flags = oip->flags;
5753        if (unlikely(F_INV_OP & flags)) {
5754                mk_sense_invalid_opcode(scp);
5755                goto check_cond;
5756        }
5757        if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
5758                if (sdebug_verbose)
5759                        sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
5760                                    my_name, opcode);
5761                mk_sense_invalid_opcode(scp);
5762                goto check_cond;
5763        }
5764        if (unlikely(sdebug_strict)) {  /* check cdb against mask */
5765                u8 rem;
5766                int j;
5767
5768                for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5769                        rem = ~oip->len_mask[k] & cmd[k];
5770                        if (rem) {
5771                                for (j = 7; j >= 0; --j, rem <<= 1) {
5772                                        if (0x80 & rem)
5773                                                break;
5774                                }
5775                                mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5776                                goto check_cond;
5777                        }
5778                }
5779        }
5780        if (unlikely(!(F_SKIP_UA & flags) &&
5781                     find_first_bit(devip->uas_bm,
5782                                    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
5783                errsts = make_ua(scp, devip);
5784                if (errsts)
5785                        goto check_cond;
5786        }
5787        if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
5788                mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5789                if (sdebug_verbose)
5790                        sdev_printk(KERN_INFO, sdp,
5791                                    "%s reports: Not ready: initializing command required\n",
5792                                    my_name);
5793                errsts = check_condition_result;
5794                goto fini;
5795        }
5796        if (sdebug_fake_rw && (F_FAKE_RW & flags))
5797                goto fini;
5798        if (unlikely(sdebug_every_nth)) {
5799                if (fake_timeout(scp))
5800                        return 0;       /* ignore command: make trouble */
5801        }
5802        if (likely(oip->pfp))
5803                pfp = oip->pfp; /* calls a resp_* function */
5804        else
5805                pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
5806
5807fini:
5808        if (F_DELAY_OVERR & flags)
5809                return schedule_resp(scp, devip, errsts, pfp, 0, 0);
5810        else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
5811                                            sdebug_ndelay > 10000)) {
5812                /*
5813                 * Skip long delays if ndelay <= 10 microseconds. Otherwise
5814                 * Start Stop Unit (SSU) gets at least a 1 second delay, or
5815                 * sdebug_jdelay seconds when sdebug_jdelay > 1. Synchronize
5816                 * Cache gets 1/20 of the SSU delay.
5817                 */
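                /*
                 * e.g. (illustrative, HZ=250, USER_HZ=100): jdelay=2 and
                 * denom=1 give mult_frac(200, 250, 100) = 500 jiffies (2 s);
                 * with F_SYNC_DELAY, denom=20 gives 25 jiffies.
                 */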
5818                int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
5819                int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
5820
5821                jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
5822                return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
5823        } else
5824                return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
5825                                     sdebug_ndelay);
5826check_cond:
5827        return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
5828err_out:
5829        return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
5830}
5831
5832static struct scsi_host_template sdebug_driver_template = {
5833        .show_info =            scsi_debug_show_info,
5834        .write_info =           scsi_debug_write_info,
5835        .proc_name =            sdebug_proc_name,
5836        .name =                 "SCSI DEBUG",
5837        .info =                 scsi_debug_info,
5838        .slave_alloc =          scsi_debug_slave_alloc,
5839        .slave_configure =      scsi_debug_slave_configure,
5840        .slave_destroy =        scsi_debug_slave_destroy,
5841        .ioctl =                scsi_debug_ioctl,
5842        .queuecommand =         scsi_debug_queuecommand,
5843        .change_queue_depth =   sdebug_change_qdepth,
5844        .eh_abort_handler =     scsi_debug_abort,
5845        .eh_device_reset_handler = scsi_debug_device_reset,
5846        .eh_target_reset_handler = scsi_debug_target_reset,
5847        .eh_bus_reset_handler = scsi_debug_bus_reset,
5848        .eh_host_reset_handler = scsi_debug_host_reset,
5849        .can_queue =            SDEBUG_CANQUEUE,
5850        .this_id =              7,
5851        .sg_tablesize =         SG_MAX_SEGMENTS,
5852        .cmd_per_lun =          DEF_CMD_PER_LUN,
5853        .max_sectors =          -1U,
5854        .use_clustering =       DISABLE_CLUSTERING,
5855        .module =               THIS_MODULE,
5856        .track_queue_depth =    1,
5857};
5858
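/*
 * Bus probe routine, called for each registered pseudo adapter: allocate a
 * Scsi_Host sized from sdebug_max_queue/submit_queues, advertise the T10 PI
 * (DIF/DIX) capabilities implied by sdebug_dif and sdebug_dix, then add and
 * scan the host.
 */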
5859static int sdebug_driver_probe(struct device *dev)
5860{
5861        int error = 0;
5862        struct sdebug_host_info *sdbg_host;
5863        struct Scsi_Host *hpnt;
5864        int hprot;
5865
5866        sdbg_host = to_sdebug_host(dev);
5867
5868        sdebug_driver_template.can_queue = sdebug_max_queue;
5869        if (sdebug_clustering)
5870                sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5871        hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5872        if (NULL == hpnt) {
5873                pr_err("scsi_host_alloc failed\n");
5874                error = -ENODEV;
5875                return error;
5876        }
5877        if (submit_queues > nr_cpu_ids) {
5878                pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
5879                        my_name, submit_queues, nr_cpu_ids);
5880                submit_queues = nr_cpu_ids;
5881        }
5882        /* Decide whether to tell scsi subsystem that we want mq */
5883        /* Following should give the same answer for each host */
5884        if (shost_use_blk_mq(hpnt))
5885                hpnt->nr_hw_queues = submit_queues;
5886
5887        sdbg_host->shost = hpnt;
5888        *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5889        if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5890                hpnt->max_id = sdebug_num_tgts + 1;
5891        else
5892                hpnt->max_id = sdebug_num_tgts;
5893        /* = sdebug_max_luns; */
5894        hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5895
5896        hprot = 0;
5897
5898        switch (sdebug_dif) {
5899
5900        case T10_PI_TYPE1_PROTECTION:
5901                hprot = SHOST_DIF_TYPE1_PROTECTION;
5902                if (sdebug_dix)
5903                        hprot |= SHOST_DIX_TYPE1_PROTECTION;
5904                break;
5905
5906        case T10_PI_TYPE2_PROTECTION:
5907                hprot = SHOST_DIF_TYPE2_PROTECTION;
5908                if (sdebug_dix)
5909                        hprot |= SHOST_DIX_TYPE2_PROTECTION;
5910                break;
5911
5912        case T10_PI_TYPE3_PROTECTION:
5913                hprot = SHOST_DIF_TYPE3_PROTECTION;
5914                if (sdebug_dix)
5915                        hprot |= SHOST_DIX_TYPE3_PROTECTION;
5916                break;
5917
5918        default:
5919                if (sdebug_dix)
5920                        hprot |= SHOST_DIX_TYPE0_PROTECTION;
5921                break;
5922        }
5923
5924        scsi_host_set_prot(hpnt, hprot);
5925
5926        if (have_dif_prot || sdebug_dix)
5927                pr_info("host protection%s%s%s%s%s%s%s\n",
5928                        (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5929                        (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5930                        (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5931                        (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5932                        (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5933                        (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5934                        (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5935
5936        if (sdebug_guard == 1)
5937                scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5938        else
5939                scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5940
5941        sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5942        sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5943        if (sdebug_every_nth)   /* need stats counters for every_nth */
5944                sdebug_statistics = true;
5945        error = scsi_add_host(hpnt, &sdbg_host->dev);
5946        if (error) {
5947                pr_err("scsi_add_host failed\n");
5948                error = -ENODEV;
5949                scsi_host_put(hpnt);
5950        } else
5951                scsi_scan_host(hpnt);
5952
5953        return error;
5954}
5955
5956static int sdebug_driver_remove(struct device *dev)
5957{
5958        struct sdebug_host_info *sdbg_host;
5959        struct sdebug_dev_info *sdbg_devinfo, *tmp;
5960
5961        sdbg_host = to_sdebug_host(dev);
5962
5963        if (!sdbg_host) {
5964                pr_err("Unable to locate host info\n");
5965                return -ENODEV;
5966        }
5967
5968        scsi_remove_host(sdbg_host->shost);
5969
5970        list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5971                                 dev_list) {
5972                list_del(&sdbg_devinfo->dev_list);
5973                kfree(sdbg_devinfo);
5974        }
5975
5976        scsi_host_put(sdbg_host->shost);
5977        return 0;
5978}
5979
5980static int pseudo_lld_bus_match(struct device *dev,
5981                                struct device_driver *dev_driver)
5982{
5983        return 1;
5984}
5985
5986static struct bus_type pseudo_lld_bus = {
5987        .name = "pseudo",
5988        .match = pseudo_lld_bus_match,
5989        .probe = sdebug_driver_probe,
5990        .remove = sdebug_driver_remove,
5991        .drv_groups = sdebug_drv_groups,
5992};
5993