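/*
 * scsi_debug: simulated SCSI low-level driver.  Presents ram-backed pseudo
 * disks to the SCSI mid-level so that command, error-handling and
 * protection-information (DIF/DIX) paths can be exercised without real
 * hardware.
 */
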
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

#define SCSI_DEBUG_VERSION "1.84"
static const char *scsi_debug_version_date = "20140706";

#define MY_NAME "scsi_debug"

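/* Additional Sense Codes (ASC) and qualifiers used when building sense data */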
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define ADDR_OUT_OF_RANGE 0x21
#define INVALID_COMMAND_OPCODE 0x20
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2
#define MODE_CHANGED_ASCQ 0x1
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e

#define ACK_NAK_TO 0x3

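/* Default values for the driver's module parameters */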
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1

#define DEF_ATO 1
#define DEF_DELAY 1
#define DEF_DEV_SIZE_MB 8
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 64
#define DEF_PHYSBLK_EXP 0
#define DEF_PTYPE 0
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 5
#define DEF_SECTOR_SIZE 512
#define DEF_TAGGED_QUEUING 0
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DELAY_OVERRIDDEN -9999

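/* Bit-mask values accepted via the "opts" module parameter (scsi_debug_opts) */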
#define SCSI_DEBUG_OPT_NOISE 1
#define SCSI_DEBUG_OPT_MEDIUM_ERR 2
#define SCSI_DEBUG_OPT_TIMEOUT 4
#define SCSI_DEBUG_OPT_RECOVERED_ERR 8
#define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
#define SCSI_DEBUG_OPT_DIF_ERR 32
#define SCSI_DEBUG_OPT_DIX_ERR 64
#define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
#define SCSI_DEBUG_OPT_SHORT_TRANSFER 0x100
#define SCSI_DEBUG_OPT_Q_NOISE 0x200
#define SCSI_DEBUG_OPT_ALL_TSF 0x400
#define SCSI_DEBUG_OPT_RARE_TSF 0x800
#define SCSI_DEBUG_OPT_N_WCE 0x1000
#define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
#define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
#define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)

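/* Per-device unit attention conditions; bit positions within uas_bm[] */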
#define SDEBUG_UA_POR 0
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_NUM_UAS 3

#define UAS_ONLY 1
#define UAS_TUR 0

#define OPT_MEDIUM_ERR_ADDR 0x1234
#define OPT_MEDIUM_ERR_NUM 10

#define SAM2_LUN_ADDRESS_METHOD 0
#define SAM2_WLUN_REPORT_LUNS 0xc101

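/*
 * The queued command array is sized in whole longs so that
 * queued_in_use_bm[] can be handled with the standard bitmap helpers.
 */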
#define SCSI_DEBUG_CANQUEUE_WORDS 9
#define SCSI_DEBUG_CANQUEUE (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN 255

#if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
#warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
#endif

static int scsi_debug_add_host = DEF_NUM_HOST;
static int scsi_debug_ato = DEF_ATO;
static int scsi_debug_delay = DEF_DELAY;
static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
static int scsi_debug_dif = DEF_DIF;
static int scsi_debug_dix = DEF_DIX;
static int scsi_debug_dsense = DEF_D_SENSE;
static int scsi_debug_every_nth = DEF_EVERY_NTH;
static int scsi_debug_fake_rw = DEF_FAKE_RW;
static unsigned int scsi_debug_guard = DEF_GUARD;
static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int scsi_debug_max_luns = DEF_MAX_LUNS;
static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
static atomic_t retired_max_queue;
static int scsi_debug_ndelay = DEF_NDELAY;
static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
static int scsi_debug_no_uld = 0;
static int scsi_debug_num_parts = DEF_NUM_PARTS;
static int scsi_debug_num_tgts = DEF_NUM_TGTS;
static int scsi_debug_opt_blks = DEF_OPT_BLKS;
static int scsi_debug_opts = DEF_OPTS;
static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
static int scsi_debug_ptype = DEF_PTYPE;
static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int scsi_debug_lbpu = DEF_LBPU;
static unsigned int scsi_debug_lbpws = DEF_LBPWS;
static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
static bool scsi_debug_removable = DEF_REMOVABLE;
static bool scsi_debug_clustering;
static bool scsi_debug_host_lock = DEF_HOST_LOCK;

static atomic_t sdebug_cmnd_count;
static atomic_t sdebug_completions;
static atomic_t sdebug_a_tsf;

#define DEV_READONLY(TGT) (0)

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;

static int sdebug_heads;
static int sdebug_cylinders_per;
static int sdebug_sectors_per;

#define SDEBUG_MAX_PARTS 4

#define SCSI_DEBUG_MAX_CMD_LEN 32

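/* True when logical block provisioning (UNMAP / WRITE SAME) is enabled */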
static unsigned int scsi_debug_lbp(void)
{
	return ((0 == scsi_debug_fake_rw) &&
		(scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
}

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	struct sdebug_host_info *sdbg_host;
	u64 wlun;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	char stopped;
	char used;
};

struct sdebug_host_info {
	struct list_head host_list;
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

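/* Book-keeping for commands whose completion is deferred via a timer, tasklet or hrtimer */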
struct sdebug_hrtimer {
	struct hrtimer hrt;
	int qa_indx;
};

struct sdebug_queued_cmd {
	struct timer_list *cmnd_timerp;
	struct tasklet_struct *tletp;
	struct sdebug_hrtimer *sd_hrtp;
	struct scsi_cmnd * a_cmnd;
};
static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];

static unsigned char * fake_storep;
static struct sd_dif_tuple *dif_storep;
static void *map_storep;

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

static DEFINE_SPINLOCK(queued_arr_lock);
static DEFINE_RWLOCK(atomic_rw);

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name = sdebug_proc_name,
	.bus = &pseudo_lld_bus,
};

static const int check_condition_result =
	(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};

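/* Map an LBA to its location in the ramdisk store, wrapping modulo sdebug_store_sectors */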
static void *fake_store(unsigned long long lba)
{
	lba = do_div(lba, sdebug_store_sectors);

	return fake_storep + lba * scsi_debug_sector_size;
}

static struct sd_dif_tuple *dif_store(sector_t sector)
{
	sector = do_div(sector, sdebug_store_sectors);

	return dif_storep + sector;
}

static int sdebug_add_adapter(void);
static void sdebug_remove_adapter(void);

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (scsi_debug_num_tgts > hpnt->this_id))
			hpnt->max_id = scsi_debug_num_tgts + 1;
		else
			hpnt->max_id = scsi_debug_num_tgts;

		hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
	}
	spin_unlock(&sdebug_host_list_lock);
}

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	unsigned char *sbuff;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

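/*
 * Decode the starting LBA and transfer length (and, for 32-byte CDBs, the
 * expected initial logical block reference tag) from the CDB.
 */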
static void get_data_transfer_info(unsigned char *cmd,
				   unsigned long long *lba, unsigned int *num,
				   u32 *ei_lba)
{
	*ei_lba = 0;

	switch (*cmd) {
	case VARIABLE_LENGTH_CMD:
		*lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
			(u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
			(u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
			(u64)cmd[13] << 48 | (u64)cmd[12] << 56;

		*ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
			(u32)cmd[21] << 16 | (u32)cmd[20] << 24;

		*num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
			(u32)cmd[28] << 24;
		break;

	case WRITE_SAME_16:
	case WRITE_16:
	case READ_16:
		*lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
			(u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
			(u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
			(u64)cmd[3] << 48 | (u64)cmd[2] << 56;

		*num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
			(u32)cmd[10] << 24;
		break;
	case WRITE_12:
	case READ_12:
		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
			(u32)cmd[2] << 24;

		*num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
			(u32)cmd[6] << 24;
		break;
	case WRITE_SAME:
	case WRITE_10:
	case READ_10:
	case XDWRITEREAD_10:
		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
			(u32)cmd[2] << 24;

		*num = (u32)cmd[8] | (u32)cmd[7] << 8;
		break;
	case WRITE_6:
	case READ_6:
		*lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
			(u32)(cmd[1] & 0x1f) << 16;
		*num = (0 == cmd[4]) ? 256 : cmd[4];
		break;
	default:
		break;
	}
}

static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
}

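/*
 * Report any pending unit attention for this device; when uas_only is
 * UAS_TUR also report NOT READY while the unit is stopped.  Returns 0 or
 * check_condition_result.
 */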
static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
			   struct sdebug_dev_info * devip)
{
	int k;
	bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					UA_RESET_ASC, POWER_ON_RESET_ASCQ);
			if (debug)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					UA_RESET_ASC, BUS_RESET_ASCQ);
			if (debug)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
			if (debug)
				cp = "mode parameters changed";
			break;
		default:
			pr_warn("%s: unexpected unit attention code=%d\n",
				__func__, k);
			if (debug)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (debug)
			sdev_printk(KERN_INFO, SCpnt->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	if ((UAS_TUR == uas_only) && devip->stopped) {
		mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
				0x2);
		if (debug)
			sdev_printk(KERN_INFO, SCpnt->device,
				    "%s reports: Not ready: %s\n", my_name,
				    "initializing command required");
		return check_condition_result;
	}
	return 0;
}

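/*
 * Copy 'arr' into the command's data-in buffer; returns 0 if ok, else
 * DID_ERROR << 16.  fetch_to_dev_buffer() below does the reverse for the
 * data-out buffer and returns the number of bytes fetched (-1 on error).
 */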
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	if (!sdb->length)
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return (DID_ERROR << 16);

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}

static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}

static const char * inq_vendor_id = "Linux   ";
static const char * inq_product_id = "scsi_debug      ";
static const char *inq_product_rev = "0184";

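/* Build the Device Identification VPD page (0x83) */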
static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
			   int target_dev_id, int dev_id_num,
			   const char * dev_id_str,
			   int dev_id_str_len)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;

	arr[0] = 0x2;
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], inq_vendor_id, 8);
	memcpy(&arr[12], inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {

		arr[num++] = 0x1;
		arr[num++] = 0x3;
		arr[num++] = 0x0;
		arr[num++] = 0x8;
		arr[num++] = 0x53;
		arr[num++] = 0x33;
		arr[num++] = 0x33;
		arr[num++] = 0x30;
		arr[num++] = (dev_id_num >> 24);
		arr[num++] = (dev_id_num >> 16) & 0xff;
		arr[num++] = (dev_id_num >> 8) & 0xff;
		arr[num++] = dev_id_num & 0xff;

		arr[num++] = 0x61;
		arr[num++] = 0x94;
		arr[num++] = 0x0;
		arr[num++] = 0x4;
		arr[num++] = 0x0;
		arr[num++] = 0x0;
		arr[num++] = 0x0;
		arr[num++] = 0x1;
	}

	arr[num++] = 0x61;
	arr[num++] = 0x93;
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	arr[num++] = 0x52;
	arr[num++] = 0x22;
	arr[num++] = 0x22;
	arr[num++] = 0x20;
	arr[num++] = (port_a >> 24);
	arr[num++] = (port_a >> 16) & 0xff;
	arr[num++] = (port_a >> 8) & 0xff;
	arr[num++] = port_a & 0xff;

	arr[num++] = 0x61;
	arr[num++] = 0x95;
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	arr[num++] = (port_group_id >> 8) & 0xff;
	arr[num++] = port_group_id & 0xff;

	arr[num++] = 0x61;
	arr[num++] = 0xa3;
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	arr[num++] = 0x52;
	arr[num++] = 0x22;
	arr[num++] = 0x22;
	arr[num++] = 0x20;
	arr[num++] = (target_dev_id >> 24);
	arr[num++] = (target_dev_id >> 16) & 0xff;
	arr[num++] = (target_dev_id >> 8) & 0xff;
	arr[num++] = target_dev_id & 0xff;

	arr[num++] = 0x63;
	arr[num++] = 0xa8;
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.52222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}

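/* Software Interface Identification VPD page (0x84) and Management Network Addresses VPD page (0x85) */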
static unsigned char vpd84_data[] = {
	0x22,0x22,0x22,0x0,0xbb,0x0,
	0x22,0x22,0x22,0x0,0xbb,0x1,
	0x22,0x22,0x22,0x0,0xbb,0x2,
};

static int inquiry_evpd_84(unsigned char * arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

static int inquiry_evpd_85(unsigned char * arr)
{
	int num = 0;
	const char * na1 = "https://www.kernel.org/config";
	const char * na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;
	arr[num++] = 0x0;
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;
	arr[num++] = 0x0;
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}

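/* SCSI Ports VPD page (0x88): one descriptor per simulated target port */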
static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;
	arr[num++] = 0x0;
	arr[num++] = 0x0;
	arr[num++] = 0x1;
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;

	arr[num++] = 0x61;
	arr[num++] = 0x93;
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	arr[num++] = 0x52;
	arr[num++] = 0x22;
	arr[num++] = 0x22;
	arr[num++] = 0x20;
	arr[num++] = (port_a >> 24);
	arr[num++] = (port_a >> 16) & 0xff;
	arr[num++] = (port_a >> 8) & 0xff;
	arr[num++] = port_a & 0xff;

	arr[num++] = 0x0;
	arr[num++] = 0x0;
	arr[num++] = 0x0;
	arr[num++] = 0x2;
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;

	arr[num++] = 0x61;
	arr[num++] = 0x93;
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	arr[num++] = 0x52;
	arr[num++] = 0x22;
	arr[num++] = 0x22;
	arr[num++] = 0x20;
	arr[num++] = (port_b >> 24);
	arr[num++] = (port_b >> 16) & 0xff;
	arr[num++] = (port_b >> 8) & 0xff;
	arr[num++] = port_b & 0xff;

	return num;
}

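/* ATA Information VPD page (0x89) */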
static unsigned char vpd89_data[] = {
0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

static int inquiry_evpd_89(unsigned char * arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}

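/* Block Limits VPD page (0xb0) */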
static unsigned char vpdb0_data[] = {
	0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

static int inquiry_evpd_b0(unsigned char * arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	gran = 1 << scsi_debug_physblk_exp;
	arr[2] = (gran >> 8) & 0xff;
	arr[3] = gran & 0xff;

	if (sdebug_store_sectors > 0x400) {
		arr[4] = (sdebug_store_sectors >> 24) & 0xff;
		arr[5] = (sdebug_store_sectors >> 16) & 0xff;
		arr[6] = (sdebug_store_sectors >> 8) & 0xff;
		arr[7] = sdebug_store_sectors & 0xff;
	}

	put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);

	if (scsi_debug_lbpu) {
		put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);

		put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
	}

	if (scsi_debug_unmap_alignment) {
		put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80;
	}

	put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);

	put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);

	return 0x3c;
}

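/* Block Device Characteristics (0xb1) and Logical Block Provisioning (0xb2) VPD pages */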
static int inquiry_evpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;
	arr[2] = 0;
	arr[3] = 5;

	return 0x3c;
}

static int inquiry_evpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;

	if (scsi_debug_lbpu)
		arr[1] = 1 << 7;

	if (scsi_debug_lbpws)
		arr[1] |= 1 << 6;

	if (scsi_debug_lbpws10)
		arr[1] |= 1 << 5;

	if (scsi_debug_lbprz)
		arr[1] |= 1 << 2;

	return 0x4;
}

#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

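/* Respond to INQUIRY: standard data or the requested VPD page */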
static int resp_inquiry(struct scsi_cmnd *scp, int target,
			struct sdebug_dev_info * devip)
{
	unsigned char pq_pdt;
	unsigned char * arr;
	unsigned char *cmd = (unsigned char *)scp->cmnd;
	int alloc_len, n, ret;

	alloc_len = (cmd[3] << 8) + cmd[4];
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	if (devip->wlun)
		pq_pdt = 0x1e;
	else if (scsi_debug_no_lun_0 && (0 == devip->lun))
		pq_pdt = 0x7f;
	else
		pq_pdt = (scsi_debug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
				0);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
				(devip->channel & 0x7f);
		if (0 == scsi_debug_vpd_use_hostno)
			host_no = 0;
		lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				(devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) {
			arr[1] = cmd[2];
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x80;
			arr[n++] = 0x83;
			arr[n++] = 0x84;
			arr[n++] = 0x85;
			arr[n++] = 0x86;
			arr[n++] = 0x87;
			arr[n++] = 0x88;
			arr[n++] = 0x89;
			arr[n++] = 0xb0;
			arr[n++] = 0xb1;
			if (scsi_debug_lbp())
				arr[n++] = 0xb2;
			arr[3] = n - 4;
		} else if (0x80 == cmd[2]) {
			arr[1] = cmd[2];
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) {
			arr[1] = cmd[2];
			arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
						 target_dev_id, lu_id_num,
						 lu_id_str, len);
		} else if (0x84 == cmd[2]) {
			arr[1] = cmd[2];
			arr[3] = inquiry_evpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) {
			arr[1] = cmd[2];
			arr[3] = inquiry_evpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) {
			arr[1] = cmd[2];
			arr[3] = 0x3c;
			if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
				arr[4] = 0x4;
			else if (scsi_debug_dif)
				arr[4] = 0x5;
			else
				arr[4] = 0x0;
			arr[5] = 0x7;
		} else if (0x87 == cmd[2]) {
			arr[1] = cmd[2];
			arr[3] = 0x8;
			arr[4] = 0x2;
			arr[6] = 0x80;
			arr[8] = 0x18;
			arr[10] = 0x82;
		} else if (0x88 == cmd[2]) {
			arr[1] = cmd[2];
			arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
		} else if (0x89 == cmd[2]) {
			arr[1] = cmd[2];
			n = inquiry_evpd_89(&arr[4]);
			arr[2] = (n >> 8);
			arr[3] = (n & 0xff);
		} else if (0xb0 == cmd[2]) {
			arr[1] = cmd[2];
			arr[3] = inquiry_evpd_b0(&arr[4]);
		} else if (0xb1 == cmd[2]) {
			arr[1] = cmd[2];
			arr[3] = inquiry_evpd_b1(&arr[4]);
		} else if (0xb2 == cmd[2]) {
			arr[1] = cmd[2];
			arr[3] = inquiry_evpd_b2(&arr[4]);
		} else {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			kfree(arr);
			return check_condition_result;
		}
		len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
					   min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}

	arr[1] = scsi_debug_removable ? 0x80 : 0;
	arr[2] = scsi_debug_scsi_level;
	arr[3] = 2;
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = scsi_debug_dif ? 1 : 0;
	if (0 == scsi_debug_vpd_use_hostno)
		arr[5] = 0x10;
	arr[6] = 0x10;

	arr[7] = 0xa;
	memcpy(&arr[8], inq_vendor_id, 8);
	memcpy(&arr[16], inq_product_id, 16);
	memcpy(&arr[32], inq_product_rev, 4);

	arr[58] = 0x0; arr[59] = 0x77;
	arr[60] = 0x3; arr[61] = 0x14;
	n = 62;
	if (scsi_debug_ptype == 0) {
		arr[n++] = 0x3; arr[n++] = 0x3d;
	} else if (scsi_debug_ptype == 1) {
		arr[n++] = 0x3; arr[n++] = 0x60;
	}
	arr[n++] = 0xc; arr[n++] = 0xf;
	ret = fill_from_dev_buffer(scp, arr,
				   min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}

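/*
 * Respond to REQUEST SENSE; returns a THRESHOLD EXCEEDED sense when the
 * Informational Exceptions mode page has the TEST bit set and MRIE == 6,
 * otherwise whatever is in the command's sense buffer.
 */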
static int resp_requests(struct scsi_cmnd * scp,
			 struct sdebug_dev_info * devip)
{
	unsigned char * sbuff;
	unsigned char *cmd = (unsigned char *)scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
	int want_dsense;
	int len = 18;

	memset(arr, 0, sizeof(arr));
	want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
	sbuff = scp->sense_buffer;
	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		if (want_dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;
			arr[7] = 0xa;
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;
		}
	} else {
		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
		if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
			memset(arr, 0, sizeof(arr));
			arr[0] = 0x72;
			arr[1] = sbuff[2];
			arr[2] = sbuff[12];
			arr[3] = sbuff[13];
			len = 8;
		}
	}
	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
	return fill_from_dev_buffer(scp, arr, len);
}

static int resp_start_stop(struct scsi_cmnd * scp,
			   struct sdebug_dev_info * devip)
{
	unsigned char *cmd = (unsigned char *)scp->cmnd;
	int power_cond, errsts, start;

	errsts = check_readiness(scp, UAS_ONLY, devip);
	if (errsts)
		return errsts;
	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
				0);
		return check_condition_result;
	}
	start = cmd[4] & 1;
	if (start == devip->stopped)
		devip->stopped = !start;
	return 0;
}

static sector_t get_sdebug_capacity(void)
{
	if (scsi_debug_virtual_gb > 0)
		return (sector_t)scsi_debug_virtual_gb *
			(1073741824 / scsi_debug_sector_size);
	else
		return sdebug_store_sectors;
}

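/* READ CAPACITY(10) and READ CAPACITY(16) */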
#define SDEBUG_READCAP_ARR_SZ 8
static int resp_readcap(struct scsi_cmnd * scp,
			struct sdebug_dev_info * devip)
{
	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
	unsigned int capac;
	int errsts;

	errsts = check_readiness(scp, UAS_ONLY, devip);
	if (errsts)
		return errsts;

	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
	if (sdebug_capacity < 0xffffffff) {
		capac = (unsigned int)sdebug_capacity - 1;
		arr[0] = (capac >> 24);
		arr[1] = (capac >> 16) & 0xff;
		arr[2] = (capac >> 8) & 0xff;
		arr[3] = capac & 0xff;
	} else {
		arr[0] = 0xff;
		arr[1] = 0xff;
		arr[2] = 0xff;
		arr[3] = 0xff;
	}
	arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
	arr[7] = scsi_debug_sector_size & 0xff;
	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}

#define SDEBUG_READCAP16_ARR_SZ 32
static int resp_readcap16(struct scsi_cmnd * scp,
			  struct sdebug_dev_info * devip)
{
	unsigned char *cmd = (unsigned char *)scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	unsigned long long capac;
	int errsts, k, alloc_len;

	errsts = check_readiness(scp, UAS_ONLY, devip);
	if (errsts)
		return errsts;
	alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
		     + cmd[13]);

	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	capac = sdebug_capacity - 1;
	for (k = 0; k < 8; ++k, capac >>= 8)
		arr[7 - k] = capac & 0xff;
	arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
	arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
	arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
	arr[11] = scsi_debug_sector_size & 0xff;
	arr[13] = scsi_debug_physblk_exp & 0xf;
	arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80;
		if (scsi_debug_lbprz)
			arr[14] |= 0x40;
	}

	arr[15] = scsi_debug_lowest_aligned & 0xff;

	if (scsi_debug_dif) {
		arr[12] = (scsi_debug_dif - 1) << 1;
		arr[12] |= 1;
	}

	return fill_from_dev_buffer(scp, arr,
				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
}

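/* REPORT TARGET PORT GROUPS: two port groups, each with one relative target port */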
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

static int resp_report_tgtpgs(struct scsi_cmnd * scp,
			      struct sdebug_dev_info * devip)
{
	unsigned char *cmd = (unsigned char *)scp->cmnd;
	unsigned char * arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
		+ cmd[9]);

	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;

	port_a = 0x1;
	port_b = 0x2;
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
		       (devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
		       (devip->channel & 0x7f) + 0x80;

	n = 4;
	if (0 == scsi_debug_vpd_use_hostno) {
		arr[n++] = host_no % 3;
		arr[n++] = 0x0F;
	} else {
		arr[n++] = 0x0;
		arr[n++] = 0x01;
	}
	arr[n++] = (port_group_a >> 8) & 0xff;
	arr[n++] = port_group_a & 0xff;
	arr[n++] = 0;
	arr[n++] = 0;
	arr[n++] = 0;
	arr[n++] = 0x1;
	arr[n++] = 0;
	arr[n++] = 0;
	arr[n++] = (port_a >> 8) & 0xff;
	arr[n++] = port_a & 0xff;
	arr[n++] = 3;
	arr[n++] = 0x08;
	arr[n++] = (port_group_b >> 8) & 0xff;
	arr[n++] = port_group_b & 0xff;
	arr[n++] = 0;
	arr[n++] = 0;
	arr[n++] = 0;
	arr[n++] = 0x1;
	arr[n++] = 0;
	arr[n++] = 0;
	arr[n++] = (port_b >> 8) & 0xff;
	arr[n++] = port_b & 0xff;

	rlen = n - 4;
	arr[0] = (rlen >> 24) & 0xff;
	arr[1] = (rlen >> 16) & 0xff;
	arr[2] = (rlen >> 8) & 0xff;
	arr[3] = rlen & 0xff;

	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}

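/*
 * Mode page builders.  Each copies its page into 'p' and returns the page
 * length; pcontrol == 1 requests the changeable-values mask.
 */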
static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
{
	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
					5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}

static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
{
	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}

static int resp_format_pg(unsigned char * p, int pcontrol, int target)
{
	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0x40, 0, 0, 0};

	memcpy(p, format_pg, sizeof(format_pg));
	p[10] = (sdebug_sectors_per >> 8) & 0xff;
	p[11] = sdebug_sectors_per & 0xff;
	p[12] = (scsi_debug_sector_size >> 8) & 0xff;
	p[13] = scsi_debug_sector_size & 0xff;
	if (scsi_debug_removable)
		p[20] |= 0x20;
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(format_pg) - 2);
	return sizeof(format_pg);
}

static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
{
	unsigned char ch_caching_pg[] = { 0x4, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};

	if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
		caching_pg[2] &= ~0x4;
	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
	else if (2 == pcontrol)
		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
	return sizeof(caching_pg);
}

static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
{
	unsigned char ch_ctrl_m_pg[] = { 0x6, 0, 0, 0, 0, 0,
					 0, 0, 0, 0};
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				       0, 0, 0x2, 0x4b};

	if (scsi_debug_dsense)
		ctrl_m_pg[2] |= 0x4;
	else
		ctrl_m_pg[2] &= ~0x4;

	if (scsi_debug_ato)
		ctrl_m_pg[5] |= 0x80;

	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
	return sizeof(ctrl_m_pg);
}

static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
{
	unsigned char ch_iec_m_pg[] = { 0x4, 0xf, 0, 0, 0, 0,
					0, 0, 0x0, 0x0};
	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				      0, 0, 0x0, 0x0};

	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
	return sizeof(iec_m_pg);
}

static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
{
	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}

static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
			      int target_dev_id)
{
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
		0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
		0x2, 0, 0, 0, 0, 0, 0, 0,
		0x88, 0x99, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
		0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
		0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
		0x3, 0, 0, 0, 0, 0, 0, 0,
		0x88, 0x99, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	p[20] = (port_a >> 24);
	p[21] = (port_a >> 16) & 0xff;
	p[22] = (port_a >> 8) & 0xff;
	p[23] = port_a & 0xff;
	p[48 + 20] = (port_b >> 24);
	p[48 + 21] = (port_b >> 16) & 0xff;
	p[48 + 22] = (port_b >> 8) & 0xff;
	p[48 + 23] = port_b & 0xff;
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}

static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
{
	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
		};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}

#define SDEBUG_MAX_MSENSE_SZ 256

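/* MODE SENSE(6) and MODE SENSE(10) */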
static int resp_mode_sense(struct scsi_cmnd * scp, int target,
			   struct sdebug_dev_info * devip)
{
	unsigned char dbd, llbaa;
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
	unsigned char * ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = (unsigned char *)scp->cmnd;

	errsts = check_readiness(scp, UAS_ONLY, devip);
	if (errsts)
		return errsts;
	dbd = !!(cmd[1] & 0x8);
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
	if ((0 == scsi_debug_ptype) && (0 == dbd))
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;

	if (0 == scsi_debug_ptype)
		dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
	else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;
		arr[7] = bd_len;
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe) {
			ap[0] = 0xff;
			ap[1] = 0xff;
			ap[2] = 0xff;
			ap[3] = 0xff;
		} else {
			ap[0] = (sdebug_capacity >> 24) & 0xff;
			ap[1] = (sdebug_capacity >> 16) & 0xff;
			ap[2] = (sdebug_capacity >> 8) & 0xff;
			ap[3] = sdebug_capacity & 0xff;
		}
		ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
		ap[7] = scsi_debug_sector_size & 0xff;
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		unsigned long long capac = sdebug_capacity;

		for (k = 0; k < 8; ++k, capac >>= 8)
			ap[7 - k] = capac & 0xff;
		ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
		ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
		ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
		ap[15] = scsi_debug_sector_size & 0xff;
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
				0);
		return check_condition_result;
	}
	switch (pcode) {
	case 0x1:
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:
		len = resp_format_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x8:
		len = resp_caching_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0xa:
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			len += resp_format_pg(ap + len, pcontrol, target);
			len += resp_caching_pg(ap + len, pcontrol, target);
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
		} else {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		offset += len;
		break;
	default:
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
				0);
		return check_condition_result;
	}
	if (msense_6)
		arr[0] = offset - 1;
	else {
		arr[0] = ((offset - 2) >> 8) & 0xff;
		arr[1] = (offset - 2) & 0xff;
	}
	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
}

#define SDEBUG_MAX_MSELECT_SZ 512

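/* MODE SELECT: only the caching, control and informational-exceptions mode pages may be changed */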
static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
			    struct sdebug_dev_info * devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, errsts, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = (unsigned char *)scp->cmnd;

	errsts = check_readiness(scp, UAS_ONLY, devip);
	if (errsts)
		return errsts;
	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;
	sp = cmd[1] & 0x1;
	param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return (DID_ERROR << 16);
	else if ((res < param_len) &&
		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
	bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
	if (md_len > 2) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				INVALID_FIELD_IN_PARAM_LIST, 0);
		return check_condition_result;
	}
	off = bd_len + (mselect6 ? 4 : 8);
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);
	if (ps) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				INVALID_FIELD_IN_PARAM_LIST, 0);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);
	pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_buffer(scp, ILLEGAL_REQUEST,
			INVALID_FIELD_IN_PARAM_LIST, 0);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}

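/* Log pages (temperature, informational exceptions) and LOG SENSE */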
static int resp_temp_l_pg(unsigned char * arr)
{
	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
				     };

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}

static int resp_ie_l_pg(unsigned char * arr)
{
	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
				   };

	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
	if (iec_m_pg[2] & 0x4) {
		arr[4] = THRESHOLD_EXCEEDED;
		arr[5] = 0xff;
	}
	return sizeof(ie_l_pg);
}

#define SDEBUG_MAX_LSENSE_SZ 512

static int resp_log_sense(struct scsi_cmnd * scp,
			  struct sdebug_dev_info * devip)
{
	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = (unsigned char *)scp->cmnd;

	errsts = check_readiness(scp, UAS_ONLY, devip);
	if (errsts)
		return errsts;
	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;
	sp = cmd[1] & 0x1;
	if (ppc || sp) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = (cmd[7] << 8) + cmd[8];
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0xd;
			arr[n++] = 0x2f;
			arr[3] = n - 4;
			break;
		case 0xd:
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		arr[0] |= 0x40;
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;
			arr[n++] = 0x0;
			arr[n++] = 0xff;
			arr[n++] = 0xd;
			arr[n++] = 0x0;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;
			arr[3] = n - 4;
			break;
		case 0xd:
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;
			arr[3] = n - 4;
			break;
		case 0x2f:
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;
			arr[3] = n - 4;
			break;
		default:
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
	} else {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
				    min(len, SDEBUG_MAX_LSENSE_SZ));
}

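/* Reject accesses beyond the reported capacity or larger than the backing store */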
static int check_device_access_params(struct scsi_cmnd *scp,
				      unsigned long long lba, unsigned int num)
{
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
		return check_condition_result;
	}

	if (num > sdebug_store_sectors) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	return 0;
}

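/*
 * Copy between the command's scatter-gather list and the ramdisk store,
 * wrapping to the start of the store when the range crosses
 * sdebug_store_sectors.  Returns bytes copied, or -1 on a direction
 * mismatch.
 */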
static int do_device_access(struct scsi_cmnd *scmd,
			    unsigned long long lba, unsigned int num, int write)
{
	int ret;
	unsigned long long block, rest = 0;
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;
	size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
		       off_t);

	if (write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
		func = sg_pcopy_to_buffer;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
		func = sg_pcopy_from_buffer;
	}

	if (!sdb->length)
		return 0;
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;

	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = func(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * scsi_debug_sector_size),
		   (num - rest) * scsi_debug_sector_size, 0);
	if (ret != (num - rest) * scsi_debug_sector_size)
		return ret;

	if (rest) {
		ret += func(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * scsi_debug_sector_size,
			    (num - rest) * scsi_debug_sector_size);
	}

	return ret;
}

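/* Protection information (DIF/DIX) helpers */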
static __be16 dif_compute_csum(const void *buf, int len)
{
	__be16 csum;

	if (scsi_debug_guard)
		csum = (__force __be16)ip_compute_csum(buf, len);
	else
		csum = cpu_to_be16(crc_t10dif(buf, len));

	return csum;
}

static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, scsi_debug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
		       __func__,
		       (unsigned long)sector,
		       be16_to_cpu(sdt->guard_tag),
		       be16_to_cpu(csum));
		return 0x01;
	}
	if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("%s: REF check failed on sector %lu\n",
		       __func__, (unsigned long)sector);
		return 0x03;
	}
	if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("%s: REF check failed on sector %lu\n",
		       __func__, (unsigned long)sector);
		return 0x03;
	}
	return 0;
}

static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
		       scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}

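/* Verify the stored protection tuples for a read, then copy them to the protection scatter-gather list */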
static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	unsigned int i;
	struct sd_dif_tuple *sdt;
	sector_t sector;

	for (i = 0; i < sectors; i++, ei_lba++) {
		int ret;

		sector = start_sec + i;
		sdt = dif_store(sector);

		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
		if (ret) {
			dif_errors++;
			return ret;
		}
	}

	dif_copy_prot(SCpnt, start_sec, sectors, true);
	dix_reads++;

	return 0;
}

static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
		     unsigned int num, u32 ei_lba)
{
	unsigned long iflags;
	int ret;

	ret = check_device_access_params(SCpnt, lba, num);
	if (ret)
		return ret;

	if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
	    (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
	    ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {

		mk_sense_buffer(SCpnt, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);

		if (0x70 == (SCpnt->sense_buffer[0] & 0x7f)) {
			SCpnt->sense_buffer[0] |= 0x80;
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			SCpnt->sense_buffer[3] = (ret >> 24) & 0xff;
			SCpnt->sense_buffer[4] = (ret >> 16) & 0xff;
			SCpnt->sense_buffer[5] = (ret >> 8) & 0xff;
			SCpnt->sense_buffer[6] = ret & 0xff;
		}
		scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
		int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(SCpnt, lba, num, 0);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (ret == -1)
		return DID_ERROR << 16;

	scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret;

	return 0;
}

static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		for (j = 0, n = 0; j < 16; j++) {
			unsigned char c = buf[i+j];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", buf[i+j]);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", buf[i+j]);
		}
		pr_err("%04d: %s\n", i, b);
	}
}

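/* Verify the protection tuples supplied with a write against the data buffer before committing them to dif_storep */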
2069static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2070 unsigned int sectors, u32 ei_lba)
2071{
2072 int ret;
2073 struct sd_dif_tuple *sdt;
2074 void *daddr;
2075 sector_t sector = start_sec;
2076 int ppage_offset;
2077 int dpage_offset;
2078 struct sg_mapping_iter diter;
2079 struct sg_mapping_iter piter;
2080
2081 BUG_ON(scsi_sg_count(SCpnt) == 0);
2082 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2083
2084 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2085 scsi_prot_sg_count(SCpnt),
2086 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2087 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2088 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2089
2090
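	/* Walk the protection-information scatterlist, verifying each
	 * protection tuple against its corresponding data block. */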
2091 while (sg_miter_next(&piter)) {
2092 dpage_offset = 0;
2093 if (WARN_ON(!sg_miter_next(&diter))) {
2094 ret = 0x01;
2095 goto out;
2096 }
2097
2098 for (ppage_offset = 0; ppage_offset < piter.length;
2099 ppage_offset += sizeof(struct sd_dif_tuple)) {
2100
2101
2102
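			/* advance to the next data page once the current
			 * one has been consumed */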
2103 if (dpage_offset >= diter.length) {
2104 if (WARN_ON(!sg_miter_next(&diter))) {
2105 ret = 0x01;
2106 goto out;
2107 }
2108 dpage_offset = 0;
2109 }
2110
2111 sdt = piter.addr + ppage_offset;
2112 daddr = diter.addr + dpage_offset;
2113
2114 ret = dif_verify(sdt, daddr, sector, ei_lba);
2115 if (ret) {
2116 dump_sector(daddr, scsi_debug_sector_size);
2117 goto out;
2118 }
2119
2120 sector++;
2121 ei_lba++;
2122 dpage_offset += scsi_debug_sector_size;
2123 }
2124 diter.consumed = dpage_offset;
2125 sg_miter_stop(&diter);
2126 }
2127 sg_miter_stop(&piter);
2128
2129 dif_copy_prot(SCpnt, start_sec, sectors, false);
2130 dix_writes++;
2131
2132 return 0;
2133
2134out:
2135 dif_errors++;
2136 sg_miter_stop(&diter);
2137 sg_miter_stop(&piter);
2138 return ret;
2139}
2140
2141static unsigned long lba_to_map_index(sector_t lba)
2142{
2143 if (scsi_debug_unmap_alignment) {
2144 lba += scsi_debug_unmap_granularity -
2145 scsi_debug_unmap_alignment;
2146 }
2147 do_div(lba, scsi_debug_unmap_granularity);
2148
2149 return lba;
2150}
2151
2152static sector_t map_index_to_lba(unsigned long index)
2153{
2154 sector_t lba = index * scsi_debug_unmap_granularity;
2155
2156 if (scsi_debug_unmap_alignment) {
2157 lba -= scsi_debug_unmap_granularity -
2158 scsi_debug_unmap_alignment;
2159 }
2160
2161 return lba;
2162}
2163
2164static unsigned int map_state(sector_t lba, unsigned int *num)
2165{
2166 sector_t end;
2167 unsigned int mapped;
2168 unsigned long index;
2169 unsigned long next;
2170
2171 index = lba_to_map_index(lba);
2172 mapped = test_bit(index, map_storep);
2173
2174 if (mapped)
2175 next = find_next_zero_bit(map_storep, map_size, index);
2176 else
2177 next = find_next_bit(map_storep, map_size, index);
2178
2179 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
2180 *num = end - lba;
2181
2182 return mapped;
2183}
2184
2185static void map_region(sector_t lba, unsigned int len)
2186{
2187 sector_t end = lba + len;
2188
2189 while (lba < end) {
2190 unsigned long index = lba_to_map_index(lba);
2191
2192 if (index < map_size)
2193 set_bit(index, map_storep);
2194
2195 lba = map_index_to_lba(index + 1);
2196 }
2197}
2198
2199static void unmap_region(sector_t lba, unsigned int len)
2200{
2201 sector_t end = lba + len;
2202
2203 while (lba < end) {
2204 unsigned long index = lba_to_map_index(lba);
2205
2206 if (lba == map_index_to_lba(index) &&
2207 lba + scsi_debug_unmap_granularity <= end &&
2208 index < map_size) {
2209 clear_bit(index, map_storep);
2210 if (scsi_debug_lbprz) {
2211 memset(fake_storep +
2212 lba * scsi_debug_sector_size, 0,
2213 scsi_debug_sector_size *
2214 scsi_debug_unmap_granularity);
2215 }
2216 if (dif_storep) {
2217 memset(dif_storep + lba, 0xff,
2218 sizeof(*dif_storep) *
2219 scsi_debug_unmap_granularity);
2220 }
2221 }
2222 lba = map_index_to_lba(index + 1);
2223 }
2224}
2225
2226static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2227 unsigned int num, u32 ei_lba)
2228{
2229 unsigned long iflags;
2230 int ret;
2231
2232 ret = check_device_access_params(SCpnt, lba, num);
2233 if (ret)
2234 return ret;
2235
2236 write_lock_irqsave(&atomic_rw, iflags);
2237
2238
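	/* DIX + T10 DIF: verify the incoming protection information before
	 * committing the write. */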
2239 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2240 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2241
2242 if (prot_ret) {
2243 write_unlock_irqrestore(&atomic_rw, iflags);
2244 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10,
2245 prot_ret);
2246 return illegal_condition_result;
2247 }
2248 }
2249
2250 ret = do_device_access(SCpnt, lba, num, 1);
2251 if (scsi_debug_lbp())
2252 map_region(lba, num);
2253 write_unlock_irqrestore(&atomic_rw, iflags);
2254 if (-1 == ret)
2255 return (DID_ERROR << 16);
2256 else if ((ret < (num * scsi_debug_sector_size)) &&
2257 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2258 sdev_printk(KERN_INFO, SCpnt->device,
2259 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2260 my_name, num * scsi_debug_sector_size, ret);
2261
2262 return 0;
2263}
2264
2265static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2266 unsigned int num, u32 ei_lba, unsigned int unmap)
2267{
2268 unsigned long iflags;
2269 unsigned long long i;
2270 int ret;
2271
2272 ret = check_device_access_params(scmd, lba, num);
2273 if (ret)
2274 return ret;
2275
2276 if (num > scsi_debug_write_same_length) {
2277 mk_sense_buffer(scmd, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2278 0);
2279 return check_condition_result;
2280 }
2281
2282 write_lock_irqsave(&atomic_rw, iflags);
2283
2284 if (unmap && scsi_debug_lbp()) {
2285 unmap_region(lba, num);
2286 goto out;
2287 }
2288
2289
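	/* fetch one logical block of data from the initiator */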
2290 ret = fetch_to_dev_buffer(scmd,
2291 fake_storep + (lba * scsi_debug_sector_size),
2292 scsi_debug_sector_size);
2293
2294 if (-1 == ret) {
2295 write_unlock_irqrestore(&atomic_rw, iflags);
2296 return (DID_ERROR << 16);
2297 } else if ((ret < (num * scsi_debug_sector_size)) &&
2298 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2299 sdev_printk(KERN_INFO, scmd->device,
2300 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2301 my_name, "write same",
2302 num * scsi_debug_sector_size, ret);
2303
2304
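	/* replicate that first block across the rest of the range */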
2305 for (i = 1 ; i < num ; i++)
2306 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2307 fake_storep + (lba * scsi_debug_sector_size),
2308 scsi_debug_sector_size);
2309
2310 if (scsi_debug_lbp())
2311 map_region(lba, num);
2312out:
2313 write_unlock_irqrestore(&atomic_rw, iflags);
2314
2315 return 0;
2316}
2317
2318struct unmap_block_desc {
2319 __be64 lba;
2320 __be32 blocks;
2321 __be32 __reserved;
2322};
2323
2324static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2325{
2326 unsigned char *buf;
2327 struct unmap_block_desc *desc;
2328 unsigned int i, payload_len, descriptors;
2329 int ret;
2330 unsigned long iflags;
2331
2332 ret = check_readiness(scmd, UAS_ONLY, devip);
2333 if (ret)
2334 return ret;
2335
2336 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2337 BUG_ON(scsi_bufflen(scmd) != payload_len);
2338
2339 descriptors = (payload_len - 8) / 16;
2340
2341 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2342 if (!buf)
2343 return check_condition_result;
2344
2345 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2346
2347 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2348 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2349
2350 desc = (void *)&buf[8];
2351
2352 write_lock_irqsave(&atomic_rw, iflags);
2353
2354 for (i = 0 ; i < descriptors ; i++) {
2355 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2356 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2357
2358 ret = check_device_access_params(scmd, lba, num);
2359 if (ret)
2360 goto out;
2361
2362 unmap_region(lba, num);
2363 }
2364
2365 ret = 0;
2366
2367out:
2368 write_unlock_irqrestore(&atomic_rw, iflags);
2369 kfree(buf);
2370
2371 return ret;
2372}
2373
2374#define SDEBUG_GET_LBA_STATUS_LEN 32
2375
2376static int resp_get_lba_status(struct scsi_cmnd * scmd,
2377 struct sdebug_dev_info * devip)
2378{
2379 unsigned long long lba;
2380 unsigned int alloc_len, mapped, num;
2381 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2382 int ret;
2383
2384 ret = check_readiness(scmd, UAS_ONLY, devip);
2385 if (ret)
2386 return ret;
2387
2388 lba = get_unaligned_be64(&scmd->cmnd[2]);
2389 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2390
2391 if (alloc_len < 24)
2392 return 0;
2393
2394 ret = check_device_access_params(scmd, lba, 1);
2395 if (ret)
2396 return ret;
2397
2398 mapped = map_state(lba, &num);
2399
2400 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2401 put_unaligned_be32(20, &arr[0]);
2402 put_unaligned_be64(lba, &arr[8]);
2403 put_unaligned_be32(num, &arr[16]);
2404 arr[20] = !mapped;
2405
2406 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2407}
2408
2409#define SDEBUG_RLUN_ARR_SZ 256
2410
2411static int resp_report_luns(struct scsi_cmnd * scp,
2412 struct sdebug_dev_info * devip)
2413{
2414 unsigned int alloc_len;
2415 int lun_cnt, i, upper, num, n;
2416 u64 wlun, lun;
2417 unsigned char *cmd = (unsigned char *)scp->cmnd;
2418 int select_report = (int)cmd[2];
2419 struct scsi_lun *one_lun;
2420 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2421 unsigned char * max_addr;
2422
2423 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2424 if ((alloc_len < 4) || (select_report > 2)) {
2425 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2426 0);
2427 return check_condition_result;
2428 }
2429
2430 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2431 lun_cnt = scsi_debug_max_luns;
2432 if (1 == select_report)
2433 lun_cnt = 0;
2434 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2435 --lun_cnt;
2436 wlun = (select_report > 0) ? 1 : 0;
2437 num = lun_cnt + wlun;
2438 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2439 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2440 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2441 sizeof(struct scsi_lun)), num);
2442 if (n < num) {
2443 wlun = 0;
2444 lun_cnt = n;
2445 }
2446 one_lun = (struct scsi_lun *) &arr[8];
2447 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2448 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2449 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2450 i++, lun++) {
2451 upper = (lun >> 8) & 0x3f;
2452 if (upper)
2453 one_lun[i].scsi_lun[0] =
2454 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2455 one_lun[i].scsi_lun[1] = lun & 0xff;
2456 }
2457 if (wlun) {
2458 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2459 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2460 i++;
2461 }
2462 alloc_len = (unsigned char *)(one_lun + i) - arr;
2463 return fill_from_dev_buffer(scp, arr,
2464 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2465}
2466
2467static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2468 unsigned int num, struct sdebug_dev_info *devip)
2469{
2470 int j;
2471 unsigned char *kaddr, *buf;
2472 unsigned int offset;
2473 struct scsi_data_buffer *sdb = scsi_in(scp);
2474 struct sg_mapping_iter miter;
2475
2476
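	/* stage the outgoing data in a bounce buffer, then XOR it into the
	 * data-in scatterlist below */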
2477 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2478 if (!buf) {
2479 mk_sense_buffer(scp, NOT_READY,
2480 LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
2481 return check_condition_result;
2482 }
2483
2484 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2485
2486 offset = 0;
2487 sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
2488 SG_MITER_ATOMIC | SG_MITER_TO_SG);
2489
2490 while (sg_miter_next(&miter)) {
2491 kaddr = miter.addr;
2492 for (j = 0; j < miter.length; j++)
2493 *(kaddr + j) ^= *(buf + offset + j);
2494
2495 offset += miter.length;
2496 }
2497 sg_miter_stop(&miter);
2498 kfree(buf);
2499
2500 return 0;
2501}
2502
2503
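/* Runs when a queued command's timer or tasklet fires; completes the
 * command back to the mid-level via scsi_done(). */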
2504static void sdebug_q_cmd_complete(unsigned long indx)
2505{
2506 int qa_indx;
2507 int retiring = 0;
2508 unsigned long iflags;
2509 struct sdebug_queued_cmd *sqcp;
2510 struct scsi_cmnd *scp;
2511 struct sdebug_dev_info *devip;
2512
2513 atomic_inc(&sdebug_completions);
2514 qa_indx = indx;
2515 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
2516 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
2517 return;
2518 }
2519 spin_lock_irqsave(&queued_arr_lock, iflags);
2520 sqcp = &queued_arr[qa_indx];
2521 scp = sqcp->a_cmnd;
2522 if (NULL == scp) {
2523 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2524 pr_err("%s: scp is NULL\n", __func__);
2525 return;
2526 }
2527 devip = (struct sdebug_dev_info *)scp->device->hostdata;
2528 if (devip)
2529 atomic_dec(&devip->num_in_q);
2530 else
2531 pr_err("%s: devip=NULL\n", __func__);
2532 if (atomic_read(&retired_max_queue) > 0)
2533 retiring = 1;
2534
2535 sqcp->a_cmnd = NULL;
2536 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
2537 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2538 pr_err("%s: Unexpected completion\n", __func__);
2539 return;
2540 }
2541
2542 if (unlikely(retiring)) {
2543 int k, retval;
2544
2545 retval = atomic_read(&retired_max_queue);
2546 if (qa_indx >= retval) {
2547 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2548 pr_err("%s: index %d too large\n", __func__, retval);
2549 return;
2550 }
2551 k = find_last_bit(queued_in_use_bm, retval);
2552 if ((k < scsi_debug_max_queue) || (k == retval))
2553 atomic_set(&retired_max_queue, 0);
2554 else
2555 atomic_set(&retired_max_queue, k + 1);
2556 }
2557 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2558 scp->scsi_done(scp);
2559}
2560
2561
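/* High-resolution timer flavour of the completion callback above. */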
2562static enum hrtimer_restart
2563sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
2564{
2565 int qa_indx;
2566 int retiring = 0;
2567 unsigned long iflags;
2568 struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
2569 struct sdebug_queued_cmd *sqcp;
2570 struct scsi_cmnd *scp;
2571 struct sdebug_dev_info *devip;
2572
2573 atomic_inc(&sdebug_completions);
2574 qa_indx = sd_hrtp->qa_indx;
2575 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
2576 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
2577 goto the_end;
2578 }
2579 spin_lock_irqsave(&queued_arr_lock, iflags);
2580 sqcp = &queued_arr[qa_indx];
2581 scp = sqcp->a_cmnd;
2582 if (NULL == scp) {
2583 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2584 pr_err("%s: scp is NULL\n", __func__);
2585 goto the_end;
2586 }
2587 devip = (struct sdebug_dev_info *)scp->device->hostdata;
2588 if (devip)
2589 atomic_dec(&devip->num_in_q);
2590 else
2591 pr_err("%s: devip=NULL\n", __func__);
2592 if (atomic_read(&retired_max_queue) > 0)
2593 retiring = 1;
2594
2595 sqcp->a_cmnd = NULL;
2596 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
2597 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2598 pr_err("%s: Unexpected completion\n", __func__);
2599 goto the_end;
2600 }
2601
2602 if (unlikely(retiring)) {
2603 int k, retval;
2604
2605 retval = atomic_read(&retired_max_queue);
2606 if (qa_indx >= retval) {
2607 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2608 pr_err("%s: index %d too large\n", __func__, retval);
2609 goto the_end;
2610 }
2611 k = find_last_bit(queued_in_use_bm, retval);
2612 if ((k < scsi_debug_max_queue) || (k == retval))
2613 atomic_set(&retired_max_queue, 0);
2614 else
2615 atomic_set(&retired_max_queue, k + 1);
2616 }
2617 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2618 scp->scsi_done(scp);
2619the_end:
2620 return HRTIMER_NORESTART;
2621}
2622
2623static struct sdebug_dev_info *
2624sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2625{
2626 struct sdebug_dev_info *devip;
2627
2628 devip = kzalloc(sizeof(*devip), flags);
2629 if (devip) {
2630 devip->sdbg_host = sdbg_host;
2631 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2632 }
2633 return devip;
2634}
2635
2636static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2637{
2638 struct sdebug_host_info * sdbg_host;
2639 struct sdebug_dev_info * open_devip = NULL;
2640 struct sdebug_dev_info * devip =
2641 (struct sdebug_dev_info *)sdev->hostdata;
2642
2643 if (devip)
2644 return devip;
2645 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2646 if (!sdbg_host) {
2647 pr_err("%s: Host info NULL\n", __func__);
2648 return NULL;
2649 }
2650 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2651 if ((devip->used) && (devip->channel == sdev->channel) &&
2652 (devip->target == sdev->id) &&
2653 (devip->lun == sdev->lun))
2654 return devip;
2655 else {
2656 if ((!devip->used) && (!open_devip))
2657 open_devip = devip;
2658 }
2659 }
2660 if (!open_devip) {
2661 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2662 if (!open_devip) {
2663 printk(KERN_ERR "%s: out of memory at line %d\n",
2664 __func__, __LINE__);
2665 return NULL;
2666 }
2667 }
2668
2669 open_devip->channel = sdev->channel;
2670 open_devip->target = sdev->id;
2671 open_devip->lun = sdev->lun;
2672 open_devip->sdbg_host = sdbg_host;
2673 atomic_set(&open_devip->num_in_q, 0);
2674 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
2675 open_devip->used = 1;
2676 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2677 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2678
2679 return open_devip;
2680}
2681
2682static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2683{
2684 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2685 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
2686 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2687 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2688 return 0;
2689}
2690
2691static int scsi_debug_slave_configure(struct scsi_device *sdp)
2692{
2693 struct sdebug_dev_info *devip;
2694
2695 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2696 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
2697 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2698 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2699 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2700 devip = devInfoReg(sdp);
2701 if (NULL == devip)
2702 return 1;
2703 sdp->hostdata = devip;
2704 sdp->tagged_supported = 1;
2705 if (sdp->host->cmd_per_lun)
2706 scsi_adjust_queue_depth(sdp, DEF_TAGGED_QUEUING,
2707 DEF_CMD_PER_LUN);
2708 blk_queue_max_segment_size(sdp->request_queue, -1U);
2709 if (scsi_debug_no_uld)
2710 sdp->no_uld_attach = 1;
2711 return 0;
2712}
2713
2714static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2715{
2716 struct sdebug_dev_info *devip =
2717 (struct sdebug_dev_info *)sdp->hostdata;
2718
2719 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2720 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
2721 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2722 if (devip) {
2723
2724 devip->used = 0;
2725 sdp->hostdata = NULL;
2726 }
2727}
2728
2729
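/* If 'cmnd' is still queued, cancel its timer/tasklet/hrtimer and release
 * its queue slot. Returns 1 if the command was found, else 0. */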
2730static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2731{
2732 unsigned long iflags;
2733 int k, qmax, r_qmax;
2734 struct sdebug_queued_cmd *sqcp;
2735 struct sdebug_dev_info *devip;
2736
2737 spin_lock_irqsave(&queued_arr_lock, iflags);
2738 qmax = scsi_debug_max_queue;
2739 r_qmax = atomic_read(&retired_max_queue);
2740 if (r_qmax > qmax)
2741 qmax = r_qmax;
2742 for (k = 0; k < qmax; ++k) {
2743 if (test_bit(k, queued_in_use_bm)) {
2744 sqcp = &queued_arr[k];
2745 if (cmnd == sqcp->a_cmnd) {
2746 if (scsi_debug_ndelay > 0) {
2747 if (sqcp->sd_hrtp)
2748 hrtimer_cancel(
2749 &sqcp->sd_hrtp->hrt);
2750 } else if (scsi_debug_delay > 0) {
2751 if (sqcp->cmnd_timerp)
2752 del_timer_sync(
2753 sqcp->cmnd_timerp);
2754 } else if (scsi_debug_delay < 0) {
2755 if (sqcp->tletp)
2756 tasklet_kill(sqcp->tletp);
2757 }
2758 __clear_bit(k, queued_in_use_bm);
2759 devip = (struct sdebug_dev_info *)
2760 cmnd->device->hostdata;
2761 if (devip)
2762 atomic_dec(&devip->num_in_q);
2763 sqcp->a_cmnd = NULL;
2764 break;
2765 }
2766 }
2767 }
2768 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2769 return (k < qmax) ? 1 : 0;
2770}
2771
2772
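/* Cancel the timers/tasklets of all queued commands and release their
 * queue slots. */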
2773static void stop_all_queued(void)
2774{
2775 unsigned long iflags;
2776 int k;
2777 struct sdebug_queued_cmd *sqcp;
2778 struct sdebug_dev_info *devip;
2779
2780 spin_lock_irqsave(&queued_arr_lock, iflags);
2781 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
2782 if (test_bit(k, queued_in_use_bm)) {
2783 sqcp = &queued_arr[k];
2784 if (sqcp->a_cmnd) {
2785 if (scsi_debug_ndelay > 0) {
2786 if (sqcp->sd_hrtp)
2787 hrtimer_cancel(
2788 &sqcp->sd_hrtp->hrt);
2789 } else if (scsi_debug_delay > 0) {
2790 if (sqcp->cmnd_timerp)
2791 del_timer_sync(
2792 sqcp->cmnd_timerp);
2793 } else if (scsi_debug_delay < 0) {
2794 if (sqcp->tletp)
2795 tasklet_kill(sqcp->tletp);
2796 }
2797 __clear_bit(k, queued_in_use_bm);
2798 devip = (struct sdebug_dev_info *)
2799 sqcp->a_cmnd->device->hostdata;
2800 if (devip)
2801 atomic_dec(&devip->num_in_q);
2802 sqcp->a_cmnd = NULL;
2803 }
2804 }
2805 }
2806 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2807}
2808
2809
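/* Free the timer, tasklet and hrtimer allocations of every queue slot. */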
2810static void free_all_queued(void)
2811{
2812 unsigned long iflags;
2813 int k;
2814 struct sdebug_queued_cmd *sqcp;
2815
2816 spin_lock_irqsave(&queued_arr_lock, iflags);
2817 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
2818 sqcp = &queued_arr[k];
2819 kfree(sqcp->cmnd_timerp);
2820 sqcp->cmnd_timerp = NULL;
2821 kfree(sqcp->tletp);
2822 sqcp->tletp = NULL;
2823 kfree(sqcp->sd_hrtp);
2824 sqcp->sd_hrtp = NULL;
2825 }
2826 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2827}
2828
2829static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
2830{
2831 ++num_aborts;
2832 if (SCpnt) {
2833 if (SCpnt->device &&
2834 (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
2835 sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
2836 __func__);
2837 stop_queued_cmnd(SCpnt);
2838 }
2839 return SUCCESS;
2840}
2841
2842static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2843{
2844 struct sdebug_dev_info * devip;
2845
2846 ++num_dev_resets;
2847 if (SCpnt && SCpnt->device) {
2848 struct scsi_device *sdp = SCpnt->device;
2849
2850 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2851 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2852 devip = devInfoReg(sdp);
2853 if (devip)
2854 set_bit(SDEBUG_UA_POR, devip->uas_bm);
2855 }
2856 return SUCCESS;
2857}
2858
2859static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
2860{
2861 struct sdebug_host_info *sdbg_host;
2862 struct sdebug_dev_info *devip;
2863 struct scsi_device *sdp;
2864 struct Scsi_Host *hp;
2865 int k = 0;
2866
2867 ++num_target_resets;
2868 if (!SCpnt)
2869 goto lie;
2870 sdp = SCpnt->device;
2871 if (!sdp)
2872 goto lie;
2873 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2874 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2875 hp = sdp->host;
2876 if (!hp)
2877 goto lie;
2878 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2879 if (sdbg_host) {
2880 list_for_each_entry(devip,
2881 &sdbg_host->dev_info_list,
2882 dev_list)
2883 if (devip->target == sdp->id) {
2884 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2885 ++k;
2886 }
2887 }
2888 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2889 sdev_printk(KERN_INFO, sdp,
2890 "%s: %d device(s) found in target\n", __func__, k);
2891lie:
2892 return SUCCESS;
2893}
2894
2895static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2896{
2897 struct sdebug_host_info *sdbg_host;
2898 struct sdebug_dev_info *devip;
2899 struct scsi_device * sdp;
2900 struct Scsi_Host * hp;
2901 int k = 0;
2902
2903 ++num_bus_resets;
2904 if (!(SCpnt && SCpnt->device))
2905 goto lie;
2906 sdp = SCpnt->device;
2907 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2908 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2909 hp = sdp->host;
2910 if (hp) {
2911 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2912 if (sdbg_host) {
2913 list_for_each_entry(devip,
2914 &sdbg_host->dev_info_list,
2915 dev_list) {
2916 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2917 ++k;
2918 }
2919 }
2920 }
2921 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2922 sdev_printk(KERN_INFO, sdp,
2923 "%s: %d device(s) found in host\n", __func__, k);
2924lie:
2925 return SUCCESS;
2926}
2927
2928static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2929{
2930 struct sdebug_host_info * sdbg_host;
2931 struct sdebug_dev_info *devip;
2932 int k = 0;
2933
2934 ++num_host_resets;
2935 if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
2936 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
2937 spin_lock(&sdebug_host_list_lock);
2938 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2939 list_for_each_entry(devip, &sdbg_host->dev_info_list,
2940 dev_list) {
2941 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2942 ++k;
2943 }
2944 }
2945 spin_unlock(&sdebug_host_list_lock);
2946 stop_all_queued();
2947 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2948 sdev_printk(KERN_INFO, SCpnt->device,
2949 "%s: %d device(s) found\n", __func__, k);
2950 return SUCCESS;
2951}
2952
2953static void __init sdebug_build_parts(unsigned char *ramp,
2954 unsigned long store_size)
2955{
2956 struct partition * pp;
2957 int starts[SDEBUG_MAX_PARTS + 2];
2958 int sectors_per_part, num_sectors, k;
2959 int heads_by_sects, start_sec, end_sec;
2960
2961
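	/* assumes the backing store, and hence the MBR area, starts zeroed */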
2962 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2963 return;
2964 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2965 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2966 pr_warn("%s: reducing partitions to %d\n", __func__,
2967 SDEBUG_MAX_PARTS);
2968 }
2969 num_sectors = (int)sdebug_store_sectors;
2970 sectors_per_part = (num_sectors - sdebug_sectors_per)
2971 / scsi_debug_num_parts;
2972 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2973 starts[0] = sdebug_sectors_per;
2974 for (k = 1; k < scsi_debug_num_parts; ++k)
2975 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2976 * heads_by_sects;
2977 starts[scsi_debug_num_parts] = num_sectors;
2978 starts[scsi_debug_num_parts + 1] = 0;
2979
2980 ramp[510] = 0x55;
2981 ramp[511] = 0xAA;
2982 pp = (struct partition *)(ramp + 0x1be);
2983 for (k = 0; starts[k + 1]; ++k, ++pp) {
2984 start_sec = starts[k];
2985 end_sec = starts[k + 1] - 1;
2986 pp->boot_ind = 0;
2987
2988 pp->cyl = start_sec / heads_by_sects;
2989 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2990 / sdebug_sectors_per;
2991 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2992
2993 pp->end_cyl = end_sec / heads_by_sects;
2994 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2995 / sdebug_sectors_per;
2996 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2997
2998 pp->start_sect = cpu_to_le32(start_sec);
2999 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3000 pp->sys_ind = 0x83;
3001 }
3002}
3003
3004static int
3005schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3006 int scsi_result, int delta_jiff)
3007{
3008 unsigned long iflags;
3009 int k, num_in_q, tsf, qdepth, inject;
3010 	struct sdebug_queued_cmd *sqcp = NULL;
3011 	struct scsi_device *sdp;
3012 
3013 	if (NULL == cmnd || NULL == devip) {
3014 		pr_warn("%s: called with NULL cmnd or devip pointer\n",
3015 			__func__);
3016 		return SCSI_MLQUEUE_HOST_BUSY;
3017 	}
3018 	sdp = cmnd->device;
3019 if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3020 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3021 __func__, scsi_result);
3022 if (delta_jiff == 0) {
3023
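		/* a delay of 0 means respond immediately in this context */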
3024 cmnd->result = scsi_result;
3025 cmnd->scsi_done(cmnd);
3026 return 0;
3027 }
3028
3029
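	/* otherwise claim a queue slot and schedule a deferred response */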
3030 spin_lock_irqsave(&queued_arr_lock, iflags);
3031 num_in_q = atomic_read(&devip->num_in_q);
3032 qdepth = cmnd->device->queue_depth;
3033 k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3034 tsf = 0;
3035 inject = 0;
3036 if ((qdepth > 0) && (num_in_q >= qdepth))
3037 tsf = 1;
3038 else if ((scsi_debug_every_nth != 0) &&
3039 (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts)) {
3040 if ((num_in_q == (qdepth - 1)) &&
3041 (atomic_inc_return(&sdebug_a_tsf) >=
3042 abs(scsi_debug_every_nth))) {
3043 atomic_set(&sdebug_a_tsf, 0);
3044 inject = 1;
3045 tsf = 1;
3046 }
3047 }
3048
3049
3050
3051 if (tsf)
3052 scsi_result = device_qfull_result;
3053 if (k >= scsi_debug_max_queue) {
3054 if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3055 tsf = 1;
3056 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3057 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3058 sdev_printk(KERN_INFO, sdp,
3059 "%s: num_in_q=%d, bypass q, %s%s\n",
3060 __func__, num_in_q,
3061 (inject ? "<inject> " : ""),
3062 (tsf ? "status: TASK SET FULL" :
3063 "report: host busy"));
3064 if (tsf) {
3065
3066 cmnd->result = scsi_result;
3067 cmnd->scsi_done(cmnd);
3068
3069 return 0;
3070 } else
3071 return SCSI_MLQUEUE_HOST_BUSY;
3072 }
3073 __set_bit(k, queued_in_use_bm);
3074 atomic_inc(&devip->num_in_q);
3075 sqcp = &queued_arr[k];
3076 sqcp->a_cmnd = cmnd;
3077 cmnd->result = scsi_result;
3078 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3079 if (delta_jiff > 0) {
3080 if (NULL == sqcp->cmnd_timerp) {
3081 sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
3082 GFP_ATOMIC);
3083 if (NULL == sqcp->cmnd_timerp)
3084 return SCSI_MLQUEUE_HOST_BUSY;
3085 init_timer(sqcp->cmnd_timerp);
3086 }
3087 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
3088 sqcp->cmnd_timerp->data = k;
3089 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
3090 add_timer(sqcp->cmnd_timerp);
3091 } else if (scsi_debug_ndelay > 0) {
3092 ktime_t kt = ktime_set(0, scsi_debug_ndelay);
3093 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
3094
3095 if (NULL == sd_hp) {
3096 sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
3097 if (NULL == sd_hp)
3098 return SCSI_MLQUEUE_HOST_BUSY;
3099 sqcp->sd_hrtp = sd_hp;
3100 hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
3101 HRTIMER_MODE_REL);
3102 sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
3103 sd_hp->qa_indx = k;
3104 }
3105 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
3106 } else {
3107 if (NULL == sqcp->tletp) {
3108 sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
3109 GFP_ATOMIC);
3110 if (NULL == sqcp->tletp)
3111 return SCSI_MLQUEUE_HOST_BUSY;
3112 tasklet_init(sqcp->tletp,
3113 sdebug_q_cmd_complete, k);
3114 }
3115 if (-1 == delta_jiff)
3116 tasklet_hi_schedule(sqcp->tletp);
3117 else
3118 tasklet_schedule(sqcp->tletp);
3119 }
3120 if (tsf && (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts))
3121 sdev_printk(KERN_INFO, sdp,
3122 "%s: num_in_q=%d +1, %s%s\n", __func__,
3123 num_in_q, (inject ? "<inject> " : ""),
3124 "status: TASK SET FULL");
3125 return 0;
3126}
3127
3128
3129
3130
3131
3132
3133
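/* The module_param_named() definitions below create writable files under
 * /sys/module/scsi_debug/parameters. Unlike the driver attributes defined
 * further down (under /sys/bus/pseudo/drivers/scsi_debug), writes made via
 * these files do not trigger any follow-up action in the driver. */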
3134module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
3135module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
3136module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
3137module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
3138module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
3139module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
3140module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
3141module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
3142module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
3143module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
3144module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
3145module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
3146module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
3147module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
3148module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
3149module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
3150module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
3151module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
3152module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
3153module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
3154module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
3155module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
3156module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
3157module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
3158module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
3159module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
3160module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
3161module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
3162module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
3163module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
3164module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
3165module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
3166module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
3167module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
3168module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
3169module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
3170module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
3171 S_IRUGO | S_IWUSR);
3172module_param_named(write_same_length, scsi_debug_write_same_length, int,
3173 S_IRUGO | S_IWUSR);
3174
3175MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
3176MODULE_DESCRIPTION("SCSI debug adapter driver");
3177MODULE_LICENSE("GPL");
3178MODULE_VERSION(SCSI_DEBUG_VERSION);
3179
3180MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
3181MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
3182MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
3183MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
3184MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
3185MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
3186MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
3187MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
3188MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
3189MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
3190MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
3191MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
3192MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
3193MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
3194MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
3195MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
3196MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
3197MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
3198MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
3199MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
3200MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
3201 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
3202MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
3203MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
3204 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
3205MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
3206MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
3207MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
3208MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
3209MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
3210MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
3211MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
3212MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
3213MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
3214MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
3215MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
3216MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
3217MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
3218
3219static char sdebug_info[256];
3220
3221static const char * scsi_debug_info(struct Scsi_Host * shp)
3222{
3223 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
3224 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
3225 scsi_debug_version_date, scsi_debug_dev_size_mb,
3226 scsi_debug_opts);
3227 return sdebug_info;
3228}
3229
3230
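/* Handles 'echo <n> > /proc/scsi/scsi_debug/<host_id>'; <n> becomes the
 * new opts value. */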
3231static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
3232{
3233 char arr[16];
3234 int opts;
3235 int minLen = length > 15 ? 15 : length;
3236
3237 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
3238 return -EACCES;
3239 memcpy(arr, buffer, minLen);
3240 arr[minLen] = '\0';
3241 if (1 != sscanf(arr, "%d", &opts))
3242 return -EINVAL;
3243 scsi_debug_opts = opts;
3244 if (scsi_debug_every_nth != 0)
3245 atomic_set(&sdebug_cmnd_count, 0);
3246 return length;
3247}
3248
3249
3250
3251
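/* Backs 'cat /proc/scsi/scsi_debug/<host_id>'; the output is built from
 * global state, so it is the same for every scsi_debug host. */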
3252static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
3253{
3254 int f, l;
3255 char b[32];
3256
3257 if (scsi_debug_every_nth > 0)
3258 snprintf(b, sizeof(b), " (curr:%d)",
3259 ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
3260 atomic_read(&sdebug_a_tsf) :
3261 atomic_read(&sdebug_cmnd_count)));
3262 else
3263 b[0] = '\0';
3264
3265 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
3266 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
3267 "every_nth=%d%s\n"
3268 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
3269 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
3270 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
3271 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
3272 "usec_in_jiffy=%lu\n",
3273 SCSI_DEBUG_VERSION, scsi_debug_version_date,
3274 scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
3275 scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
3276 scsi_debug_max_luns, atomic_read(&sdebug_completions),
3277 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
3278 sdebug_sectors_per, num_aborts, num_dev_resets,
3279 num_target_resets, num_bus_resets, num_host_resets,
3280 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
3281
3282 f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
3283 if (f != scsi_debug_max_queue) {
3284 l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
3285 seq_printf(m, " %s BUSY: first,last bits set: %d,%d\n",
3286 "queued_in_use_bm", f, l);
3287 }
3288 return 0;
3289}
3290
3291static ssize_t delay_show(struct device_driver *ddp, char *buf)
3292{
3293 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
3294}
3295
3296static ssize_t delay_store(struct device_driver *ddp, const char *buf,
3297 size_t count)
3298{
3299 int delay, res;
3300
3301 if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
3302 res = count;
3303 if (scsi_debug_delay != delay) {
3304 unsigned long iflags;
3305 int k;
3306
3307 spin_lock_irqsave(&queued_arr_lock, iflags);
3308 k = find_first_bit(queued_in_use_bm,
3309 scsi_debug_max_queue);
3310 if (k != scsi_debug_max_queue)
3311 res = -EBUSY;
3312 else {
3313 scsi_debug_delay = delay;
3314 scsi_debug_ndelay = 0;
3315 }
3316 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3317 }
3318 return res;
3319 }
3320 return -EINVAL;
3321}
3322static DRIVER_ATTR_RW(delay);
3323
3324static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
3325{
3326 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
3327}
3328
3329
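/* Refuses the change (-EBUSY) while any command is still queued. */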
3330static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
3331 size_t count)
3332{
3333 unsigned long iflags;
3334 int ndelay, res, k;
3335
3336 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
3337 (ndelay >= 0) && (ndelay < 1000000000)) {
3338 res = count;
3339 if (scsi_debug_ndelay != ndelay) {
3340 spin_lock_irqsave(&queued_arr_lock, iflags);
3341 k = find_first_bit(queued_in_use_bm,
3342 scsi_debug_max_queue);
3343 if (k != scsi_debug_max_queue)
3344 res = -EBUSY;
3345 else {
3346 scsi_debug_ndelay = ndelay;
3347 scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
3348 : DEF_DELAY;
3349 }
3350 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3351 }
3352 return res;
3353 }
3354 return -EINVAL;
3355}
3356static DRIVER_ATTR_RW(ndelay);
3357
3358static ssize_t opts_show(struct device_driver *ddp, char *buf)
3359{
3360 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
3361}
3362
3363static ssize_t opts_store(struct device_driver *ddp, const char *buf,
3364 size_t count)
3365{
3366 int opts;
3367 char work[20];
3368
3369 if (1 == sscanf(buf, "%10s", work)) {
3370 if (0 == strnicmp(work,"0x", 2)) {
3371 if (1 == sscanf(&work[2], "%x", &opts))
3372 goto opts_done;
3373 } else {
3374 if (1 == sscanf(work, "%d", &opts))
3375 goto opts_done;
3376 }
3377 }
3378 return -EINVAL;
3379opts_done:
3380 scsi_debug_opts = opts;
3381 atomic_set(&sdebug_cmnd_count, 0);
3382 atomic_set(&sdebug_a_tsf, 0);
3383 return count;
3384}
3385static DRIVER_ATTR_RW(opts);
3386
3387static ssize_t ptype_show(struct device_driver *ddp, char *buf)
3388{
3389 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
3390}
3391static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
3392 size_t count)
3393{
3394 int n;
3395
3396 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3397 scsi_debug_ptype = n;
3398 return count;
3399 }
3400 return -EINVAL;
3401}
3402static DRIVER_ATTR_RW(ptype);
3403
3404static ssize_t dsense_show(struct device_driver *ddp, char *buf)
3405{
3406 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
3407}
3408static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
3409 size_t count)
3410{
3411 int n;
3412
3413 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3414 scsi_debug_dsense = n;
3415 return count;
3416 }
3417 return -EINVAL;
3418}
3419static DRIVER_ATTR_RW(dsense);
3420
3421static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
3422{
3423 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
3424}
3425static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
3426 size_t count)
3427{
3428 int n;
3429
3430 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3431 n = (n > 0);
3432 scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
3433 if (scsi_debug_fake_rw != n) {
3434 if ((0 == n) && (NULL == fake_storep)) {
3435 unsigned long sz =
3436 (unsigned long)scsi_debug_dev_size_mb *
3437 1048576;
3438
3439 fake_storep = vmalloc(sz);
3440 if (NULL == fake_storep) {
3441 pr_err("%s: out of memory, 9\n",
3442 __func__);
3443 return -ENOMEM;
3444 }
3445 memset(fake_storep, 0, sz);
3446 }
3447 scsi_debug_fake_rw = n;
3448 }
3449 return count;
3450 }
3451 return -EINVAL;
3452}
3453static DRIVER_ATTR_RW(fake_rw);
3454
3455static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
3456{
3457 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
3458}
3459static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
3460 size_t count)
3461{
3462 int n;
3463
3464 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3465 scsi_debug_no_lun_0 = n;
3466 return count;
3467 }
3468 return -EINVAL;
3469}
3470static DRIVER_ATTR_RW(no_lun_0);
3471
3472static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
3473{
3474 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
3475}
3476static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
3477 size_t count)
3478{
3479 int n;
3480
3481 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3482 scsi_debug_num_tgts = n;
3483 sdebug_max_tgts_luns();
3484 return count;
3485 }
3486 return -EINVAL;
3487}
3488static DRIVER_ATTR_RW(num_tgts);
3489
3490static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
3491{
3492 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3493}
3494static DRIVER_ATTR_RO(dev_size_mb);
3495
3496static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
3497{
3498 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3499}
3500static DRIVER_ATTR_RO(num_parts);
3501
3502static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
3503{
3504 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3505}
3506static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
3507 size_t count)
3508{
3509 int nth;
3510
3511 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3512 scsi_debug_every_nth = nth;
3513 atomic_set(&sdebug_cmnd_count, 0);
3514 return count;
3515 }
3516 return -EINVAL;
3517}
3518static DRIVER_ATTR_RW(every_nth);
3519
3520static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
3521{
3522 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3523}
3524static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
3525 size_t count)
3526{
3527 int n;
3528
3529 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3530 scsi_debug_max_luns = n;
3531 sdebug_max_tgts_luns();
3532 return count;
3533 }
3534 return -EINVAL;
3535}
3536static DRIVER_ATTR_RW(max_luns);
3537
3538static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
3539{
3540 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3541}
3542
3543
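/* max_queue may be lowered while commands are queued; retired_max_queue
 * remembers the highest in-use slot beyond the new limit until those
 * commands complete. */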
3544static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
3545 size_t count)
3546{
3547 unsigned long iflags;
3548 int n, k;
3549
3550 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3551 (n <= SCSI_DEBUG_CANQUEUE)) {
3552 spin_lock_irqsave(&queued_arr_lock, iflags);
3553 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
3554 scsi_debug_max_queue = n;
3555 if (SCSI_DEBUG_CANQUEUE == k)
3556 atomic_set(&retired_max_queue, 0);
3557 else if (k >= n)
3558 atomic_set(&retired_max_queue, k + 1);
3559 else
3560 atomic_set(&retired_max_queue, 0);
3561 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3562 return count;
3563 }
3564 return -EINVAL;
3565}
3566static DRIVER_ATTR_RW(max_queue);
3567
3568static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
3569{
3570 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3571}
3572static DRIVER_ATTR_RO(no_uld);
3573
3574static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
3575{
3576 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3577}
3578static DRIVER_ATTR_RO(scsi_level);
3579
3580static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
3581{
3582 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3583}
3584static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
3585 size_t count)
3586{
3587 int n;
3588
3589 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3590 scsi_debug_virtual_gb = n;
3591
3592 sdebug_capacity = get_sdebug_capacity();
3593
3594 return count;
3595 }
3596 return -EINVAL;
3597}
3598static DRIVER_ATTR_RW(virtual_gb);
3599
3600static ssize_t add_host_show(struct device_driver *ddp, char *buf)
3601{
3602 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3603}
3604
3605static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
3606 size_t count)
3607{
3608 int delta_hosts;
3609
3610 if (sscanf(buf, "%d", &delta_hosts) != 1)
3611 return -EINVAL;
3612 if (delta_hosts > 0) {
3613 do {
3614 sdebug_add_adapter();
3615 } while (--delta_hosts);
3616 } else if (delta_hosts < 0) {
3617 do {
3618 sdebug_remove_adapter();
3619 } while (++delta_hosts);
3620 }
3621 return count;
3622}
3623static DRIVER_ATTR_RW(add_host);
3624
3625static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
3626{
3627 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3628}
3629static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
3630 size_t count)
3631{
3632 int n;
3633
3634 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3635 scsi_debug_vpd_use_hostno = n;
3636 return count;
3637 }
3638 return -EINVAL;
3639}
3640static DRIVER_ATTR_RW(vpd_use_hostno);
3641
3642static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
3643{
3644 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3645}
3646static DRIVER_ATTR_RO(sector_size);
3647
3648static ssize_t dix_show(struct device_driver *ddp, char *buf)
3649{
3650 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3651}
3652static DRIVER_ATTR_RO(dix);
3653
3654static ssize_t dif_show(struct device_driver *ddp, char *buf)
3655{
3656 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3657}
3658static DRIVER_ATTR_RO(dif);
3659
3660static ssize_t guard_show(struct device_driver *ddp, char *buf)
3661{
3662 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
3663}
3664static DRIVER_ATTR_RO(guard);
3665
3666static ssize_t ato_show(struct device_driver *ddp, char *buf)
3667{
3668 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3669}
3670static DRIVER_ATTR_RO(ato);
3671
3672static ssize_t map_show(struct device_driver *ddp, char *buf)
3673{
3674 ssize_t count;
3675
3676 if (!scsi_debug_lbp())
3677 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3678 sdebug_store_sectors);
3679
3680 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3681
3682 buf[count++] = '\n';
3683 buf[count++] = 0;
3684
3685 return count;
3686}
3687static DRIVER_ATTR_RO(map);
3688
3689static ssize_t removable_show(struct device_driver *ddp, char *buf)
3690{
3691 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3692}
3693static ssize_t removable_store(struct device_driver *ddp, const char *buf,
3694 size_t count)
3695{
3696 int n;
3697
3698 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3699 scsi_debug_removable = (n > 0);
3700 return count;
3701 }
3702 return -EINVAL;
3703}
3704static DRIVER_ATTR_RW(removable);
3705
3706static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
3707{
3708 return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
3709}
3710
3711static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
3712 size_t count)
3713{
3714 int n, res;
3715
3716 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3717 bool new_host_lock = (n > 0);
3718
3719 res = count;
3720 if (new_host_lock != scsi_debug_host_lock) {
3721 unsigned long iflags;
3722 int k;
3723
3724 spin_lock_irqsave(&queued_arr_lock, iflags);
3725 k = find_first_bit(queued_in_use_bm,
3726 scsi_debug_max_queue);
3727 if (k != scsi_debug_max_queue)
3728 res = -EBUSY;
3729 else
3730 scsi_debug_host_lock = new_host_lock;
3731 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3732 }
3733 return res;
3734 }
3735 return -EINVAL;
3736}
3737static DRIVER_ATTR_RW(host_lock);
3738
3739
3740
3741
3742
3743
3744
3745
3746
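/* These driver attributes appear under /sys/bus/pseudo/drivers/scsi_debug.
 * Unlike the module parameters above, writes here can trigger follow-up
 * actions (e.g. num_tgts_store() calls sdebug_max_tgts_luns()). */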
3747static struct attribute *sdebug_drv_attrs[] = {
3748 &driver_attr_delay.attr,
3749 &driver_attr_opts.attr,
3750 &driver_attr_ptype.attr,
3751 &driver_attr_dsense.attr,
3752 &driver_attr_fake_rw.attr,
3753 &driver_attr_no_lun_0.attr,
3754 &driver_attr_num_tgts.attr,
3755 &driver_attr_dev_size_mb.attr,
3756 &driver_attr_num_parts.attr,
3757 &driver_attr_every_nth.attr,
3758 &driver_attr_max_luns.attr,
3759 &driver_attr_max_queue.attr,
3760 &driver_attr_no_uld.attr,
3761 &driver_attr_scsi_level.attr,
3762 &driver_attr_virtual_gb.attr,
3763 &driver_attr_add_host.attr,
3764 &driver_attr_vpd_use_hostno.attr,
3765 &driver_attr_sector_size.attr,
3766 &driver_attr_dix.attr,
3767 &driver_attr_dif.attr,
3768 &driver_attr_guard.attr,
3769 &driver_attr_ato.attr,
3770 &driver_attr_map.attr,
3771 &driver_attr_removable.attr,
3772 &driver_attr_host_lock.attr,
3773 &driver_attr_ndelay.attr,
3774 NULL,
3775};
3776ATTRIBUTE_GROUPS(sdebug_drv);
3777
3778static struct device *pseudo_primary;
3779
3780static int __init scsi_debug_init(void)
3781{
3782 unsigned long sz;
3783 int host_to_add;
3784 int k;
3785 int ret;
3786
3787 atomic_set(&sdebug_cmnd_count, 0);
3788 atomic_set(&sdebug_completions, 0);
3789 atomic_set(&retired_max_queue, 0);
3790
3791 if (scsi_debug_ndelay >= 1000000000) {
3792 pr_warn("%s: ndelay must be less than 1 second, ignored\n",
3793 __func__);
3794 scsi_debug_ndelay = 0;
3795 } else if (scsi_debug_ndelay > 0)
3796 scsi_debug_delay = DELAY_OVERRIDDEN;
3797
3798 switch (scsi_debug_sector_size) {
3799 case 512:
3800 case 1024:
3801 case 2048:
3802 case 4096:
3803 break;
3804 default:
3805 pr_err("%s: invalid sector_size %d\n", __func__,
3806 scsi_debug_sector_size);
3807 return -EINVAL;
3808 }
3809
3810 switch (scsi_debug_dif) {
3811
3812 case SD_DIF_TYPE0_PROTECTION:
3813 case SD_DIF_TYPE1_PROTECTION:
3814 case SD_DIF_TYPE2_PROTECTION:
3815 case SD_DIF_TYPE3_PROTECTION:
3816 break;
3817
3818 default:
3819 pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
3820 return -EINVAL;
3821 }
3822
3823 if (scsi_debug_guard > 1) {
3824 pr_err("%s: guard must be 0 or 1\n", __func__);
3825 return -EINVAL;
3826 }
3827
3828 if (scsi_debug_ato > 1) {
3829 pr_err("%s: ato must be 0 or 1\n", __func__);
3830 return -EINVAL;
3831 }
3832
3833 if (scsi_debug_physblk_exp > 15) {
3834 pr_err("%s: invalid physblk_exp %u\n", __func__,
3835 scsi_debug_physblk_exp);
3836 return -EINVAL;
3837 }
3838
3839 if (scsi_debug_lowest_aligned > 0x3fff) {
3840 pr_err("%s: lowest_aligned too big: %u\n", __func__,
3841 scsi_debug_lowest_aligned);
3842 return -EINVAL;
3843 }
3844
3845 if (scsi_debug_dev_size_mb < 1)
3846 scsi_debug_dev_size_mb = 1;
3847 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3848 sdebug_store_sectors = sz / scsi_debug_sector_size;
3849 sdebug_capacity = get_sdebug_capacity();
3850
3851
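	/* choose a plausible CHS geometry for the simulated capacity */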
3852 sdebug_heads = 8;
3853 sdebug_sectors_per = 32;
3854 	if (scsi_debug_dev_size_mb >= 256)
3855 		sdebug_heads = 64;
3856 	else if (scsi_debug_dev_size_mb >= 16)
3857 		sdebug_heads = 32;
3858 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3859 (sdebug_sectors_per * sdebug_heads);
3860 if (sdebug_cylinders_per >= 1024) {
3861
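		/* fall back to a large-disk geometry: 255 heads, 63 sectors */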
3862 sdebug_heads = 255;
3863 sdebug_sectors_per = 63;
3864 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3865 (sdebug_sectors_per * sdebug_heads);
3866 }
3867
3868 if (0 == scsi_debug_fake_rw) {
3869 fake_storep = vmalloc(sz);
3870 if (NULL == fake_storep) {
3871 pr_err("%s: out of memory, 1\n", __func__);
3872 return -ENOMEM;
3873 }
3874 memset(fake_storep, 0, sz);
3875 if (scsi_debug_num_parts > 0)
3876 sdebug_build_parts(fake_storep, sz);
3877 }
3878
3879 if (scsi_debug_dix) {
3880 int dif_size;
3881
3882 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3883 dif_storep = vmalloc(dif_size);
3884
3885 pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
3886 dif_storep);
3887
3888 if (dif_storep == NULL) {
3889 pr_err("%s: out of mem. (DIX)\n", __func__);
3890 ret = -ENOMEM;
3891 goto free_vm;
3892 }
3893
3894 memset(dif_storep, 0xff, dif_size);
3895 }
3896
3897
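	/* logical block provisioning (thin provisioning) setup */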
3898 if (scsi_debug_lbp()) {
3899 scsi_debug_unmap_max_blocks =
3900 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3901
3902 scsi_debug_unmap_max_desc =
3903 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3904
3905 scsi_debug_unmap_granularity =
3906 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3907
3908 		if (scsi_debug_unmap_alignment &&
3909 		    scsi_debug_unmap_granularity <= scsi_debug_unmap_alignment) {
3910 			pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
3911 			       __func__);
3912 			ret = -EINVAL;
3913 			goto free_vm;
3914 		}
3915
3916 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3917 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3918
3919 pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
3920
3921 if (map_storep == NULL) {
3922 pr_err("%s: out of mem. (MAP)\n", __func__);
3923 ret = -ENOMEM;
3924 goto free_vm;
3925 }
3926
3927 bitmap_zero(map_storep, map_size);
3928
3929
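		/* keep the blocks holding the partition table mapped */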
3930 if (scsi_debug_num_parts)
3931 map_region(0, 2);
3932 }
3933
3934 pseudo_primary = root_device_register("pseudo_0");
3935 if (IS_ERR(pseudo_primary)) {
3936 pr_warn("%s: root_device_register() error\n", __func__);
3937 ret = PTR_ERR(pseudo_primary);
3938 goto free_vm;
3939 }
3940 ret = bus_register(&pseudo_lld_bus);
3941 if (ret < 0) {
3942 pr_warn("%s: bus_register error: %d\n", __func__, ret);
3943 goto dev_unreg;
3944 }
3945 ret = driver_register(&sdebug_driverfs_driver);
3946 if (ret < 0) {
3947 pr_warn("%s: driver_register error: %d\n", __func__, ret);
3948 goto bus_unreg;
3949 }
3950
3951 host_to_add = scsi_debug_add_host;
3952 scsi_debug_add_host = 0;
3953
3954 for (k = 0; k < host_to_add; k++) {
3955 if (sdebug_add_adapter()) {
3956 pr_err("%s: sdebug_add_adapter failed k=%d\n",
3957 __func__, k);
3958 break;
3959 }
3960 }
3961
3962 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3963 pr_info("%s: built %d host(s)\n", __func__,
3964 scsi_debug_add_host);
3965 }
3966 return 0;
3967
3968bus_unreg:
3969 bus_unregister(&pseudo_lld_bus);
3970dev_unreg:
3971 root_device_unregister(pseudo_primary);
3972free_vm:
3973 if (map_storep)
3974 vfree(map_storep);
3975 if (dif_storep)
3976 vfree(dif_storep);
3977 vfree(fake_storep);
3978
3979 return ret;
3980}
3981
static void __exit scsi_debug_exit(void)
{
	int k = scsi_debug_add_host;

	stop_all_queued();
	free_all_queued();
	for (; k; k--)
		sdebug_remove_adapter();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);

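/* Device model release callback for a pseudo adapter; frees its host info. */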
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}

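/*
 * Create one pseudo adapter: allocate its host info, pre-create the device
 * state for num_tgts * max_luns logical units, then register the adapter
 * device on the pseudo bus, which triggers sdebug_driver_probe().
 */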
static int sdebug_add_adapter(void)
{
	int k, devs_per_host;
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (NULL == sdbg_host) {
		printk(KERN_ERR "%s: out of memory at line %d\n",
		       __func__, __LINE__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo) {
			printk(KERN_ERR "%s: out of memory at line %d\n",
			       __func__, __LINE__);
			error = -ENOMEM;
			goto clean;
		}
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);

	error = device_register(&sdbg_host->dev);

	if (error)
		goto clean;

	++scsi_debug_add_host;
	return error;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	kfree(sdbg_host);
	return error;
}

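/* Tear down the most recently added pseudo adapter, if any remain. */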
static void sdebug_remove_adapter(void)
{
	struct sdebug_host_info *sdbg_host = NULL;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		list_del(&sdbg_host->host_list);
	}
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--scsi_debug_add_host;
}

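/*
 * Command dispatcher: decode the CDB, apply any error injection selected by
 * the every_nth and opts parameters, build the response or sense data, and
 * hand the command to schedule_resp() for (optionally delayed) completion.
 */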
static int
scsi_debug_queuecommand(struct scsi_cmnd *SCpnt)
{
	unsigned char *cmd = (unsigned char *)SCpnt->cmnd;
	int len, k;
	unsigned int num;
	unsigned long long lba;
	u32 ei_lba;
	int errsts = 0;
	int target = SCpnt->device->id;
	struct sdebug_dev_info *devip = NULL;
	int inj_recovered = 0;
	int inj_transport = 0;
	int inj_dif = 0;
	int inj_dix = 0;
	int inj_short = 0;
	int delay_override = 0;
	int unmap = 0;

	scsi_set_resid(SCpnt, 0);
	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) &&
	    !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts) && cmd) {
		char b[120];
		int n;

		len = SCpnt->cmd_len;
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len; ++k)
				n += scnprintf(b + n, sizeof(b) - n, "%02x ",
					       (unsigned int)cmd[k]);
		}
		sdev_printk(KERN_INFO, SCpnt->device, "%s: cmd %s\n", my_name,
			    b);
	}

	if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
	    (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
		return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
	devip = devInfoReg(SCpnt->device);
	if (NULL == devip)
		return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);

	if ((scsi_debug_every_nth != 0) &&
	    (atomic_inc_return(&sdebug_cmnd_count) >=
	     abs(scsi_debug_every_nth))) {
		atomic_set(&sdebug_cmnd_count, 0);
		if (scsi_debug_every_nth < -1)
			scsi_debug_every_nth = -1;
		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
			return 0; /* ignore command, causing a timeout */
		else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
			 scsi_medium_access_command(SCpnt))
			return 0; /* time out medium access commands only */
		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
			inj_recovered = 1; /* applied to reads and writes below */
		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
			inj_transport = 1;
		else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
			inj_dif = 1;
		else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
			inj_dix = 1;
		else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts)
			inj_short = 1;
	}

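	/*
	 * The well known REPORT LUNS LU accepts only a small set of commands;
	 * everything else gets ILLEGAL REQUEST, INVALID OPCODE sense data.
	 */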
	if (devip->wlun) {
		switch (*cmd) {
		case INQUIRY:
		case REQUEST_SENSE:
		case TEST_UNIT_READY:
		case REPORT_LUNS:
			break;
		default:
			if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
				printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
				       "not supported for wlun\n", *cmd);
			mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
			return schedule_resp(SCpnt, devip, errsts, 0);
		}
	}

	switch (*cmd) {
	case INQUIRY:
		delay_override = 1;
		errsts = resp_inquiry(SCpnt, target, devip);
		break;
	case REQUEST_SENSE:
		delay_override = 1;
		errsts = resp_requests(SCpnt, devip);
		break;
	case REZERO_UNIT:
	case START_STOP:
		errsts = resp_start_stop(SCpnt, devip);
		break;
	case ALLOW_MEDIUM_REMOVAL:
		errsts = check_readiness(SCpnt, UAS_ONLY, devip);
		if (errsts)
			break;
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Medium removal %s\n",
			       cmd[4] ? "inhibited" : "enabled");
		break;
	case SEND_DIAGNOSTIC:
		errsts = check_readiness(SCpnt, UAS_ONLY, devip);
		break;
	case TEST_UNIT_READY:
		errsts = check_readiness(SCpnt, UAS_TUR, devip);
		break;
	case RESERVE:
		errsts = check_readiness(SCpnt, UAS_ONLY, devip);
		break;
	case RESERVE_10:
		errsts = check_readiness(SCpnt, UAS_ONLY, devip);
		break;
	case RELEASE:
		errsts = check_readiness(SCpnt, UAS_ONLY, devip);
		break;
	case RELEASE_10:
		errsts = check_readiness(SCpnt, UAS_ONLY, devip);
		break;
	case READ_CAPACITY:
		errsts = resp_readcap(SCpnt, devip);
		break;
	case SERVICE_ACTION_IN:
		if (cmd[1] == SAI_READ_CAPACITY_16)
			errsts = resp_readcap16(SCpnt, devip);
		else if (cmd[1] == SAI_GET_LBA_STATUS) {
			/* GET LBA STATUS needs logical block provisioning */
			if (scsi_debug_lbp() == 0) {
				mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
						INVALID_COMMAND_OPCODE, 0);
				errsts = check_condition_result;
			} else
				errsts = resp_get_lba_status(SCpnt, devip);
		} else {
			mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
		}
		break;
	case MAINTENANCE_IN:
		if (MI_REPORT_TARGET_PGS != cmd[1]) {
			mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}
		errsts = resp_report_tgtpgs(SCpnt, devip);
		break;
	case READ_16:
	case READ_12:
	case READ_10:
		/* DIF Type 2 only passes protection via READ(32)/WRITE(32) */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
		    cmd[1] & 0xe0) {
			mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}

		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");

		/* fall through */
	case READ_6:
read:
		errsts = check_readiness(SCpnt, UAS_TUR, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);

		/* inject a short transfer by halving the block count */
		if (inj_short)
			num /= 2;

		errsts = resp_read(SCpnt, lba, num, ei_lba);
		if (inj_recovered && (0 == errsts)) {
			mk_sense_buffer(SCpnt, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			errsts = check_condition_result;
		} else if (inj_transport && (0 == errsts)) {
			mk_sense_buffer(SCpnt, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			errsts = check_condition_result;
		} else if (inj_dif && (0 == errsts)) {
			/* LOGICAL BLOCK GUARD CHECK FAILED */
			mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
			errsts = illegal_condition_result;
		} else if (inj_dix && (0 == errsts)) {
			mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
			errsts = illegal_condition_result;
		}
		break;
	case REPORT_LUNS:
		delay_override = 1;
		errsts = resp_report_luns(SCpnt, devip);
		break;
	case VERIFY:
		errsts = check_readiness(SCpnt, UAS_TUR, devip);
		break;
	case WRITE_16:
	case WRITE_12:
	case WRITE_10:
		/* DIF Type 2 only passes protection via READ(32)/WRITE(32) */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
		    cmd[1] & 0xe0) {
			mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}

		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");

		/* fall through */
	case WRITE_6:
write:
		errsts = check_readiness(SCpnt, UAS_TUR, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_write(SCpnt, lba, num, ei_lba);
		if (inj_recovered && (0 == errsts)) {
			mk_sense_buffer(SCpnt, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			errsts = check_condition_result;
		} else if (inj_dif && (0 == errsts)) {
			/* LOGICAL BLOCK GUARD CHECK FAILED */
			mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
			errsts = illegal_condition_result;
		} else if (inj_dix && (0 == errsts)) {
			mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
			errsts = illegal_condition_result;
		}
		break;
	case WRITE_SAME_16:
	case WRITE_SAME:
		if (cmd[1] & 0x8) {
			/* UNMAP bit set: needs lbpws (WS16) or lbpws10 (WS10) */
			if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
			    (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
				mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
						INVALID_FIELD_IN_CDB, 0);
				errsts = check_condition_result;
			} else
				unmap = 1;
		}
		if (errsts)
			break;
		errsts = check_readiness(SCpnt, UAS_TUR, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_write_same(SCpnt, lba, num, ei_lba, unmap);
		break;
	case UNMAP:
		errsts = check_readiness(SCpnt, UAS_TUR, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;

		/* UNMAP needs lbpu and a non-zero unmap_max_desc */
		if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
			mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
		} else
			errsts = resp_unmap(SCpnt, devip);
		break;
	case MODE_SENSE:
	case MODE_SENSE_10:
		errsts = resp_mode_sense(SCpnt, target, devip);
		break;
	case MODE_SELECT:
		errsts = resp_mode_select(SCpnt, 1, devip);
		break;
	case MODE_SELECT_10:
		errsts = resp_mode_select(SCpnt, 0, devip);
		break;
	case LOG_SENSE:
		errsts = resp_log_sense(SCpnt, devip);
		break;
	case SYNCHRONIZE_CACHE:
		delay_override = 1;
		errsts = check_readiness(SCpnt, UAS_TUR, devip);
		break;
	case WRITE_BUFFER:
		errsts = check_readiness(SCpnt, UAS_ONLY, devip);
		break;
	case XDWRITEREAD_10:
		if (!scsi_bidi_cmnd(SCpnt)) {
			mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			errsts = check_condition_result;
			break;
		}

		errsts = check_readiness(SCpnt, UAS_TUR, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_read(SCpnt, lba, num, ei_lba);
		if (errsts)
			break;
		errsts = resp_write(SCpnt, lba, num, ei_lba);
		if (errsts)
			break;
		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
		break;
	case VARIABLE_LENGTH_CMD:
		/* only READ(32)/WRITE(32) on a Type 2 formatted device */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {

			if ((cmd[10] & 0xe0) == 0)
				printk(KERN_ERR
				       "Unprotected RD/WR to DIF device\n");

			if (cmd[9] == READ_32) {
				BUG_ON(SCpnt->cmd_len < 32);
				goto read;
			}

			if (cmd[9] == WRITE_32) {
				BUG_ON(SCpnt->cmd_len < 32);
				goto write;
			}
		}

		mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
				INVALID_FIELD_IN_CDB, 0);
		errsts = check_condition_result;
		break;
	case 0x85:	/* ATA PASS-THROUGH(16) */
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			sdev_printk(KERN_INFO, SCpnt->device,
				    "%s: ATA PASS-THROUGH(16) not supported\n", my_name);
		mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
				INVALID_OPCODE, 0);
		errsts = check_condition_result;
		break;
	default:
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			sdev_printk(KERN_INFO, SCpnt->device,
				    "%s: Opcode: 0x%x not supported\n",
				    my_name, *cmd);
		errsts = check_readiness(SCpnt, UAS_ONLY, devip);
		if (errsts)
			break;
		mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
		errsts = check_condition_result;
		break;
	}
	return schedule_resp(SCpnt, devip, errsts,
			     (delay_override ? 0 : scsi_debug_delay));
}

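/*
 * Wrapper around scsi_debug_queuecommand(): when scsi_debug_host_lock is
 * set, serialize the call under the Scsi_Host lock; otherwise call through
 * without taking it.
 */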
static int
sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	if (scsi_debug_host_lock) {
		unsigned long iflags;
		int rc;

		spin_lock_irqsave(shost->host_lock, iflags);
		rc = scsi_debug_queuecommand(cmd);
		spin_unlock_irqrestore(shost->host_lock, iflags);
		return rc;
	} else
		return scsi_debug_queuecommand(cmd);
}

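/* change_queue_depth handler: clamp and apply a new queue depth for sdev. */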
static int
sdebug_change_qdepth(struct scsi_device *sdev, int qdepth, int reason)
{
	int num_in_q = 0;
	int bad = 0;
	unsigned long iflags;
	struct sdebug_dev_info *devip;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		return -ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
	if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
		if (qdepth < 1)
			qdepth = 1;
		/* allow qdepth to slightly exceed can_queue for testing */
		if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
			qdepth = SCSI_DEBUG_CANQUEUE + 10;
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	} else if (reason == SCSI_QDEPTH_QFULL)
		scsi_track_queue_full(sdev, qdepth);
	else
		bad = 1;
	if (bad)
		sdev_printk(KERN_WARNING, sdev,
			    "%s: unknown reason=0x%x\n", __func__, reason);
	if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
		if (SCSI_QDEPTH_QFULL == reason)
			sdev_printk(KERN_INFO, sdev,
				    "%s: -> %d, num_in_q=%d, reason: queue full\n",
				    __func__, qdepth, num_in_q);
		else {
			const char *cp;

			switch (reason) {
			case SCSI_QDEPTH_DEFAULT:
				cp = "default (sysfs ?)";
				break;
			case SCSI_QDEPTH_RAMP_UP:
				cp = "ramp up";
				break;
			default:
				cp = "unknown";
				break;
			}
			sdev_printk(KERN_INFO, sdev,
				    "%s: qdepth=%d, num_in_q=%d, reason: %s\n",
				    __func__, qdepth, num_in_q, cp);
		}
	}
	return sdev->queue_depth;
}

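/* change_queue_type handler: switch between untagged, simple and ordered tags. */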
static int
sdebug_change_qtype(struct scsi_device *sdev, int qtype)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, qtype);
		if (qtype)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		qtype = 0;
	if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
		const char *cp;

		switch (qtype) {
		case 0:
			cp = "untagged";
			break;
		case MSG_SIMPLE_TAG:
			cp = "simple tags";
			break;
		case MSG_ORDERED_TAG:
			cp = "ordered tags";
			break;
		default:
			cp = "unknown";
			break;
		}
		sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp);
	}
	return qtype;
}

static struct scsi_host_template sdebug_driver_template = {
	.show_info = scsi_debug_show_info,
	.write_info = scsi_debug_write_info,
	.proc_name = sdebug_proc_name,
	.name = "SCSI DEBUG",
	.info = scsi_debug_info,
	.slave_alloc = scsi_debug_slave_alloc,
	.slave_configure = scsi_debug_slave_configure,
	.slave_destroy = scsi_debug_slave_destroy,
	.ioctl = scsi_debug_ioctl,
	.queuecommand = sdebug_queuecommand_lock_or_not,
	.change_queue_depth = sdebug_change_qdepth,
	.change_queue_type = sdebug_change_qtype,
	.eh_abort_handler = scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue = SCSI_DEBUG_CANQUEUE,
	.this_id = 7,
	.sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
	.cmd_per_lun = DEF_CMD_PER_LUN,
	.max_sectors = -1U,
	.use_clustering = DISABLE_CLUSTERING,
	.module = THIS_MODULE,
};

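/*
 * Bus probe for a pseudo adapter: allocate a Scsi_Host, configure DIF/DIX
 * protection and the guard type from the module parameters, then add and
 * scan the host.
 */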
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int host_prot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = scsi_debug_max_queue;
	if (scsi_debug_clustering)
		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
	/* hostdata only needs room for a pointer back to sdbg_host */
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		printk(KERN_ERR "%s: scsi_host_alloc failed\n", __func__);
		error = -ENODEV;
		return error;
	}

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
		hpnt->max_id = scsi_debug_num_tgts + 1;
	else
		hpnt->max_id = scsi_debug_num_tgts;
	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;

	host_prot = 0;

	switch (scsi_debug_dif) {

	case SD_DIF_TYPE1_PROTECTION:
		host_prot = SHOST_DIF_TYPE1_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case SD_DIF_TYPE2_PROTECTION:
		host_prot = SHOST_DIF_TYPE2_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case SD_DIF_TYPE3_PROTECTION:
		host_prot = SHOST_DIF_TYPE3_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, host_prot);

	printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (scsi_debug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else
		scsi_scan_host(hpnt);

	return error;
}

static int sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	if (!sdbg_host) {
		printk(KERN_ERR "%s: Unable to locate host info\n",
		       __func__);
		return -ENODEV;
	}

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
	return 0;
}

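/* Every driver on the pseudo bus matches every device on it. */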
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};