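/*
 * SCSI Block Commands (SBC) parsing and emulation for the target core,
 * including READ CAPACITY, WRITE SAME, UNMAP, COMPARE AND WRITE and the
 * DIF protection helpers used by backend drivers.
 */
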
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

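	/*
	 * SBC: when the PMI bit (CDB byte 8, bit 0) is zero, the LOGICAL
	 * BLOCK ADDRESS field (CDB bytes 2-5) must also be zero; otherwise
	 * the command is terminated with INVALID FIELD IN CDB.
	 */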
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	/*
	 * If the capacity does not fit in 32 bits, report 0xffffffff so
	 * that the initiator retries with READ CAPACITY (16).
	 */
	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;

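	/*
	 * Set the P_TYPE and PROT_EN bits in byte 12 when DIF emulation
	 * is enabled for the backend device.
	 */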
	if (dev->dev_attrib.pi_prot_type)
		buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

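	/*
	 * Set the LBPME (logical block provisioning enabled) bit in byte 14
	 * when UNMAP or WRITE SAME with UNMAP emulation is active, per the
	 * thin provisioning sections of SBC-3.
	 */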
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] |= 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

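	/*
	 * Use the explicit range when a non-zero NUMBER OF LOGICAL BLOCKS is
	 * supplied, otherwise calculate the remaining range based on
	 * ->get_blocks() - starting LBA.
	 */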
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static int sbc_check_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long long end_lba;
	u32 sectors;

	sectors = cmd->data_length / dev->dev_attrib.block_size;
	end_lba = dev->transport->get_blocks(dev) + 1;

	if (cmd->t_task_lba + sectors > end_lba) {
		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
			cmd->t_task_lba, sectors, end_lba);
		return -EINVAL;
	}

	return 0;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
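	/*
	 * For the 6-byte READ/WRITE CDBs the TRANSFER LENGTH lives in
	 * byte 4, and a value of zero means 256 logical blocks.
	 */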
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
	       (cdb[12] << 8) + cdb[13];
}

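/*
 * Used for the VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants, where the
 * transfer length lives in bytes 28-31 of the 32-byte CDB.
 */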
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
	       (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

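/*
 * For the VARIABLE_LENGTH_CDB 32-byte extended CDBs, the 64-bit LBA is
 * carried in bytes 12-19.
 */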
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}

	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

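	/*
	 * Special case for WRITE_SAME with the UNMAP bit set, which gets
	 * translated into a block discard request within backend code.
	 */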
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;

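	/*
	 * XDWRITEREAD (10), per SBC: read the specified logical blocks,
	 * transfer the logical blocks from the data-out buffer, XOR the two,
	 * write out the data-out blocks (unless DISABLE WRITE is set), and
	 * return the XOR result in the data-in buffer.
	 *
	 * Allocate a local scratch buffer to hold a linear copy of the
	 * data-out (WRITE) payload for the XOR pass below.
	 */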
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf.
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);
	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg.
	 */
	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST when the command was
	 * successfully sent to the backend and completed without a SCSI
	 * status, so that completion takes the COMPARE AND WRITE response
	 * path.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}

static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

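	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet.
	 */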
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload.
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Build write_sg entries pointing at the WRITE half of the payload,
	 * currently assuming NoLB=1 and PAGE_SIZE sized SGL entries.
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
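	/*
	 * Save the original SGL and nents before pointing the command at
	 * the single-entry write_sg built above for the backend WRITE.
	 */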
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = MSG_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for the extra WRITE out.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
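	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before returning.
	 */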
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;

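	/*
	 * Take ->caw_sem to serialise COMPARE AND WRITE against other
	 * writers, then submit the READ half first; the comparison is done
	 * against the SGLs at cmd->t_bidi_data_sg in the callback.
	 */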
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to the compare length (NUMBER OF LOGICAL
	 * BLOCKS * block_size), since the payload allocated for COMPARE AND
	 * WRITE covers both the verify and the write buffers.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * The unlock of dev->caw_sem happens in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
	 * of the subsequent WRITE.
	 */
	return TCM_NO_SENSE;
}

static bool
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors)
{
	if (!cmd->t_prot_sg || !cmd->t_prot_nents)
		return true;

	/* cdb[1] & 0xe0 is the RDPROTECT/WRPROTECT field */
	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		if (!(cdb[1] & 0xe0))
			return true;

		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (cdb[1] & 0xe0)
			return false;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		if (!(cdb[1] & 0xe0))
			return true;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
	default:
		return true;
	}

	cmd->prot_type = dev->dev_attrib.pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;
	cmd->prot_handover = PROT_SEPERATED;

	return true;
}

sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

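		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */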
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector.
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
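		/*
		 * The allocated payload holds both the verify and the write
		 * data, so the expected transfer length is twice the NUMBER
		 * OF LOGICAL BLOCKS.
		 */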
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (!ops->execute_sync_cache) {
			size = 0;
			cmd->execute_cmd = sbc_emulate_noop;
			break;
		}

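		/*
		 * Extract LBA and range to be flushed for emulated
		 * SYNCHRONIZE_CACHE.
		 */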
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}

		size = sbc_get_size(cmd, sectors);

		/*
		 * Check to ensure that LBA + Range does not exceed past end
		 * of device for IBLOCK and FILEIO ->do_sync_cache() backends.
		 */
		if (cmd->t_task_lba || sectors) {
			if (sbc_check_valid_sectors(cmd) < 0)
				return TCM_ADDRESS_OUT_OF_RANGE;
		}
		cmd->execute_cmd = ops->execute_sync_cache;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * WRITE_SAME (10) shares the flags byte layout with
		 * WRITE_SAME (16), including the UNMAP bit in byte 1, bit 3.
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
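		/*
		 * There are still clients out there which use these old
		 * SCSI-2 commands. This mainly happens when running VMs with
		 * legacy guest systems, connected via SCSI command
		 * pass-through to iSCSI targets. Make them happy and return
		 * status GOOD.
		 */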
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}

		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP, so reject UNMAP with the ANCHOR bit set. */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

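	/* The first UNMAP block descriptor starts at an 8 byte offset. */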
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);

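/*
 * The DIF (T10 Protection Information) emulation below operates on an
 * 8-byte tuple appended to each logical block: a 16-bit guard tag holding
 * the CRC16-T10DIF of the block data, a 16-bit application tag, and a
 * 32-bit reference tag checked against the LBA (Type 1) or the expected
 * initial LBA (Type 2).
 */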
static sense_reason_t
sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
		  const void *p, sector_t sector, unsigned int ei_lba)
{
	int block_size = dev->dev_attrib.block_size;
	__be16 csum;

	csum = cpu_to_be16(crc_t10dif(p, block_size));

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
			" csum 0x%04x\n", (unsigned long long)sector,
			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
			" sector MSB: 0x%08x\n", (unsigned long long)sector,
			be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
			" ei_lba: 0x%08x\n", (unsigned long long)sector,
			be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

static void
sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		  struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
			kunmap_atomic(addr);
		}
		kunmap_atomic(paddr);
	}
}

sense_reason_t
sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = 0;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_write);

sense_reason_t
sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = sg_off;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + sg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			/*
			 * An application tag of 0xffff is the DIF escape:
			 * skip guard and reference tag checking for this
			 * block.
			 */
			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				sector++;
				offset += sizeof(struct se_dif_v1_tuple);
				continue;
			}

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_read);